hexsha stringlengths 40–40 | size int64 3–1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3–972 | max_stars_repo_name stringlengths 6–130 | max_stars_repo_head_hexsha stringlengths 40–78 | max_stars_repo_licenses listlengths 1–10 | max_stars_count int64 1–191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24–24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24–24 ⌀ | max_issues_repo_path stringlengths 3–972 | max_issues_repo_name stringlengths 6–130 | max_issues_repo_head_hexsha stringlengths 40–78 | max_issues_repo_licenses listlengths 1–10 | max_issues_count int64 1–116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24–24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24–24 ⌀ | max_forks_repo_path stringlengths 3–972 | max_forks_repo_name stringlengths 6–130 | max_forks_repo_head_hexsha stringlengths 40–78 | max_forks_repo_licenses listlengths 1–10 | max_forks_count int64 1–105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24–24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24–24 ⌀ | content stringlengths 3–1.03M | avg_line_length float64 1.13–941k | max_line_length int64 2–941k | alphanum_fraction float64 0–1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d939eb5f2c07da1352ba118732687dbbf73d265b
| 7,376
|
py
|
Python
|
research/deeplab/evaluation/base_metric.py
|
hjkim-haga/TF-OD-API
|
22ac477ff4dfb93fe7a32c94b5f0b1e74330902b
|
[
"Apache-2.0"
] | 1
|
2019-09-11T09:41:11.000Z
|
2019-09-11T09:41:11.000Z
|
research/deeplab/evaluation/base_metric.py
|
hjkim-haga/TF-OD-API
|
22ac477ff4dfb93fe7a32c94b5f0b1e74330902b
|
[
"Apache-2.0"
] | null | null | null |
research/deeplab/evaluation/base_metric.py
|
hjkim-haga/TF-OD-API
|
22ac477ff4dfb93fe7a32c94b5f0b1e74330902b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defines the top-level interface for evaluating segmentations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import numpy as np
import six
_EPSILON = 1e-10
def realdiv_maybe_zero(x, y):
"""Element-wise x / y where y may contain zeros, for those returns 0 too."""
return np.where(
np.less(np.abs(y), _EPSILON), np.zeros_like(x), np.divide(x, y))
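# Illustrative example (not part of the original file):
#   realdiv_maybe_zero(np.array([1.0, 2.0]), np.array([2.0, 0.0]))
# evaluates to array([0.5, 0.0]); the second entry is replaced by 0 because
# |y| < _EPSILON there, instead of propagating the divide-by-zero result.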
@six.add_metaclass(abc.ABCMeta)
class SegmentationMetric(object):
"""Abstract base class for computers of segmentation metrics.
Subclasses will implement both:
1. Comparing the predicted segmentation for an image with the groundtruth.
2. Computing the final metric over a set of images.
These are often done as separate steps, due to the need to accumulate
intermediate values other than the metric itself across images, computing the
actual metric value only on these accumulations after all the images have been
compared.
A simple usage would be:
metric = MetricImplementation(...)
for <image>, <groundtruth> in evaluation_set:
<prediction> = run_segmentation(<image>)
metric.compare_and_accumulate(<prediction>, <groundtruth>)
print(metric.result())
"""
def __init__(self, num_categories, ignored_label, max_instances_per_category,
offset):
"""Base initialization for SegmentationMetric.
Args:
num_categories: The number of segmentation categories (or "classes") in the
dataset.
ignored_label: A category id that is ignored in evaluation, e.g. the void
label as defined in COCO panoptic segmentation dataset.
max_instances_per_category: The maximum number of instances for each
category. Used in ensuring unique instance labels.
offset: The maximum number of unique labels. This is used, by multiplying
the ground-truth labels, to generate unique ids for individual regions
of overlap between groundtruth and predicted segments.
"""
self.num_categories = num_categories
self.ignored_label = ignored_label
self.max_instances_per_category = max_instances_per_category
self.offset = offset
self.reset()
def _naively_combine_labels(self, category_array, instance_array):
"""Naively creates a combined label array from categories and instances."""
return (category_array.astype(np.uint32) * self.max_instances_per_category +
instance_array.astype(np.uint32))
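# Illustrative note (not part of the original file): with, say,
# max_instances_per_category=256, a pixel of category 3 and instance 7 is
# mapped to the combined label 3 * 256 + 7 = 775, which stays unique as long
# as every instance id is below max_instances_per_category.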
@abc.abstractmethod
def compare_and_accumulate(
self, groundtruth_category_array, groundtruth_instance_array,
predicted_category_array, predicted_instance_array):
"""Compares predicted segmentation with groundtruth, accumulates its metric.
It is not assumed that instance ids are unique across different categories.
See for example combine_semantic_and_instance_predictions.py in official
PanopticAPI evaluation code for issues to consider when fusing category
and instance labels.
Instance ids of the ignored category have the meaning that id 0 is "void"
and the remaining ones are crowd instances.
Args:
groundtruth_category_array: A 2D numpy uint16 array of groundtruth
per-pixel category labels.
groundtruth_instance_array: A 2D numpy uint16 array of groundtruth
instance labels.
predicted_category_array: A 2D numpy uint16 array of predicted per-pixel
category labels.
predicted_instance_array: A 2D numpy uint16 array of predicted instance
labels.
Returns:
The value of the metric over all comparisons done so far, including this
one, as a float scalar.
"""
raise NotImplementedError('Must be implemented in subclasses.')
@abc.abstractmethod
def result(self):
"""Computes the metric over all comparisons done so far."""
raise NotImplementedError('Must be implemented in subclasses.')
@abc.abstractmethod
def detailed_results(self, is_thing=None):
"""Computes and returns the detailed final metric results.
Args:
is_thing: A boolean array of length `num_categories`. The entry
`is_thing[category_id]` is True iff that category is a "thing" category
instead of "stuff."
Returns:
A dictionary with a breakdown of metrics and/or metric factors by things,
stuff, and all categories.
"""
raise NotImplementedError('Not implemented in subclasses.')
@abc.abstractmethod
def result_per_category(self):
"""For supported metrics, return individual per-category metric values.
Returns:
A numpy array of shape `[self.num_categories]`, where index `i` is the
metrics value over only that category.
"""
raise NotImplementedError('Not implemented in subclass.')
def print_detailed_results(self, is_thing=None, print_digits=3):
"""Prints out a detailed breakdown of metric results.
Args:
is_thing: A boolean array of length num_categories.
`is_thing[category_id]` will say whether that category is a "thing"
rather than "stuff."
print_digits: Number of significant digits to print in computed metrics.
"""
raise NotImplementedError('Not implemented in subclass.')
@abc.abstractmethod
def merge(self, other_instance):
"""Combines the accumulated results of another instance into self.
The following two cases should put `metric_a` into an equivalent state.
Case 1 (with merge):
metric_a = MetricsSubclass(...)
metric_a.compare_and_accumulate(<comparison 1>)
metric_a.compare_and_accumulate(<comparison 2>)
metric_b = MetricsSubclass(...)
metric_b.compare_and_accumulate(<comparison 3>)
metric_b.compare_and_accumulate(<comparison 4>)
metric_a.merge(metric_b)
Case 2 (without merge):
metric_a = MetricsSubclass(...)
metric_a.compare_and_accumulate(<comparison 1>)
metric_a.compare_and_accumulate(<comparison 2>)
metric_a.compare_and_accumulate(<comparison 3>)
metric_a.compare_and_accumulate(<comparison 4>)
Args:
other_instance: Another compatible instance of the same metric subclass.
"""
raise NotImplementedError('Not implemented in subclass.')
@abc.abstractmethod
def reset(self):
"""Resets the accumulation to the metric class's state at initialization.
Note that this function will be called in SegmentationMetric.__init__.
"""
raise NotImplementedError('Must be implemented in subclasses.')
| 38.416667
| 81
| 0.708107
|
225d661ba64f4cd143cdd4eec8f14e1f68d23e13
| 7,975
|
py
|
Python
|
mwp_solver/dataloader/abstract_dataloader.py
|
max-stack/MWP-SS-Metrics
|
01268f2d6da716596216b04de4197e345b96c219
|
[
"MIT"
] | null | null | null |
mwp_solver/dataloader/abstract_dataloader.py
|
max-stack/MWP-SS-Metrics
|
01268f2d6da716596216b04de4197e345b96c219
|
[
"MIT"
] | null | null | null |
mwp_solver/dataloader/abstract_dataloader.py
|
max-stack/MWP-SS-Metrics
|
01268f2d6da716596216b04de4197e345b96c219
|
[
"MIT"
] | null | null | null |
# Code Taken from https://github.com/LYH-YF/MWPToolkit
# -*- encoding: utf-8 -*-
# @Author: Yihuai Lan
# @Time: 2021/08/18 11:34:06
# @File: abstract_dataloader.py
from utils.enum_type import FixType
class AbstractDataLoader(object):
"""abstract dataloader
the base class of dataloader class
"""
def __init__(self, config, dataset):
"""
:param config:
:param dataset:
It is expected that config includes the parameters below:
model (str): model name.
equation_fix (str): [infix | postfix | prefix], convert equation to specified format.
train_batch_size (int): the training batch size.
test_batch_size (int): the testing batch size.
symbol_for_tree (bool): build output symbols for tree or not.
share_vocab (bool): encoder and decoder of the model share the same vocabulary, often seen in Seq2Seq models.
max_len (int|None): max input length.
add_sos (bool): add sos token at the head of input sequence.
add_eos (bool): add eos token at the tail of input sequence.
"""
super().__init__()
self.model = config["model"]
self.equation_fix = config["equation_fix"]
self.train_batch_size = config["train_batch_size"]
self.test_batch_size = config["test_batch_size"]
self.symbol_for_tree = config["symbol_for_tree"]
self.share_vocab = config["share_vocab"]
self.max_len = config["max_len"]
self.max_equ_len = config["max_equ_len"]
self.add_sos = config["add_sos"]
self.add_eos = config["add_eos"]
self.filt_dirty = config["filt_dirty"]
self.device = config["device"]
self.dataset = dataset
self.in_pad_token = None
self.in_unk_token = None
self.out_pad_token = None
self.out_unk_token = None
self.temp_unk_token = None
self.temp_pad_token = None
self.trainset_batches = []
self.validset_batches = []
self.testset_batches = []
self.__trainset_batch_idx = -1
self.__validset_batch_idx = -1
self.__testset_batch_idx = -1
self.trainset_batch_nums = 0
self.validset_batch_nums = 0
self.testset_batch_nums = 0
def _pad_input_batch(self, batch_seq, batch_seq_len):
if self.max_len is not None:
max_length = self.max_len
else:
max_length = max(batch_seq_len)
for idx, length in enumerate(batch_seq_len):
if length < max_length:
batch_seq[idx] += [self.in_pad_token for i in range(max_length - length)]
else:
if self.add_sos and self.add_eos:
batch_seq[idx] = [batch_seq[idx][0]] + batch_seq[idx][1:max_length-1] + [batch_seq[idx][-1]]
else:
batch_seq[idx] = batch_seq[idx][:max_length]
return batch_seq
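# Illustrative note (not from the original code): with max_length 5 and
# in_pad_token 0, the sequence [7, 8, 9] becomes [7, 8, 9, 0, 0]; a longer
# sequence is cut to its first 5 tokens, except that when both add_sos and
# add_eos are set the first and last tokens are kept and the middle is
# truncated instead.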
def _pad_output_batch(self, batch_target, batch_target_len):
if self.max_equ_len is not None:
max_length = self.max_equ_len
else:
max_length = max(batch_target_len)
for idx, length in enumerate(batch_target_len):
if length < max_length:
batch_target[idx] += [self.out_pad_token for i in range(max_length - length)]
else:
batch_target[idx] = batch_target[idx][:max_length]
return batch_target
def _word2idx(self, sentence):
sentence_idx = []
for word in sentence:
try:
idx = self.dataset.in_word2idx[word]
except:
idx = self.in_unk_token
sentence_idx.append(idx)
return sentence_idx
def _equ_symbol2idx(self, equation):
equ_idx = []
if self.equation_fix == FixType.MultiWayTree:
for symbol in equation:
if isinstance(symbol, list):
sub_equ_idx = self._equ_symbol2idx(symbol)
equ_idx.append(sub_equ_idx)
else:
if self.share_vocab:
try:
idx = self.dataset.in_word2idx[symbol]
except:
idx = self.in_unk_token
else:
try:
idx = self.dataset.out_symbol2idx[symbol]
except:
idx = self.out_unk_token
equ_idx.append(idx)
else:
for word in equation:
if self.share_vocab:
try:
idx = self.dataset.in_word2idx[word]
except:
idx = self.in_unk_token
else:
try:
idx = self.dataset.out_symbol2idx[word]
except:
idx = self.out_unk_token
equ_idx.append(idx)
return equ_idx
def _temp_symbol2idx(self, template):
temp_idx = []
if self.equation_fix == FixType.MultiWayTree:
for symbol in template:
if isinstance(symbol, list):
sub_equ_idx = self._equ_symbol2idx(symbol)
temp_idx.append(sub_equ_idx)
else:
if self.share_vocab:
try:
idx = self.dataset.in_word2idx[symbol]
except:
idx = self.in_unk_token
else:
try:
idx = self.dataset.temp_symbol2idx[symbol]
except:
idx = self.out_unk_token
temp_idx.append(idx)
else:
for word in template:
if self.share_vocab:
try:
idx = self.dataset.in_word2idx[word]
except:
idx = self.in_unk_token
else:
try:
idx = self.dataset.temp_symbol2idx[word]
except:
idx = self.temp_unk_token
temp_idx.append(idx)
return temp_idx
def _get_mask(self, batch_seq_len):
max_length = max(batch_seq_len)
batch_mask = []
for idx, length in enumerate(batch_seq_len):
batch_mask.append([1] * length + [0] * (max_length - length))
return batch_mask
def _get_input_mask(self, batch_seq_len):
if self.max_len:
max_length = self.max_len
else:
max_length = max(batch_seq_len)
batch_mask = []
for idx, length in enumerate(batch_seq_len):
batch_mask.append([1] * length + [0] * (max_length - length))
return batch_mask
def _build_num_stack(self, equation, num_list):
num_stack = []
for word in equation:
temp_num = []
flag_not = True
if word not in self.dataset.out_idx2symbol:
flag_not = False
if "NUM" in word:
temp_num.append(int(word[4:]))
for i, j in enumerate(num_list):
if j == word:
temp_num.append(i)
if not flag_not and len(temp_num) != 0:
num_stack.append(temp_num)
if not flag_not and len(temp_num) == 0:
num_stack.append([_ for _ in range(len(num_list))])
num_stack.reverse()
return num_stack
def load_data(self):
"""load data.
"""
raise NotImplementedError
def load_next_batch(self):
"""load data.
"""
raise NotImplementedError
def init_batches(self):
"""initialize batches.
"""
raise NotImplementedError
| 36.085973
| 117
| 0.529906
|
25bff34c849ffc5bd9b9df7255fc784e3d1dc2e4
| 10,508
|
py
|
Python
|
tests/platform_tests/daemon/test_pcied.py
|
congh-nvidia/sonic-mgmt
|
05094321ed58270ac06d1a0ef575a4ab9ea3ddd6
|
[
"Apache-2.0"
] | null | null | null |
tests/platform_tests/daemon/test_pcied.py
|
congh-nvidia/sonic-mgmt
|
05094321ed58270ac06d1a0ef575a4ab9ea3ddd6
|
[
"Apache-2.0"
] | null | null | null |
tests/platform_tests/daemon/test_pcied.py
|
congh-nvidia/sonic-mgmt
|
05094321ed58270ac06d1a0ef575a4ab9ea3ddd6
|
[
"Apache-2.0"
] | null | null | null |
"""
Check daemon status inside PMON container. Each daemon status is checked under the conditions below in this script:
* Daemon Running Status
* Daemon Stop status
* Daemon Restart status
This script is to cover the test case in the SONiC platform daemon and service test plan:
https://github.com/Azure/sonic-mgmt/blob/master/docs/testplan/PMON-Services-Daemons-test-plan.md
"""
import logging
import re
import time
from datetime import datetime
import pytest
from tests.common.helpers.assertions import pytest_assert
from tests.common.platform.daemon_utils import check_pmon_daemon_enable_status
from tests.common.platform.processes_utils import wait_critical_processes, check_critical_processes
from tests.common.utilities import compose_dict_from_cli, skip_release, wait_until
logger = logging.getLogger(__name__)
pytestmark = [
pytest.mark.topology('any'),
pytest.mark.sanity_check(skip_sanity=True),
pytest.mark.disable_loganalyzer
]
expected_running_status = "RUNNING"
expected_stopped_status = "STOPPED"
expected_exited_status = "EXITED"
daemon_name = "pcied"
SIG_STOP_SERVICE = None
SIG_TERM = "-15"
SIG_KILL = "-9"
pcie_devices_status_tbl_key = ""
status_field = "status"
expected_pcied_devices_status = "PASSED"
@pytest.fixture(scope="module", autouse=True)
def setup(duthosts, rand_one_dut_hostname):
duthost = duthosts[rand_one_dut_hostname]
daemon_en_status = check_pmon_daemon_enable_status(duthost, daemon_name)
if daemon_en_status is False:
pytest.skip("{} is not enabled in {} {}".format(daemon_name, duthost.facts['platform'], duthost.os_version))
@pytest.fixture(scope="module", autouse=True)
def teardown_module(duthosts, rand_one_dut_hostname):
duthost = duthosts[rand_one_dut_hostname]
yield
daemon_status, daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
if daemon_status != "RUNNING":
duthost.start_pmon_daemon(daemon_name)
time.sleep(10)
logger.info("Tearing down: to make sure all the critical services, interfaces and transceivers are good")
check_critical_processes(duthost, watch_secs=10)
@pytest.fixture
def check_daemon_status(duthosts, rand_one_dut_hostname):
duthost = duthosts[rand_one_dut_hostname]
daemon_status, daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
if daemon_status != "RUNNING":
duthost.start_pmon_daemon(daemon_name)
time.sleep(10)
@pytest.fixture(scope="module", autouse=True)
def get_pcie_devices_tbl_key(duthosts,rand_one_dut_hostname):
duthost = duthosts[rand_one_dut_hostname]
skip_release(duthost, ["201811", "201911"])
command_output = duthost.shell("redis-cli -n 6 keys '*' | grep PCIE_DEVICES")
global pcie_devices_status_tbl_key
pcie_devices_status_tbl_key = command_output["stdout"]
def collect_data(duthost):
keys = duthost.shell('redis-cli -n 6 keys "PCIE_DEVICE|*"')['stdout_lines']
dev_data = {}
for k in keys:
data = duthost.shell('redis-cli -n 6 hgetall "{}"'.format(k))['stdout_lines']
data = compose_dict_from_cli(data)
dev_data[k] = data
dev_summary_status = duthost.get_pmon_daemon_db_value(pcie_devices_status_tbl_key, status_field)
return {'status': dev_summary_status, 'devices': dev_data}
def wait_data(duthost, expected_key_count):
class shared_scope:
data_after_restart = {}
def _collect_data():
shared_scope.data_after_restart = collect_data(duthost)
device_keys_found = len(shared_scope.data_after_restart['devices'])
if device_keys_found != 0:
logger.info("Expected PCIE device keys :{}, Current device key count {}".format(expected_key_count, device_keys_found))
return device_keys_found == expected_key_count
pcied_pooling_interval = 60
wait_until(pcied_pooling_interval, 6, 0, _collect_data)
return shared_scope.data_after_restart
@pytest.fixture(scope='module')
def data_before_restart(duthosts, rand_one_dut_hostname):
duthost = duthosts[rand_one_dut_hostname]
data = collect_data(duthost)
return data
def test_pmon_pcied_running_status(duthosts, rand_one_dut_hostname, data_before_restart):
"""
@summary: This test case is to check pcied status on dut
"""
duthost = duthosts[rand_one_dut_hostname]
daemon_status, daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
logger.info("{} daemon is {} with pid {}".format(daemon_name, daemon_status, daemon_pid))
pytest_assert(daemon_status == expected_running_status,
"{} expected running status is {} but is {}".format(daemon_name, expected_running_status, daemon_status))
pytest_assert(daemon_pid != -1,
"{} expected pid is a positive integer but is {}".format(daemon_name, daemon_pid))
daemon_db_value = data_before_restart['status']
pytest_assert(daemon_db_value == expected_pcied_devices_status,
"Expected {} {} is {} but is {}".format(get_pcie_devices_tbl_key, status_field, expected_pcied_devices_status, daemon_db_value))
pytest_assert(data_before_restart['devices'], 'pcied data not found in DB')
def test_pmon_pcied_stop_and_start_status(check_daemon_status, duthosts, rand_one_dut_hostname, data_before_restart):
"""
@summary: This test case is to check the pcied stopped and restarted status
"""
duthost = duthosts[rand_one_dut_hostname]
pre_daemon_status, pre_daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
logger.info("{} daemon is {} with pid {}".format(daemon_name, pre_daemon_status, pre_daemon_pid))
duthost.stop_pmon_daemon(daemon_name, SIG_STOP_SERVICE)
time.sleep(2)
daemon_status, daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
pytest_assert(daemon_status == expected_stopped_status,
"{} expected stopped status is {} but is {}".format(daemon_name, expected_stopped_status, daemon_status))
pytest_assert(daemon_pid == -1,
"{} expected pid is -1 but is {}".format(daemon_name, daemon_pid))
data = collect_data(duthost)
pytest_assert(not data['status'], "DB data is not cleared on daemon stop")
pytest_assert(not data['devices'], "DB data is not cleared on daemon stop")
duthost.start_pmon_daemon(daemon_name)
time.sleep(10)
post_daemon_status, post_daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
pytest_assert(post_daemon_status == expected_running_status,
"{} expected restarted status is {} but is {}".format(daemon_name, expected_running_status, post_daemon_status))
pytest_assert(post_daemon_pid != -1,
"{} expected pid is -1 but is {}".format(daemon_name, post_daemon_pid))
pytest_assert(post_daemon_pid > pre_daemon_pid,
"Restarted {} pid should be bigger than {} but it is {}".format(daemon_name, pre_daemon_pid, post_daemon_pid))
data_after_restart = wait_data(duthost, len(data_before_restart['devices']))
pytest_assert(data_after_restart == data_before_restart, 'DB data present before and after restart does not match')
def test_pmon_pcied_term_and_start_status(check_daemon_status, duthosts, rand_one_dut_hostname, data_before_restart):
"""
@summary: This test case is to check the pcied terminated and restarted status
"""
duthost = duthosts[rand_one_dut_hostname]
skip_release(duthost, ["201811", "201911"])
pre_daemon_status, pre_daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
logger.info("{} daemon is {} with pid {}".format(daemon_name, pre_daemon_status, pre_daemon_pid))
duthost.stop_pmon_daemon(daemon_name, SIG_TERM, pre_daemon_pid)
daemon_status, daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
pytest_assert(daemon_status != expected_running_status and pre_daemon_pid != daemon_pid,
"{} status for SIG_TERM should not be {} with pid:{}!".format(daemon_name, daemon_status, daemon_pid))
time.sleep(10)
post_daemon_status, post_daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
pytest_assert(post_daemon_status == expected_running_status,
"{} expected restarted status is {} but is {}".format(daemon_name, expected_running_status, post_daemon_status))
pytest_assert(post_daemon_pid != -1,
"{} expected pid is -1 but is {}".format(daemon_name, post_daemon_pid))
pytest_assert(post_daemon_pid > pre_daemon_pid,
"Restarted {} pid should be bigger than {} but it is {}".format(daemon_name, pre_daemon_pid, post_daemon_pid))
data_after_restart = wait_data(duthost, len(data_before_restart['devices']))
pytest_assert(data_after_restart == data_before_restart, 'DB data present before and after restart does not match')
def test_pmon_pcied_kill_and_start_status(check_daemon_status, duthosts, rand_one_dut_hostname, data_before_restart):
"""
@summary: This test case is to check the pcied killed unexpectedly (automatically restarted) status
"""
duthost = duthosts[rand_one_dut_hostname]
pre_daemon_status, pre_daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
logger.info("{} daemon is {} with pid {}".format(daemon_name, pre_daemon_status, pre_daemon_pid))
duthost.stop_pmon_daemon(daemon_name, SIG_KILL, pre_daemon_pid)
daemon_status, daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
pytest_assert(daemon_status != expected_running_status,
"{} unexpected killed status is not {}".format(daemon_name, daemon_status))
time.sleep(10)
post_daemon_status, post_daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
pytest_assert(post_daemon_status == expected_running_status,
"{} expected restarted status is {} but is {}".format(daemon_name, expected_running_status, post_daemon_status))
pytest_assert(post_daemon_pid != -1,
"{} expected pid is -1 but is {}".format(daemon_name, post_daemon_pid))
pytest_assert(post_daemon_pid > pre_daemon_pid,
"Restarted {} pid should be bigger than {} but it is {}".format(daemon_name, pre_daemon_pid, post_daemon_pid))
data_after_restart = wait_data(duthost, len(data_before_restart['devices']))
pytest_assert(data_after_restart == data_before_restart, 'DB data present before and after restart does not match')
| 46.702222
| 154
| 0.736582
|
c3f5a378b1c04dc3f1deaf8de78f962511f467c4
| 1,781
|
py
|
Python
|
icevision/engines/fastai/adapters/convert_dataloader_to_fastai.py
|
bluseking/-first-agnostic-computer-vision-framework-to-offer-a-curated-collection-with-hundreds-of-high-qualit
|
2d91eacfab7fcaf09c93352f1e7816ccb2c252b9
|
[
"Apache-2.0"
] | null | null | null |
icevision/engines/fastai/adapters/convert_dataloader_to_fastai.py
|
bluseking/-first-agnostic-computer-vision-framework-to-offer-a-curated-collection-with-hundreds-of-high-qualit
|
2d91eacfab7fcaf09c93352f1e7816ccb2c252b9
|
[
"Apache-2.0"
] | null | null | null |
icevision/engines/fastai/adapters/convert_dataloader_to_fastai.py
|
bluseking/-first-agnostic-computer-vision-framework-to-offer-a-curated-collection-with-hundreds-of-high-qualit
|
2d91eacfab7fcaf09c93352f1e7816ccb2c252b9
|
[
"Apache-2.0"
] | null | null | null |
__all__ = [
"convert_dataloader_to_fastai",
"convert_dataloaders_to_fastai",
]
from icevision.imports import *
from icevision.engines.fastai.imports import *
from torch.utils.data import SequentialSampler, RandomSampler
def convert_dataloader_to_fastai(dataloader: DataLoader):
def raise_error_convert(data):
raise NotImplementedError
class FastaiDataLoaderWithCollate(fastai.DataLoader):
def create_batch(self, b):
return (dataloader.collate_fn, raise_error_convert)[self.prebatched](b)
# use the type of sampler to determine if shuffle is true or false
if isinstance(dataloader.sampler, SequentialSampler):
shuffle = False
elif isinstance(dataloader.sampler, RandomSampler):
shuffle = True
else:
raise ValueError(
f"Sampler {type(dataloader.sampler)} not supported. Fastai only"
"supports RandomSampler or SequentialSampler"
)
return FastaiDataLoaderWithCollate(
dataset=dataloader.dataset,
bs=dataloader.batch_size,
num_workers=dataloader.num_workers,
drop_last=dataloader.drop_last,
shuffle=shuffle,
pin_memory=dataloader.pin_memory,
)
def convert_dataloaders_to_fastai(
dls: List[Union[DataLoader, fastai.DataLoader]], device=None
):
fastai_dls = []
for dl in dls:
if isinstance(dl, DataLoader):
fastai_dl = convert_dataloader_to_fastai(dl)
elif isinstance(dl, fastai.DataLoader):
fastai_dl = dl
else:
raise ValueError(f"dl type {type(dl)} not supported")
fastai_dls.append(fastai_dl)
device = device or fastai.default_device()
fastai_dls = fastai.DataLoaders(*fastai_dls).to(device)
return fastai_dls
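# Illustrative usage sketch (not part of the original file): given ordinary
# torch DataLoaders built elsewhere, e.g.
#   train_dl = DataLoader(train_ds, batch_size=16, shuffle=True)
#   valid_dl = DataLoader(valid_ds, batch_size=16, shuffle=False)
# (train_ds/valid_ds being placeholder datasets), they can be wrapped with
#   dls = convert_dataloaders_to_fastai([train_dl, valid_dl])
# which yields a fastai.DataLoaders object on the default device.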
| 31.245614
| 83
| 0.696238
|
5fcaaf571011adc17a340709e912469c8eb9d917
| 176
|
py
|
Python
|
pyRscript/__init__.py
|
chairco/pyRscript
|
e952f450a873de52baa4fe80ed901f0cf990c0b7
|
[
"MIT"
] | 1
|
2017-12-01T07:56:54.000Z
|
2017-12-01T07:56:54.000Z
|
pyRscript/__init__.py
|
chairco/pyRscript
|
e952f450a873de52baa4fe80ed901f0cf990c0b7
|
[
"MIT"
] | null | null | null |
pyRscript/__init__.py
|
chairco/pyRscript
|
e952f450a873de52baa4fe80ed901f0cf990c0b7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
__version__ = '0.0.2'
__author__ = 'chairco'
__email__ = 'chairco@gmail.com'
__license__ = 'MIT'
__copyright__ = 'Copyright (c) 2017, chairco.'
| 25.142857
| 46
| 0.642045
|
d430f9f465e3f89a2aa70e1e88af99a12ddc7480
| 841
|
py
|
Python
|
walk/argsextractor.py
|
willybaer/walk
|
436522bc1d9b41f46260b190e00a5eea097e40ef
|
[
"MIT"
] | 3
|
2019-02-05T14:20:35.000Z
|
2021-12-28T21:43:18.000Z
|
walk/argsextractor.py
|
willybaer/walk
|
436522bc1d9b41f46260b190e00a5eea097e40ef
|
[
"MIT"
] | null | null | null |
walk/argsextractor.py
|
willybaer/walk
|
436522bc1d9b41f46260b190e00a5eea097e40ef
|
[
"MIT"
] | 1
|
2022-01-03T10:09:29.000Z
|
2022-01-03T10:09:29.000Z
|
import sys
import getopt
from functools import reduce
def filter_args(argv:list, opts:list):
shortopts = list(map(lambda o: o[0], opts))
shortopts_str = reduce(lambda a,b: a + b, shortopts)
longopts = list(map(lambda o: o[1], opts))
try:
found_opts = getopt.getopt(argv[1:], shortopts_str, longopts)
except getopt.GetoptError as e:
print(e)
sys.exit(2)
# filter args
opts_map = {}
for opt in opts:
s_opt = opt[0].replace(':', '')
l_opt = opt[1].replace('=', '')
opts_map[s_opt] = opt[1]
opts_map[l_opt] = opt[1]
args = {}
for oa in found_opts[0]:
key = opts_map[oa[0].replace('-', '')]
if key in args:
args[key].append(oa[1].strip())
else:
args[key] = [oa[1].strip()]
return args
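# Illustrative usage sketch (not part of the original file):
#   filter_args(['prog', '-i', 'data.csv', '--name', 'demo'],
#               [('i:', 'input='), ('n:', 'name=')])
# returns {'input=': ['data.csv'], 'name=': ['demo']}; every parsed value is
# grouped under the long-option spelling of the matching option.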
| 25.484848
| 69
| 0.549346
|
afdd82db4fb829d8f01e137aa0bdcf12821f5c40
| 12,144
|
py
|
Python
|
test_diffnet.py
|
proteneer/DiffNet
|
b45a3d74f6b962aef01fc135270691e97729a94f
|
[
"MIT"
] | 21
|
2019-07-15T18:24:46.000Z
|
2022-01-20T02:06:50.000Z
|
test_diffnet.py
|
proteneer/DiffNet
|
b45a3d74f6b962aef01fc135270691e97729a94f
|
[
"MIT"
] | 3
|
2020-07-08T11:12:13.000Z
|
2020-10-22T09:29:03.000Z
|
test_diffnet.py
|
proteneer/DiffNet
|
b45a3d74f6b962aef01fc135270691e97729a94f
|
[
"MIT"
] | 6
|
2019-07-15T18:01:12.000Z
|
2021-01-22T12:15:23.000Z
|
import numpy as np
import cvxopt
from cvxopt import matrix
from diffnet import *
import netbfe
import A_opt
def check_optimality( sij, nij, optimality='A', delta=1E-1, ntimes=10):
'''
Return True if nij is the optimal.
'''
K = sij.size[0]
C = covariance( cvxopt.div( nij, sij**2))
fC = dict()
if optimality=='A':
fC['A'] = np.trace( C)
if optimality=='D':
fC['D'] = np.log( linalg.det( C))
if optimality=='E':
fC['E'] = np.max( linalg.eig( C)[0]).real
if optimality=='Etree':
fC['Etree'] = np.max( linalg.eig( C)[0]).real
df = np.zeros( ntimes)
for t in xrange( ntimes):
zeta = matrix( 1. + 2*delta*(np.random.rand( K, K) - 0.5))
nijp = cvxopt.mul( nij, zeta)
nijp = 0.5*(nijp + nijp.trans()) # Symmetrize
s = sum_upper_triangle( nijp)
nijp /= s
Cp = covariance( cvxopt.div( nijp, sij**2))
if (optimality=='A'):
fCp = np.trace( Cp)
elif (optimality=='D'):
fCp = np.log( linalg.det( Cp))
elif (optimality=='E' or optimality=='Etree'):
fCp = np.max( linalg.eig( Cp)[0]).real
df[t] = fCp - fC[optimality]
print df
return np.all( df >= 0)
def check_update_A_optimal( sij, delta=5e-1, ntimes=10, tol=1e-5):
'''
'''
K = matrix(sij).size[0]
ntotal = 100
fopt = A_optimize( sij)
nopt = ntotal*fopt
# remove some random samples from the optimal
nsofar = nopt - nopt*0.1*np.random.rand( K, K)
nsofar = matrix( 0.5*(nsofar + nsofar.T))
nadd = ntotal - sum_upper_triangle( nsofar)
nnext = A_optimize( sij, nadd, nsofar)
success1 = True
if np.abs(sum_upper_triangle( matrix(nnext)) - nadd) > tol:
print 'Failed to allocate additional samples to preserve the sum!'
print '|%f - %f| > %f' % (sum_upper_triangle( matrix(nnext)), nadd, tol)
success1 = False
# The new samples and the existing samples should together make up the
# optimal allocation.
delta = sum_upper_triangle( abs( nnext + nsofar - nopt))/ntotal
delta /= (0.5*K*(K+1))
if delta > tol:
print 'Failed: Updating allocation does not yield A-optimal!'
print 'delta = %f > %f' % (delta, tol)
success1 = False
sij0 = np.random.rand( K, K)
sij0 = matrix(0.5*(sij0 + sij0.T))
nsofar = 100*A_optimize( sij0)
nadd = 100
# nnext = update_A_optimal_sdp( sij, nadd, nsofar)
nnext = A_optimize( sij, nadd, nsofar)
ntotal = matrix( nsofar + nnext)
C = covariance( cvxopt.div( ntotal/sum_upper_triangle(ntotal), matrix(sij)**2))
trC = np.trace( C)
dtr = np.zeros( ntimes)
for t in xrange( ntimes):
zeta = matrix( 1. + 2*delta*(np.random.rand( K, K) - 0.5))
nnextp = cvxopt.mul( nnext, zeta)
nnextp = 0.5*(nnextp + nnextp.trans())
s = sum_upper_triangle( nnextp)
nnextp *= (nadd/sum_upper_triangle( nnextp))
ntotal = matrix( nsofar + nnextp)
Cp = covariance( cvxopt.div( ntotal/sum_upper_triangle(ntotal), matrix(sij)**2 ))
dtr[t] = np.trace( Cp) - trC
success2 = np.all( dtr[np.abs(dtr/trC) > tol] >= 0)
# success2 = np.all( dtr >= 0)
if not success2:
print 'Iterative update of A-optimal failed to minimize tr(C)=%f!' % trC
print dtr
nnext = round_to_integers( nnext)
if sum_upper_triangle( matrix(nnext)) != nadd:
print 'Failed to allocate additional samples to preserve the sum!'
print '%d != %d' % (sum_upper_triangle( matrix(nnext)), nadd)
success2 = False
return success1 and success2
def check_sparse_A_optimal( sij, ntimes=10, delta=1e-1, tol=1e-5):
'''
'''
sij = matrix( sij)
K = sij.size[0]
nsofar = np.zeros( (K, K))
nadd = 1.
nopt = A_optimize( sij)
nij = sparse_A_optimal_network( sij, nadd, nsofar, 0, K, False)
success = True
deltan = sum_upper_triangle( abs(nopt - nij))/(0.5*K*(K+1))
if deltan > tol:
print 'FAIL: sparse optimization disagree with dense optimzation.'
print '| n - nopt | = %g > %g' % (deltan, tol)
success = False
else:
print 'SUCCESS: sparse optimization agrees with dense optimization.'
print '| n - nopt | = %g <= %g' % (deltan, tol)
n_measures = 8
connectivity = 2
nij = sparse_A_optimal_network( sij, nadd, nsofar, n_measures, connectivity,
True)
print nij
trC = np.trace( covariance( cvxopt.div( nij, sij**2)))
dtr = np.zeros( ntimes)
for t in xrange( ntimes):
zeta = matrix( 1. + 2*delta*(np.random.rand( K, K) - 0.5))
nijp = cvxopt.mul( nij, zeta)
nijp = 0.5*(nijp + nijp.trans()) # Symmetrize
s = sum_upper_triangle( nijp)
nijp *= nadd/s
trCp = np.trace( covariance( cvxopt.div( nijp, sij**2)))
dtr[t] = trCp - trC
success2 = np.all( dtr >= 0)
if not success2:
print 'FAIL: sparse optimization fail to minimize.'
print dtr
else:
print 'SUCCESS: sparse optimization minimizes.'
return success and success2
def check_relative_only_A_optimal( sij):
'''
'''
sij = matrix(sij)
K = sij.size[0]
for i in range(K): sij[i,i] = np.inf
nij = A_optimize( sij)
success = check_optimality( sij, nij)
if (not success):
print 'FAIL: A_optimize for relative-only measurements did not generate optimal.'
else:
print 'SUCCESS: A_optimize for relative-only measurements.'
return success
def check_hessian( dF, d2F, x0):
'''
Check the Hessian for correctness.
Returns:
err: float - the square root of the sum of squres of the difference
between finite difference approximation and the analytical results
at the point x0.
'''
from scipy.optimize import check_grad
N = len(x0)
esqr = 0.
for i in xrange( N):
def func( x):
return dF(x)[i]
def dfunc( x):
return d2F(x)[i,:]
e = check_grad( func, dfunc, x0)
esqr += e*e
return np.sqrt(esqr)
def fabricate_measurements( K=10, sigma=0.1, noerror=True, disconnect=False):
x0 = np.random.rand( K)
xij = np.zeros( (K, K))
invsij2 = 1/(sigma*sigma)*np.random.rand( K, K)
invsij2 = 0.5*(invsij2 + np.transpose( invsij2))
sij = np.sqrt( 1./invsij2)
if noerror: sij *= 0.
for i in xrange(K):
xij[i][i] = x0[i] + sij[i,i]*np.random.randn()
for j in xrange(i+1, K):
xij[i][j] = x0[i] - x0[j] + sij[i][j]*np.random.randn()
xij[j][i] = -xij[i][j]
if (disconnect >= 1):
# disconnect the origin and thus eliminate the individual measurements
for i in xrange(K): invsij2[i][i] = 0
if (disconnect >= 2):
# disconnect the network into the given number of disconnected
# components.
for i in xrange( K):
c1 = i % disconnect
for j in xrange( i+1, K):
c2 = j % disconnect
if (c1 != c2):
invsij2[i][j] = invsij2[j][i] = 0
return x0, xij, invsij2
def check_MLest( K=10, sigma=0.1, noerr=True, disconnect=False):
x0, xij, invsij2 = fabricate_measurements( K, sigma, noerr, disconnect)
if (not disconnect):
xML, vML = MLestimate( xij, invsij2)
else:
xML, vML = MLestimate( xij, invsij2,
np.concatenate( [x0[:disconnect+1],
[None]*(K-disconnect-1)]))
# Compute the RMSE between the input quantities and the estimation by ML.
return np.sqrt(np.sum(np.square(xML - x0))/K)
def test_covariance( K=5, nodiag=False, T=4000, tol=0.25):
sigma = 10.
x0 = 100*np.random.rand( K)
xij = np.zeros( (K, K))
sij = sigma*np.random.rand( K, K)
sij = 0.5*(sij + sij.T)
if nodiag:
for i in range(K): sij[i,i] = np.inf
xML = np.zeros( (K, T))
for t in range( T):
for i in range(K):
if not nodiag:
xij[i,i] = x0[i] + sij[i,i]*np.random.randn()
for j in range(i+1, K):
xij[i,j] = x0[i] - x0[j] + sij[i,j]*np.random.randn()
xij[j,i] = -xij[i,j]
xML[:, t], vML = MLestimate( xij, 1./sij**2, x0)
cov0 = np.cov( xML)
cov = covariance( 1/sij**2)
dx = x0 - np.mean( xML, axis=1)
if np.max( np.abs( dx)) > sigma/np.sqrt(T):
print 'WARNING: MLE deviates from reference by %g' % np.max(np.abs(dx))
success = True
dr = np.minimum( np.abs(cov - cov0), np.abs(cov/cov0 - 1.))
if np.max( np.abs( dr)) > tol:
print 'FAIL: covariance testing fails with relative deviation of %g' % np.max( np.abs( dr))
print 'covariance ='
print cov
print 'reference ='
print cov0
success = False
else:
print 'SUCCESS: covariance testing passed. Relative deviation < %g' % np.max( np.abs( dr))
return success
def unitTest( tol=1.e-4):
if (True):
K = 10
sij = np.random.rand( K, K)
sij = matrix( 0.5*(sij + sij.T))
# nij = A_optimize( sij)
nij = sparse_A_optimal_network( sij )
if (True):
sij = matrix( [[ 1.5, 0.1, 0.2, 0.5],
[ 0.1, 1.1, 0.3, 0.2],
[ 0.2, 0.3, 1.2, 0.1],
[ 0.5, 0.2, 0.1, 0.9]])
elif (False):
sij = np.ones( (4, 4), dtype=float)
sij += np.diag( 4.*np.ones( 4))
sij = matrix( sij)
else:
sij = matrix ( [[ 1., 0.1, 0.1 ],
[ 0.1, 1., 0.1 ],
[ 0.1, 0.1, 1.2 ]])
from scipy.optimize import check_grad
def F( x):
return lndetC( sij, x)[0]
def dF( x):
return np.array( lndetC( sij, x)[1])[0]
def d2F( x):
return np.array( lndetC( sij, x, True)[2])
K = sij.size[0]
x0 = np.random.rand( K*(K+1)/2)
err = check_grad( F, dF, x0)
print 'Gradient check for ln(det(C)) error=%g:' % err,
if (err < tol):
print 'Passed!'
else:
print 'Failed!'
err = check_hessian( dF, d2F, x0)
print 'Hessian check for ln(det(C)) error=%g:' % err,
if (err < tol):
print 'Passed!'
else:
print 'Failed!'
print 'Testing ML estimator'
for disconnect, label in [
(False, 'Full-rank'),
(1, 'No individual measurement'),
(2, '2-disconnected') ]:
err = check_MLest( K, disconnect=disconnect)
print '%s: RMSE( x0, xML) = %g' % (label, err),
if (err < tol):
print 'Passed!'
else:
print 'Failed!'
results = optimize( sij)
for o in [ 'D', 'A', 'E', 'Etree' ]:
nij = results[o]
C = covariance( cvxopt.div( nij, sij**2))
print '%s-optimality' % o
print 'n (sum=%g):' % sum_upper_triangle( nij)
print nij
D = np.log(linalg.det( C))
A = np.trace( C)
E = np.max(linalg.eig(C)[0]).real
print 'C: (ln(det(C))=%.4f; tr(C)=%.4f; max(eig(C))=%.4f)' % \
( D, A, E )
print C
if (check_optimality( sij, nij, o)):
print '%s-optimality check passed!' % o
else:
print '%s-optimality check failed!' % o
# Check iteration update
success = check_update_A_optimal( sij)
if success:
print 'Iterative update of A-optimal passed!'
# Check sparse A-optimal
if (check_sparse_A_optimal( sij)):
print 'Sparse A-optimal passed!'
# Check A-optimal when only relative measurements are included.
if (check_relative_only_A_optimal( sij)):
print 'Relative-only A-optimal passed!'
# Test covariance computation
if (test_covariance(5, T=4000)):
print 'Covariance computation passed!'
if (test_covariance(5, T=4000, nodiag=True)):
print 'Covariance with only relative values passed!'
if __name__ == '__main__':
unitTest()
A_opt.unit_test()
netbfe.unit_test()
| 32.733154
| 99
| 0.550395
|
438cf54e2fc22168cae622d5b518972257068740
| 459
|
py
|
Python
|
PythonCurso01/aula78CombinationsPermutationsProduct/exemplo03.py
|
AlissonAnjos21/Aprendendo
|
9454d9e53ef9fb8bc61bf481b6592164f5bf8695
|
[
"MIT"
] | null | null | null |
PythonCurso01/aula78CombinationsPermutationsProduct/exemplo03.py
|
AlissonAnjos21/Aprendendo
|
9454d9e53ef9fb8bc61bf481b6592164f5bf8695
|
[
"MIT"
] | null | null | null |
PythonCurso01/aula78CombinationsPermutationsProduct/exemplo03.py
|
AlissonAnjos21/Aprendendo
|
9454d9e53ef9fb8bc61bf481b6592164f5bf8695
|
[
"MIT"
] | null | null | null |
# Product - the order matters, and unique values ARE repeated
from itertools import product
frutas = ['Melão', 'Morango', 'Melancia', 'Mamão', 'Manga']
# In the product function you specify what you want to combine, together with the repeat count that controls how many times identical cases are repeated.
for conjunto in product(frutas, repeat=2):
# Note that in this case it repeats what has already appeared before, and it also shows the combinations of identical cases.
print(conjunto)
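# For illustration (not in the original file): the first tuples printed are
# ('Melão', 'Melão'), ('Melão', 'Morango'), ('Melão', 'Melancia'), ..., for a
# total of 5 * 5 = 25 ordered pairs, since product allows repetition.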
| 41.727273
| 122
| 0.751634
|
1c4ffb7b67b38e42b7a5e7ef680cdc8bc8cdc604
| 4,376
|
py
|
Python
|
examples/basics/scene/volume.py
|
lcampagn/vispy
|
28c25d6904d697cde9bb4c37909bc3f934621134
|
[
"BSD-3-Clause"
] | 1
|
2015-12-03T02:03:50.000Z
|
2015-12-03T02:03:50.000Z
|
examples/basics/scene/volume.py
|
lcampagn/vispy
|
28c25d6904d697cde9bb4c37909bc3f934621134
|
[
"BSD-3-Clause"
] | 19
|
2015-06-16T14:33:22.000Z
|
2015-07-27T21:18:15.000Z
|
examples/basics/scene/volume.py
|
astrofrog/vispy
|
fa5e2eab9bb3d956f87ae68a56e342913e58a305
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2015, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
# vispy: gallery 2
"""
Example volume rendering
Controls:
* 1 - toggle camera between first person (fly), regular 3D (turntable) and
arcball
* 2 - toggle between volume rendering methods
* 3 - toggle between stent-CT / brain-MRI image
* 4 - toggle between colormaps
* 0 - reset cameras
* [] - decrease/increase isosurface threshold
With fly camera:
* WASD or arrow keys - move around
* SPACE - brake
* FC - move up-down
* IJKL or mouse - look around
"""
from itertools import cycle
import numpy as np
from vispy import app, scene, io
from vispy.color import get_colormaps, BaseColormap
# Read volume
vol1 = np.load(io.load_data_file('volume/stent.npz'))['arr_0']
vol2 = np.load(io.load_data_file('brain/mri.npz'))['data']
vol2 = np.flipud(np.rollaxis(vol2, 1))
# Prepare canvas
canvas = scene.SceneCanvas(keys='interactive', size=(800, 600), show=True)
canvas.measure_fps()
# Set up a viewbox to display the image with interactive pan/zoom
view = canvas.central_widget.add_view()
# Set whether we are emulating a 3D texture
emulate_texture = False
# Create the volume visuals, only one is visible
volume1 = scene.visuals.Volume(vol1, parent=view.scene, threshold=0.225,
emulate_texture=emulate_texture)
volume1.transform = scene.STTransform(translate=(64, 64, 0))
volume2 = scene.visuals.Volume(vol2, parent=view.scene, threshold=0.2,
emulate_texture=emulate_texture)
volume2.visible = False
# Create three cameras (a first-person fly camera plus two 3D cameras: turntable and arcball)
fov = 60.
cam1 = scene.cameras.FlyCamera(parent=view.scene, fov=fov, name='Fly')
cam2 = scene.cameras.TurntableCamera(parent=view.scene, fov=fov,
name='Turntable')
cam3 = scene.cameras.ArcballCamera(parent=view.scene, fov=fov, name='Arcball')
view.camera = cam2 # Select turntable at first
# create colormaps that work well for translucent and additive volume rendering
class TransFire(BaseColormap):
glsl_map = """
vec4 translucent_fire(float t) {
return vec4(pow(t, 0.5), t, t*t, max(0, t*1.05 - 0.05));
}
"""
class TransGrays(BaseColormap):
glsl_map = """
vec4 translucent_grays(float t) {
return vec4(t, t, t, t*0.05);
}
"""
# Setup colormap iterators
opaque_cmaps = cycle(get_colormaps())
translucent_cmaps = cycle([TransFire(), TransGrays()])
opaque_cmap = next(opaque_cmaps)
translucent_cmap = next(translucent_cmaps)
# Implement key presses
@canvas.events.key_press.connect
def on_key_press(event):
global opaque_cmap, translucent_cmap
if event.text == '1':
cam_toggle = {cam1: cam2, cam2: cam3, cam3: cam1}
view.camera = cam_toggle.get(view.camera, cam2)
print(view.camera.name + ' camera')
elif event.text == '2':
methods = ['mip', 'translucent', 'iso', 'additive']
method = methods[(methods.index(volume1.method) + 1) % 4]
print("Volume render method: %s" % method)
cmap = opaque_cmap if method in ['mip', 'iso'] else translucent_cmap
volume1.method = method
volume1.cmap = cmap
volume2.method = method
volume2.cmap = cmap
elif event.text == '3':
volume1.visible = not volume1.visible
volume2.visible = not volume1.visible
elif event.text == '4':
if volume1.method in ['mip', 'iso']:
cmap = opaque_cmap = next(opaque_cmaps)
else:
cmap = translucent_cmap = next(translucent_cmaps)
volume1.cmap = cmap
volume2.cmap = cmap
elif event.text == '0':
cam1.set_range()
cam3.set_range()
elif event.text != '' and event.text in '[]':
s = -0.025 if event.text == '[' else 0.025
volume1.threshold += s
volume2.threshold += s
th = volume1.threshold if volume1.visible else volume2.threshold
print("Isosurface threshold: %0.3f" % th)
# for testing performance
#@canvas.connect
#def on_draw(ev):
#canvas.update()
if __name__ == '__main__':
print(__doc__)
app.run()
| 31.941606
| 79
| 0.638483
|
8fe78b91e03f34d072d5c648698affb266789724
| 447
|
py
|
Python
|
tests/filehandling.py
|
mberz/spharpy
|
e74c30c297dd9ad887e7345c836a515daa6f21f4
|
[
"MIT"
] | null | null | null |
tests/filehandling.py
|
mberz/spharpy
|
e74c30c297dd9ad887e7345c836a515daa6f21f4
|
[
"MIT"
] | null | null | null |
tests/filehandling.py
|
mberz/spharpy
|
e74c30c297dd9ad887e7345c836a515daa6f21f4
|
[
"MIT"
] | null | null | null |
"""
File handling helper functions
"""
import numpy as np
from scipy.io import loadmat
def read_2d_matrix_from_csv(filename, dtype='double'):
"""
Read 2d matrix from csv file
"""
matrix = np.genfromtxt(open(filename, "rb"), delimiter=",", dtype=dtype)
return matrix
def read_matrix_from_mat(filename):
"""
Read matrix from .mat file as numpy ndarray
"""
matrix = loadmat(filename)['matrix']
return matrix
| 22.35
| 76
| 0.675615
|
f17abfe1310afdb7acc27daeaa23097c9264a0e7
| 1,174
|
py
|
Python
|
riaps-x86runtime/env_setup_tests/WeatherMonitor/TempSensor.py
|
timkrentz/riaps-integration
|
22ceacb3043af3c726a31ff3ea337337e619377e
|
[
"Apache-2.0"
] | 7
|
2019-01-10T13:12:41.000Z
|
2021-06-12T12:25:20.000Z
|
riaps-x86runtime/env_setup_tests/WeatherMonitor/TempSensor.py
|
timkrentz/riaps-integration
|
22ceacb3043af3c726a31ff3ea337337e619377e
|
[
"Apache-2.0"
] | 10
|
2018-11-12T12:42:26.000Z
|
2022-03-11T07:25:06.000Z
|
riaps-x86runtime/env_setup_tests/WeatherMonitor/TempSensor.py
|
timkrentz/riaps-integration
|
22ceacb3043af3c726a31ff3ea337337e619377e
|
[
"Apache-2.0"
] | 4
|
2020-05-08T04:56:50.000Z
|
2021-04-29T20:07:17.000Z
|
'''
Created on Jan 25, 2017
@author: metelko
'''
# riaps:keep_import:begin
from riaps.run.comp import Component
import logging
import time
import os
# riaps:keep_import:end
class TempSensor(Component):
# riaps:keep_constr:begin
def __init__(self):
super(TempSensor, self).__init__()
self.pid = os.getpid()
self.temperature = 65
now = time.ctime(int(time.time()))
self.logger.info("(PID %s)-starting TempSensor, %s" % (str(self.pid),str(now)))
self.logger.info("Initial temp:%d, %s" % (self.temperature,str(now)))
# riaps:keep_constr:end
# riaps:keep_clock:begin
def on_clock(self):
now = time.ctime(int(time.time()))
msg = self.clock.recv_pyobj()
self.temperature = self.temperature + 1
msg = str(self.temperature)
msg = (now,msg)
self.logger.info("on_clock(): Temperature - %s, PID %s, %s" % (str(msg[1]),str(self.pid),str(now)))
self.ready.send_pyobj(msg)
# riaps:keep_clock:end
# riaps:keep_impl:begin
def __destroy__(self):
now = time.time()
self.logger.info("%s - stopping TempSensor, %s" % (str(self.pid),now))
# riaps:keep_impl:end
| 30.102564
| 107
| 0.638842
|
2322b927eabeb501146922146ef8d8333a262d77
| 4,671
|
py
|
Python
|
mainproject/settings.py
|
sandeepnaik9/ReportGenerator
|
77259fb5ce682b6952f3dd60ad18d21417d25650
|
[
"MIT"
] | null | null | null |
mainproject/settings.py
|
sandeepnaik9/ReportGenerator
|
77259fb5ce682b6952f3dd60ad18d21417d25650
|
[
"MIT"
] | null | null | null |
mainproject/settings.py
|
sandeepnaik9/ReportGenerator
|
77259fb5ce682b6952f3dd60ad18d21417d25650
|
[
"MIT"
] | null | null | null |
"""
Django settings for mainproject project.
Generated by 'django-admin startproject' using Django 3.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-c2g*0^a26p$t6apyha@ncd8+%7_7u(k_y*k-_)7_-z=%t9@a0o'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'calc.apps.CalcConfig',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.facebook',
'allauth.socialaccount.providers.google',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'calc.middlewares.OnSessionPerUser',
]
ROOT_URLCONF = 'mainproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mainproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR,'static')
]
STATIC_ROOT = os.path.join(BASE_DIR,'assets')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
SITE_ID = 1
LOGIN_REDIRECT_URL = "/Profile"
ACCOUNT_LOGOUT_REDIRECT_URL = '/'
SOCIAL_AUTH_PIPELINE = (
'social_core.pipeline.social_auth.social_details',
'social_core.pipeline.social_auth.associate_by_email',
'posts.views.update_user_social_data',
)
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
SOCIALACCOUNT_PROVIDERS = {
'google': {
'SCOPE': [
'profile',
'email',
],
'AUTH_PARAMS': {
'access_type': 'online',
}
}
}
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_USE_TLS = True
EMAIL_PORT = 587
EMAIL_HOST_USER = 'sandeepnaik9900@gmail.com'
EMAIL_HOST_PASSWORD = 'godisthere.jaisrirama1'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_AUTHENTICATION_METHOD = 'email'
| 24.584211
| 91
| 0.702419
|
b6f9a7555a3cb34d70289fd5f2bc90d47d58d7c4
| 216
|
py
|
Python
|
app/quotes/models.py
|
agile-fan430/RestAPI_Django_AlphaVantage
|
a6129f9124f22d9e1034f6507106fe9355466e4a
|
[
"MIT"
] | null | null | null |
app/quotes/models.py
|
agile-fan430/RestAPI_Django_AlphaVantage
|
a6129f9124f22d9e1034f6507106fe9355466e4a
|
[
"MIT"
] | null | null | null |
app/quotes/models.py
|
agile-fan430/RestAPI_Django_AlphaVantage
|
a6129f9124f22d9e1034f6507106fe9355466e4a
|
[
"MIT"
] | null | null | null |
from django.db import models
class BTCPrice(models.Model):
price = models.FloatField(max_length=255)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
| 30.857143
| 56
| 0.777778
|
576b632ae6ca3512d50b230e4821d8cdec6eeb83
| 103
|
py
|
Python
|
yt_dlp/version.py
|
Tellybots/yt-dlp
|
b21553d1e9be4a09a5aec43ef16e758c45be799b
|
[
"Unlicense"
] | null | null | null |
yt_dlp/version.py
|
Tellybots/yt-dlp
|
b21553d1e9be4a09a5aec43ef16e758c45be799b
|
[
"Unlicense"
] | null | null | null |
yt_dlp/version.py
|
Tellybots/yt-dlp
|
b21553d1e9be4a09a5aec43ef16e758c45be799b
|
[
"Unlicense"
] | 1
|
2022-02-21T05:27:35.000Z
|
2022-02-21T05:27:35.000Z
|
# Autogenerated by devscripts/update-version.py
__version__ = '1.2.3'
RELEASE_GIT_HEAD = 'c1653e9ef'
| 17.166667
| 47
| 0.76699
|
96e480f9ad0c5bce56239ddce849c054bc299404
| 815
|
py
|
Python
|
092_SquareDigitChains.py
|
joetache4/project-euler
|
82f9e25b414929d9f62d94905906ba2f57db7935
|
[
"MIT"
] | null | null | null |
092_SquareDigitChains.py
|
joetache4/project-euler
|
82f9e25b414929d9f62d94905906ba2f57db7935
|
[
"MIT"
] | null | null | null |
092_SquareDigitChains.py
|
joetache4/project-euler
|
82f9e25b414929d9f62d94905906ba2f57db7935
|
[
"MIT"
] | null | null | null |
"""
A number chain is created by continuously adding the square of the digits in a number to form a new number until it has been seen before.
For example,
44 → 32 → 13 → 10 → 1 → 1
85 → 89 → 145 → 42 → 20 → 4 → 16 → 37 → 58 → 89
Therefore any chain that arrives at 1 or 89 will become stuck in an endless loop. What is most amazing is that EVERY starting number will eventually arrive at 1 or 89.
How many starting numbers below ten million will arrive at 89?
ans: 8581146
"""
mem = dict()
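# Descriptive note (added for clarity): all numbers first reached within one
# call to test() share the same list v; once the chain's end is known, v holds
# [0] (chain reaches 1) or [1] (chain reaches 89), so the final sum over
# mem.values() counts how many starting numbers arrive at 89.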
def test(n):
v = []
while True:
mem[n] = v
n = sum(( int(d)**2 for d in str(n) ))
if n in mem and len(mem[n]) > 0:
v.append(mem[n][0])
break
if n == 1:
v.append(0)
break
if n == 89:
v.append(1)
break
for n in range(1, 10**7):
test(n)
print(sum(( v[0] for v in mem.values() )))
| 20.375
| 167
| 0.625767
|
6679e83998654748d7308f35451cb7ae58e28399
| 7,642
|
py
|
Python
|
tasks/probStim.py
|
djmhunt/TTpy
|
0f0997314bf0f54831494b2ef1a64f1bff95c097
|
[
"MIT"
] | null | null | null |
tasks/probStim.py
|
djmhunt/TTpy
|
0f0997314bf0f54831494b2ef1a64f1bff95c097
|
[
"MIT"
] | 4
|
2020-04-19T11:43:41.000Z
|
2020-07-21T09:57:51.000Z
|
tasks/probStim.py
|
djmhunt/TTpy
|
0f0997314bf0f54831494b2ef1a64f1bff95c097
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
:Author: Dominic Hunt
"""
import numpy as np
from numpy import nan
from numpy import float as npfloat
from tasks.taskTemplate import Task
from model.modelTemplate import Stimulus, Rewards
# TODO: Create a set of test cues
cueSets = {"Test": []}
defaultCues = cueSets["Test"]
actualityLists = {}
class Probstim(Task):
"""
Basic probabilistic task
Many methods are inherited from the tasks.taskTemplate.Task class.
Refer to its documentation for missing methods.
Attributes
----------
Name : string
The name of the class used when recording what has been used.
Parameters
----------
actualities: int, optional
The actual reality the cues pointed to, i.e. the correct response the participant is trying to produce
cues: array of floats, optional
The cues used to guess the actualities
trialsteps: int, optional
If no cues are provided, it is the number of trialsteps for the generated set of cues. Default ``100``
numStimuli: int, optional
If no cues are provided, it is the number of distinct stimuli for the generated set of cues. Default ``4``
correctProb: float in [0,1], optional
If no actualities are provided, it is the probability of the correct answer being answer 1 rather than answer 0.
The default is ``0.8``
correctProbabilities: list or array of floats in [0,1], optional
If no actualities are provided, it is the probability of the correct answer being answer 1 rather than answer 0 for
each of the different stimuli. Default ``[correctProb, 1-correctProb] * (numStimuli//2) + [correctProb] * (numStimuli%2)``
rewardlessT: int, optional
If no actualities are provided, it is the number of actualities at the end of the task that will have a
``None`` reward. Default ``2*numStimuli``
"""
def __init__(self,
cues=None,
actualities=None,
trialsteps=100,
numStimuli=4,
correctProb=0.8,
correctProbabilities=None,
rewardlessT=None):
super(Probstim, self).__init__()
if isinstance(cues, str):
if cues in cueSets:
self.cues = cueSets[cues]
self.T = len(self.cues)
numStimuli = len(self.cues[0])
else:
raise Exception("Unknown cue sets")
elif isinstance(cues, (list, np.ndarray)):
self.cues = cues
self.T = len(self.cues)
numStimuli = len(self.cues[0])
else:
self.T = trialsteps
numStimuli = numStimuli
stimuli = np.zeros((self.T, numStimuli))
stimuli[list(range(self.T)), np.random.randint(numStimuli, size=self.T)] = 1
self.cues = stimuli
if isinstance(actualities, str):
if actualities in actualityLists:
self.actualities = actualityLists[actualities]
rewardlessT = np.sum(np.isnan(np.array(self.actualities, dtype=npfloat)))
else:
raise Exception("Unknown actualities list")
elif isinstance(actualities, (list, np.ndarray)):
self.actualities = actualities
rewardlessT = np.sum(np.isnan(np.array(actualities, dtype=npfloat)))
else:
corrProbDefault = [correctProb, 1-correctProb] * (numStimuli // 2) + [correctProb] * (numStimuli % 2)
if not correctProbabilities:
correctProbabilities = corrProbDefault
if not rewardlessT:
rewardlessT = 2 * numStimuli
corrChoiceProb = np.sum(self.cues * correctProbabilities, 1)
correctChoice = list((np.random.rand(self.T) < corrChoiceProb) * 1)
correctChoice[-rewardlessT:] = [nan] * rewardlessT
self.actualities = correctChoice
self.parameters["Actualities"] = np.array(self.actualities)
self.parameters["Cues"] = np.array(self.cues)
self.parameters["numtrialsteps"] = self.T
self.parameters["numRewardless"] = rewardlessT
self.parameters["number_cues"] = numStimuli
# Set draw count
self.t = -1
self.action = None
# Recording variables
self.recAction = [-1] * self.T
def __next__(self):
"""
Produces the next stimulus for the iterator
Returns
-------
stimulus : Tuple
The current cues
nextValidActions : Tuple of ints or ``None``
The list of valid actions that the model can respond with. Set to (0,1), as they never vary.
Raises
------
StopIteration
"""
self.t += 1
if self.t == self.T:
raise StopIteration
nextStim = self.cues[self.t]
nextValidActions = (0, 1)
return nextStim, nextValidActions
def receiveAction(self, action):
"""
Receives the next action from the participant
Parameters
----------
action : int or string
The action taken by the model
"""
self.action = action
def feedback(self):
"""
Feedback to the action from the participant
"""
response = self.actualities[self.t]
self.storeState()
return response
def proceed(self):
"""
Updates the task after feedback
"""
pass
def returnTaskState(self):
"""
Returns all the relevant data for this task run
Returns
-------
results : dictionary
A dictionary containing the class parameters as well as the other useful data
"""
results = self.standardResultOutput()
results["Actions"] = self.recAction
return results
def storeState(self):
""" Stores the state of all the important variables so that they can be
output later """
self.recAction[self.t] = self.action
class StimulusProbStimDirect(Stimulus):
"""
Processes the stimuli for models expecting just the event
"""
def processStimulus(self, observation):
"""
Processes the decks stimuli for models expecting just the event
Returns
-------
stimuliPresent : int or list of int
The elements present of the stimulus
stimuliActivity : float or list of float
The activity of each of the elements
"""
return observation, observation
class RewardProbStimDiff(Rewards):
"""
Processes the reward for models expecting reward corrections
"""
def processFeedback(self, feedback, lastAction, stimuli):
"""
Returns
-------
modelFeedback:
"""
if feedback == lastAction:
return 1
else:
return 0
class RewardProbStimDualCorrection(Rewards):
"""
Processes the reward for models expecting the reward correction
from two possible actions.
"""
epsilon = 1
def processFeedback(self, feedback, lastAction, stimuli):
"""
Returns
-------
modelFeedback:
"""
rewardProc = np.zeros((2, len(stimuli))) + self.epsilon
rewardProc[feedback, stimuli] = 1
return np.array(rewardProc)
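# Minimal usage sketch (not part of the original module): drive one Probstim
# task with its default generated cues, choosing a random valid action on each
# trial. The random "model" here is only a stand-in for whatever decision model
# TTpy would normally supply; everything else uses the methods defined above.
if __name__ == '__main__':
    demo_task = Probstim(trialsteps=10, numStimuli=4)
    while True:
        try:
            _stimulus, valid_actions = next(demo_task)  # calls Probstim.__next__
        except StopIteration:
            break
        demo_task.receiveAction(np.random.choice(valid_actions))
        demo_task.feedback()   # returns the actuality and records the trial state
        demo_task.proceed()
    print(demo_task.returnTaskState()['Actions'])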
| 29.851563
| 122
| 0.573279
|
cdb9690806b74f8a4d1808ff00d887d638b0f2d6
| 123
|
gyp
|
Python
|
src/BinaryScale/binding.gyp
|
HalZhan/imager
|
90bd14604c87a980a1a7734f3df180571847b57e
|
[
"MIT"
] | null | null | null |
src/BinaryScale/binding.gyp
|
HalZhan/imager
|
90bd14604c87a980a1a7734f3df180571847b57e
|
[
"MIT"
] | null | null | null |
src/BinaryScale/binding.gyp
|
HalZhan/imager
|
90bd14604c87a980a1a7734f3df180571847b57e
|
[
"MIT"
] | null | null | null |
{
"targets": [
{
"target_name": "imager",
"sources": [ "binaryScale.cpp", "../dib/cdib.cpp" ]
}
]
}
| 15.375
| 57
| 0.447154
|
a4f5feb11046db9236014d6f6033077e54974f4d
| 5,114
|
py
|
Python
|
ggpy/cruft/autocode/DeORer.py
|
hobson/ggpy
|
4e6e6e876c3a4294cd711647051da2d9c1836b60
|
[
"MIT"
] | 1
|
2015-01-26T19:07:45.000Z
|
2015-01-26T19:07:45.000Z
|
ggpy/cruft/autocode/DeORer.py
|
hobson/ggpy
|
4e6e6e876c3a4294cd711647051da2d9c1836b60
|
[
"MIT"
] | null | null | null |
ggpy/cruft/autocode/DeORer.py
|
hobson/ggpy
|
4e6e6e876c3a4294cd711647051da2d9c1836b60
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
""" generated source for module DeORer """
# package: org.ggp.base.util.gdl.transforms
import java.util.ArrayList
import java.util.List
import org.ggp.base.util.gdl.grammar.Gdl
import org.ggp.base.util.gdl.grammar.GdlConstant
import org.ggp.base.util.gdl.grammar.GdlDistinct
import org.ggp.base.util.gdl.grammar.GdlFunction
import org.ggp.base.util.gdl.grammar.GdlLiteral
import org.ggp.base.util.gdl.grammar.GdlNot
import org.ggp.base.util.gdl.grammar.GdlOr
import org.ggp.base.util.gdl.grammar.GdlPool
import org.ggp.base.util.gdl.grammar.GdlProposition
import org.ggp.base.util.gdl.grammar.GdlRelation
import org.ggp.base.util.gdl.grammar.GdlRule
import org.ggp.base.util.gdl.grammar.GdlVariable
#
# * As a GDL transformer, this class takes in a GDL description of a game,
# * transforms it in some way, and outputs a new GDL descriptions of a game
# * which is functionally equivalent to the original game.
# *
# * DeORer removes OR rules from the GDL. Technically, these rules shouldn't
# * be in the GDL in the first place, but it's very straightforward to remove
# * them, so we do that so that we can handle GDL descriptions that use OR.
# *
# * @author Ethan Dreyfuss
#
class DeORer(object):
""" generated source for class DeORer """
@classmethod
def run(cls, description):
""" generated source for method run """
newDesc = ArrayList()
for gdl in description:
if isinstance(gdl, (GdlRule, )):
for body in newBodies:
newDesc.add(GdlPool.getRule(rule.getHead(), body))
else:
newDesc.add(gdl)
return newDesc
@classmethod
def deOr(cls, rhs):
""" generated source for method deOr """
wrapped = ArrayList()
wrapped.add(rhs)
return deOr2(wrapped)
@classmethod
def deOr2(cls, rhsList):
""" generated source for method deOr2 """
rval = ArrayList()
expandedSomething = False
for rhs in rhsList:
if not expandedSomething:
for lit in rhs:
if not expandedSomething:
if len(expandedList) > 1:
for replacement in expandedList:
if not (isinstance(replacement, (GdlLiteral, ))):
raise RuntimeException("Top level return value is different type of gdl.")
newRhs.set(i, newLit)
rval.add(newRhs)
expandedSomething = True
break
i += 1
if not expandedSomething:
rval.add(rhs)
else:
rval.add(rhs)
# If I've already expanded this function call
if not expandedSomething:
return rhsList
else:
return cls.deOr2(rval)
@classmethod
def expandFirstOr(cls, gdl):
""" generated source for method expandFirstOr """
rval = List()
expandedChild = List()
if isinstance(gdl, (GdlDistinct, )):
# Can safely be ignored, won't contain 'or'
rval = ArrayList()
rval.add(gdl)
return rval
elif isinstance(gdl, (GdlNot, )):
expandedChild = cls.expandFirstOr(not_.getBody())
rval = ArrayList()
for g in expandedChild:
if not (isinstance(g, (GdlLiteral, ))):
raise RuntimeException("Not must have literal child.")
rval.add(GdlPool.getNot(lit))
return rval
elif isinstance(gdl, (GdlOr, )):
rval = ArrayList()
while i < or_.arity():
rval.add(or_.get(i))
i += 1
return rval
elif isinstance(gdl, (GdlProposition, )):
# Can safely be ignored, won't contain 'or'
rval = ArrayList()
rval.add(gdl)
return rval
elif isinstance(gdl, (GdlRelation, )):
# Can safely be ignored, won't contain 'or'
rval = ArrayList()
rval.add(gdl)
return rval
elif isinstance(gdl, (GdlRule, )):
raise RuntimeException("This should be used to remove 'or's from the body of a rule, and rules can't be nested")
elif isinstance(gdl, (GdlConstant, )):
# Can safely be ignored, won't contain 'or'
rval = ArrayList()
rval.add(gdl)
return rval
elif isinstance(gdl, (GdlFunction, )):
# Can safely be ignored, won't contain 'or'
rval = ArrayList()
rval.add(gdl)
return rval
elif isinstance(gdl, (GdlVariable, )):
# Can safely be ignored, won't contain 'or'
rval = ArrayList()
rval.add(gdl)
return rval
else:
raise RuntimeException("Uh oh, gdl hierarchy must have been extended without updating this code.")
| 35.027397
| 124
| 0.566875
|
572bf22c4c0380aa2359b6dbe7c869262e5ca136
| 17,939
|
py
|
Python
|
speech_model/SpeechModel251.py
|
GangTimes/ChineseASR
|
2678055ca90da2178193e2153f0adbc1d8966ed6
|
[
"Apache-2.0"
] | 4
|
2018-12-28T07:34:56.000Z
|
2020-05-10T03:59:18.000Z
|
speech_model/SpeechModel251.py
|
GangTimes/ChineseASR
|
2678055ca90da2178193e2153f0adbc1d8966ed6
|
[
"Apache-2.0"
] | null | null | null |
speech_model/SpeechModel251.py
|
GangTimes/ChineseASR
|
2678055ca90da2178193e2153f0adbc1d8966ed6
|
[
"Apache-2.0"
] | 2
|
2019-05-20T07:16:17.000Z
|
2020-09-25T13:35:09.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: nl8590687
"""
import platform as plat
import os
import time
from general_function.file_wav import *
from general_function.file_dict import *
from general_function.gen_func import *
# LSTM_CNN
import keras as kr
import numpy as np
import random
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Input, Reshape, BatchNormalization # , Flatten
from keras.layers import Lambda, TimeDistributed, Activation,Conv2D, MaxPooling2D #, Merge
from keras import backend as K
from keras.optimizers import SGD, Adadelta, Adam
from readdata24 import DataSpeech
from Base import Config
abspath = ''
ModelName='251'
#NUM_GPU = 2
class ModelSpeech(): # speech model class
def __init__(self):
'''
Initialisation
Although a blank token was already added to the dictionary, it still did not work; presumably CTC inserts the blank automatically, so the number of class labels has to be one larger
'''
datapath=Config.data_dir
MS_OUTPUT_SIZE = 1472
self.MS_OUTPUT_SIZE = MS_OUTPUT_SIZE # dimension of each character vector in the network's final output
#self.BATCH_SIZE = BATCH_SIZE # batch size for one training run
self.label_max_string_length = 64
self.AUDIO_LENGTH = 1600
self.AUDIO_FEATURE_LENGTH = 200
self._model, self.base_model = self.CreateModel()
self.datapath = datapath
self.slash = ''
system_type = plat.system() # file path separators differ between operating systems, so check the platform
if(system_type == 'Windows'):
self.slash='\\' # backslash
elif(system_type == 'Linux'):
self.slash='/' # forward slash
else:
print('*[Message] Unknown System\n')
self.slash='/' # forward slash
if(self.slash != self.datapath[-1]): # append a slash to the end of the directory path
self.datapath = self.datapath + self.slash
def CreateModel(self):
'''
Defines the CNN/LSTM/CTC model using the Keras functional API
Input layer: sequence of 200-dimensional feature vectors; the maximum length of one utterance is set to 1600 frames (about 16 s)
Hidden layers: convolution and pooling layers, 3x3 kernels, pooling window of size 2
Hidden layer: fully connected layer
Output layer: fully connected layer with self.MS_OUTPUT_SIZE neurons and softmax activation
CTC layer: uses the CTC loss as the loss function to realise connectionist temporal multi-output
'''
input_data = Input(name='the_input', shape=(self.AUDIO_LENGTH, self.AUDIO_FEATURE_LENGTH, 1))
layer_h1 = Conv2D(32, (3,3), use_bias=False, activation='relu', padding='same', kernel_initializer='he_normal')(input_data) # convolution layer
#layer_h1 = Dropout(0.05)(layer_h1)
layer_h1=BatchNormalization()(layer_h1)
layer_h2 = Conv2D(32, (3,3), use_bias=True, activation='relu', padding='same', kernel_initializer='he_normal')(layer_h1) # convolution layer
layer_h3 = MaxPooling2D(pool_size=2, strides=None, padding="valid")(layer_h2) # pooling layer
#layer_h3 = Dropout(0.05)(layer_h3)
layer_h3=BatchNormalization()(layer_h3)
layer_h4 = Conv2D(64, (3,3), use_bias=True, activation='relu', padding='same', kernel_initializer='he_normal')(layer_h3) # convolution layer
#layer_h4 = Dropout(0.1)(layer_h4)
layer_h4=BatchNormalization()(layer_h4)
layer_h5 = Conv2D(64, (3,3), use_bias=True, activation='relu', padding='same', kernel_initializer='he_normal')(layer_h4) # convolution layer
layer_h6 = MaxPooling2D(pool_size=2, strides=None, padding="valid")(layer_h5) # pooling layer
#layer_h6 = Dropout(0.1)(layer_h6)
layer_h6=BatchNormalization()(layer_h6)
layer_h7 = Conv2D(128, (3,3), use_bias=True, activation='relu', padding='same', kernel_initializer='he_normal')(layer_h6) # convolution layer
#layer_h7 = Dropout(0.15)(layer_h7)
layer_h7=BatchNormalization()(layer_h7)
layer_h8 = Conv2D(128, (3,3), use_bias=True, activation='relu', padding='same', kernel_initializer='he_normal')(layer_h7) # convolution layer
layer_h9 = MaxPooling2D(pool_size=2, strides=None, padding="valid")(layer_h8) # pooling layer
#layer_h9 = Dropout(0.15)(layer_h9)
layer_h9=BatchNormalization()(layer_h9)
layer_h10 = Conv2D(128, (3,3), use_bias=True, activation='relu', padding='same', kernel_initializer='he_normal')(layer_h9) # convolution layer
#layer_h10 = Dropout(0.2)(layer_h10)
layer_h10=BatchNormalization()(layer_h10)
layer_h11 = Conv2D(128, (3,3), use_bias=True, activation='relu', padding='same', kernel_initializer='he_normal')(layer_h10) # convolution layer
layer_h12 = MaxPooling2D(pool_size=1, strides=None, padding="valid")(layer_h11) # pooling layer
#layer_h12 = Dropout(0.2)(layer_h12)
layer_h12=BatchNormalization()(layer_h12)
layer_h13 = Conv2D(128, (3,3), use_bias=True, activation='relu', padding='same', kernel_initializer='he_normal')(layer_h12) # convolution layer
#layer_h13 = Dropout(0.2)(layer_h13)
layer_h13=BatchNormalization()(layer_h13)
layer_h14 = Conv2D(128, (3,3), use_bias=True, activation='relu', padding='same', kernel_initializer='he_normal')(layer_h13) # convolution layer
layer_h15 = MaxPooling2D(pool_size=1, strides=None, padding="valid")(layer_h14) # pooling layer
#test=Model(inputs = input_data, outputs = layer_h12)
#test.summary()
layer_h16 = Reshape((200, 3200))(layer_h15) # reshape layer
#layer_h5 = LSTM(256, activation='relu', use_bias=True, return_sequences=True)(layer_h4) # LSTM layer
layer_h16 = Dropout(0.3)(layer_h16)
layer_h16=BatchNormalization()(layer_h16)
layer_h17 = Dense(128, activation="relu", use_bias=True, kernel_initializer='he_normal')(layer_h16) # fully connected layer
#layer_h17 = Dropout(0.3)(layer_h17)
layer_h17=BatchNormalization()(layer_h17)
layer_h18 = Dense(self.MS_OUTPUT_SIZE, use_bias=True, kernel_initializer='he_normal')(layer_h17) # fully connected layer
y_pred = Activation('softmax', name='Activation0')(layer_h18)
model_data = Model(inputs = input_data, outputs = y_pred)
#model_data.summary()
labels = Input(name='the_labels', shape=[self.label_max_string_length], dtype='float32')
input_length = Input(name='input_length', shape=[1], dtype='int64')
label_length = Input(name='label_length', shape=[1], dtype='int64')
# Keras doesn't currently support loss funcs with extra parameters
# so CTC loss is implemented in a lambda layer
#layer_out = Lambda(ctc_lambda_func,output_shape=(self.MS_OUTPUT_SIZE, ), name='ctc')([y_pred, labels, input_length, label_length])#(layer_h6) # CTC
print(y_pred,labels)
loss_out = Lambda(self.ctc_lambda_func, output_shape=(1,), name='ctc')([y_pred, labels, input_length, label_length])
model = Model(inputs=[input_data, labels, input_length, label_length], outputs=loss_out)
model.summary()
# clipnorm seems to speeds up convergence
#sgd = SGD(lr=0.0001, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=5)
#opt = Adadelta(lr = 0.01, rho = 0.95, epsilon = 1e-06)
opt = Adam(lr = 0.001, beta_1 = 0.9, beta_2 = 0.999, decay = 0.0, epsilon = 10e-8)
#model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=sgd)
model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer = opt)
# captures output of softmax so we can decode the output during visualization
test_func = K.function([input_data], [y_pred])
#print('[*Info] Model created and compiled successfully.')
print('[*Info] Create Model Successful, Compiles Model Successful. ')
return model, model_data
def ctc_lambda_func(self, args):
y_pred, labels, input_length, label_length = args
y_pred = y_pred[:, :, :]
#y_pred = y_pred[:, 2:, :]
return K.ctc_batch_cost(labels, y_pred, input_length, label_length)
def TrainModel(self, epoch = 2, save_step = 1000, batch_size = 32, filename = abspath + 'model_speech/m' + ModelName + '/speech_model'+ModelName):
'''
Train the model
Parameters:
datapath: path where the data are stored
epoch: number of training epochs
save_step: save the model every this many steps
filename: default file name for saving, without the file extension
'''
data=DataSpeech( 'train')
num_data = data.GetDataNum() # get the number of data samples
yielddatas = data.data_genetator(batch_size, self.AUDIO_LENGTH)
for epoch in range(epoch): # loop over epochs
print('[running] train epoch %d .' % epoch)
n_step = 0 # number of completed training steps
while True:
try:
print('[message] epoch %d . Have train datas %d+'%(epoch, n_step*save_step))
# data_genetator is a generator function
#self._model.fit_generator(yielddatas, save_step, nb_worker=2)
self._model.fit_generator(yielddatas, save_step)
n_step += 1
except StopIteration:
print('[error] generator error. please check data format.')
break
self.SaveModel(comment='_e_'+str(epoch)+'_step_'+str(n_step * save_step))
self.TestModel( str_dataset='train', data_count = 4)
self.TestModel( str_dataset='dev', data_count = 4)
def LoadModel(self,filename = abspath + 'model_speech/m'+ModelName+'/speech_model'+ModelName+'.model'):
'''
Load the model weights
'''
self._model.load_weights(filename)
self.base_model.load_weights(filename + '.base')
def SaveModel(self,filename = abspath + 'model_speech/m'+ModelName+'/speech_model'+ModelName,comment=''):
'''
Save the model weights
'''
self._model.save_weights(filename+comment+'.model')
self.base_model.save_weights(filename + comment + '.model.base')
f = open('step'+ModelName+'.txt','w')
f.write(filename+comment)
f.close()
def TestModel(self, str_dataset='dev', data_count = 32, out_report = False, show_ratio = True, io_step_print = 10, io_step_file = 10):
'''
Test and evaluate the model
io_step_print
adjust this parameter to reduce the stdout I/O overhead during testing
io_step_file
adjust this parameter to reduce the file I/O overhead during testing
'''
data=DataSpeech( str_dataset)
#data.LoadDataList(str_dataset)
num_data = data.GetDataNum() # get the number of data samples
if(data_count <= 0 or data_count > num_data): # if data_count is <= 0 or larger than the amount of test data, use all of the data for testing
data_count = num_data
try:
ran_num = random.randint(0,num_data - 1) # pick a random starting index
words_num = 0
word_error_num = 0
nowtime = time.strftime('%Y%m%d_%H%M%S',time.localtime(time.time()))
if(out_report == True):
txt_obj = open('Test_Report_' + str_dataset + '_' + nowtime + '.txt', 'w', encoding='UTF-8') # open the report file for writing
txt = 'Test report\nModel ID ' + ModelName + '\n\n'
for i in range(data_count):
data_input, data_labels = data.GetData((ran_num + i) % num_data) # take consecutive samples starting from the random index
# start of malformed-data handling
# if an input wav file is too long, skip it automatically and use the next wav file instead
num_bias = 0
while(data_input.shape[0] > self.AUDIO_LENGTH):
print('*[Error]','wave data length of num',(ran_num + i) % num_data, 'is too long.','\n An exception was raised when testing the speech model.')
num_bias += 1
data_input, data_labels = data.GetData((ran_num + i + num_bias) % num_data) # take consecutive samples starting from the random index
# end of malformed-data handling
pre = self.Predict(data_input, data_input.shape[0] // 8)
words_n = data_labels.shape[0] # number of characters in this sentence
words_num += words_n # add to the total character count
edit_distance = GetEditDistance(data_labels, pre) # compute the edit distance
if(edit_distance <= words_n): # if the edit distance is at most the sentence length
word_error_num += edit_distance # use the edit distance as the error count
else: # otherwise the prediction must have inserted a pile of spurious characters
word_error_num += words_n # so just add the sentence's own character count
if((i % io_step_print == 0 or i == data_count - 1) and show_ratio == True):
#print('Test progress:',i,'/',data_count)
print('Test Count: ',i,'/',data_count)
if(out_report == True):
if(i % io_step_file == 0 or i == data_count - 1):
txt_obj.write(txt)
txt = ''
pys=[data.id2py[idx] for idx in data_labels]
pres=[data.id2py[idx] for idx in pre]
pys=' '.join(pys)
pres=' '.join(pres)
txt += str(i) + '\n'
txt += 'True:\t' +pys + '\n'
txt += 'Pred:\t' +pres+ '\n'
txt += '\n'
#print('*[Test Result] speech recognition ' + str_dataset + ' set single-character error rate:', word_error_num / words_num * 100, '%')
print('*[Test Result] Speech Recognition ' + str_dataset + ' set word error ratio: ', word_error_num / words_num * 100, '%')
if(out_report == True):
txt += '*[Test Result] speech recognition ' + str_dataset + ' set single-character error rate: ' + str(word_error_num / words_num * 100) + ' %'
txt_obj.write(txt)
txt = ''
txt_obj.close()
except StopIteration:
print('[Error] Model Test Error. please check data format.')
def Predict(self, data_input, input_len):
'''
Run prediction
Returns the list of pinyin symbols recognised from the speech
'''
batch_size = 1
in_len = np.zeros((batch_size),dtype = np.int32)
in_len[0] = input_len
x_in = np.zeros((batch_size, 1600, self.AUDIO_FEATURE_LENGTH, 1), dtype=np.float)
for i in range(batch_size):
x_in[i,0:len(data_input)] = data_input
base_pred = self.base_model.predict(x = x_in)
#print('base_pred:\n', base_pred)
#y_p = base_pred
#for j in range(200):
# mean = np.sum(y_p[0][j]) / y_p[base_model0][j].shape[0]
# print('max y_p:',np.max(y_p[0][j]),'min y_p:',np.min(y_p[0][j]),'mean y_p:',mean,'mid y_p:',y_p[0][j][100])
# print('argmin:',np.argmin(y_p[0][j]),'argmax:',np.argmax(y_p[0][j]))
# count=0
# for i in range(y_p[0][j].shape[0]):
# if(y_p[0][j][i] < mean):
# count += 1
# print('count:',count)
base_pred =base_pred[:, :, :]
#base_pred =base_pred[:, 2:, :]
r = K.ctc_decode(base_pred, in_len, greedy = True, beam_width=100, top_paths=1)
#print('r', r)
r1 = K.get_value(r[0][0])
#print('r1', r1)
#r2 = K.get_value(r[1])
#print(r2)
r1=r1[0]
return r1
pass
def RecognizeSpeech(self, wavsignal, fs):
'''
The function ultimately used for speech recognition; recognises speech from a wav sample sequence
Note: this still has a bug at the moment
'''
#data = self.data
#data = DataSpeech('E:\\语音数据集')
#data.LoadDataList('dev')
# compute the input features
#data_input = GetMfccFeature(wavsignal, fs)
#t0=time.time()
data_input = GetFrequencyFeature3(wavsignal, fs)
#t1=time.time()
#print('time cost:',t1-t0)
input_length = len(data_input)
input_length = input_length // 8
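# note: the division by 8 matches the three pool_size=2 MaxPooling2D layers in
# CreateModel (2*2*2 = 8), which downsample the time axis before the CTC
# decoding length is computed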
data_input = np.array(data_input, dtype = np.float)
#print(data_input,data_input.shape)
data_input = data_input.reshape(data_input.shape[0],data_input.shape[1],1)
#t2=time.time()
r1 = self.Predict(data_input, input_length)
#t3=time.time()
#print('time cost:',t3-t2)
list_symbol_dic = GetSymbolList() # get the pinyin symbol list
r_str=[]
for i in r1:
r_str.append(list_symbol_dic[i])
return r_str
pass
def RecognizeSpeech_FromFile(self, filename):
'''
The function ultimately used for speech recognition; recognises the speech in the given wav file
'''
wavsignal,fs = read_wav_data(filename)
r = self.RecognizeSpeech(wavsignal, fs)
return r
pass
@property
def model(self):
'''
Returns the underlying Keras model
'''
return self._model
if(__name__=='__main__'):
#import tensorflow as tf
#from keras.backend.tensorflow_backend import set_session
#os.environ["CUDA_VISIBLE_DEVICES"] = "1"
#configuration: use only a fraction of the GPU memory
#config = tf.ConfigProto()
#config.gpu_options.per_process_gpu_memory_fraction = 0.95
#config.gpu_options.allow_growth=True #do not occupy all GPU memory, allocate on demand
#set_session(tf.Session(config=config))
datapath = abspath + ''
modelpath = abspath + 'model_speech'
if(not os.path.exists(modelpath)): # check whether the directory for saving models exists
os.makedirs(modelpath) # if not, create it so that saving the model later does not blow up
system_type = plat.system() # file path separators differ between operating systems, so check the platform
if(system_type == 'Windows'):
datapath = 'E:\\语音数据集'
modelpath = modelpath + '\\'
elif(system_type == 'Linux'):
datapath = abspath + Config.path_dir
modelpath = modelpath + '/'
else:
print('*[Message] Unknown System\n')
datapath = 'dataset'
modelpath = modelpath + '/'
ms = ModelSpeech()
#ms.LoadModel(modelpath + 'speech_model251_e_0_step_12000.model')
ms.TrainModel(epoch = 50, batch_size = 64, save_step = 500)
#t1=time.time()
#ms.TestModel(datapath, str_dataset='train', data_count = 128, out_report = True)
#ms.TestModel(datapath, str_dataset='dev', data_count = 128, out_report = True)
#ms.TestModel(datapath, str_dataset='test', data_count = 128, out_report = True)
#t2=time.time()
#print('Test Model Time Cost:',t2-t1,'s')
#r = ms.RecognizeSpeech_FromFile('E:\\语音数据集\\ST-CMDS-20170001_1-OS\\20170001P00241I0053.wav')
#r = ms.RecognizeSpeech_FromFile('E:\\语音数据集\\ST-CMDS-20170001_1-OS\\20170001P00020I0087.wav')
#r = ms.RecognizeSpeech_FromFile('E:\\语音数据集\\wav\\train\\A11\\A11_167.WAV')
#r = ms.RecognizeSpeech_FromFile('E:\\语音数据集\\wav\\test\\D4\\D4_750.wav')
#print('*[Info] Speech recognition result:\n',r)
| 38.74514
| 157
| 0.58855
|
69faef362eec2f4e2ecf1a4d12e845dd6a7d4cb5
| 4,182
|
py
|
Python
|
python/test/open3d_test.py
|
leomariga/Open3D
|
d197339fcd29ad0803a182ef8953d89e563f94d7
|
[
"MIT"
] | 8
|
2021-03-17T14:24:12.000Z
|
2022-03-30T15:35:27.000Z
|
python/test/open3d_test.py
|
leomariga/Open3D
|
d197339fcd29ad0803a182ef8953d89e563f94d7
|
[
"MIT"
] | 1
|
2021-11-04T09:22:25.000Z
|
2022-02-14T01:32:31.000Z
|
python/test/open3d_test.py
|
leomariga/Open3D
|
d197339fcd29ad0803a182ef8953d89e563f94d7
|
[
"MIT"
] | 2
|
2021-08-24T18:06:55.000Z
|
2021-12-17T10:48:34.000Z
|
# ----------------------------------------------------------------------------
# - Open3D: www.open3d.org -
# ----------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2018-2021 www.open3d.org
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# ----------------------------------------------------------------------------
import os
import sys
import urllib.request
import zipfile
import numpy as np
import pytest
# Avoid pathlib for compatibility with Python 3.5+.
_pwd = os.path.dirname(os.path.realpath(__file__))
test_data_dir = os.path.join(_pwd, os.pardir, os.pardir, "examples",
"test_data")
# Whenever you import open3d_test, the test data will be downloaded
# automatically to Open3D/examples/test_data/open3d_downloads. Therefore, make
# sure to import open3d_test or anything inside open3d_test before running
# unit tests. See https://github.com/intel-isl/open3d_downloads for details on
# how to manage the test data files.
sys.path.append(test_data_dir)
from download_utils import download_all_files as _download_all_files
_download_all_files()
def torch_available():
try:
import torch
import torch.utils.dlpack
except ImportError:
return False
return True
def list_devices():
"""
If Open3D is built with CUDA support:
- If cuda device is available, returns [Device("CPU:0"), Device("CUDA:0")].
- If cuda device is not available, returns [Device("CPU:0")].
If Open3D is built without CUDA support:
- returns [Device("CPU:0")].
"""
import open3d as o3d
if o3d.core.cuda.device_count() > 0:
return [o3d.core.Device("CPU:0"), o3d.core.Device("CUDA:0")]
else:
return [o3d.core.Device("CPU:0")]
def list_devices_with_torch():
"""
Similar to list_devices(), but take PyTorch available devices into account.
The returned devices are compatible on both PyTorch and Open3D.
If PyTorch is not available at all, empty list will be returned, thus the
test is effectively skipped.
"""
if torch_available():
import open3d as o3d
import torch
if (o3d.core.cuda.device_count() > 0 and torch.cuda.is_available() and
torch.cuda.device_count() > 0):
return [o3d.core.Device("CPU:0"), o3d.core.Device("CUDA:0")]
else:
return [o3d.core.Device("CPU:0")]
else:
return []
def download_fountain_dataset():
fountain_path = os.path.join(test_data_dir, "fountain_small")
fountain_zip_path = os.path.join(test_data_dir, "fountain.zip")
if not os.path.exists(fountain_path):
print("Downloading fountain dataset")
url = "https://github.com/intel-isl/open3d_downloads/releases/download/open3d_tutorial/fountain.zip"
urllib.request.urlretrieve(url, fountain_zip_path)
print("Extracting fountain dataset")
with zipfile.ZipFile(fountain_zip_path, "r") as zip_ref:
zip_ref.extractall(os.path.dirname(fountain_path))
os.remove(fountain_zip_path)
return fountain_path
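# Illustrative usage (not part of the original helper module): unit tests
# typically parametrize over the devices reported by list_devices(), e.g.
#
#   import pytest
#   from open3d_test import list_devices
#
#   @pytest.mark.parametrize("device", list_devices())
#   def test_something(device):
#       ...  # construct tensors / geometry on `device`
#
# The decorator pattern is the point here; the test body is only a sketch.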
| 39.45283
| 108
| 0.664036
|
2532e3e6ddd6f5a2c3cc67163d6554bcdbb187b1
| 1,269
|
py
|
Python
|
ssseg/cfgs/apcnet/cfgs_voc_resnet101os8.py
|
skydengyao/sssegmentation
|
606b05983fa967bb3c98d1120f44dfc516532dad
|
[
"MIT"
] | 1
|
2021-05-28T06:42:37.000Z
|
2021-05-28T06:42:37.000Z
|
ssseg/cfgs/apcnet/cfgs_voc_resnet101os8.py
|
skydengyao/sssegmentation
|
606b05983fa967bb3c98d1120f44dfc516532dad
|
[
"MIT"
] | null | null | null |
ssseg/cfgs/apcnet/cfgs_voc_resnet101os8.py
|
skydengyao/sssegmentation
|
606b05983fa967bb3c98d1120f44dfc516532dad
|
[
"MIT"
] | null | null | null |
'''define the config file for voc and resnet101os8'''
from .base_cfg import *
# modify dataset config
DATASET_CFG = DATASET_CFG.copy()
DATASET_CFG['train'].update(
{
'type': 'voc',
'set': 'trainaug',
'rootdir': 'data/VOCdevkit/VOC2012',
}
)
DATASET_CFG['test'].update(
{
'type': 'voc',
'rootdir': 'data/VOCdevkit/VOC2012',
}
)
# modify dataloader config
DATALOADER_CFG = DATALOADER_CFG.copy()
# modify optimizer config
OPTIMIZER_CFG = OPTIMIZER_CFG.copy()
OPTIMIZER_CFG.update(
{
'max_epochs': 60,
}
)
# modify losses config
LOSSES_CFG = LOSSES_CFG.copy()
# modify model config
MODEL_CFG = MODEL_CFG.copy()
MODEL_CFG.update(
{
'num_classes': 21,
}
)
# modify inference config
INFERENCE_CFG = INFERENCE_CFG.copy()
# modify common config
COMMON_CFG = COMMON_CFG.copy()
COMMON_CFG['train'].update(
{
'backupdir': 'apcnet_resnet101os8_voc_train',
'logfilepath': 'apcnet_resnet101os8_voc_train/train.log',
}
)
COMMON_CFG['test'].update(
{
'backupdir': 'apcnet_resnet101os8_voc_test',
'logfilepath': 'apcnet_resnet101os8_voc_test/test.log',
'resultsavepath': 'apcnet_resnet101os8_voc_test/apcnet_resnet101os8_voc_results.pkl'
}
)
| 23.5
| 92
| 0.669031
|
e9f0a45cfd492831ae4dfcb17b9033903bd9198e
| 3,679
|
py
|
Python
|
payload/Library/Python/2.7/site-packages/touchbarlib.py
|
homebysix/touchbarlib
|
450228a78bee6c00b9dec21387b877f5a3116298
|
[
"Apache-2.0"
] | 7
|
2018-04-20T09:44:12.000Z
|
2019-08-15T10:56:52.000Z
|
payload/Library/Python/2.7/site-packages/touchbarlib.py
|
homebysix/touchbarlib
|
450228a78bee6c00b9dec21387b877f5a3116298
|
[
"Apache-2.0"
] | null | null | null |
payload/Library/Python/2.7/site-packages/touchbarlib.py
|
homebysix/touchbarlib
|
450228a78bee6c00b9dec21387b877f5a3116298
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
"""Routines for manipulating the TouchBar"""
import os
import subprocess
# pylint: disable=E0611
from Foundation import NSURL
from Foundation import CFPreferencesAppSynchronize
from Foundation import CFPreferencesCopyAppValue
from Foundation import CFPreferencesSetAppValue
# pylint: enable=E0611
class TouchBarError(Exception):
"""Basic exception"""
pass
class TouchBar:
"""Class to handle TouchBar operations"""
_DOMAIN = "com.apple.controlstrip"
_TOUCHBAR_PLIST = os.path.expanduser(
"~/Library/Preferences/com.apple.controlstrip.plist"
)
_SECTIONS = ["FullCustomized", "MiniCustomized"]
items = {}
default_settings = {
"FullCustomized": (
"com.apple.system.group.brightness",
"com.apple.system.mission-control",
"com.apple.system.launchpad",
"com.apple.system.group.keyboard-brightness",
"com.apple.system.group.media",
"com.apple.system.group.volume",
"com.apple.system.siri",
),
"MiniCustomized": (
"com.apple.system.brightness",
"com.apple.system.volume",
"com.apple.system.mute",
"com.apple.system.siri",
),
}
def __init__(self):
for key in self._SECTIONS:
try:
section = CFPreferencesCopyAppValue(key, self._DOMAIN)
self.items[key] = section.mutableCopy()
except AttributeError:
self.items[key] = self.default_settings[key]
except Exception:
raise
def isDefault(self):
return bool(self.items == self.default_settings)
def save(self):
"""saves our (modified) TouchBar preferences"""
for key in self._SECTIONS:
try:
CFPreferencesSetAppValue(key, self.items[key], self._DOMAIN)
except Exception:
raise TouchBarError
if not CFPreferencesAppSynchronize(self._DOMAIN):
raise TouchBarError
# restart the TouchBar
subprocess.call(["/usr/bin/killall", "ControlStrip"])
def findExistingItem(self, test_identifier, section="FullCustomized"):
"""returns index of item with identifier matching test_identifier
or -1 if not found"""
for index in range(len(self.items[section])):
if self.items[section][index] == test_identifier:
return index
return -1
def addItem(self, identifier, section="FullCustomized", index=None):
"""Adds a TouchBar item with the specified identifier."""
found_index = self.findExistingItem(identifier, section=section)
if found_index == -1:
if index:
self.items[section].insert(index, identifier)
else:
self.items[section].append(identifier)
def removeItem(self, identifier, section=None):
"""Removes a TouchBar item with matching identifier, if any"""
if section:
sections = [section]
else:
sections = self._SECTIONS
for section in sections:
found_index = self.findExistingItem(identifier, section=section)
if found_index > -1:
del self.items[section][found_index]
def replaceItem(self, old_identifier, new_identifier, section="FullCustomized"):
"""Replaces a TouchBar item. The new item replaces an item with the given
identifier"""
found_index = self.findExistingItem(old_identifier, section=section)
if found_index > -1:
self.items[section][found_index] = new_identifier
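# Minimal usage sketch (not part of the original module), built only from the
# methods defined above; the identifier comes from default_settings. Note that
# running this for real would modify and restart the user's Control Strip.
if __name__ == "__main__":
    touchbar = TouchBar()
    if not touchbar.isDefault():
        print("TouchBar layout has been customised")
    # move Siri off the full control strip, then persist and restart the strip
    touchbar.removeItem("com.apple.system.siri", section="FullCustomized")
    touchbar.save()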
| 33.445455
| 84
| 0.621365
|
13cb60d8b750fa20aa1b0b8981a6867e2571cf02
| 858
|
py
|
Python
|
coding_interviews/elements_of_programming_interview/longest_subarray_length_with_same_integers.py
|
LeandroTk/Algorithms
|
569ed68eba3eeff902f8078992099c28ce4d7cd6
|
[
"MIT"
] | 205
|
2018-12-01T17:49:49.000Z
|
2021-12-22T07:02:27.000Z
|
coding_interviews/elements_of_programming_interview/longest_subarray_length_with_same_integers.py
|
LeandroTk/Algorithms
|
569ed68eba3eeff902f8078992099c28ce4d7cd6
|
[
"MIT"
] | 2
|
2020-01-01T16:34:29.000Z
|
2020-04-26T19:11:13.000Z
|
coding_interviews/elements_of_programming_interview/longest_subarray_length_with_same_integers.py
|
LeandroTk/Algorithms
|
569ed68eba3eeff902f8078992099c28ce4d7cd6
|
[
"MIT"
] | 50
|
2018-11-28T20:51:36.000Z
|
2021-11-29T04:08:25.000Z
|
'''
Write a program that takes an array of integers and finds
the length of a longest subarray all of whose entries are equal.
'''
def longest_subarray_length_with_same_integers(numbers):
if not numbers:
return 0
longest_subarray_length, counter, current_comparator = 0, 0, numbers[0]
for number in numbers:
if number == current_comparator:
counter += 1
else:
counter = 1
current_comparator = number
longest_subarray_length = max(longest_subarray_length, counter)
return longest_subarray_length
numbers = [260, 290, 290, 250, 250, 250]
prices = [310, 315, 275, 295, 260, 270, 290, 230, 255, 250]
print(longest_subarray_length_with_same_integers(numbers))
print(longest_subarray_length_with_same_integers(prices))
print(longest_subarray_length_with_same_integers([]))
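# Illustrative checks (not part of the original file): with the arrays above,
# the longest run of equal entries is the three consecutive 250s in `numbers`,
# every adjacent pair in `prices` differs, and the empty list gives 0.
assert longest_subarray_length_with_same_integers(numbers) == 3
assert longest_subarray_length_with_same_integers(prices) == 1
assert longest_subarray_length_with_same_integers([]) == 0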
| 29.586207
| 75
| 0.717949
|
aeee7f29c4d9a25a42b0519900eada65c6818361
| 165
|
py
|
Python
|
debug/convert_scores_compo60.py
|
DavidSabbagh/meeg_power_regression
|
d9cd5e30028ffc24f08a52966c7641f611e92ee6
|
[
"BSD-3-Clause"
] | 1
|
2020-12-18T06:10:16.000Z
|
2020-12-18T06:10:16.000Z
|
debug/convert_scores_compo60.py
|
DavidSabbagh/meeg_power_regression
|
d9cd5e30028ffc24f08a52966c7641f611e92ee6
|
[
"BSD-3-Clause"
] | null | null | null |
debug/convert_scores_compo60.py
|
DavidSabbagh/meeg_power_regression
|
d9cd5e30028ffc24f08a52966c7641f611e92ee6
|
[
"BSD-3-Clause"
] | 2
|
2021-03-01T01:36:38.000Z
|
2021-03-01T13:44:02.000Z
|
import pandas as pd
import numpy as np
X = np.load("all_scores_mag_compo60.npy", allow_pickle=True).item()
df = pd.DataFrame(X)
df.to_csv("scores_mag_compo60.csv")
| 23.571429
| 67
| 0.763636
|
52b8916da0285acc7b3d51afd5cdb404be8edd9e
| 803
|
py
|
Python
|
tests/test_util.py
|
aaronm759/us-congress-pizza-flag-tracker
|
2dcd407135655e48742a3f3b4c391222d815c4f2
|
[
"CC0-1.0"
] | null | null | null |
tests/test_util.py
|
aaronm759/us-congress-pizza-flag-tracker
|
2dcd407135655e48742a3f3b4c391222d815c4f2
|
[
"CC0-1.0"
] | null | null | null |
tests/test_util.py
|
aaronm759/us-congress-pizza-flag-tracker
|
2dcd407135655e48742a3f3b4c391222d815c4f2
|
[
"CC0-1.0"
] | null | null | null |
import pytest
import cv2
from io import BytesIO
# from qrtools.qrtools import QR
from app import app
from controllers import get_qrcode
import io
import numpy as np
from cachelib import file
class TestUtils():
@pytest.mark.skip(reason="function works, but test does not")
def test_qrcode(self):
qrcodeValue = "https://example.com/A43X2Q3"
with app.test_client() as c:
response = c.get('/qrcode?value=' + qrcodeValue)
imgData = BytesIO(response.data)
imgData.seek(0)
data = np.fromstring(imgData.getvalue(), dtype=np.uint8)
cv2Img = cv2.imdecode(data, 0)
detector = cv2.QRCodeDetector()
data, bbox, straight_qrcode = detector.detectAndDecode(cv2Img)
assert data == qrcodeValue
| 29.740741
| 74
| 0.655044
|
1d03972e09340e1a48c4312005fdcf97511223b4
| 7,759
|
py
|
Python
|
source/scipy_test.py
|
yux1991/PyRHEED
|
b39ad03651c92e3649069919ae48b1e5158cd3dd
|
[
"MIT"
] | 14
|
2019-01-08T14:32:31.000Z
|
2021-11-17T21:07:10.000Z
|
source/scipy_test.py
|
yux1991/PyRHEED
|
b39ad03651c92e3649069919ae48b1e5158cd3dd
|
[
"MIT"
] | 2
|
2019-05-14T08:56:36.000Z
|
2020-12-22T16:44:30.000Z
|
source/scipy_test.py
|
yux1991/PyRHEED
|
b39ad03651c92e3649069919ae48b1e5158cd3dd
|
[
"MIT"
] | 4
|
2019-03-12T20:03:54.000Z
|
2022-03-08T14:24:46.000Z
|
import time
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.colors as mcolors
from scipy.interpolate import LinearNDInterpolator
from scipy.interpolate import NearestNDInterpolator
import pandas
from collections import Counter
from scipy.stats import chisquare
from scipy.stats import power_divergence
from sklearn.mixture import BayesianGaussianMixture
class Test():
def __init__(self):
# Parameters of the dataset
self.random_state, self.n_components = 2, 4
self.fit_colors = list(mcolors.XKCD_COLORS.values())
self.covars = np.array([[[.1, .0], [.0, .1]],
[[.1, .0], [.0, .1]],
[[.1, .0], [.0, .1]],
[[.1, .0], [.0, .1]]])
self.samples = np.array([2000, 5000, 7000, 2000])
self.means = np.array([[-1.0, -.70],
[.0, .0],
[.5, .30],
[1.0, .70]])
def chi_square(self,c,n):
s = np.ceil(np.random.rand(n)*c)
ct = list(Counter(s).values())
print(chisquare(ct))
print(power_divergence(ct,lambda_=1))
def gmm(self):
# mean_precision_prior= 0.8 to minimize the influence of the prior
estimators = [
("Infinite mixture with a Dirichlet process\n prior and" r"$\gamma_0=$",
BayesianGaussianMixture(
weight_concentration_prior_type="dirichlet_process",
n_components=3 * self.n_components, reg_covar=0, init_params='random',
max_iter=1500, mean_precision_prior=.8,
random_state=self.random_state, verbose=0), [1, 1000, 100000])]
# Generate data
rng = np.random.RandomState(self.random_state)
X = np.vstack([
rng.multivariate_normal(self.means[j], self.covars[j], self.samples[j])
for j in range(self.n_components)])
y = np.concatenate([np.full(self.samples[j], j, dtype=int)
for j in range(self.n_components)])
# Plot results in two different figures
for (title, estimator, concentrations_prior) in estimators:
plt.figure(figsize=(4.7 * 3, 8))
plt.subplots_adjust(bottom=.04, top=0.90, hspace=.05, wspace=.05,
left=.03, right=.99)
gs = gridspec.GridSpec(3, len(concentrations_prior))
for k, concentration in enumerate(concentrations_prior):
estimator.weight_concentration_prior = concentration
estimator.fit(X)
self.plot_results(plt.subplot(gs[0:2, k]), plt.subplot(gs[2, k]), estimator,
X, y, r"%s$%.1e$" % (title, concentration),
plot_title=k == 0)
plt.show()
def samp(self):
start_time = time.time()
raw_3d = pandas.read_csv(filepath_or_buffer="c:/users/yux20/documents/05042018 MoS2/3D_Map_04162019.txt",sep=" ",names=["x","y","z","intensity"],na_values="NaN")
length = raw_3d.index[-1]+1
x_min,x_max = raw_3d["x"].min(), raw_3d["x"].max()
y_min,y_max = raw_3d["y"].min(), raw_3d["y"].max()
z_min,z_max = raw_3d["z"].min(), raw_3d["z"].max()
nx,ny = 500,500
nz = int((z_max-z_min)/(x_max-x_min)*nx)
x_range = np.linspace(int(x_min/np.sqrt(2)),int(x_max/np.sqrt(2)),nx)
y_range = np.linspace(int(x_min/np.sqrt(2)),int(x_max/np.sqrt(2)),ny)
z_range = np.linspace(z_min,z_max,nz)
x,y,z=np.meshgrid(x_range,y_range,z_range)
subset=[]
i = 0
while i < length:
radius = abs(raw_3d.iat[i,0])
intensity = raw_3d.iat[i,3]
step = int(x_max/radius*10) if radius>x_max*0.2 else 50
subset.append(i)
i +=step
print("length of the resampled data is {}".format(len(subset)))
print("finished meshgrid, using {:.2f}s".format(time.time()-start_time))
start_time = time.time()
rawx = raw_3d.iloc[subset,[0]].T.to_numpy()*np.cos(raw_3d.iloc[subset,[1]].T.to_numpy()/np.pi)
rawy = raw_3d.iloc[subset,[0]].T.to_numpy()*np.sin(raw_3d.iloc[subset,[1]].T.to_numpy()/np.pi)
rawz = raw_3d.iloc[subset,[2]].T.to_numpy()
intensity = np.power(raw_3d.iloc[subset,[3]].T.to_numpy()[0],4)
print("finished converting, using {:.2f}s".format(time.time()-start_time))
start_time = time.time()
interp = LinearNDInterpolator(list(zip(rawx[0],rawy[0],rawz[0])),intensity,fill_value=0)
print("finished generating interpolator, using {:.2f}s".format(time.time()-start_time))
start_time = time.time()
interp_3d = interp(x,y,z)
print("finished interpolation, using {:.2f}s".format(time.time()-start_time))
start_time = time.time()
intensity_sum = np.sum(np.concatenate(interp_3d))
print("finished sum, using {:.2f}s".format(time.time()-start_time))
start_time = time.time()
output = open("c:/users/yux20/documents/05042018 MoS2/interpolated_3D_map.txt",mode='w')
for i in range(nx):
for j in range(ny):
for k in range(nz):
row = "\t".join([str(np.around(x[j][i][k],4)),str(np.around(y[j][i][k],4)),str(np.around(z[j][i][k],4)),str(np.around(interp_3d[j][i][k]/intensity_sum,10))])+"\n"
output.write(row)
output.close()
print("finished writting, using {:.2f}s".format(time.time()-start_time))
def plot_ellipses(self,ax, weights, means, covars):
for n in range(means.shape[0]):
eig_vals, eig_vecs = np.linalg.eigh(covars[n])
unit_eig_vec = eig_vecs[0] / np.linalg.norm(eig_vecs[0])
angle = np.arctan2(unit_eig_vec[1], unit_eig_vec[0])
# Ellipse needs degrees
angle = 180 * angle / np.pi
# eigenvector normalization
eig_vals = 2 * np.sqrt(2) * np.sqrt(eig_vals)
ell = mpl.patches.Ellipse(means[n], eig_vals[0], eig_vals[1],
180 + angle, edgecolor='black')
ell.set_clip_box(ax.bbox)
ell.set_alpha(weights[n])
ell.set_facecolor(self.fit_colors[n])
ax.add_artist(ell)
def plot_results(self,ax1, ax2, estimator, X, y, title, plot_title=False):
ax1.set_title(title)
ax1.scatter(X[:, 0], X[:, 1], s=5, marker='o', color='lightgray', alpha=0.8)
ax1.set_xlim(-2., 2.)
ax1.set_ylim(-3., 3.)
ax1.set_xticks(())
ax1.set_yticks(())
self.plot_ellipses(ax1, estimator.weights_, estimator.means_,
estimator.covariances_)
ax2.get_xaxis().set_tick_params(direction='out')
ax2.yaxis.grid(True, alpha=0.7)
for n in range(estimator.means_.shape[0]):
k,w = n, estimator.weights_[n]
ax2.bar(k, w, width=0.9, color=self.fit_colors[k], zorder=3,
align='center', edgecolor='black')
ax2.text(k, w + 0.007, "%.1f%%" % (w * 100.),
horizontalalignment='center')
ax2.set_xlim(-.6, 2 * self.n_components - .4)
ax2.set_ylim(0., 1.1)
ax2.tick_params(axis='y', which='both', left=False,
right=False, labelleft=False)
ax2.tick_params(axis='x', which='both', top=False)
if plot_title:
ax1.set_ylabel('Estimated Mixtures')
ax2.set_ylabel('Weight of each component')
if __name__ == "__main__":
test = Test()
#test.chi_square(c=100,n=100000)
#test.gmm()
test.samp()
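# Minimal sketch of the interpolation step used in Test.samp() above, on tiny
# made-up data (the points and values here are invented for illustration):
# LinearNDInterpolator builds a piecewise-linear interpolant over scattered
# points and returns fill_value outside their convex hull. Call _interp_demo()
# to see the values; it is not invoked by the main block above.
def _interp_demo():
    pts = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
    vals = np.array([0.0, 1.0, 1.0, 2.0])
    interp = LinearNDInterpolator(pts, vals, fill_value=0.0)
    print(interp([[0.5, 0.5], [5.0, 5.0]]))  # approximately [1.0, 0.0]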
| 44.337143
| 182
| 0.571852
|
c0ee4a19350f690c977312994e7feef9ed48ed8b
| 69,886
|
py
|
Python
|
api/vm/define/serializers.py
|
DigitalOzUT/esdc-ce
|
e0d918994204f3ca69f363c71941c7a1bb123109
|
[
"Apache-2.0"
] | null | null | null |
api/vm/define/serializers.py
|
DigitalOzUT/esdc-ce
|
e0d918994204f3ca69f363c71941c7a1bb123109
|
[
"Apache-2.0"
] | null | null | null |
api/vm/define/serializers.py
|
DigitalOzUT/esdc-ce
|
e0d918994204f3ca69f363c71941c7a1bb123109
|
[
"Apache-2.0"
] | null | null | null |
from logging import getLogger
from django.core.validators import RegexValidator
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from django.utils.six import iteritems
from django.core import validators
from django.conf import settings
from api.mon import MonitoringBackend
from gui.models import User
from vms.models import VmTemplate, Vm, Node, Image, Subnet, IPAddress, NodeStorage, DefaultDc
from api import serializers as s
from api.decorators import catch_api_exception
from api.exceptions import APIError, ObjectAlreadyExists
from api.validators import validate_owner, validate_mdata, mod2_validator
from api.vm.utils import get_nodes, get_templates, get_images, get_subnets, get_zpools, get_owners
from api.vm.base.serializers import VmBaseSerializer
from api.dns.record.api_views import RecordView
PERMISSION_DENIED = _('Permission denied')
INVALID_HOSTNAMES = frozenset(['define', 'status', 'backup', 'snapshot'])
NIC_ALLOWED_IPS_MAX = 8
logger = getLogger(__name__)
def get_vm_template(request, data, prefix=''):
if not data:
return None
template_name = data.get(prefix + 'template', None)
if template_name:
try:
return get_templates(request).get(name=template_name)
except VmTemplate.DoesNotExist:
pass # this should be stopped by default validate_template
return None
def is_kvm(vm, data=None, prefix='', ostype=None, template=None):
if vm:
return vm.is_kvm()
if data is not None:
ostype = data.get(prefix + 'ostype', None)
if ostype is None and template:
ostype = template.vm_define.get('ostype', None) or template.ostype
if ostype:
try:
return int(ostype) in Vm.KVM
except (TypeError, ValueError):
pass
return True
def validate_zpool(request, name, node=None):
try:
qs = get_zpools(request)
if node:
return qs.select_related('storage').get(node=node, zpool=name)
elif not qs.filter(zpool=name).exists():
raise NodeStorage.DoesNotExist
except NodeStorage.DoesNotExist:
raise s.ValidationError(_('Storage with zpool=%s does not exist.') % name)
return None
def validate_nic_tags(vm, new_node=None, new_net=None):
"""VM nic tags must exists on compute node before deploy - bug #chili-593"""
if not new_node:
new_node = vm.node
node_nic_tags = set([nictag['name'] for nictag in new_node.nictags])
vm_nic_tags = set([Subnet.objects.get(uuid=nic['network_uuid']).nic_tag for nic in vm.json_get_nics()])
if new_net:
vm_nic_tags.add(new_net.nic_tag)
if not vm_nic_tags.issubset(node_nic_tags):
raise s.ValidationError(_('Network is not available on compute node.'))
return None
class VmDefineSerializer(VmBaseSerializer):
uuid = s.CharField(read_only=True)
hostname = s.RegexField(r'^[A-Za-z0-9][A-Za-z0-9\.-]+[A-Za-z0-9]$', max_length=128, min_length=4)
alias = s.RegexField(r'^[A-Za-z0-9][A-Za-z0-9\.-]+[A-Za-z0-9]$', max_length=24, min_length=4, required=False)
ostype = s.IntegerChoiceField(choices=Vm.OSTYPE, default=settings.VMS_VM_OSTYPE_DEFAULT)
cpu_type = s.ChoiceField(choices=Vm.CPU_TYPE, default=settings.VMS_VM_CPU_TYPE_DEFAULT)
vcpus = s.IntegerField(max_value=1024) # vv (min_value set below)
ram = s.IntegerField(max_value=1048576, min_value=1)
note = s.CharField(required=False)
owner = s.SlugRelatedField(slug_field='username', queryset=User.objects, read_only=False, required=False) # vv
node = s.SlugRelatedField(slug_field='hostname', queryset=Node.objects, read_only=False, required=False) # vv
template = s.SlugRelatedField(slug_field='name', queryset=VmTemplate.objects, read_only=False, required=False) # vv
tags = s.TagField(required=False, default=[]) # null value checked in TagField
monitored_internal = s.BooleanField(default=settings.MON_ZABBIX_ENABLED)
monitored = s.BooleanField(default=settings.VMS_VM_MONITORED_DEFAULT)
monitoring_hostgroups = s.ArrayField(max_items=16, default=[],
validators=(
RegexValidator(regex=MonitoringBackend.RE_MONITORING_HOSTGROUPS),))
monitoring_templates = s.ArrayField(max_items=32, default=[])
installed = s.BooleanField(default=False)
snapshot_limit_manual = s.IntegerField(required=False) # Removed from json if null, limits set below
snapshot_size_limit = s.IntegerField(required=False) # Removed from json if null, limits set below
cpu_cap = s.IntegerField(read_only=True)
cpu_shares = s.IntegerField(default=settings.VMS_VM_CPU_SHARES_DEFAULT, min_value=0, max_value=1048576)
zfs_io_priority = s.IntegerField(default=settings.VMS_VM_ZFS_IO_PRIORITY_DEFAULT, min_value=0, max_value=1024)
zpool = s.CharField(default=Node.ZPOOL, max_length=64)
resolvers = s.ArrayField(read_only=True)
maintain_resolvers = s.BooleanField(default=True) # OS only
dns_domain = s.RegexField(r'^[A-Za-z0-9][A-Za-z0-9\ \._/-]*$', max_length=1024, required=False) # OS only
routes = s.RoutesField(default={}) # OS only
vga = s.ChoiceField(choices=Vm.VGA_MODEL, default=settings.VMS_VGA_MODEL_DEFAULT) # KVM only
mdata = s.MetadataField(default=settings.VMS_VM_MDATA_DEFAULT,
validators=(validate_mdata(Vm.RESERVED_MDATA_KEYS),))
locked = s.BooleanField(read_only=True, required=False)
created = s.DateTimeField(read_only=True, required=False)
def __init__(self, request, *args, **kwargs): # noqa: R701
self.request = request
self.old_hostname = None
self.hostname_changed = False
self.zpool_changed = False
self.node_changed = False
self.update_node_resources = False
self.update_storage_resources = []
self.check_node_resources = kwargs.pop('check_node_resources', True)
self.zone_img = None
self.dc_settings = dc_settings = self.request.dc.settings
hostname = kwargs.pop('hostname', None)
data = kwargs.get('data', None)
super(VmDefineSerializer, self).__init__(request, *args, **kwargs)
if self.request.method == 'POST':
vm_template = get_vm_template(request, data)
else:
vm_template = None
self._is_kvm = kvm = is_kvm(self.object, data, template=vm_template)
if kvm:
del self.fields['maintain_resolvers']
del self.fields['routes']
del self.fields['dns_domain']
else:
del self.fields['cpu_type']
del self.fields['vga']
if not kwargs.get('many', False):
self.fields['owner'].default = request.user.username # Does not work
self.fields['ostype'].default = dc_settings.VMS_VM_OSTYPE_DEFAULT
self.fields['zpool'].default = dc_settings.VMS_STORAGE_DEFAULT
# noinspection PyProtectedMember
self.fields['monitored_internal'].default = DefaultDc().settings.MON_ZABBIX_ENABLED \
and dc_settings._MON_ZABBIX_VM_SYNC
self.fields['monitored'].default = dc_settings.MON_ZABBIX_ENABLED and dc_settings.MON_ZABBIX_VM_SYNC \
and dc_settings.VMS_VM_MONITORED_DEFAULT
self.fields['cpu_shares'].default = dc_settings.VMS_VM_CPU_SHARES_DEFAULT
self.fields['zfs_io_priority'].default = dc_settings.VMS_VM_ZFS_IO_PRIORITY_DEFAULT
self.fields['owner'].queryset = get_owners(self.request)
self.fields['template'].queryset = get_templates(self.request)
self.fields['node'].queryset = get_nodes(self.request, is_compute=True)
self.fields['mdata'].default = dc_settings.VMS_VM_MDATA_DEFAULT
field_snapshot_limit_manual = self.fields['snapshot_limit_manual']
field_snapshot_size_limit = self.fields['snapshot_size_limit']
field_snapshot_limit_manual.default = dc_settings.VMS_VM_SNAPSHOT_LIMIT_MANUAL_DEFAULT
field_snapshot_size_limit.default = dc_settings.VMS_VM_SNAPSHOT_SIZE_LIMIT_DEFAULT
if dc_settings.VMS_VM_SNAPSHOT_LIMIT_MANUAL is None:
min_snap, max_snap = 0, 65536
else:
min_snap, max_snap = 1, int(dc_settings.VMS_VM_SNAPSHOT_LIMIT_MANUAL)
field_snapshot_limit_manual.required = field_snapshot_limit_manual.disallow_empty = True
field_snapshot_limit_manual.validators.append(validators.MinValueValidator(min_snap))
field_snapshot_limit_manual.validators.append(validators.MaxValueValidator(max_snap))
if dc_settings.VMS_VM_SNAPSHOT_SIZE_LIMIT is None:
min_snaps_size, max_snaps_size = 0, 2147483647
else:
min_snaps_size, max_snaps_size = 1, int(dc_settings.VMS_VM_SNAPSHOT_SIZE_LIMIT)
field_snapshot_size_limit.required = field_snapshot_size_limit.disallow_empty = True
field_snapshot_size_limit.validators.append(validators.MinValueValidator(min_snaps_size))
field_snapshot_size_limit.validators.append(validators.MaxValueValidator(max_snaps_size))
if kvm:
self.fields['vga'].default = dc_settings.VMS_VGA_MODEL_DEFAULT
if kvm or dc_settings.VMS_VM_CPU_CAP_REQUIRED:
vcpus_min = 1
else:
vcpus_min = 0
# vcpus can be set to 0 only for zones and when VMS_VM_CPU_CAP_REQUIRED=False
self.fields['vcpus'].validators.append(validators.MinValueValidator(vcpus_min))
# defaults
if self.request.method == 'POST':
self.fields['hostname'].default = hostname
self.fields['alias'].default = hostname
# default dns_domain for zones is domain part of hostname
if not kvm:
if '.' in hostname:
self.fields['dns_domain'].default = hostname.split('.', 1)[-1]
else:
self.fields['dns_domain'].default = ''
# defaults from template
if vm_template:
# ostype is in own column
if vm_template.ostype is not None:
self.fields['ostype'].default = vm_template.ostype
# all serializer attributes are in json['vm_define'] object
# (also the ostype can be defined here)
for field, value in vm_template.vm_define.items():
try:
self.fields[field].default = value
except KeyError:
pass
def restore_object(self, attrs, instance=None):
if instance is not None: # set (PUT)
vm = instance
else: # create (POST)
vm = Vm(dc=self.request.dc)
# Set owner first (needed for hostname_is_valid_fqdn)
if 'owner' in attrs and attrs['owner'] is not None:
vm.owner = attrs['owner']
# Cache old hostname in case we would change it
vm.hostname_is_valid_fqdn()
# Get json
_json = vm.json
# Datacenter settings
dc_settings = vm.dc.settings
# Json defaults must be set before template data
if 'uuid' not in _json:
_json.update2(dc_settings.VMS_VM_JSON_DEFAULTS.copy())
_json['resolvers'] = dc_settings.VMS_VM_RESOLVERS_DEFAULT
# First populate vm.json with template data, so they can be overridden by data specified by user
if 'template' in attrs and attrs['template'] is not None:
vm.template = attrs['template']
_json.update2(vm.sync_template())
data = vm.template.vm_define
else:
data = {}
# Set json
vm.json = _json
# Mix template data with user attributes (which take precedence here)
data.update(attrs)
# ostype and brand must be set first
if 'ostype' in data:
vm.set_ostype(data.pop('ostype'))
# Save user data
for key, val in iteritems(data):
if key == 'node':
vm.set_node(val)
elif key == 'tags':
vm.set_tags(val)
else:
setattr(vm, key, val)
# Default disk with image for non-global zone
if instance is None and not vm.is_kvm() and 'image_uuid' not in vm.json:
vm.save_item('image_uuid', self.zone_img.uuid, save=False)
vm.save_item('quota', int(round(float(self.zone_img.size) / float(1024))), save=False)
vm.save_item('zfs_root_compression', self.dc_settings.VMS_DISK_COMPRESSION_DEFAULT, save=False)
return vm
def validate_owner(self, attrs, source):
"""Cannot change owner while pending tasks exist"""
validate_owner(self.object, attrs.get(source, None), _('VM'))
return attrs
def validate_node(self, attrs, source):
# Changing compute nodes is not supported
try:
value = attrs[source]
except KeyError:
pass
else:
# Only changing from None or in notcreated state is allowed
if self.object and self.object.node:
if self.object.node != value:
if self.object.is_notcreated():
self.node_changed = True
else:
raise s.ValidationError(_('Cannot change node.'))
elif value is not None:
self.node_changed = True
if self.node_changed and value:
if value.status != Node.ONLINE:
raise s.ValidationError(_('Node is currently not available.'))
# Node changed to some existing node - check nic tags - bug #chili-593
if self.object:
validate_nic_tags(self.object, new_node=value)
return attrs
def validate_hostname(self, attrs, source):
# Changing the hostname is an invasive operation
try:
value = attrs[source]
except KeyError:
pass
else:
if self.object and (self.object.hostname == value or self.object.uuid == value):
pass # Do not check if the same hostname or uuid was provided
elif Vm.objects.filter(Q(hostname__iexact=value) | Q(uuid__iexact=value)).exists():
raise ObjectAlreadyExists(model=Vm)
elif '..' in value or '--' in value or value in INVALID_HOSTNAMES:
raise s.ValidationError(s.WritableField.default_error_messages['invalid'])
if self.object and self.object.hostname != value:
self.old_hostname = self.object.hostname # Used by info event
self.hostname_changed = True # Update DNS record
return attrs
def validate_template(self, attrs, source):
# Check if template changed
try:
value = attrs[source]
except KeyError:
pass
else:
if self.object and value and self.object.template != value:
raise s.ValidationError(_('Cannot change template.'))
return attrs
def validate_cpu_shares(self, attrs, source):
try:
value = attrs[source]
except KeyError:
pass
else:
if not self.request.user.is_staff and value != self.dc_settings.VMS_VM_CPU_SHARES_DEFAULT:
raise s.ValidationError(PERMISSION_DENIED)
return attrs
def validate_zfs_io_priority(self, attrs, source):
try:
value = attrs[source]
except KeyError:
pass
else:
if not self.request.user.is_staff and value != self.dc_settings.VMS_VM_ZFS_IO_PRIORITY_DEFAULT:
raise s.ValidationError(PERMISSION_DENIED)
return attrs
def validate_zpool(self, attrs, source):
# Just check if zpool changed
try:
value = attrs[source]
except KeyError:
pass
else:
if self.object:
if self.object.zpool == value:
return attrs
if self.object.is_deployed():
raise s.ValidationError(_('Cannot change zpool.'))
if not self.object.is_kvm():
raise s.ValidationError(_('Cannot change zpool for this OS type. '
'Please change it on the first disk.'))
self.zpool_changed = True
return attrs
def validate_ostype(self, attrs, source):
# ostype cannot change
try:
value = attrs[source]
except KeyError:
pass
else:
if self.object:
if self.object.ostype != value:
raise s.ValidationError(_('Cannot change ostype.'))
elif not is_kvm(self.object, ostype=value):
# Creating zone -> Issue #chili-461 (must be enabled globally and in DC)
if not (settings.VMS_ZONE_ENABLED and self.dc_settings.VMS_ZONE_ENABLED):
raise s.ValidationError(_('This OS type is not supported.'))
# Creating zone -> check if default zone image is available
if value == Vm.LINUX_ZONE:
default_zone_image = self.dc_settings.VMS_DISK_IMAGE_LX_ZONE_DEFAULT
else:
default_zone_image = self.dc_settings.VMS_DISK_IMAGE_ZONE_DEFAULT
zone_images = get_images(self.request, ostype=value) # Linux Zone or SunOS Zone images ordered by name
try:
self.zone_img = zone_images.get(name=default_zone_image)
except Image.DoesNotExist:
self.zone_img = zone_images.first()
if not self.zone_img:
raise s.ValidationError(_('Default disk image for this OS type is not available.'))
return attrs
def validate_monitored_internal(self, attrs, source):
# Only SuperAdmin can change this attribute
try:
value = attrs[source]
except KeyError:
pass
else:
if not self.request.user.is_staff and value != self.fields['monitored_internal'].default:
raise s.ValidationError(PERMISSION_DENIED)
return attrs
def validate_monitoring_hostgroups(self, attrs, source):
# Allow to use only available hostgroups
try:
value = attrs[source]
except KeyError:
pass
else:
if self.object and self.object.monitoring_hostgroups == value:
return attrs
elif self.dc_settings.MON_ZABBIX_HOSTGROUPS_VM_RESTRICT and not \
set(value).issubset(set(self.dc_settings.MON_ZABBIX_HOSTGROUPS_VM_ALLOWED)):
raise s.ValidationError(_('Selected monitoring hostgroups are not available.'))
return attrs
def validate_monitoring_templates(self, attrs, source):
# Allow to use only available templates
try:
value = attrs[source]
except KeyError:
pass
else:
if self.object and self.object.monitoring_templates == value:
return attrs
elif self.dc_settings.MON_ZABBIX_TEMPLATES_VM_RESTRICT and not \
set(value).issubset(set(self.dc_settings.MON_ZABBIX_TEMPLATES_VM_ALLOWED)):
raise s.ValidationError(_('Selected monitoring templates are not available.'))
return attrs
def validate_node_resources(self, attrs): # noqa: R701
vm = self.object
dc = self.request.dc
node = None
node_errors = []
if 'vcpus' in attrs:
in_cpu = Vm.calculate_cpu_count_from_vcpus(attrs['vcpus'])
else:
in_cpu = None
in_ram = attrs.get('ram', None)
# Check if there are free resources if node was set manually
if self.node_changed:
            # No need to check whether the node really exists - that check is done by the default
            # serializer validation. It can still be None, though...
node = attrs['node']
old_cpu = old_ram = new_disk = 0
if vm:
vm_disks = vm.get_disks()
if node: # We have a new node
# Get old resources
old_cpu, old_ram, new_disk = vm.get_cpu_ram_disk(zpool=node.zpool)
# Node changed to real node, validate storage names and disk space
for zpool, size in vm_disks.items():
# Also check if storage exists on this new node
try:
ns = validate_zpool(self.request, zpool, node=node)
except s.ValidationError as err:
node_errors.extend(err.messages)
else:
logger.info('Checking storage %s free space (%s) for vm %s', ns.storage, size, vm)
if ns.check_free_space(size):
self.update_storage_resources.append(ns)
else:
node_errors.append(_('Not enough free disk space on storage with zpool=%s.') % zpool)
if vm.node:
# Node changed from real node -> always update storage resources associated with old node
self.update_storage_resources.extend(list(vm.node.get_node_storages(dc, vm_disks.keys())))
if self._is_kvm:
ram_overhead = settings.VMS_VM_KVM_MEMORY_OVERHEAD
else:
ram_overhead = 0
# Use new or old absolute resource counts
if in_cpu is None:
new_cpu = old_cpu
else:
new_cpu = in_cpu
if in_ram is None:
new_ram = old_ram + ram_overhead
else:
new_ram = in_ram + ram_overhead
# Also check for additional free resources if number of vcpus or ram
# changed and node was set in the past (=> we stay on current node)
elif vm and vm.node and (in_cpu is not None or in_ram is not None):
node = vm.node
old_cpu, old_ram = vm.get_cpu_ram()
new_disk = 0 # Disk size vs. node was validated in vm_define_disk
if in_cpu is None:
new_cpu = 0
else:
new_cpu = in_cpu - old_cpu
if in_ram is None:
new_ram = 0
else:
new_ram = in_ram - old_ram
# At this point we have to check for resources if node is defined
if node:
dc_node = node.get_dc_node(dc)
# noinspection PyUnboundLocalVariable
logger.info('Checking node=%s, dc_node=%s resources (cpu=%s, ram=%s, disk=%s) for vm %s',
node, dc_node, new_cpu, new_ram, new_disk, vm)
if new_cpu > 0 and not dc_node.check_free_resources(cpu=new_cpu):
node_errors.append(_('Not enough free vCPUs on node.'))
if new_ram > 0 and not dc_node.check_free_resources(ram=new_ram):
node_errors.append(_('Not enough free RAM on node.'))
if new_disk > 0 and not dc_node.check_free_resources(disk=new_disk):
node_errors.append(_('Not enough free disk space on node.'))
if node_errors:
self._errors['node'] = s.ErrorList(node_errors)
else:
self.update_node_resources = True
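    # Worked example of the delta logic above (hypothetical numbers): for a VM staying on its node
    # with old_cpu=2 and old_ram=1024 where only ram=2048 is submitted, in_cpu is None, so
    # new_cpu=0 and new_ram=2048-1024=1024; only the additional 1024 MB of RAM is then checked
    # via dc_node.check_free_resources(ram=1024).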
def validate(self, attrs): # noqa: R701
vm = self.object
dc_settings = self.dc_settings
if self.request.method == 'POST':
limit = dc_settings.VMS_VM_DEFINE_LIMIT
if limit is not None:
total = self.request.dc.vm_set.count()
if int(limit) <= total:
raise s.ValidationError(_('Maximum number of server definitions reached.'))
try:
ostype = attrs['ostype']
except KeyError:
ostype = vm.ostype
# Default cpu_type for a new Windows VM is 'host'
if not vm and ostype == Vm.WINDOWS and 'cpu_type' not in self.init_data:
attrs['cpu_type'] = Vm.CPU_TYPE_HOST
# Check if template ostype matches vm.ostype
template = attrs.get('template', None)
if template and template.ostype:
if template.ostype != ostype:
err = _('Server template is only available for servers with "%(ostype)s" OS type.')
self._errors['template'] = s.ErrorList([err % {'ostype': template.get_ostype_display()}])
# Default owner is request.user, but setting this in __init__ does not work
if 'owner' in attrs and attrs['owner'] is None:
if vm:
del attrs['owner']
else:
attrs['owner'] = self.request.user
# Zpool check depends on node
if self.zpool_changed or self.node_changed:
try:
zpool = attrs['zpool']
except KeyError:
zpool = vm.zpool
try:
node = attrs['node']
except KeyError:
if vm:
node = vm.node
else:
node = None
try:
validate_zpool(self.request, zpool, node=node)
except s.ValidationError as err:
self._errors['zpool'] = err.messages
# Check if alias is unique for this user
if 'alias' in attrs:
if vm and 'owner' not in attrs:
owner = vm.owner
elif 'owner' in attrs:
owner = attrs['owner']
else:
owner = self.request.user
alias = attrs['alias']
if vm and vm.alias == alias:
pass # Do not check if the same alias was provided
elif Vm.objects.filter(dc=self.request.dc, owner=owner, alias__iexact=alias).exists():
self._errors['alias'] = s.ErrorList([_('This server name is already in use. '
'Please supply a different server name.')])
# Check if there are free resources if node is set/changed and/or ram/vcpus changed
if not self._errors: # already invalid serializer, skip complicated resource checking
self.validate_node_resources(attrs)
# Disable monitored flag if monitoring module/sync disabled
# noinspection PyProtectedMember
if 'monitored_internal' in attrs and not (DefaultDc().settings.MON_ZABBIX_ENABLED and
dc_settings._MON_ZABBIX_VM_SYNC):
attrs['monitored_internal'] = False
if 'monitored' in attrs and not (dc_settings.MON_ZABBIX_ENABLED and dc_settings.MON_ZABBIX_VM_SYNC):
attrs['monitored'] = False
return attrs
class _VmDefineDiskSerializer(s.Serializer):
size = s.IntegerField(max_value=268435456, min_value=1)
boot = s.BooleanField(default=False) # Needed for server list in GUI (both KVM and ZONE)
compression = s.ChoiceField(choices=Vm.DISK_COMPRESSION, default=settings.VMS_DISK_COMPRESSION_DEFAULT)
zpool = s.CharField(default=Node.ZPOOL, max_length=64)
block_size = s.IntegerField(min_value=512, max_value=131072, validators=(mod2_validator,)) # Default set below
def __init__(self, request, vm, *args, **kwargs):
        # custom per-request serializer state
self.vm = vm
self.request = request
self.update_node_resources = False
self.update_storage_resources = []
self.zpool_changed = False
self.node_storage = None
self.disk_id = kwargs.pop('disk_id', None)
self.img = None
self.img_old = None
self.img_error = False
if len(args) > 0: # PUT, GET
# rewrite disk data
if isinstance(args[0], list):
data = map(self.fix_before, args[0])
else:
data = self.fix_before(args[0])
super(_VmDefineDiskSerializer, self).__init__(data, *args[1:], **kwargs)
else: # POST
super(_VmDefineDiskSerializer, self).__init__(*args, **kwargs)
data = kwargs.get('data', None)
            # default the disk size from the image size
if data is not None and 'image' in data and data['image']:
try:
self.img = get_images(self.request).get(name=data['image'])
except Image.DoesNotExist:
self.img_error = True # this should be stopped by default validate_image
else:
self.fields['size'].default = self.img.size
if vm.is_kvm():
self.fields['refreservation'].default = self.img.size
if vm.is_kvm() and data is not None and 'size' in data:
self.fields['refreservation'].default = data['size']
if self.disk_id == 0:
self.fields['boot'].default = True
dc_settings = vm.dc.settings
self.fields['block_size'].default = Vm.DISK_BLOCK_SIZE[vm.ostype]
self.fields['zpool'].default = vm.zpool
self.fields['compression'].default = dc_settings.VMS_DISK_COMPRESSION_DEFAULT
# Set defaults from template
if self.disk_id is not None and vm.template:
for field, value in vm.template.get_vm_define_disk(self.disk_id).items():
try:
self.fields[field].default = value
except KeyError:
pass
def fix_before(self, data):
"""
Rewrite disk data from json to serializer compatible object.
"""
if 'image_uuid' in data:
try:
self.img = self.img_old = Image.objects.get(uuid=data['image_uuid'])
data['image'] = self.img.name
except Image.DoesNotExist:
raise APIError(detail='Unknown image in disk definition.')
else:
del data['image_uuid']
return data
@property
def jsondata(self):
"""
Rewrite validated disk data from user to json usable data.
"""
data = dict(self.object)
if 'image' in data:
image_name = data.pop('image')
if image_name: # got valid image, let's replace it with image_uuid
data['image_uuid'] = str(self.img.uuid)
data['image_size'] = self.img.size # needed for valid json
if self.vm.is_kvm():
data.pop('block_size', None) # block size is inherited from the image
else: # remove image from json
data.pop('image_uuid', None)
data.pop('image_size', None)
return data
def detail_dict(self, **kwargs):
ret = super(_VmDefineDiskSerializer, self).detail_dict(**kwargs)
ret.pop('disk_id', None) # disk_id is added in the view
return ret
def validate_boot(self, attrs, source):
try:
value = attrs[source]
except KeyError:
pass
else:
if value is True and self.disk_id is not None:
if self.disk_id != 0:
raise s.ValidationError(_('Cannot set boot flag on disks other than first disk.'))
other_disks = self.vm.json_get_disks()
if other_disks:
try:
del other_disks[self.disk_id]
except IndexError:
pass
for d in other_disks:
if d['boot'] is True:
raise s.ValidationError(_('Cannot set boot flag on multiple disks.'))
return attrs
def validate_image(self, attrs, source): # noqa: R701
try:
value = attrs[source]
except KeyError:
pass
else:
if self.img_error:
raise s.ObjectDoesNotExist(value)
if not value:
value = attrs[source] = None
if value and self.disk_id != 0:
raise s.ValidationError(_('Cannot set image on disks other than first disk.'))
if self.object:
if (self.img and self.img.name == value) or (self.img == value):
return attrs # Input image name is the same as in DB
elif self.vm.is_notcreated():
if value:
try:
self.img = get_images(self.request).get(name=value)
except Image.DoesNotExist:
raise s.ObjectDoesNotExist(value)
else:
self.img = None
else:
raise s.ValidationError(_('Cannot change disk image.'))
elif value and self.vm.is_deployed():
raise s.ValidationError(_('Cannot set disk image on already created server.'))
if self.img:
if self.img.access in Image.UNUSABLE or self.img.ostype != self.vm.ostype:
raise s.ObjectDoesNotExist(value)
if self.img.status != Image.OK:
raise s.ValidationError(_('Image is currently not available.'))
return attrs
def validate_zpool(self, attrs, source):
try:
value = attrs[source]
except KeyError:
pass
else:
if self.object:
old_zpool = self.object.get('zpool')
if old_zpool == value:
return attrs
if self.vm.is_deployed():
raise s.ValidationError(_('Cannot change zpool.'))
else:
                old_zpool = value  # never mind that this is actually a new zpool (see validate_storage_resources())
self.node_storage = validate_zpool(self.request, value, node=self.vm.node)
self.zpool_changed = old_zpool
return attrs
def validate_storage_resources(self, zpool, size):
"""Check storage or node resources"""
if not self.vm.node:
return
if self.object and not self.zpool_changed: # only size has changed
new_size = size - self.object.get('size')
else:
new_size = size # size or zpool changed or new disk
if not new_size:
return
vm = self.vm
ns = self.get_node_storage(zpool, vm.node)
logger.info('Checking storage %s free space (%s) for vm %s', ns.storage, new_size, vm)
if ns.check_free_space(new_size):
# NodeStorage for update:
self.update_storage_resources.append(ns)
# Old NodeStorage for update
if self.zpool_changed:
self.update_storage_resources.append(vm.node.get_node_storage(vm.dc, self.zpool_changed))
else:
self._errors['size'] = s.ErrorList([_('Not enough free disk space on storage.')])
if zpool == vm.node.zpool:
dc_node = vm.node.get_dc_node(vm.dc)
logger.info('Checking node %s resources (disk=%s) for vm %s', vm.node, new_size, vm)
if dc_node.check_free_resources(disk=new_size):
self.update_node_resources = True
else:
self._errors['size'] = s.ErrorList([_('Not enough free disk space on node.')])
def validate(self, attrs): # noqa: R701
try:
size = attrs['size']
size_change = True
except KeyError:
size = self.object['size']
size_change = False
try:
zpool = attrs['zpool']
except KeyError:
zpool = self.object['zpool']
if self.vm.is_kvm() and self.img: # always check size if image
if not self.img.resize and size != self.img.size:
self._errors['size'] = s.ErrorList([_('Cannot define disk size other than image size (%s), '
'because image does not support resizing.') % self.img.size])
elif size < self.img.size:
self._errors['size'] = s.ErrorList([_('Cannot define smaller disk size than '
'image size (%s).') % self.img.size])
if self.vm.is_notcreated():
# Check disk_driver in image manifest (bug #chili-605) only if server is not created;
# User should be able to change the driver after server is deployed
img_disk_driver = self.img.json.get('manifest', {}).get('disk_driver', None)
if img_disk_driver:
try:
model = attrs['model']
except KeyError:
model = self.object['model']
if img_disk_driver != model:
self._errors['image'] = s.ErrorList([_('Disk image requires specific disk '
'model (%s).') % img_disk_driver])
if self.vm.is_kvm():
try:
refreservation = attrs['refreservation']
except KeyError: # self.object must exist here (PUT)
try:
refreservation = self.object['refreservation']
except KeyError:
refreservation = attrs['refreservation'] = size
else:
if refreservation > 0:
refreservation = attrs['refreservation'] = size # Override refreservation with new disk size
if refreservation > size:
self._errors['refreservation'] = s.ErrorList([_('Cannot define refreservation larger than disk size.')])
if not self._errors and (size_change or self.zpool_changed) and (self.vm.is_kvm() or self.disk_id == 0):
self.validate_storage_resources(zpool, size)
return attrs
@property
def data(self):
if self._data is None:
data = super(_VmDefineDiskSerializer, self).data
if self.many:
for i, disk in enumerate(data):
disk['disk_id'] = i + 1
else:
data['disk_id'] = self.disk_id
try:
data['disk_id'] += 1
except TypeError:
pass
self._data = data
return self._data
def get_node_storage(self, zpool, node):
if not self.node_storage and node:
self.node_storage = node.get_node_storage(self.vm.dc, zpool)
return self.node_storage
class KVmDefineDiskSerializer(_VmDefineDiskSerializer):
model = s.ChoiceField(choices=Vm.DISK_MODEL, default=settings.VMS_DISK_MODEL_DEFAULT)
image = s.CharField(required=False, default=settings.VMS_DISK_IMAGE_DEFAULT, max_length=64)
refreservation = s.IntegerField(default=0, max_value=268435456, min_value=0) # default set below
# nocreate = s.BooleanField(default=False) # processed in save_disks()
def __init__(self, request, vm, *args, **kwargs):
super(KVmDefineDiskSerializer, self).__init__(request, vm, *args, **kwargs)
dc_settings = vm.dc.settings
self.fields['model'].default = dc_settings.VMS_DISK_MODEL_DEFAULT
self.fields['image'].default = dc_settings.VMS_DISK_IMAGE_DEFAULT
def validate_block_size(self, attrs, source):
try:
value = attrs[source]
except KeyError:
pass
else:
if self.object and self.vm.is_deployed() and self.object.get('block_size') != value:
raise s.ValidationError(_('Cannot change block_size.'))
return attrs
class ZVmDefineDiskSerializer(_VmDefineDiskSerializer):
image = s.CharField(required=True, default=settings.VMS_DISK_IMAGE_ZONE_DEFAULT, max_length=64)
def __init__(self, request, vm, *args, **kwargs):
super(ZVmDefineDiskSerializer, self).__init__(request, vm, *args, **kwargs)
if vm.ostype == Vm.LINUX_ZONE:
self.fields['image'].default = vm.dc.settings.VMS_DISK_IMAGE_LX_ZONE_DEFAULT
else:
self.fields['image'].default = vm.dc.settings.VMS_DISK_IMAGE_ZONE_DEFAULT
if self.disk_id > 0:
if not self.object:
self.object = {}
self.object['boot'] = False
self.object['image'] = None
self.object['size'] = vm.json.get('quota', 0) * 1024
self.object['zpool'] = vm.json.get('zpool', Node.ZPOOL)
self.fields['image'].read_only = True
self.fields['size'].read_only = True
self.fields['zpool'].read_only = True
self.fields['boot'].read_only = True
elif self.disk_id is not None:
self.object['boot'] = True
self.fields['boot'].read_only = True
# noinspection PyPep8Naming
def VmDefineDiskSerializer(request, vm, *args, **kwargs):
if vm.is_kvm():
return KVmDefineDiskSerializer(request, vm, *args, **kwargs)
else:
return ZVmDefineDiskSerializer(request, vm, *args, **kwargs)
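# Minimal usage sketch (hypothetical caller, not part of this module): views are expected to go
# through the factory rather than pick a serializer class directly, e.g.
#   ser = VmDefineDiskSerializer(request, vm, data=disk_data, disk_id=disk_id)
# which returns a KVmDefineDiskSerializer for KVM servers and a ZVmDefineDiskSerializer otherwise.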
class VmDefineNicSerializer(s.Serializer):
mac = s.MACAddressField(required=False) # processed in save_nics()
model = s.ChoiceField(choices=Vm.NIC_MODEL, default=settings.VMS_NIC_MODEL_DEFAULT)
net = s.CharField()
ip = s.IPAddressField(required=False) # checked in validate()
netmask = s.IPAddressField(read_only=True)
gateway = s.IPAddressField(read_only=True)
primary = s.BooleanField(default=False)
dns = s.BooleanField(default=False) # Should we set DNS records for this IP?
use_net_dns = s.BooleanField(default=False) # set VM resolvers from Subnet?
allow_dhcp_spoofing = s.BooleanField(default=False)
allow_ip_spoofing = s.BooleanField(default=False)
allow_mac_spoofing = s.BooleanField(default=False)
allow_restricted_traffic = s.BooleanField(default=False)
allow_unfiltered_promisc = s.BooleanField(default=False)
allowed_ips = s.IPAddressArrayField(default=list(), max_items=NIC_ALLOWED_IPS_MAX)
monitoring = s.BooleanField(default=False)
set_gateway = s.BooleanField(default=True)
mtu = s.IntegerField(read_only=True, required=False)
def __init__(self, request, vm, *args, **kwargs):
self.request = request
self.vm = vm
self.dc_settings = dc_settings = vm.dc.settings
self.nic_id = kwargs.pop('nic_id', None)
self.resolvers = vm.resolvers
# List of DNS Record objects, where the content is equal to this NIC's IP address
self._dns = []
# Subnet object currently set in this NIC
self._net = None
# New Subnet object that is going to be replaced by self._net
self._net_old = None
# The self._ip attribute holds the IPAddress object that is currently associated with this NIC
# In case the related network object has dhcp_passthrough=True the value of self._ip will be False.
self._ip = None
# self._ip_old holds the IPAddress object which is currently associated with this NIC, but is going to be
# changed by a new IP (self._ip). The purpose of this attribute is to clean up old DNS and IP relations after
# the VM is updated (save_ip()).
self._ip_old = None
# The self._ips and self._ips_old have the same purpose as self._ip and self._ip_old but in relation to
# the allowed_ips array.
self._ips = ()
self._ips_old = ()
# Helper attribute for self.save_ip()
self._changing_allowed_ips = False
        # This attribute is True if vm.monitoring_ip equals nic['ip']
self._monitoring_old = None
if len(args) > 0: # GET, PUT
# rewrite nic data
if isinstance(args[0], list):
data = map(self.fix_before, args[0])
else:
data = self.fix_before(args[0])
super(VmDefineNicSerializer, self).__init__(data, *args[1:], **kwargs)
else: # POST
super(VmDefineNicSerializer, self).__init__(*args, **kwargs)
# By default set DNS for the first NIC
if self.nic_id == 0:
self.fields['dns'].default = True
self.fields['primary'].default = True
# By default enable monitoring for this NIC according to VMS_NIC_MONITORING_DEFAULT
if self.nic_id == dc_settings.VMS_NIC_MONITORING_DEFAULT - 1:
self.fields['monitoring'].default = True
# Set defaults from template
if self.nic_id is not None and vm.template:
for field, value in vm.template.get_vm_define_nic(self.nic_id).items():
try:
self.fields[field].default = value
except KeyError:
pass
if vm.is_kvm():
self.fields['model'].default = dc_settings.VMS_NIC_MODEL_DEFAULT
else:
del self.fields['model']
def fix_before(self, data): # noqa: R701
"""
Rewrite nic data from json to serializer compatible object.
"""
# fetch Subnet object
if data.get('network_uuid', None):
try:
self._net = Subnet.objects.get(uuid=data['network_uuid'])
data['net'] = self._net.name
except Subnet.DoesNotExist:
raise APIError(detail='Unknown net in NIC definition.')
else:
del data['network_uuid']
else:
data['net'] = None
# default vlan ID is 0
if 'vlan_id' not in data:
data['vlan_id'] = 0
# default MTU is None
if 'mtu' not in data:
data['mtu'] = None
# primary does not exist in json if False
if 'primary' not in data:
data['primary'] = False
ip = data.get('ip', None)
# fetch IPAddress object
if ip:
try:
if self._net and self._net.dhcp_passthrough and ip == 'dhcp':
# No netmask/gateway in json, only ip with 'dhcp' value
data['ip'] = ip = None # ip=None means that monitoring (below) will be False
data['netmask'] = None
data['gateway'] = None
self._ip = False
else:
self._ip = IPAddress.objects.get(ip=ip, subnet=self._net)
except IPAddress.DoesNotExist:
raise APIError(detail='Unknown ip in NIC definition.')
allowed_ips = data.get('allowed_ips', None)
if allowed_ips is not None:
self._ips = IPAddress.objects.filter(ip__in=allowed_ips, subnet=self._net)
data['allowed_ips'] = list(set(allowed_ips))
        # dns is True if a valid DNS A record exists and points to this NIC's IP
data['dns'] = False
if ip and self.vm.hostname_is_valid_fqdn(): # will return False if DNS_ENABLED is False
dns = RecordView.Record.get_records_A(self.vm.hostname, self.vm.fqdn_domain)
if dns:
for record in dns:
if record.content == ip:
self._dns.append(record)
data['dns'] = True
if self._net and self._net.get_resolvers() == self.vm.resolvers:
data['use_net_dns'] = True
else:
data['use_net_dns'] = False
        # monitoring is True if vm.monitoring_ip equals nic['ip']
self._monitoring_old = self.vm.monitoring_ip == ip
if self._monitoring_old:
data['monitoring'] = True
else:
data['monitoring'] = False
# set_gateway is True if gateway is set
data['set_gateway'] = bool(data.get('gateway', None))
return data
@property
def jsondata(self):
"""
Rewrite validated nic data from user to json usable data.
"""
data = dict(self.object)
if 'net' in data:
subnet = data.pop('net')
if subnet: # got valid subnet, let's replace it with network_uuid
data['network_uuid'] = str(self._net.uuid)
# Remove dummy attributes
data.pop('dns', None)
data.pop('use_net_dns', None)
data.pop('monitoring', None)
data.pop('set_gateway', None)
if not data.get('ip') and self._net.dhcp_passthrough:
data['ip'] = 'dhcp'
data.pop('netmask', None)
data.pop('gateway', None)
return data
def detail_dict(self, **kwargs):
ret = super(VmDefineNicSerializer, self).detail_dict(**kwargs)
ret.pop('nic_id', None) # nic_id is added in the view
# When changing net or ip (PUT), the IP address may not be in the detail dict
if self._net_old or self._ip_old is not None:
ret['ip'] = self.object.get('ip', None)
ret['netmask'] = self.object.get('netmask', None)
ret['gateway'] = self.object.get('gateway', None)
ret['allowed_ips'] = self.object.get('allowed_ips', [])
return ret
def validate_mac(self, attrs, source):
try:
value = attrs[source]
except KeyError:
pass
else:
if self.object and self.vm.is_deployed():
if not value or self.object.get('mac', None) != value:
raise s.ValidationError(_('Cannot change MAC address.'))
return attrs
def validate_set_gateway(self, attrs, source):
try:
value = attrs[source]
except KeyError:
pass
else:
if self.object and self.vm.is_deployed() and self.object.get('set_gateway', None) != value:
raise s.ValidationError(_('Cannot change gateway.'))
return attrs
def _validate_insecure_boolean_attr(self, attrs, source):
try:
value = attrs[source]
except KeyError:
return attrs
if not self.request.user.is_staff and value:
raise s.ValidationError(PERMISSION_DENIED)
return attrs
def validate_allow_dhcp_spoofing(self, attrs, source):
return self._validate_insecure_boolean_attr(attrs, source) # Only SuperAdmin can enable this option
def validate_allow_ip_spoofing(self, attrs, source):
return self._validate_insecure_boolean_attr(attrs, source) # Only SuperAdmin can enable this option
def validate_allow_mac_spoofing(self, attrs, source):
return self._validate_insecure_boolean_attr(attrs, source) # Only SuperAdmin can enable this option
def validate_allow_restricted_traffic(self, attrs, source):
return self._validate_insecure_boolean_attr(attrs, source) # Only SuperAdmin can enable this option
def validate_allow_unfiltered_promisc(self, attrs, source):
return self._validate_insecure_boolean_attr(attrs, source) # Only SuperAdmin can enable this option
def validate_primary(self, attrs, source):
try:
value = attrs[source]
except KeyError:
pass
else:
if value is True and self.nic_id is not None:
other_nics = self.vm.json_get_nics()
if other_nics:
try:
del other_nics[self.nic_id]
except IndexError:
pass
for n in other_nics:
if n.get('primary', False) is True:
raise s.ValidationError(_('Cannot enable primary flag on multiple NICs.'))
return attrs
def validate_net(self, attrs, source):
try:
value = attrs[source]
except KeyError:
pass
else:
if value:
if self.object and self._net and self._net.name == value:
return attrs # Input net name is the same as in DB
try:
_net = get_subnets(self.request).get(name=value)
except Subnet.DoesNotExist:
raise s.ObjectDoesNotExist(value)
else:
if _net.access in Subnet.UNUSABLE:
raise s.ObjectDoesNotExist(value)
if self.vm.node: # New network and node is defined - check nic tags - bug #chili-593
validate_nic_tags(self.vm, new_net=_net)
if self.object and self._net != _net: # changing net is tricky, see validate() below
self._net_old = self._net
# If a MTU is set on an existing NIC then it cannot be removed
# An overlay nic_tag cannot be set on an existing NIC
if (self.object.get('mtu', None) and _net.mtu is None) or _net.vxlan_id:
raise s.ValidationError(_('This field cannot be changed because some inherited NIC '
'attributes (MTU, nic_tag) cannot be updated. '
'Please remove the NIC and add a new NIC.'))
self._net = _net
return attrs
def _check_ip_usage(self, ipaddress, allowed_ips=False):
"""Returns an error message if IP address is used by some VM"""
ip = ipaddress.ip
if ipaddress.usage == IPAddress.VM_REAL and ipaddress.vm == self.vm: # Trying to re-use our lost IP?
if ipaddress.ip in self.vm.json_get_ips(): # check if selected address is not on another interface
return _('Object with name=%s is already used.') % ip
else:
if ipaddress.vm is not None: # check if selected address is free in this subnet
return _('Object with name=%s is already used as default address.') % ip
if allowed_ips:
if ipaddress.usage not in (IPAddress.VM, IPAddress.VM_REAL): # check if selected IP can be used for VM
return _('Object with name=%s is not available.') % ip
for other_vm in ipaddress.vms.exclude(uuid=self.vm.uuid):
if other_vm.dc != self.vm.dc:
return _('Object with name=%s is already used as additional address in '
'another virtual datacenter.') % ip
else:
if ipaddress.usage != IPAddress.VM: # check if selected address can be used for virtual servers
return _('Object with name=%s is not available.') % ip
if ipaddress.vms.exists(): # IP address is already used as allowed_ips
return _('Object with name=%s is already used as additional address.') % ip
return None
def validate(self, attrs): # noqa: R701
net = self._net
assert net
# first fetch the IPAddress object
if 'ip' in attrs and attrs['ip']: # ip specified
ip = attrs['ip']
if self.object and not self._net_old and self._ip and self._ip.ip == ip:
pass # Input IP is the same as in DB
else:
try: # check if selected address exists in subnet
_ip = IPAddress.objects.get(ip=ip, subnet=net)
except IPAddress.DoesNotExist:
self._errors['ip'] = s.ObjectDoesNotExist(ip).messages
return attrs
else:
error = self._check_ip_usage(_ip)
if error:
self._errors['ip'] = s.ErrorList([error])
return attrs
if self._ip and self._ip != _ip: # changing ip is tricky
self._ip_old = self._ip
self._ip = _ip
attrs['ip'] = self._ip.ip # normalize IP address
else:
# changing net + ip not specified || empty ip specified (finding new ip below)
if self._net_old or not attrs.get('ip', True):
self._ip_old = self._ip
self._ip = None
allowed_ips = list(set(attrs.get('allowed_ips', [])))
if allowed_ips:
_ips = IPAddress.objects.filter(ip__in=allowed_ips, subnet=net)
if self.object and not self._net_old and self._ips and self._ips == _ips:
pass # Input allowed_ips are the same as in DB
else:
ip_list = _ips.values_list('ip', flat=True)
if len(ip_list) != len(allowed_ips):
self._errors['allowed_ips'] = s.ErrorList(
[_('Object with name=%s does not exist.') % i for i in allowed_ips if i not in ip_list]
)
return attrs
if self._ip and self._ip.ip in allowed_ips:
self._errors['allowed_ips'] = s.ErrorList(
[_('The default IP address must not be among allowed_ips.')]
)
return attrs
errors = [err for err in (self._check_ip_usage(ipaddress, allowed_ips=True) for ipaddress in _ips)
if err is not None]
if errors:
self._errors['allowed_ips'] = s.ErrorList(errors)
return attrs
if self._ips and self._ips != _ips: # changing allowed_ips is tricky
# noinspection PyUnresolvedReferences
self._ips_old = self._ips.exclude(ip__in=ip_list)
self._ips = _ips
self._changing_allowed_ips = True
attrs['allowed_ips'] = list(set(ip_list))
else:
# changing net + allowed_ips not specified, but already set on nic (with old net)
            # or setting empty allowed_ips (=> user wants to remove allowed_ips)
if self._ips and (self._net_old or 'allowed_ips' in attrs):
attrs['allowed_ips'] = list()
self._ips_old = self._ips
self._ips = ()
self._changing_allowed_ips = True
if net.dhcp_passthrough:
# no dns and monitoring for this NIC
try:
dns = attrs['dns']
except KeyError:
dns = self.object['dns']
try:
monitoring = attrs['monitoring']
except KeyError:
monitoring = self.object['monitoring']
if dns or monitoring:
if dns:
self._errors['dns'] = s.ErrorList([_('Cannot enable DNS for externally managed network.')])
if monitoring:
self._errors['monitoring'] = s.ErrorList([_('Cannot enable monitoring for externally '
'managed network.')])
return attrs
# try to get free ip address for this subnet
if not self._ip:
if net.dhcp_passthrough:
# no IP for this NIC
self._ip = False
attrs['ip'] = None
attrs['netmask'] = None
attrs['gateway'] = None
else:
try:
self._ip = IPAddress.objects.filter(subnet=net, vm__isnull=True, vms=None, usage=IPAddress.VM)\
.exclude(ip__in=allowed_ips).order_by('?')[0:1].get()
except IPAddress.DoesNotExist:
raise s.ValidationError(_('Cannot find free IP address for net %s.') % net.name)
else:
logger.info('IP address %s for NIC ID %s on VM %s was chosen automatically',
self._ip, self.nic_id, self.vm)
attrs['ip'] = self._ip.ip # set ip
if self._ip is not False:
assert self._ip and attrs.get('ip', True)
# other attributes cannot be specified (they need to be inherited from net)
attrs['netmask'] = net.netmask
attrs['gateway'] = net.gateway
# get set_gateway from new or existing NIC object
try:
set_gateway = attrs['set_gateway']
except KeyError:
set_gateway = self.object['set_gateway']
if not set_gateway:
# Set gateway to None even if the NIC must not have any gateway set (see Vm._NICS_REMOVE_EMPTY)
attrs['gateway'] = None
# These attributes cannot be specified (they need to be inherited from net)
attrs['vlan_id'] = net.vlan_id
if net.vxlan_id:
attrs['nic_tag'] = '%s/%s' % (net.nic_tag, net.vxlan_id)
else:
attrs['nic_tag'] = net.nic_tag
attrs['mtu'] = net.mtu
if 'use_net_dns' in attrs:
if attrs['use_net_dns']:
self.resolvers = net.get_resolvers()
elif self.object:
self.resolvers = self.dc_settings.VMS_VM_RESOLVERS_DEFAULT
return attrs
@staticmethod
@catch_api_exception
def save_a(request, task_id, vm, ip, dns=(), delete=False):
if not vm.dc.settings.DNS_ENABLED:
logger.info('DNS support disabled: skipping DNS A record saving for vm %s', vm)
return None
# Find domain and check if the domain is legit for creating A records
if not vm.hostname_is_valid_fqdn():
logger.warn('Valid domain for vm %s not found. Could not %s DNS A record.',
vm, 'delete' if delete else 'add')
return None
record_cls = RecordView.Record
ip = str(ip.ip)
domain = vm.fqdn_domain
logger.info('%s DNS A record for vm %s, domain %s, name %s.',
'Deleting' if delete else 'Adding/Updating', vm, domain, ip)
if not dns:
dns = record_cls.get_records_A(vm.hostname, domain)
if delete:
method = 'DELETE'
data = {}
else:
records_exist = [record.content == ip for record in dns]
if records_exist and all(records_exist):
logger.info('DNS A record for vm %s, domain %s, name %s already exists.', vm, domain, ip)
return True
if len(dns):
method = 'PUT'
data = {'content': ip}
else:
method = 'POST'
dns = (record_cls(domain=RecordView.internal_domain_get(domain, task_id=task_id)),)
data = {
'type': record_cls.A,
'name': vm.hostname.lower(),
'domain': domain,
'content': ip,
}
for record in dns:
RecordView.internal_response(request, method, record, data, task_id=task_id, related_obj=vm)
return True
@staticmethod
@catch_api_exception
def save_ptr(request, task_id, vm, ip, net, delete=False, content=None):
dc_settings = vm.dc.settings
if not dc_settings.DNS_ENABLED:
logger.info('DNS support disabled: skipping DNS PTR record saving for vm %s', vm)
return None
record_cls = RecordView.Record
ipaddr = str(ip.ip)
ptr = record_cls.get_record_PTR(ipaddr)
logger.info('%s DNS PTR record for vm %s, domain %s, name %s.',
'Deleting' if delete else 'Adding', vm, net.ptr_domain, ipaddr)
def default_ptr(server, ip_address):
placeholders = {
'hostname': server.hostname,
'alias': server.alias,
'ipaddr': ip_address.replace('.', '-'),
}
try:
return dc_settings.DNS_PTR_DEFAULT.format(**placeholders)
except (KeyError, ValueError, TypeError) as e:
logger.error('Could not convert DNS_PTR_DEFAULT (%s) for IP %s of VM %s. Error was: %s',
dc_settings.DNS_PTR_DEFAULT, ip_address, server, e)
return 'ptr-{ipaddr}.example.com'.format(**placeholders)
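        # Example of the fallback above: for IP address 192.168.1.10 the generated PTR content
        # would be 'ptr-192-168-1-10.example.com' (dots replaced by dashes), used only when
        # dc_settings.DNS_PTR_DEFAULT cannot be formatted.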
if ptr:
if delete:
method = 'DELETE'
data = {}
else:
method = 'PUT'
data = {'content': content or default_ptr(vm, ipaddr)}
else:
if delete:
return None
else:
ptr = record_cls(domain=RecordView.internal_domain_get(net.ptr_domain, task_id=task_id))
method = 'POST'
data = {
'type': record_cls.PTR,
'domain': net.ptr_domain,
'name': record_cls.get_reverse(ipaddr),
'content': content or default_ptr(vm, ipaddr),
}
return RecordView.internal_response(request, method, ptr, data, task_id=task_id, related_obj=vm)
@staticmethod
def _remove_vm_ip_association(vm, ip, many=False):
logger.info('Removing association of IP %s with vm %s.', ip, vm)
if ip.usage == IPAddress.VM_REAL and vm.is_deployed(): # IP is set on hypervisor
logger.info(' ^ Removal of association of IP %s with vm %s will be delayed until PUT vm_manage is done.',
ip, vm)
else: # DB only operation
if many:
ip.vms.remove(vm)
else:
ip.vm = None
ip.save()
@staticmethod
def _create_vm_ip_association(vm, ip, many=False):
logger.info('Creating association of IP %s with vm %s.', ip, vm)
if ip.vm:
raise APIError(detail='Unexpected problem with IP address association.')
if many:
ip.vms.add(vm)
else:
ip.vm = vm
ip.save()
@classmethod
def _update_vm_ip_association(cls, vm, ip, delete=False, many=False):
if delete:
cls._remove_vm_ip_association(vm, ip, many=many)
else:
cls._create_vm_ip_association(vm, ip, many=many)
def save_ip(self, task_id, delete=False, update=False): # noqa: R701
vm = self.vm
ip = self._ip
ip_old = self._ip_old
if ip is False: # means that the new IP uses a network with dhcp_passthrough
assert self._net.dhcp_passthrough
else:
assert ip
if not update or ip_old:
if ip_old:
self._remove_vm_ip_association(vm, ip_old)
if ip:
self._update_vm_ip_association(vm, ip, delete=delete)
# Delete PTR Record for old IP
if ip_old and ip_old.subnet.ptr_domain:
self.save_ptr(self.request, task_id, vm, ip_old, ip_old.subnet, delete=True) # fails silently
# Create PTR Record only if a PTR domain is defined
if ip and self._net and self._net.ptr_domain:
self.save_ptr(self.request, task_id, vm, ip, self._net, delete=delete) # fails silently
if self._changing_allowed_ips:
for _ip_old in self._ips_old:
self._remove_vm_ip_association(vm, _ip_old, many=True)
for _ip in self._ips:
self._update_vm_ip_association(vm, _ip, delete=delete, many=True)
# Create DNS A Record if dns setting is True
# or Remove dns if dns settings was originally True, but now is set to False
dns = self.object['dns']
remove_dns = self._dns and not dns
if dns or remove_dns:
if remove_dns:
delete = True
if delete and ip_old:
# The dns should be removed for the old ip
ip = ip_old
if ip:
self.save_a(self.request, task_id, vm, ip, dns=self._dns, delete=delete)
return ip
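    # Order of operations in save_ip() above: the old default IP association is removed first, the
    # new one is created, the PTR record for the old IP is deleted and one for the new IP is
    # (re)created when a PTR domain exists, allowed_ips associations are synced, and finally the
    # DNS A record is added or removed according to the nic's dns flag (using the old IP on delete).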
def update_ip(self, task_id):
return self.save_ip(task_id, update=True)
def delete_ip(self, task_id):
return self.save_ip(task_id, delete=True)
@property
def data(self):
if self._data is None:
data = super(VmDefineNicSerializer, self).data
if self.many:
for i, nic in enumerate(data):
nic['nic_id'] = i + 1
else:
data['nic_id'] = self.nic_id
try:
data['nic_id'] += 1
except TypeError:
pass
self._data = data
return self._data
def get_monitoring_ip(self, delete=False):
# Return ip if monitoring is True,
# empty string if monitoring was true, but now is set to False or delete was requested,
# or None if monitoring_ip should stay unchanged
monitoring = self.object['monitoring']
ip = self.object['ip']
if self._ip is False:
assert self._net.dhcp_passthrough
assert not monitoring
else:
assert self._ip
if self._monitoring_old and (delete or not monitoring):
logger.info('Removing monitoring IP %s for vm %s.', ip, self.vm)
return ''
elif monitoring:
logger.info('Saving monitoring IP %s for vm %s.', ip, self.vm)
return ip
else:
return None
| 40.513623 | 120 | 0.578571 |
1e88e60435ba0c35ebb7a1b8af37427bf08c7881 | 1,456 | py | Python | sdk/python/feast/constants.py | amommendes/feast | 2435777d87c19d4eee670a7ccf5e73e88e22ce9d | ["Apache-2.0"] | null | null | null | sdk/python/feast/constants.py | amommendes/feast | 2435777d87c19d4eee670a7ccf5e73e88e22ce9d | ["Apache-2.0"] | null | null | null | sdk/python/feast/constants.py | amommendes/feast | 2435777d87c19d4eee670a7ccf5e73e88e22ce9d | ["Apache-2.0"] | null | null | null |
#
# Copyright 2019 The Feast Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Maximum interval (secs) to wait between retries for the retry function
MAX_WAIT_INTERVAL: str = "60"
AWS_LAMBDA_FEATURE_SERVER_IMAGE = "feastdev/feature-server"
AWS_LAMBDA_FEATURE_SERVER_REPOSITORY = "feast-python-server"
# feature_store.yaml environment variable name for remote feature server
FEATURE_STORE_YAML_ENV_NAME: str = "FEATURE_STORE_YAML_BASE64"
# Environment variable for registry
REGISTRY_ENV_NAME: str = "REGISTRY_BASE64"
# Environment variable for toggling usage
FEAST_USAGE = "FEAST_USAGE"
# Environment variable for the path for overwriting universal test configs
FULL_REPO_CONFIGS_MODULE_ENV_NAME: str = "FULL_REPO_CONFIGS_MODULE"
# Environment variable for overwriting FTS port
FEATURE_TRANSFORMATION_SERVER_PORT_ENV_NAME: str = "FEATURE_TRANSFORMATION_SERVER_PORT"
# Default FTS port
DEFAULT_FEATURE_TRANSFORMATION_SERVER_PORT = 6569
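# Minimal usage sketch (hypothetical, not part of Feast itself): the *_ENV_NAME constants are meant
# to be looked up in the process environment, e.g.
#   import os
#   encoded_yaml = os.environ.get(FEATURE_STORE_YAML_ENV_NAME)  # base64-encoded feature_store.yaml
#   port = int(os.environ.get(FEATURE_TRANSFORMATION_SERVER_PORT_ENV_NAME,
#                             DEFAULT_FEATURE_TRANSFORMATION_SERVER_PORT))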
| 36.4 | 87 | 0.802885 |
7579ab704a79440ed3ac0241f66d6c5cf221a0d4 | 32,212 | py | Python | libcloud/test/storage/test_atmos.py | zimventures/libcloud | be0765df384f1baccde24539156119856cb96816 | ["Apache-2.0"] | 4 | 2017-11-14T17:24:12.000Z | 2020-10-30T01:46:02.000Z | libcloud/test/storage/test_atmos.py | zimventures/libcloud | be0765df384f1baccde24539156119856cb96816 | ["Apache-2.0"] | 11 | 2017-01-29T08:59:21.000Z | 2018-07-02T09:17:47.000Z | libcloud/test/storage/test_atmos.py | zimventures/libcloud | be0765df384f1baccde24539156119856cb96816 | ["Apache-2.0"] | 4 | 2016-04-04T08:01:48.000Z | 2018-06-06T08:04:36.000Z |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import os.path
import sys
import unittest
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlparse
from libcloud.utils.py3 import b
import libcloud.utils.files
from libcloud.common.types import LibcloudError
from libcloud.storage.base import Container, Object
from libcloud.storage.types import ContainerAlreadyExistsError, \
ContainerDoesNotExistError, \
ContainerIsNotEmptyError, \
ObjectDoesNotExistError
from libcloud.storage.drivers.atmos import AtmosConnection, AtmosDriver
from libcloud.storage.drivers.dummy import DummyIterator
from libcloud.test import MockHttp, generate_random_data, make_response
from libcloud.test.file_fixtures import StorageFileFixtures
class AtmosTests(unittest.TestCase):
def setUp(self):
AtmosDriver.connectionCls.conn_class = AtmosMockHttp
AtmosDriver.path = ''
AtmosMockHttp.type = None
AtmosMockHttp.upload_created = False
self.driver = AtmosDriver('dummy', base64.b64encode(b('dummy')))
self._remove_test_file()
def tearDown(self):
self._remove_test_file()
def _remove_test_file(self):
file_path = os.path.abspath(__file__) + '.temp'
try:
os.unlink(file_path)
except OSError:
pass
def test_list_containers(self):
AtmosMockHttp.type = 'EMPTY'
containers = self.driver.list_containers()
self.assertEqual(len(containers), 0)
AtmosMockHttp.type = None
containers = self.driver.list_containers()
self.assertEqual(len(containers), 6)
def test_list_container_objects(self):
container = Container(name='test_container', extra={},
driver=self.driver)
AtmosMockHttp.type = 'EMPTY'
objects = self.driver.list_container_objects(container=container)
self.assertEqual(len(objects), 0)
AtmosMockHttp.type = None
objects = self.driver.list_container_objects(container=container)
self.assertEqual(len(objects), 2)
obj = [o for o in objects if o.name == 'not-a-container1'][0]
self.assertEqual(obj.meta_data['object_id'],
'651eae32634bf84529c74eabd555fda48c7cead6')
self.assertEqual(obj.container.name, 'test_container')
def test_get_container(self):
container = self.driver.get_container(container_name='test_container')
self.assertEqual(container.name, 'test_container')
self.assertEqual(container.extra['object_id'],
'b21cb59a2ba339d1afdd4810010b0a5aba2ab6b9')
def test_get_container_escaped(self):
container = self.driver.get_container(
container_name='test & container')
self.assertEqual(container.name, 'test & container')
self.assertEqual(container.extra['object_id'],
'b21cb59a2ba339d1afdd4810010b0a5aba2ab6b9')
def test_get_container_not_found(self):
try:
self.driver.get_container(container_name='not_found')
except ContainerDoesNotExistError:
pass
else:
self.fail('Exception was not thrown')
def test_create_container_success(self):
container = self.driver.create_container(
container_name='test_create_container')
self.assertTrue(isinstance(container, Container))
self.assertEqual(container.name, 'test_create_container')
self.assertEqual(container.extra['object_id'],
'31a27b593629a3fe59f887fd973fd953e80062ce')
def test_create_container_already_exists(self):
AtmosMockHttp.type = 'ALREADY_EXISTS'
try:
self.driver.create_container(
container_name='test_create_container')
except ContainerAlreadyExistsError:
pass
else:
self.fail(
'Container already exists but an exception was not thrown')
def test_delete_container_success(self):
container = Container(name='foo_bar_container', extra={}, driver=self)
result = self.driver.delete_container(container=container)
self.assertTrue(result)
def test_delete_container_not_found(self):
AtmosMockHttp.type = 'NOT_FOUND'
container = Container(name='foo_bar_container', extra={}, driver=self)
try:
self.driver.delete_container(container=container)
except ContainerDoesNotExistError:
pass
else:
self.fail(
'Container does not exist but an exception was not thrown')
def test_delete_container_not_empty(self):
AtmosMockHttp.type = 'NOT_EMPTY'
container = Container(name='foo_bar_container', extra={}, driver=self)
try:
self.driver.delete_container(container=container)
except ContainerIsNotEmptyError:
pass
else:
self.fail('Container is not empty but an exception was not thrown')
def test_get_object_success(self):
obj = self.driver.get_object(container_name='test_container',
object_name='test_object')
self.assertEqual(obj.container.name, 'test_container')
self.assertEqual(obj.size, 555)
self.assertEqual(obj.hash, '6b21c4a111ac178feacf9ec9d0c71f17')
self.assertEqual(obj.extra['object_id'],
'322dce3763aadc41acc55ef47867b8d74e45c31d6643')
self.assertEqual(
obj.extra['last_modified'], 'Tue, 25 Jan 2011 22:01:49 GMT')
self.assertEqual(obj.meta_data['foo-bar'], 'test 1')
self.assertEqual(obj.meta_data['bar-foo'], 'test 2')
def test_get_object_escaped(self):
obj = self.driver.get_object(container_name='test & container',
object_name='test & object')
self.assertEqual(obj.container.name, 'test & container')
self.assertEqual(obj.size, 555)
self.assertEqual(obj.hash, '6b21c4a111ac178feacf9ec9d0c71f17')
self.assertEqual(obj.extra['object_id'],
'322dce3763aadc41acc55ef47867b8d74e45c31d6643')
self.assertEqual(
obj.extra['last_modified'], 'Tue, 25 Jan 2011 22:01:49 GMT')
self.assertEqual(obj.meta_data['foo-bar'], 'test 1')
self.assertEqual(obj.meta_data['bar-foo'], 'test 2')
def test_get_object_not_found(self):
try:
self.driver.get_object(container_name='test_container',
object_name='not_found')
except ObjectDoesNotExistError:
pass
else:
self.fail('Exception was not thrown')
def test_delete_object_success(self):
AtmosMockHttp.type = 'DELETE'
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
container=container, meta_data=None,
driver=self.driver)
status = self.driver.delete_object(obj=obj)
self.assertTrue(status)
def test_delete_object_escaped_success(self):
AtmosMockHttp.type = 'DELETE'
container = Container(name='foo & bar_container', extra={},
driver=self.driver)
obj = Object(name='foo & bar_object', size=1000, hash=None, extra={},
container=container, meta_data=None,
driver=self.driver)
status = self.driver.delete_object(obj=obj)
self.assertTrue(status)
def test_delete_object_not_found(self):
AtmosMockHttp.type = 'NOT_FOUND'
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
container=container, meta_data=None,
driver=self.driver)
try:
self.driver.delete_object(obj=obj)
except ObjectDoesNotExistError:
pass
else:
self.fail('Object does not exist but an exception was not thrown')
def test_download_object_success(self):
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
container=container, meta_data=None,
driver=self.driver)
destination_path = os.path.abspath(__file__) + '.temp'
result = self.driver.download_object(obj=obj,
destination_path=destination_path,
overwrite_existing=False,
delete_on_failure=True)
self.assertTrue(result)
def test_download_object_escaped_success(self):
container = Container(name='foo & bar_container', extra={},
driver=self.driver)
obj = Object(name='foo & bar_object', size=1000, hash=None, extra={},
container=container, meta_data=None,
driver=self.driver)
destination_path = os.path.abspath(__file__) + '.temp'
result = self.driver.download_object(obj=obj,
destination_path=destination_path,
overwrite_existing=False,
delete_on_failure=True)
self.assertTrue(result)
def test_download_object_success_not_found(self):
AtmosMockHttp.type = 'NOT_FOUND'
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
container=container,
meta_data=None,
driver=self.driver)
destination_path = os.path.abspath(__file__) + '.temp'
try:
self.driver.download_object(
obj=obj,
destination_path=destination_path,
overwrite_existing=False,
delete_on_failure=True)
except ObjectDoesNotExistError:
pass
else:
self.fail('Object does not exist but an exception was not thrown')
def test_download_object_as_stream(self):
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
container=container, meta_data=None,
driver=self.driver)
stream = self.driver.download_object_as_stream(
obj=obj, chunk_size=None)
self.assertTrue(hasattr(stream, '__iter__'))
def test_download_object_as_stream_escaped(self):
container = Container(name='foo & bar_container', extra={},
driver=self.driver)
obj = Object(name='foo & bar_object', size=1000, hash=None, extra={},
container=container, meta_data=None,
driver=self.driver)
stream = self.driver.download_object_as_stream(
obj=obj, chunk_size=None)
self.assertTrue(hasattr(stream, '__iter__'))
def test_upload_object_success(self):
def upload_file(self, object_name=None, content_type=None,
request_path=None, request_method=None,
headers=None, file_path=None, stream=None):
return {'response': make_response(200, headers={'etag': '0cc175b9c0f1b6a831c399e269772661'}),
'bytes_transferred': 1000,
'data_hash': '0cc175b9c0f1b6a831c399e269772661'}
old_func = AtmosDriver._upload_object
AtmosDriver._upload_object = upload_file
path = os.path.abspath(__file__)
container = Container(name='fbc', extra={}, driver=self)
object_name = 'ftu'
extra = {'meta_data': {'some-value': 'foobar'}}
obj = self.driver.upload_object(file_path=path, container=container,
extra=extra, object_name=object_name)
self.assertEqual(obj.name, 'ftu')
self.assertEqual(obj.size, 1000)
self.assertTrue('some-value' in obj.meta_data)
AtmosDriver._upload_object = old_func
def test_upload_object_no_content_type(self):
def no_content_type(name):
return None, None
old_func = libcloud.utils.files.guess_file_mime_type
libcloud.utils.files.guess_file_mime_type = no_content_type
file_path = os.path.abspath(__file__)
container = Container(name='fbc', extra={}, driver=self)
object_name = 'ftu'
obj = self.driver.upload_object(file_path=file_path,
container=container,
object_name=object_name)
# Just check that the file was uploaded OK, as the fallback
# Content-Type header should be set (application/octet-stream).
self.assertEqual(obj.name, object_name)
libcloud.utils.files.guess_file_mime_type = old_func
def test_upload_object_error(self):
def dummy_content_type(name):
return 'application/zip', None
def send(self, method, **kwargs):
raise LibcloudError('')
old_func1 = libcloud.utils.files.guess_file_mime_type
libcloud.utils.files.guess_file_mime_type = dummy_content_type
old_func2 = AtmosMockHttp.request
AtmosMockHttp.request = send
file_path = os.path.abspath(__file__)
container = Container(name='fbc', extra={}, driver=self)
object_name = 'ftu'
try:
self.driver.upload_object(
file_path=file_path,
container=container,
object_name=object_name)
except LibcloudError:
pass
else:
self.fail(
'Timeout while uploading but an exception was not thrown')
finally:
libcloud.utils.files.guess_file_mime_type = old_func1
AtmosMockHttp.request = old_func2
def test_upload_object_nonexistent_file(self):
def dummy_content_type(name):
return 'application/zip', None
old_func = libcloud.utils.files.guess_file_mime_type
libcloud.utils.files.guess_file_mime_type = dummy_content_type
file_path = os.path.abspath(__file__ + '.inexistent')
container = Container(name='fbc', extra={}, driver=self)
object_name = 'ftu'
try:
self.driver.upload_object(
file_path=file_path,
container=container,
object_name=object_name)
except OSError:
pass
else:
            self.fail('Nonexistent file but an exception was not thrown')
finally:
libcloud.utils.files.guess_file_mime_type = old_func
def test_upload_object_via_stream_new_object(self):
def dummy_content_type(name):
return 'application/zip', None
old_func = libcloud.storage.drivers.atmos.guess_file_mime_type
libcloud.storage.drivers.atmos.guess_file_mime_type = dummy_content_type
container = Container(name='fbc', extra={}, driver=self)
object_name = 'ftsdn'
iterator = DummyIterator(data=['2', '3', '5'])
try:
self.driver.upload_object_via_stream(container=container,
object_name=object_name,
iterator=iterator)
finally:
libcloud.storage.drivers.atmos.guess_file_mime_type = old_func
def test_upload_object_via_stream_existing_object(self):
def dummy_content_type(name):
return 'application/zip', None
old_func = libcloud.storage.drivers.atmos.guess_file_mime_type
libcloud.storage.drivers.atmos.guess_file_mime_type = dummy_content_type
container = Container(name='fbc', extra={}, driver=self)
object_name = 'ftsde'
iterator = DummyIterator(data=['2', '3', '5'])
try:
self.driver.upload_object_via_stream(container=container,
object_name=object_name,
iterator=iterator)
finally:
libcloud.storage.drivers.atmos.guess_file_mime_type = old_func
def test_upload_object_via_stream_no_content_type(self):
def no_content_type(name):
return None, None
old_func = libcloud.storage.drivers.atmos.guess_file_mime_type
libcloud.storage.drivers.atmos.guess_file_mime_type = no_content_type
container = Container(name='fbc', extra={}, driver=self)
object_name = 'ftsdct'
iterator = DummyIterator(data=['2', '3', '5'])
try:
self.driver.upload_object_via_stream(container=container,
object_name=object_name,
iterator=iterator)
except AttributeError:
pass
else:
self.fail(
'File content type not provided'
' but an exception was not thrown')
finally:
libcloud.storage.drivers.atmos.guess_file_mime_type = old_func
def test_signature_algorithm(self):
test_uid = 'fredsmagicuid'
test_key = base64.b64encode(b('ssssshhhhhmysecretkey'))
test_date = 'Mon, 04 Jul 2011 07:39:19 GMT'
test_values = [
('GET', '/rest/namespace/foo', '', {},
'WfSASIA25TuqO2n0aO9k/dtg6S0='),
('GET', '/rest/namespace/foo%20%26%20bar', '', {},
'vmlqXqcInxxoP4YX5mR09BonjX4='),
('POST', '/rest/namespace/foo', '', {},
'oYKdsF+1DOuUT7iX5CJCDym2EQk='),
('PUT', '/rest/namespace/foo', '', {},
'JleF9dpSWhaT3B2swZI3s41qqs4='),
('DELETE', '/rest/namespace/foo', '', {},
'2IX+Bd5XZF5YY+g4P59qXV1uLpo='),
('GET', '/rest/namespace/foo?metata/system', '', {},
'zuHDEAgKM1winGnWn3WBsqnz4ks='),
('POST', '/rest/namespace/foo?metadata/user', '', {
'x-emc-meta': 'fakemeta=fake, othermeta=faketoo'
}, '7sLx1nxPIRAtocfv02jz9h1BjbU='),
]
class FakeDriver(object):
path = ''
for method, action, api_path, headers, expected in test_values:
c = AtmosConnection(test_uid, test_key)
c.method = method
c.action = action
d = FakeDriver()
d.path = api_path
c.driver = d
headers = c.add_default_headers(headers)
headers['Date'] = headers['x-emc-date'] = test_date
self.assertEqual(c._calculate_signature({}, headers),
b(expected).decode('utf-8'))
class AtmosMockHttp(MockHttp, unittest.TestCase):
fixtures = StorageFileFixtures('atmos')
upload_created = False
upload_stream_created = False
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self)
if kwargs.get('host', None) and kwargs.get('port', None):
MockHttp.__init__(self, *args, **kwargs)
self._upload_object_via_stream_first_request = True
def runTest(self):
pass
def request(self, method, url, body=None, headers=None, raw=False,
stream=False):
headers = headers or {}
parsed = urlparse.urlparse(url)
if parsed.query.startswith('metadata/'):
parsed = list(parsed)
parsed[2] = parsed[2] + '/' + parsed[4]
parsed[4] = ''
url = urlparse.urlunparse(parsed)
return super(AtmosMockHttp, self).request(method, url, body, headers,
raw)
def _rest_namespace_EMPTY(self, method, url, body, headers):
body = self.fixtures.load('empty_directory_listing.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _rest_namespace(self, method, url, body, headers):
body = self.fixtures.load('list_containers.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _rest_namespace_test_container_EMPTY(self, method, url, body, headers):
body = self.fixtures.load('empty_directory_listing.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _rest_namespace_test_container(self, method, url, body, headers):
body = self.fixtures.load('list_containers.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _rest_namespace_test_container__metadata_system(
self, method, url, body,
headers):
headers = {
'x-emc-meta': 'objectid=b21cb59a2ba339d1afdd4810010b0a5aba2ab6b9'
}
return (httplib.OK, '', headers, httplib.responses[httplib.OK])
def _rest_namespace_test_20_26_20container__metadata_system(
self, method, url, body,
headers):
headers = {
'x-emc-meta': 'objectid=b21cb59a2ba339d1afdd4810010b0a5aba2ab6b9'
}
return (httplib.OK, '', headers, httplib.responses[httplib.OK])
def _rest_namespace_not_found__metadata_system(self, method, url, body,
headers):
body = self.fixtures.load('not_found.xml')
return (httplib.NOT_FOUND, body, {},
httplib.responses[httplib.NOT_FOUND])
def _rest_namespace_test_create_container(self, method, url, body, headers):
return (httplib.OK, '', {}, httplib.responses[httplib.OK])
def _rest_namespace_test_create_container__metadata_system(self, method,
url, body,
headers):
headers = {
'x-emc-meta': 'objectid=31a27b593629a3fe59f887fd973fd953e80062ce'
}
return (httplib.OK, '', headers, httplib.responses[httplib.OK])
def _rest_namespace_test_create_container_ALREADY_EXISTS(self, method, url,
body, headers):
body = self.fixtures.load('already_exists.xml')
return (httplib.BAD_REQUEST, body, {},
httplib.responses[httplib.BAD_REQUEST])
def _rest_namespace_foo_bar_container(self, method, url, body, headers):
return (httplib.OK, '', {}, httplib.responses[httplib.OK])
def _rest_namespace_foo_bar_container_NOT_FOUND(self, method, url, body,
headers):
body = self.fixtures.load('not_found.xml')
return (httplib.NOT_FOUND, body, {},
httplib.responses[httplib.NOT_FOUND])
def _rest_namespace_foo_bar_container_NOT_EMPTY(self, method, url, body,
headers):
body = self.fixtures.load('not_empty.xml')
return (httplib.BAD_REQUEST, body, {},
httplib.responses[httplib.BAD_REQUEST])
def _rest_namespace_test_container_test_object_metadata_system(
self, method,
url, body,
headers):
meta = {
'objectid': '322dce3763aadc41acc55ef47867b8d74e45c31d6643',
'size': '555',
'mtime': '2011-01-25T22:01:49Z'
}
headers = {
'x-emc-meta': ', '.join([k + '=' + v for k, v in list(meta.items())])
}
return (httplib.OK, '', headers, httplib.responses[httplib.OK])
def _rest_namespace_test_20_26_20container_test_20_26_20object_metadata_system(
self, method,
url, body,
headers):
meta = {
'objectid': '322dce3763aadc41acc55ef47867b8d74e45c31d6643',
'size': '555',
'mtime': '2011-01-25T22:01:49Z'
}
headers = {
'x-emc-meta': ', '.join([k + '=' + v for k, v in list(meta.items())])
}
return (httplib.OK, '', headers, httplib.responses[httplib.OK])
def _rest_namespace_test_container_test_object_metadata_user(self, method,
url, body,
headers):
meta = {
'md5': '6b21c4a111ac178feacf9ec9d0c71f17',
'foo-bar': 'test 1',
'bar-foo': 'test 2',
}
headers = {
'x-emc-meta': ', '.join([k + '=' + v for k, v in list(meta.items())])
}
return (httplib.OK, '', headers, httplib.responses[httplib.OK])
def _rest_namespace_test_20_26_20container_test_20_26_20object_metadata_user(
self, method,
url, body,
headers):
meta = {
'md5': '6b21c4a111ac178feacf9ec9d0c71f17',
'foo-bar': 'test 1',
'bar-foo': 'test 2',
}
headers = {
'x-emc-meta': ', '.join([k + '=' + v for k, v in list(meta.items())])
}
return (httplib.OK, '', headers, httplib.responses[httplib.OK])
def _rest_namespace_test_container_not_found_metadata_system(self, method,
url, body,
headers):
body = self.fixtures.load('not_found.xml')
return (httplib.NOT_FOUND, body, {},
httplib.responses[httplib.NOT_FOUND])
def _rest_namespace_foo_bar_container_foo_bar_object_DELETE(self, method, url,
body, headers):
return (httplib.OK, '', {}, httplib.responses[httplib.OK])
def _rest_namespace_foo_20_26_20bar_container_foo_20_26_20bar_object_DELETE(
self, method, url,
body, headers):
return (httplib.OK, '', {}, httplib.responses[httplib.OK])
def _rest_namespace_foo_bar_container_foo_bar_object_NOT_FOUND(
self, method,
url, body,
headers):
body = self.fixtures.load('not_found.xml')
return (httplib.NOT_FOUND, body, {},
httplib.responses[httplib.NOT_FOUND])
def _rest_namespace_fbc_ftu_metadata_system(self, method, url, body,
headers):
if not self.upload_created:
self.__class__.upload_created = True
body = self.fixtures.load('not_found.xml')
return (httplib.NOT_FOUND, body, {},
httplib.responses[httplib.NOT_FOUND])
self.__class__.upload_created = False
meta = {
'objectid': '322dce3763aadc41acc55ef47867b8d74e45c31d6643',
'size': '555',
'mtime': '2011-01-25T22:01:49Z'
}
headers = {
'x-emc-meta': ', '.join([k + '=' + v for k, v in list(meta.items())])
}
return (httplib.OK, '', headers, httplib.responses[httplib.OK])
def _rest_namespace_fbc_ftu_metadata_user(self, method, url, body, headers):
self.assertTrue('x-emc-meta' in headers)
return (httplib.OK, '', {}, httplib.responses[httplib.OK])
def _rest_namespace_fbc_ftsdn_metadata_system(self, method, url, body,
headers):
if not self.upload_stream_created:
self.__class__.upload_stream_created = True
body = self.fixtures.load('not_found.xml')
return (httplib.NOT_FOUND, body, {},
httplib.responses[httplib.NOT_FOUND])
self.__class__.upload_stream_created = False
meta = {
'objectid': '322dce3763aadc41acc55ef47867b8d74e45c31d6643',
'size': '555',
'mtime': '2011-01-25T22:01:49Z'
}
headers = {
'x-emc-meta': ', '.join([k + '=' + v for k, v in list(meta.items())])
}
return (httplib.OK, '', headers, httplib.responses[httplib.OK])
def _rest_namespace_fbc_ftsdn(self, method, url, body, headers):
if self._upload_object_via_stream_first_request:
self.assertTrue('Range' not in headers)
self.assertEqual(method, 'POST')
self._upload_object_via_stream_first_request = False
else:
self.assertTrue('Range' in headers)
self.assertEqual(method, 'PUT')
return (httplib.OK, '', {}, httplib.responses[httplib.OK])
def _rest_namespace_fbc_ftsdn_metadata_user(self, method, url, body,
headers):
self.assertTrue('x-emc-meta' in headers)
return (httplib.OK, '', {}, httplib.responses[httplib.OK])
def _rest_namespace_fbc_ftsde_metadata_system(self, method, url, body,
headers):
meta = {
'objectid': '322dce3763aadc41acc55ef47867b8d74e45c31d6643',
'size': '555',
'mtime': '2011-01-25T22:01:49Z'
}
headers = {
'x-emc-meta': ', '.join([k + '=' + v for k, v in list(meta.items())])
}
return (httplib.OK, '', headers, httplib.responses[httplib.OK])
def _rest_namespace_fbc_ftsde(self, method, url, body, headers):
if self._upload_object_via_stream_first_request:
self.assertTrue('Range' not in headers)
self._upload_object_via_stream_first_request = False
else:
self.assertTrue('Range' in headers)
self.assertEqual(method, 'PUT')
return (httplib.OK, '', {}, httplib.responses[httplib.OK])
def _rest_namespace_fbc_ftsde_metadata_user(self, method, url, body,
headers):
self.assertTrue('x-emc-meta' in headers)
return (httplib.OK, '', {}, httplib.responses[httplib.OK])
def _rest_namespace_fbc_ftsd_metadata_system(self, method, url, body,
headers):
meta = {
'objectid': '322dce3763aadc41acc55ef47867b8d74e45c31d6643',
'size': '555',
'mtime': '2011-01-25T22:01:49Z'
}
headers = {
'x-emc-meta': ', '.join([k + '=' + v for k, v in list(meta.items())])
}
return (httplib.OK, '', headers, httplib.responses[httplib.OK])
def _rest_namespace_foo_bar_container_foo_bar_object(self, method, url,
body, headers):
body = generate_random_data(1000)
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _rest_namespace_foo_20_26_20bar_container_foo_20_26_20bar_object(
self, method, url,
body, headers):
body = generate_random_data(1000)
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _rest_namespace_fbc_ftu(self, method, url, body, headers):
return (httplib.CREATED, '', {}, httplib.responses[httplib.CREATED])
if __name__ == '__main__':
sys.exit(unittest.main())
| 42.052219
| 105
| 0.595244
|
d861b5599b3caecde75e21230d3901cb3459539e
| 3,956
|
py
|
Python
|
tests/shell/tests.py
|
ni-ning/django
|
2e7ba6057cfc82a15a22b6021cd60cf307152e2d
|
[
"CNRI-Python-GPL-Compatible",
"BSD-3-Clause"
] | 7
|
2015-09-08T22:23:36.000Z
|
2022-03-08T09:24:40.000Z
|
tests/shell/tests.py
|
ni-ning/django
|
2e7ba6057cfc82a15a22b6021cd60cf307152e2d
|
[
"CNRI-Python-GPL-Compatible",
"BSD-3-Clause"
] | 8
|
2017-04-19T16:20:47.000Z
|
2022-03-28T14:40:11.000Z
|
tests/shell/tests.py
|
ni-ning/django
|
2e7ba6057cfc82a15a22b6021cd60cf307152e2d
|
[
"CNRI-Python-GPL-Compatible",
"BSD-3-Clause"
] | 3
|
2020-07-13T04:49:16.000Z
|
2021-12-22T21:15:14.000Z
|
import sys
import unittest
from unittest import mock
from django import __version__
from django.core.management import CommandError, call_command
from django.test import SimpleTestCase
from django.test.utils import captured_stdin, captured_stdout
class ShellCommandTestCase(SimpleTestCase):
script_globals = 'print("__name__" in globals())'
script_with_inline_function = (
'import django\n'
'def f():\n'
' print(django.__version__)\n'
'f()'
)
def test_command_option(self):
with self.assertLogs('test', 'INFO') as cm:
call_command(
'shell',
command=(
'import django; from logging import getLogger; '
'getLogger("test").info(django.__version__)'
),
)
self.assertEqual(cm.records[0].getMessage(), __version__)
def test_command_option_globals(self):
with captured_stdout() as stdout:
call_command('shell', command=self.script_globals)
self.assertEqual(stdout.getvalue().strip(), 'True')
def test_command_option_inline_function_call(self):
with captured_stdout() as stdout:
call_command('shell', command=self.script_with_inline_function)
self.assertEqual(stdout.getvalue().strip(), __version__)
@unittest.skipIf(sys.platform == 'win32', "Windows select() doesn't support file descriptors.")
@mock.patch('django.core.management.commands.shell.select')
def test_stdin_read(self, select):
with captured_stdin() as stdin, captured_stdout() as stdout:
stdin.write('print(100)\n')
stdin.seek(0)
call_command('shell')
self.assertEqual(stdout.getvalue().strip(), '100')
@unittest.skipIf(
sys.platform == 'win32',
"Windows select() doesn't support file descriptors.",
)
@mock.patch('django.core.management.commands.shell.select') # [1]
def test_stdin_read_globals(self, select):
with captured_stdin() as stdin, captured_stdout() as stdout:
stdin.write(self.script_globals)
stdin.seek(0)
call_command('shell')
self.assertEqual(stdout.getvalue().strip(), 'True')
@unittest.skipIf(
sys.platform == 'win32',
"Windows select() doesn't support file descriptors.",
)
@mock.patch('django.core.management.commands.shell.select') # [1]
def test_stdin_read_inline_function_call(self, select):
with captured_stdin() as stdin, captured_stdout() as stdout:
stdin.write(self.script_with_inline_function)
stdin.seek(0)
call_command('shell')
self.assertEqual(stdout.getvalue().strip(), __version__)
@mock.patch('django.core.management.commands.shell.select.select') # [1]
@mock.patch.dict('sys.modules', {'IPython': None})
def test_shell_with_ipython_not_installed(self, select):
select.return_value = ([], [], [])
with self.assertRaisesMessage(CommandError, "Couldn't import ipython interface."):
call_command('shell', interface='ipython')
@mock.patch('django.core.management.commands.shell.select.select') # [1]
@mock.patch.dict('sys.modules', {'bpython': None})
def test_shell_with_bpython_not_installed(self, select):
select.return_value = ([], [], [])
with self.assertRaisesMessage(CommandError, "Couldn't import bpython interface."):
call_command('shell', interface='bpython')
# [1] Patch select to prevent tests failing when the test suite is run
# in parallel mode. The tests are run in a subprocess and the subprocess's
# stdin is closed and replaced by /dev/null. Reading from /dev/null always
# returns EOF and so select always shows that sys.stdin is ready to read.
# This causes problems because of the call to select.select() towards the
# end of shell's handle() method.
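# A minimal standalone sketch (not part of the original suite) of the pattern
# the note above describes: with the shell command's select patched out, the
# command falls back to reading the captured stdin buffer, so the test stays
# deterministic even when the real stdin is /dev/null under parallel runs.
# The class and method names below are hypothetical.
class ShellCommandSelectPatchSketch(SimpleTestCase):
    @unittest.skipIf(sys.platform == 'win32', "Windows select() doesn't support file descriptors.")
    @mock.patch('django.core.management.commands.shell.select')
    def test_reads_captured_stdin_when_select_is_patched(self, select):
        with captured_stdin() as stdin, captured_stdout() as stdout:
            stdin.write('print(21 * 2)\n')
            stdin.seek(0)
            call_command('shell')
        self.assertEqual(stdout.getvalue().strip(), '42')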
| 42.085106
| 99
| 0.657988
|
4a035f045a1d744da6b599dbfb2e775b61a63d5b
| 27,895
|
py
|
Python
|
leo/modes/pvwave.py
|
ATikhonov2/leo-editor
|
225aac990a9b2804aaa9dea29574d6e072e30474
|
[
"MIT"
] | 1,550
|
2015-01-14T16:30:37.000Z
|
2022-03-31T08:55:58.000Z
|
leo/modes/pvwave.py
|
ATikhonov2/leo-editor
|
225aac990a9b2804aaa9dea29574d6e072e30474
|
[
"MIT"
] | 2,009
|
2015-01-13T16:28:52.000Z
|
2022-03-31T18:21:48.000Z
|
leo/modes/pvwave.py
|
ATikhonov2/leo-editor
|
225aac990a9b2804aaa9dea29574d6e072e30474
|
[
"MIT"
] | 200
|
2015-01-05T15:07:41.000Z
|
2022-03-07T17:05:01.000Z
|
# Leo colorizer control file for pvwave mode.
# This file is in the public domain.
# Properties for pvwave mode.
properties = {
"lineComment": ";",
}
# Attributes dict for pvwave_main ruleset.
pvwave_main_attributes_dict = {
"default": "null",
"digit_re": "",
"escape": "",
"highlight_digits": "true",
"ignore_case": "true",
"no_word_sep": "",
}
# Dictionary of attributes dictionaries for pvwave mode.
attributesDictDict = {
"pvwave_main": pvwave_main_attributes_dict,
}
# Keywords dict for pvwave_main ruleset.
pvwave_main_keywords_dict = {
"abs": "keyword1",
"acos": "keyword1",
"add_exec_on_select": "keyword1",
"addsysvar": "keyword1",
"addvar": "keyword1",
"affine": "keyword1",
"alog": "keyword1",
"alog10": "keyword1",
"and": "keyword3",
"asarr": "keyword1",
"asin": "keyword1",
"askeys": "keyword1",
"assoc": "keyword1",
"atan": "keyword1",
"avg": "keyword1",
"axis": "keyword1",
"bar": "keyword1",
"bar2d": "keyword1",
"bar3d": "keyword1",
"begin": "keyword2",
"beseli": "keyword1",
"beselj": "keyword1",
"besely": "keyword1",
"bilinear": "keyword1",
"bindgen": "keyword1",
"blob": "keyword1",
"blobcount": "keyword1",
"boundary": "keyword1",
"breakpoint": "keyword2",
"build_table": "keyword1",
"buildresourcefilename": "keyword1",
"bytarr": "keyword1",
"byte": "keyword1",
"byteorder": "keyword1",
"bytscl": "keyword1",
"c_edit": "keyword1",
"call_unix": "keyword1",
"case": "keyword2",
"cd": "keyword1",
"center_view": "keyword1",
"chebyshev": "keyword1",
"check_math": "keyword1",
"checkfile": "keyword1",
"cindgen": "keyword1",
"close": "keyword1",
"color_convert": "keyword1",
"color_edit": "keyword1",
"color_palette": "keyword1",
"common": "keyword2",
"compile": "keyword2",
"complex": "keyword1",
"complexarr": "keyword1",
"cone": "keyword1",
"congrid": "keyword1",
"conj": "keyword1",
"contour": "keyword1",
"contour2": "keyword1",
"contourfill": "keyword1",
"conv_from_rect": "keyword1",
"conv_to_rect": "keyword1",
"convert_coord": "keyword1",
"convol": "keyword1",
"correlate": "keyword1",
"cos": "keyword1",
"cosh": "keyword1",
"cosines": "keyword1",
"cprod": "keyword1",
"create_holidays": "keyword1",
"create_weekdends": "keyword1",
"crossp": "keyword1",
"cursor": "keyword1",
"curvatures": "keyword1",
"curvefit": "keyword1",
"cylinder": "keyword1",
"day_name": "keyword1",
"day_of_week": "keyword1",
"day_of_year": "keyword1",
"dblarr": "keyword1",
"dc_error_msg": "keyword1",
"dc_options": "keyword1",
"dc_read_24_bit": "keyword1",
"dc_read_8_bit": "keyword1",
"dc_read_container": "keyword1",
"dc_read_dib": "keyword1",
"dc_read_fixed": "keyword1",
"dc_read_free": "keyword1",
"dc_read_tiff": "keyword1",
"dc_scan_container": "keyword1",
"dc_write_24_bit": "keyword1",
"dc_write_8_bit": "keyword1",
"dc_write_dib": "keyword1",
"dc_write_fixed": "keyword1",
"dc_write_free": "keyword1",
"dc_write_tiff": "keyword1",
"dcindgen": "keyword1",
"dcomplex": "keyword1",
"dcomplexarr": "keyword1",
"declare": "keyword2",
"define_key": "keyword1",
"defroi": "keyword1",
"defsysv": "keyword1",
"del_file": "keyword1",
"delfunc": "keyword1",
"dellog": "keyword1",
"delproc": "keyword1",
"delstruct": "keyword1",
"delvar": "keyword1",
"demo": "keyword1",
"deriv": "keyword1",
"derivn": "keyword1",
"determ": "keyword1",
"device": "keyword1",
"diag": "keyword1",
"dicm_tag_info": "keyword1",
"digital_filter": "keyword1",
"dilate": "keyword1",
"dindgen": "keyword1",
"dist": "keyword1",
"dminit": "keyword1",
"do": "keyword2",
"doc_lib_unix": "keyword1",
"doc_library": "keyword1",
"double": "keyword1",
"drop_exec_on_select": "keyword1",
"dt_add": "keyword1",
"dt_addly": "keyword1",
"dt_compress": "keyword1",
"dt_duration": "keyword1",
"dt_print": "keyword1",
"dt_subly": "keyword1",
"dt_subtract": "keyword1",
"dt_to_sec": "keyword1",
"dt_to_str": "keyword1",
"dt_to_var": "keyword1",
"dtegn": "keyword1",
"else": "keyword2",
"empty": "keyword1",
"end": "keyword2",
"endcase": "keyword2",
"endelse": "keyword2",
"endfor": "keyword2",
"endif": "keyword2",
"endrepeat": "keyword2",
"endwhile": "keyword2",
"environment": "keyword1",
"eof": "keyword1",
"eq": "keyword3",
"erase": "keyword1",
"erode": "keyword1",
"errorf": "keyword1",
"errplot": "keyword1",
"euclidean": "keyword1",
"exec_on_select": "keyword1",
"execute": "keyword1",
"exit": "keyword2",
"exp": "keyword1",
"expand": "keyword1",
"expon": "keyword1",
"extrema": "keyword1",
"factor": "keyword1",
"fast_grid2": "keyword1",
"fast_grid3": "keyword1",
"fast_grid4": "keyword1",
"fft": "keyword1",
"filepath": "keyword1",
"findfile": "keyword1",
"findgen": "keyword1",
"finite": "keyword1",
"fix": "keyword1",
"float": "keyword1",
"fltarr": "keyword1",
"flush": "keyword1",
"for": "keyword2",
"free_lun": "keyword1",
"fstat": "keyword1",
"func": "keyword2",
"funct": "keyword1",
"function": "keyword2",
"gamma": "keyword1",
"gaussfit": "keyword1",
"gaussint": "keyword1",
"gcd": "keyword1",
"ge": "keyword3",
"get_kbrd": "keyword1",
"get_lun": "keyword1",
"get_named_color": "keyword1",
"getenv": "keyword1",
"getncerr": "keyword1",
"getncopts": "keyword1",
"getparam": "keyword1",
"goto": "keyword2",
"great_int": "keyword1",
"grid": "keyword1",
"grid_2d": "keyword1",
"grid_3d": "keyword1",
"grid_4d": "keyword1",
"grid_sphere": "keyword1",
"gridn": "keyword1",
"group_by": "keyword1",
"gt": "keyword3",
"hak": "keyword1",
"hanning": "keyword1",
"hdf_test": "keyword1",
"hdfgetsds": "keyword1",
"help": "keyword2",
"hilbert": "keyword1",
"hist_equal": "keyword1",
"hist_equal_ct": "keyword1",
"histn": "keyword1",
"histogram": "keyword1",
"hls": "keyword1",
"hsv": "keyword1",
"hsv_to_rgd": "keyword1",
"if": "keyword2",
"image_check": "keyword1",
"image_color_quant": "keyword1",
"image_cont": "keyword1",
"image_create": "keyword1",
"image_display": "keyword1",
"image_filetypes": "keyword1",
"image_query_file": "keyword1",
"image_read": "keyword1",
"image_write": "keyword1",
"imaginary": "keyword1",
"img_true8": "keyword1",
"index_and": "keyword1",
"index_conv": "keyword1",
"index_or": "keyword1",
"indgen": "keyword1",
"info": "keyword2",
"intarr": "keyword1",
"interpol": "keyword1",
"interpolate": "keyword1",
"intrp": "keyword1",
"invert": "keyword1",
"isaskey": "keyword1",
"ishft": "keyword1",
"jacobian": "keyword1",
"journal": "keyword2",
"jul_to_dt": "keyword1",
"keyword_set": "keyword1",
"lcm": "keyword1",
"le": "keyword3",
"leefilt": "keyword1",
"legend": "keyword1",
"lindgen": "keyword1",
"linknload": "keyword1",
"list": "keyword1",
"listarr": "keyword1",
"load_holidays": "keyword1",
"load_option": "keyword1",
"load_weekends": "keyword1",
"loadct": "keyword1",
"loadct_custom": "keyword1",
"loadresources": "keyword1",
"loadstrings": "keyword1",
"locals": "keyword2",
"lonarr": "keyword1",
"long": "keyword1",
"lt": "keyword3",
"lubksb": "keyword1",
"ludcmp": "keyword1",
"make_array": "keyword1",
"map": "keyword1",
"map_axes": "keyword1",
"map_contour": "keyword1",
"map_grid": "keyword1",
"map_plots": "keyword1",
"map_polyfill": "keyword1",
"map_proj": "keyword1",
"map_reverse": "keyword1",
"map_velovect": "keyword1",
"map_version": "keyword1",
"map_xyouts": "keyword1",
"max": "keyword1",
"median": "keyword1",
"mesh": "keyword1",
"message": "keyword1",
"min": "keyword1",
"mod": "keyword3",
"modifyct": "keyword1",
"molec": "keyword1",
"moment": "keyword1",
"month_name": "keyword1",
"movie": "keyword1",
"mprove": "keyword1",
"msword_cgm_setup": "keyword1",
"n_elements": "keyword1",
"n_params": "keyword1",
"n_tags": "keyword1",
"ne": "keyword3",
"nint": "keyword1",
"normals": "keyword1",
"not": "keyword3",
"null_processor": "keyword1",
"of": "keyword2",
"on_error": "keyword2",
"on_error_goto": "keyword2",
"on_ioerror": "keyword2",
"openr": "keyword1",
"openu": "keyword1",
"openw": "keyword1",
"oplot": "keyword1",
"oploterr": "keyword1",
"option_is_loaded": "keyword1",
"or": "keyword3",
"order_by": "keyword1",
"packimage": "keyword1",
"packtable": "keyword1",
"padit": "keyword1",
"palette": "keyword1",
"param_present": "keyword1",
"parsefilename": "keyword1",
"pie": "keyword1",
"pie_chart": "keyword1",
"plot": "keyword1",
"plot_field": "keyword1",
"plot_histogram": "keyword1",
"plot_io": "keyword1",
"plot_oi": "keyword1",
"plot_oo": "keyword1",
"plot_windrose": "keyword1",
"ploterr": "keyword1",
"plots": "keyword1",
"pm": "keyword1",
"pmf": "keyword1",
"point_lun": "keyword1",
"poly": "keyword1",
"poly_2d": "keyword1",
"poly_area": "keyword1",
"poly_c_conv": "keyword1",
"poly_count": "keyword1",
"poly_dev": "keyword1",
"poly_fit": "keyword1",
"poly_merge": "keyword1",
"poly_norm": "keyword1",
"poly_plot": "keyword1",
"poly_sphere": "keyword1",
"poly_surf": "keyword1",
"poly_trans": "keyword1",
"polyfill": "keyword1",
"polyfillv": "keyword1",
"polyfitw": "keyword1",
"polyshade": "keyword1",
"polywarp": "keyword1",
"popd": "keyword1",
"prime": "keyword1",
"print": "keyword1",
"printd": "keyword1",
"printf": "keyword1",
"pro": "keyword2",
"profile": "keyword1",
"profiles": "keyword1",
"prompt": "keyword1",
"pseudo": "keyword1",
"pushd": "keyword1",
"query_table": "keyword1",
"quit": "keyword2",
"randomn": "keyword1",
"randomu": "keyword1",
"rdpix": "keyword1",
"read": "keyword1",
"read_airs": "keyword1",
"read_xbm": "keyword1",
"readf": "keyword1",
"readu": "keyword1",
"rebin": "keyword1",
"reform": "keyword1",
"regress": "keyword1",
"rename": "keyword1",
"render": "keyword1",
"render24": "keyword1",
"repeat": "keyword2",
"replicate": "keyword1",
"replv": "keyword1",
"resamp": "keyword1",
"restore": "keyword2",
"retall": "keyword2",
"return": "keyword2",
"reverse": "keyword1",
"rgb_to_hsv": "keyword1",
"rm": "keyword1",
"rmf": "keyword1",
"roberts": "keyword1",
"rot": "keyword1",
"rot_int": "keyword1",
"rotate": "keyword1",
"same": "keyword1",
"save": "keyword2",
"scale3d": "keyword1",
"sec_to_dt": "keyword1",
"select_read_lun": "keyword1",
"set_plot": "keyword1",
"set_screen": "keyword1",
"set_shading": "keyword1",
"set_symbol": "keyword1",
"set_view3d": "keyword1",
"set_viewport": "keyword1",
"set_xy": "keyword1",
"setdemo": "keyword1",
"setenv": "keyword1",
"setimagesize": "keyword1",
"setlog": "keyword1",
"setncopts": "keyword1",
"setup_keys": "keyword1",
"sgn": "keyword1",
"shade_surf": "keyword1",
"shade_surf_irr": "keyword1",
"shade_volume": "keyword1",
"shif": "keyword1",
"shift": "keyword1",
"show3": "keyword1",
"show_options": "keyword1",
"sigma": "keyword1",
"sin": "keyword1",
"sindgen": "keyword1",
"sinh": "keyword1",
"size": "keyword1",
"skipf": "keyword1",
"slice": "keyword1",
"slice_vol": "keyword1",
"small_int": "keyword1",
"smooth": "keyword1",
"sobel": "keyword1",
"socket_accept": "keyword1",
"socket_close": "keyword1",
"socket_connect": "keyword1",
"socket_getport": "keyword1",
"socket_init": "keyword1",
"socket_read": "keyword1",
"socket_write": "keyword1",
"sort": "keyword1",
"sortn": "keyword1",
"spawn": "keyword1",
"sphere": "keyword1",
"spline": "keyword1",
"sqrt": "keyword1",
"stdev": "keyword1",
"stop": "keyword2",
"str_to_dt": "keyword1",
"strarr": "keyword1",
"strcompress": "keyword1",
"stretch": "keyword1",
"string": "keyword1",
"strjoin": "keyword1",
"strlen": "keyword1",
"strlookup": "keyword1",
"strlowcase": "keyword1",
"strmatch": "keyword1",
"strmessage": "keyword1",
"strmid": "keyword1",
"strpos": "keyword1",
"strput": "keyword1",
"strsplit": "keyword1",
"strsubst": "keyword1",
"strtrim": "keyword1",
"structref": "keyword1",
"strupcase": "keyword1",
"sum": "keyword1",
"surface": "keyword1",
"surface_fit": "keyword1",
"surfr": "keyword1",
"svbksb": "keyword1",
"svd": "keyword1",
"svdfit": "keyword1",
"systime": "keyword1",
"t3d": "keyword1",
"tag_names": "keyword1",
"tan": "keyword1",
"tanh": "keyword1",
"tek_color": "keyword1",
"tensor_add": "keyword1",
"tensor_div": "keyword1",
"tensor_eq": "keyword1",
"tensor_exp": "keyword1",
"tensor_ge": "keyword1",
"tensor_gt": "keyword1",
"tensor_le": "keyword1",
"tensor_lt": "keyword1",
"tensor_max": "keyword1",
"tensor_min": "keyword1",
"tensor_mod": "keyword1",
"tensor_mul": "keyword1",
"tensor_ne": "keyword1",
"tensor_sub": "keyword1",
"then": "keyword2",
"threed": "keyword1",
"today": "keyword1",
"total": "keyword1",
"tqli": "keyword1",
"transpose": "keyword1",
"tred2": "keyword1",
"tridag": "keyword1",
"tv": "keyword1",
"tvcrs": "keyword1",
"tvlct": "keyword1",
"tvrd": "keyword1",
"tvscl": "keyword1",
"tvsize": "keyword1",
"uniqn": "keyword1",
"unique": "keyword1",
"unix_listen": "keyword1",
"unix_reply": "keyword1",
"unload_option": "keyword1",
"upvar": "keyword1",
"usersym": "keyword1",
"usgs_names": "keyword1",
"value_length": "keyword1",
"var_match": "keyword1",
"var_to_dt": "keyword1",
"vector_field3": "keyword1",
"vel": "keyword1",
"velovect": "keyword1",
"viewer": "keyword1",
"vol_marker": "keyword1",
"vol_pad": "keyword1",
"vol_red": "keyword1",
"vol_trans": "keyword1",
"volume": "keyword1",
"vtkaddattribute": "keyword1",
"vtkaxes": "keyword1",
"vtkcamera": "keyword1",
"vtkclose": "keyword1",
"vtkcolorbar": "keyword1",
"vtkcolornames": "keyword1",
"vtkcommand": "keyword1",
"vtkerase": "keyword1",
"vtkformat": "keyword1",
"vtkgrid": "keyword1",
"vtkhedgehog": "keyword1",
"vtkinit": "keyword1",
"vtklight": "keyword1",
"vtkplots": "keyword1",
"vtkpolydata": "keyword1",
"vtkpolyformat": "keyword1",
"vtkpolyshade": "keyword1",
"vtkppmread": "keyword1",
"vtkppmwrite": "keyword1",
"vtkreadvtk": "keyword1",
"vtkrectilineargrid": "keyword1",
"vtkrenderwindow": "keyword1",
"vtkscatter": "keyword1",
"vtkslicevol": "keyword1",
"vtkstructuredgrid": "keyword1",
"vtkstructuredpoints": "keyword1",
"vtksurface": "keyword1",
"vtksurfgen": "keyword1",
"vtktext": "keyword1",
"vtktvrd": "keyword1",
"vtkunstructuredgrid": "keyword1",
"vtkwdelete": "keyword1",
"vtkwindow": "keyword1",
"vtkwritevrml": "keyword1",
"vtkwset": "keyword1",
"wait": "keyword1",
"wavedatamanager": "keyword1",
"waveserver": "keyword1",
"wcopy": "keyword1",
"wdelete": "keyword1",
"wganimatetool": "keyword3",
"wgcbartool": "keyword3",
"wgcttool": "keyword3",
"wgisosurftool": "keyword3",
"wgmovietool": "keyword3",
"wgsimagetool": "keyword3",
"wgslicetool": "keyword3",
"wgsurfacetool": "keyword3",
"wgtexttool": "keyword3",
"where": "keyword1",
"wherein": "keyword1",
"while": "keyword2",
"whow": "keyword1",
"window": "keyword1",
"wmenu": "keyword1",
"woaddbuttons": "keyword3",
"woaddmessage": "keyword3",
"woaddstatus": "keyword3",
"wobuttonbar": "keyword3",
"wocheckfile": "keyword3",
"wocolorbutton": "keyword3",
"wocolorconvert": "keyword3",
"wocolorgrid": "keyword3",
"wocolorwheel": "keyword3",
"woconfirmclose": "keyword3",
"wodialogstatus": "keyword3",
"wofontoptionmenu": "keyword3",
"wogenericdialog": "keyword3",
"wolabeledtext": "keyword3",
"womenubar": "keyword3",
"womessage": "keyword3",
"wosaveaspixmap": "keyword3",
"wosetcursor": "keyword3",
"wosetwindowtitle": "keyword3",
"wostatus": "keyword3",
"wovariableoptionmenu": "keyword3",
"wpaste": "keyword1",
"wprint": "keyword1",
"wread_dib": "keyword1",
"wread_meta": "keyword1",
"write_xbm": "keyword1",
"writeu": "keyword1",
"wset": "keyword1",
"wtaddcallback": "keyword3",
"wtaddhandler": "keyword3",
"wtcursor": "keyword3",
"wtget": "keyword3",
"wtpointer": "keyword3",
"wtset": "keyword3",
"wttimer": "keyword3",
"wwalert": "keyword3",
"wwalertpopdown": "keyword3",
"wwbuttonbox": "keyword3",
"wwcallback": "keyword3",
"wwcontrolsbox": "keyword3",
"wwdialog": "keyword3",
"wwdrawing": "keyword3",
"wwfileselection": "keyword3",
"wwgenericdialog": "keyword3",
"wwgetbutton": "keyword3",
"wwgetkey": "keyword3",
"wwgetposition": "keyword3",
"wwgetvalue": "keyword3",
"wwhandler": "keyword3",
"wwinit": "keyword3",
"wwlayout": "keyword3",
"wwlist": "keyword3",
"wwlistutils": "keyword3",
"wwloop": "keyword3",
"wwmainwindow": "keyword3",
"wwmenubar": "keyword3",
"wwmenuitem": "keyword3",
"wwmessage": "keyword3",
"wwmulticlickhandler": "keyword3",
"wwoptionmenu": "keyword3",
"wwpickfile": "keyword3",
"wwpopupmenu": "keyword3",
"wwpreview": "keyword3",
"wwpreviewutils": "keyword3",
"wwradiobox": "keyword3",
"wwresource": "keyword3",
"wwrite_dib": "keyword1",
"wwrite_meta": "keyword1",
"wwseparator": "keyword3",
"wwsetcursor": "keyword3",
"wwsetvalue": "keyword3",
"wwtable": "keyword3",
"wwtableutils": "keyword3",
"wwtext": "keyword3",
"wwtimer": "keyword3",
"wwtoolbox": "keyword3",
"wzanimate": "keyword3",
"wzcoloredit": "keyword3",
"wzcontour": "keyword3",
"wzexport": "keyword3",
"wzhistogram": "keyword3",
"wzimage": "keyword3",
"wzimport": "keyword3",
"wzmultiview": "keyword3",
"wzplot": "keyword3",
"wzpreview": "keyword3",
"wzsurface": "keyword3",
"wztable": "keyword3",
"wzvariable": "keyword3",
"xor": "keyword3",
"xyouts": "keyword1",
"zoom": "keyword1",
"zroots": "keyword1",
}
# Dictionary of keywords dictionaries for pvwave mode.
keywordsDictDict = {
"pvwave_main": pvwave_main_keywords_dict,
}
# Rules for pvwave_main ruleset.
def pvwave_rule0(colorer, s, i):
return colorer.match_span(s, i, kind="literal1", begin="\"", end="\"",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="",exclude_match=False,
no_escape=False, no_line_break=True, no_word_break=False)
def pvwave_rule1(colorer, s, i):
return colorer.match_span(s, i, kind="literal1", begin="'", end="'",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="",exclude_match=False,
no_escape=False, no_line_break=True, no_word_break=False)
def pvwave_rule2(colorer, s, i):
return colorer.match_eol_span(s, i, kind="comment1", seq=";",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="", exclude_match=False)
def pvwave_rule3(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="(",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def pvwave_rule4(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq=")",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def pvwave_rule5(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="=",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def pvwave_rule6(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="+",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def pvwave_rule7(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="-",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def pvwave_rule8(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="/",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def pvwave_rule9(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="*",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def pvwave_rule10(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="#",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def pvwave_rule11(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq=">",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def pvwave_rule12(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="<",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def pvwave_rule13(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="^",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def pvwave_rule14(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="}",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def pvwave_rule15(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="{",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def pvwave_rule16(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq=".",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def pvwave_rule17(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq=",",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def pvwave_rule18(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="]",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def pvwave_rule19(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="[",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def pvwave_rule20(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq=":",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def pvwave_rule21(colorer, s, i):
return colorer.match_seq(s, i, kind="label", seq="$",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def pvwave_rule22(colorer, s, i):
return colorer.match_seq(s, i, kind="label", seq="&",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def pvwave_rule23(colorer, s, i):
return colorer.match_seq(s, i, kind="label", seq="@",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def pvwave_rule24(colorer, s, i):
return colorer.match_seq(s, i, kind="label", seq="!",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def pvwave_rule25(colorer, s, i):
return colorer.match_keywords(s, i)
# Rules dict for pvwave_main ruleset.
rulesDict1 = {
"!": [pvwave_rule24,],
"\"": [pvwave_rule0,],
"#": [pvwave_rule10,],
"$": [pvwave_rule21,],
"&": [pvwave_rule22,],
"'": [pvwave_rule1,],
"(": [pvwave_rule3,],
")": [pvwave_rule4,],
"*": [pvwave_rule9,],
"+": [pvwave_rule6,],
",": [pvwave_rule17,],
"-": [pvwave_rule7,],
".": [pvwave_rule16,],
"/": [pvwave_rule8,],
"0": [pvwave_rule25,],
"1": [pvwave_rule25,],
"2": [pvwave_rule25,],
"3": [pvwave_rule25,],
"4": [pvwave_rule25,],
"5": [pvwave_rule25,],
"6": [pvwave_rule25,],
"7": [pvwave_rule25,],
"8": [pvwave_rule25,],
"9": [pvwave_rule25,],
":": [pvwave_rule20,],
";": [pvwave_rule2,],
"<": [pvwave_rule12,],
"=": [pvwave_rule5,],
">": [pvwave_rule11,],
"@": [pvwave_rule23,pvwave_rule25,],
"A": [pvwave_rule25,],
"B": [pvwave_rule25,],
"C": [pvwave_rule25,],
"D": [pvwave_rule25,],
"E": [pvwave_rule25,],
"F": [pvwave_rule25,],
"G": [pvwave_rule25,],
"H": [pvwave_rule25,],
"I": [pvwave_rule25,],
"J": [pvwave_rule25,],
"K": [pvwave_rule25,],
"L": [pvwave_rule25,],
"M": [pvwave_rule25,],
"N": [pvwave_rule25,],
"O": [pvwave_rule25,],
"P": [pvwave_rule25,],
"Q": [pvwave_rule25,],
"R": [pvwave_rule25,],
"S": [pvwave_rule25,],
"T": [pvwave_rule25,],
"U": [pvwave_rule25,],
"V": [pvwave_rule25,],
"W": [pvwave_rule25,],
"X": [pvwave_rule25,],
"Y": [pvwave_rule25,],
"Z": [pvwave_rule25,],
"[": [pvwave_rule19,],
"]": [pvwave_rule18,],
"^": [pvwave_rule13,],
"_": [pvwave_rule25,],
"a": [pvwave_rule25,],
"b": [pvwave_rule25,],
"c": [pvwave_rule25,],
"d": [pvwave_rule25,],
"e": [pvwave_rule25,],
"f": [pvwave_rule25,],
"g": [pvwave_rule25,],
"h": [pvwave_rule25,],
"i": [pvwave_rule25,],
"j": [pvwave_rule25,],
"k": [pvwave_rule25,],
"l": [pvwave_rule25,],
"m": [pvwave_rule25,],
"n": [pvwave_rule25,],
"o": [pvwave_rule25,],
"p": [pvwave_rule25,],
"q": [pvwave_rule25,],
"r": [pvwave_rule25,],
"s": [pvwave_rule25,],
"t": [pvwave_rule25,],
"u": [pvwave_rule25,],
"v": [pvwave_rule25,],
"w": [pvwave_rule25,],
"x": [pvwave_rule25,],
"y": [pvwave_rule25,],
"z": [pvwave_rule25,],
"{": [pvwave_rule15,],
"}": [pvwave_rule14,],
}
# x.rulesDictDict for pvwave mode.
rulesDictDict = {
"pvwave_main": rulesDict1,
}
# Import dict for pvwave mode.
importDict = {}
| 30.653846
| 88
| 0.576698
|
effc133eb953e25ea09684c6a5e30c1e12e7e6db
| 3,700
|
py
|
Python
|
pypureclient/flasharray/FA_2_4/models/hardware_patch.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 14
|
2018-12-07T18:30:27.000Z
|
2022-02-22T09:12:33.000Z
|
pypureclient/flasharray/FA_2_4/models/hardware_patch.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 28
|
2019-09-17T21:03:52.000Z
|
2022-03-29T22:07:35.000Z
|
pypureclient/flasharray/FA_2_4/models/hardware_patch.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 15
|
2020-06-11T15:50:08.000Z
|
2022-03-21T09:27:25.000Z
|
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_4 import models
class HardwarePatch(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'identify_enabled': 'bool',
'index': 'int'
}
attribute_map = {
'name': 'name',
'identify_enabled': 'identify_enabled',
'index': 'index'
}
required_args = {
}
def __init__(
self,
name=None, # type: str
identify_enabled=None, # type: bool
index=None, # type: int
):
"""
Keyword args:
name (str): A locally unique, system-generated name. The name cannot be modified.
identify_enabled (bool): State of an LED used to visually identify the component.
index (int): Number that identifies the relative position of a hardware component within the array.
"""
if name is not None:
self.name = name
if identify_enabled is not None:
self.identify_enabled = identify_enabled
if index is not None:
self.index = index
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `HardwarePatch`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(HardwarePatch, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, HardwarePatch):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
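# Illustrative sketch only (not part of the generated client): a hypothetical
# usage of HardwarePatch showing the attribute_map/swagger_types behaviour the
# docstrings above describe. Keys outside attribute_map are rejected by
# __setattr__, and to_dict() serialises only the attributes that were set.
# The component name used here is made up.
if __name__ == '__main__':
    patch = HardwarePatch(name='CH0.FB1', identify_enabled=True)
    print(patch.to_dict())       # {'name': 'CH0.FB1', 'identify_enabled': True}
    try:
        patch.unknown_field = 1  # not in attribute_map, so __setattr__ raises
    except KeyError as exc:
        print(exc)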
| 29.83871
| 111
| 0.553514
|
f7cc7f27df282faa9e66918373dcdfb3f5b95f66
| 25,360
|
py
|
Python
|
selfdrive/controls/lib/events.py
|
wahzoo-op/phoenixpilot
|
545286377f9752d0be8e0638b812b527d1987002
|
[
"MIT"
] | null | null | null |
selfdrive/controls/lib/events.py
|
wahzoo-op/phoenixpilot
|
545286377f9752d0be8e0638b812b527d1987002
|
[
"MIT"
] | null | null | null |
selfdrive/controls/lib/events.py
|
wahzoo-op/phoenixpilot
|
545286377f9752d0be8e0638b812b527d1987002
|
[
"MIT"
] | null | null | null |
from enum import IntEnum
from typing import Dict, Union, Callable, Any
from cereal import log, car
import cereal.messaging as messaging
from common.realtime import DT_CTRL
from selfdrive.config import Conversions as CV
from selfdrive.locationd.calibrationd import MIN_SPEED_FILTER
AlertSize = log.ControlsState.AlertSize
AlertStatus = log.ControlsState.AlertStatus
VisualAlert = car.CarControl.HUDControl.VisualAlert
AudibleAlert = car.CarControl.HUDControl.AudibleAlert
EventName = car.CarEvent.EventName
# Alert priorities
class Priority(IntEnum):
LOWEST = 0
LOWER = 1
LOW = 2
MID = 3
HIGH = 4
HIGHEST = 5
# Event types
class ET:
ENABLE = 'enable'
PRE_ENABLE = 'preEnable'
NO_ENTRY = 'noEntry'
WARNING = 'warning'
USER_DISABLE = 'userDisable'
SOFT_DISABLE = 'softDisable'
IMMEDIATE_DISABLE = 'immediateDisable'
PERMANENT = 'permanent'
# get event name from enum
EVENT_NAME = {v: k for k, v in EventName.schema.enumerants.items()}
class Events:
def __init__(self):
self.events = []
self.static_events = []
self.events_prev = dict.fromkeys(EVENTS.keys(), 0)
@property
def names(self):
return self.events
def __len__(self):
return len(self.events)
def add(self, event_name, static=False):
if static:
self.static_events.append(event_name)
self.events.append(event_name)
def clear(self):
self.events_prev = {k: (v+1 if k in self.events else 0) for k, v in self.events_prev.items()}
self.events = self.static_events.copy()
def any(self, event_type):
for e in self.events:
if event_type in EVENTS.get(e, {}).keys():
return True
return False
def create_alerts(self, event_types, callback_args=None):
if callback_args is None:
callback_args = []
ret = []
for e in self.events:
types = EVENTS[e].keys()
for et in event_types:
if et in types:
alert = EVENTS[e][et]
if not isinstance(alert, Alert):
alert = alert(*callback_args)
if DT_CTRL * (self.events_prev[e] + 1) >= alert.creation_delay:
alert.alert_type = f"{EVENT_NAME[e]}/{et}"
alert.event_type = et
ret.append(alert)
return ret
def add_from_msg(self, events):
for e in events:
self.events.append(e.name.raw)
def to_msg(self):
ret = []
for event_name in self.events:
event = car.CarEvent.new_message()
event.name = event_name
for event_type in EVENTS.get(event_name, {}).keys():
        setattr(event, event_type, True)
ret.append(event)
return ret
class Alert:
def __init__(self,
alert_text_1: str,
alert_text_2: str,
alert_status: log.ControlsState.AlertStatus,
alert_size: log.ControlsState.AlertSize,
alert_priority: Priority,
visual_alert: car.CarControl.HUDControl.VisualAlert,
audible_alert: car.CarControl.HUDControl.AudibleAlert,
duration_sound: float,
duration_hud_alert: float,
duration_text: float,
alert_rate: float = 0.,
creation_delay: float = 0.):
self.alert_text_1 = alert_text_1
self.alert_text_2 = alert_text_2
self.alert_status = alert_status
self.alert_size = alert_size
self.alert_priority = alert_priority
self.visual_alert = visual_alert
self.audible_alert = audible_alert
self.duration_sound = duration_sound
self.duration_hud_alert = duration_hud_alert
self.duration_text = duration_text
self.alert_rate = alert_rate
self.creation_delay = creation_delay
self.start_time = 0.
self.alert_type = ""
self.event_type = None
def __str__(self) -> str:
return f"{self.alert_text_1}/{self.alert_text_2} {self.alert_priority} {self.visual_alert} {self.audible_alert}"
def __gt__(self, alert2) -> bool:
return self.alert_priority > alert2.alert_priority
class NoEntryAlert(Alert):
def __init__(self, alert_text_2, audible_alert=AudibleAlert.chimeError,
visual_alert=VisualAlert.none, duration_hud_alert=2.):
super().__init__("openpilot Unavailable", alert_text_2, AlertStatus.normal,
AlertSize.mid, Priority.LOW, visual_alert,
audible_alert, .4, duration_hud_alert, 3.)
class SoftDisableAlert(Alert):
def __init__(self, alert_text_2):
super().__init__("TAKE CONTROL IMMEDIATELY", alert_text_2,
AlertStatus.critical, AlertSize.full,
Priority.MID, VisualAlert.steerRequired,
AudibleAlert.chimeWarningRepeat, .1, 2., 2.),
class ImmediateDisableAlert(Alert):
def __init__(self, alert_text_2, alert_text_1="TAKE CONTROL IMMEDIATELY"):
super().__init__(alert_text_1, alert_text_2,
AlertStatus.critical, AlertSize.full,
Priority.HIGHEST, VisualAlert.steerRequired,
AudibleAlert.chimeWarningRepeat, 2.2, 3., 4.),
class EngagementAlert(Alert):
def __init__(self, audible_alert=True):
super().__init__("", "",
AlertStatus.normal, AlertSize.none,
Priority.MID, VisualAlert.none,
audible_alert, .2, 0., 0.),
class NormalPermanentAlert(Alert):
def __init__(self, alert_text_1, alert_text_2):
super().__init__(alert_text_1, alert_text_2,
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
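# Illustrative sketch only (not part of the original module): roughly how the
# Events container above and the EVENTS dict below are meant to fit together.
# The surrounding frame loop is hypothetical; a real caller adds events each
# control frame and requests alerts for whichever event types are relevant.
def _example_alert_flow():
  events = Events()
  events.add(EventName.fcw)       # pretend an FCW event was raised this frame
  if events.any(ET.PERMANENT):    # fcw carries a PERMANENT alert in EVENTS
    for alert in events.create_alerts([ET.PERMANENT]):
      print(alert)                # uses Alert.__str__ defined above
  events.clear()                  # reset the container for the next frame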
# ********** alert callback functions **********
def below_steer_speed_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool) -> Alert:
speed = int(round(CP.minSteerSpeed * (CV.MS_TO_KPH if metric else CV.MS_TO_MPH)))
unit = "km/h" if metric else "mph"
return Alert(
"TAKE CONTROL",
"Steer Unavailable Below %d %s" % (speed, unit),
AlertStatus.userPrompt, AlertSize.mid,
Priority.MID, VisualAlert.steerRequired, AudibleAlert.none, 0., 0.4, .3)
def calibration_incomplete_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool) -> Alert:
speed = int(MIN_SPEED_FILTER * (CV.MS_TO_KPH if metric else CV.MS_TO_MPH))
unit = "km/h" if metric else "mph"
return Alert(
"Calibration in Progress: %d%%" % sm['liveCalibration'].calPerc,
"Drive Above %d %s" % (speed, unit),
AlertStatus.normal, AlertSize.mid,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, 0., 0., .2)
def no_gps_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool) -> Alert:
gps_integrated = sm['health'].hwType in [log.HealthData.HwType.uno, log.HealthData.HwType.dos]
return Alert(
"Poor GPS reception",
"If sky is visible, contact support" if gps_integrated else "Check GPS antenna placement",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2, creation_delay=300.)
def wrong_car_mode_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool) -> Alert:
text = "Cruise Mode Disabled"
if CP.carName == "honda":
text = "Main Switch Off"
return NoEntryAlert(text, duration_hud_alert=0.)
EVENTS: Dict[int, Dict[str, Union[Alert, Callable[[Any, messaging.SubMaster, bool], Alert]]]] = {
# ********** events with no alerts **********
# ********** events only containing alerts displayed in all states **********
EventName.debugAlert: {
ET.PERMANENT: Alert(
"DEBUG ALERT",
"",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .1, .1, .1),
},
EventName.startup: {
ET.PERMANENT: Alert(
"Be ready to take over at any time",
"Always keep hands on wheel and eyes on road",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., 15.),
},
EventName.startupMaster: {
ET.PERMANENT: Alert(
"WARNING: This branch is not tested",
"Always keep hands on wheel and eyes on road",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., 15.),
},
EventName.startupNoControl: {
ET.PERMANENT: Alert(
"Dashcam mode",
"Always keep hands on wheel and eyes on road",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., 15.),
},
EventName.startupNoCar: {
ET.PERMANENT: Alert(
"Dashcam mode for unsupported car",
"Always keep hands on wheel and eyes on road",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., 15.),
},
EventName.invalidLkasSetting: {
ET.PERMANENT: Alert(
"Stock LKAS is turned on",
"Turn off stock LKAS to engage",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
},
EventName.communityFeatureDisallowed: {
# LOW priority to overcome Cruise Error
ET.PERMANENT: Alert(
"Community Feature Detected",
"Enable Community Features in Developer Settings",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
},
EventName.carUnrecognized: {
ET.PERMANENT: Alert(
"Dashcam Mode",
"Car Unrecognized",
AlertStatus.normal, AlertSize.mid,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
},
EventName.stockAeb: {
ET.PERMANENT: Alert(
"BRAKE!",
"Stock AEB: Risk of Collision",
AlertStatus.critical, AlertSize.full,
Priority.HIGHEST, VisualAlert.fcw, AudibleAlert.none, 1., 2., 2.),
},
EventName.stockFcw: {
ET.PERMANENT: Alert(
"BRAKE!",
"Stock FCW: Risk of Collision",
AlertStatus.critical, AlertSize.full,
Priority.HIGHEST, VisualAlert.fcw, AudibleAlert.none, 1., 2., 2.),
},
EventName.fcw: {
ET.PERMANENT: Alert(
"BRAKE!",
"Risk of Collision",
AlertStatus.critical, AlertSize.full,
Priority.HIGHEST, VisualAlert.fcw, AudibleAlert.chimeWarningRepeat, 1., 2., 2.),
},
EventName.ldw: {
ET.PERMANENT: Alert(
"TAKE CONTROL",
"Lane Departure Detected",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, 1., 2., 3.),
},
# ********** events only containing alerts that display while engaged **********
EventName.gasPressed: {
ET.PRE_ENABLE: Alert(
"openpilot will not brake while gas pressed",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, .0, .0, .1, creation_delay=1.),
},
EventName.vehicleModelInvalid: {
ET.WARNING: Alert(
"Vehicle Parameter Identification Failed",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWEST, VisualAlert.steerRequired, AudibleAlert.none, .0, .0, .1),
},
EventName.steerTempUnavailableMute: {
ET.WARNING: Alert(
"TAKE CONTROL",
"Steering Temporarily Unavailable",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .2, .2, .2),
},
EventName.preDriverDistracted: {
ET.WARNING: Alert(
"KEEP EYES ON ROAD: Driver Distracted",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, .0, .1, .1),
},
EventName.promptDriverDistracted: {
ET.WARNING: Alert(
"KEEP EYES ON ROAD",
"Driver Distracted",
AlertStatus.userPrompt, AlertSize.mid,
Priority.MID, VisualAlert.steerRequired, AudibleAlert.chimeWarning2Repeat, .1, .1, .1),
},
EventName.driverDistracted: {
ET.WARNING: Alert(
"DISENGAGE IMMEDIATELY",
"Driver Distracted",
AlertStatus.critical, AlertSize.full,
Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.chimeWarningRepeat, .1, .1, .1),
},
EventName.preDriverUnresponsive: {
ET.WARNING: Alert(
"TOUCH STEERING WHEEL: No Face Detected",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, .0, .1, .1, alert_rate=0.75),
},
EventName.promptDriverUnresponsive: {
ET.WARNING: Alert(
"TOUCH STEERING WHEEL",
"Driver Unresponsive",
AlertStatus.userPrompt, AlertSize.mid,
Priority.MID, VisualAlert.steerRequired, AudibleAlert.chimeWarning2Repeat, .1, .1, .1),
},
EventName.driverUnresponsive: {
ET.WARNING: Alert(
"DISENGAGE IMMEDIATELY",
"Driver Unresponsive",
AlertStatus.critical, AlertSize.full,
Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.chimeWarningRepeat, .1, .1, .1),
},
EventName.driverMonitorLowAcc: {
ET.WARNING: Alert(
"CHECK DRIVER FACE VISIBILITY",
"Driver Monitoring Uncertain",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, .4, 0., 1.5),
},
EventName.manualRestart: {
ET.WARNING: Alert(
"TAKE CONTROL",
"Resume Driving Manually",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
},
EventName.resumeRequired: {
ET.WARNING: Alert(
"STOPPED",
"Press Resume to Move",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
},
EventName.belowSteerSpeed: {
ET.WARNING: below_steer_speed_alert,
},
EventName.preLaneChangeLeft: {
ET.WARNING: Alert(
"Steer Left to Start Lane Change",
"Monitor Other Vehicles",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, .0, .1, .1, alert_rate=0.75),
},
EventName.preLaneChangeRight: {
ET.WARNING: Alert(
"Steer Right to Start Lane Change",
"Monitor Other Vehicles",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, .0, .1, .1, alert_rate=0.75),
},
EventName.laneChangeBlocked: {
ET.WARNING: Alert(
"Car Detected in Blindspot",
"Monitor Other Vehicles",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, .0, .1, .1),
},
EventName.laneChange: {
ET.WARNING: Alert(
"Changing Lane",
"Monitor Other Vehicles",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, .0, .1, .1),
},
EventName.steerSaturated: {
ET.WARNING: Alert(
"TAKE CONTROL",
"Turn Exceeds Steering Limit",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, 1., 1., 1.),
},
EventName.fanMalfunction: {
ET.PERMANENT: NormalPermanentAlert("Fan Malfunction", "Contact Support"),
},
EventName.cameraMalfunction: {
ET.PERMANENT: NormalPermanentAlert("Camera Malfunction", "Contact Support"),
},
EventName.pscmHandshaking: {
ET.WARNING: Alert(
"RELEASE THE WHEEL",
"PSCM is handshaking. Please wait...",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.chimeWarning1, .4, 2., 3.),
},
EventName.pscmHandshaked: {
ET.WARNING: Alert(
"PSCM has successfully handshaked",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .0, .0, .1),
},
EventName.pscmLostHandshake: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("PSCM APA Handshake Lost.")
},
# ********** events that affect controls state transitions **********
EventName.pcmEnable: {
ET.ENABLE: EngagementAlert(AudibleAlert.chimeEngage),
},
EventName.buttonEnable: {
ET.ENABLE: EngagementAlert(AudibleAlert.chimeEngage),
},
EventName.pcmDisable: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.chimeDisengage),
},
EventName.buttonCancel: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.chimeDisengage),
},
EventName.brakeHold: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.chimeDisengage),
ET.NO_ENTRY: NoEntryAlert("Brake Hold Active"),
},
EventName.parkBrake: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.chimeDisengage),
ET.NO_ENTRY: NoEntryAlert("Park Brake Engaged"),
},
EventName.pedalPressed: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.chimeDisengage),
ET.NO_ENTRY: NoEntryAlert("Pedal Pressed During Attempt",
visual_alert=VisualAlert.brakePressed),
},
EventName.wrongCarMode: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.chimeDisengage),
ET.NO_ENTRY: wrong_car_mode_alert,
},
EventName.wrongCruiseMode: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.chimeDisengage),
ET.NO_ENTRY: NoEntryAlert("Enable Adaptive Cruise"),
},
EventName.steerTempUnavailable: {
ET.WARNING: Alert(
"TAKE CONTROL",
"Steering Temporarily Unavailable",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.chimeWarning1, .4, 2., 3.),
ET.NO_ENTRY: NoEntryAlert("Steering Temporarily Unavailable",
duration_hud_alert=0.),
},
EventName.outOfSpace: {
ET.PERMANENT: Alert(
"Out of Storage",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
ET.NO_ENTRY: NoEntryAlert("Out of Storage Space",
duration_hud_alert=0.),
},
EventName.belowEngageSpeed: {
ET.NO_ENTRY: NoEntryAlert("Speed Too Low"),
},
EventName.sensorDataInvalid: {
ET.PERMANENT: Alert(
"No Data from Device Sensors",
"Reboot your Device",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2, creation_delay=1.),
ET.NO_ENTRY: NoEntryAlert("No Data from Device Sensors"),
},
EventName.noGps: {
ET.PERMANENT: no_gps_alert,
},
EventName.soundsUnavailable: {
ET.PERMANENT: NormalPermanentAlert("Speaker not found", "Reboot your Device"),
ET.NO_ENTRY: NoEntryAlert("Speaker not found"),
},
EventName.tooDistracted: {
ET.NO_ENTRY: NoEntryAlert("Distraction Level Too High"),
},
EventName.overheat: {
ET.PERMANENT: Alert(
"System Overheated",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
ET.SOFT_DISABLE: SoftDisableAlert("System Overheated"),
ET.NO_ENTRY: NoEntryAlert("System Overheated"),
},
EventName.wrongGear: {
ET.SOFT_DISABLE: SoftDisableAlert("Gear not D"),
ET.NO_ENTRY: NoEntryAlert("Gear not D"),
},
EventName.calibrationInvalid: {
ET.PERMANENT: NormalPermanentAlert("Calibration Invalid", "Remount Device and Recalibrate"),
ET.SOFT_DISABLE: SoftDisableAlert("Calibration Invalid: Remount Device & Recalibrate"),
ET.NO_ENTRY: NoEntryAlert("Calibration Invalid: Remount Device & Recalibrate"),
},
EventName.calibrationIncomplete: {
ET.PERMANENT: calibration_incomplete_alert,
ET.SOFT_DISABLE: SoftDisableAlert("Calibration in Progress"),
ET.NO_ENTRY: NoEntryAlert("Calibration in Progress"),
},
EventName.doorOpen: {
ET.SOFT_DISABLE: SoftDisableAlert("Door Open"),
ET.NO_ENTRY: NoEntryAlert("Door Open"),
},
EventName.seatbeltNotLatched: {
ET.SOFT_DISABLE: SoftDisableAlert("Seatbelt Unlatched"),
ET.NO_ENTRY: NoEntryAlert("Seatbelt Unlatched"),
},
EventName.espDisabled: {
ET.SOFT_DISABLE: SoftDisableAlert("ESP Off"),
ET.NO_ENTRY: NoEntryAlert("ESP Off"),
},
EventName.lowBattery: {
ET.SOFT_DISABLE: SoftDisableAlert("Low Battery"),
ET.NO_ENTRY: NoEntryAlert("Low Battery"),
},
EventName.commIssue: {
ET.SOFT_DISABLE: SoftDisableAlert("Communication Issue between Processes"),
ET.NO_ENTRY: NoEntryAlert("Communication Issue between Processes",
audible_alert=AudibleAlert.chimeDisengage),
},
EventName.radarCommIssue: {
ET.SOFT_DISABLE: SoftDisableAlert("Radar Communication Issue"),
ET.NO_ENTRY: NoEntryAlert("Radar Communication Issue",
audible_alert=AudibleAlert.chimeDisengage),
},
EventName.radarCanError: {
ET.SOFT_DISABLE: SoftDisableAlert("Radar Error: Restart the Car"),
ET.NO_ENTRY: NoEntryAlert("Radar Error: Restart the Car"),
},
EventName.radarFault: {
ET.SOFT_DISABLE: SoftDisableAlert("Radar Error: Restart the Car"),
ET.NO_ENTRY : NoEntryAlert("Radar Error: Restart the Car"),
},
EventName.modeldLagging: {
ET.SOFT_DISABLE: SoftDisableAlert("Driving model lagging"),
ET.NO_ENTRY : NoEntryAlert("Driving model lagging"),
},
EventName.posenetInvalid: {
ET.SOFT_DISABLE: SoftDisableAlert("Model Output Uncertain"),
ET.NO_ENTRY: NoEntryAlert("Model Output Uncertain"),
},
EventName.deviceFalling: {
ET.SOFT_DISABLE: SoftDisableAlert("Device Fell Off Mount"),
ET.NO_ENTRY: NoEntryAlert("Device Fell Off Mount"),
},
EventName.lowMemory: {
ET.SOFT_DISABLE: SoftDisableAlert("Low Memory: Reboot Your Device"),
ET.PERMANENT: NormalPermanentAlert("Low Memory", "Reboot your Device"),
ET.NO_ENTRY : NoEntryAlert("Low Memory: Reboot Your Device",
audible_alert=AudibleAlert.chimeDisengage),
},
EventName.controlsFailed: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Controls Failed"),
ET.NO_ENTRY: NoEntryAlert("Controls Failed"),
},
EventName.controlsMismatch: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Controls Mismatch"),
},
EventName.canError: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("CAN Error: Check Connections"),
ET.PERMANENT: Alert(
"CAN Error: Check Connections",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 0., 0., .2, creation_delay=1.),
ET.NO_ENTRY: NoEntryAlert("CAN Error: Check Connections"),
},
EventName.steerUnavailable: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("LKAS Fault: Restart the Car"),
ET.PERMANENT: Alert(
"LKAS Fault: Restart the car to engage",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
ET.NO_ENTRY: NoEntryAlert("LKAS Fault: Restart the Car"),
},
EventName.brakeUnavailable: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Cruise Fault: Restart the Car"),
ET.PERMANENT: Alert(
"Cruise Fault: Restart the car to engage",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
ET.NO_ENTRY: NoEntryAlert("Cruise Fault: Restart the Car"),
},
EventName.reverseGear: {
ET.PERMANENT: Alert(
"Reverse\nGear",
"",
AlertStatus.normal, AlertSize.full,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, 0., 0., .2, creation_delay=0.5),
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Reverse Gear"),
ET.NO_ENTRY: NoEntryAlert("Reverse Gear"),
},
EventName.cruiseDisabled: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Cruise Is Off"),
},
EventName.plannerError: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Planner Solution Error"),
ET.NO_ENTRY: NoEntryAlert("Planner Solution Error"),
},
EventName.relayMalfunction: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Harness Malfunction"),
ET.PERMANENT: NormalPermanentAlert("Harness Malfunction", "Check Hardware"),
ET.NO_ENTRY: NoEntryAlert("Harness Malfunction"),
},
EventName.noTarget: {
ET.IMMEDIATE_DISABLE: Alert(
"openpilot Canceled",
"No close lead car",
AlertStatus.normal, AlertSize.mid,
Priority.HIGH, VisualAlert.none, AudibleAlert.chimeDisengage, .4, 2., 3.),
ET.NO_ENTRY : NoEntryAlert("No Close Lead Car"),
},
EventName.speedTooLow: {
ET.IMMEDIATE_DISABLE: Alert(
"openpilot Canceled",
"Speed too low",
AlertStatus.normal, AlertSize.mid,
Priority.HIGH, VisualAlert.none, AudibleAlert.chimeDisengage, .4, 2., 3.),
},
EventName.speedTooHigh: {
ET.WARNING: Alert(
"Speed Too High",
"Slow down to resume operation",
AlertStatus.normal, AlertSize.mid,
Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.none, 2.2, 3., 4.),
ET.NO_ENTRY: Alert(
"Speed Too High",
"Slow down to engage",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.chimeError, .4, 2., 3.),
},
# TODO: this is unclear, update check only happens offroad
EventName.internetConnectivityNeeded: {
ET.PERMANENT: NormalPermanentAlert("Connect to Internet", "An Update Check Is Required to Engage"),
ET.NO_ENTRY: NoEntryAlert("Connect to Internet",
audible_alert=AudibleAlert.chimeDisengage),
},
EventName.lowSpeedLockout: {
ET.PERMANENT: Alert(
"Cruise Fault: Restart the car to engage",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
ET.NO_ENTRY: NoEntryAlert("Cruise Fault: Restart the Car"),
},
}
| 32.935065
| 116
| 0.674211
|
f718a6e4efe0bc6650e570e12bb690e1b246fd8d
| 315
|
py
|
Python
|
data.py
|
thIYan-EsWar/Machine-Learning-Breast-Cancer-Prediction
|
349e6be13476dcfb602ab1e6f812bc464a7affc3
|
[
"Apache-2.0"
] | null | null | null |
data.py
|
thIYan-EsWar/Machine-Learning-Breast-Cancer-Prediction
|
349e6be13476dcfb602ab1e6f812bc464a7affc3
|
[
"Apache-2.0"
] | null | null | null |
data.py
|
thIYan-EsWar/Machine-Learning-Breast-Cancer-Prediction
|
349e6be13476dcfb602ab1e6f812bc464a7affc3
|
[
"Apache-2.0"
] | null | null | null |
from random import sample
with open('data.txt', 'r') as f:
    contents = f.readlines()
# sample() over the full length returns a shuffled copy of the lines
contents = sample(contents, len(contents))
with open('train_data.txt', 'w') as f:
    for content in contents[:601]:
        f.write(content)
with open('test_data.txt', 'w') as f:
    for content in contents[601:]:
        f.write(content)
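# Editor's sketch (not part of the original script): the same split done
# reproducibly, with a fixed seed and a split ratio instead of the hard-coded
# 601. The 0.8 ratio and the seed value are assumptions for illustration only.
#
#   from random import Random
#
#   rng = Random(42)                      # fixed seed -> repeatable shuffle
#   with open('data.txt', 'r') as f:
#       lines = f.readlines()
#   rng.shuffle(lines)                    # shuffle in place
#   split = int(len(lines) * 0.8)         # 80/20 train/test split
#   with open('train_data.txt', 'w') as f:
#       f.writelines(lines[:split])
#   with open('test_data.txt', 'w') as f:
#       f.writelines(lines[split:])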
| 39.375
| 50
| 0.698413
|
1ce464512b50d6415d7a79d192b5e04e8b94341c
| 1,560
|
py
|
Python
|
scripts/filter_design/iir_comparison.py
|
CyrilCadoux/dsp-labs
|
8ef53fccb87ad842051d9032d127a86c1172155f
|
[
"MIT"
] | 18
|
2019-08-19T13:00:36.000Z
|
2022-01-14T02:32:15.000Z
|
scripts/filter_design/iir_comparison.py
|
CyrilCadoux/dsp-labs
|
8ef53fccb87ad842051d9032d127a86c1172155f
|
[
"MIT"
] | 2
|
2018-12-25T18:01:03.000Z
|
2018-12-26T19:13:47.000Z
|
scripts/filter_design/iir_comparison.py
|
CyrilCadoux/dsp-labs
|
8ef53fccb87ad842051d9032d127a86c1172155f
|
[
"MIT"
] | 10
|
2018-12-05T07:18:48.000Z
|
2021-08-12T13:46:08.000Z
|
"""
Compare various IIR filters
"""
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
def freq2rad(freq, fs):
return freq * np.pi / (fs/2)
def rad2freq(rad, fs):
return rad * (fs/2) / np.pi
# MAIN PARAMETER
pole_coef = 0.95
fs = 16000
# prepare figure
ALPHA = 0.8
f_max = 4000
plt.figure()
# simple filter
b = np.array([1, -1])
w, h = signal.freqz(b)
plt.semilogx([rad2freq(rad, fs) for rad in w],
20 * np.log10(abs(h)),
label="simple (2-tap)",
alpha=ALPHA)
# First order single pole
b = np.array([1., -1.])
a = np.array([1, -1*pole_coef])
w, h = signal.freqz(b, a)
plt.semilogx([rad2freq(rad, fs) for rad in w],
20 * np.log10(abs(h)),
label="1-stage",
alpha=ALPHA)
# (2nd order)
b = np.array([1., -2., 1.])
a = np.array([1, -2*pole_coef, pole_coef*pole_coef])
w, h = signal.freqz(b, a)
plt.semilogx([rad2freq(rad, fs) for rad in w],
20 * np.log10(abs(h)),
label="2-stage",
alpha=ALPHA)
# (3rd order)
b = np.array([1., -3., 3., -1.])
a = np.array([1, -3*pole_coef, 3*pole_coef*pole_coef, -1*pole_coef**3])
w, h = signal.freqz(b, a)
plt.semilogx([rad2freq(rad, fs) for rad in w],
20 * np.log10(abs(h)),
label="3-stage",
alpha=ALPHA)
plt.margins(0, 0.1)
plt.title("Frequency response for varying num. of stages (log scale)")
plt.xlabel("Frequency [Hz]")
plt.ylabel("Magnitude [dB]")
plt.grid()
plt.legend(loc="lower right")
plt.tight_layout()
plt.show()
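# Usage sketch (not in the original script): applying the 1-stage DC-blocking
# filter plotted above to a test signal with scipy.signal.lfilter. The test
# signal (a 100 Hz tone riding on a constant offset) is an assumption chosen
# only to illustrate the effect.
#
#   t = np.arange(fs) / fs                              # one second of samples
#   x = 0.5 + np.sin(2 * np.pi * 100 * t)               # DC offset + 100 Hz tone
#   y = signal.lfilter([1., -1.], [1., -pole_coef], x)  # b, a of the 1-stage filter
#   print(x.mean(), y.mean())                           # mean is pushed toward zero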
| 20.8
| 71
| 0.582051
|
b9b596c05eca9041a0f0dafbb59bb40adf887713
| 1,561
|
py
|
Python
|
church/models/team.py
|
tyrchen/church
|
71f7b68237121b72d5a6eece366ccda00fa9c7cf
|
[
"MIT"
] | 1
|
2016-07-29T09:32:05.000Z
|
2016-07-29T09:32:05.000Z
|
church/models/team.py
|
tyrchen/church
|
71f7b68237121b72d5a6eece366ccda00fa9c7cf
|
[
"MIT"
] | 31
|
2018-07-22T09:13:04.000Z
|
2019-10-20T05:57:12.000Z
|
church/models/team.py
|
tyrchen/church
|
71f7b68237121b72d5a6eece366ccda00fa9c7cf
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
from django.db import models
import logging
from django.utils.text import slugify
from django_extensions.db.fields import CreationDateTimeField, ModificationDateTimeField
import requests
from settings import API_SERVER
__author__ = 'tchen'
logger = logging.getLogger(__name__)
class Team(models.Model):
class Meta:
app_label = 'church'
db_table = 'church_team'
verbose_name = 'Team'
ordering = ['created']
name = models.CharField('Team Name', max_length=24, unique=True)
slug = models.CharField('Team Slug', max_length=24, unique=True)
members = models.CharField('Team Members', max_length=2048, default='', help_text='Please enter member alias, '
'seperated by comma')
created = CreationDateTimeField()
updated = ModificationDateTimeField()
def update_remote_team(self):
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
url = API_SERVER + '/directory/teams/%s.json' % self.slug
data = {'name': self.name, 'members': self.members}
r = requests.post(url, data=json.dumps(data), headers=headers)
return r
def save(self, force_insert=False, force_update=False, using=None,
update_fields=None):
self.slug = slugify(self.name)
self.update_remote_team()
return super(Team, self).save(force_insert, force_update, using, update_fields)
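# Usage sketch (illustrative only; the field values are made up): saving a Team
# slugifies its name and pushes the membership to the remote directory API
# before the row is stored locally, e.g.
#
#   team = Team(name='Worship Team', members='alice, bob')
#   team.save()   # slug becomes 'worship-team' and a POST is sent to
#                 # API_SERVER + '/directory/teams/worship-team.json'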
| 36.302326
| 115
| 0.658552
|
00f42aa3395a7c7d3eb874161a329d54aeaa2f3f
| 3,312
|
py
|
Python
|
sdk/keyvault/azure-keyvault-secrets/tests/perfstress_tests/list_secrets.py
|
praveenkuttappan/azure-sdk-for-python
|
4b79413667b7539750a6c7dde15737013a3d4bd5
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/keyvault/azure-keyvault-secrets/tests/perfstress_tests/list_secrets.py
|
v-xuto/azure-sdk-for-python
|
9c6296d22094c5ede410bc83749e8df8694ccacc
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/keyvault/azure-keyvault-secrets/tests/perfstress_tests/list_secrets.py
|
v-xuto/azure-sdk-for-python
|
9c6296d22094c5ede410bc83749e8df8694ccacc
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import asyncio
from azure_devtools.perfstress_tests import PerfStressTest
from azure.identity import DefaultAzureCredential
from azure.identity.aio import DefaultAzureCredential as AsyncDefaultAzureCredential
from azure.keyvault.secrets import SecretClient
from azure.keyvault.secrets.aio import SecretClient as AsyncSecretClient
class ListSecretsTest(PerfStressTest):
def __init__(self, arguments):
super().__init__(arguments)
# Auth configuration
self.credential = DefaultAzureCredential()
self.async_credential = AsyncDefaultAzureCredential()
# Create clients
vault_url = self.get_from_env("AZURE_KEYVAULT_URL")
self.client = SecretClient(vault_url, self.credential, **self._client_kwargs)
self.async_client = AsyncSecretClient(vault_url, self.async_credential, **self._client_kwargs)
self.secret_names = ["livekvtestlistperfsecret{}".format(i) for i in range(self.args.count)]
async def global_setup(self):
"""The global setup is run only once."""
# Validate that vault contains 0 secrets (including soft-deleted secrets), since additional secrets
# (including soft-deleted) impact performance.
async for secret in self.async_client.list_properties_of_secrets():
raise Exception("KeyVault %s must contain 0 secrets (including soft-deleted) before starting perf test" \
% self.async_client.vault_url)
async for secret in self.async_client.list_deleted_secrets():
raise Exception("KeyVault %s must contain 0 secrets (including soft-deleted) before starting perf test" \
% self.async_client.vault_url)
await super().global_setup()
create = [self.async_client.set_secret(name, "secret-value") for name in self.secret_names]
await asyncio.wait(create)
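        # Note (editor's sketch, not in the original test): newer Python
        # versions deprecate passing bare coroutines to asyncio.wait(), so a
        # forward-compatible spelling of this step would be
        #   await asyncio.gather(*create)
        # which awaits all of the set_secret() coroutines directly.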
async def global_cleanup(self):
"""The global cleanup is run only once."""
delete = [self.async_client.delete_secret(name) for name in self.secret_names]
await asyncio.wait(delete)
purge = [self.async_client.purge_deleted_secret(name) for name in self.secret_names]
await asyncio.wait(purge)
await super().global_cleanup()
async def close(self):
"""This is run after cleanup."""
await self.async_client.close()
await self.async_credential.close()
await super().close()
def run_sync(self):
"""The synchronous perf test."""
secret_properties = self.client.list_properties_of_secrets()
# enumerate secrets to exercise paging code
list(secret_properties)
async def run_async(self):
"""The asynchronous perf test."""
secret_properties = self.async_client.list_properties_of_secrets()
# enumerate secrets to exercise paging code
async for _ in secret_properties:
pass
@staticmethod
def add_arguments(parser):
super(ListSecretsTest, ListSecretsTest).add_arguments(parser)
parser.add_argument(
'--count', nargs='?', type=int, help='Number of secrets to list. Defaults to 10', default=10
)
| 43.012987
| 117
| 0.684481
|
29fb51288b27bf00c83a5bb80680ac601aaab169
| 2,106
|
py
|
Python
|
prereise/gather/winddata/hrrr/tests/test_grib.py
|
SEL-Columbia/PreREISE-building
|
527cc02e6867a879c7e68e8e3fc5dc843de20580
|
[
"MIT"
] | 15
|
2021-03-02T11:54:27.000Z
|
2022-02-16T13:01:40.000Z
|
prereise/gather/winddata/hrrr/tests/test_grib.py
|
SEL-Columbia/PreREISE-building
|
527cc02e6867a879c7e68e8e3fc5dc843de20580
|
[
"MIT"
] | 90
|
2021-01-25T19:02:14.000Z
|
2022-03-31T20:27:28.000Z
|
prereise/gather/winddata/hrrr/tests/test_grib.py
|
SEL-Columbia/PreREISE-building
|
527cc02e6867a879c7e68e8e3fc5dc843de20580
|
[
"MIT"
] | 15
|
2021-02-08T23:28:21.000Z
|
2022-01-24T21:59:14.000Z
|
from prereise.gather.winddata.hrrr.grib import GribRecordInfo
GRIB_RECORD_INFO_ARRAY = [
"52:38983378:d=2016010121:UGRD:10 m above ground:anl:",
"53:40192462:d=2016010121:VGRD:10 m above ground:anl:",
]
def test_grib_info_from_string():
g = GribRecordInfo.from_string(GRIB_RECORD_INFO_ARRAY[0])
expected = GribRecordInfo(
message_number="52",
beginning_byte="38983378",
ending_byte=None,
initialization_date="d=2016010121",
variable="UGRD",
level="10 m above ground",
forecast="anl",
)
assert g == expected
def test_grib_info_from_string_with_next_string():
g = GribRecordInfo.from_string(*GRIB_RECORD_INFO_ARRAY)
expected = GribRecordInfo(
message_number="52",
beginning_byte="38983378",
ending_byte="40192461",
initialization_date="d=2016010121",
variable="UGRD",
level="10 m above ground",
forecast="anl",
)
assert g == expected
def test_grib_info_generate_grib_record_information_list():
g_list = GribRecordInfo.generate_grib_record_information_list(
GRIB_RECORD_INFO_ARRAY, [0, 1]
)
g = g_list[0]
expected = GribRecordInfo(
message_number="52",
beginning_byte="38983378",
ending_byte="40192461",
initialization_date="d=2016010121",
variable="UGRD",
level="10 m above ground",
forecast="anl",
)
assert g == expected
g = g_list[1]
expected = GribRecordInfo(
message_number="53",
beginning_byte="40192462",
ending_byte=None,
initialization_date="d=2016010121",
variable="VGRD",
level="10 m above ground",
forecast="anl",
)
assert g == expected
def test_grib_info_byte_range_header_string():
g = GribRecordInfo(None, "10", "20", None, None, None, None)
assert g.byte_range_header_string() == "10-20"
def test_grib_info_byte_range_header_string_no_end_byte():
g = GribRecordInfo(None, "10", None, None, None, None, None)
assert g.byte_range_header_string() == "10-"
| 28.459459
| 66
| 0.65717
|
ffb436bf29f81a4c30ef4ee52d02779e940ce545
| 105,234
|
py
|
Python
|
src/parsedatetime/__init__.py
|
binbashar/terraform-certbot-lambda
|
a3fa1e715605ff9754d7a915c472a9baf64e4aa1
|
[
"MIT"
] | 4
|
2021-02-20T06:00:01.000Z
|
2022-01-07T20:37:37.000Z
|
src/parsedatetime/__init__.py
|
binbashar/terraform-certbot-lambda
|
a3fa1e715605ff9754d7a915c472a9baf64e4aa1
|
[
"MIT"
] | 2
|
2020-04-30T13:03:09.000Z
|
2021-05-05T10:20:15.000Z
|
src/parsedatetime/__init__.py
|
binbashar/terraform-certbot-lambda
|
a3fa1e715605ff9754d7a915c472a9baf64e4aa1
|
[
"MIT"
] | 5
|
2020-05-07T08:14:36.000Z
|
2022-03-24T15:15:08.000Z
|
# -*- coding: utf-8 -*-
#
# vim: sw=2 ts=2 sts=2
#
# Copyright 2004-2019 Mike Taylor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""parsedatetime
Parse human-readable date/time text.
Requires Python 2.7 or later
"""
from __future__ import with_statement, absolute_import, unicode_literals
import re
import time
import logging
import warnings
import datetime
import calendar
import contextlib
import email.utils
from .pdt_locales import (locales as _locales,
get_icu, load_locale)
from .context import pdtContext, pdtContextStack
from .warns import pdt20DeprecationWarning
__author__ = 'Mike Taylor'
__email__ = 'bear@bear.im'
__copyright__ = 'Copyright (c) 2017 Mike Taylor'
__license__ = 'Apache License 2.0'
__version__ = '2.5'
__url__ = 'https://github.com/bear/parsedatetime'
__download_url__ = 'https://pypi.python.org/pypi/parsedatetime'
__description__ = 'Parse human-readable date/time text.'
# as a library, do *not* setup logging
# see docs.python.org/2/howto/logging.html#configuring-logging-for-a-library
# Set default logging handler to avoid "No handler found" warnings.
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
log = logging.getLogger(__name__)
log.addHandler(NullHandler())
debug = False
pdtLocales = dict([(x, load_locale(x)) for x in _locales])
# Copied from feedparser.py
# Universal Feedparser
# Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved.
# Originally a def inside of _parse_date_w3dtf()
def _extract_date(m):
year = int(m.group('year'))
if year < 100:
year = 100 * int(time.gmtime()[0] / 100) + int(year)
if year < 1000:
return 0, 0, 0
julian = m.group('julian')
if julian:
julian = int(julian)
month = julian / 30 + 1
day = julian % 30 + 1
jday = None
while jday != julian:
t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
jday = time.gmtime(t)[-2]
diff = abs(jday - julian)
if jday > julian:
if diff < day:
day = day - diff
else:
month = month - 1
day = 31
elif jday < julian:
if day + diff < 28:
day = day + diff
else:
month = month + 1
return year, month, day
month = m.group('month')
day = 1
if month is None:
month = 1
else:
month = int(month)
day = m.group('day')
if day:
day = int(day)
else:
day = 1
return year, month, day
# Copied from feedparser.py
# Universal Feedparser
# Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved.
# Originally a def inside of _parse_date_w3dtf()
def _extract_time(m):
if not m:
return 0, 0, 0
hours = m.group('hours')
if not hours:
return 0, 0, 0
hours = int(hours)
minutes = int(m.group('minutes'))
seconds = m.group('seconds')
if seconds:
seconds = seconds.replace(',', '.').split('.', 1)[0]
seconds = int(seconds)
else:
seconds = 0
return hours, minutes, seconds
def _pop_time_accuracy(m, ctx):
if not m:
return
if m.group('hours'):
ctx.updateAccuracy(ctx.ACU_HOUR)
if m.group('minutes'):
ctx.updateAccuracy(ctx.ACU_MIN)
if m.group('seconds'):
ctx.updateAccuracy(ctx.ACU_SEC)
# Copied from feedparser.py
# Universal Feedparser
# Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved.
# Modified to return a tuple instead of mktime
#
# Original comment:
# W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by
# Drake and licensed under the Python license. Removed all range checking
# for month, day, hour, minute, and second, since mktime will normalize
# these later
def __closure_parse_date_w3dtf():
# the __extract_date and __extract_time methods were
# copied-out so they could be used by my code --bear
def __extract_tzd(m):
'''Return the Time Zone Designator as an offset in seconds from UTC.'''
if not m:
return 0
tzd = m.group('tzd')
if not tzd:
return 0
if tzd == 'Z':
return 0
hours = int(m.group('tzdhours'))
minutes = m.group('tzdminutes')
if minutes:
minutes = int(minutes)
else:
minutes = 0
offset = (hours * 60 + minutes) * 60
if tzd[0] == '+':
return -offset
return offset
def _parse_date_w3dtf(dateString):
m = __datetime_rx.match(dateString)
if m is None or m.group() != dateString:
return
return _extract_date(m) + _extract_time(m) + (0, 0, 0)
__date_re = (r'(?P<year>\d\d\d\d)'
r'(?:(?P<dsep>-|)'
r'(?:(?P<julian>\d\d\d)'
r'|(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?))?')
__tzd_re = r'(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)'
# __tzd_rx = re.compile(__tzd_re)
__time_re = (r'(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
r'(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?' + __tzd_re)
__datetime_re = '%s(?:T%s)?' % (__date_re, __time_re)
__datetime_rx = re.compile(__datetime_re)
return _parse_date_w3dtf
_parse_date_w3dtf = __closure_parse_date_w3dtf()
del __closure_parse_date_w3dtf
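# Worked example (illustrative): _parse_date_w3dtf('2016-01-01T12:30:45Z')
# matches the date and time groups with a 'Z' zone designator and returns the
# 9-tuple (2016, 1, 1, 12, 30, 45, 0, 0, 0); a string that the regex does not
# match in full returns None.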
_monthnames = set([
'jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul',
'aug', 'sep', 'oct', 'nov', 'dec',
'january', 'february', 'march', 'april', 'may', 'june', 'july',
'august', 'september', 'october', 'november', 'december'])
_daynames = set(['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun'])
# Copied from feedparser.py
# Universal Feedparser
# Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved.
# Modified to return a tuple instead of mktime
def _parse_date_rfc822(dateString):
'''Parse an RFC822, RFC1123, RFC2822, or asctime-style date'''
data = dateString.split()
if data[0][-1] in (',', '.') or data[0].lower() in _daynames:
del data[0]
if len(data) == 4:
s = data[3]
s = s.split('+', 1)
if len(s) == 2:
data[3:] = s
else:
data.append('')
dateString = " ".join(data)
if len(data) < 5:
dateString += ' 00:00:00 GMT'
return email.utils.parsedate_tz(dateString)
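# Worked example (illustrative):
# _parse_date_rfc822('Mon, 20 Nov 1995 19:12:08 -0500') strips the day name
# and delegates to email.utils.parsedate_tz, yielding a 10-tuple whose last
# element is the UTC offset in seconds (-18000 here).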
# rfc822.py defines several time zones, but we define some extra ones.
# 'ET' is equivalent to 'EST', etc.
# _additional_timezones = {'AT': -400, 'ET': -500,
# 'CT': -600, 'MT': -700,
# 'PT': -800}
# email.utils._timezones.update(_additional_timezones)
VERSION_FLAG_STYLE = 1
VERSION_CONTEXT_STYLE = 2
class Calendar(object):
"""
    A collection of routines to input, parse and manipulate dates and times.
The text can either be 'normal' date values or it can be human readable.
"""
def __init__(self, constants=None, version=VERSION_FLAG_STYLE):
"""
Default constructor for the L{Calendar} class.
@type constants: object
@param constants: Instance of the class L{Constants}
@type version: integer
@param version: Default style version of current Calendar instance.
Valid value can be 1 (L{VERSION_FLAG_STYLE}) or
2 (L{VERSION_CONTEXT_STYLE}). See L{parse()}.
@rtype: object
@return: L{Calendar} instance
"""
# if a constants reference is not included, use default
if constants is None:
self.ptc = Constants()
else:
self.ptc = constants
self.version = version
if version == VERSION_FLAG_STYLE:
warnings.warn(
'Flag style will be deprecated in parsedatetime 2.0. '
'Instead use the context style by instantiating `Calendar()` '
'with argument `version=parsedatetime.VERSION_CONTEXT_STYLE`.',
pdt20DeprecationWarning)
self._ctxStack = pdtContextStack()
@contextlib.contextmanager
def context(self):
ctx = pdtContext()
self._ctxStack.push(ctx)
yield ctx
ctx = self._ctxStack.pop()
if not self._ctxStack.isEmpty():
self.currentContext.update(ctx)
@property
def currentContext(self):
return self._ctxStack.last()
def _convertUnitAsWords(self, unitText):
"""
Converts text units into their number value.
@type unitText: string
@param unitText: number text to convert
@rtype: integer
@return: numerical value of unitText
"""
word_list, a, b = re.split(r"[,\s-]+", unitText), 0, 0
for word in word_list:
x = self.ptc.small.get(word)
if x is not None:
a += x
elif word == "hundred":
a *= 100
else:
x = self.ptc.magnitude.get(word)
if x is not None:
b += a * x
a = 0
elif word in self.ptc.ignore:
pass
else:
raise Exception("Unknown number: " + word)
return a + b
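        # Worked example (illustrative, assuming the locale's English number
        # tables): "two hundred five" accumulates a=2, then a*=100 -> 200,
        # then a+=5 -> 205; "three thousand" moves 3*1000 into b, so a + b
        # comes out as 3000. Words outside the tables and the ignore list
        # raise an Exception.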
def _buildTime(self, source, quantity, modifier, units):
"""
Take C{quantity}, C{modifier} and C{unit} strings and convert them
        into values. After converting, calculate the time and return the
adjusted sourceTime.
@type source: time
@param source: time to use as the base (or source)
@type quantity: string
@param quantity: quantity string
@type modifier: string
@param modifier: how quantity and units modify the source time
@type units: string
@param units: unit of the quantity (i.e. hours, days, months, etc)
@rtype: struct_time
@return: C{struct_time} of the calculated time
"""
ctx = self.currentContext
debug and log.debug('_buildTime: [%s][%s][%s]',
quantity, modifier, units)
if source is None:
source = time.localtime()
if quantity is None:
quantity = ''
else:
quantity = quantity.strip()
qty = self._quantityToReal(quantity)
if modifier in self.ptc.Modifiers:
qty = qty * self.ptc.Modifiers[modifier]
if units is None or units == '':
units = 'dy'
# plurals are handled by regex's (could be a bug tho)
(yr, mth, dy, hr, mn, sec, _, _, _) = source
start = datetime.datetime(yr, mth, dy, hr, mn, sec)
target = start
# realunit = next((key for key, values in self.ptc.units.items()
# if any(imap(units.__contains__, values))), None)
realunit = units
for key, values in self.ptc.units.items():
if units in values:
realunit = key
break
debug and log.debug('units %s --> realunit %s (qty=%s)',
units, realunit, qty)
try:
if realunit in ('years', 'months'):
target = self.inc(start, **{realunit[:-1]: qty})
elif realunit in ('days', 'hours', 'minutes', 'seconds', 'weeks'):
delta = datetime.timedelta(**{realunit: qty})
target = start + delta
except OverflowError:
            # OverflowError is raised when target.year is larger than 9999
pass
else:
ctx.updateAccuracy(realunit)
return target.timetuple()
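        # Worked example (illustrative): quantity='5', modifier='' and
        # units='days' map to realunit 'days' and advance the source time by
        # datetime.timedelta(days=5.0); a negative value in self.ptc.Modifiers
        # would move it backwards instead.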
def parseDate(self, dateString, sourceTime=None):
"""
Parse short-form date strings::
'05/28/2006' or '04.21'
@type dateString: string
@param dateString: text to convert to a C{datetime}
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: struct_time
@return: calculated C{struct_time} value of dateString
"""
if sourceTime is None:
yr, mth, dy, hr, mn, sec, wd, yd, isdst = time.localtime()
else:
yr, mth, dy, hr, mn, sec, wd, yd, isdst = sourceTime
# values pulled from regex's will be stored here and later
# assigned to mth, dy, yr based on information from the locale
# -1 is used as the marker value because we want zero values
# to be passed thru so they can be flagged as errors later
v1 = -1
v2 = -1
v3 = -1
accuracy = []
s = dateString
m = self.ptc.CRE_DATE2.search(s)
if m is not None:
index = m.start()
v1 = int(s[:index])
s = s[index + 1:]
m = self.ptc.CRE_DATE2.search(s)
if m is not None:
index = m.start()
v2 = int(s[:index])
v3 = int(s[index + 1:])
else:
v2 = int(s.strip())
v = [v1, v2, v3]
d = {'m': mth, 'd': dy, 'y': yr}
# yyyy/mm/dd format
dp_order = self.ptc.dp_order if v1 <= 31 else ['y', 'm', 'd']
for i in range(0, 3):
n = v[i]
c = dp_order[i]
if n >= 0:
d[c] = n
accuracy.append({'m': pdtContext.ACU_MONTH,
'd': pdtContext.ACU_DAY,
'y': pdtContext.ACU_YEAR}[c])
# if the year is not specified and the date has already
# passed, increment the year
if v3 == -1 and ((mth > d['m']) or (mth == d['m'] and dy > d['d'])):
yr = d['y'] + self.ptc.YearParseStyle
else:
yr = d['y']
mth = d['m']
dy = d['d']
# birthday epoch constraint
if yr < self.ptc.BirthdayEpoch:
yr += 2000
elif yr < 100:
yr += 1900
daysInCurrentMonth = self.ptc.daysInMonth(mth, yr)
debug and log.debug('parseDate: %s %s %s %s',
yr, mth, dy, daysInCurrentMonth)
with self.context() as ctx:
if mth > 0 and mth <= 12 and dy > 0 and \
dy <= daysInCurrentMonth:
sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
ctx.updateAccuracy(*accuracy)
else:
# return current time if date string is invalid
sourceTime = time.localtime()
return sourceTime
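        # Worked example (illustrative, assuming a US-style locale where
        # dp_order is ['m', 'd', 'y']): '05/28/2006' fills month=5, day=28,
        # year=2006, while '04.21' keeps the source year and bumps it by
        # YearParseStyle when April 21st has already passed.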
def parseDateText(self, dateString, sourceTime=None):
"""
Parse long-form date strings::
'May 31st, 2006'
'Jan 1st'
'July 2006'
@type dateString: string
@param dateString: text to convert to a datetime
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: struct_time
@return: calculated C{struct_time} value of dateString
"""
if sourceTime is None:
yr, mth, dy, hr, mn, sec, wd, yd, isdst = time.localtime()
else:
yr, mth, dy, hr, mn, sec, wd, yd, isdst = sourceTime
currentMth = mth
currentDy = dy
accuracy = []
debug and log.debug('parseDateText currentMth %s currentDy %s',
mth, dy)
s = dateString.lower()
m = self.ptc.CRE_DATE3.search(s)
mth = m.group('mthname')
mth = self.ptc.MonthOffsets[mth]
accuracy.append('month')
if m.group('day') is not None:
dy = int(m.group('day'))
accuracy.append('day')
else:
dy = 1
if m.group('year') is not None:
yr = int(m.group('year'))
accuracy.append('year')
# birthday epoch constraint
if yr < self.ptc.BirthdayEpoch:
yr += 2000
elif yr < 100:
yr += 1900
elif (mth < currentMth) or (mth == currentMth and dy < currentDy):
# if that day and month have already passed in this year,
# then increment the year by 1
yr += self.ptc.YearParseStyle
with self.context() as ctx:
if dy > 0 and dy <= self.ptc.daysInMonth(mth, yr):
sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
ctx.updateAccuracy(*accuracy)
else:
# Return current time if date string is invalid
sourceTime = time.localtime()
debug and log.debug('parseDateText returned '
'mth %d dy %d yr %d sourceTime %s',
mth, dy, yr, sourceTime)
return sourceTime
def evalRanges(self, datetimeString, sourceTime=None):
"""
Evaluate the C{datetimeString} text and determine if
it represents a date or time range.
@type datetimeString: string
@param datetimeString: datetime text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
@return: tuple of: start datetime, end datetime and the invalid flag
"""
rangeFlag = retFlag = 0
startStr = endStr = ''
s = datetimeString.strip().lower()
if self.ptc.rangeSep in s:
s = s.replace(self.ptc.rangeSep, ' %s ' % self.ptc.rangeSep)
s = s.replace(' ', ' ')
for cre, rflag in [(self.ptc.CRE_TIMERNG1, 1),
(self.ptc.CRE_TIMERNG2, 2),
(self.ptc.CRE_TIMERNG4, 7),
(self.ptc.CRE_TIMERNG3, 3),
(self.ptc.CRE_DATERNG1, 4),
(self.ptc.CRE_DATERNG2, 5),
(self.ptc.CRE_DATERNG3, 6)]:
m = cre.search(s)
if m is not None:
rangeFlag = rflag
break
debug and log.debug('evalRanges: rangeFlag = %s [%s]', rangeFlag, s)
if m is not None:
if (m.group() != s):
# capture remaining string
parseStr = m.group()
chunk1 = s[:m.start()]
chunk2 = s[m.end():]
s = '%s %s' % (chunk1, chunk2)
sourceTime, ctx = self.parse(s, sourceTime,
VERSION_CONTEXT_STYLE)
if not ctx.hasDateOrTime:
sourceTime = None
else:
parseStr = s
if rangeFlag in (1, 2):
m = re.search(self.ptc.rangeSep, parseStr)
startStr = parseStr[:m.start()]
endStr = parseStr[m.start() + 1:]
retFlag = 2
elif rangeFlag in (3, 7):
m = re.search(self.ptc.rangeSep, parseStr)
# capturing the meridian from the end time
if self.ptc.usesMeridian:
ampm = re.search(self.ptc.am[0], parseStr)
# appending the meridian to the start time
if ampm is not None:
startStr = parseStr[:m.start()] + self.ptc.meridian[0]
else:
startStr = parseStr[:m.start()] + self.ptc.meridian[1]
else:
startStr = parseStr[:m.start()]
endStr = parseStr[m.start() + 1:]
retFlag = 2
elif rangeFlag == 4:
m = re.search(self.ptc.rangeSep, parseStr)
startStr = parseStr[:m.start()]
endStr = parseStr[m.start() + 1:]
retFlag = 1
elif rangeFlag == 5:
m = re.search(self.ptc.rangeSep, parseStr)
endStr = parseStr[m.start() + 1:]
# capturing the year from the end date
date = self.ptc.CRE_DATE3.search(endStr)
endYear = date.group('year')
# appending the year to the start date if the start date
# does not have year information and the end date does.
# eg : "Aug 21 - Sep 4, 2007"
if endYear is not None:
startStr = (parseStr[:m.start()]).strip()
date = self.ptc.CRE_DATE3.search(startStr)
startYear = date.group('year')
if startYear is None:
startStr = startStr + ', ' + endYear
else:
startStr = parseStr[:m.start()]
retFlag = 1
elif rangeFlag == 6:
m = re.search(self.ptc.rangeSep, parseStr)
startStr = parseStr[:m.start()]
# capturing the month from the start date
mth = self.ptc.CRE_DATE3.search(startStr)
mth = mth.group('mthname')
# appending the month name to the end date
endStr = mth + parseStr[(m.start() + 1):]
retFlag = 1
else:
# if range is not found
startDT = endDT = time.localtime()
if retFlag:
startDT, sctx = self.parse(startStr, sourceTime,
VERSION_CONTEXT_STYLE)
endDT, ectx = self.parse(endStr, sourceTime,
VERSION_CONTEXT_STYLE)
if not sctx.hasDateOrTime or not ectx.hasDateOrTime:
retFlag = 0
return startDT, endDT, retFlag
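        # Worked example (illustrative): "Aug 21 - Sep 4, 2007" hits
        # rangeFlag 5, the trailing year is copied onto the start date
        # ("Aug 21, 2007"), and both halves are parsed separately; retFlag 1
        # marks a date range, 2 a time range, and 0 an unrecognised range.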
def _CalculateDOWDelta(self, wd, wkdy, offset, style, currentDayStyle):
"""
Based on the C{style} and C{currentDayStyle} determine what
day-of-week value is to be returned.
@type wd: integer
@param wd: day-of-week value for the current day
@type wkdy: integer
@param wkdy: day-of-week value for the parsed day
@type offset: integer
@param offset: offset direction for any modifiers (-1, 0, 1)
@type style: integer
@param style: normally the value
set in C{Constants.DOWParseStyle}
@type currentDayStyle: integer
@param currentDayStyle: normally the value
set in C{Constants.CurrentDOWParseStyle}
@rtype: integer
@return: calculated day-of-week
"""
diffBase = wkdy - wd
origOffset = offset
if offset == 2:
# no modifier is present.
# i.e. string to be parsed is just DOW
if wkdy * style > wd * style or \
currentDayStyle and wkdy == wd:
# wkdy located in current week
offset = 0
elif style in (-1, 1):
# wkdy located in last (-1) or next (1) week
offset = style
else:
# invalid style, or should raise error?
offset = 0
# offset = -1 means last week
# offset = 0 means current week
# offset = 1 means next week
diff = diffBase + 7 * offset
if style == 1 and diff < -7:
diff += 7
elif style == -1 and diff > 7:
diff -= 7
debug and log.debug("wd %s, wkdy %s, offset %d, "
"style %d, currentDayStyle %d",
wd, wkdy, origOffset, style, currentDayStyle)
return diff
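        # Worked example (illustrative, with style=1 and currentDayStyle
        # False): on a Tuesday (wd=1), "friday" (wkdy=4) with offset=2 (no
        # modifier) resolves to offset 0 because 4*1 > 1*1, giving
        # diff = 4 - 1 = 3 days ahead, while "monday" (wkdy=0) falls through
        # to offset=style and lands at diff = -1 + 7 = 6 days ahead.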
def _quantityToReal(self, quantity):
"""
Convert a quantity, either spelled-out or numeric, to a float
@type quantity: string
@param quantity: quantity to parse to float
        @rtype: float
        @return: the quantity as a float, defaulting to 0.0
"""
if not quantity:
return 1.0
try:
return float(quantity.replace(',', '.'))
except ValueError:
pass
try:
return float(self.ptc.numbers[quantity])
except KeyError:
pass
return 0.0
def _evalModifier(self, modifier, chunk1, chunk2, sourceTime):
"""
Evaluate the C{modifier} string and following text (passed in
as C{chunk1} and C{chunk2}) and if they match any known modifiers
calculate the delta and apply it to C{sourceTime}.
@type modifier: string
@param modifier: modifier text to apply to sourceTime
@type chunk1: string
@param chunk1: text chunk that preceded modifier (if any)
@type chunk2: string
@param chunk2: text chunk that followed modifier (if any)
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
@return: tuple of: remaining text and the modified sourceTime
"""
ctx = self.currentContext
offset = self.ptc.Modifiers[modifier]
if sourceTime is not None:
(yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
else:
(yr, mth, dy, hr, mn, sec, wd, yd, isdst) = time.localtime()
if self.ptc.StartTimeFromSourceTime:
startHour = hr
startMinute = mn
startSecond = sec
else:
startHour = self.ptc.StartHour
startMinute = 0
startSecond = 0
# capture the units after the modifier and the remaining
# string after the unit
m = self.ptc.CRE_REMAINING.search(chunk2)
if m is not None:
index = m.start() + 1
unit = chunk2[:m.start()]
chunk2 = chunk2[index:]
else:
unit = chunk2
chunk2 = ''
debug and log.debug("modifier [%s] chunk1 [%s] "
"chunk2 [%s] unit [%s]",
modifier, chunk1, chunk2, unit)
if unit in self.ptc.units['months']:
currentDaysInMonth = self.ptc.daysInMonth(mth, yr)
if offset == 0:
dy = currentDaysInMonth
sourceTime = (yr, mth, dy, startHour, startMinute,
startSecond, wd, yd, isdst)
elif offset == 2:
# if day is the last day of the month, calculate the last day
# of the next month
if dy == currentDaysInMonth:
dy = self.ptc.daysInMonth(mth + 1, yr)
start = datetime.datetime(yr, mth, dy, startHour,
startMinute, startSecond)
target = self.inc(start, month=1)
sourceTime = target.timetuple()
else:
start = datetime.datetime(yr, mth, 1, startHour,
startMinute, startSecond)
target = self.inc(start, month=offset)
sourceTime = target.timetuple()
ctx.updateAccuracy(ctx.ACU_MONTH)
elif unit in self.ptc.units['weeks']:
if offset == 0:
start = datetime.datetime(yr, mth, dy, 17, 0, 0)
target = start + datetime.timedelta(days=(4 - wd))
sourceTime = target.timetuple()
elif offset == 2:
start = datetime.datetime(yr, mth, dy, startHour,
startMinute, startSecond)
target = start + datetime.timedelta(days=7)
sourceTime = target.timetuple()
else:
start = datetime.datetime(yr, mth, dy, startHour,
startMinute, startSecond)
target = start + offset * datetime.timedelta(weeks=1)
sourceTime = target.timetuple()
ctx.updateAccuracy(ctx.ACU_WEEK)
elif unit in self.ptc.units['days']:
if offset == 0:
sourceTime = (yr, mth, dy, 17, 0, 0, wd, yd, isdst)
ctx.updateAccuracy(ctx.ACU_HALFDAY)
elif offset == 2:
start = datetime.datetime(yr, mth, dy, hr, mn, sec)
target = start + datetime.timedelta(days=1)
sourceTime = target.timetuple()
else:
start = datetime.datetime(yr, mth, dy, startHour,
startMinute, startSecond)
target = start + datetime.timedelta(days=offset)
sourceTime = target.timetuple()
ctx.updateAccuracy(ctx.ACU_DAY)
elif unit in self.ptc.units['hours']:
if offset == 0:
sourceTime = (yr, mth, dy, hr, 0, 0, wd, yd, isdst)
else:
start = datetime.datetime(yr, mth, dy, hr, 0, 0)
target = start + datetime.timedelta(hours=offset)
sourceTime = target.timetuple()
ctx.updateAccuracy(ctx.ACU_HOUR)
elif unit in self.ptc.units['years']:
if offset == 0:
sourceTime = (yr, 12, 31, hr, mn, sec, wd, yd, isdst)
elif offset == 2:
sourceTime = (yr + 1, mth, dy, hr, mn, sec, wd, yd, isdst)
else:
sourceTime = (yr + offset, 1, 1, startHour, startMinute,
startSecond, wd, yd, isdst)
ctx.updateAccuracy(ctx.ACU_YEAR)
elif modifier == 'eom':
dy = self.ptc.daysInMonth(mth, yr)
sourceTime = (yr, mth, dy, startHour, startMinute,
startSecond, wd, yd, isdst)
ctx.updateAccuracy(ctx.ACU_DAY)
elif modifier == 'eoy':
mth = 12
dy = self.ptc.daysInMonth(mth, yr)
sourceTime = (yr, mth, dy, startHour, startMinute,
startSecond, wd, yd, isdst)
ctx.updateAccuracy(ctx.ACU_MONTH)
elif self.ptc.CRE_WEEKDAY.match(unit):
m = self.ptc.CRE_WEEKDAY.match(unit)
debug and log.debug('CRE_WEEKDAY matched')
wkdy = m.group()
if modifier == 'eod':
ctx.updateAccuracy(ctx.ACU_HOUR)
# Calculate the upcoming weekday
sourceTime, subctx = self.parse(wkdy, sourceTime,
VERSION_CONTEXT_STYLE)
sTime = self.ptc.getSource(modifier, sourceTime)
if sTime is not None:
sourceTime = sTime
ctx.updateAccuracy(ctx.ACU_HALFDAY)
else:
# unless one of these modifiers is being applied to the
# day-of-week, we want to start with target as the day
# in the current week.
dowOffset = offset
relativeModifier = modifier not in ['this', 'next', 'last', 'prior', 'previous']
if relativeModifier:
dowOffset = 0
wkdy = self.ptc.WeekdayOffsets[wkdy]
diff = self._CalculateDOWDelta(
wd, wkdy, dowOffset, self.ptc.DOWParseStyle,
self.ptc.CurrentDOWParseStyle)
start = datetime.datetime(yr, mth, dy, startHour,
startMinute, startSecond)
target = start + datetime.timedelta(days=diff)
if chunk1 != '' and relativeModifier:
# consider "one day before thursday": we need to parse chunk1 ("one day")
# and apply according to the offset ("before"), rather than allowing the
# remaining parse step to apply "one day" without the offset direction.
t, subctx = self.parse(chunk1, sourceTime, VERSION_CONTEXT_STYLE)
if subctx.hasDateOrTime:
delta = time.mktime(t) - time.mktime(sourceTime)
target = start + datetime.timedelta(days=diff) + datetime.timedelta(seconds=delta * offset)
chunk1 = ''
sourceTime = target.timetuple()
ctx.updateAccuracy(ctx.ACU_DAY)
elif chunk1 == '' and chunk2 == '' and self.ptc.CRE_TIME.match(unit):
m = self.ptc.CRE_TIME.match(unit)
debug and log.debug('CRE_TIME matched')
(yr, mth, dy, hr, mn, sec, wd, yd, isdst), subctx = \
self.parse(unit, None, VERSION_CONTEXT_STYLE)
start = datetime.datetime(yr, mth, dy, hr, mn, sec)
target = start + datetime.timedelta(days=offset)
sourceTime = target.timetuple()
else:
# check if the remaining text is parsable and if so,
# use it as the base time for the modifier source time
debug and log.debug('check for modifications '
'to source time [%s] [%s]',
chunk1, unit)
unit = unit.strip()
if unit:
s = '%s %s' % (unit, chunk2)
t, subctx = self.parse(s, sourceTime, VERSION_CONTEXT_STYLE)
if subctx.hasDate: # working with dates
u = unit.lower()
if u in self.ptc.Months or \
u in self.ptc.shortMonths:
yr, mth, dy, hr, mn, sec, wd, yd, isdst = t
start = datetime.datetime(
yr, mth, dy, hr, mn, sec)
t = self.inc(start, year=offset).timetuple()
elif u in self.ptc.Weekdays:
t = t + datetime.timedelta(weeks=offset)
if subctx.hasDateOrTime:
sourceTime = t
chunk2 = ''
chunk1 = chunk1.strip()
# if the word after next is a number, the string is more than
# likely to be "next 4 hrs" which we will have to combine the
# units with the rest of the string
if chunk1:
try:
m = list(self.ptc.CRE_NUMBER.finditer(chunk1))[-1]
except IndexError:
pass
else:
qty = None
debug and log.debug('CRE_NUMBER matched')
qty = self._quantityToReal(m.group()) * offset
chunk1 = '%s%s%s' % (chunk1[:m.start()],
qty, chunk1[m.end():])
t, subctx = self.parse(chunk1, sourceTime,
VERSION_CONTEXT_STYLE)
chunk1 = ''
if subctx.hasDateOrTime:
sourceTime = t
debug and log.debug('looking for modifier %s', modifier)
sTime = self.ptc.getSource(modifier, sourceTime)
if sTime is not None:
debug and log.debug('modifier found in sources')
sourceTime = sTime
ctx.updateAccuracy(ctx.ACU_HALFDAY)
debug and log.debug('returning chunk = "%s %s" and sourceTime = %s',
chunk1, chunk2, sourceTime)
return '%s %s' % (chunk1, chunk2), sourceTime
def _evalDT(self, datetimeString, sourceTime):
"""
Calculate the datetime from known format like RFC822 or W3CDTF
Examples handled::
RFC822, W3CDTF formatted dates
HH:MM[:SS][ am/pm]
MM/DD/YYYY
DD MMMM YYYY
@type datetimeString: string
@param datetimeString: text to try and parse as more "traditional"
date/time text
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: datetime
@return: calculated C{struct_time} value or current C{struct_time}
if not parsed
"""
ctx = self.currentContext
s = datetimeString.strip()
# Given string date is a RFC822 date
if sourceTime is None:
sourceTime = _parse_date_rfc822(s)
debug and log.debug(
'attempt to parse as rfc822 - %s', str(sourceTime))
if sourceTime is not None:
(yr, mth, dy, hr, mn, sec, wd, yd, isdst, _) = sourceTime
ctx.updateAccuracy(ctx.ACU_YEAR, ctx.ACU_MONTH, ctx.ACU_DAY)
if hr != 0 and mn != 0 and sec != 0:
ctx.updateAccuracy(ctx.ACU_HOUR, ctx.ACU_MIN, ctx.ACU_SEC)
sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
# Given string date is a W3CDTF date
if sourceTime is None:
sourceTime = _parse_date_w3dtf(s)
if sourceTime is not None:
ctx.updateAccuracy(ctx.ACU_YEAR, ctx.ACU_MONTH, ctx.ACU_DAY,
ctx.ACU_HOUR, ctx.ACU_MIN, ctx.ACU_SEC)
if sourceTime is None:
sourceTime = time.localtime()
return sourceTime
def _evalUnits(self, datetimeString, sourceTime):
"""
Evaluate text passed by L{_partialParseUnits()}
"""
s = datetimeString.strip()
sourceTime = self._evalDT(datetimeString, sourceTime)
# Given string is a time string with units like "5 hrs 30 min"
modifier = '' # TODO
m = self.ptc.CRE_UNITS.search(s)
if m is not None:
units = m.group('units')
quantity = s[:m.start('units')]
sourceTime = self._buildTime(sourceTime, quantity, modifier, units)
return sourceTime
def _evalQUnits(self, datetimeString, sourceTime):
"""
Evaluate text passed by L{_partialParseQUnits()}
"""
s = datetimeString.strip()
sourceTime = self._evalDT(datetimeString, sourceTime)
# Given string is a time string with single char units like "5 h 30 m"
modifier = '' # TODO
m = self.ptc.CRE_QUNITS.search(s)
if m is not None:
units = m.group('qunits')
quantity = s[:m.start('qunits')]
sourceTime = self._buildTime(sourceTime, quantity, modifier, units)
return sourceTime
def _evalDateStr(self, datetimeString, sourceTime):
"""
Evaluate text passed by L{_partialParseDateStr()}
"""
s = datetimeString.strip()
sourceTime = self._evalDT(datetimeString, sourceTime)
# Given string is in the format "May 23rd, 2005"
debug and log.debug('checking for MMM DD YYYY')
return self.parseDateText(s, sourceTime)
def _evalDateStd(self, datetimeString, sourceTime):
"""
Evaluate text passed by L{_partialParseDateStd()}
"""
s = datetimeString.strip()
sourceTime = self._evalDT(datetimeString, sourceTime)
# Given string is in the format 07/21/2006
return self.parseDate(s, sourceTime)
def _evalDayStr(self, datetimeString, sourceTime):
"""
Evaluate text passed by L{_partialParseDaystr()}
"""
s = datetimeString.strip()
sourceTime = self._evalDT(datetimeString, sourceTime)
# Given string is a natural language date string like today, tomorrow..
(yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
try:
offset = self.ptc.dayOffsets[s]
except KeyError:
offset = 0
if self.ptc.StartTimeFromSourceTime:
startHour = hr
startMinute = mn
startSecond = sec
else:
startHour = self.ptc.StartHour
startMinute = 0
startSecond = 0
self.currentContext.updateAccuracy(pdtContext.ACU_DAY)
start = datetime.datetime(yr, mth, dy, startHour,
startMinute, startSecond)
target = start + datetime.timedelta(days=offset)
return target.timetuple()
def _evalWeekday(self, datetimeString, sourceTime):
"""
Evaluate text passed by L{_partialParseWeekday()}
"""
s = datetimeString.strip()
sourceTime = self._evalDT(datetimeString, sourceTime)
# Given string is a weekday
yr, mth, dy, hr, mn, sec, wd, yd, isdst = sourceTime
start = datetime.datetime(yr, mth, dy, hr, mn, sec)
wkdy = self.ptc.WeekdayOffsets[s]
        # both branches of the original wkdy > wd test were identical, so the
        # day-of-week delta is computed unconditionally
        qty = self._CalculateDOWDelta(wd, wkdy, 2,
                                      self.ptc.DOWParseStyle,
                                      self.ptc.CurrentDOWParseStyle)
self.currentContext.updateAccuracy(pdtContext.ACU_DAY)
target = start + datetime.timedelta(days=qty)
return target.timetuple()
def _evalTimeStr(self, datetimeString, sourceTime):
"""
Evaluate text passed by L{_partialParseTimeStr()}
"""
s = datetimeString.strip()
sourceTime = self._evalDT(datetimeString, sourceTime)
if s in self.ptc.re_values['now']:
self.currentContext.updateAccuracy(pdtContext.ACU_NOW)
else:
# Given string is a natural language time string like
# lunch, midnight, etc
sTime = self.ptc.getSource(s, sourceTime)
if sTime:
sourceTime = sTime
self.currentContext.updateAccuracy(pdtContext.ACU_HALFDAY)
return sourceTime
def _evalMeridian(self, datetimeString, sourceTime):
"""
Evaluate text passed by L{_partialParseMeridian()}
"""
s = datetimeString.strip()
sourceTime = self._evalDT(datetimeString, sourceTime)
# Given string is in the format HH:MM(:SS)(am/pm)
yr, mth, dy, hr, mn, sec, wd, yd, isdst = sourceTime
m = self.ptc.CRE_TIMEHMS2.search(s)
if m is not None:
dt = s[:m.start('meridian')].strip()
if len(dt) <= 2:
hr = int(dt)
mn = 0
sec = 0
else:
hr, mn, sec = _extract_time(m)
if hr == 24:
hr = 0
meridian = m.group('meridian').lower()
# if 'am' found and hour is 12 - force hour to 0 (midnight)
if (meridian in self.ptc.am) and hr == 12:
hr = 0
# if 'pm' found and hour < 12, add 12 to shift to evening
if (meridian in self.ptc.pm) and hr < 12:
hr += 12
# time validation
if hr < 24 and mn < 60 and sec < 60:
sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
_pop_time_accuracy(m, self.currentContext)
return sourceTime
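        # Worked example (illustrative): "8 pm" parses as hr=8 with a pm
        # meridian, so 12 is added giving 20:00; "12 am" forces the hour to 0
        # (midnight); any result with hr >= 24, mn >= 60 or sec >= 60 fails
        # validation and leaves sourceTime unchanged.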
def _evalTimeStd(self, datetimeString, sourceTime):
"""
Evaluate text passed by L{_partialParseTimeStd()}
"""
s = datetimeString.strip()
sourceTime = self._evalDT(datetimeString, sourceTime)
# Given string is in the format HH:MM(:SS)
yr, mth, dy, hr, mn, sec, wd, yd, isdst = sourceTime
m = self.ptc.CRE_TIMEHMS.search(s)
if m is not None:
hr, mn, sec = _extract_time(m)
if hr == 24:
hr = 0
# time validation
if hr < 24 and mn < 60 and sec < 60:
sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
_pop_time_accuracy(m, self.currentContext)
return sourceTime
def _UnitsTrapped(self, s, m, key):
# check if a day suffix got trapped by a unit match
# for example Dec 31st would match for 31s (aka 31 seconds)
# Dec 31st
# ^ ^
# | +-- m.start('units')
# | and also m2.start('suffix')
# +---- m.start('qty')
# and also m2.start('day')
m2 = self.ptc.CRE_DAY2.search(s)
if m2 is not None:
t = '%s%s' % (m2.group('day'), m.group(key))
if m.start(key) == m2.start('suffix') and \
m.start('qty') == m2.start('day') and \
m.group('qty') == t:
return True
else:
return False
else:
return False
def _partialParseModifier(self, s, sourceTime):
"""
        Test if the given C{s} matches CRE_MODIFIER; used by L{parse()}.
@type s: string
@param s: date/time text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
        @return: tuple of the remaining date/time text, the datetime value
                 and a boolean indicating whether a match was found
"""
parseStr = None
chunk1 = chunk2 = ''
# Modifier like next/prev/from/after/prior..
m = self.ptc.CRE_MODIFIER.search(s)
if m is not None:
if m.group() != s:
# capture remaining string
parseStr = m.group()
chunk1 = s[:m.start()].strip()
chunk2 = s[m.end():].strip()
else:
parseStr = s
if parseStr:
debug and log.debug('found (modifier) [%s][%s][%s]',
parseStr, chunk1, chunk2)
s, sourceTime = self._evalModifier(parseStr, chunk1,
chunk2, sourceTime)
return s, sourceTime, bool(parseStr)
def _partialParseUnits(self, s, sourceTime):
"""
        Test if the given C{s} matches CRE_UNITS; used by L{parse()}.
@type s: string
@param s: date/time text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
        @return: tuple of the remaining date/time text, the datetime value
                 and a boolean indicating whether a match was found
"""
parseStr = None
chunk1 = chunk2 = ''
# Quantity + Units
m = self.ptc.CRE_UNITS.search(s)
if m is not None:
debug and log.debug('CRE_UNITS matched')
if self._UnitsTrapped(s, m, 'units'):
debug and log.debug('day suffix trapped by unit match')
else:
if (m.group('qty') != s):
# capture remaining string
parseStr = m.group('qty')
chunk1 = s[:m.start('qty')].strip()
chunk2 = s[m.end('qty'):].strip()
if chunk1[-1:] == '-':
parseStr = '-%s' % parseStr
chunk1 = chunk1[:-1]
s = '%s %s' % (chunk1, chunk2)
else:
parseStr = s
s = ''
if parseStr:
debug and log.debug('found (units) [%s][%s][%s]',
parseStr, chunk1, chunk2)
sourceTime = self._evalUnits(parseStr, sourceTime)
return s, sourceTime, bool(parseStr)
def _partialParseQUnits(self, s, sourceTime):
"""
        Test if the given C{s} matches CRE_QUNITS; used by L{parse()}.
@type s: string
@param s: date/time text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
        @return: tuple of the remaining date/time text, the datetime value
                 and a boolean indicating whether a match was found
"""
parseStr = None
chunk1 = chunk2 = ''
# Quantity + Units
m = self.ptc.CRE_QUNITS.search(s)
if m is not None:
debug and log.debug('CRE_QUNITS matched')
if self._UnitsTrapped(s, m, 'qunits'):
debug and log.debug(
'day suffix trapped by qunit match')
else:
if (m.group('qty') != s):
# capture remaining string
parseStr = m.group('qty')
chunk1 = s[:m.start('qty')].strip()
chunk2 = s[m.end('qty'):].strip()
if chunk1[-1:] == '-':
parseStr = '-%s' % parseStr
chunk1 = chunk1[:-1]
s = '%s %s' % (chunk1, chunk2)
else:
parseStr = s
s = ''
if parseStr:
debug and log.debug('found (qunits) [%s][%s][%s]',
parseStr, chunk1, chunk2)
sourceTime = self._evalQUnits(parseStr, sourceTime)
return s, sourceTime, bool(parseStr)
def _partialParseDateStr(self, s, sourceTime):
"""
        Test if the given C{s} matches CRE_DATE3; used by L{parse()}.
@type s: string
@param s: date/time text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
        @return: tuple of the remaining date/time text, the datetime value
                 and a boolean indicating whether a match was found
"""
parseStr = None
chunk1 = chunk2 = ''
m = self.ptc.CRE_DATE3.search(s)
# NO LONGER NEEDED, THE REGEXP HANDLED MTHNAME NOW
# for match in self.ptc.CRE_DATE3.finditer(s):
# to prevent "HH:MM(:SS) time strings" expressions from
# triggering this regex, we checks if the month field
# exists in the searched expression, if it doesn't exist,
# the date field is not valid
# if match.group('mthname'):
# m = self.ptc.CRE_DATE3.search(s, match.start())
# valid_date = True
# break
# String date format
if m is not None:
if (m.group('date') != s):
# capture remaining string
mStart = m.start('date')
mEnd = m.end('date')
# we need to check that anything following the parsed
# date is a time expression because it is often picked
# up as a valid year if the hour is 2 digits
fTime = False
mm = self.ptc.CRE_TIMEHMS2.search(s)
# "February 24th 1PM" doesn't get caught
# "February 24th 12PM" does
mYear = m.group('year')
if mm is not None and mYear is not None:
fTime = True
else:
# "February 24th 12:00"
mm = self.ptc.CRE_TIMEHMS.search(s)
if mm is not None and mYear is None:
fTime = True
if fTime:
hoursStart = mm.start('hours')
if hoursStart < m.end('year'):
mEnd = hoursStart
parseStr = s[mStart:mEnd]
chunk1 = s[:mStart]
chunk2 = s[mEnd:]
s = '%s %s' % (chunk1, chunk2)
else:
parseStr = s
s = ''
if parseStr:
debug and log.debug(
'found (date3) [%s][%s][%s]', parseStr, chunk1, chunk2)
sourceTime = self._evalDateStr(parseStr, sourceTime)
return s, sourceTime, bool(parseStr)
def _partialParseDateStd(self, s, sourceTime):
"""
        Test if the given C{s} matches CRE_DATE; used by L{parse()}.
@type s: string
@param s: date/time text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
        @return: tuple of the remaining date/time text, the updated source time
                 and a boolean value indicating whether a match was found
"""
parseStr = None
chunk1 = chunk2 = ''
# Standard date format
m = self.ptc.CRE_DATE.search(s)
if m is not None:
if (m.group('date') != s):
# capture remaining string
parseStr = m.group('date')
chunk1 = s[:m.start('date')]
chunk2 = s[m.end('date'):]
s = '%s %s' % (chunk1, chunk2)
else:
parseStr = s
s = ''
if parseStr:
debug and log.debug(
'found (date) [%s][%s][%s]', parseStr, chunk1, chunk2)
sourceTime = self._evalDateStd(parseStr, sourceTime)
return s, sourceTime, bool(parseStr)
def _partialParseDayStr(self, s, sourceTime):
"""
        Test if the given C{s} matches CRE_DAY; used by L{parse()}.
@type s: string
@param s: date/time text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
        @return: tuple of the remaining date/time text, the updated source time
                 and a boolean value indicating whether a match was found
"""
parseStr = None
chunk1 = chunk2 = ''
# Natural language day strings
m = self.ptc.CRE_DAY.search(s)
if m is not None:
if (m.group() != s):
# capture remaining string
parseStr = m.group()
chunk1 = s[:m.start()]
chunk2 = s[m.end():]
s = '%s %s' % (chunk1, chunk2)
else:
parseStr = s
s = ''
if parseStr:
debug and log.debug(
'found (day) [%s][%s][%s]', parseStr, chunk1, chunk2)
sourceTime = self._evalDayStr(parseStr, sourceTime)
return s, sourceTime, bool(parseStr)
def _partialParseWeekday(self, s, sourceTime):
"""
        Test if the given C{s} matches CRE_WEEKDAY; used by L{parse()}.
@type s: string
@param s: date/time text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
        @return: tuple of the remaining date/time text, the updated source time
                 and a boolean value indicating whether a match was found
"""
parseStr = None
chunk1 = chunk2 = ''
ctx = self.currentContext
log.debug('eval %s with context - %s, %s', s, ctx.hasDate, ctx.hasTime)
# Weekday
m = self.ptc.CRE_WEEKDAY.search(s)
if m is not None:
gv = m.group()
if s not in self.ptc.dayOffsets:
if (gv != s):
# capture remaining string
parseStr = gv
chunk1 = s[:m.start()]
chunk2 = s[m.end():]
s = '%s %s' % (chunk1, chunk2)
else:
parseStr = s
s = ''
if parseStr and not ctx.hasDate:
debug and log.debug(
'found (weekday) [%s][%s][%s]', parseStr, chunk1, chunk2)
sourceTime = self._evalWeekday(parseStr, sourceTime)
return s, sourceTime, bool(parseStr)
def _partialParseTimeStr(self, s, sourceTime):
"""
        Test if the given C{s} matches CRE_TIME; used by L{parse()}.
@type s: string
@param s: date/time text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
        @return: tuple of the remaining date/time text, the updated source time
                 and a boolean value indicating whether a match was found
"""
parseStr = None
chunk1 = chunk2 = ''
# Natural language time strings
m = self.ptc.CRE_TIME.search(s)
if m is not None or s in self.ptc.re_values['now']:
if (m and m.group() != s):
# capture remaining string
parseStr = m.group()
chunk1 = s[:m.start()]
chunk2 = s[m.end():]
s = '%s %s' % (chunk1, chunk2)
else:
parseStr = s
s = ''
if parseStr:
debug and log.debug(
'found (time) [%s][%s][%s]', parseStr, chunk1, chunk2)
sourceTime = self._evalTimeStr(parseStr, sourceTime)
return s, sourceTime, bool(parseStr)
def _partialParseMeridian(self, s, sourceTime):
"""
        Test if the given C{s} matches CRE_TIMEHMS2; used by L{parse()}.
@type s: string
@param s: date/time text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
        @return: tuple of the remaining date/time text, the updated source time
                 and a boolean value indicating whether a match was found
"""
parseStr = None
chunk1 = chunk2 = ''
# HH:MM(:SS) am/pm time strings
m = self.ptc.CRE_TIMEHMS2.search(s)
if m is not None:
if m.group('minutes') is not None:
if m.group('seconds') is not None:
parseStr = '%s:%s:%s' % (m.group('hours'),
m.group('minutes'),
m.group('seconds'))
else:
parseStr = '%s:%s' % (m.group('hours'),
m.group('minutes'))
else:
parseStr = m.group('hours')
parseStr += ' ' + m.group('meridian')
chunk1 = s[:m.start()]
chunk2 = s[m.end():]
s = '%s %s' % (chunk1, chunk2)
if parseStr:
debug and log.debug('found (meridian) [%s][%s][%s]',
parseStr, chunk1, chunk2)
sourceTime = self._evalMeridian(parseStr, sourceTime)
return s, sourceTime, bool(parseStr)
def _partialParseTimeStd(self, s, sourceTime):
"""
        Test if the given C{s} matches CRE_TIMEHMS; used by L{parse()}.
@type s: string
@param s: date/time text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
        @return: tuple of the remaining date/time text, the updated source time
                 and a boolean value indicating whether a match was found
"""
parseStr = None
chunk1 = chunk2 = ''
# HH:MM(:SS) time strings
m = self.ptc.CRE_TIMEHMS.search(s)
if m is not None:
if m.group('seconds') is not None:
parseStr = '%s:%s:%s' % (m.group('hours'),
m.group('minutes'),
m.group('seconds'))
chunk1 = s[:m.start('hours')]
chunk2 = s[m.end('seconds'):]
else:
parseStr = '%s:%s' % (m.group('hours'),
m.group('minutes'))
chunk1 = s[:m.start('hours')]
chunk2 = s[m.end('minutes'):]
s = '%s %s' % (chunk1, chunk2)
if parseStr:
debug and log.debug(
'found (hms) [%s][%s][%s]', parseStr, chunk1, chunk2)
sourceTime = self._evalTimeStd(parseStr, sourceTime)
return s, sourceTime, bool(parseStr)
def parseDT(self, datetimeString, sourceTime=None,
tzinfo=None, version=None):
"""
        C{datetimeString} is handled as in C{.parse}; C{sourceTime} has the same
        semantic meaning as in C{.parse}, but now also accepts datetime objects.
        C{tzinfo} accepts a tzinfo object; it is advisable to use pytz.
@type datetimeString: string
@param datetimeString: date/time text to evaluate
@type sourceTime: struct_time, datetime, date, time
@param sourceTime: time value to use as the base
@type tzinfo: tzinfo
@param tzinfo: Timezone to apply to generated datetime objs.
@type version: integer
@param version: style version, default will use L{Calendar}
parameter version value
@rtype: tuple
@return: tuple of: modified C{sourceTime} and the result flag/context
see .parse for return code details.
"""
        # if sourceTime has a timetuple method, use that, else just pass the
        # entire thing to parse and pray the user knows what the hell they are
        # doing.
sourceTime = getattr(sourceTime, 'timetuple', (lambda: sourceTime))()
# You REALLY SHOULD be using pytz. Using localize if available,
# hacking if not. Note, None is a valid tzinfo object in the case of
# the ugly hack.
localize = getattr(
tzinfo,
'localize',
(lambda dt: dt.replace(tzinfo=tzinfo)), # ugly hack is ugly :(
)
# Punt
time_struct, ret_code = self.parse(
datetimeString,
sourceTime=sourceTime,
version=version)
# Comments from GHI indicate that it is desired to have the same return
        # signature on this method as the one it punts to, with the exception
# of using datetime objects instead of time_structs.
dt = localize(datetime.datetime(*time_struct[:6]))
return dt, ret_code
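    # Illustrative usage sketch (an assumption, not part of the original source):
    # parseDT() wraps parse() and returns a timezone-aware datetime when a tzinfo
    # (ideally from pytz) is supplied.
    #
    #   >>> import pytz
    #   >>> cal = Calendar()
    #   >>> dt, result = cal.parseDT("tomorrow at noon",
    #   ...                          tzinfo=pytz.timezone("US/Pacific"))
    #   >>> dt.tzinfo is not None
    #   True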
def parse(self, datetimeString, sourceTime=None, version=None):
"""
Splits the given C{datetimeString} into tokens, finds the regex
patterns that match and then calculates a C{struct_time} value from
the chunks.
If C{sourceTime} is given then the C{struct_time} value will be
calculated from that value, otherwise from the current date/time.
        If the C{datetimeString} is parsed and a date/time value is found,
        then::
            If C{version} equals L{VERSION_FLAG_STYLE}, the second item of
            the returned tuple will be a flag to let you know what kind of
            C{struct_time} value is being returned::
0 = not parsed at all
1 = parsed as a C{date}
2 = parsed as a C{time}
3 = parsed as a C{datetime}
            If C{version} equals L{VERSION_CONTEXT_STYLE}, the second value
will be an instance of L{pdtContext}
@type datetimeString: string
@param datetimeString: date/time text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@type version: integer
@param version: style version, default will use L{Calendar}
parameter version value
@rtype: tuple
@return: tuple of: modified C{sourceTime} and the result flag/context
"""
debug and log.debug('parse()')
datetimeString = re.sub(r'(\w)\.(\s)', r'\1\2', datetimeString)
datetimeString = re.sub(r'(\w)[\'"](\s|$)', r'\1 \2', datetimeString)
datetimeString = re.sub(r'(\s|^)[\'"](\w)', r'\1 \2', datetimeString)
if sourceTime:
if isinstance(sourceTime, datetime.datetime):
debug and log.debug('coercing datetime to timetuple')
sourceTime = sourceTime.timetuple()
else:
if not isinstance(sourceTime, time.struct_time) and \
not isinstance(sourceTime, tuple):
raise ValueError('sourceTime is not a struct_time')
else:
sourceTime = time.localtime()
with self.context() as ctx:
s = datetimeString.lower().strip()
debug and log.debug('remainedString (before parsing): [%s]', s)
while s:
for parseMeth in (self._partialParseModifier,
self._partialParseUnits,
self._partialParseQUnits,
self._partialParseDateStr,
self._partialParseDateStd,
self._partialParseDayStr,
self._partialParseWeekday,
self._partialParseTimeStr,
self._partialParseMeridian,
self._partialParseTimeStd):
retS, retTime, matched = parseMeth(s, sourceTime)
if matched:
s, sourceTime = retS.strip(), retTime
break
else:
# nothing matched
s = ''
debug and log.debug('hasDate: [%s], hasTime: [%s]',
ctx.hasDate, ctx.hasTime)
debug and log.debug('remainedString: [%s]', s)
# String is not parsed at all
if sourceTime is None:
debug and log.debug('not parsed [%s]', str(sourceTime))
sourceTime = time.localtime()
if not isinstance(sourceTime, time.struct_time):
sourceTime = time.struct_time(sourceTime)
version = self.version if version is None else version
if version == VERSION_CONTEXT_STYLE:
return sourceTime, ctx
else:
return sourceTime, ctx.dateTimeFlag
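    # Illustrative usage sketch (an assumption, not part of the original source):
    # with the flag-style version the second return value is 0 (not parsed),
    # 1 (date), 2 (time) or 3 (datetime).
    #
    #   >>> cal = Calendar()
    #   >>> struct, flag = cal.parse("August 25th, 2014 at 4pm")
    #   >>> flag   # both a date and a time were recognised
    #   3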
def inc(self, source, month=None, year=None):
"""
Takes the given C{source} date, or current date if none is
passed, and increments it according to the values passed in
by month and/or year.
This routine is needed because Python's C{timedelta()} function
does not allow for month or year increments.
@type source: struct_time
@param source: C{struct_time} value to increment
@type month: float or integer
@param month: optional number of months to increment
@type year: float or integer
@param year: optional number of years to increment
@rtype: datetime
@return: C{source} incremented by the number of months and/or years
"""
yr = source.year
mth = source.month
dy = source.day
try:
month = float(month)
except (TypeError, ValueError):
month = 0
try:
year = float(year)
except (TypeError, ValueError):
year = 0
finally:
month += year * 12
year = 0
subMi = 0.0
maxDay = 0
if month:
mi = int(month)
subMi = month - mi
y = int(mi / 12.0)
m = mi - y * 12
mth = mth + m
if mth < 1: # cross start-of-year?
y -= 1 # yes - decrement year
mth += 12 # and fix month
elif mth > 12: # cross end-of-year?
y += 1 # yes - increment year
mth -= 12 # and fix month
yr += y
# if the day ends up past the last day of
# the new month, set it to the last day
maxDay = self.ptc.daysInMonth(mth, yr)
if dy > maxDay:
dy = maxDay
if yr > datetime.MAXYEAR or yr < datetime.MINYEAR:
raise OverflowError('year is out of range')
d = source.replace(year=yr, month=mth, day=dy)
if subMi:
d += datetime.timedelta(days=subMi * maxDay)
return source + (d - source)
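    # Illustrative usage sketch (an assumption, not part of the original source):
    # inc() adds whole calendar months/years, clamping the day to the end of the
    # target month, something datetime.timedelta cannot express directly.
    #
    #   >>> cal = Calendar()
    #   >>> cal.inc(datetime.datetime(2020, 1, 31), month=1)
    #   datetime.datetime(2020, 2, 29, 0, 0)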
def nlp(self, inputString, sourceTime=None, version=None):
"""Utilizes parse() after making judgements about what datetime
information belongs together.
It makes logical groupings based on proximity and returns a parsed
datetime for each matched grouping of datetime text, along with
location info within the given inputString.
@type inputString: string
@param inputString: natural language text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@type version: integer
@param version: style version, default will use L{Calendar}
parameter version value
@rtype: tuple or None
@return: tuple of tuples in the format (parsed_datetime as
datetime.datetime, flags as int, start_pos as int,
end_pos as int, matched_text as string) or None if there
were no matches
"""
orig_inputstring = inputString
# replace periods at the end of sentences w/ spaces
# opposed to removing them altogether in order to
# retain relative positions (identified by alpha, period, space).
# this is required for some of the regex patterns to match
inputString = re.sub(r'(\w)(\.)(\s)', r'\1 \3', inputString).lower()
inputString = re.sub(r'(\w)(\'|")(\s|$)', r'\1 \3', inputString)
inputString = re.sub(r'(\s|^)(\'|")(\w)', r'\1 \3', inputString)
startpos = 0 # the start position in the inputString during the loop
# list of lists in format:
# [startpos, endpos, matchedstring, flags, type]
matches = []
while startpos < len(inputString):
# empty match
leftmost_match = [0, 0, None, 0, None]
# Modifier like next\prev..
m = self.ptc.CRE_MODIFIER.search(inputString[startpos:])
if m is not None:
if leftmost_match[1] == 0 or \
leftmost_match[0] > m.start() + startpos:
leftmost_match[0] = m.start() + startpos
leftmost_match[1] = m.end() + startpos
leftmost_match[2] = m.group()
leftmost_match[3] = 0
leftmost_match[4] = 'modifier'
# Quantity + Units
m = self.ptc.CRE_UNITS.search(inputString[startpos:])
if m is not None:
debug and log.debug('CRE_UNITS matched')
if self._UnitsTrapped(inputString[startpos:], m, 'units'):
debug and log.debug('day suffix trapped by unit match')
else:
if leftmost_match[1] == 0 or \
leftmost_match[0] > m.start('qty') + startpos:
leftmost_match[0] = m.start('qty') + startpos
leftmost_match[1] = m.end('qty') + startpos
leftmost_match[2] = m.group('qty')
leftmost_match[3] = 3
leftmost_match[4] = 'units'
if m.start('qty') > 0 and \
inputString[m.start('qty') - 1] == '-':
leftmost_match[0] = leftmost_match[0] - 1
leftmost_match[2] = '-' + leftmost_match[2]
# Quantity + Units
m = self.ptc.CRE_QUNITS.search(inputString[startpos:])
if m is not None:
debug and log.debug('CRE_QUNITS matched')
if self._UnitsTrapped(inputString[startpos:], m, 'qunits'):
debug and log.debug('day suffix trapped by qunit match')
else:
if leftmost_match[1] == 0 or \
leftmost_match[0] > m.start('qty') + startpos:
leftmost_match[0] = m.start('qty') + startpos
leftmost_match[1] = m.end('qty') + startpos
leftmost_match[2] = m.group('qty')
leftmost_match[3] = 3
leftmost_match[4] = 'qunits'
if m.start('qty') > 0 and \
inputString[m.start('qty') - 1] == '-':
leftmost_match[0] = leftmost_match[0] - 1
leftmost_match[2] = '-' + leftmost_match[2]
m = self.ptc.CRE_DATE3.search(inputString[startpos:])
# NO LONGER NEEDED, THE REGEXP HANDLED MTHNAME NOW
# for match in self.ptc.CRE_DATE3.finditer(inputString[startpos:]):
# to prevent "HH:MM(:SS) time strings" expressions from
            # triggering this regex, we check if the month field exists
# in the searched expression, if it doesn't exist, the date
# field is not valid
# if match.group('mthname'):
# m = self.ptc.CRE_DATE3.search(inputString[startpos:],
# match.start())
# break
# String date format
if m is not None:
if leftmost_match[1] == 0 or \
leftmost_match[0] > m.start('date') + startpos:
leftmost_match[0] = m.start('date') + startpos
leftmost_match[1] = m.end('date') + startpos
leftmost_match[2] = m.group('date')
leftmost_match[3] = 1
leftmost_match[4] = 'dateStr'
# Standard date format
m = self.ptc.CRE_DATE.search(inputString[startpos:])
if m is not None:
if leftmost_match[1] == 0 or \
leftmost_match[0] > m.start('date') + startpos:
leftmost_match[0] = m.start('date') + startpos
leftmost_match[1] = m.end('date') + startpos
leftmost_match[2] = m.group('date')
leftmost_match[3] = 1
leftmost_match[4] = 'dateStd'
# Natural language day strings
m = self.ptc.CRE_DAY.search(inputString[startpos:])
if m is not None:
if leftmost_match[1] == 0 or \
leftmost_match[0] > m.start() + startpos:
leftmost_match[0] = m.start() + startpos
leftmost_match[1] = m.end() + startpos
leftmost_match[2] = m.group()
leftmost_match[3] = 1
leftmost_match[4] = 'dayStr'
# Weekday
m = self.ptc.CRE_WEEKDAY.search(inputString[startpos:])
if m is not None:
if inputString[startpos:] not in self.ptc.dayOffsets:
if leftmost_match[1] == 0 or \
leftmost_match[0] > m.start() + startpos:
leftmost_match[0] = m.start() + startpos
leftmost_match[1] = m.end() + startpos
leftmost_match[2] = m.group()
leftmost_match[3] = 1
leftmost_match[4] = 'weekdy'
# Natural language time strings
m = self.ptc.CRE_TIME.search(inputString[startpos:])
if m is not None:
if leftmost_match[1] == 0 or \
leftmost_match[0] > m.start() + startpos:
leftmost_match[0] = m.start() + startpos
leftmost_match[1] = m.end() + startpos
leftmost_match[2] = m.group()
leftmost_match[3] = 2
leftmost_match[4] = 'timeStr'
# HH:MM(:SS) am/pm time strings
m = self.ptc.CRE_TIMEHMS2.search(inputString[startpos:])
if m is not None:
if leftmost_match[1] == 0 or \
leftmost_match[0] > m.start('hours') + startpos:
leftmost_match[0] = m.start('hours') + startpos
leftmost_match[1] = m.end('meridian') + startpos
leftmost_match[2] = inputString[leftmost_match[0]:
leftmost_match[1]]
leftmost_match[3] = 2
leftmost_match[4] = 'meridian'
# HH:MM(:SS) time strings
m = self.ptc.CRE_TIMEHMS.search(inputString[startpos:])
if m is not None:
if leftmost_match[1] == 0 or \
leftmost_match[0] > m.start('hours') + startpos:
leftmost_match[0] = m.start('hours') + startpos
if m.group('seconds') is not None:
leftmost_match[1] = m.end('seconds') + startpos
else:
leftmost_match[1] = m.end('minutes') + startpos
leftmost_match[2] = inputString[leftmost_match[0]:
leftmost_match[1]]
leftmost_match[3] = 2
leftmost_match[4] = 'timeStd'
# Units only; must be preceded by a modifier
if len(matches) > 0 and matches[-1][3] == 0:
m = self.ptc.CRE_UNITS_ONLY.search(inputString[startpos:])
                # Ensure that any match is immediately preceded by the
# modifier. "Next is the word 'month'" should not parse as a
# date while "next month" should
if m is not None and \
inputString[startpos:startpos + m.start()].strip() == '':
debug and log.debug('CRE_UNITS_ONLY matched [%s]',
m.group())
if leftmost_match[1] == 0 or \
leftmost_match[0] > m.start() + startpos:
leftmost_match[0] = m.start() + startpos
leftmost_match[1] = m.end() + startpos
leftmost_match[2] = m.group()
leftmost_match[3] = 3
leftmost_match[4] = 'unitsOnly'
# set the start position to the end pos of the leftmost match
startpos = leftmost_match[1]
# nothing was detected
# so break out of the loop
if startpos == 0:
startpos = len(inputString)
else:
if leftmost_match[3] > 0:
m = self.ptc.CRE_NLP_PREFIX.search(
inputString[:leftmost_match[0]] + ' ' + str(leftmost_match[3]))
if m is not None:
leftmost_match[0] = m.start('nlp_prefix')
leftmost_match[2] = inputString[leftmost_match[0]:
leftmost_match[1]]
matches.append(leftmost_match)
# find matches in proximity with one another and
# return all the parsed values
proximity_matches = []
if len(matches) > 1:
combined = ''
from_match_index = 0
date = matches[0][3] == 1
time = matches[0][3] == 2
units = matches[0][3] == 3
for i in range(1, len(matches)):
# test proximity (are there characters between matches?)
endofprevious = matches[i - 1][1]
begofcurrent = matches[i][0]
if orig_inputstring[endofprevious:
begofcurrent].lower().strip() != '':
# this one isn't in proximity, but maybe
# we have enough to make a datetime
# TODO: make sure the combination of
# formats (modifier, dateStd, etc) makes logical sense
# before parsing together
if date or time or units:
combined = orig_inputstring[matches[from_match_index]
[0]:matches[i - 1][1]]
parsed_datetime, flags = self.parse(combined,
sourceTime,
version)
proximity_matches.append((
datetime.datetime(*parsed_datetime[:6]),
flags,
matches[from_match_index][0],
matches[i - 1][1],
combined))
# not in proximity, reset starting from current
from_match_index = i
date = matches[i][3] == 1
time = matches[i][3] == 2
units = matches[i][3] == 3
continue
else:
if matches[i][3] == 1:
date = True
if matches[i][3] == 2:
time = True
if matches[i][3] == 3:
units = True
# check last
# we have enough to make a datetime
if date or time or units:
combined = orig_inputstring[matches[from_match_index][0]:
matches[len(matches) - 1][1]]
parsed_datetime, flags = self.parse(combined, sourceTime,
version)
proximity_matches.append((
datetime.datetime(*parsed_datetime[:6]),
flags,
matches[from_match_index][0],
matches[len(matches) - 1][1],
combined))
elif len(matches) == 0:
return None
else:
if matches[0][3] == 0: # not enough info to parse
return None
else:
combined = orig_inputstring[matches[0][0]:matches[0][1]]
parsed_datetime, flags = self.parse(matches[0][2], sourceTime,
version)
proximity_matches.append((
datetime.datetime(*parsed_datetime[:6]),
flags,
matches[0][0],
matches[0][1],
combined))
return tuple(proximity_matches)
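    # Illustrative usage sketch (an assumption, not part of the original source):
    # nlp() yields one tuple per datetime-like grouping found in free text, with
    # the parsed datetime, flag, start/end offsets and the matched substring.
    #
    #   >>> cal = Calendar()
    #   >>> cal.nlp("see you tomorrow at 5pm or on Aug 25th")
    #   ((datetime.datetime(...), 3, 8, 23, 'tomorrow at 5pm'),
    #    (datetime.datetime(...), 1, 30, 38, 'Aug 25th'))   # offsets illustrative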
def _initSymbols(ptc):
"""
Initialize symbols and single character constants.
"""
# build am and pm lists to contain
# original case, lowercase, first-char and dotted
# versions of the meridian text
ptc.am = ['', '']
ptc.pm = ['', '']
for idx, xm in enumerate(ptc.locale.meridian[:2]):
# 0: am
# 1: pm
target = ['am', 'pm'][idx]
setattr(ptc, target, [xm])
target = getattr(ptc, target)
if xm:
lxm = xm.lower()
target.extend((xm[0], '{0}.{1}.'.format(*xm),
lxm, lxm[0], '{0}.{1}.'.format(*lxm)))
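    # Illustrative note (an assumption, not part of the original source): for an
    # English-style meridian of ['AM', 'PM'] the loop above produces
    # ptc.am == ['AM', 'A', 'A.M.', 'am', 'a', 'a.m.'] and the analogous ptc.pm.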
class Constants(object):
"""
Default set of constants for parsedatetime.
If PyICU is present, then the class will first try to get PyICU
to return a locale specified by C{localeID}. If either C{localeID} is
None or if the locale does not exist within PyICU, then each of the
locales defined in C{fallbackLocales} is tried in order.
If PyICU is not present or none of the specified locales can be used,
then the class will initialize itself to the en_US locale.
    If PyICU is not present or not requested, only the locales defined by
C{pdtLocales} will be searched.
"""
def __init__(self, localeID=None, usePyICU=True,
fallbackLocales=['en_US']):
self.localeID = localeID
self.fallbackLocales = fallbackLocales[:]
if 'en_US' not in self.fallbackLocales:
self.fallbackLocales.append('en_US')
# define non-locale specific constants
self.locale = None
self.usePyICU = usePyICU
# starting cache of leap years
# daysInMonth will add to this if during
# runtime it gets a request for a year not found
self._leapYears = list(range(1904, 2097, 4))
self.Second = 1
self.Minute = 60 # 60 * self.Second
self.Hour = 3600 # 60 * self.Minute
self.Day = 86400 # 24 * self.Hour
self.Week = 604800 # 7 * self.Day
self.Month = 2592000 # 30 * self.Day
self.Year = 31536000 # 365 * self.Day
self._DaysInMonthList = (31, 28, 31, 30, 31, 30,
31, 31, 30, 31, 30, 31)
self.rangeSep = '-'
self.BirthdayEpoch = 50
# When True the starting time for all relative calculations will come
# from the given SourceTime, otherwise it will be self.StartHour
self.StartTimeFromSourceTime = False
# The hour of the day that will be used as the starting time for all
# relative calculations when self.StartTimeFromSourceTime is False
self.StartHour = 9
# YearParseStyle controls how we parse "Jun 12", i.e. dates that do
# not have a year present. The default is to compare the date given
# to the current date, and if prior, then assume the next year.
# Setting this to 0 will prevent that.
self.YearParseStyle = 1
# DOWParseStyle controls how we parse "Tuesday"
# If the current day was Thursday and the text to parse is "Tuesday"
# then the following table shows how each style would be returned
# -1, 0, +1
#
# Current day marked as ***
#
# Sun Mon Tue Wed Thu Fri Sat
# week -1
# current -1,0 ***
# week +1 +1
#
# If the current day was Monday and the text to parse is "Tuesday"
# then the following table shows how each style would be returned
# -1, 0, +1
#
# Sun Mon Tue Wed Thu Fri Sat
# week -1 -1
# current *** 0,+1
# week +1
self.DOWParseStyle = 1
# CurrentDOWParseStyle controls how we parse "Friday"
# If the current day was Friday and the text to parse is "Friday"
# then the following table shows how each style would be returned
# True/False. This also depends on DOWParseStyle.
#
# Current day marked as ***
#
# DOWParseStyle = 0
# Sun Mon Tue Wed Thu Fri Sat
# week -1
# current T,F
# week +1
#
# DOWParseStyle = -1
# Sun Mon Tue Wed Thu Fri Sat
# week -1 F
# current T
# week +1
#
# DOWParseStyle = +1
#
# Sun Mon Tue Wed Thu Fri Sat
# week -1
# current T
# week +1 F
self.CurrentDOWParseStyle = False
if self.usePyICU:
self.locale = get_icu(self.localeID)
if self.locale.icu is None:
self.usePyICU = False
self.locale = None
if self.locale is None:
if self.localeID not in pdtLocales:
for localeId in range(0, len(self.fallbackLocales)):
self.localeID = self.fallbackLocales[localeId]
if self.localeID in pdtLocales:
break
self.locale = pdtLocales[self.localeID]
if self.locale is not None:
def _getLocaleDataAdjusted(localeData):
"""
If localeData is defined as ["mon|mnd", 'tu|tues'...] then this
function splits those definitions on |
"""
adjusted = []
for d in localeData:
if '|' in d:
adjusted += d.split("|")
else:
adjusted.append(d)
return adjusted
def re_join(g):
return '|'.join(re.escape(i) for i in g)
mths = _getLocaleDataAdjusted(self.locale.Months)
smths = _getLocaleDataAdjusted(self.locale.shortMonths)
swds = _getLocaleDataAdjusted(self.locale.shortWeekdays)
wds = _getLocaleDataAdjusted(self.locale.Weekdays)
# escape any regex special characters that may be found
self.locale.re_values['months'] = re_join(mths)
self.locale.re_values['shortmonths'] = re_join(smths)
self.locale.re_values['days'] = re_join(wds)
self.locale.re_values['shortdays'] = re_join(swds)
self.locale.re_values['dayoffsets'] = \
re_join(self.locale.dayOffsets)
self.locale.re_values['numbers'] = \
re_join(self.locale.numbers)
self.locale.re_values['decimal_mark'] = \
re.escape(self.locale.decimal_mark)
units = [unit for units in self.locale.units.values()
for unit in units] # flatten
units.sort(key=len, reverse=True) # longest first
self.locale.re_values['units'] = re_join(units)
self.locale.re_values['modifiers'] = re_join(self.locale.Modifiers)
self.locale.re_values['sources'] = re_join(self.locale.re_sources)
# For distinguishing numeric dates from times, look for timeSep
# and meridian, if specified in the locale
self.locale.re_values['timecomponents'] = \
re_join(self.locale.timeSep + self.locale.meridian)
# build weekday offsets - yes, it assumes the Weekday and
# shortWeekday lists are in the same order and Mon..Sun
# (Python style)
def _buildOffsets(offsetDict, localeData, indexStart):
o = indexStart
for key in localeData:
if '|' in key:
for k in key.split('|'):
offsetDict[k] = o
else:
offsetDict[key] = o
o += 1
_buildOffsets(self.locale.WeekdayOffsets,
self.locale.Weekdays, 0)
_buildOffsets(self.locale.WeekdayOffsets,
self.locale.shortWeekdays, 0)
# build month offsets - yes, it assumes the Months and shortMonths
# lists are in the same order and Jan..Dec
_buildOffsets(self.locale.MonthOffsets,
self.locale.Months, 1)
_buildOffsets(self.locale.MonthOffsets,
self.locale.shortMonths, 1)
_initSymbols(self)
# TODO: add code to parse the date formats and build the regexes up
# from sub-parts, find all hard-coded uses of date/time separators
# not being used in code, but kept in case others are manually
# utilizing this regex for their own purposes
self.RE_DATE4 = r'''(?P<date>
(
(
(?P<day>\d\d?)
(?P<suffix>{daysuffix})?
(,)?
(\s)*
)
(?P<mthname>
\b({months}|{shortmonths})\b
)\s*
(?P<year>\d\d
(\d\d)?
)?
)
)'''.format(**self.locale.re_values)
        # still not completely sure of the behavior of the regex and whether it
        # would be best to consume all possible irrelevant characters before the
        # option groups (but within the {1,3} repetition group) or inside of each
        # option group, as it currently does; however, right now all tests that
        # were passing are still passing, including the fix for the bug of
        # matching a 4-digit year as ddyy when the day is absent from the string
self.RE_DATE3 = r'''(?P<date>
(?:
(?:^|\s+)
(?P<mthname>
{months}|{shortmonths}
)\b
|
(?:^|\s+)
(?P<day>[1-9]|[012]\d|3[01])
(?P<suffix>{daysuffix}|)\b
(?!\s*(?:{timecomponents}))
|
,?\s+
(?P<year>\d\d(?:\d\d|))\b
(?!\s*(?:{timecomponents}))
){{1,3}}
(?(mthname)|$-^)
)'''.format(**self.locale.re_values)
# not being used in code, but kept in case others are manually
# utilizing this regex for their own purposes
self.RE_MONTH = r'''(\s+|^)
(?P<month>
(
(?P<mthname>
\b({months}|{shortmonths})\b
)
(\s*
(?P<year>(\d{{4}}))
)?
)
)
(?=\s+|$|[^\w])'''.format(**self.locale.re_values)
self.RE_WEEKDAY = r'''\b
(?:
{days}|{shortdays}
)
\b'''.format(**self.locale.re_values)
self.RE_NUMBER = (r'(\b(?:{numbers})\b|\d+(?:{decimal_mark}\d+|))'
.format(**self.locale.re_values))
self.RE_SPECIAL = (r'(?P<special>^[{specials}]+)\s+'
.format(**self.locale.re_values))
self.RE_UNITS_ONLY = (r'''\b({units})\b'''
.format(**self.locale.re_values))
self.RE_UNITS = r'''\b(?P<qty>
-?
(?:\d+(?:{decimal_mark}\d+|)|(?:{numbers})\b)\s*
(?P<units>{units})
)\b'''.format(**self.locale.re_values)
self.RE_QUNITS = r'''\b(?P<qty>
-?
(?:\d+(?:{decimal_mark}\d+|)|(?:{numbers})\s+)\s*
(?P<qunits>{qunits})
)\b'''.format(**self.locale.re_values)
self.RE_MODIFIER = r'''\b(?:
{modifiers}
)\b'''.format(**self.locale.re_values)
self.RE_TIMEHMS = r'''([\s(\["'-]|^)
(?P<hours>\d\d?)
(?P<tsep>{timeseparator}|)
(?P<minutes>\d\d)
(?:(?P=tsep)
(?P<seconds>\d\d
(?:[\.,]\d+)?
)
)?\b'''.format(**self.locale.re_values)
self.RE_TIMEHMS2 = r'''([\s(\["'-]|^)
(?P<hours>\d\d?)
(?:
(?P<tsep>{timeseparator}|)
(?P<minutes>\d\d?)
(?:(?P=tsep)
(?P<seconds>\d\d?
(?:[\.,]\d+)?
)
)?
)?'''.format(**self.locale.re_values)
# 1, 2, and 3 here refer to the type of match date, time, or units
self.RE_NLP_PREFIX = r'''\b(?P<nlp_prefix>
(on)
(\s)+1
|
(at|in)
(\s)+2
|
(in)
(\s)+3
)'''
if 'meridian' in self.locale.re_values:
self.RE_TIMEHMS2 += (r'\s*(?P<meridian>{meridian})\b'
.format(**self.locale.re_values))
else:
self.RE_TIMEHMS2 += r'\b'
# Always support common . and - separators
dateSeps = ''.join(re.escape(s)
for s in self.locale.dateSep + ['-', '.'])
self.RE_DATE = r'''([\s(\["'-]|^)
(?P<date>
\d\d?[{0}]\d\d?(?:[{0}]\d\d(?:\d\d)?)?
|
\d{{4}}[{0}]\d\d?[{0}]\d\d?
)
\b'''.format(dateSeps)
self.RE_DATE2 = r'[{0}]'.format(dateSeps)
assert 'dayoffsets' in self.locale.re_values
self.RE_DAY = r'''\b
(?:
{dayoffsets}
)
\b'''.format(**self.locale.re_values)
self.RE_DAY2 = r'''(?P<day>\d\d?)
(?P<suffix>{daysuffix})?
'''.format(**self.locale.re_values)
self.RE_TIME = r'''\b
(?:
{sources}
)
\b'''.format(**self.locale.re_values)
self.RE_REMAINING = r'\s+'
# Regex for date/time ranges
self.RE_RTIMEHMS = r'''(\s*|^)
(\d\d?){timeseparator}
(\d\d)
({timeseparator}(\d\d))?
(\s*|$)'''.format(**self.locale.re_values)
self.RE_RTIMEHMS2 = (r'''(\s*|^)
(\d\d?)
({timeseparator}(\d\d?))?
({timeseparator}(\d\d?))?'''
.format(**self.locale.re_values))
if 'meridian' in self.locale.re_values:
self.RE_RTIMEHMS2 += (r'\s*({meridian})'
.format(**self.locale.re_values))
self.RE_RDATE = r'(\d+([%s]\d+)+)' % dateSeps
self.RE_RDATE3 = r'''(
(
(
\b({months})\b
)\s*
(
(\d\d?)
(\s?|{daysuffix}|$)+
)?
(,\s*\d{{4}})?
)
)'''.format(**self.locale.re_values)
# "06/07/06 - 08/09/06"
self.DATERNG1 = (r'{0}\s*{rangeseparator}\s*{0}'
.format(self.RE_RDATE, **self.locale.re_values))
# "march 31 - june 1st, 2006"
self.DATERNG2 = (r'{0}\s*{rangeseparator}\s*{0}'
.format(self.RE_RDATE3, **self.locale.re_values))
# "march 1rd -13th"
self.DATERNG3 = (r'{0}\s*{rangeseparator}\s*(\d\d?)\s*(rd|st|nd|th)?'
.format(self.RE_RDATE3, **self.locale.re_values))
# "4:00:55 pm - 5:90:44 am", '4p-5p'
self.TIMERNG1 = (r'{0}\s*{rangeseparator}\s*{0}'
.format(self.RE_RTIMEHMS2, **self.locale.re_values))
self.TIMERNG2 = (r'{0}\s*{rangeseparator}\s*{0}'
.format(self.RE_RTIMEHMS, **self.locale.re_values))
# "4-5pm "
self.TIMERNG3 = (r'\d\d?\s*{rangeseparator}\s*{0}'
.format(self.RE_RTIMEHMS2, **self.locale.re_values))
# "4:30-5pm "
self.TIMERNG4 = (r'{0}\s*{rangeseparator}\s*{1}'
.format(self.RE_RTIMEHMS, self.RE_RTIMEHMS2,
**self.locale.re_values))
self.re_option = re.IGNORECASE + re.VERBOSE
self.cre_source = {'CRE_SPECIAL': self.RE_SPECIAL,
'CRE_NUMBER': self.RE_NUMBER,
'CRE_UNITS': self.RE_UNITS,
'CRE_UNITS_ONLY': self.RE_UNITS_ONLY,
'CRE_QUNITS': self.RE_QUNITS,
'CRE_MODIFIER': self.RE_MODIFIER,
'CRE_TIMEHMS': self.RE_TIMEHMS,
'CRE_TIMEHMS2': self.RE_TIMEHMS2,
'CRE_DATE': self.RE_DATE,
'CRE_DATE2': self.RE_DATE2,
'CRE_DATE3': self.RE_DATE3,
'CRE_DATE4': self.RE_DATE4,
'CRE_MONTH': self.RE_MONTH,
'CRE_WEEKDAY': self.RE_WEEKDAY,
'CRE_DAY': self.RE_DAY,
'CRE_DAY2': self.RE_DAY2,
'CRE_TIME': self.RE_TIME,
'CRE_REMAINING': self.RE_REMAINING,
'CRE_RTIMEHMS': self.RE_RTIMEHMS,
'CRE_RTIMEHMS2': self.RE_RTIMEHMS2,
'CRE_RDATE': self.RE_RDATE,
'CRE_RDATE3': self.RE_RDATE3,
'CRE_TIMERNG1': self.TIMERNG1,
'CRE_TIMERNG2': self.TIMERNG2,
'CRE_TIMERNG3': self.TIMERNG3,
'CRE_TIMERNG4': self.TIMERNG4,
'CRE_DATERNG1': self.DATERNG1,
'CRE_DATERNG2': self.DATERNG2,
'CRE_DATERNG3': self.DATERNG3,
'CRE_NLP_PREFIX': self.RE_NLP_PREFIX}
self.cre_keys = set(self.cre_source.keys())
def __getattr__(self, name):
if name in self.cre_keys:
value = re.compile(self.cre_source[name], self.re_option)
setattr(self, name, value)
return value
elif name in self.locale.locale_keys:
return getattr(self.locale, name)
else:
raise AttributeError(name)
def daysInMonth(self, month, year):
"""
        Take the given month (1-12) and the given year (4 digit) and return
        the number of days in the month, adjusting for leap years as needed.
"""
result = None
debug and log.debug('daysInMonth(%s, %s)', month, year)
if month > 0 and month <= 12:
result = self._DaysInMonthList[month - 1]
if month == 2:
if year in self._leapYears:
result += 1
else:
if calendar.isleap(year):
self._leapYears.append(year)
result += 1
return result
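    # Illustrative usage sketch (an assumption, not part of the original source):
    # February is the only month whose length depends on the leap-year check.
    #
    #   >>> c = Constants()
    #   >>> c.daysInMonth(2, 2021), c.daysInMonth(2, 2020)
    #   (28, 29)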
def getSource(self, sourceKey, sourceTime=None):
"""
        Return a date/time tuple based on the given source key
        and the corresponding key found in self.re_sources.
The current time is used as the default and any specified
item found in self.re_sources is inserted into the value
and the generated dictionary is returned.
"""
if sourceKey not in self.re_sources:
return None
if sourceTime is None:
(yr, mth, dy, hr, mn, sec, wd, yd, isdst) = time.localtime()
else:
(yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
defaults = {'yr': yr, 'mth': mth, 'dy': dy,
'hr': hr, 'mn': mn, 'sec': sec}
source = self.re_sources[sourceKey]
values = {}
for key, default in defaults.items():
values[key] = source.get(key, default)
return (values['yr'], values['mth'], values['dy'],
values['hr'], values['mn'], values['sec'],
wd, yd, isdst)
| 37.71828
| 115
| 0.495676
|
fc716c8cd9834270266914ab5d0a0cdc6a973040
| 1,443
|
py
|
Python
|
examples/example_calculator/nodes/output.py
|
hello-arun/pyqt-node-editor
|
fa6cdd95020ef22055143c04279b538473fcdef3
|
[
"MIT"
] | 1
|
2021-12-05T17:46:42.000Z
|
2021-12-05T17:46:42.000Z
|
examples/example_calculator/nodes/output.py
|
hello-arun/pyqt-node-editor
|
fa6cdd95020ef22055143c04279b538473fcdef3
|
[
"MIT"
] | null | null | null |
examples/example_calculator/nodes/output.py
|
hello-arun/pyqt-node-editor
|
fa6cdd95020ef22055143c04279b538473fcdef3
|
[
"MIT"
] | 1
|
2021-09-21T07:41:02.000Z
|
2021-09-21T07:41:02.000Z
|
from qtpy.QtWidgets import QLabel
from qtpy.QtCore import Qt
from examples.example_calculator.calc_conf import register_node, OP_NODE_OUTPUT
from examples.example_calculator.calc_node_base import CalcNode, CalcGraphicsNode
from nodeeditor.node_content_widget import QDMNodeContentWidget
class CalcOutputContent(QDMNodeContentWidget):
def initUI(self):
self.lbl = QLabel("42", self)
self.lbl.setAlignment(Qt.AlignLeft)
self.lbl.setObjectName(self.node.content_label_objname)
@register_node(OP_NODE_OUTPUT)
class CalcNode_Output(CalcNode):
icon = "icons/out.png"
op_code = OP_NODE_OUTPUT
op_title = "Output"
content_label_objname = "calc_node_output"
def __init__(self, scene):
super().__init__(scene, inputs=[1], outputs=[])
def initInnerClasses(self):
self.content = CalcOutputContent(self)
self.grNode = CalcGraphicsNode(self)
def evalImplementation(self):
input_node = self.getInput(0)
if not input_node:
self.grNode.setToolTip("Input is not connected")
self.markInvalid()
return
val = input_node.eval()
if val is None:
self.grNode.setToolTip("Input is NaN")
self.markInvalid()
return
self.content.lbl.setText("%d" % val)
self.markInvalid(False)
self.markDirty(False)
self.grNode.setToolTip("")
return val
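    # Illustrative note (an assumption, not part of the original example): once the
    # node is registered via @register_node(OP_NODE_OUTPUT) the editor can create it
    # in a scene; evalImplementation() then pulls the value from the single input
    # socket and renders it in the label, e.g. a connected input evaluating to 7
    # would display "7".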
| 29.44898
| 81
| 0.680527
|
ee1d341407ef293038ad79ee5fa5c32391ee4da5
| 34,224
|
py
|
Python
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_04_01/operations/_snapshots_operations.py
|
iscai-msft/azure-sdk-for-python
|
83715b95c41e519d5be7f1180195e2fba136fc0f
|
[
"MIT"
] | 1
|
2021-06-02T08:01:35.000Z
|
2021-06-02T08:01:35.000Z
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_04_01/operations/_snapshots_operations.py
|
iscai-msft/azure-sdk-for-python
|
83715b95c41e519d5be7f1180195e2fba136fc0f
|
[
"MIT"
] | 226
|
2019-07-24T07:57:21.000Z
|
2019-10-15T01:07:24.000Z
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_04_01/operations/_snapshots_operations.py
|
iscai-msft/azure-sdk-for-python
|
83715b95c41e519d5be7f1180195e2fba136fc0f
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class SnapshotsOperations(object):
"""SnapshotsOperations operations.
    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client Api Version. Constant value: "2018-04-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2018-04-01"
self.config = config
def _create_or_update_initial(
self, resource_group_name, snapshot_name, snapshot, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'snapshotName': self._serialize.url("snapshot_name", snapshot_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(snapshot, 'Snapshot')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Snapshot', response)
if response.status_code == 202:
deserialized = self._deserialize('Snapshot', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, snapshot_name, snapshot, custom_headers=None, raw=False, polling=True, **operation_config):
"""Creates or updates a snapshot.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param snapshot_name: The name of the snapshot that is being created.
The name can't be changed after the snapshot is created. Supported
characters for the name are a-z, A-Z, 0-9 and _. The max name length
is 80 characters.
:type snapshot_name: str
:param snapshot: Snapshot object supplied in the body of the Put disk
operation.
:type snapshot: ~azure.mgmt.compute.v2018_04_01.models.Snapshot
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns Snapshot or
ClientRawResponse<Snapshot> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2018_04_01.models.Snapshot]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2018_04_01.models.Snapshot]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
snapshot_name=snapshot_name,
snapshot=snapshot,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('Snapshot', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}'}
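    # Illustrative usage sketch (an assumption, not part of the generated code):
    # callers typically block on the returned LROPoller. The client, resource
    # group and snapshot names below are hypothetical.
    #
    #   poller = compute_client.snapshots.create_or_update(
    #       resource_group_name="my-rg",
    #       snapshot_name="my-snapshot",
    #       snapshot=snapshot_definition)
    #   snapshot = poller.result()   # waits for the ARM operation to complete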
def _update_initial(
self, resource_group_name, snapshot_name, snapshot, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.update.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'snapshotName': self._serialize.url("snapshot_name", snapshot_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(snapshot, 'SnapshotUpdate')
# Construct and send request
request = self._client.patch(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Snapshot', response)
if response.status_code == 202:
deserialized = self._deserialize('Snapshot', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def update(
self, resource_group_name, snapshot_name, snapshot, custom_headers=None, raw=False, polling=True, **operation_config):
"""Updates (patches) a snapshot.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param snapshot_name: The name of the snapshot that is being created.
The name can't be changed after the snapshot is created. Supported
characters for the name are a-z, A-Z, 0-9 and _. The max name length
is 80 characters.
:type snapshot_name: str
:param snapshot: Snapshot object supplied in the body of the Patch
snapshot operation.
:type snapshot: ~azure.mgmt.compute.v2018_04_01.models.SnapshotUpdate
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns Snapshot or
ClientRawResponse<Snapshot> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2018_04_01.models.Snapshot]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2018_04_01.models.Snapshot]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._update_initial(
resource_group_name=resource_group_name,
snapshot_name=snapshot_name,
snapshot=snapshot,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('Snapshot', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}'}
def get(
self, resource_group_name, snapshot_name, custom_headers=None, raw=False, **operation_config):
"""Gets information about a snapshot.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param snapshot_name: The name of the snapshot that is being created.
The name can't be changed after the snapshot is created. Supported
characters for the name are a-z, A-Z, 0-9 and _. The max name length
is 80 characters.
:type snapshot_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: Snapshot or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.compute.v2018_04_01.models.Snapshot or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'snapshotName': self._serialize.url("snapshot_name", snapshot_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Snapshot', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}'}
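    # Illustrative usage sketch (an assumption, not part of the generated code):
    # get() is synchronous and returns a deserialized Snapshot model, or a
    # ClientRawResponse when raw=True. Names below are hypothetical.
    #
    #   snap = compute_client.snapshots.get("my-rg", "my-snapshot")
    #   print(snap.provisioning_state)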
def _delete_initial(
self, resource_group_name, snapshot_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'snapshotName': self._serialize.url("snapshot_name", snapshot_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, snapshot_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Deletes a snapshot.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param snapshot_name: The name of the snapshot that is being created.
The name can't be changed after the snapshot is created. Supported
characters for the name are a-z, A-Z, 0-9 and _. The max name length
is 80 characters.
:type snapshot_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
snapshot_name=snapshot_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}'}
def list_by_resource_group(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Lists snapshots under a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of Snapshot
:rtype:
~azure.mgmt.compute.v2018_04_01.models.SnapshotPaged[~azure.mgmt.compute.v2018_04_01.models.Snapshot]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.SnapshotPaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots'}
def list(
self, custom_headers=None, raw=False, **operation_config):
"""Lists snapshots under a subscription.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of Snapshot
:rtype:
~azure.mgmt.compute.v2018_04_01.models.SnapshotPaged[~azure.mgmt.compute.v2018_04_01.models.Snapshot]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.SnapshotPaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/snapshots'}
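    # Hedged usage sketch (for illustration only): how the paged list
    # operation above is typically consumed. "compute_client" is an assumed
    # ComputeManagementClient instance, not a name defined in this file.
    #
    #   for snapshot in compute_client.snapshots.list():
    #       print(snapshot.name)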
def _grant_access_initial(
self, resource_group_name, snapshot_name, access, duration_in_seconds, custom_headers=None, raw=False, **operation_config):
grant_access_data = models.GrantAccessData(access=access, duration_in_seconds=duration_in_seconds)
# Construct URL
url = self.grant_access.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'snapshotName': self._serialize.url("snapshot_name", snapshot_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(grant_access_data, 'GrantAccessData')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('AccessUri', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def grant_access(
self, resource_group_name, snapshot_name, access, duration_in_seconds, custom_headers=None, raw=False, polling=True, **operation_config):
"""Grants access to a snapshot.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param snapshot_name: The name of the snapshot that is being created.
The name can't be changed after the snapshot is created. Supported
characters for the name are a-z, A-Z, 0-9 and _. The max name length
is 80 characters.
:type snapshot_name: str
:param access: Possible values include: 'None', 'Read'
:type access: str or
~azure.mgmt.compute.v2018_04_01.models.AccessLevel
:param duration_in_seconds: Time duration in seconds until the SAS
access expires.
:type duration_in_seconds: int
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns AccessUri or
ClientRawResponse<AccessUri> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2018_04_01.models.AccessUri]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2018_04_01.models.AccessUri]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._grant_access_initial(
resource_group_name=resource_group_name,
snapshot_name=snapshot_name,
access=access,
duration_in_seconds=duration_in_seconds,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('AccessUri', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
grant_access.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/beginGetAccess'}
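    # Hedged usage sketch (for illustration only): consuming the long-running
    # grant_access operation above. "compute_client", "my-rg" and
    # "my-snapshot" are assumed placeholder names.
    #
    #   poller = compute_client.snapshots.grant_access(
    #       "my-rg", "my-snapshot", access="Read", duration_in_seconds=3600)
    #   access_uri = poller.result()   # blocks until ARM polling completes
    #   print(access_uri.access_sas)   # SAS URI granting read access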
def _revoke_access_initial(
self, resource_group_name, snapshot_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.revoke_access.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'snapshotName': self._serialize.url("snapshot_name", snapshot_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def revoke_access(
self, resource_group_name, snapshot_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Revokes access to a snapshot.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param snapshot_name: The name of the snapshot that is being created.
The name can't be changed after the snapshot is created. Supported
characters for the name are a-z, A-Z, 0-9 and _. The max name length
is 80 characters.
:type snapshot_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._revoke_access_initial(
resource_group_name=resource_group_name,
snapshot_name=snapshot_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
revoke_access.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/endGetAccess'}
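    # Hedged usage sketch (for illustration only): revoking the SAS access
    # granted earlier once it is no longer needed. "compute_client" is an
    # assumed ComputeManagementClient instance.
    #
    #   compute_client.snapshots.revoke_access("my-rg", "my-snapshot").wait()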
| 47.010989
| 173
| 0.66953
|
901f1a5abdd21902b79a9623366ba3c2f0440e03
| 862
|
py
|
Python
|
dpsutil/common/hw.py
|
connortran216/DPS_Util
|
8e6af59c3cc5d4addf3694ee0dfede08206ec4b3
|
[
"MIT"
] | 1
|
2021-01-19T03:14:42.000Z
|
2021-01-19T03:14:42.000Z
|
dpsutil/common/hw.py
|
connortran216/DPS_Util
|
8e6af59c3cc5d4addf3694ee0dfede08206ec4b3
|
[
"MIT"
] | 1
|
2021-01-27T09:50:33.000Z
|
2021-01-27T09:50:33.000Z
|
dpsutil/common/hw.py
|
connortran216/DPS_Util
|
8e6af59c3cc5d4addf3694ee0dfede08206ec4b3
|
[
"MIT"
] | 3
|
2020-03-24T02:49:47.000Z
|
2021-02-26T04:05:06.000Z
|
import os
import subprocess
def number_of_gpus():
"""
    Count the number of NVIDIA GPUs visible to nvidia-smi.
"""
return int(subprocess.getoutput("nvidia-smi --query-gpu=name --format=csv,noheader | wc -l"))
def number_of_cores():
"""
number_of_cores()
Detect the number of cores in this system.
"""
# Linux, Unix and MacOS:
if hasattr(os, "sysconf"):
if "SC_NPROCESSORS_ONLN" in os.sysconf_names:
# Linux & Unix:
ncpus = os.sysconf("SC_NPROCESSORS_ONLN")
if isinstance(ncpus, int) and ncpus > 0:
return ncpus
else: # OSX:
            # macOS: os.popen2 no longer exists in Python 3; query sysctl instead.
            return int(subprocess.getoutput("sysctl -n hw.ncpu"))
# Windows:
if "NUMBER_OF_PROCESSORS" in os.environ:
ncpus = int(os.environ["NUMBER_OF_PROCESSORS"])
if ncpus > 0:
return ncpus
return 1 # Default
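if __name__ == "__main__":
    # Minimal usage sketch (for illustration): print the detected CPU core
    # count. number_of_gpus() is not called here because it assumes that
    # nvidia-smi is available on the PATH.
    print("CPU cores detected:", number_of_cores())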
| 25.352941
| 97
| 0.588167
|
09b91b53a96e9895102078431a2ca6f6ff3955f8
| 1,001
|
py
|
Python
|
tests/test_Objective.py
|
light-weaver/desdeo_problem
|
f3732bdd154ea5b6e94566d4daaf9fea67669646
|
[
"MIT"
] | null | null | null |
tests/test_Objective.py
|
light-weaver/desdeo_problem
|
f3732bdd154ea5b6e94566d4daaf9fea67669646
|
[
"MIT"
] | null | null | null |
tests/test_Objective.py
|
light-weaver/desdeo_problem
|
f3732bdd154ea5b6e94566d4daaf9fea67669646
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import pytest
from desdeo_problem.problem.Objective import (
ObjectiveError,
ObjectiveEvaluationResults,
VectorDataObjective,
VectorObjective,
)
# ============= utils ============
# Just testing the basic functionality: the evaluator simply returns its input vector.
def evaluator(x):
return x
# This should fail because no evaluator was given to the VectorDataObjective.
def evaluate_vec_data_obj():
data = [["f1", 4.0], ["f2", 7.0]]
df = pd.DataFrame(data, columns=["f1", "f2"])
vec_data_obj = VectorDataObjective(["f1", "f2"], data=df)
vec_data_obj.evaluate(np.array([1.1, 1.1, 1.1]))
# ============= TESTs ==========
def test_evalutation_fails():
with pytest.raises(ObjectiveError):
evaluate_vec_data_obj()
def test_obj():
vec_obj = VectorObjective(["f1, f2, f3"], evaluator=evaluator)
res = vec_obj.evaluate(np.array([1.1, 1.1, 1.1]))
    assert type(res) is ObjectiveEvaluationResults, "evaluate() did not return an ObjectiveEvaluationResults instance"
| 25.666667
| 74
| 0.663337
|
760f6071057a8f26719ee19669bfb41a6187b5ab
| 2,010
|
py
|
Python
|
src/detailRecord.py
|
moenova/Human-Memory-Manager
|
02b2d8ffd78a3ddcd693f0d991bd4c5d0ef54c80
|
[
"MIT"
] | 6
|
2019-09-20T01:15:05.000Z
|
2020-05-20T20:14:39.000Z
|
src/detailRecord.py
|
moenova/Memory-Helper
|
02b2d8ffd78a3ddcd693f0d991bd4c5d0ef54c80
|
[
"MIT"
] | null | null | null |
src/detailRecord.py
|
moenova/Memory-Helper
|
02b2d8ffd78a3ddcd693f0d991bd4c5d0ef54c80
|
[
"MIT"
] | null | null | null |
from lzpy import Root, Table
from pprint import pprint
from datetime import datetime,timedelta
import time
class DetailRecord:
"""
def nkv2str(name,qkey,qvalue):
return "{}:{}-{}".format(name, qkey, qvalue)
"""
subpath = "rec/"
timeout = timedelta(minutes=5)
def __init__(self,recordname,keys):
self.recordname = recordname
self.keys = keys
# initialize record
encoding_save = Root.encoding
try:
self.record = Root.read(recordname,subpath = DetailRecord.subpath)
except:
self.record = Root({}, name=recordname,subpath = DetailRecord.subpath)
for key in keys:
self.record.body[key] = []
Root.encoding = encoding_save
# initialize current value
self.body =self.record.body
dt = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
for key in self.body:
sub = {"dt": dt,
"c": 0,
"p": timedelta()}
self.body[key].append(sub)
def incTotal(self,key):
self.body[key][-1]["c"]+=1
def plusDuration(self,key,amount):
if not isinstance(amount,timedelta):
raise Exception("amount should be 'timedelta' object")
if amount < DetailRecord.timeout:
self.body[key][-1]["p"] += amount
else:
print("timeout {}".format(DetailRecord.timeout))
def save(self):
for key in self.record.body:
for item in self.record.body[key]:
item["p"] = str(item["p"])
self.record.save()
#del self.record
if __name__ == "__main__":
name= "testrecordname"
kv = {"A":{"a":"","aa":"","aaa":""},
"C":{"c":""},
"D":{"d":"","dd":"","ddd":""},
"G":{"g":""}}
dr = DetailRecord(name,kv)
dr.incTotal("A")
dr.plusDuration("C",timedelta(minutes=2))
dr.save()
| 27.534247
| 83
| 0.516418
|
967acb40e7f30ea406bf24edfe270dd4cf59779a
| 673
|
py
|
Python
|
accounts/forms.py
|
HRangelov/gallery
|
3ccf712ef2e1765a6dfd6567d58e6678e0b2ff6f
|
[
"MIT"
] | null | null | null |
accounts/forms.py
|
HRangelov/gallery
|
3ccf712ef2e1765a6dfd6567d58e6678e0b2ff6f
|
[
"MIT"
] | null | null | null |
accounts/forms.py
|
HRangelov/gallery
|
3ccf712ef2e1765a6dfd6567d58e6678e0b2ff6f
|
[
"MIT"
] | null | null | null |
from django import forms
from django.contrib.auth.forms import UserCreationForm
from accounts.models import UserProfile
from common.BootstrapFormMixin import BootstrapFormMixin
class SignUpForm(UserCreationForm, BootstrapFormMixin):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setup_form()
class UserProfileForm(forms.ModelForm):
class Meta:
model = UserProfile
fields = ('profile_picture', 'email')
# def clean_email(self):
# email = self.cleaned_data.get('email', False)
# if not email:
# raise forms.ValidationError('Email is required')
# return email
| 28.041667
| 62
| 0.690936
|
d29d34529201cfa72ceb6521a152e84f7336964f
| 1,426
|
py
|
Python
|
blog/models.py
|
Microcore/Quantic
|
727234a5e9e58217eb8235b6d0f27fe1b95f5f83
|
[
"MIT"
] | null | null | null |
blog/models.py
|
Microcore/Quantic
|
727234a5e9e58217eb8235b6d0f27fe1b95f5f83
|
[
"MIT"
] | 7
|
2015-04-25T05:04:46.000Z
|
2015-04-26T08:24:36.000Z
|
blog/models.py
|
Microcore/Quantic
|
727234a5e9e58217eb8235b6d0f27fe1b95f5f83
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Models for Blog app.
'''
from django.db import models
class Tag(models.Model):
text = models.CharField(max_length = 20, unique = True)
class Author(models.Model):
name = models.CharField(max_length = 20, unique = True)
email = models.EmailField(unique = True)
url = models.URLField(blank = True)
bio = models.CharField(max_length = 300, blank = True)
class Post(models.Model):
title = models.CharField(max_length = 100)
content = models.TextField()
tag = models.ManyToManyField(Tag)
time = models.DateTimeField()
author = models.ForeignKey(Author)
slug = models.CharField(max_length = 100, unique = True)
# doctype in ['markdown', 'html', 'plaintext']
doctype = models.CharField(max_length = 20)
class Comment(models.Model):
post = models.ForeignKey(Post)
author = models.ForeignKey(Author)
content = models.TextField()
time = models.DateTimeField()
# Fields used to judge spam
IP = models.IPAddressField()
UA = models.CharField(max_length = 200)
spam = models.BooleanField(default = True)
akismeted = models.BooleanField(default = False)
class Option(models.Model):
name = models.CharField(max_length = 40, unique = True)
value = models.CharField(max_length = 100)
text = models.CharField(max_length = 100)
class QuietEmail(models.Model):
email = models.EmailField(unique = True)
| 27.960784
| 60
| 0.686536
|
f0d067e68e15b0d4e6fcc15142f8df01766d7bb6
| 384
|
py
|
Python
|
papaya/papaya_app/models.py
|
marinaskevin/Django-Angular
|
1673ecfe2456b861e6d14f517be1659da0877395
|
[
"CNRI-Python-GPL-Compatible"
] | null | null | null |
papaya/papaya_app/models.py
|
marinaskevin/Django-Angular
|
1673ecfe2456b861e6d14f517be1659da0877395
|
[
"CNRI-Python-GPL-Compatible"
] | 19
|
2020-06-05T20:43:10.000Z
|
2022-03-02T07:13:31.000Z
|
papaya/papaya_app/models.py
|
marinaskevin/Django-Angular
|
1673ecfe2456b861e6d14f517be1659da0877395
|
[
"CNRI-Python-GPL-Compatible"
] | null | null | null |
from django.db import models
from django.conf import settings
# Create your models here.
class Papaya(models.Model):
id = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, primary_key=True)
name = models.CharField(max_length=200)
class Task(models.Model):
id = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, primary_key=True)
| 32
| 96
| 0.786458
|
49edde01d518acae4bece7f238da27a58de337ea
| 23,684
|
py
|
Python
|
plasmapy/atomic/ionization_state.py
|
JvPy/PlasmaPy
|
9ba3f4eb5dbbb1a3d7b25527e0d5eb62c5086edf
|
[
"MIT",
"BSD-2-Clause-Patent",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1
|
2020-02-05T18:40:50.000Z
|
2020-02-05T18:40:50.000Z
|
plasmapy/atomic/ionization_state.py
|
JvPy/PlasmaPy
|
9ba3f4eb5dbbb1a3d7b25527e0d5eb62c5086edf
|
[
"MIT",
"BSD-2-Clause-Patent",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
plasmapy/atomic/ionization_state.py
|
JvPy/PlasmaPy
|
9ba3f4eb5dbbb1a3d7b25527e0d5eb62c5086edf
|
[
"MIT",
"BSD-2-Clause-Patent",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
"""
Objects for storing ionization state data for a single element or for
a single ionization level.
"""
__all__ = ["IonizationState", "State"]
import collections
import numpy as np
import warnings
from astropy import units as u
from numbers import (Integral, Real)
from plasmapy.atomic import Particle, particle_input
from plasmapy.atomic.exceptions import AtomicError, ChargeError, InvalidParticleError
from plasmapy.utils.decorators import validate_quantities
from typing import (Union, List, Optional)
_number_density_errmsg = (
"Number densities must be Quantity objects with units of inverse "
"volume."
)
# TODO: Change `State` into a class with validations for all of the
# TODO: attributes.
State = collections.namedtuple(
'State', [
'integer_charge',
'ionic_fraction',
'ionic_symbol',
'number_density',
])
class IonizationState:
"""
Representation of the ionization state distribution of a single
element or isotope.
Parameters
----------
particle: str, integer, or ~plasmapy.atomic.Particle
A `str` or `~plasmapy.atomic.Particle` instance representing
an element or isotope, or an integer representing the atomic
number of an element.
ionic_fractions: ~numpy.ndarray, list, tuple, or ~astropy.units.Quantity; optional
The ionization fractions of an element, where the indices
correspond to integer charge. This argument should contain the
atomic number plus one items, and must sum to one within an
absolute tolerance of ``tol`` if dimensionless. Alternatively,
this argument may be a `~astropy.units.Quantity` that represents
the number densities of each neutral/ion.
T_e: ~astropy.units.Quantity, keyword-only, optional
The electron temperature or thermal energy per particle.
n_elem: ~astropy.units.Quantity, keyword-only, optional
The number density of the element, including neutrals and all
ions.
tol: float or integer, keyword-only, optional
The absolute tolerance used by `~numpy.isclose` when testing
normalizations and making comparisons. Defaults to ``1e-15``.
Raises
------
~plasmapy.utils.AtomicError
If the ionic fractions are not normalized or contain invalid
values, or if number density information is provided through
both ``ionic_fractions`` and ``n_elem``.
~plasmapy.utils.InvalidParticleError
If the particle is invalid.
Examples
--------
>>> states = IonizationState('H', [0.6, 0.4], n_elem=1*u.cm**-3, T_e=11000*u.K)
>>> states.ionic_fractions[0] # fraction of hydrogen that is neutral
0.6
>>> states.ionic_fractions[1] # fraction of hydrogen that is ionized
0.4
>>> states.n_e # electron number density
<Quantity 400000. 1 / m3>
>>> states.n_elem # element number density
<Quantity 1000000. 1 / m3>
Notes
-----
Calculation of collisional ionization equilibrium has not yet been
implemented.
"""
# TODO: Allow this class to (optionally?) handle negatively charged
# TODO: ions. There are instances where singly negatively charged
# TODO: ions are important in astrophysical plasmas, such as H- in
# TODO: the atmospheres of relatively cool stars. There may be some
# TODO: rare situations where doubly negatively charged ions show up
# TODO: too, but triply negatively charged ions are very unlikely.
# TODO: Add in functionality to find equilibrium ionization states.
@validate_quantities(T_e={'equivalencies': u.temperature_energy()})
@particle_input(require='element', exclude='ion')
def __init__(self,
particle: Particle,
ionic_fractions=None,
*,
T_e: u.K = np.nan * u.K,
kappa: Real = np.inf,
n_elem: u.m ** -3 = np.nan * u.m ** -3,
tol: Union[float, int] = 1e-15):
"""Initialize an `~plasmapy.atomic.IonizationState` instance."""
self._particle_instance = particle
try:
self.tol = tol
self.T_e = T_e
self.kappa = kappa
if not np.isnan(n_elem) and isinstance(ionic_fractions, u.Quantity) and \
ionic_fractions.si.unit == u.m ** -3:
raise AtomicError(
"Cannot simultaneously provide number density "
"through both n_elem and ionic_fractions.")
self.n_elem = n_elem
self.ionic_fractions = ionic_fractions
if ionic_fractions is None and not np.isnan(self.T_e):
warnings.warn(
"Collisional ionization equilibration has not yet "
"been implemented in IonizationState; cannot set "
"ionic fractions.")
except Exception as exc:
raise AtomicError(
f"Unable to create IonizationState instance for "
f"{particle.particle}.") from exc
def __str__(self) -> str:
return f"<IonizationState instance for {self.base_particle}>"
def __repr__(self) -> str:
return self.__str__()
def __getitem__(self, value) -> State:
"""Return information for a single ionization level."""
if isinstance(value, slice):
raise TypeError("IonizationState instances cannot be sliced.")
if isinstance(value, Integral) and 0 <= value <= self.atomic_number:
result = State(
value,
self.ionic_fractions[value],
self.ionic_symbols[value],
self.number_densities[value],
)
else:
if not isinstance(value, Particle):
try:
value = Particle(value)
except InvalidParticleError as exc:
raise InvalidParticleError(
f"{value} is not a valid integer charge or "
f"particle.") from exc
same_element = value.element == self.element
same_isotope = value.isotope == self.isotope
has_charge_info = value.is_category(any_of=["charged", "uncharged"])
if same_element and same_isotope and has_charge_info:
Z = value.integer_charge
result = State(
Z,
self.ionic_fractions[Z],
self.ionic_symbols[Z],
self.number_densities[Z],
)
else:
if not same_element or not same_isotope:
raise AtomicError("Inconsistent element or isotope.")
elif not has_charge_info:
raise ChargeError("No integer charge provided.")
return result
def __setitem__(self, key, value):
raise NotImplementedError(
"Item assignment of an IonizationState instance is not "
"allowed because the ionic fractions for different "
"ionization levels must be set simultaneously due to the "
"normalization constraint.")
def __iter__(self):
"""Initialize an instance prior to iteration."""
self._charge_index = 0
return self
def __next__(self):
"""
Return a `~plasmapy.atomic.State` instance that contains
information about a particular ionization level.
"""
if self._charge_index <= self.atomic_number:
result = State(
self._charge_index,
self._ionic_fractions[self._charge_index],
self.ionic_symbols[self._charge_index],
self.number_densities[self._charge_index],
)
self._charge_index += 1
return result
else:
del self._charge_index
raise StopIteration
def __eq__(self, other):
"""
Return `True` if the ionic fractions, number density scaling
factor (if set), and electron temperature (if set) are all
equal, and `False` otherwise.
Raises
------
TypeError
If ``other`` is not an `~plasmapy.atomic.IonizationState`
instance.
AtomicError
If ``other`` corresponds to a different element or isotope.
Examples
--------
>>> IonizationState('H', [1, 0], tol=1e-6) == IonizationState('H', [1, 1e-6], tol=1e-6)
True
>>> IonizationState('H', [1, 0], tol=1e-8) == IonizationState('H', [1, 1e-6], tol=1e-5)
False
"""
if not isinstance(other, IonizationState):
raise TypeError(
"An instance of the IonizationState class may only be "
"compared with another IonizationState instance.")
same_element = self.element == other.element
same_isotope = self.isotope == other.isotope
if not same_element or not same_isotope:
raise AtomicError(
"An instance of the IonizationState class may only be "
"compared with another IonizationState instance if "
"both correspond to the same element and/or isotope.")
# Use the tighter of the two tolerances. For thermodynamic
# quantities, use it as a relative tolerance because the values
# may substantially depart from order unity.
min_tol = np.min([self.tol, other.tol])
same_T_e = np.isnan(self.T_e) and np.isnan(other.T_e) or \
u.allclose(self.T_e, other.T_e, rtol=min_tol*u.K, atol=0*u.K)
same_n_elem = np.isnan(self.n_elem) and np.isnan(other.n_elem) or \
u.allclose(self.n_elem, other.n_elem, rtol=min_tol*u.m**-3, atol=0*u.m**-3)
# For the next line, recall that np.nan == np.nan is False (sigh)
same_fractions = np.any([
np.allclose(self.ionic_fractions, other.ionic_fractions, rtol=0, atol=min_tol),
np.all(np.isnan(self.ionic_fractions)) and np.all(np.isnan(other.ionic_fractions)),
])
return np.all([same_element, same_isotope, same_T_e, same_n_elem, same_fractions])
@property
def ionic_fractions(self) -> np.ndarray:
"""
Return the ionic fractions, where the index corresponds to
the integer charge.
Examples
--------
>>> hydrogen_states = IonizationState('H', [0.9, 0.1])
>>> hydrogen_states.ionic_fractions
array([0.9, 0.1])
"""
return self._ionic_fractions
@ionic_fractions.setter
def ionic_fractions(self, fractions):
"""
Set the ionic fractions, while checking that the new values are
valid and normalized to one.
"""
if fractions is None or np.all(np.isnan(fractions)):
self._ionic_fractions = np.full(self.atomic_number + 1, np.nan, dtype=np.float64)
return
try:
if np.min(fractions) < 0:
raise AtomicError("Cannot have negative ionic fractions.")
if len(fractions) != self.atomic_number + 1:
raise AtomicError(
"The length of ionic_fractions must be "
f"{self.atomic_number + 1}.")
if isinstance(fractions, u.Quantity):
fractions = fractions.to(u.m ** -3)
self.n_elem = np.sum(fractions)
self._ionic_fractions = np.array(fractions/self.n_elem)
else:
fractions = np.array(fractions, dtype=np.float64)
sum_of_fractions = np.sum(fractions)
all_nans = np.all(np.isnan(fractions))
if not all_nans:
if np.any(fractions < 0) or np.any(fractions > 1):
raise AtomicError("Ionic fractions must be between 0 and 1.")
if not np.isclose(sum_of_fractions, 1, rtol=0, atol=self.tol):
raise AtomicError("Ionic fractions must sum to one.")
self._ionic_fractions = fractions
except Exception as exc:
raise AtomicError(f"Unable to set ionic fractions of {self.element} "
f"to {fractions}.") from exc
def _is_normalized(self, tol: Optional[Real] = None) -> bool:
"""
Return `True` if the sum of the ionization fractions is equal to
one within the allowed tolerance, and `False` otherwise.
"""
tol = tol if tol is not None else self.tol
if not isinstance(tol, Real):
raise TypeError("tol must be an int or float.")
if not 0 <= tol < 1:
raise ValueError("Need 0 <= tol < 1.")
total = np.sum(self._ionic_fractions)
return np.isclose(total, 1, atol=tol, rtol=0)
def normalize(self) -> None:
"""
Normalize the ionization state distribution (if set) so that the
sum becomes equal to one.
"""
self._ionic_fractions = self._ionic_fractions / np.sum(self._ionic_fractions)
@property
def equil_ionic_fractions(self, T_e: u.K = None):
"""
Return the equilibrium ionic fractions for temperature ``T_e``
or the temperature set in the IonizationState instance. Not
implemented.
"""
raise NotImplementedError
@u.quantity_input(equivalencies=u.temperature_energy())
def equilibrate(self, T_e: u.K = np.nan * u.K):
"""
Set the ionic fractions to collisional ionization equilibrium
for temperature ``T_e``. Not implemented.
"""
# self.ionic_fractions = self.equil_ionic_fractions
raise NotImplementedError
@property
@u.quantity_input
def n_e(self) -> u.m ** -3:
"""
Return the electron number density assuming a single species
plasma.
"""
return np.sum(self._n_elem * self.ionic_fractions * self.integer_charges)
@property
@u.quantity_input
def n_elem(self) -> u.m ** -3:
"""Return the total number density of neutrals and all ions."""
return self._n_elem.to(u.m ** -3)
@n_elem.setter
@u.quantity_input
def n_elem(self, value: u.m ** -3):
"""Set the number density of neutrals and all ions."""
if value < 0 * u.m ** -3:
raise AtomicError
if 0 * u.m ** -3 < value <= np.inf * u.m ** -3:
self._n_elem = value.to(u.m ** -3)
elif np.isnan(value):
self._n_elem = np.nan * u.m ** -3
@property
@u.quantity_input
def number_densities(self) -> u.m ** -3:
"""Return the number densities for each state."""
try:
return (self.n_elem * self.ionic_fractions).to(u.m ** -3)
except Exception:
return np.full(self.atomic_number + 1, np.nan) * u.m ** -3
@number_densities.setter
@u.quantity_input
def number_densities(self, value: u.m ** -3):
"""Set the number densities for each state."""
if np.any(value.value < 0):
raise AtomicError("Number densities cannot be negative.")
if len(value) != self.atomic_number + 1:
raise AtomicError(
f"Incorrect number of charge states for "
f"{self.base_particle}")
value = value.to(u.m ** -3)
self._n_elem = value.sum()
self._ionic_fractions = value / self._n_elem
@property
@u.quantity_input(equivalencies=u.temperature_energy())
def T_e(self) -> u.K:
"""Return the electron temperature."""
if self._T_e is None:
raise AtomicError("No electron temperature has been specified.")
return self._T_e.to(u.K, equivalencies=u.temperature_energy())
@T_e.setter
@u.quantity_input(equivalencies=u.temperature_energy())
def T_e(self, value: u.K):
"""Set the electron temperature."""
try:
value = value.to(u.K, equivalencies=u.temperature_energy())
except (AttributeError, u.UnitsError, u.UnitConversionError):
raise AtomicError("Invalid temperature.") from None
else:
if value < 0 * u.K:
raise AtomicError("T_e cannot be negative.")
self._T_e = value
@property
def kappa(self) -> np.real:
"""
Return the kappa parameter for a kappa distribution function
for electrons.
The value of ``kappa`` must be greater than ``1.5`` in order to
have a valid distribution function. If ``kappa`` equals
`~numpy.inf`, then the distribution function reduces to a
Maxwellian.
"""
return self._kappa
@kappa.setter
def kappa(self, value: Real):
"""
Set the kappa parameter for a kappa distribution function for
electrons. The value must be between ``1.5`` and `~numpy.inf`.
"""
kappa_errmsg = "kappa must be a real number greater than 1.5"
if not isinstance(value, Real):
raise TypeError(kappa_errmsg)
if value <= 1.5:
raise ValueError(kappa_errmsg)
self._kappa = np.real(value)
@property
def element(self) -> str:
"""Return the atomic symbol of the element."""
return self._particle_instance.element
@property
def isotope(self) -> Optional[str]:
"""
Return the isotope symbol for an isotope, or `None` if the
particle is not an isotope.
"""
return self._particle_instance.isotope
@property
def base_particle(self) -> str:
"""Return the symbol of the element or isotope."""
return self.isotope if self.isotope else self.element
@property
def atomic_number(self) -> int:
"""Return the atomic number of the element."""
return self._particle_instance.atomic_number
@property
def _particle_instances(self) -> List[Particle]:
"""
Return a list of the `~plasmapy.atomic.Particle` class
instances corresponding to each ion.
"""
return [
Particle(self._particle_instance.particle, Z=i)
for i in range(self.atomic_number + 1)
]
@property
def ionic_symbols(self) -> List[str]:
"""Return the ionic symbols for all charge states."""
return [particle.ionic_symbol for particle in self._particle_instances]
@property
def integer_charges(self) -> np.ndarray:
"""Return an array with the integer charges."""
        return np.arange(0, self.atomic_number + 1, dtype=int)
@property
def Z_mean(self) -> np.float64:
"""Return the mean integer charge"""
if np.nan in self.ionic_fractions:
raise ChargeError(
"Z_mean cannot be found because no ionic fraction "
f"information is available for {self.base_particle}.")
return np.sum(self.ionic_fractions * np.arange(self.atomic_number + 1))
@property
def Z_rms(self) -> np.float64:
"""Return the root mean square integer charge."""
return np.sqrt(np.sum(self.ionic_fractions * np.arange(self.atomic_number + 1) ** 2))
@property
def Z_most_abundant(self) -> List[Integral]:
"""
Return a `list` of the integer charges with the highest ionic
fractions.
Examples
--------
>>> He = IonizationState('He', [0.2, 0.5, 0.3])
>>> He.Z_most_abundant
[1]
>>> Li = IonizationState('Li', [0.4, 0.4, 0.2, 0.0])
>>> Li.Z_most_abundant
[0, 1]
"""
if np.any(np.isnan(self.ionic_fractions)):
raise AtomicError(
f"Cannot find most abundant ion of {self.base_particle} "
f"because the ionic fractions have not been defined.")
return np.flatnonzero(
self.ionic_fractions == self.ionic_fractions.max()
).tolist()
@property
def tol(self) -> Real:
"""Return the absolute tolerance for comparisons."""
return self._tol
@tol.setter
def tol(self, atol: Real):
"""Set the absolute tolerance for comparisons."""
if not isinstance(atol, Real):
raise TypeError("The attribute tol must be a real number.")
if 0 <= atol < 1:
self._tol = atol
else:
raise ValueError("Need 0 <= tol < 1.")
def _get_states_info(self, minimum_ionic_fraction=0.01) -> List[str]:
"""
Return a `list` containing the ion symbol, ionic fraction, and
(if available) the number density for that ion.
"""
states_info = []
for state in self:
if state.ionic_fraction > minimum_ionic_fraction:
state_info = ""
symbol = state.ionic_symbol
if state.integer_charge < 10:
symbol = symbol[:-2] + ' ' + symbol[-2:]
fraction = "{:.3f}".format(state.ionic_fraction)
state_info += f'{symbol}: {fraction}'
if np.isfinite(self.n_elem):
value = "{:.2e}".format(state.number_density.si.value)
state_info += f" n_i = {value} m**-3"
states_info.append(state_info)
return states_info
def info(self, minimum_ionic_fraction: Real = 0.01) -> None:
"""
Print quicklook information for an
`~plasmapy.atomic.IonizationState` instance.
Parameters
----------
minimum_ionic_fraction: Real
If the ionic fraction for a particular ionization state is
below this level, then information for it will not be
printed. Defaults to 0.01.
Example
-------
>>> He_states = IonizationState(
... 'He',
... [0.941, 0.058, 0.001],
... T_e = 5.34 * u.K,
... kappa = 4.05,
... n_elem = 5.51e19 * u.m ** -3,
... )
>>> He_states.info()
IonizationState instance for He with Z_mean = 0.06
----------------------------------------------------------------
He 0+: 0.941 n_i = 5.18e+19 m**-3
He 1+: 0.058 n_i = 3.20e+18 m**-3
----------------------------------------------------------------
n_elem = 5.51e+19 m**-3
n_e = 3.31e+18 m**-3
T_e = 5.34e+00 K
kappa = 4.05
----------------------------------------------------------------
"""
separator_line = [64 * '-']
scientific = "{:.2e}"
floaty = "{:.2f}"
n_elem = scientific.format(self.n_elem.value)
n_e = scientific.format(self.n_e.value)
T_e = scientific.format(self.T_e.value)
kappa = floaty.format(self.kappa)
Z_mean = floaty.format(self.Z_mean)
output = [f"IonizationState instance for {self.base_particle} with Z_mean = {Z_mean}"]
attributes = []
if not np.all(np.isnan(self.ionic_fractions)):
output += separator_line
output += self._get_states_info(minimum_ionic_fraction)
output += separator_line
if not np.isnan(self.n_elem):
attributes.append(f"n_elem = {n_elem} m**-3")
attributes.append(f"n_e = {n_e} m**-3")
if not np.isnan(self.T_e):
attributes.append(f"T_e = {T_e} K")
if np.isfinite(self.kappa):
attributes.append(f"kappa = {kappa}")
if attributes:
attributes += separator_line
output += attributes
for line in output:
print(line)
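if __name__ == "__main__":
    # Minimal usage sketch (for illustration), mirroring the docstring
    # examples above: build a hydrogen ionization state and inspect a few
    # derived quantities.
    states = IonizationState("H", [0.7, 0.3], n_elem=1 * u.cm ** -3, T_e=11000 * u.K)
    print(states.ionic_fractions)      # -> array([0.7, 0.3])
    print(states[1].ionic_fraction)    # index by integer charge -> 0.3
    print(states.Z_mean)               # mean integer charge -> 0.3
    print(states.n_e)                  # electron number density as a Quantity
    states.info()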
| 35.776435
| 95
| 0.582672
|
f7d23637e168b49c09d7149017f07c03b9e00f6e
| 14,711
|
py
|
Python
|
guppyproxy/reqview.py
|
0xBADCA7/guppy-proxy
|
016d18558c350995387253ce3244bf281b98dee9
|
[
"MIT"
] | 143
|
2018-02-22T18:50:34.000Z
|
2022-02-17T06:18:54.000Z
|
guppyproxy/reqview.py
|
0xBADCA7/guppy-proxy
|
016d18558c350995387253ce3244bf281b98dee9
|
[
"MIT"
] | 2
|
2018-03-16T22:03:25.000Z
|
2020-02-11T21:10:59.000Z
|
guppyproxy/reqview.py
|
0xBADCA7/guppy-proxy
|
016d18558c350995387253ce3244bf281b98dee9
|
[
"MIT"
] | 16
|
2018-03-11T03:57:58.000Z
|
2021-08-31T04:25:51.000Z
|
import re
from guppyproxy.util import datetime_string, DisableUpdates
from guppyproxy.proxy import HTTPRequest, get_full_url, parse_request
from guppyproxy.hexteditor import ComboEditor
from PyQt5.QtWidgets import QWidget, QTableWidget, QTableWidgetItem, QGridLayout, QHeaderView, QAbstractItemView, QLineEdit, QTabWidget, QVBoxLayout, QToolButton, QHBoxLayout, QStackedLayout
from PyQt5.QtCore import pyqtSlot, pyqtSignal, Qt
from pygments.lexer import Lexer
from pygments.lexers import get_lexer_for_mimetype, TextLexer
from pygments.lexers.textfmts import HttpLexer
from pygments.util import ClassNotFound
from pygments.token import Token
class HybridHttpLexer(Lexer):
tl = TextLexer()
hl = HttpLexer()
def __init__(self, max_len=50000, *args, **kwargs):
self.max_len = max_len
Lexer.__init__(self, *args, **kwargs)
def get_tokens_unprocessed(self, text):
try:
split = re.split(r"(?:\r\n|\n)(?:\r\n|\n)", text, 1)
if len(split) == 2:
h = split[0]
body = split[1]
else:
h = split[0]
body = ''
except Exception as e:
for v in self.tl.get_tokens_unprocessed(text):
yield v
raise e
for token in self.hl.get_tokens_unprocessed(h):
yield token
if len(body) > 0:
if len(body) <= self.max_len or self.max_len < 0:
second_parser = None
if "Content-Type" in h:
try:
ct = re.search("Content-Type: (.*)", h)
if ct is not None:
hval = ct.groups()[0]
mime = hval.split(";")[0]
second_parser = get_lexer_for_mimetype(mime)
except ClassNotFound:
pass
if second_parser is None:
yield (len(h), Token.Text, text[len(h):])
else:
for index, tokentype, value in second_parser.get_tokens_unprocessed(text[len(h):]):
yield (index + len(h), tokentype, value)
else:
yield (len(h), Token.Text, text[len(h):])
class InfoWidget(QWidget):
def __init__(self, *args, **kwargs):
QWidget.__init__(self, *args, **kwargs)
self.request = None
self.setLayout(QVBoxLayout())
self.layout().setSpacing(0)
self.layout().setContentsMargins(0, 0, 0, 0)
self.infotable = QTableWidget()
self.infotable.setColumnCount(2)
self.infotable.verticalHeader().hide()
self.infotable.horizontalHeader().hide()
self.infotable.horizontalHeader().setSectionResizeMode(QHeaderView.ResizeToContents)
self.infotable.verticalHeader().setSectionResizeMode(QHeaderView.ResizeToContents)
self.infotable.horizontalHeader().setStretchLastSection(True)
self.layout().addWidget(self.infotable)
def _add_info(self, k, v):
row = self.infotable.rowCount()
self.infotable.insertRow(row)
item1 = QTableWidgetItem(k)
item1.setFlags(item1.flags() ^ Qt.ItemIsEditable)
self.infotable.setItem(row, 0, item1)
self.infotable.setItem(row, 1, QTableWidgetItem(v))
def set_request(self, req):
with DisableUpdates(self.infotable):
self.request = req
self.infotable.setRowCount(0)
if self.request is None:
return
reqlen = len(self.request.body)
reqlen = '%d bytes' % reqlen
rsplen = 'No response'
mangle_str = 'Nothing mangled'
if self.request.unmangled:
mangle_str = 'Request'
if self.request.response:
response_code = str(self.request.response.status_code) + \
' ' + self.request.response.reason
rsplen = self.request.response.content_length
rsplen = '%d bytes' % rsplen
if self.request.response.unmangled:
if mangle_str == 'Nothing mangled':
mangle_str = 'Response'
else:
mangle_str += ' and Response'
else:
response_code = ''
time_str = '--'
if self.request.time_end is not None and self.request.time_start is not None:
time_delt = self.request.time_end - self.request.time_start
time_str = "%.2f sec" % time_delt.total_seconds()
if self.request.use_tls:
is_ssl = 'YES'
else:
is_ssl = 'NO'
if self.request.time_start:
time_made_str = datetime_string(self.request.time_start)
else:
time_made_str = '--'
verb = self.request.method
host = self.request.dest_host
self._add_info('Made on', time_made_str)
self._add_info('URL', get_full_url(self.request))
self._add_info('Host', host)
self._add_info('Path', self.request.url.path)
self._add_info('Verb', verb)
self._add_info('Status Code', response_code)
self._add_info('Request Length', reqlen)
self._add_info('Response Length', rsplen)
if self.request.response and self.request.response.unmangled:
                self._add_info('Unmangled Response Length', '%d bytes' % self.request.response.unmangled.content_length)
self._add_info('Time', time_str)
self._add_info('Port', str(self.request.dest_port))
self._add_info('SSL', is_ssl)
self._add_info('Mangled', mangle_str)
self._add_info('Tags', ', '.join(self.request.tags))
class ParamWidget(QWidget):
def __init__(self, *args, **kwargs):
QWidget.__init__(self, *args, **kwargs)
self.request = None
self.setLayout(QVBoxLayout())
self.tab_widget = QTabWidget()
self.urltable = QTableWidget()
self.urltable.setColumnCount(2)
self.posttable = QTableWidget()
self.posttable.setColumnCount(2)
self.cookietable = QTableWidget()
self.cookietable.setColumnCount(2)
self.tab_widget.addTab(self.urltable, "URL")
self.tab_widget.addTab(self.posttable, "POST")
self.tab_widget.addTab(self.cookietable, "Cookies")
self.format_table(self.urltable)
self.format_table(self.posttable)
self.format_table(self.cookietable)
self.layout().addWidget(self.tab_widget)
def _add_info(self, table, k, v):
row = table.rowCount()
table.insertRow(row)
item1 = QTableWidgetItem(k)
item1.setFlags(item1.flags() ^ Qt.ItemIsEditable)
table.setItem(row, 0, item1)
table.setItem(row, 1, QTableWidgetItem(v))
def format_table(self, table):
table.verticalHeader().hide()
table.horizontalHeader().hide()
table.horizontalHeader().setSectionResizeMode(QHeaderView.ResizeToContents)
table.verticalHeader().setSectionResizeMode(QHeaderView.ResizeToContents)
table.horizontalHeader().setStretchLastSection(True)
def clear_tables(self):
self.urltable.setRowCount(0)
self.posttable.setRowCount(0)
self.cookietable.setRowCount(0)
def set_request(self, req):
with DisableUpdates(self.urltable, self.posttable, self.cookietable):
self.clear_tables()
if req is None:
return
post_params = req.parameters()
url_params = req.url.parameters()
cookies = [(k, v) for k, v in req.cookie_iter()]
if url_params:
for k, vv in url_params.items():
for v in vv:
self._add_info(self.urltable, k, v)
if post_params:
for k, vv in post_params.items():
for v in vv:
self._add_info(self.posttable, k, v)
if cookies:
for k, v in cookies:
self._add_info(self.cookietable, k, v)
class TagList(QTableWidget):
tagsUpdated = pyqtSignal(set)
# list part of the tag tab
def __init__(self, *args, **kwargs):
QTableWidget.__init__(self, *args, **kwargs)
self.tags = set()
# Set up table
self.setColumnCount(1)
self.horizontalHeader().hide()
self.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
self.verticalHeader().hide()
self.verticalHeader().setSectionResizeMode(QHeaderView.ResizeToContents)
self.setEditTriggers(QAbstractItemView.NoEditTriggers)
def add_tag(self, tag):
self.tags.add(tag)
self.redraw_table()
self.tagsUpdated.emit(set(self.tags))
def set_tags(self, tags, emit=True):
self.tags = set(tags)
self.redraw_table()
if emit:
self.tagsUpdated.emit(set(self.tags))
def clear_tags(self):
self.tags = set()
self.redraw_table()
self.tagsUpdated.emit(set(self.tags))
def _append_str_row(self, fstr):
row = self.rowCount()
self.insertRow(row)
self.setItem(row, 0, QTableWidgetItem(fstr))
def redraw_table(self):
self.setRowCount(0)
for tag in sorted(self.tags):
self._append_str_row(tag)
@pyqtSlot()
def delete_selected(self):
rows = self.selectionModel().selectedRows()
if len(rows) == 0:
return
for idx in rows:
tag = self.item(idx.row(), 0).text()
self.tags.remove(tag)
self.redraw_table()
self.tagsUpdated.emit(set(self.tags))
def get_tags(self):
return set(self.tags)
class TagWidget(QWidget):
tagsUpdated = pyqtSignal(set)
def __init__(self, *args, **kwargs):
QWidget.__init__(self, *args, **kwargs)
        self.readonly = False  # default until set_read_only() is called
        self.setLayout(QVBoxLayout())
self.taglist = TagList()
self.taglist.tagsUpdated.connect(self.tagsUpdated)
self.layout().addWidget(self.taglist)
self.taginput = QLineEdit()
self.taginput.returnPressed.connect(self.add_tag)
self.addbutton = QToolButton()
self.addbutton.setText("+")
self.removebutton = QToolButton()
self.removebutton.setText("-")
editbar = QHBoxLayout()
editbar.addWidget(self.addbutton)
editbar.addWidget(self.removebutton)
editbar.addWidget(self.taginput)
self.removebutton.clicked.connect(self.taglist.delete_selected)
self.addbutton.clicked.connect(self.add_tag)
self.layout().addLayout(editbar)
@pyqtSlot()
def add_tag(self):
if self.readonly:
return
tag = self.taginput.text()
if tag == "":
return
self.taglist.add_tag(tag)
self.taginput.setText("")
def set_read_only(self, readonly):
self.readonly = readonly
self.addbutton.setEnabled(not readonly)
self.removebutton.setEnabled(not readonly)
class ReqViewWidget(QWidget):
requestEdited = pyqtSignal(HTTPRequest)
def __init__(self, info_tab=False, param_tab=False, tag_tab=False, *args, **kwargs):
QWidget.__init__(self, *args, **kwargs)
self.request = None
self.setLayout(QVBoxLayout())
self.layout().setSpacing(0)
self.layout().setContentsMargins(0, 0, 0, 0)
view_layout = QGridLayout()
view_layout.setSpacing(3)
view_layout.setContentsMargins(0, 0, 0, 0)
self.req_edit = ComboEditor()
self.rsp_edit = ComboEditor()
self.req_edit.setReadOnly(True)
self.rsp_edit.setReadOnly(True)
view_layout.addWidget(self.req_edit, 0, 0)
view_layout.addWidget(self.rsp_edit, 0, 1)
view_widg = QWidget()
view_widg.setLayout(view_layout)
use_tab = False
        if info_tab or param_tab or tag_tab:  # any extra tab besides the message view
use_tab = True
self.tab_widget = QTabWidget()
self.tab_widget.addTab(view_widg, "Message")
self.info_tab = False
self.info_widg = None
if info_tab:
self.info_tab = True
self.info_widg = InfoWidget()
self.tab_widget.addTab(self.info_widg, "Info")
self.param_tab = False
self.param_widg = None
if param_tab:
self.param_tab = True
self.param_widg = ParamWidget()
self.tab_widget.addTab(self.param_widg, "Params")
self.tag_tab = False
self.tag_widg = None
if tag_tab:
self.tag_tab = True
self.tag_widg = TagWidget()
self.tab_widget.addTab(self.tag_widg, "Tags")
if use_tab:
self.layout().addWidget(self.tab_widget)
else:
self.layout().addWidget(view_widg)
def set_read_only(self, ro):
self.req_edit.setReadOnly(ro)
def set_tags_read_only(self, ro):
if self.tag_tab:
self.tag_widg.set_read_only(ro)
def get_request(self):
try:
req = parse_request(self.req_edit.get_bytes())
req.dest_host = self.dest_host
req.dest_port = self.dest_port
req.use_tls = self.use_tls
if self.tag_widg:
req.tags = self.tag_widg.taglist.get_tags()
return req
except Exception as e:
raise e
return None
@pyqtSlot(HTTPRequest)
def set_request(self, req):
self.req = req
self.dest_host = ""
self.dest_port = -1
self.use_tls = False
if req:
self.dest_host = req.dest_host
self.dest_port = req.dest_port
self.use_tls = req.use_tls
self.update_editors()
if self.info_tab:
self.info_widg.set_request(req)
if self.tag_tab:
if req:
self.tag_widg.taglist.set_tags(req.tags, emit=False)
if self.param_tab:
self.param_widg.set_request(req)
def update_editors(self):
self.req_edit.set_bytes(b"")
self.rsp_edit.set_bytes(b"")
lex = HybridHttpLexer()
if self.req is not None:
self.req_edit.set_bytes_highlighted(self.req.full_message(), lexer=lex)
if self.req.response is not None:
self.rsp_edit.set_bytes_highlighted(self.req.response.full_message(), lexer=lex)
def show_message(self):
self.tab_widget.setCurrentIndex(0)
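if __name__ == "__main__":
    # Minimal usage sketch (for illustration): run HybridHttpLexer over a
    # small HTTP response so the header is lexed as HTTP and the JSON body by
    # a mime-specific lexer. The Qt widgets above need a running application
    # and are not exercised here.
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    sample = (
        "HTTP/1.1 200 OK\r\n"
        "Content-Type: application/json\r\n"
        "\r\n"
        '{"status": "ok", "count": 3}'
    )
    print(highlight(sample, HybridHttpLexer(), TerminalFormatter()))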
| 34.942993
| 190
| 0.59255
|
e675abd327b49f64493e9b622d289d60ba79be00
| 150
|
py
|
Python
|
botworks/config_constants.py
|
doubleyuhtee/botworks
|
f9ffc9e77b0b924a42e19d71542976e7cf406725
|
[
"MIT"
] | null | null | null |
botworks/config_constants.py
|
doubleyuhtee/botworks
|
f9ffc9e77b0b924a42e19d71542976e7cf406725
|
[
"MIT"
] | null | null | null |
botworks/config_constants.py
|
doubleyuhtee/botworks
|
f9ffc9e77b0b924a42e19d71542976e7cf406725
|
[
"MIT"
] | null | null | null |
SLEEP_TIME = "sleep_time"
ERROR_SLEEP_TIME = "error_sleep_time"
LOG_LEVEL = "log_level"
UNHEALTHY_HANDLER = "unhealthy"
HEALTHY_HANDLER = "healthy"
| 18.75
| 37
| 0.786667
|
a3b2f0a0d3876f08612404a4251d4ed3d91e4a86
| 118
|
py
|
Python
|
web_scraping/ec2files/ec2file148.py
|
nikibhatt/Groa
|
fc2d4ae87cb825e6d54a0831c72be16541eebe61
|
[
"MIT"
] | 1
|
2020-04-08T19:44:30.000Z
|
2020-04-08T19:44:30.000Z
|
web_scraping/ec2files/ec2file148.py
|
cmgospod/Groa
|
31b3624bfe61e772b55f8175b4e95d63c9e67966
|
[
"MIT"
] | null | null | null |
web_scraping/ec2files/ec2file148.py
|
cmgospod/Groa
|
31b3624bfe61e772b55f8175b4e95d63c9e67966
|
[
"MIT"
] | 1
|
2020-09-12T07:07:41.000Z
|
2020-09-12T07:07:41.000Z
|
from scraper import *
s = Scraper(start=263736, end=265517, max_iter=30, scraper_instance=148)
s.scrape_letterboxd()
| 39.333333
| 73
| 0.779661
|
6036f7dac2a82534ff340d1b02882589f352255e
| 15,041
|
py
|
Python
|
services/connectors/socket-connector/source/connector/main.py
|
fzi-forschungszentrum-informatik/BEMCom
|
0a0c359d889c6d5975e4d4d3b17c24adb5bf883b
|
[
"MIT"
] | 4
|
2021-09-10T09:46:18.000Z
|
2021-12-05T17:55:14.000Z
|
services/connectors/socket-connector/source/connector/main.py
|
fzi-forschungszentrum-informatik/BEMCom
|
0a0c359d889c6d5975e4d4d3b17c24adb5bf883b
|
[
"MIT"
] | null | null | null |
services/connectors/socket-connector/source/connector/main.py
|
fzi-forschungszentrum-informatik/BEMCom
|
0a0c359d889c6d5975e4d4d3b17c24adb5bf883b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
"""
__version__="0.4.0"
import os
import json
import yaml
import socket
import logging
from dotenv import load_dotenv, find_dotenv
from pyconnector_template.pyconnector_template import SensorFlow as SFTemplate
from pyconnector_template.pyconnector_template import ActuatorFlow as AFTemplate
from pyconnector_template.pyconnector_template import Connector as CTemplate
from pyconnector_template.dispatch import DispatchOnce
logger = logging.getLogger("pyconnector")
class SensorFlow(SFTemplate):
"""
Bundles all functionality to handle sensor messages.
This is a template for a SensorFlow class, i.e. one that holds all
functions that are necessary to handle messages from the device(s)
towards the message broker. The methods could also be implemented
into the Connector class, but are separated to support clarity.
Overload these functions
------------------------
In order to transform this class into operational code you need
to inherit from it and overload the following methods:
- receive_raw_msg
- parse_raw_msg
Connector Methods
-----------------
The connector must provide the following methods to allow correct
operation of the methods in this class:
- _update_available_datapoints
Connector Attributes
--------------------
The following attributes must be set up by the connector to
allow these methods to run correctly:
mqtt_client : class instance.
Initialized Mqtt client library with signature of paho MQTT.
SEND_RAW_MESSAGE_TO_DB : string
        If SEND_RAW_MESSAGE_TO_DB == "TRUE", raw messages will be sent
        to the designated DB via MQTT.
    MQTT_TOPIC_RAW_MESSAGE_TO_DB : string
        The topic on which the raw messages will be published.
datapoint_map : dict of dict.
Mapping from datapoint key to topic. Is generated by the AdminUI.
        Looks e.g. like this:
datapoint_map = {
"sensor": {
"Channel__P__value__0": "example-connector/msgs/0001",
"Channel__P__unit__0": "example-connector/msgs/0002",
},
"actuator": {
"example-connector/msgs/0003": "Channel__P__setpoint__0",
}
}
        Note thereby that the keys "sensor" and "actuator" must always be
present, even if the child dicts are empty.
"""
def receive_raw_msg(self, raw_data=None):
"""
Functionality to receive a raw message from device.
        Polls the device/gateway for data and transforms this raw data
into the format expected by run_sensor_flow. If the device/gateway
uses some protocol that pushes data, the raw data should be passed
as the raw_data argument to the function.
Parameters
----------
raw_data : TYPE, optional
Raw data of device/gateway if the device pushes and is not
pulled for data. The default is None.
Returns
-------
msg : dict
The message object containing the raw data. It must be
JSON serializable (to allow sending the raw_message object as JSON
object to the raw message DB). If the data received from the device
or gateway cannot be packed to JSON directly (like e.g. for bytes)
it must modified accordingly. Avoid manipulation of the data as much
as possible, to prevent data losses when these operations fail.
A simple solution may often be to cast the raw data to strings.
Dict structures are fine, especially if created in this function,
e.g. by iterating over many endpoints of one device.
Should be formatted like this:
msg = {
"payload": {
"raw_message": <raw data in JSON serializable form>
}
}
E.g.
msg = {
"payload": {
"raw_message": "device_1:{sensor_1:2.12,sensor_2:3.12}"
}
}
"""
msg = {
"payload": {
# Raw message is bytes
"raw_message": str(raw_data)
}
}
return msg
def parse_raw_msg(self, raw_msg):
"""
Parses the values from the raw_message.
        This parses the raw_message into an object (in the JSON sense, a
        dict in Python). The resulting object can be nested to allow
        representation of hierarchical data.
        Be aware: all keys in the output message should be strings, and all
        values must be convertible to JSON.
Parameters
----------
raw_msg : dict.
Raw msg with data from device/gateway. Should be formatted like:
msg = {
"payload": {
"raw_message": <the raw data>,
"timestamp": <milliseconds since epoch>
}
}
Returns
-------
msg : dict
The message object containing the parsed data as python dicts from
dicts structure. All keys should be strings. All value should be
of type string, bool or numbers. Should be formatted like this:
msg = {
"payload": {
"parsed_message": <the parsed data as object>,
"timestamp": <milliseconds since epoch>
}
}
E.g:
msg = {
"payload": {
"parsed_message": {
"device_1": {
"sensor_1": "test",
"sensor_2": 3.12,
"sensor_2": True,
}
},
"timestamp": 1573680749000
}
}
"""
timestamp = raw_msg["payload"]["timestamp"]
raw_message_as_bytes = eval(raw_msg["payload"]["raw_message"])
decoded_raw_message = raw_message_as_bytes.decode()
if self.parse_as == "JSON":
parsed_message = json.loads(decoded_raw_message)
elif self.parse_as == "YAML":
parsed_message = yaml.safe_load(decoded_raw_message)
msg = {
"payload": {
"parsed_message": parsed_message,
"timestamp": timestamp
}
}
return msg
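# Hedged worked example (for illustration; the payload below is assumed, not
# taken from a real deployment): how receive_raw_msg and parse_raw_msg above
# transform a pushed socket payload when PARSE_AS is JSON.
#   raw bytes from the socket:  b'{"sensor_1": 2.12, "sensor_2": 3.12}'
#   after receive_raw_msg:      {"payload": {"raw_message": "b'{\"sensor_1\": 2.12, ...}'"}}
#   (the pyconnector template is assumed to add the timestamp before parsing)
#   after parse_raw_msg:        {"payload": {"parsed_message": {"sensor_1": 2.12, "sensor_2": 3.12},
#                                            "timestamp": <milliseconds since epoch>}}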
class ActuatorFlow(AFTemplate):
"""
Bundles all functionality to handle actuator messages.
This is a template for a ActuatorFlow class, i.e. one that holds all
functions that are necessary to handle messages from the message
broker towards the devices/gateway. The methods could also be implemented
into the Connector class, but are separated to support clarity.
Overload these functions
------------------------
In order to transform this class into operational code you need
to inherit from it and overload the following methods:
- send_command
Connector Attributes
--------------------
The following attributes must be set up by the connector to
allow these methods to run correctly:
datapoint_map : dict of dict.
Mapping from datapoint key to topic. Is generated by the AdminUI.
        Looks e.g. like this:
datapoint_map = {
"sensor": {
"Channel__P__value__0": "example-connector/msgs/0001",
"Channel__P__unit__0": "example-connector/msgs/0002",
},
"actuator": {
"example-connector/msgs/0003": "Channel__P__setpoint__0",
}
}
        Note thereby that the keys "sensor" and "actuator" must always be
present, even if the child dicts are empty.
"""
def send_command(self, datapoint_key, datapoint_value):
"""
Send message to target device, via gateway if applicable.
Parameters
----------
datapoint_key : string.
The internal key that is used by device/gateway to identify
the datapoint.
value : string.
The value that should be sent to the datapoint.
"""
raise NotImplementedError("send_command has not been implemented.")
class Connector(CTemplate, SensorFlow, ActuatorFlow):
"""
The generic logic of the connector.
It should not be necessary to overload any of these methods, nor
to call any of them apart from __init__() and run().
Configuration Attributes
------------------------
Configuration will be populated from environment variables on init.
CONNECTOR_NAME : string
The name of the connector instance as seen by the AdminUI.
MQTT_TOPIC_LOGS : string
The topics used by the log handler to publish log messages on.
MQTT_TOPIC_HEARTBEAT : string
The topics used by the connector to publish heartbeats on.
MQTT_TOPIC_AVAILABLE_DATAPOINTS : string
The topic on which the available datapoints will be published.
MQTT_TOPIC_DATAPOINT_MAP : string
The topic the connector will listen on for datapoint maps
SEND_RAW_MESSAGE_TO_DB : string
if SEND_RAW_MESSAGE_TO_DB == "TRUE" the raw message will be sent
to the designated DB via MQTT. This is a string and not a bool as
environment variables are always strings.
MQTT_TOPIC_RAW_MESSAGE_TO_DB : string
The topic on which the raw messages will be published.
DEBUG : string
if DEBUG == "TRUE" the log level is set to debug, else it is info.
Computed Attributes
-------------------
These attributes are created by init and are then dynamically used
by the Connector.
mqtt_client : class instance.
Initialized MQTT client library with signature of paho mqtt.
available_datapoints : dict of dict.
Lists all datapoints known to the connector and is sent to the
AdminUI. Actuator datapoints must be specified manually. Sensor
datapoints are additionally automatically added once a value for
a new datapoint is received. The object contains the connector
internal key and a sample value and looks e.g. like this:
available_datapoints = {
"sensor": {
"Channel__P__value__0": 0.122,
"Channel__P__unit__0": "kW",
},
"actuator": {
"Channel__P__setpoint__0": 0.4,
}
}
datapoint_map : dict of dict.
Mapping from datapoint key to topic. Is generated by the AdminUI.
Looks e.g. like this:
datapoint_map = {
"sensor": {
"Channel__P__value__0": "example-connector/msgs/0001",
"Channel__P__unit__0": "example-connector/msgs/0002",
},
"actuator": {
"example-connector/msgs/0003": "Channel__P__setpoint__0",
}
}
Note thereby that the keys "sensor" and "actuator" must always be
present, even if the child dicts are empty.
"""
def __init__(self, *args, **kwargs):
"""
Init the inherited code from python_connector_template and add
connector specific code, like parsing additional environment variables
or specifying actuator datapoints.
"""
# dotenv allows us to load env variables from .env files which is
# convenient for developing. If you set override to True tests
# may fail as the tests assume that the existing environ variables
# have higher priority over ones defined in the .env file.
load_dotenv(find_dotenv(), verbose=True, override=False)
# Load the socket connector specific settings. See Readme.
server_ip = os.getenv("SERVER_IP")
server_port = int(os.getenv("SERVER_PORT"))
recv_bufsize = int(os.getenv("RECV_BUFSIZE") or 4096)
if os.getenv("PARSE_AS") == "YAML":
self.parse_as = "YAML"
else:
self.parse_as = "JSON"
# The rate of messages is actually the rate at which the server
# sends data. Hence we use a custom function that blocks until
# data is received and which triggers run_sensor_flow for every
# received msg.
kwargs["DeviceDispatcher"] = DispatchOnce
kwargs["device_dispatcher_kwargs"] = {
"target_func": self.run_socket_client,
"target_kwargs": {
"server_ip": server_ip,
"server_port": server_port,
"recv_bufsize": recv_bufsize,
},
"cleanup_func": self.close_socket_client,
}
# Sensor datapoints will be added to available_datapoints automatically
# once they first appear in the run_sensor_flow method. It is thus not
# necessary to specify them here. Actuator datapoints, in contrast, must
# be specified here.
kwargs["available_datapoints"] = {
"sensor": {},
"actuator": {}
}
CTemplate.__init__(self, *args, **kwargs)
self.custom_env_var = os.getenv("CUSTOM_ENV_VARR") or "default_value"
def run_socket_client(self, server_ip, server_port, recv_bufsize):
"""
Connects to an UDP server, waits for data and calls run_sensor_flow
for every incoming.
Arguements:
-----------
see Readme.md
"""
logger.info("Connecting to device %s:%s", *(server_ip, server_port))
self.socket = socket.socket(
family=socket.AF_INET,
type=socket.SOCK_STREAM
)
self.socket.connect((server_ip, server_port))
while True:
raw_data = self.socket.recv(recv_bufsize)
if len(raw_data) == 0:
# This should only be the case once the other side has
# closed the connection.
logger.error("Connection to device lost.")
break
# This call will certainly block the connector from receiving
# new data until the most recent msg has been handled. This
# might become an issue if a LOT of data is incoming. However,
# it is very likely that other parts of BEMCom (like the DBs)
# will also not be able to handle that much information.
self.run_sensor_flow(raw_data=raw_data)
def close_socket_client(self):
"""
Hope this gets called on shutdown.
"""
logger.info("Disconnecting from device")
self.socket.close()
if __name__ == "__main__":
connector = Connector(version=__version__)
connector.run()
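# A minimal sketch (not part of the original connector) of the message shapes
# handled above, assuming PARSE_AS=JSON and the hypothetical raw bytes shown:
#   raw_data = b'{"sensor_1": 2.12}'
#   # the raw-message step above simply wraps str(raw_data):
#   msg = {"payload": {"raw_message": str(raw_data),
#                      "timestamp": 1573680749000}}
#   parsed = connector.parse_raw_msg(msg)
#   # parsed["payload"]["parsed_message"] -> {"sensor_1": 2.12}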
| 38.369898
| 80
| 0.595107
|
767fb0fdb6bef8676c664bf725e1c681f7d4b099
| 284
|
py
|
Python
|
setup.py
|
mhearne-usgs/earthquake-sequence
|
3b642a6c202894b0ea421635f0f258fa045fa271
|
[
"CC-BY-4.0"
] | null | null | null |
setup.py
|
mhearne-usgs/earthquake-sequence
|
3b642a6c202894b0ea421635f0f258fa045fa271
|
[
"CC-BY-4.0"
] | null | null | null |
setup.py
|
mhearne-usgs/earthquake-sequence
|
3b642a6c202894b0ea421635f0f258fa045fa271
|
[
"CC-BY-4.0"
] | null | null | null |
from distutils.core import setup
setup(name='sequence',
version='0.1dev',
description='USGS sequence Product Generator',
author='Mike Hearne',
author_email='mhearne@usgs.gov',
url='',
packages=['sequence'],
scripts=['bin/sequence'],
)
| 23.666667
| 52
| 0.616197
|
125db46f9b8b85b91247c7be9215dc3fae0a1d58
| 2,472
|
py
|
Python
|
projects/clc/analyze/comb.py
|
kmckiern/scripts
|
acc8326ca653d804ee06752af9e7f5b011fc6e0e
|
[
"MIT"
] | 2
|
2015-04-27T01:57:43.000Z
|
2015-05-01T18:18:56.000Z
|
projects/clc/analyze/comb.py
|
kmckiern/scripts
|
acc8326ca653d804ee06752af9e7f5b011fc6e0e
|
[
"MIT"
] | null | null | null |
projects/clc/analyze/comb.py
|
kmckiern/scripts
|
acc8326ca653d804ee06752af9e7f5b011fc6e0e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import numpy as np
import mdtraj
from mdtraj import Trajectory as t
import os, sys
import argparse
parser = argparse.ArgumentParser(description='lig-template pdb generation')
parser.add_argument('--trj_dir', type=str, help='directory of simulation trajectories')
parser.add_argument('--trj_ext', type=str, help='input trajectory extension', default='netcdf')
parser.add_argument('--out', type=str, help='output trajectory', default='out.dcd')
parser.add_argument('--top', type=str, help='reference pdb topology')
parser.add_argument('--stride1', type=int, help='individual trj subsample rate', default=1)
parser.add_argument('--stride2', type=int, help='combined trj subsample rate', default=1)
parser.add_argument('--cut', type=int, help='frames before this index will be cut', default=0)
parser.add_argument('--vs', action='store_true', help='write trj of voltage sensitive residues', default=False)
parser.add_argument('--pro', action='store_true', help='write trj of protein only', default=False)
parser.add_argument('--sr', type=str, help='script root', default='/home/kmckiern/scripts/')
args = parser.parse_args()
sr = args.sr
sys.path.insert(0, sr + 'py_general/')
from toolz import natural_sort
ext_i = '.' + args.trj_ext
td = args.trj_dir
# combine trajectories
trjs = [f for f in os.listdir(td) if ext_i in f]
trjs = natural_sort(trjs)
ts = t.load(trjs[0], top=args.top, stride=args.stride1)
if args.vs:
# i'm going to pad these residues by 8
arg = ts.top.select('resid 23 to 39')
lysglu = ts.top.select('resid 118 to 135')
lys = ts.top.select('resid 308 to 324')
vs = np.concatenate([arg, lysglu, lys])
ts = ts.atom_slice(vs)
try:
ts[0].save_pdb('/home/kmckiern/clc/analysis/vs_dihed/pro/vs_ref.pdb')
except Exception:
print('usual protonation error, probably')
nt = len(trjs)
for ndx, i in enumerate(trjs[1:]):
new = t.load(i, top=args.top, atom_indices=vs, stride=args.stride1)
# for the newest trj, drop the last frames in case the write is incomplete
if ndx + 2 == nt:
new = new[:-2]
ts += new
elif args.pro:
pro = ts.top.select('protein')
ts = ts.atom_slice(pro)
ts[0].save_pdb('pro_ref.pdb')
for i in trjs[1:]:
ts += t.load(i, top=args.top, atom_indices=pro, stride=args.stride1)
else:
for i in trjs[1:]:
ts += t.load(i, top=args.top, stride=args.stride1)
# save combined data
ts[args.cut::args.stride2].save(args.out)
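# A hedged invocation sketch (paths and values are assumptions; see the
# argparse help strings above for the actual options):
#   python comb.py --trj_dir ./trajs --trj_ext netcdf --top ref.pdb \
#       --stride1 2 --stride2 5 --pro --out combined.dcd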
| 38.625
| 111
| 0.686893
|
4e1afc03799d8f4c4ed3c6a04a5a52cee851102d
| 3,021
|
py
|
Python
|
acme/agents/tf/dqn/agent_run_bsuite.py
|
nrocketmann/acme-intrinsic
|
ce90aa15ec785a8618a2505410ab6b9f1f9b5a32
|
[
"Apache-2.0"
] | null | null | null |
acme/agents/tf/dqn/agent_run_bsuite.py
|
nrocketmann/acme-intrinsic
|
ce90aa15ec785a8618a2505410ab6b9f1f9b5a32
|
[
"Apache-2.0"
] | null | null | null |
acme/agents/tf/dqn/agent_run_bsuite.py
|
nrocketmann/acme-intrinsic
|
ce90aa15ec785a8618a2505410ab6b9f1f9b5a32
|
[
"Apache-2.0"
] | null | null | null |
# python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for DQN agent."""
from absl.testing import absltest
from absl import app
import acme
from acme import specs
from acme.agents.tf import dqn
from acme.testing import fakes
from acme import wrappers
from absl import flags
import bsuite
import numpy as np
import sonnet as snt
def _make_Qnetwork(action_spec: specs.DiscreteArray) -> snt.Module:
return snt.Sequential([
snt.Flatten(),
snt.nets.MLP([256, 256, action_spec.num_values]),
])
def _make_qnetwork(action_spec: specs.DiscreteArray) -> snt.Module: #takes in s + s' + action, spits out probability
return dqn.ConditionalProductNetwork(output_dims=action_spec.num_values,categorical=True)
def _make_feat_network(action_spec: specs.DiscreteArray) -> snt.Module: # builds the feature network; for now just flatten and project to 64 features
return snt.Sequential([
snt.Flatten(),snt.Linear(64)
])
def _make_rnetwork(action_spec: specs.DiscreteArray) -> snt.Module: #takes in just s and action, spits out probability
return dqn.RNetwork(output_dims=action_spec.num_values,categorical=True)
def main(_):
flags.DEFINE_string('bsuite_id', 'deep_sea/0', 'Bsuite id.')
flags.DEFINE_string('results_dir', '~/tmp/bsuite', 'CSV results directory.')
flags.DEFINE_boolean('overwrite', True, 'Whether to overwrite csv results.')
flags.DEFINE_integer('episodes',100,'Number of episodes to write')
FLAGS = flags.FLAGS
raw_environment = bsuite.load_and_record_to_csv(
bsuite_id=FLAGS.bsuite_id,
results_dir=FLAGS.results_dir,
overwrite=FLAGS.overwrite,
)
environment = wrappers.SinglePrecisionWrapper(raw_environment)
spec = specs.make_environment_spec(environment)
# Construct the agent.
agent = dqn.DQNEmpowerment(
environment_spec=spec,
Qnetwork=_make_Qnetwork(spec.actions),
qnetwork = _make_qnetwork(spec.actions),
feat_network = _make_feat_network(spec.actions),
feat_dims=64,
rnetwork = _make_rnetwork(spec.actions),
batch_size=10,
samples_per_insert=2,
min_replay_size=10)
# Try running the environment loop. We have no assertions here because all
# we care about is that the agent runs without raising any errors.
loop = acme.EnvironmentLoop(environment, agent)
loop.run(num_episodes=FLAGS.episodes)
if __name__ == '__main__':
app.run(main)
| 34.724138
| 132
| 0.746111
|
3b7dd4bcf7997c1c4b9d2e30eee919b2b490b3b3
| 10,969
|
py
|
Python
|
sympy/series/limits.py
|
pnijhara/sympy
|
7f30e325fbc144452e336abad013c50c5b71b7aa
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/series/limits.py
|
pnijhara/sympy
|
7f30e325fbc144452e336abad013c50c5b71b7aa
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/series/limits.py
|
pnijhara/sympy
|
7f30e325fbc144452e336abad013c50c5b71b7aa
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import print_function, division
from sympy.core import S, Symbol, Add, sympify, Expr, PoleError, Mul
from sympy.core.exprtools import factor_terms
from sympy.core.symbol import Dummy
from sympy.functions.combinatorial.factorials import factorial
from sympy.functions.special.gamma_functions import gamma
from sympy.polys import PolynomialError, factor
from sympy.series.order import Order
from sympy.simplify.ratsimp import ratsimp
from sympy.simplify.simplify import together
from .gruntz import gruntz
def limit(e, z, z0, dir="+"):
"""Computes the limit of ``e(z)`` at the point ``z0``.
Parameters
==========
e : expression, the limit of which is to be taken
z : symbol representing the variable in the limit.
Other symbols are treated as constants. Multivariate limits
are not supported.
z0 : the value toward which ``z`` tends. Can be any expression,
including ``oo`` and ``-oo``.
dir : string, optional (default: "+")
The limit is bi-directional if ``dir="+-"``, from the right
(z->z0+) if ``dir="+"``, and from the left (z->z0-) if
``dir="-"``. For infinite ``z0`` (``oo`` or ``-oo``), the ``dir``
argument is determined from the direction of the infinity
(i.e., ``dir="-"`` for ``oo``).
Examples
========
>>> from sympy import limit, sin, oo
>>> from sympy.abc import x
>>> limit(sin(x)/x, x, 0)
1
>>> limit(1/x, x, 0) # default dir='+'
oo
>>> limit(1/x, x, 0, dir="-")
-oo
>>> limit(1/x, x, 0, dir='+-')
zoo
>>> limit(1/x, x, oo)
0
Notes
=====
First we try some heuristics for easy and frequent cases like "x", "1/x",
"x**2" and similar, so that it's fast. For all other cases, we use the
Gruntz algorithm (see the gruntz() function).
See Also
========
limit_seq : returns the limit of a sequence.
"""
return Limit(e, z, z0, dir).doit(deep=False)
def heuristics(e, z, z0, dir):
"""Computes the limit of an expression term-wise.
Parameters are the same as for the ``limit`` function.
Works with the arguments of expression ``e`` one by one, computing
the limit of each and then combining the results. This approach
works only for simple limits, but it is fast.
"""
from sympy.calculus.util import AccumBounds
rv = None
if abs(z0) is S.Infinity:
rv = limit(e.subs(z, 1/z), z, S.Zero, "+" if z0 is S.Infinity else "-")
if isinstance(rv, Limit):
return
elif e.is_Mul or e.is_Add or e.is_Pow or e.is_Function:
r = []
for a in e.args:
l = limit(a, z, z0, dir)
if l.has(S.Infinity) and l.is_finite is None:
if isinstance(e, Add):
m = factor_terms(e)
if not isinstance(m, Mul): # try together
m = together(m)
if not isinstance(m, Mul): # try factor if the previous methods failed
m = factor(e)
if isinstance(m, Mul):
return heuristics(m, z, z0, dir)
return
return
elif isinstance(l, Limit):
return
elif l is S.NaN:
return
else:
r.append(l)
if r:
rv = e.func(*r)
if rv is S.NaN and e.is_Mul and any(isinstance(rr, AccumBounds) for rr in r):
r2 = []
e2 = []
for ii in range(len(r)):
if isinstance(r[ii], AccumBounds):
r2.append(r[ii])
else:
e2.append(e.args[ii])
if len(e2) > 0:
e3 = Mul(*e2).simplify()
l = limit(e3, z, z0, dir)
rv = l * Mul(*r2)
if rv is S.NaN:
try:
rat_e = ratsimp(e)
except PolynomialError:
return
if rat_e is S.NaN or rat_e == e:
return
return limit(rat_e, z, z0, dir)
return rv
class Limit(Expr):
"""Represents an unevaluated limit.
Examples
========
>>> from sympy import Limit, sin
>>> from sympy.abc import x
>>> Limit(sin(x)/x, x, 0)
Limit(sin(x)/x, x, 0)
>>> Limit(1/x, x, 0, dir="-")
Limit(1/x, x, 0, dir='-')
"""
def __new__(cls, e, z, z0, dir="+"):
e = sympify(e)
z = sympify(z)
z0 = sympify(z0)
if z0 is S.Infinity:
dir = "-"
elif z0 is S.NegativeInfinity:
dir = "+"
if isinstance(dir, str):
dir = Symbol(dir)
elif not isinstance(dir, Symbol):
raise TypeError("direction must be of type basestring or "
"Symbol, not %s" % type(dir))
if str(dir) not in ('+', '-', '+-'):
raise ValueError("direction must be one of '+', '-' "
"or '+-', not %s" % dir)
obj = Expr.__new__(cls)
obj._args = (e, z, z0, dir)
return obj
@property
def free_symbols(self):
e = self.args[0]
isyms = e.free_symbols
isyms.difference_update(self.args[1].free_symbols)
isyms.update(self.args[2].free_symbols)
return isyms
def doit(self, **hints):
"""Evaluates the limit.
Parameters
==========
deep : bool, optional (default: True)
Invoke the ``doit`` method of the expressions involved before
taking the limit.
hints : optional keyword arguments
To be passed to ``doit`` methods; only used if deep is True.
"""
from sympy import Abs, exp, log, sign
from sympy.calculus.util import AccumBounds
from sympy.functions import RisingFactorial
e, z, z0, dir = self.args
if z0 is S.ComplexInfinity:
raise NotImplementedError("Limits at complex "
"infinity are not implemented")
if hints.get('deep', True):
e = e.doit(**hints)
z = z.doit(**hints)
z0 = z0.doit(**hints)
if e == z:
return z0
if not e.has(z):
return e
cdir = 0
if str(dir) == "+":
cdir = 1
elif str(dir) == "-":
cdir = -1
def remove_abs(expr):
if not expr.args:
return expr
newargs = tuple(remove_abs(arg) for arg in expr.args)
if newargs != expr.args:
expr = expr.func(*newargs)
if isinstance(expr, Abs):
sig = limit(expr.args[0], z, z0, dir)
if sig.is_zero:
sig = limit(1/expr.args[0], z, z0, dir)
if sig.is_extended_real:
if (sig < 0) == True:
return -expr.args[0]
elif (sig > 0) == True:
return expr.args[0]
return expr
e = remove_abs(e)
if e.is_meromorphic(z, z0):
if abs(z0) is S.Infinity:
newe = e.subs(z, -1/z)
else:
newe = e.subs(z, z + z0)
try:
coeff, ex = newe.leadterm(z, cdir)
except (ValueError, NotImplementedError):
pass
else:
if ex > 0:
return S.Zero
elif ex == 0:
return coeff
if str(dir) == "+" or not(int(ex) & 1):
return S.Infinity*sign(coeff)
elif str(dir) == "-":
return S.NegativeInfinity*sign(coeff)
else:
return S.ComplexInfinity
# gruntz fails on factorials but works with the gamma function
# If no factorial term is present, e should remain unchanged.
# factorial is defined to be zero for negative inputs (which
# differs from gamma) so only rewrite for positive z0.
if z0.is_extended_positive:
e = e.rewrite([factorial, RisingFactorial], gamma)
if e.is_Mul and abs(z0) is S.Infinity:
e = factor_terms(e)
u = Dummy('u', positive=True)
if z0 is S.NegativeInfinity:
inve = e.subs(z, -1/u)
else:
inve = e.subs(z, 1/u)
try:
f = inve.as_leading_term(u).gammasimp()
if f.is_meromorphic(u, S.Zero):
r = limit(f, u, S.Zero, "+")
if isinstance(r, Limit):
return self
else:
return r
except (ValueError, NotImplementedError, PoleError):
pass
if e.is_Order:
return Order(limit(e.expr, z, z0), *e.args[1:])
if e.is_Pow:
if e.has(S.Infinity, S.NegativeInfinity, S.ComplexInfinity, S.NaN):
return self
b1, e1 = e.base, e.exp
f1 = e1*log(b1)
if f1.is_meromorphic(z, z0):
res = limit(f1, z, z0)
return exp(res)
ex_lim = limit(e1, z, z0)
base_lim = limit(b1, z, z0)
if base_lim is S.One:
if ex_lim in (S.Infinity, S.NegativeInfinity):
res = limit(e1*(b1 - 1), z, z0)
return exp(res)
elif ex_lim.is_real:
return S.One
if base_lim in (S.Zero, S.Infinity, S.NegativeInfinity) and ex_lim is S.Zero:
res = limit(f1, z, z0)
return exp(res)
if base_lim is S.NegativeInfinity:
if ex_lim is S.NegativeInfinity:
return S.Zero
if ex_lim is S.Infinity:
return S.ComplexInfinity
if not isinstance(base_lim, AccumBounds) and not isinstance(ex_lim, AccumBounds):
res = base_lim**ex_lim
if res is not S.ComplexInfinity and not res.is_Pow:
return res
l = None
try:
if str(dir) == '+-':
r = gruntz(e, z, z0, '+')
l = gruntz(e, z, z0, '-')
if l != r:
raise ValueError("The limit does not exist since "
"left hand limit = %s and right hand limit = %s"
% (l, r))
else:
r = gruntz(e, z, z0, dir)
if r is S.NaN or l is S.NaN:
raise PoleError()
except (PoleError, ValueError):
if l is not None:
raise
r = heuristics(e, z, z0, dir)
if r is None:
return self
return r
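# Quick usage notes (not part of sympy's source, just summarizing the
# docstrings above): ``limit(e, z, z0, dir)`` simply builds ``Limit(...)``
# and calls ``.doit(deep=False)``, so the two are interchangeable, e.g.
#   Limit(sin(x)/x, x, 0).doit()   # -> 1
#   limit(1/x, x, 0, dir='+-')     # -> zoo (the bi-directional limit)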
| 31.979592
| 93
| 0.486371
|
815fa78dc9c1700f6b6c0d30a5a9fc0f8fd1997f
| 472
|
py
|
Python
|
resources/usr/local/lib/python2.7/dist-packages/sklearn/gaussian_process/__init__.py
|
edawson/parliament2
|
2632aa3484ef64c9539c4885026b705b737f6d1e
|
[
"Apache-2.0"
] | null | null | null |
resources/usr/local/lib/python2.7/dist-packages/sklearn/gaussian_process/__init__.py
|
edawson/parliament2
|
2632aa3484ef64c9539c4885026b705b737f6d1e
|
[
"Apache-2.0"
] | null | null | null |
resources/usr/local/lib/python2.7/dist-packages/sklearn/gaussian_process/__init__.py
|
edawson/parliament2
|
2632aa3484ef64c9539c4885026b705b737f6d1e
|
[
"Apache-2.0"
] | 1
|
2020-05-28T23:01:44.000Z
|
2020-05-28T23:01:44.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
"""
The :mod:`sklearn.gaussian_process` module implements scalar Gaussian Process
based predictions.
"""
from .gaussian_process import GaussianProcess
from . import correlation_models
from . import regression_models
__all__ = ['GaussianProcess', 'correlation_models', 'regression_models']
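# A hedged usage sketch (X, y, X_new are assumed arrays) following the usual
# scikit-learn estimator API:
#   from sklearn.gaussian_process import GaussianProcess
#   gp = GaussianProcess()
#   gp.fit(X, y)
#   y_pred = gp.predict(X_new)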
| 26.222222
| 77
| 0.756356
|
4532a859cf52d93ac940d0ec3c3c6231903af4b0
| 3,205
|
py
|
Python
|
setup.py
|
jzitelli/three.py
|
9cdc28bd776ac227d880332e41d46a78415f532f
|
[
"MIT"
] | 12
|
2015-12-09T21:59:48.000Z
|
2021-05-06T13:05:16.000Z
|
setup.py
|
jzitelli/three.py
|
9cdc28bd776ac227d880332e41d46a78415f532f
|
[
"MIT"
] | null | null | null |
setup.py
|
jzitelli/three.py
|
9cdc28bd776ac227d880332e41d46a78415f532f
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
from codecs import open
import os.path
here = os.path.abspath(os.path.dirname(__file__))
# Get the long description from the README file
with open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='three.py',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.79.0.0.dev0',
description='Python package for defining 3D objects / scenes in the conceptual / class framework of three.js',
long_description=long_description,
# The project's main homepage.
url='https://jzitelli.github.io/three.py',
# Author details
author='Jeffrey Zitelli',
author_email='jeffrey.zitelli@gmail.com',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Topic :: Artistic Software',
'Topic :: Multimedia :: Graphics :: 3D Modeling',
'Topic :: Multimedia :: Graphics :: 3D Rendering',
'Topic :: Scientific/Engineering :: Visualization',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5'
],
keywords='three.js WebGL OpenGL 3d graphics Cannon.js',
packages=['three'],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['numpy', 'pyexecjs', 'pillow'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
# extras_require={
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
# },
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
# package_data={
# 'sample': ['package_data.dat'],
# },
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
# entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
)
| 35.21978
| 114
| 0.661778
|
9772b7ae9ff40a587ac0db61a91cd9e48f4a18ba
| 1,763
|
py
|
Python
|
tests/assessment_authoring/test_record_templates.py
|
UOC/dlkit
|
a9d265db67e81b9e0f405457464e762e2c03f769
|
[
"MIT"
] | 2
|
2018-02-23T12:16:11.000Z
|
2020-10-08T17:54:24.000Z
|
tests/assessment_authoring/test_record_templates.py
|
UOC/dlkit
|
a9d265db67e81b9e0f405457464e762e2c03f769
|
[
"MIT"
] | 87
|
2017-04-21T18:57:15.000Z
|
2021-12-13T19:43:57.000Z
|
tests/assessment_authoring/test_record_templates.py
|
UOC/dlkit
|
a9d265db67e81b9e0f405457464e762e2c03f769
|
[
"MIT"
] | 1
|
2018-03-01T16:44:25.000Z
|
2018-03-01T16:44:25.000Z
|
"""Unit tests of assessment.authoring records."""
import pytest
from ..utilities.general import is_never_authz, is_no_authz, uses_cataloging, uses_filesystem_only
@pytest.mark.usefixtures("assessment_part_record_class_fixture", "assessment_part_record_test_fixture")
class TestAssessmentPartRecord(object):
"""Tests for AssessmentPartRecord"""
@pytest.mark.usefixtures("assessment_part_query_record_class_fixture", "assessment_part_query_record_test_fixture")
class TestAssessmentPartQueryRecord(object):
"""Tests for AssessmentPartQueryRecord"""
@pytest.mark.usefixtures("assessment_part_form_record_class_fixture", "assessment_part_form_record_test_fixture")
class TestAssessmentPartFormRecord(object):
"""Tests for AssessmentPartFormRecord"""
@pytest.mark.usefixtures("assessment_part_search_record_class_fixture", "assessment_part_search_record_test_fixture")
class TestAssessmentPartSearchRecord(object):
"""Tests for AssessmentPartSearchRecord"""
@pytest.mark.usefixtures("sequence_rule_record_class_fixture", "sequence_rule_record_test_fixture")
class TestSequenceRuleRecord(object):
"""Tests for SequenceRuleRecord"""
@pytest.mark.usefixtures("sequence_rule_query_record_class_fixture", "sequence_rule_query_record_test_fixture")
class TestSequenceRuleQueryRecord(object):
"""Tests for SequenceRuleQueryRecord"""
@pytest.mark.usefixtures("sequence_rule_form_record_class_fixture", "sequence_rule_form_record_test_fixture")
class TestSequenceRuleFormRecord(object):
"""Tests for SequenceRuleFormRecord"""
@pytest.mark.usefixtures("sequence_rule_search_record_class_fixture", "sequence_rule_search_record_test_fixture")
class TestSequenceRuleSearchRecord(object):
"""Tests for SequenceRuleSearchRecord"""
| 36.729167
| 117
| 0.836642
|
578dae5997775b6a9468f8641a207822a51c85d2
| 398
|
py
|
Python
|
Chapter06/httprecv.py
|
PacktPublishing/Kali-Linux-Network-Scanning-Cookbook-Second-Edition
|
bbb88df785f86e4c41494b867419c53262c700a4
|
[
"MIT"
] | 17
|
2017-07-20T08:19:28.000Z
|
2021-11-08T13:11:58.000Z
|
Chapter09/httprecv.py
|
PacktPublishing/Kali-Linux-Network-Scanning-Cookbook-Second-Edition
|
bbb88df785f86e4c41494b867419c53262c700a4
|
[
"MIT"
] | null | null | null |
Chapter09/httprecv.py
|
PacktPublishing/Kali-Linux-Network-Scanning-Cookbook-Second-Edition
|
bbb88df785f86e4c41494b867419c53262c700a4
|
[
"MIT"
] | 13
|
2017-07-20T08:20:42.000Z
|
2021-12-07T20:29:26.000Z
|
#!/usr/bin/python
import socket
print "Awaiting connection...\n"
httprecv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
httprecv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
httprecv.bind(("0.0.0.0",8000))
httprecv.listen(2)
(client, ( ip,sock)) = httprecv.accept()
print "Received connection from : ", ip
data = client.recv(4096)
print str(data)
client.close()
httprecv.close()
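# A hedged way to exercise this listener from another host (the IP is an
# assumption): point any HTTP client at port 8000, e.g.
#   curl http://192.168.1.10:8000/
# and the raw request line and headers will be printed above.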
| 22.111111
| 62
| 0.741206
|
df36e4c55a0b5b0e53d6dc62703ddc2112d799b3
| 8,510
|
py
|
Python
|
sdk/python/pulumi_google_native/file/v1/get_backup.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 44
|
2021-04-18T23:00:48.000Z
|
2022-02-14T17:43:15.000Z
|
sdk/python/pulumi_google_native/file/v1/get_backup.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 354
|
2021-04-16T16:48:39.000Z
|
2022-03-31T17:16:39.000Z
|
sdk/python/pulumi_google_native/file/v1/get_backup.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 8
|
2021-04-24T17:46:51.000Z
|
2022-01-05T10:40:21.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetBackupResult',
'AwaitableGetBackupResult',
'get_backup',
'get_backup_output',
]
@pulumi.output_type
class GetBackupResult:
def __init__(__self__, capacity_gb=None, create_time=None, description=None, download_bytes=None, labels=None, name=None, satisfies_pzs=None, source_file_share=None, source_instance=None, source_instance_tier=None, state=None, storage_bytes=None):
if capacity_gb and not isinstance(capacity_gb, str):
raise TypeError("Expected argument 'capacity_gb' to be a str")
pulumi.set(__self__, "capacity_gb", capacity_gb)
if create_time and not isinstance(create_time, str):
raise TypeError("Expected argument 'create_time' to be a str")
pulumi.set(__self__, "create_time", create_time)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if download_bytes and not isinstance(download_bytes, str):
raise TypeError("Expected argument 'download_bytes' to be a str")
pulumi.set(__self__, "download_bytes", download_bytes)
if labels and not isinstance(labels, dict):
raise TypeError("Expected argument 'labels' to be a dict")
pulumi.set(__self__, "labels", labels)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if satisfies_pzs and not isinstance(satisfies_pzs, bool):
raise TypeError("Expected argument 'satisfies_pzs' to be a bool")
pulumi.set(__self__, "satisfies_pzs", satisfies_pzs)
if source_file_share and not isinstance(source_file_share, str):
raise TypeError("Expected argument 'source_file_share' to be a str")
pulumi.set(__self__, "source_file_share", source_file_share)
if source_instance and not isinstance(source_instance, str):
raise TypeError("Expected argument 'source_instance' to be a str")
pulumi.set(__self__, "source_instance", source_instance)
if source_instance_tier and not isinstance(source_instance_tier, str):
raise TypeError("Expected argument 'source_instance_tier' to be a str")
pulumi.set(__self__, "source_instance_tier", source_instance_tier)
if state and not isinstance(state, str):
raise TypeError("Expected argument 'state' to be a str")
pulumi.set(__self__, "state", state)
if storage_bytes and not isinstance(storage_bytes, str):
raise TypeError("Expected argument 'storage_bytes' to be a str")
pulumi.set(__self__, "storage_bytes", storage_bytes)
@property
@pulumi.getter(name="capacityGb")
def capacity_gb(self) -> str:
"""
Capacity of the source file share when the backup was created.
"""
return pulumi.get(self, "capacity_gb")
@property
@pulumi.getter(name="createTime")
def create_time(self) -> str:
"""
The time when the backup was created.
"""
return pulumi.get(self, "create_time")
@property
@pulumi.getter
def description(self) -> str:
"""
A description of the backup with 2048 characters or less. Requests with longer descriptions will be rejected.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="downloadBytes")
def download_bytes(self) -> str:
"""
Amount of bytes that will be downloaded if the backup is restored. This may be different than storage bytes, since sequential backups of the same disk will share storage.
"""
return pulumi.get(self, "download_bytes")
@property
@pulumi.getter
def labels(self) -> Mapping[str, str]:
"""
Resource labels to represent user provided metadata.
"""
return pulumi.get(self, "labels")
@property
@pulumi.getter
def name(self) -> str:
"""
The resource name of the backup, in the format `projects/{project_number}/locations/{location_id}/backups/{backup_id}`.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="satisfiesPzs")
def satisfies_pzs(self) -> bool:
"""
Reserved for future use.
"""
return pulumi.get(self, "satisfies_pzs")
@property
@pulumi.getter(name="sourceFileShare")
def source_file_share(self) -> str:
"""
Name of the file share in the source Cloud Filestore instance that the backup is created from.
"""
return pulumi.get(self, "source_file_share")
@property
@pulumi.getter(name="sourceInstance")
def source_instance(self) -> str:
"""
The resource name of the source Cloud Filestore instance, in the format `projects/{project_number}/locations/{location_id}/instances/{instance_id}`, used to create this backup.
"""
return pulumi.get(self, "source_instance")
@property
@pulumi.getter(name="sourceInstanceTier")
def source_instance_tier(self) -> str:
"""
The service tier of the source Cloud Filestore instance that this backup is created from.
"""
return pulumi.get(self, "source_instance_tier")
@property
@pulumi.getter
def state(self) -> str:
"""
The backup state.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="storageBytes")
def storage_bytes(self) -> str:
"""
The size of the storage used by the backup. As backups share storage, this number is expected to change with backup creation/deletion.
"""
return pulumi.get(self, "storage_bytes")
class AwaitableGetBackupResult(GetBackupResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetBackupResult(
capacity_gb=self.capacity_gb,
create_time=self.create_time,
description=self.description,
download_bytes=self.download_bytes,
labels=self.labels,
name=self.name,
satisfies_pzs=self.satisfies_pzs,
source_file_share=self.source_file_share,
source_instance=self.source_instance,
source_instance_tier=self.source_instance_tier,
state=self.state,
storage_bytes=self.storage_bytes)
def get_backup(backup_id: Optional[str] = None,
location: Optional[str] = None,
project: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetBackupResult:
"""
Gets the details of a specific backup.
"""
__args__ = dict()
__args__['backupId'] = backup_id
__args__['location'] = location
__args__['project'] = project
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('google-native:file/v1:getBackup', __args__, opts=opts, typ=GetBackupResult).value
return AwaitableGetBackupResult(
capacity_gb=__ret__.capacity_gb,
create_time=__ret__.create_time,
description=__ret__.description,
download_bytes=__ret__.download_bytes,
labels=__ret__.labels,
name=__ret__.name,
satisfies_pzs=__ret__.satisfies_pzs,
source_file_share=__ret__.source_file_share,
source_instance=__ret__.source_instance,
source_instance_tier=__ret__.source_instance_tier,
state=__ret__.state,
storage_bytes=__ret__.storage_bytes)
@_utilities.lift_output_func(get_backup)
def get_backup_output(backup_id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetBackupResult]:
"""
Gets the details of a specific backup.
"""
...
| 39.398148
| 251
| 0.659459
|
4dfe9a10c958326aec146a3a0f5c3ce118c69469
| 10,178
|
py
|
Python
|
test/functional/p2p_filter.py
|
qtdatainc/SAM-Coin
|
3ff99809c5ce6ddad10d29fef688cd54f0ee37fb
|
[
"MIT"
] | 1
|
2022-02-22T03:37:26.000Z
|
2022-02-22T03:37:26.000Z
|
test/functional/p2p_filter.py
|
qtdatainc/SAM-Coin
|
3ff99809c5ce6ddad10d29fef688cd54f0ee37fb
|
[
"MIT"
] | null | null | null |
test/functional/p2p_filter.py
|
qtdatainc/SAM-Coin
|
3ff99809c5ce6ddad10d29fef688cd54f0ee37fb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2020 The Samcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Test BIP 37
"""
from test_framework.messages import (
CInv,
MAX_BLOOM_FILTER_SIZE,
MAX_BLOOM_HASH_FUNCS,
MSG_BLOCK,
MSG_FILTERED_BLOCK,
msg_filteradd,
msg_filterclear,
msg_filterload,
msg_getdata,
msg_mempool,
msg_version,
)
from test_framework.p2p import (
P2PInterface,
P2P_SERVICES,
P2P_SUBVERSION,
P2P_VERSION,
p2p_lock,
)
from test_framework.script import MAX_SCRIPT_ELEMENT_SIZE
from test_framework.test_framework import SamcoinTestFramework
class P2PBloomFilter(P2PInterface):
# This is a P2SH watch-only wallet
watch_script_pubkey = 'a914ffffffffffffffffffffffffffffffffffffffff87'
# The initial filter (n=10, fp=0.000001) with just the above scriptPubKey added
watch_filter_init = msg_filterload(
data=
b'@\x00\x08\x00\x80\x00\x00 \x00\xc0\x00 \x04\x00\x08$\x00\x04\x80\x00\x00 \x00\x00\x00\x00\x80\x00\x00@\x00\x02@ \x00',
nHashFuncs=19,
nTweak=0,
nFlags=1,
)
def __init__(self):
super().__init__()
self._tx_received = False
self._merkleblock_received = False
def on_inv(self, message):
want = msg_getdata()
for i in message.inv:
# inv messages can only contain TX or BLOCK, so translate BLOCK to FILTERED_BLOCK
if i.type == MSG_BLOCK:
want.inv.append(CInv(MSG_FILTERED_BLOCK, i.hash))
else:
want.inv.append(i)
if len(want.inv):
self.send_message(want)
def on_merkleblock(self, message):
self._merkleblock_received = True
def on_tx(self, message):
self._tx_received = True
@property
def tx_received(self):
with p2p_lock:
return self._tx_received
@tx_received.setter
def tx_received(self, value):
with p2p_lock:
self._tx_received = value
@property
def merkleblock_received(self):
with p2p_lock:
return self._merkleblock_received
@merkleblock_received.setter
def merkleblock_received(self, value):
with p2p_lock:
self._merkleblock_received = value
class FilterTest(SamcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [[
'-peerbloomfilters',
'-whitelist=noban@127.0.0.1', # immediate tx relay
]]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def test_size_limits(self, filter_peer):
self.log.info('Check that too large filter is rejected')
with self.nodes[0].assert_debug_log(['Misbehaving']):
filter_peer.send_and_ping(msg_filterload(data=b'\xbb'*(MAX_BLOOM_FILTER_SIZE+1)))
self.log.info('Check that max size filter is accepted')
with self.nodes[0].assert_debug_log([], unexpected_msgs=['Misbehaving']):
filter_peer.send_and_ping(msg_filterload(data=b'\xbb'*(MAX_BLOOM_FILTER_SIZE)))
filter_peer.send_and_ping(msg_filterclear())
self.log.info('Check that filter with too many hash functions is rejected')
with self.nodes[0].assert_debug_log(['Misbehaving']):
filter_peer.send_and_ping(msg_filterload(data=b'\xaa', nHashFuncs=MAX_BLOOM_HASH_FUNCS+1))
self.log.info('Check that filter with max hash functions is accepted')
with self.nodes[0].assert_debug_log([], unexpected_msgs=['Misbehaving']):
filter_peer.send_and_ping(msg_filterload(data=b'\xaa', nHashFuncs=MAX_BLOOM_HASH_FUNCS))
# Don't send filterclear until next two filteradd checks are done
self.log.info('Check that max size data element to add to the filter is accepted')
with self.nodes[0].assert_debug_log([], unexpected_msgs=['Misbehaving']):
filter_peer.send_and_ping(msg_filteradd(data=b'\xcc'*(MAX_SCRIPT_ELEMENT_SIZE)))
self.log.info('Check that too large data element to add to the filter is rejected')
with self.nodes[0].assert_debug_log(['Misbehaving']):
filter_peer.send_and_ping(msg_filteradd(data=b'\xcc'*(MAX_SCRIPT_ELEMENT_SIZE+1)))
filter_peer.send_and_ping(msg_filterclear())
def test_msg_mempool(self):
self.log.info("Check that a node with bloom filters enabled services p2p mempool messages")
filter_peer = P2PBloomFilter()
self.log.debug("Create a tx relevant to the peer before connecting")
filter_address = self.nodes[0].decodescript(filter_peer.watch_script_pubkey)['address']
txid = self.nodes[0].sendtoaddress(filter_address, 90)
self.log.debug("Send a mempool msg after connecting and check that the tx is received")
self.nodes[0].add_p2p_connection(filter_peer)
filter_peer.send_and_ping(filter_peer.watch_filter_init)
filter_peer.send_message(msg_mempool())
filter_peer.wait_for_tx(txid)
def test_frelay_false(self, filter_peer):
self.log.info("Check that a node with fRelay set to false does not receive invs until the filter is set")
filter_peer.tx_received = False
filter_address = self.nodes[0].decodescript(filter_peer.watch_script_pubkey)['address']
self.nodes[0].sendtoaddress(filter_address, 90)
# Sync to make sure the reason filter_peer doesn't receive the tx is not p2p delays
filter_peer.sync_with_ping()
assert not filter_peer.tx_received
# Clear the mempool so that this transaction does not impact subsequent tests
self.nodes[0].generate(1)
def test_filter(self, filter_peer):
# Set the bloomfilter using filterload
filter_peer.send_and_ping(filter_peer.watch_filter_init)
# If fRelay is not already True, sending filterload sets it to True
assert self.nodes[0].getpeerinfo()[0]['relaytxes']
filter_address = self.nodes[0].decodescript(filter_peer.watch_script_pubkey)['address']
self.log.info('Check that we receive merkleblock and tx if the filter matches a tx in a block')
block_hash = self.nodes[0].generatetoaddress(1, filter_address)[0]
txid = self.nodes[0].getblock(block_hash)['tx'][0]
filter_peer.wait_for_merkleblock(block_hash)
filter_peer.wait_for_tx(txid)
self.log.info('Check that we only receive a merkleblock if the filter does not match a tx in a block')
filter_peer.tx_received = False
block_hash = self.nodes[0].generatetoaddress(1, self.nodes[0].getnewaddress())[0]
filter_peer.wait_for_merkleblock(block_hash)
assert not filter_peer.tx_received
self.log.info('Check that we do not receive a tx if the filter does not match a mempool tx')
filter_peer.merkleblock_received = False
filter_peer.tx_received = False
self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 90)
filter_peer.sync_send_with_ping()
assert not filter_peer.merkleblock_received
assert not filter_peer.tx_received
self.log.info('Check that we receive a tx if the filter matches a mempool tx')
filter_peer.merkleblock_received = False
txid = self.nodes[0].sendtoaddress(filter_address, 90)
filter_peer.wait_for_tx(txid)
assert not filter_peer.merkleblock_received
self.log.info('Check that after deleting filter all txs get relayed again')
filter_peer.send_and_ping(msg_filterclear())
for _ in range(5):
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 7)
filter_peer.wait_for_tx(txid)
self.log.info('Check that request for filtered blocks is ignored if no filter is set')
filter_peer.merkleblock_received = False
filter_peer.tx_received = False
with self.nodes[0].assert_debug_log(expected_msgs=['received getdata']):
block_hash = self.nodes[0].generatetoaddress(1, self.nodes[0].getnewaddress())[0]
filter_peer.wait_for_inv([CInv(MSG_BLOCK, int(block_hash, 16))])
filter_peer.sync_with_ping()
assert not filter_peer.merkleblock_received
assert not filter_peer.tx_received
self.log.info('Check that sending "filteradd" if no filter is set is treated as misbehavior')
with self.nodes[0].assert_debug_log(['Misbehaving']):
filter_peer.send_and_ping(msg_filteradd(data=b'letsmisbehave'))
self.log.info("Check that division-by-zero remote crash bug [CVE-2013-5700] is fixed")
filter_peer.send_and_ping(msg_filterload(data=b'', nHashFuncs=1))
filter_peer.send_and_ping(msg_filteradd(data=b'letstrytocrashthisnode'))
self.nodes[0].disconnect_p2ps()
def run_test(self):
filter_peer = self.nodes[0].add_p2p_connection(P2PBloomFilter())
self.log.info('Test filter size limits')
self.test_size_limits(filter_peer)
self.log.info('Test BIP 37 for a node with fRelay = True (default)')
self.test_filter(filter_peer)
self.nodes[0].disconnect_p2ps()
self.log.info('Test BIP 37 for a node with fRelay = False')
# Add peer but do not send version yet
filter_peer_without_nrelay = self.nodes[0].add_p2p_connection(P2PBloomFilter(), send_version=False, wait_for_verack=False)
# Send version with relay=False
version_without_fRelay = msg_version()
version_without_fRelay.nVersion = P2P_VERSION
version_without_fRelay.strSubVer = P2P_SUBVERSION
version_without_fRelay.nServices = P2P_SERVICES
version_without_fRelay.relay = 0
filter_peer_without_nrelay.send_message(version_without_fRelay)
filter_peer_without_nrelay.wait_for_verack()
assert not self.nodes[0].getpeerinfo()[0]['relaytxes']
self.test_frelay_false(filter_peer_without_nrelay)
self.test_filter(filter_peer_without_nrelay)
self.test_msg_mempool()
if __name__ == '__main__':
FilterTest().main()
| 42.232365
| 130
| 0.69326
|
da20577f4ecaf5a1f05d41072021430d52392cdc
| 6,705
|
py
|
Python
|
bibtex2html/bibtex2html.py
|
MUONetwork/muon.github.io
|
ce40c4efd44a325f0f2e33f565e8b117cb50f7da
|
[
"CC-BY-3.0"
] | 1
|
2020-07-25T16:21:31.000Z
|
2020-07-25T16:21:31.000Z
|
bibtex2html/bibtex2html.py
|
MUONetwork/muon.github.io
|
ce40c4efd44a325f0f2e33f565e8b117cb50f7da
|
[
"CC-BY-3.0"
] | null | null | null |
bibtex2html/bibtex2html.py
|
MUONetwork/muon.github.io
|
ce40c4efd44a325f0f2e33f565e8b117cb50f7da
|
[
"CC-BY-3.0"
] | null | null | null |
#! /usr/bin/env python2
"""
Copyright (C) 2009-2015 Gustavo de Oliveira. Licensed under the GPL (see the
license file).
This program reads a BibTeX file and converts it to a list of references in
HTML format.
To use this program you need Python installed on your computer.
To run the program, in a command-line interface enter the command
python bibtex2html.py bibtex.bib template.html output.html
Here, `bibtex.bib` is the BibTeX file that you want to convert, and
`template.html` is any template file containing the following placeholders:
<!--NUMBER_OF_REFERENCES-->
<!--NEWER-->
<!--OLDER-->
<!--DATE-->
<!--LIST_OF_REFERENCES-->
These placeholders will be replaced by the program, and the result will be
written to the file `output.html`.
"""
import sys
from datetime import date
def cleanup_author(s):
"""Clean up and format author names.
cleanup_author(str) -> str
"""
dictionary = {'\\"a': 'ä', '\\"A': 'Ä', '\\"e': 'ë',
'\\"E': 'Ë', '\\"i': 'ï', '\\"I': 'Ï', '\\"o': 'ö',
'\\"O': 'Ö', '\\"u': 'ü', '\\"U': 'Ü', "\\'a": 'á',
"\\'A": 'Á', "\\'e": 'é', "\\'i": 'í',
"\\'I": 'Í', "\\'E": 'É', "\\'o": 'ó',
"\\'O": 'Ó', "\\'u": 'ú', "\\'U": 'Ú',
'\\~n': 'ñ', '\\~N': 'Ñ', '\\~a': 'ã',
'\\~A': 'Ã', '\\~o': 'õ', '\\~O': 'Õ',
'.': ' ', "\\'\\": '', '{': '', '}': '', ' And ': ' and '}
for k, v in dictionary.iteritems():
s = s.replace(k, v)
s = s.strip()
before, sep, after = s.rpartition(' and ')
before = before.replace(' and ', ', ')
s = before + sep + after
return s
def cleanup_title(s):
"""Clean up and format article titles.
cleanup_title(str) -> str
"""
s = s.lower()
s = s.capitalize()
return s
def cleanup_page(s):
"""Clean up the article page string.
cleanup_page(str) -> str
"""
s = s.replace('--', '-')
return s
# Get the BibTeX, template, and output file names
bibfile = sys.argv[1]
templatefile = sys.argv[2]
outputfile = sys.argv[3]
# Open, read and close the BibTeX and template files
with open(templatefile, 'r') as f:
template = f.read()
with open(bibfile, 'r') as f:
datalist = f.readlines()
# Discard unwanted characteres and commented lines
datalist = [s.strip(' \n\t') for s in datalist]
datalist = [s for s in datalist if s[:2] != '%%']
# Convert a list into a string
data = ''
for s in datalist: data += s
# Split the data at the separators @ and put it in a list
biblist = data.split('@')
# Discard empty strings from the list
biblist = [s for s in biblist if s != '']
# Create a list of lists containing the strings "key = value" of each bibitem
listlist = []
for s in biblist:
type, sep, s = s.partition('{')
id, sep, s = s.partition(',')
s = s.rpartition('}')[0]
keylist = ['type = ' + type.lower(), 'id = ' + id]
number = 0
flag = 0
i = 0
while len(s) > i:
# print len(s), i, s  # debug output; fires for every character, keep disabled
if s[i] == '{':
number += 1
flag = 1
elif s[i] == '}':
number -= 1
if number == 0 and flag == 1:
keylist.append(s[:i+1])
s = s[i+1:]
flag = 0
i = 0
continue
i += 1
keylist = [t.strip(' ,\t\n') for t in keylist]
listlist.append(keylist)
# Create a list of dicts containing key : value of each bibitem
dictlist = []
for l in listlist:
keydict = {}
for s in l:
key, sep, value = s.partition('=')
key = key.strip(' ,\n\t{}')
key = key.lower()
value = value.strip(' ,\n\t{}')
keydict[key] = value
dictlist.append(keydict)
# Backup all the original data
full_dictlist = dictlist
# Keep only articles in the list
dictlist = [d for d in dictlist if d['type'] == 'article' or d['type'] == 'inproceedings']
# keep only articles that have author and title
dictlist = [d for d in dictlist if 'author' in d and 'title' in d]
dictlist = [d for d in dictlist if d['author'] != '' and d['title'] != '']
# Get a list of the article years and the min and max values
years = [int(d['year']) for d in dictlist if 'year' in d]
years.sort()
older = years[0]
newer = years[-1]
###########################################################################
# Set the fields to be exported to html (following this order)
mandatory = ['author', 'title']
optional = ['journal', 'eprint', 'volume', 'pages', 'year', 'url', 'doi']
###########################################################################
# Clean up data
for i in range(len(dictlist)):
dictlist[i]['author'] = cleanup_author(dictlist[i]['author'])
dictlist[i]['title'] = cleanup_title(dictlist[i]['title'])
# Write down the list html code
counter = 0
html = ''
for y in reversed(range(older, newer + 1)):
if y in years:
html += '<h3 id="y{0}">{0}</h3>\n\n\n<ul>\n'.format(y)
for d in dictlist:
if 'year' in d and int(d['year']) == y:
mandata = [d[key] for key in mandatory]
if 'url' in d:
# print d  # debug output; keep disabled
html += '<p>{0}, <a href="{2}" target="_blank"><i>{1}</i></a>'.format(*mandata + [d['url']])
else:
html += '<p>{0}, <i>{1}</i>'.format(*mandata)
for t in optional:
if t in d:
if t == 'journal': html += ', {0}'.format(d[t])
if t == 'eprint': html += ':{0}'.format(d[t])
if t == 'volume': html += ' <b>{0}</b>'.format(d[t])
if t == 'pages':
a = cleanup_page(d[t])
html += ', {0}'.format(a)
if t == 'year': html += ', {0}'.format(d[t])
if t == 'doi':
html += ' <a href="{0}" target="_blank">[doi]</a>'.format(d[t])
html += '</p>\n'
counter += 1
html += '</ul>\n'
# Fill up the empty fields in the template
a, mark, b = template.partition('<!--LIST_OF_REFERENCES-->')
a = a.replace('<!--NUMBER_OF_REFERENCES-->', str(counter), 1)
a = a.replace('<!--NEWER-->', str(newer), 1)
a = a.replace('<!--OLDER-->', str(older), 1)
now = date.today()
a = a.replace('<!--DATE-->', now.strftime('%d %b %Y'))
# Join the header, list and footer html code
final = a + html + b
# Write the final result to the output file
with open(outputfile, 'w') as f:
f.write(final)
| 27.479508
| 112
| 0.515138
|
53b2185ef515881dde9728584904aa0e753b2040
| 1,082
|
py
|
Python
|
701-800/761-770/763-partitionLabels/partitionLabels.py
|
xuychen/Leetcode
|
c8bf33af30569177c5276ffcd72a8d93ba4c402a
|
[
"MIT"
] | null | null | null |
701-800/761-770/763-partitionLabels/partitionLabels.py
|
xuychen/Leetcode
|
c8bf33af30569177c5276ffcd72a8d93ba4c402a
|
[
"MIT"
] | null | null | null |
701-800/761-770/763-partitionLabels/partitionLabels.py
|
xuychen/Leetcode
|
c8bf33af30569177c5276ffcd72a8d93ba4c402a
|
[
"MIT"
] | null | null | null |
class Solution(object):
def partitionLabels(self, S):
"""
:type S: str
:rtype: List[int]
"""
dictionary = {}
index = -1
counter = []
for char in S:
if char not in dictionary:
index += 1
dictionary[char] = index
counter.append(0)
elif dictionary[char] != index:
for key in dictionary:
dictionary[key] = min(dictionary[key], dictionary[char])
for _ in range(index-dictionary[char]):
counter[dictionary[char]] += counter.pop()
index = dictionary[char]
counter[index] += 1
return counter
def partition_labels(self, S):
# Greedy alternative: record the last index of every letter, then extend
# the current partition until it covers the last occurrence of every
# letter seen so far.
rightmost = {c: i for i, c in enumerate(S)}
left, right = 0, 0
result = []
for i, letter in enumerate(S):
right = max(right, rightmost[letter])
if i == right:
result += [right - left + 1]
left = i + 1
return result
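# Example (the standard LeetCode 763 sample input):
#   Solution().partition_labels("ababcbacadefegdehijhklij")  # -> [9, 7, 8]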
| 25.162791
| 76
| 0.467652
|
34998dafee2d5b0abab5b24a8a1a045fcb034d90
| 3,173
|
py
|
Python
|
mt/base/aio/procedure.py
|
inteplus/mtbase
|
b211f25110f95be8b78be3e44feb1c16789c13b8
|
[
"MIT"
] | null | null | null |
mt/base/aio/procedure.py
|
inteplus/mtbase
|
b211f25110f95be8b78be3e44feb1c16789c13b8
|
[
"MIT"
] | null | null | null |
mt/base/aio/procedure.py
|
inteplus/mtbase
|
b211f25110f95be8b78be3e44feb1c16789c13b8
|
[
"MIT"
] | null | null | null |
'''Asynchronous procedure.
An asynchronous procedure, a.k.a. an aproc, is a procedure that is asynchronous and has been
wrapped into an :class:`asyncio.Future`. A procedure is a function that returns None.
'''
import asyncio
__all__ = ['AprocManager']
class AprocManager:
'''Manages the completion of aprocs.
With this manager, the user can just send an aproc to it and forget. To ensure all aprocs
are completed, please invoke the cleanup function. Otherwise, some aprocs may never get
awaited when the manager dies.
Parameters
----------
max_concurrency : int
maximum number of concurrent aprocs that can be held pending
handle_exception : {'raise', 'silent', 'warn'}
policy for handling an exception raised by an aproc. If 'raise', re-raise the caught
exception. If 'silent', ignore the exception. If 'warn', use the provided logger to
warn the user.
logger : logging.Logger or equivalent
logger for warning purposes
'''
def __init__(self, max_concurrency: int = 1024, handle_exception: str = 'raise', logger=None):
self.max_concurrency = max_concurrency
self.aproc_set = set()
self.handle_exception = handle_exception
self.logger = logger
if handle_exception == 'warn' and logger is None:
raise ValueError("A logger must be provided if keyword 'handle_exception' is set to 'warn'.")
async def _sleep_well(self, max_concurrency=None):
max_concurrency = self.max_concurrency if max_concurrency is None else max_concurrency
while len(self.aproc_set) >= max_concurrency:
done_set, pending_set = await asyncio.wait(self.aproc_set, return_when=asyncio.FIRST_COMPLETED)
for task in done_set:
if task.cancelled():
if self.handle_exception == 'raise':
raise asyncio.CancelledError("An aproc has been cancelled.")
if self.handle_exception == 'warn':
self.logger.warn("An aproc has been cancelled: {}.".format(task))
elif task.exception() is not None:
if self.handle_exception == 'raise':
raise task.exception()
if self.handle_exception == 'warn':
self.logger.warn("An exception has been caught (and ignored) in an aproc.")
self.logger.warn(str(task.exception()))
self.aproc_set = pending_set
async def send(self, aproc: asyncio.Future):
'''Sends an aproc to the manager so the user can forget about it.
The function usually returns immediately. However, if the maximum number of concurrent
aprocs has been exceeded. It will await.
Parameters
----------
aproc : asyncio.Future
a future (returned via :func:`asyncio.create_task` or :func:`asyncio.ensure_future`)
that is a procedure
'''
await self._sleep_well()
self.aproc_set.add(aproc)
async def cleanup(self):
'''Awaits until all aprocs are done.'''
await self._sleep_well(1)
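# A hedged usage sketch (``some_procedure`` is a hypothetical coroutine, not
# part of this module):
#   async def main():
#       mgr = AprocManager(max_concurrency=4)
#       for i in range(10):
#           await mgr.send(asyncio.ensure_future(some_procedure(i)))
#       await mgr.cleanup()   # awaits until all aprocs are done
#   asyncio.run(main())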
| 39.6625
| 109
| 0.6341
|
4aa608f4c4a712aa20c339c1e574011ead0e687f
| 118,829
|
py
|
Python
|
python/ccxt/ascendex.py
|
ChristianCoenen/ccxt
|
261e3549b4cfe9fa4ecf1a00feb0450337eab686
|
[
"MIT"
] | null | null | null |
python/ccxt/ascendex.py
|
ChristianCoenen/ccxt
|
261e3549b4cfe9fa4ecf1a00feb0450337eab686
|
[
"MIT"
] | null | null | null |
python/ccxt/ascendex.py
|
ChristianCoenen/ccxt
|
261e3549b4cfe9fa4ecf1a00feb0450337eab686
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class ascendex(Exchange):
def describe(self):
return self.deep_extend(super(ascendex, self).describe(), {
'id': 'ascendex',
'name': 'AscendEX',
'countries': ['SG'], # Singapore
            # 8 requests per minute = 0.13333 per second => rateLimit = 7500 ms
# testing 400 works
'rateLimit': 400,
'certified': True,
'pro': True,
# new metainfo interface
'has': {
'CORS': None,
'spot': True,
'margin': True,
'swap': True,
'future': True,
'option': False,
'addMargin': True,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': True,
'createStopLimitOrder': True,
'createStopMarketOrder': True,
'createStopOrder': True,
'fetchAccounts': True,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDepositAddresses': False,
'fetchDepositAddressesByNetwork': False,
'fetchDeposits': True,
'fetchFundingHistory': False,
'fetchFundingRate': 'emulated',
'fetchFundingRateHistory': False,
'fetchFundingRates': True,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': True,
'fetchMarketLeverageTiers': 'emulated',
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': False,
'fetchPosition': False,
'fetchPositions': True,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': True,
'fetchTransactionFee': False,
'fetchTransactionFees': False,
'fetchTransactions': True,
'fetchTransfer': False,
'fetchTransfers': False,
'fetchWithdrawal': False,
'fetchWithdrawals': True,
'reduceMargin': True,
'setLeverage': True,
'setMarginMode': True,
'setPositionMode': False,
'transfer': True,
},
'timeframes': {
'1m': '1',
'5m': '5',
'15m': '15',
'30m': '30',
'1h': '60',
'2h': '120',
'4h': '240',
'6h': '360',
'12h': '720',
'1d': '1d',
'1w': '1w',
'1M': '1m',
},
'version': 'v2',
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/112027508-47984600-8b48-11eb-9e17-d26459cc36c6.jpg',
'api': {
'rest': 'https://ascendex.com',
},
'test': {
'rest': 'https://api-test.ascendex-sandbox.com',
},
'www': 'https://ascendex.com',
'doc': [
'https://ascendex.github.io/ascendex-pro-api/#ascendex-pro-api-documentation',
],
'fees': 'https://ascendex.com/en/feerate/transactionfee-traderate',
'referral': {
'url': 'https://ascendex.com/en-us/register?inviteCode=EL6BXBQM',
'discount': 0.25,
},
},
'api': {
'v1': {
'public': {
'get': {
'assets': 1,
'products': 1,
'ticker': 1,
'barhist/info': 1,
'barhist': 1,
'depth': 1,
'trades': 1,
'cash/assets': 1, # not documented
'cash/products': 1, # not documented
'margin/assets': 1, # not documented
'margin/products': 1, # not documented
'futures/collateral': 1,
'futures/contracts': 1,
'futures/ref-px': 1,
'futures/market-data': 1,
'futures/funding-rates': 1,
'risk-limit-info': 1,
},
},
'private': {
'get': {
'info': 1,
'wallet/transactions': 1,
'wallet/deposit/address': 1, # not documented
'data/balance/snapshot': 1,
'data/balance/history': 1,
},
'accountCategory': {
'get': {
'balance': 1,
'order/open': 1,
'order/status': 1,
'order/hist/current': 1,
'risk': 1,
},
'post': {
'order': 1,
'order/batch': 1,
},
'delete': {
'order': 1,
'order/all': 1,
'order/batch': 1,
},
},
'accountGroup': {
'get': {
'cash/balance': 1,
'margin/balance': 1,
'margin/risk': 1,
'futures/collateral-balance': 1,
'futures/position': 1,
'futures/risk': 1,
'futures/funding-payments': 1,
'order/hist': 1,
'spot/fee': 1,
},
'post': {
'transfer': 1,
'futures/transfer/deposit': 1,
'futures/transfer/withdraw': 1,
},
},
},
},
'v2': {
'public': {
'get': {
'assets': 1,
'futures/contract': 1,
'futures/collateral': 1,
'futures/pricing-data': 1,
},
},
'private': {
'get': {
'account/info': 1,
},
'accountGroup': {
'get': {
'order/hist': 1,
'futures/position': 1,
'futures/free-margin': 1,
'futures/order/hist/current': 1,
'futures/order/open': 1,
'futures/order/status': 1,
},
'post': {
'futures/isolated-position-margin': 1,
'futures/margin-type': 1,
'futures/leverage': 1,
'futures/transfer/deposit': 1,
'futures/transfer/withdraw': 1,
'futures/order': 1,
'futures/order/batch': 1,
'futures/order/open': 1,
'subuser/subuser-transfer': 1,
'subuser/subuser-transfer-hist': 1,
},
'delete': {
'futures/order': 1,
'futures/order/batch': 1,
'futures/order/all': 1,
},
},
},
},
},
'fees': {
'trading': {
'feeSide': 'get',
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.002'),
'maker': self.parse_number('0.002'),
},
},
'precisionMode': TICK_SIZE,
'options': {
'account-category': 'cash', # 'cash', 'margin', 'futures' # obsolete
'account-group': None,
'fetchClosedOrders': {
'method': 'v1PrivateAccountGroupGetOrderHist', # 'v1PrivateAccountGroupGetAccountCategoryOrderHistCurrent'
},
'defaultType': 'spot', # 'spot', 'margin', 'swap'
'accountsByType': {
'spot': 'cash',
'swap': 'futures',
'future': 'futures',
'margin': 'margin',
},
'transfer': {
'fillResponseFromRequest': True,
},
},
'exceptions': {
'exact': {
# not documented
'1900': BadRequest, # {"code":1900,"message":"Invalid Http Request Input"}
'2100': AuthenticationError, # {"code":2100,"message":"ApiKeyFailure"}
'5002': BadSymbol, # {"code":5002,"message":"Invalid Symbol"}
'6001': BadSymbol, # {"code":6001,"message":"Trading is disabled on symbol."}
'6010': InsufficientFunds, # {'code': 6010, 'message': 'Not enough balance.'}
'60060': InvalidOrder, # {'code': 60060, 'message': 'The order is already filled or canceled.'}
'600503': InvalidOrder, # {"code":600503,"message":"Notional is too small."}
# documented
'100001': BadRequest, # INVALID_HTTP_INPUT Http request is invalid
'100002': BadRequest, # DATA_NOT_AVAILABLE Some required data is missing
'100003': BadRequest, # KEY_CONFLICT The same key exists already
'100004': BadRequest, # INVALID_REQUEST_DATA The HTTP request contains invalid field or argument
'100005': BadRequest, # INVALID_WS_REQUEST_DATA Websocket request contains invalid field or argument
                    '100006': BadRequest,  # INVALID_ARGUMENT The argument is invalid
'100007': BadRequest, # ENCRYPTION_ERROR Something wrong with data encryption
'100008': BadSymbol, # SYMBOL_ERROR Symbol does not exist or not valid for the request
                    '100009': AuthenticationError,  # AUTHORIZATION_NEEDED Authorization is required for the API access or request
'100010': BadRequest, # INVALID_OPERATION The action is invalid or not allowed for the account
'100011': BadRequest, # INVALID_TIMESTAMP Not a valid timestamp
'100012': BadRequest, # INVALID_STR_FORMAT String format does not
'100013': BadRequest, # INVALID_NUM_FORMAT Invalid number input
'100101': ExchangeError, # UNKNOWN_ERROR Some unknown error
'150001': BadRequest, # INVALID_JSON_FORMAT Require a valid json object
'200001': AuthenticationError, # AUTHENTICATION_FAILED Authorization failed
'200002': ExchangeError, # TOO_MANY_ATTEMPTS Tried and failed too many times
'200003': ExchangeError, # ACCOUNT_NOT_FOUND Account not exist
'200004': ExchangeError, # ACCOUNT_NOT_SETUP Account not setup properly
'200005': ExchangeError, # ACCOUNT_ALREADY_EXIST Account already exist
                    '200006': ExchangeError,  # ACCOUNT_ERROR Some error related with the account
'200007': ExchangeError, # CODE_NOT_FOUND
'200008': ExchangeError, # CODE_EXPIRED Code expired
'200009': ExchangeError, # CODE_MISMATCH Code does not match
                    '200010': AuthenticationError,  # PASSWORD_ERROR Wrong password
'200011': ExchangeError, # CODE_GEN_FAILED Do not generate required code promptly
'200012': ExchangeError, # FAKE_COKE_VERIFY
'200013': ExchangeError, # SECURITY_ALERT Provide security alert message
'200014': PermissionDenied, # RESTRICTED_ACCOUNT Account is restricted for certain activity, such as trading, or withdraw.
'200015': PermissionDenied, # PERMISSION_DENIED No enough permission for the operation
'300001': InvalidOrder, # INVALID_PRICE Order price is invalid
'300002': InvalidOrder, # INVALID_QTY Order size is invalid
'300003': InvalidOrder, # INVALID_SIDE Order side is invalid
'300004': InvalidOrder, # INVALID_NOTIONAL Notional is too small or too large
                    '300005': InvalidOrder,  # INVALID_TYPE Order type is invalid
'300006': InvalidOrder, # INVALID_ORDER_ID Order id is invalid
'300007': InvalidOrder, # INVALID_TIME_IN_FORCE Time In Force in order request is invalid
'300008': InvalidOrder, # INVALID_ORDER_PARAMETER Some order parameter is invalid
'300009': InvalidOrder, # TRADING_VIOLATION Trading violation on account or asset
'300011': InsufficientFunds, # INVALID_BALANCE No enough account or asset balance for the trading
'300012': BadSymbol, # INVALID_PRODUCT Not a valid product supported by exchange
'300013': InvalidOrder, # INVALID_BATCH_ORDER Some or all orders are invalid in batch order request
'300014': InvalidOrder, # {"code":300014,"message":"Order price doesn't conform to the required tick size: 0.1","reason":"TICK_SIZE_VIOLATION"}
'300020': InvalidOrder, # TRADING_RESTRICTED There is some trading restriction on account or asset
'300021': InvalidOrder, # TRADING_DISABLED Trading is disabled on account or asset
'300031': InvalidOrder, # NO_MARKET_PRICE No market price for market type order trading
'310001': InsufficientFunds, # INVALID_MARGIN_BALANCE No enough margin balance
'310002': InvalidOrder, # INVALID_MARGIN_ACCOUNT Not a valid account for margin trading
'310003': InvalidOrder, # MARGIN_TOO_RISKY Leverage is too high
'310004': BadSymbol, # INVALID_MARGIN_ASSET This asset does not support margin trading
'310005': InvalidOrder, # INVALID_REFERENCE_PRICE There is no valid reference price
'510001': ExchangeError, # SERVER_ERROR Something wrong with server.
                    '900001': ExchangeError,  # HUMAN_CHALLENGE Human challenge did not pass
},
'broad': {},
},
'commonCurrencies': {
'BOND': 'BONDED',
'BTCBEAR': 'BEAR',
'BTCBULL': 'BULL',
'BYN': 'BeyondFi',
'PLN': 'Pollen',
},
})
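
    # A minimal configuration sketch (illustrative only; the API keys are placeholders and
    # 'defaultType' mirrors the 'options' entry defined above -- 'spot', 'margin' or 'swap'):
    #
    #     import ccxt
    #     exchange = ccxt.ascendex({
    #         'apiKey': 'YOUR_API_KEY',
    #         'secret': 'YOUR_SECRET',
    #         'options': {'defaultType': 'spot'},
    #     })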
def get_account(self, params={}):
# get current or provided bitmax sub-account
        account = self.safe_value(params, 'account', self.safe_value(self.options, 'account', 'cash'))
return account.lower().capitalize()
def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict params: extra parameters specific to the ascendex api endpoint
:returns dict: an associative dictionary of currencies
"""
assets = self.v1PublicGetAssets(params)
#
# {
# "code":0,
# "data":[
# {
# "assetCode" : "LTCBULL",
# "assetName" : "3X Long LTC Token",
# "precisionScale" : 9,
# "nativeScale" : 4,
# "withdrawalFee" : "0.2",
# "minWithdrawalAmt" : "1.0",
# "status" : "Normal"
# },
# ]
# }
#
margin = self.v1PublicGetMarginAssets(params)
#
# {
# "code":0,
# "data":[
# {
# "assetCode":"BTT",
# "borrowAssetCode":"BTT-B",
# "interestAssetCode":"BTT-I",
# "nativeScale":0,
# "numConfirmations":1,
# "withdrawFee":"100.0",
# "minWithdrawalAmt":"1000.0",
# "statusCode":"Normal",
# "statusMessage":"",
# "interestRate":"0.001"
# }
# ]
# }
#
cash = self.v1PublicGetCashAssets(params)
#
# {
# "code":0,
# "data":[
# {
# "assetCode":"LTCBULL",
# "nativeScale":4,
# "numConfirmations":20,
# "withdrawFee":"0.2",
# "minWithdrawalAmt":"1.0",
# "statusCode":"Normal",
# "statusMessage":""
# }
# ]
# }
#
assetsData = self.safe_value(assets, 'data', [])
marginData = self.safe_value(margin, 'data', [])
cashData = self.safe_value(cash, 'data', [])
assetsById = self.index_by(assetsData, 'assetCode')
marginById = self.index_by(marginData, 'assetCode')
cashById = self.index_by(cashData, 'assetCode')
dataById = self.deep_extend(assetsById, marginById, cashById)
ids = list(dataById.keys())
result = {}
for i in range(0, len(ids)):
id = ids[i]
currency = dataById[id]
code = self.safe_currency_code(id)
scale = self.safe_string_2(currency, 'precisionScale', 'nativeScale')
precision = self.parse_number(self.parse_precision(scale))
# why would the exchange API have different names for the same field
fee = self.safe_number_2(currency, 'withdrawFee', 'withdrawalFee')
status = self.safe_string_2(currency, 'status', 'statusCode')
active = (status == 'Normal')
margin = ('borrowAssetCode' in currency)
result[code] = {
'id': id,
'code': code,
'info': currency,
'type': None,
'margin': margin,
'name': self.safe_string(currency, 'assetName'),
'active': active,
'deposit': None,
'withdraw': None,
'fee': fee,
'precision': precision,
'limits': {
'amount': {
'min': precision,
'max': None,
},
'withdraw': {
'min': self.safe_number(currency, 'minWithdrawalAmt'),
'max': None,
},
},
}
return result
def fetch_markets(self, params={}):
"""
retrieves data on all markets for ascendex
:param dict params: extra parameters specific to the exchange api endpoint
:returns [dict]: an array of objects representing market data
"""
products = self.v1PublicGetProducts(params)
#
# {
# "code":0,
# "data":[
# {
# "symbol":"LBA/BTC",
# "baseAsset":"LBA",
# "quoteAsset":"BTC",
# "status":"Normal",
# "minNotional":"0.000625",
# "maxNotional":"6.25",
# "marginTradable":false,
# "commissionType":"Quote",
# "commissionReserveRate":"0.001",
# "tickSize":"0.000000001",
# "lotSize":"1"
# },
# ]
# }
#
cash = self.v1PublicGetCashProducts(params)
#
# {
# "code":0,
# "data":[
# {
# "symbol":"QTUM/BTC",
# "domain":"BTC",
# "tradingStartTime":1569506400000,
# "collapseDecimals":"0.0001,0.000001,0.00000001",
# "minQty":"0.000000001",
# "maxQty":"1000000000",
# "minNotional":"0.000625",
# "maxNotional":"12.5",
# "statusCode":"Normal",
# "statusMessage":"",
# "tickSize":"0.00000001",
# "useTick":false,
# "lotSize":"0.1",
# "useLot":false,
# "commissionType":"Quote",
# "commissionReserveRate":"0.001",
# "qtyScale":1,
# "priceScale":8,
# "notionalScale":4
# }
# ]
# }
#
perpetuals = self.v2PublicGetFuturesContract(params)
#
# {
# "code":0,
# "data":[
# {
# "symbol":"BTC-PERP",
# "status":"Normal",
# "displayName":"BTCUSDT",
# "settlementAsset":"USDT",
# "underlying":"BTC/USDT",
# "tradingStartTime":1579701600000,
# "priceFilter":{"minPrice":"1","maxPrice":"1000000","tickSize":"1"},
# "lotSizeFilter":{"minQty":"0.0001","maxQty":"1000000000","lotSize":"0.0001"},
# "commissionType":"Quote",
# "commissionReserveRate":"0.001",
# "marketOrderPriceMarkup":"0.03",
# "marginRequirements":[
# {"positionNotionalLowerBound":"0","positionNotionalUpperBound":"50000","initialMarginRate":"0.01","maintenanceMarginRate":"0.006"},
# {"positionNotionalLowerBound":"50000","positionNotionalUpperBound":"200000","initialMarginRate":"0.02","maintenanceMarginRate":"0.012"},
# {"positionNotionalLowerBound":"200000","positionNotionalUpperBound":"2000000","initialMarginRate":"0.04","maintenanceMarginRate":"0.024"},
# {"positionNotionalLowerBound":"2000000","positionNotionalUpperBound":"20000000","initialMarginRate":"0.1","maintenanceMarginRate":"0.06"},
# {"positionNotionalLowerBound":"20000000","positionNotionalUpperBound":"40000000","initialMarginRate":"0.2","maintenanceMarginRate":"0.12"},
# {"positionNotionalLowerBound":"40000000","positionNotionalUpperBound":"1000000000","initialMarginRate":"0.333333","maintenanceMarginRate":"0.2"}
# ]
# }
# ]
# }
#
productsData = self.safe_value(products, 'data', [])
productsById = self.index_by(productsData, 'symbol')
cashData = self.safe_value(cash, 'data', [])
perpetualsData = self.safe_value(perpetuals, 'data', [])
cashAndPerpetualsData = self.array_concat(cashData, perpetualsData)
cashAndPerpetualsById = self.index_by(cashAndPerpetualsData, 'symbol')
dataById = self.deep_extend(productsById, cashAndPerpetualsById)
ids = list(dataById.keys())
result = []
for i in range(0, len(ids)):
id = ids[i]
market = dataById[id]
baseId = self.safe_string(market, 'baseAsset')
quoteId = self.safe_string(market, 'quoteAsset')
settleId = self.safe_value(market, 'settlementAsset')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
settle = self.safe_currency_code(settleId)
status = self.safe_string(market, 'status')
active = False
if (status == 'Normal') or (status == 'InternalTrading'):
active = True
spot = settle is None
swap = not spot
linear = True if swap else None
minQty = self.safe_number(market, 'minQty')
maxQty = self.safe_number(market, 'maxQty')
minPrice = self.safe_number(market, 'tickSize')
maxPrice = None
symbol = base + '/' + quote
if swap:
lotSizeFilter = self.safe_value(market, 'lotSizeFilter')
minQty = self.safe_number(lotSizeFilter, 'minQty')
maxQty = self.safe_number(lotSizeFilter, 'maxQty')
priceFilter = self.safe_value(market, 'priceFilter')
minPrice = self.safe_number(priceFilter, 'minPrice')
maxPrice = self.safe_number(priceFilter, 'maxPrice')
underlying = self.safe_string(market, 'underlying')
parts = underlying.split('/')
baseId = self.safe_string(parts, 0)
quoteId = self.safe_string(parts, 1)
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote + ':' + settle
fee = self.safe_number(market, 'commissionReserveRate')
marginTradable = self.safe_value(market, 'marginTradable', False)
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'settle': settle,
'baseId': baseId,
'quoteId': quoteId,
'settleId': settleId,
'type': 'swap' if swap else 'spot',
'spot': spot,
'margin': marginTradable if spot else None,
'swap': swap,
'future': False,
'option': False,
'active': active,
'contract': swap,
'linear': linear,
'inverse': not linear if swap else None,
'taker': fee,
'maker': fee,
'contractSize': self.parse_number('1') if swap else None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': self.safe_number(market, 'lotSize'),
'price': self.safe_number(market, 'tickSize'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': minQty,
'max': maxQty,
},
'price': {
'min': minPrice,
'max': maxPrice,
},
'cost': {
'min': self.safe_number(market, 'minNotional'),
'max': self.safe_number(market, 'maxNotional'),
},
},
'info': market,
})
return result
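
    # An illustrative lookup sketch (assumes an instantiated exchange object named `exchange`;
    # load_markets() calls fetch_markets() and indexes the result by unified symbol):
    #
    #     markets = exchange.load_markets()
    #     spot_market = exchange.market('BTC/USDT')       # spot, if listed
    #     swap_market = exchange.market('BTC/USDT:USDT')  # linear perpetual, if listed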
def fetch_accounts(self, params={}):
accountGroup = self.safe_string(self.options, 'account-group')
response = None
if accountGroup is None:
response = self.v1PrivateGetInfo(params)
#
# {
# "code":0,
# "data":{
# "email":"igor.kroitor@gmail.com",
# "accountGroup":8,
# "viewPermission":true,
# "tradePermission":true,
# "transferPermission":true,
# "cashAccount":["cshrHKLZCjlZ2ejqkmvIHHtPmLYqdnda"],
# "marginAccount":["martXoh1v1N3EMQC5FDtSj5VHso8aI2Z"],
# "futuresAccount":["futc9r7UmFJAyBY2rE3beA2JFxav2XFF"],
# "userUID":"U6491137460"
# }
# }
#
data = self.safe_value(response, 'data', {})
accountGroup = self.safe_string(data, 'accountGroup')
self.options['account-group'] = accountGroup
return [
{
'id': accountGroup,
'type': None,
'currency': None,
'info': response,
},
]
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
balances = self.safe_value(response, 'data', [])
for i in range(0, len(balances)):
balance = balances[i]
code = self.safe_currency_code(self.safe_string(balance, 'asset'))
account = self.account()
account['free'] = self.safe_string(balance, 'availableBalance')
account['total'] = self.safe_string(balance, 'totalBalance')
result[code] = account
return self.safe_balance(result)
def parse_swap_balance(self, response):
timestamp = self.milliseconds()
result = {
'info': response,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
}
data = self.safe_value(response, 'data', {})
collaterals = self.safe_value(data, 'collaterals', [])
for i in range(0, len(collaterals)):
balance = collaterals[i]
code = self.safe_currency_code(self.safe_string(balance, 'asset'))
account = self.account()
account['total'] = self.safe_string(balance, 'balance')
result[code] = account
return self.safe_balance(result)
def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the ascendex api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
self.load_markets()
self.load_accounts()
marketType, query = self.handle_market_type_and_params('fetchBalance', None, params)
options = self.safe_value(self.options, 'fetchBalance', {})
accountsByType = self.safe_value(self.options, 'accountsByType', {})
accountCategory = self.safe_string(accountsByType, marketType, 'cash')
account = self.safe_value(self.accounts, 0, {})
accountGroup = self.safe_string(account, 'id')
request = {
'account-group': accountGroup,
}
defaultMethod = self.safe_string(options, 'method', 'v1PrivateAccountCategoryGetBalance')
method = self.get_supported_mapping(marketType, {
'spot': defaultMethod,
'margin': defaultMethod,
'swap': 'v2PrivateAccountGroupGetFuturesPosition',
})
if accountCategory == 'cash':
request['account-category'] = accountCategory
response = getattr(self, method)(self.extend(request, query))
#
# cash
#
# {
# 'code': 0,
# 'data': [
# {
# 'asset': 'BCHSV',
# 'totalBalance': '64.298000048',
# 'availableBalance': '64.298000048',
# },
# ]
# }
#
# margin
#
# {
# 'code': 0,
# 'data': [
# {
# 'asset': 'BCHSV',
# 'totalBalance': '64.298000048',
# 'availableBalance': '64.298000048',
# 'borrowed': '0',
# 'interest': '0',
# },
# ]
# }
#
# swap
#
# {
# "code": 0,
# "data": {
# "accountId": "fut2ODPhGiY71Pl4vtXnOZ00ssgD7QGn",
# "ac": "FUTURES",
# "collaterals": [
# {"asset":"ADA","balance":"0.355803","referencePrice":"1.05095","discountFactor":"0.9"},
# {"asset":"USDT","balance":"0.000014519","referencePrice":"1","discountFactor":"1"}
# ],
        #         }
# }
#
if marketType == 'swap':
return self.parse_swap_balance(response)
else:
return self.parse_balance(response)
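
    # An illustrative usage sketch (assumes an authenticated instance named `exchange`;
    # the account type is routed through 'defaultType' or the 'type' param):
    #
    #     spot_balance = exchange.fetch_balance()
    #     swap_balance = exchange.fetch_balance({'type': 'swap'})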
def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the ascendex api endpoint
        :returns dict: an `order book structure <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>` for the requested market
"""
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.v1PublicGetDepth(self.extend(request, params))
#
# {
# "code":0,
# "data":{
# "m":"depth-snapshot",
# "symbol":"BTC-PERP",
# "data":{
# "ts":1590223998202,
# "seqnum":115444921,
# "asks":[
# ["9207.5","18.2383"],
# ["9207.75","18.8235"],
# ["9208","10.7873"],
# ],
# "bids":[
# ["9207.25","0.4009"],
# ["9207","0.003"],
# ["9206.5","0.003"],
# ]
# }
# }
# }
#
data = self.safe_value(response, 'data', {})
orderbook = self.safe_value(data, 'data', {})
timestamp = self.safe_integer(orderbook, 'ts')
result = self.parse_order_book(orderbook, symbol, timestamp)
result['nonce'] = self.safe_integer(orderbook, 'seqnum')
return result
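
    # An illustrative usage sketch (assumes an instantiated exchange named `exchange`; the
    # result carries 'bids', 'asks', a 'timestamp' and a 'nonce' taken from 'seqnum'):
    #
    #     orderbook = exchange.fetch_order_book('BTC/USDT')
    #     best_bid = orderbook['bids'][0][0] if orderbook['bids'] else None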
def parse_ticker(self, ticker, market=None):
#
# {
# "symbol":"QTUM/BTC",
# "open":"0.00016537",
# "close":"0.00019077",
# "high":"0.000192",
# "low":"0.00016537",
# "volume":"846.6",
# "ask":["0.00018698","26.2"],
# "bid":["0.00018408","503.7"],
# "type":"spot"
# }
#
timestamp = None
marketId = self.safe_string(ticker, 'symbol')
type = self.safe_string(ticker, 'type')
delimiter = '/' if (type == 'spot') else None
symbol = self.safe_symbol(marketId, market, delimiter)
close = self.safe_string(ticker, 'close')
bid = self.safe_value(ticker, 'bid', [])
ask = self.safe_value(ticker, 'ask', [])
open = self.safe_string(ticker, 'open')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': None,
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(bid, 0),
'bidVolume': self.safe_string(bid, 1),
'ask': self.safe_string(ask, 0),
'askVolume': self.safe_string(ask, 1),
'vwap': None,
'open': open,
'close': close,
'last': close,
'previousClose': None, # previous day close
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_string(ticker, 'volume'),
'quoteVolume': None,
'info': ticker,
}, market)
def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the ascendex api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.v1PublicGetTicker(self.extend(request, params))
#
# {
# "code":0,
# "data":{
# "symbol":"BTC-PERP", # or "BTC/USDT"
# "open":"9073",
# "close":"9185.75",
# "high":"9185.75",
# "low":"9185.75",
# "volume":"576.8334",
# "ask":["9185.75","15.5863"],
# "bid":["9185.5","0.003"],
# "type":"derivatives", # or "spot"
# }
# }
#
data = self.safe_value(response, 'data', {})
return self.parse_ticker(data, market)
def fetch_tickers(self, symbols=None, params={}):
"""
        fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours for each market
:param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict params: extra parameters specific to the ascendex api endpoint
:returns dict: an array of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
self.load_markets()
request = {}
if symbols is not None:
marketIds = self.market_ids(symbols)
request['symbol'] = ','.join(marketIds)
response = self.v1PublicGetTicker(self.extend(request, params))
#
# {
# "code":0,
# "data":[
# {
# "symbol":"QTUM/BTC",
# "open":"0.00016537",
# "close":"0.00019077",
# "high":"0.000192",
# "low":"0.00016537",
# "volume":"846.6",
# "ask":["0.00018698","26.2"],
# "bid":["0.00018408","503.7"],
# "type":"spot"
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_tickers(data, symbols)
def parse_ohlcv(self, ohlcv, market=None):
#
# {
# "m":"bar",
# "s":"BTC/USDT",
# "data":{
# "i":"1",
# "ts":1590228000000,
# "o":"9139.59",
# "c":"9131.94",
# "h":"9139.99",
# "l":"9121.71",
# "v":"25.20648"
# }
# }
#
data = self.safe_value(ohlcv, 'data', {})
return [
self.safe_integer(data, 'ts'),
self.safe_number(data, 'o'),
self.safe_number(data, 'h'),
self.safe_number(data, 'l'),
self.safe_number(data, 'c'),
self.safe_number(data, 'v'),
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the ascendex api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
"""
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'interval': self.timeframes[timeframe],
}
# if since and limit are not specified
# the exchange will return just 1 last candle by default
duration = self.parse_timeframe(timeframe)
options = self.safe_value(self.options, 'fetchOHLCV', {})
defaultLimit = self.safe_integer(options, 'limit', 500)
if since is not None:
request['from'] = since
if limit is None:
limit = defaultLimit
else:
limit = min(limit, defaultLimit)
request['to'] = self.sum(since, limit * duration * 1000, 1)
elif limit is not None:
request['n'] = limit # max 500
response = self.v1PublicGetBarhist(self.extend(request, params))
#
# {
# "code":0,
# "data":[
# {
# "m":"bar",
# "s":"BTC/USDT",
# "data":{
# "i":"1",
# "ts":1590228000000,
# "o":"9139.59",
# "c":"9131.94",
# "h":"9139.99",
# "l":"9121.71",
# "v":"25.20648"
# }
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_ohlcvs(data, market, timeframe, since, limit)
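
    # An illustrative usage sketch (assumes an instantiated exchange named `exchange`;
    # timestamps are in milliseconds and the candle count is capped by the 'limit' option):
    #
    #     one_day_ago = exchange.milliseconds() - 24 * 60 * 60 * 1000
    #     candles = exchange.fetch_ohlcv('BTC/USDT', '1h', since=one_day_ago, limit=24)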
def parse_trade(self, trade, market=None):
#
# public fetchTrades
#
# {
# "p":"9128.5", # price
# "q":"0.0030", # quantity
# "ts":1590229002385, # timestamp
# "bm":false, # if True, the buyer is the market maker, we only use self field to "define the side" of a public trade
# "seqnum":180143985289898554
# }
#
timestamp = self.safe_integer(trade, 'ts')
priceString = self.safe_string_2(trade, 'price', 'p')
amountString = self.safe_string(trade, 'q')
buyerIsMaker = self.safe_value(trade, 'bm', False)
makerOrTaker = 'maker' if buyerIsMaker else 'taker'
side = 'buy' if buyerIsMaker else 'sell'
market = self.safe_market(None, market)
return self.safe_trade({
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'id': None,
'order': None,
'type': None,
'takerOrMaker': makerOrTaker,
'side': side,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': None,
}, market)
def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the ascendex api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['n'] = limit # max 100
response = self.v1PublicGetTrades(self.extend(request, params))
#
# {
# "code":0,
# "data":{
# "m":"trades",
# "symbol":"BTC-PERP",
# "data":[
# {"p":"9128.5","q":"0.0030","ts":1590229002385,"bm":false,"seqnum":180143985289898554},
# {"p":"9129","q":"0.0030","ts":1590229002642,"bm":false,"seqnum":180143985289898587},
# {"p":"9129.5","q":"0.0030","ts":1590229021306,"bm":false,"seqnum":180143985289899043}
# ]
# }
# }
#
records = self.safe_value(response, 'data', [])
trades = self.safe_value(records, 'data', [])
return self.parse_trades(trades, market, since, limit)
def parse_order_status(self, status):
statuses = {
'PendingNew': 'open',
'New': 'open',
'PartiallyFilled': 'open',
'Filled': 'closed',
'Canceled': 'canceled',
'Rejected': 'rejected',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# createOrder
#
# {
# "id": "16e607e2b83a8bXHbAwwoqDo55c166fa",
# "orderId": "16e85b4d9b9a8bXHbAwwoqDoc3d66830",
# "orderType": "Market",
# "symbol": "BTC/USDT",
# "timestamp": 1573576916201
# }
#
# {
# "ac": "FUTURES",
# "accountId": "fut2ODPhGiY71Pl4vtXnOZ00ssgD7QGn",
# "time": 1640819389454,
# "orderId": "a17e0874ecbdU0711043490bbtcpDU5X",
# "seqNum": -1,
# "orderType": "Limit",
# "execInst": "NULL_VAL",
# "side": "Buy",
# "symbol": "BTC-PERP",
# "price": "30000",
# "orderQty": "0.002",
# "stopPrice": "0",
# "stopBy": "ref-px",
# "status": "Ack",
# "lastExecTime": 1640819389454,
# "lastQty": "0",
# "lastPx": "0",
# "avgFilledPx": "0",
# "cumFilledQty": "0",
# "fee": "0",
# "cumFee": "0",
# "feeAsset": "",
# "errorCode": "",
# "posStopLossPrice": "0",
# "posStopLossTrigger": "market",
# "posTakeProfitPrice": "0",
# "posTakeProfitTrigger": "market",
# "liquidityInd": "n"
# }
#
# fetchOrder, fetchOpenOrders, fetchClosedOrders
#
# {
# "symbol": "BTC/USDT",
# "price": "8131.22",
# "orderQty": "0.00082",
# "orderType": "Market",
# "avgPx": "7392.02",
# "cumFee": "0.005152238",
# "cumFilledQty": "0.00082",
# "errorCode": "",
# "feeAsset": "USDT",
# "lastExecTime": 1575953151764,
# "orderId": "a16eee20b6750866943712zWEDdAjt3",
# "seqNum": 2623469,
# "side": "Buy",
# "status": "Filled",
# "stopPrice": "",
# "execInst": "NULL_VAL"
# }
#
# {
# "ac": "FUTURES",
# "accountId": "testabcdefg",
# "avgPx": "0",
# "cumFee": "0",
# "cumQty": "0",
# "errorCode": "NULL_VAL",
# "execInst": "NULL_VAL",
# "feeAsset": "USDT",
# "lastExecTime": 1584072844085,
# "orderId": "r170d21956dd5450276356bbtcpKa74",
# "orderQty": "1.1499",
# "orderType": "Limit",
# "price": "4000",
# "sendingTime": 1584072841033,
# "seqNum": 24105338,
# "side": "Buy",
# "status": "Canceled",
# "stopPrice": "",
# "symbol": "BTC-PERP"
# },
#
status = self.parse_order_status(self.safe_string(order, 'status'))
marketId = self.safe_string(order, 'symbol')
symbol = self.safe_symbol(marketId, market, '/')
timestamp = self.safe_integer_2(order, 'timestamp', 'sendingTime')
lastTradeTimestamp = self.safe_integer(order, 'lastExecTime')
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'orderQty')
average = self.safe_string(order, 'avgPx')
filled = self.safe_string_2(order, 'cumFilledQty', 'cumQty')
id = self.safe_string(order, 'orderId')
clientOrderId = self.safe_string(order, 'id')
if clientOrderId is not None:
if len(clientOrderId) < 1:
clientOrderId = None
type = self.safe_string_lower(order, 'orderType')
side = self.safe_string_lower(order, 'side')
feeCost = self.safe_number(order, 'cumFee')
fee = None
if feeCost is not None:
feeCurrencyId = self.safe_string(order, 'feeAsset')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
stopPrice = self.safe_number(order, 'stopPrice')
reduceOnly = None
execInst = self.safe_string(order, 'execInst')
if execInst == 'reduceOnly':
reduceOnly = True
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': lastTradeTimestamp,
'symbol': symbol,
'type': type,
'timeInForce': None,
'postOnly': None,
'reduceOnly': reduceOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': None,
'average': average,
'filled': filled,
'remaining': None,
'status': status,
'fee': fee,
'trades': None,
}, market)
def fetch_trading_fees(self, params={}):
"""
fetch the trading fees for multiple markets
:param dict params: extra parameters specific to the ascendex api endpoint
:returns dict: a dictionary of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>` indexed by market symbols
"""
self.load_markets()
self.load_accounts()
account = self.safe_value(self.accounts, 0, {})
accountGroup = self.safe_string(account, 'id')
request = {
'account-group': accountGroup,
}
response = self.v1PrivateAccountGroupGetSpotFee(self.extend(request, params))
#
# {
# code: '0',
# data: {
# domain: 'spot',
# userUID: 'U1479576458',
# vipLevel: '0',
# fees: [
# {symbol: 'HT/USDT', fee: {taker: '0.001', maker: '0.001'}},
# {symbol: 'LAMB/BTC', fee: {taker: '0.002', maker: '0.002'}},
# {symbol: 'STOS/USDT', fee: {taker: '0.002', maker: '0.002'}},
# ...
# ]
# }
# }
#
data = self.safe_value(response, 'data', {})
fees = self.safe_value(data, 'fees', [])
result = {}
for i in range(0, len(fees)):
fee = fees[i]
marketId = self.safe_string(fee, 'symbol')
symbol = self.safe_symbol(marketId, None, '/')
takerMaker = self.safe_value(fee, 'fee', {})
result[symbol] = {
'info': fee,
'symbol': symbol,
'maker': self.safe_number(takerMaker, 'maker'),
'taker': self.safe_number(takerMaker, 'taker'),
}
return result
def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
        :param float price: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the ascendex api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
self.load_markets()
self.load_accounts()
market = self.market(symbol)
marketType = None
marketType, params = self.handle_market_type_and_params('createOrder', market, params)
options = self.safe_value(self.options, 'createOrder', {})
accountsByType = self.safe_value(self.options, 'accountsByType', {})
accountCategory = self.safe_string(accountsByType, marketType, 'cash')
account = self.safe_value(self.accounts, 0, {})
accountGroup = self.safe_value(account, 'id')
clientOrderId = self.safe_string_2(params, 'clientOrderId', 'id')
request = {
'account-group': accountGroup,
'account-category': accountCategory,
'symbol': market['id'],
'time': self.milliseconds(),
'orderQty': self.amount_to_precision(symbol, amount),
'orderType': type, # "limit", "market", "stop_market", "stop_limit"
'side': side, # "buy" or "sell"
# 'orderPrice': self.price_to_precision(symbol, price),
# 'stopPrice': self.price_to_precision(symbol, stopPrice), # required for stop orders
# 'postOnly': 'false', # 'false', 'true'
# 'timeInForce': 'GTC', # GTC, IOC, FOK
            # 'respInst': 'ACK',  # ACK, ACCEPT, DONE
# 'posStopLossPrice': position stop loss price( v2 swap orders only)
# 'posTakeProfitPrice': position take profit price(v2 swap orders only)
}
reduceOnly = self.safe_value(params, 'reduceOnly')
if reduceOnly is not None:
if (marketType != 'swap'):
raise InvalidOrder(self.id + ' createOrder() does not support reduceOnly for ' + marketType + ' orders, reduceOnly orders are supported for perpetuals only')
if reduceOnly is True:
request['execInst'] = 'reduceOnly'
if clientOrderId is not None:
request['id'] = clientOrderId
params = self.omit(params, ['clientOrderId', 'id'])
if (type == 'limit') or (type == 'stop_limit'):
request['orderPrice'] = self.price_to_precision(symbol, price)
if (type == 'stop_limit') or (type == 'stop_market'):
stopPrice = self.safe_number(params, 'stopPrice')
if stopPrice is None:
raise InvalidOrder(self.id + ' createOrder() requires a stopPrice parameter for ' + type + ' orders')
else:
request['stopPrice'] = self.price_to_precision(symbol, stopPrice)
params = self.omit(params, 'stopPrice')
timeInForce = self.safe_string(params, 'timeInForce')
postOnly = self.safe_value(params, 'postOnly', False)
if (timeInForce == 'PO') or (postOnly):
request['postOnly'] = True
params = self.omit(params, ['postOnly', 'timeInForce'])
defaultMethod = self.safe_string(options, 'method', 'v1PrivateAccountCategoryPostOrder')
method = self.get_supported_mapping(marketType, {
'spot': defaultMethod,
'margin': defaultMethod,
'swap': 'v2PrivateAccountGroupPostFuturesOrder',
})
if method == 'v1PrivateAccountCategoryPostOrder':
if accountCategory is not None:
request['category'] = accountCategory
else:
request['account-category'] = accountCategory
response = getattr(self, method)(self.extend(request, params))
#
# AccountCategoryPostOrder
#
# {
# "code": 0,
# "data": {
# "ac": "MARGIN",
# "accountId": "cshQtyfq8XLAA9kcf19h8bXHbAwwoqDo",
# "action": "place-order",
# "info": {
# "id": "16e607e2b83a8bXHbAwwoqDo55c166fa",
# "orderId": "16e85b4d9b9a8bXHbAwwoqDoc3d66830",
# "orderType": "Market",
# "symbol": "BTC/USDT",
# "timestamp": 1573576916201
# },
# "status": "Ack"
# }
# }
#
# AccountGroupPostFuturesOrder
#
# {
# "code": 0,
# "data": {
# "meta": {
# "id": "",
# "action": "place-order",
# "respInst": "ACK"
# },
# "order": {
# "ac": "FUTURES",
# "accountId": "fut2ODPhGiY71Pl4vtXnOZ00ssgD7QGn",
# "time": 1640819389454,
# "orderId": "a17e0874ecbdU0711043490bbtcpDU5X",
# "seqNum": -1,
# "orderType": "Limit",
# "execInst": "NULL_VAL",
# "side": "Buy",
# "symbol": "BTC-PERP",
# "price": "30000",
# "orderQty": "0.002",
# "stopPrice": "0",
# "stopBy": "ref-px",
# "status": "Ack",
# "lastExecTime": 1640819389454,
# "lastQty": "0",
# "lastPx": "0",
# "avgFilledPx": "0",
# "cumFilledQty": "0",
# "fee": "0",
# "cumFee": "0",
# "feeAsset": "",
# "errorCode": "",
# "posStopLossPrice": "0",
# "posStopLossTrigger": "market",
# "posTakeProfitPrice": "0",
# "posTakeProfitTrigger": "market",
# "liquidityInd": "n"
# }
# }
# }
#
data = self.safe_value(response, 'data', {})
order = self.safe_value_2(data, 'order', 'info', {})
return self.parse_order(order, market)
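
    # An illustrative usage sketch (assumes an authenticated instance named `exchange`; the
    # symbols, amounts and prices are made up; stop orders need a 'stopPrice' in params and
    # perpetual orders may pass 'reduceOnly'):
    #
    #     order = exchange.create_order('BTC/USDT', 'limit', 'buy', 0.001, 30000)
    #     stop = exchange.create_order('BTC/USDT', 'stop_limit', 'sell', 0.001, 29000, {'stopPrice': 29500})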
def fetch_order(self, id, symbol=None, params={}):
"""
fetches information on an order made by the user
        :param str id: order id
        :param str|None symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the ascendex api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
self.load_markets()
self.load_accounts()
market = None
if symbol is not None:
market = self.market(symbol)
type, query = self.handle_market_type_and_params('fetchOrder', market, params)
options = self.safe_value(self.options, 'fetchOrder', {})
accountsByType = self.safe_value(self.options, 'accountsByType', {})
accountCategory = self.safe_string(accountsByType, type, 'cash')
account = self.safe_value(self.accounts, 0, {})
accountGroup = self.safe_value(account, 'id')
request = {
'account-group': accountGroup,
'account-category': accountCategory,
'orderId': id,
}
defaultMethod = self.safe_string(options, 'method', 'v1PrivateAccountCategoryGetOrderStatus')
method = self.get_supported_mapping(type, {
'spot': defaultMethod,
'margin': defaultMethod,
'swap': 'v2PrivateAccountGroupGetFuturesOrderStatus',
})
if method == 'v1PrivateAccountCategoryGetOrderStatus':
if accountCategory is not None:
request['category'] = accountCategory
else:
request['account-category'] = accountCategory
response = getattr(self, method)(self.extend(request, query))
#
# AccountCategoryGetOrderStatus
#
# {
# "code": 0,
# "accountCategory": "CASH",
# "accountId": "cshQtyfq8XLAA9kcf19h8bXHbAwwoqDo",
# "data": [
# {
# "symbol": "BTC/USDT",
# "price": "8131.22",
# "orderQty": "0.00082",
# "orderType": "Market",
# "avgPx": "7392.02",
# "cumFee": "0.005152238",
# "cumFilledQty": "0.00082",
# "errorCode": "",
# "feeAsset": "USDT",
# "lastExecTime": 1575953151764,
# "orderId": "a16eee20b6750866943712zWEDdAjt3",
# "seqNum": 2623469,
# "side": "Buy",
# "status": "Filled",
# "stopPrice": "",
# "execInst": "NULL_VAL"
# }
# ]
# }
#
# AccountGroupGetFuturesOrderStatus
#
# {
# "code": 0,
# "accountId": "fut2ODPhGiY71Pl4vtXnOZ00ssgD7QGn",
# "ac": "FUTURES",
# "data": {
# "ac": "FUTURES",
# "accountId": "fut2ODPhGiY71Pl4vtXnOZ00ssgD7QGn",
# "time": 1640247020217,
# "orderId": "r17de65747aeU0711043490bbtcp0cmt",
# "seqNum": 28796162908,
# "orderType": "Limit",
# "execInst": "NULL_VAL",
# "side": "Buy",
# "symbol": "BTC-PERP",
# "price": "30000",
# "orderQty": "0.0021",
# "stopPrice": "0",
# "stopBy": "market",
# "status": "New",
# "lastExecTime": 1640247020232,
# "lastQty": "0",
# "lastPx": "0",
# "avgFilledPx": "0",
# "cumFilledQty": "0",
# "fee": "0",
# "cumFee": "0",
# "feeAsset": "USDT",
# "errorCode": "",
# "posStopLossPrice": "0",
# "posStopLossTrigger": "market",
# "posTakeProfitPrice": "0",
# "posTakeProfitTrigger": "market",
# "liquidityInd": "n"
# }
# }
#
data = self.safe_value(response, 'data', {})
return self.parse_order(data, market)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
self.load_accounts()
market = None
if symbol is not None:
market = self.market(symbol)
symbol = market['symbol']
account = self.safe_value(self.accounts, 0, {})
accountGroup = self.safe_value(account, 'id')
type, query = self.handle_market_type_and_params('fetchOpenOrders', market, params)
accountsByType = self.safe_value(self.options, 'accountsByType', {})
accountCategory = self.safe_string(accountsByType, type, 'cash')
request = {
'account-group': accountGroup,
'account-category': accountCategory,
}
options = self.safe_value(self.options, 'fetchOpenOrders', {})
defaultMethod = self.safe_string(options, 'method', 'v1PrivateAccountCategoryGetOrderOpen')
method = self.get_supported_mapping(type, {
'spot': defaultMethod,
'margin': defaultMethod,
'swap': 'v2PrivateAccountGroupGetFuturesOrderOpen',
})
if method == 'v1PrivateAccountCategoryGetOrderOpen':
if accountCategory is not None:
request['category'] = accountCategory
else:
request['account-category'] = accountCategory
response = getattr(self, method)(self.extend(request, query))
#
# AccountCategoryGetOrderOpen
#
# {
# "ac": "CASH",
# "accountId": "cshQtyfq8XLAA9kcf19h8bXHbAwwoqDo",
# "code": 0,
# "data": [
# {
# "avgPx": "0", # Average filled price of the order
# "cumFee": "0", # cumulative fee paid for self order
# "cumFilledQty": "0", # cumulative filled quantity
# "errorCode": "", # error code; could be empty
# "feeAsset": "USDT", # fee asset
# "lastExecTime": 1576019723550, # The last execution time of the order
# "orderId": "s16ef21882ea0866943712034f36d83", # server provided orderId
# "orderQty": "0.0083", # order quantity
# "orderType": "Limit", # order type
# "price": "7105", # order price
# "seqNum": 8193258, # sequence number
# "side": "Buy", # order side
# "status": "New", # order status on matching engine
# "stopPrice": "", # only available for stop market and stop limit orders; otherwise empty
# "symbol": "BTC/USDT",
# "execInst": "NULL_VAL" # execution instruction
# },
# ]
# }
#
# AccountGroupGetFuturesOrderOpen
#
# {
# "code": 0,
# "data": [
# {
# "ac": "FUTURES",
# "accountId": "fut2ODPhGiY71Pl4vtXnOZ00ssgD7QGn",
# "time": 1640247020217,
# "orderId": "r17de65747aeU0711043490bbtcp0cmt",
# "seqNum": 28796162908,
# "orderType": "Limit",
# "execInst": "NULL_VAL",
# "side": "Buy",
# "symbol": "BTC-PERP",
# "price": "30000",
# "orderQty": "0.0021",
# "stopPrice": "0",
# "stopBy": "market",
# "status": "New",
# "lastExecTime": 1640247020232,
# "lastQty": "0",
# "lastPx": "0",
# "avgFilledPx": "0",
# "cumFilledQty": "0",
# "fee": "0",
# "cumFee": "0",
# "feeAsset": "USDT",
# "errorCode": "",
# "posStopLossPrice": "0",
# "posStopLossTrigger": "market",
# "posTakeProfitPrice": "0",
# "posTakeProfitTrigger": "market",
# "liquidityInd": "n"
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
if accountCategory == 'futures':
return self.parse_orders(data, market, since, limit)
# a workaround for https://github.com/ccxt/ccxt/issues/7187
orders = []
for i in range(0, len(data)):
order = self.parse_order(data[i], market)
orders.append(order)
return self.filter_by_symbol_since_limit(orders, symbol, since, limit)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
self.load_accounts()
account = self.safe_value(self.accounts, 0, {})
accountGroup = self.safe_value(account, 'id')
request = {
'account-group': accountGroup,
# 'category': accountCategory,
# 'symbol': market['id'],
# 'orderType': 'market', # optional, string
# 'side': 'buy', # or 'sell', optional, case insensitive.
# 'status': 'Filled', # "Filled", "Canceled", or "Rejected"
# 'startTime': exchange.milliseconds(),
# 'endTime': exchange.milliseconds(),
# 'page': 1,
# 'pageSize': 100,
}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
type, query = self.handle_market_type_and_params('fetchClosedOrders', market, params)
options = self.safe_value(self.options, 'fetchClosedOrders', {})
defaultMethod = self.safe_string(options, 'method', 'v1PrivateAccountGroupGetOrderHist')
method = self.get_supported_mapping(type, {
'spot': defaultMethod,
'margin': defaultMethod,
'swap': 'v2PrivateAccountGroupGetFuturesOrderHistCurrent',
})
accountsByType = self.safe_value(self.options, 'accountsByType', {})
accountCategory = self.safe_string(accountsByType, type, 'cash')
if method == 'v1PrivateAccountGroupGetOrderHist':
if accountCategory is not None:
request['category'] = accountCategory
else:
request['account-category'] = accountCategory
if since is not None:
request['startTime'] = since
if limit is not None:
request['pageSize'] = limit
response = getattr(self, method)(self.extend(request, query))
#
# accountCategoryGetOrderHistCurrent
#
# {
# "code":0,
# "accountId":"cshrHKLZCjlZ2ejqkmvIHHtPmLYqdnda",
# "ac":"CASH",
# "data":[
# {
# "seqNum":15561826728,
# "orderId":"a17294d305c0U6491137460bethu7kw9",
# "symbol":"ETH/USDT",
# "orderType":"Limit",
# "lastExecTime":1591635618200,
# "price":"200",
# "orderQty":"0.1",
# "side":"Buy",
# "status":"Canceled",
# "avgPx":"0",
# "cumFilledQty":"0",
# "stopPrice":"",
# "errorCode":"",
# "cumFee":"0",
# "feeAsset":"USDT",
# "execInst":"NULL_VAL"
# }
# ]
# }
#
# accountGroupGetOrderHist
#
# {
# "code": 0,
# "data": {
# "data": [
# {
# "ac": "FUTURES",
# "accountId": "testabcdefg",
# "avgPx": "0",
# "cumFee": "0",
# "cumQty": "0",
# "errorCode": "NULL_VAL",
# "execInst": "NULL_VAL",
# "feeAsset": "USDT",
# "lastExecTime": 1584072844085,
# "orderId": "r170d21956dd5450276356bbtcpKa74",
# "orderQty": "1.1499",
# "orderType": "Limit",
# "price": "4000",
# "sendingTime": 1584072841033,
# "seqNum": 24105338,
# "side": "Buy",
# "status": "Canceled",
# "stopPrice": "",
# "symbol": "BTC-PERP"
# },
# ],
# "hasNext": False,
# "limit": 500,
# "page": 1,
# "pageSize": 20
# }
# }
#
# accountGroupGetFuturesOrderHistCurrent
#
# {
# "code": 0,
# "data": [
# {
# "ac": "FUTURES",
# "accountId": "fut2ODPhGiY71Pl4vtXnOZ00ssgD7QGn",
# "time": 1640245777002,
# "orderId": "r17de6444fa6U0711043490bbtcpJ2lI",
# "seqNum": 28796124902,
# "orderType": "Limit",
# "execInst": "NULL_VAL",
# "side": "Buy",
# "symbol": "BTC-PERP",
# "price": "30000",
# "orderQty": "0.0021",
# "stopPrice": "0",
# "stopBy": "market",
# "status": "Canceled",
# "lastExecTime": 1640246574886,
# "lastQty": "0",
# "lastPx": "0",
# "avgFilledPx": "0",
# "cumFilledQty": "0",
# "fee": "0",
# "cumFee": "0",
# "feeAsset": "USDT",
# "errorCode": "",
# "posStopLossPrice": "0",
# "posStopLossTrigger": "market",
# "posTakeProfitPrice": "0",
# "posTakeProfitTrigger": "market",
# "liquidityInd": "n"
# }
# ]
# }
#
data = self.safe_value(response, 'data')
isArray = isinstance(data, list)
if not isArray:
data = self.safe_value(data, 'data', [])
return self.parse_orders(data, market, since, limit)
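
    # An illustrative usage sketch (assumes an authenticated instance named `exchange`;
    # results are paginated through 'startTime' and 'pageSize'):
    #
    #     closed_orders = exchange.fetch_closed_orders('BTC/USDT', limit=20)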
def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the ascendex api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
self.load_markets()
self.load_accounts()
market = self.market(symbol)
type, query = self.handle_market_type_and_params('cancelOrder', market, params)
options = self.safe_value(self.options, 'cancelOrder', {})
accountsByType = self.safe_value(self.options, 'accountsByType', {})
accountCategory = self.safe_string(accountsByType, type, 'cash')
account = self.safe_value(self.accounts, 0, {})
accountGroup = self.safe_value(account, 'id')
request = {
'account-group': accountGroup,
'account-category': accountCategory,
'symbol': market['id'],
'time': self.milliseconds(),
'id': 'foobar',
}
defaultMethod = self.safe_string(options, 'method', 'v1PrivateAccountCategoryDeleteOrder')
method = self.get_supported_mapping(type, {
'spot': defaultMethod,
'margin': defaultMethod,
'swap': 'v2PrivateAccountGroupDeleteFuturesOrder',
})
if method == 'v1PrivateAccountCategoryDeleteOrder':
if accountCategory is not None:
request['category'] = accountCategory
else:
request['account-category'] = accountCategory
clientOrderId = self.safe_string_2(params, 'clientOrderId', 'id')
if clientOrderId is None:
request['orderId'] = id
else:
request['id'] = clientOrderId
params = self.omit(params, ['clientOrderId', 'id'])
response = getattr(self, method)(self.extend(request, query))
#
# AccountCategoryDeleteOrder
#
# {
# "code": 0,
# "data": {
# "accountId": "cshQtyfq8XLAA9kcf19h8bXHbAwwoqDo",
# "ac": "CASH",
# "action": "cancel-order",
# "status": "Ack",
# "info": {
# "id": "wv8QGquoeamhssvQBeHOHGQCGlcBjj23",
# "orderId": "16e6198afb4s8bXHbAwwoqDo2ebc19dc",
# "orderType": "", # could be empty
# "symbol": "ETH/USDT",
# "timestamp": 1573594877822
# }
# }
# }
#
# AccountGroupDeleteFuturesOrder
#
# {
# "code": 0,
# "data": {
# "meta": {
# "id": "foobar",
# "action": "cancel-order",
# "respInst": "ACK"
# },
# "order": {
# "ac": "FUTURES",
# "accountId": "fut2ODPhGiY71Pl4vtXnOZ00ssgD7QGn",
# "time": 1640244480476,
# "orderId": "r17de63086f4U0711043490bbtcpPUF4",
# "seqNum": 28795959269,
# "orderType": "Limit",
# "execInst": "NULL_VAL",
# "side": "Buy",
# "symbol": "BTC-PERP",
# "price": "30000",
# "orderQty": "0.0021",
# "stopPrice": "0",
# "stopBy": "market",
# "status": "New",
# "lastExecTime": 1640244480491,
# "lastQty": "0",
# "lastPx": "0",
# "avgFilledPx": "0",
# "cumFilledQty": "0",
# "fee": "0",
# "cumFee": "0",
# "feeAsset": "BTCPC",
# "errorCode": "",
# "posStopLossPrice": "0",
# "posStopLossTrigger": "market",
# "posTakeProfitPrice": "0",
# "posTakeProfitTrigger": "market",
# "liquidityInd": "n"
# }
# }
# }
#
data = self.safe_value(response, 'data', {})
order = self.safe_value_2(data, 'order', 'info', {})
return self.parse_order(order, market)
def cancel_all_orders(self, symbol=None, params={}):
self.load_markets()
self.load_accounts()
market = None
if symbol is not None:
market = self.market(symbol)
type, query = self.handle_market_type_and_params('cancelAllOrders', market, params)
options = self.safe_value(self.options, 'cancelAllOrders', {})
accountsByType = self.safe_value(self.options, 'accountsByType', {})
accountCategory = self.safe_string(accountsByType, type, 'cash')
account = self.safe_value(self.accounts, 0, {})
accountGroup = self.safe_value(account, 'id')
request = {
'account-group': accountGroup,
'account-category': accountCategory,
'time': self.milliseconds(),
}
if symbol is not None:
request['symbol'] = market['id']
defaultMethod = self.safe_string(options, 'method', 'v1PrivateAccountCategoryDeleteOrderAll')
method = self.get_supported_mapping(type, {
'spot': defaultMethod,
'margin': defaultMethod,
'swap': 'v2PrivateAccountGroupDeleteFuturesOrderAll',
})
if method == 'v1PrivateAccountCategoryDeleteOrderAll':
if accountCategory is not None:
request['category'] = accountCategory
else:
request['account-category'] = accountCategory
response = getattr(self, method)(self.extend(request, query))
#
# AccountCategoryDeleteOrderAll
#
# {
# "code": 0,
# "data": {
# "ac": "CASH",
# "accountId": "cshQtyfq8XLAA9kcf19h8bXHbAwwoqDo",
# "action": "cancel-all",
# "info": {
# "id": "2bmYvi7lyTrneMzpcJcf2D7Pe9V1P9wy",
# "orderId": "",
# "orderType": "NULL_VAL",
# "symbol": "",
# "timestamp": 1574118495462
# },
# "status": "Ack"
# }
# }
#
# AccountGroupDeleteFuturesOrderAll
#
# {
# "code": 0,
# "data": {
# "ac": "FUTURES",
# "accountId": "fut2ODPhGiY71Pl4vtXnOZ00ssgD7QGn",
# "action": "cancel-all",
# "info": {
# "symbol":"BTC-PERP"
# }
# }
# }
#
return response
def parse_deposit_address(self, depositAddress, currency=None):
#
# {
# address: "0xe7c70b4e73b6b450ee46c3b5c0f5fb127ca55722",
# destTag: "",
# tagType: "",
# tagId: "",
# chainName: "ERC20",
# numConfirmations: 20,
# withdrawalFee: 1,
# nativeScale: 4,
# tips: []
# }
#
address = self.safe_string(depositAddress, 'address')
tagId = self.safe_string(depositAddress, 'tagId')
tag = self.safe_string(depositAddress, tagId)
self.check_address(address)
code = None if (currency is None) else currency['code']
chainName = self.safe_string(depositAddress, 'chainName')
network = self.safe_network(chainName)
return {
'currency': code,
'address': address,
'tag': tag,
'network': network,
'info': depositAddress,
}
def safe_network(self, networkId):
networksById = {
'TRC20': 'TRC20',
'ERC20': 'ERC20',
'GO20': 'GO20',
'BEP2': 'BEP2',
'BEP20(BSC)': 'BEP20',
'Bitcoin': 'BTC',
'Bitcoin ABC': 'BCH',
'Litecoin': 'LTC',
'Matic Network': 'MATIC',
'Solana': 'SOL',
'xDai': 'STAKE',
'Akash': 'AKT',
}
return self.safe_string(networksById, networkId, networkId)
def fetch_deposit_address(self, code, params={}):
self.load_markets()
currency = self.currency(code)
chainName = self.safe_string(params, 'chainName')
params = self.omit(params, 'chainName')
request = {
'asset': currency['id'],
}
response = self.v1PrivateGetWalletDepositAddress(self.extend(request, params))
#
# {
# "code":0,
# "data":{
# "asset":"USDT",
# "assetName":"Tether",
# "address":[
# {
# "address":"1N22odLHXnLPCjC8kwBJPTayarr9RtPod6",
# "destTag":"",
# "tagType":"",
# "tagId":"",
# "chainName":"Omni",
# "numConfirmations":3,
# "withdrawalFee":4.7,
# "nativeScale":4,
# "tips":[]
# },
# {
# "address":"0xe7c70b4e73b6b450ee46c3b5c0f5fb127ca55722",
# "destTag":"",
# "tagType":"",
# "tagId":"",
# "chainName":"ERC20",
# "numConfirmations":20,
# "withdrawalFee":1.0,
# "nativeScale":4,
# "tips":[]
# }
# ]
# }
# }
#
data = self.safe_value(response, 'data', {})
addresses = self.safe_value(data, 'address', [])
numAddresses = len(addresses)
address = None
if numAddresses > 1:
addressesByChainName = self.index_by(addresses, 'chainName')
if chainName is None:
chainNames = list(addressesByChainName.keys())
chains = ', '.join(chainNames)
raise ArgumentsRequired(self.id + ' fetchDepositAddress() returned more than one address, a chainName parameter is required, one of ' + chains)
address = self.safe_value(addressesByChainName, chainName, {})
else:
# first address
address = self.safe_value(addresses, 0, {})
result = self.parse_deposit_address(address, currency)
return self.extend(result, {
'info': response,
})
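    #
    # usage sketch (added for clarity, not part of the original file; key/secret values are placeholders):
    #
    #     exchange = ccxt.ascendex({'apiKey': 'YOUR_KEY', 'secret': 'YOUR_SECRET'})
    #     # when a currency is listed on several chains, pass chainName to pick one
    #     depositAddress = exchange.fetch_deposit_address('USDT', {'chainName': 'ERC20'})
    #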
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
request = {
'txType': 'deposit',
}
return self.fetch_transactions(code, since, limit, self.extend(request, params))
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
request = {
'txType': 'withdrawal',
}
return self.fetch_transactions(code, since, limit, self.extend(request, params))
def fetch_transactions(self, code=None, since=None, limit=None, params={}):
self.load_markets()
request = {
# 'asset': currency['id'],
# 'page': 1,
# 'pageSize': 20,
# 'startTs': self.milliseconds(),
# 'endTs': self.milliseconds(),
            # 'txType': undefined, # deposit, withdrawal
}
currency = None
if code is not None:
currency = self.currency(code)
request['asset'] = currency['id']
if since is not None:
request['startTs'] = since
if limit is not None:
request['pageSize'] = limit
response = self.v1PrivateGetWalletTransactions(self.extend(request, params))
#
# {
# code: 0,
# data: {
# data: [
# {
# requestId: "wuzd1Ojsqtz4bCA3UXwtUnnJDmU8PiyB",
# time: 1591606166000,
# asset: "USDT",
# transactionType: "deposit",
# amount: "25",
# commission: "0",
# networkTransactionId: "0xbc4eabdce92f14dbcc01d799a5f8ca1f02f4a3a804b6350ea202be4d3c738fce",
# status: "pending",
# numConfirmed: 8,
# numConfirmations: 20,
# destAddress: {address: "0xe7c70b4e73b6b450ee46c3b5c0f5fb127ca55722"}
# }
# ],
# page: 1,
# pageSize: 20,
# hasNext: False
# }
# }
#
data = self.safe_value(response, 'data', {})
transactions = self.safe_value(data, 'data', [])
return self.parse_transactions(transactions, currency, since, limit)
def parse_transaction_status(self, status):
statuses = {
'reviewing': 'pending',
'pending': 'pending',
'confirmed': 'ok',
'rejected': 'rejected',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# {
# requestId: "wuzd1Ojsqtz4bCA3UXwtUnnJDmU8PiyB",
# time: 1591606166000,
# asset: "USDT",
# transactionType: "deposit",
# amount: "25",
# commission: "0",
# networkTransactionId: "0xbc4eabdce92f14dbcc01d799a5f8ca1f02f4a3a804b6350ea202be4d3c738fce",
# status: "pending",
# numConfirmed: 8,
# numConfirmations: 20,
# destAddress: {
# address: "0xe7c70b4e73b6b450ee46c3b5c0f5fb127ca55722",
# destTag: "..." # for currencies that have it
# }
# }
#
id = self.safe_string(transaction, 'requestId')
amount = self.safe_number(transaction, 'amount')
destAddress = self.safe_value(transaction, 'destAddress', {})
address = self.safe_string(destAddress, 'address')
tag = self.safe_string(destAddress, 'destTag')
txid = self.safe_string(transaction, 'networkTransactionId')
type = self.safe_string(transaction, 'transactionType')
timestamp = self.safe_integer(transaction, 'time')
currencyId = self.safe_string(transaction, 'asset')
code = self.safe_currency_code(currencyId, currency)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
feeCost = self.safe_number(transaction, 'commission')
return {
'info': transaction,
'id': id,
'currency': code,
'amount': amount,
'network': None,
'address': address,
'addressTo': address,
'addressFrom': None,
'tag': tag,
'tagTo': tag,
'tagFrom': None,
'status': status,
'type': type,
'updated': None,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': {
'currency': code,
'cost': feeCost,
},
}
def fetch_positions(self, symbols=None, params={}):
"""
fetch all open positions
:param [str]|None symbols: list of unified market symbols
:param dict params: extra parameters specific to the ascendex api endpoint
:returns [dict]: a list of `position structure <https://docs.ccxt.com/en/latest/manual.html#position-structure>`
"""
self.load_markets()
self.load_accounts()
account = self.safe_value(self.accounts, 0, {})
accountGroup = self.safe_string(account, 'id')
request = {
'account-group': accountGroup,
}
response = self.v2PrivateAccountGroupGetFuturesPosition(self.extend(request, params))
#
# {
# "code": 0,
# "data": {
# "accountId": "fut2ODPhGiY71Pl4vtXnOZ00ssgD7QGn",
# "ac": "FUTURES",
# "collaterals": [
# {
# "asset": "USDT",
# "balance": "44.570287262",
# "referencePrice": "1",
# "discountFactor": "1"
# }
# ],
# "contracts": [
# {
# "symbol": "BTC-PERP",
# "side": "LONG",
# "position": "0.0001",
# "referenceCost": "-3.12277254",
# "unrealizedPnl": "-0.001700233",
# "realizedPnl": "0",
# "avgOpenPrice": "31209",
# "marginType": "isolated",
# "isolatedMargin": "1.654972977",
# "leverage": "2",
# "takeProfitPrice": "0",
# "takeProfitTrigger": "market",
# "stopLossPrice": "0",
# "stopLossTrigger": "market",
# "buyOpenOrderNotional": "0",
# "sellOpenOrderNotional": "0",
# "markPrice": "31210.723063672",
# "indexPrice": "31223.148857925"
# },
# ]
# }
# }
#
data = self.safe_value(response, 'data', {})
position = self.safe_value(data, 'contracts', [])
result = []
for i in range(0, len(position)):
result.append(self.parse_position(position[i]))
return self.filter_by_array(result, 'symbol', symbols, False)
def parse_position(self, position, market=None):
#
# {
# "symbol": "BTC-PERP",
# "side": "LONG",
# "position": "0.0001",
# "referenceCost": "-3.12277254",
# "unrealizedPnl": "-0.001700233",
# "realizedPnl": "0",
# "avgOpenPrice": "31209",
# "marginType": "isolated",
# "isolatedMargin": "1.654972977",
# "leverage": "2",
# "takeProfitPrice": "0",
# "takeProfitTrigger": "market",
# "stopLossPrice": "0",
# "stopLossTrigger": "market",
# "buyOpenOrderNotional": "0",
# "sellOpenOrderNotional": "0",
# "markPrice": "31210.723063672",
# "indexPrice": "31223.148857925"
# },
#
marketId = self.safe_string(position, 'symbol')
market = self.safe_market(marketId, market)
notional = self.safe_number(position, 'buyOpenOrderNotional')
if notional == 0:
notional = self.safe_number(position, 'sellOpenOrderNotional')
marginMode = self.safe_string(position, 'marginType')
collateral = None
if marginMode == 'isolated':
collateral = self.safe_number(position, 'isolatedMargin')
return {
'info': position,
'id': None,
'symbol': market['symbol'],
'notional': notional,
'marginMode': marginMode,
'liquidationPrice': None,
'entryPrice': self.safe_number(position, 'avgOpenPrice'),
'unrealizedPnl': self.safe_number(position, 'unrealizedPnl'),
'percentage': None,
'contracts': None,
'contractSize': self.safe_number(position, 'position'),
'markPrice': self.safe_number(position, 'markPrice'),
'side': self.safe_string_lower(position, 'side'),
'hedged': None,
'timestamp': None,
'datetime': None,
'maintenanceMargin': None,
'maintenanceMarginPercentage': None,
'collateral': collateral,
'initialMargin': None,
'initialMarginPercentage': None,
'leverage': self.safe_integer(position, 'leverage'),
'marginRatio': None,
}
def parse_funding_rate(self, fundingRate, market=None):
#
# {
# "time": 1640061364830,
# "symbol": "EOS-PERP",
# "markPrice": "3.353854865",
# "indexPrice": "3.3542",
# "openInterest": "14242",
# "fundingRate": "-0.000073026",
# "nextFundingTime": 1640073600000
# }
#
marketId = self.safe_string(fundingRate, 'symbol')
symbol = self.safe_symbol(marketId, market)
currentTime = self.safe_integer(fundingRate, 'time')
nextFundingRate = self.safe_number(fundingRate, 'fundingRate')
nextFundingRateTimestamp = self.safe_integer(fundingRate, 'nextFundingTime')
previousFundingTimestamp = None
return {
'info': fundingRate,
'symbol': symbol,
'markPrice': self.safe_number(fundingRate, 'markPrice'),
'indexPrice': self.safe_number(fundingRate, 'indexPrice'),
'interestRate': self.parse_number('0'),
'estimatedSettlePrice': None,
'timestamp': currentTime,
'datetime': self.iso8601(currentTime),
'previousFundingRate': None,
'nextFundingRate': nextFundingRate,
'previousFundingTimestamp': previousFundingTimestamp,
'nextFundingTimestamp': nextFundingRateTimestamp,
'previousFundingDatetime': self.iso8601(previousFundingTimestamp),
'nextFundingDatetime': self.iso8601(nextFundingRateTimestamp),
}
def fetch_funding_rates(self, symbols, params={}):
self.load_markets()
response = self.v2PublicGetFuturesPricingData(params)
#
# {
# "code": 0,
# "data": {
# "contracts": [
# {
# "time": 1640061364830,
# "symbol": "EOS-PERP",
# "markPrice": "3.353854865",
# "indexPrice": "3.3542",
# "openInterest": "14242",
# "fundingRate": "-0.000073026",
# "nextFundingTime": 1640073600000
# },
# ],
# "collaterals": [
# {
# "asset": "USDTR",
# "referencePrice": "1"
# },
# ]
# }
# }
#
data = self.safe_value(response, 'data', {})
contracts = self.safe_value(data, 'contracts', [])
result = self.parse_funding_rates(contracts)
return self.filter_by_array(result, 'symbol', symbols)
def modify_margin_helper(self, symbol, amount, type, params={}):
self.load_markets()
self.load_accounts()
market = self.market(symbol)
account = self.safe_value(self.accounts, 0, {})
accountGroup = self.safe_string(account, 'id')
amount = self.amount_to_precision(symbol, amount)
request = {
'account-group': accountGroup,
'symbol': market['id'],
'amount': amount, # positive value for adding margin, negative for reducing
}
response = self.v2PrivateAccountGroupPostFuturesIsolatedPositionMargin(self.extend(request, params))
#
# Can only change margin for perpetual futures isolated margin positions
#
# {
# "code": 0
# }
#
if type == 'reduce':
amount = Precise.string_abs(amount)
return self.extend(self.parse_margin_modification(response, market), {
'amount': self.parse_number(amount),
'type': type,
})
def parse_margin_modification(self, data, market=None):
errorCode = self.safe_string(data, 'code')
status = 'ok' if (errorCode == '0') else 'failed'
return {
'info': data,
'type': None,
'amount': None,
'code': market['quote'],
'symbol': market['symbol'],
'status': status,
}
def reduce_margin(self, symbol, amount, params={}):
"""
remove margin from a position
:param str symbol: unified market symbol
:param float amount: the amount of margin to remove
:param dict params: extra parameters specific to the ascendex api endpoint
:returns dict: a `margin structure <https://docs.ccxt.com/en/latest/manual.html#reduce-margin-structure>`
"""
return self.modify_margin_helper(symbol, amount, 'reduce', params)
def add_margin(self, symbol, amount, params={}):
"""
add margin
:param str symbol: unified market symbol
:param float amount: amount of margin to add
:param dict params: extra parameters specific to the ascendex api endpoint
:returns dict: a `margin structure <https://docs.ccxt.com/en/latest/manual.html#add-margin-structure>`
"""
return self.modify_margin_helper(symbol, amount, 'add', params)
def set_leverage(self, leverage, symbol=None, params={}):
"""
set the level of leverage for a market
:param float leverage: the rate of leverage
:param str symbol: unified market symbol
:param dict params: extra parameters specific to the ascendex api endpoint
:returns dict: response from the exchange
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' setLeverage() requires a symbol argument')
if (leverage < 1) or (leverage > 100):
raise BadRequest(self.id + ' leverage should be between 1 and 100')
self.load_markets()
self.load_accounts()
market = self.market(symbol)
if market['type'] != 'future':
raise BadSymbol(self.id + ' setLeverage() supports futures contracts only')
account = self.safe_value(self.accounts, 0, {})
accountGroup = self.safe_string(account, 'id')
request = {
'account-group': accountGroup,
'symbol': market['id'],
'leverage': leverage,
}
return self.v2PrivateAccountGroupPostFuturesLeverage(self.extend(request, params))
def set_margin_mode(self, marginMode, symbol=None, params={}):
"""
set margin mode to 'cross' or 'isolated'
:param str marginMode: 'cross' or 'isolated'
:param str symbol: unified market symbol
:param dict params: extra parameters specific to the ascendex api endpoint
:returns dict: response from the exchange
"""
marginMode = marginMode.lower()
if marginMode == 'cross':
marginMode = 'crossed'
if marginMode != 'isolated' and marginMode != 'crossed':
raise BadRequest(self.id + ' setMarginMode() marginMode argument should be isolated or cross')
self.load_markets()
self.load_accounts()
market = self.market(symbol)
account = self.safe_value(self.accounts, 0, {})
accountGroup = self.safe_string(account, 'id')
request = {
'account-group': accountGroup,
'symbol': market['id'],
'marginMode': marginMode,
}
if market['type'] != 'future':
raise BadSymbol(self.id + ' setMarginMode() supports futures contracts only')
return self.v2PrivateAccountGroupPostFuturesMarginType(self.extend(request, params))
def fetch_leverage_tiers(self, symbols=None, params={}):
"""
retrieve information on the maximum leverage, and maintenance margin for trades of varying trade sizes
:param [str]|None symbols: list of unified market symbols
:param dict params: extra parameters specific to the ascendex api endpoint
:returns dict: a dictionary of `leverage tiers structures <https://docs.ccxt.com/en/latest/manual.html#leverage-tiers-structure>`, indexed by market symbols
"""
self.load_markets()
response = self.v2PublicGetFuturesContract(params)
#
# {
# "code":0,
# "data":[
# {
# "symbol":"BTC-PERP",
# "status":"Normal",
# "displayName":"BTCUSDT",
# "settlementAsset":"USDT",
# "underlying":"BTC/USDT",
# "tradingStartTime":1579701600000,
# "priceFilter":{"minPrice":"1","maxPrice":"1000000","tickSize":"1"},
# "lotSizeFilter":{"minQty":"0.0001","maxQty":"1000000000","lotSize":"0.0001"},
# "commissionType":"Quote",
# "commissionReserveRate":"0.001",
# "marketOrderPriceMarkup":"0.03",
# "marginRequirements":[
# {"positionNotionalLowerBound":"0","positionNotionalUpperBound":"50000","initialMarginRate":"0.01","maintenanceMarginRate":"0.006"},
# {"positionNotionalLowerBound":"50000","positionNotionalUpperBound":"200000","initialMarginRate":"0.02","maintenanceMarginRate":"0.012"},
# {"positionNotionalLowerBound":"200000","positionNotionalUpperBound":"2000000","initialMarginRate":"0.04","maintenanceMarginRate":"0.024"},
# {"positionNotionalLowerBound":"2000000","positionNotionalUpperBound":"20000000","initialMarginRate":"0.1","maintenanceMarginRate":"0.06"},
# {"positionNotionalLowerBound":"20000000","positionNotionalUpperBound":"40000000","initialMarginRate":"0.2","maintenanceMarginRate":"0.12"},
# {"positionNotionalLowerBound":"40000000","positionNotionalUpperBound":"1000000000","initialMarginRate":"0.333333","maintenanceMarginRate":"0.2"}
# ]
# }
# ]
# }
#
data = self.safe_value(response, 'data')
return self.parse_leverage_tiers(data, symbols, 'symbol')
def parse_market_leverage_tiers(self, info, market=None):
"""
:param dict info: Exchange market response for 1 market
:param dict market: CCXT market
"""
#
# {
# "symbol":"BTC-PERP",
# "status":"Normal",
# "displayName":"BTCUSDT",
# "settlementAsset":"USDT",
# "underlying":"BTC/USDT",
# "tradingStartTime":1579701600000,
# "priceFilter":{"minPrice":"1","maxPrice":"1000000","tickSize":"1"},
# "lotSizeFilter":{"minQty":"0.0001","maxQty":"1000000000","lotSize":"0.0001"},
# "commissionType":"Quote",
# "commissionReserveRate":"0.001",
# "marketOrderPriceMarkup":"0.03",
# "marginRequirements":[
# {"positionNotionalLowerBound":"0","positionNotionalUpperBound":"50000","initialMarginRate":"0.01","maintenanceMarginRate":"0.006"},
# {"positionNotionalLowerBound":"50000","positionNotionalUpperBound":"200000","initialMarginRate":"0.02","maintenanceMarginRate":"0.012"},
# {"positionNotionalLowerBound":"200000","positionNotionalUpperBound":"2000000","initialMarginRate":"0.04","maintenanceMarginRate":"0.024"},
# {"positionNotionalLowerBound":"2000000","positionNotionalUpperBound":"20000000","initialMarginRate":"0.1","maintenanceMarginRate":"0.06"},
# {"positionNotionalLowerBound":"20000000","positionNotionalUpperBound":"40000000","initialMarginRate":"0.2","maintenanceMarginRate":"0.12"},
# {"positionNotionalLowerBound":"40000000","positionNotionalUpperBound":"1000000000","initialMarginRate":"0.333333","maintenanceMarginRate":"0.2"}
# ]
# }
#
marginRequirements = self.safe_value(info, 'marginRequirements', [])
id = self.safe_string(info, 'symbol')
market = self.safe_market(id, market)
tiers = []
for i in range(0, len(marginRequirements)):
tier = marginRequirements[i]
initialMarginRate = self.safe_string(tier, 'initialMarginRate')
tiers.append({
'tier': self.sum(i, 1),
'currency': market['quote'],
'minNotional': self.safe_number(tier, 'positionNotionalLowerBound'),
'maxNotional': self.safe_number(tier, 'positionNotionalUpperBound'),
'maintenanceMarginRate': self.safe_number(tier, 'maintenanceMarginRate'),
'maxLeverage': self.parse_number(Precise.string_div('1', initialMarginRate)),
'info': tier,
})
return tiers
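    # Worked example (comment added for clarity): with initialMarginRate "0.01" the first tier yields
    # maxLeverage = 1 / 0.01 = 100x, while the "0.333333" bracket yields roughly 3x.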
def transfer(self, code, amount, fromAccount, toAccount, params={}):
"""
transfer currency internally between wallets on the same account
:param str code: unified currency code
:param float amount: amount to transfer
:param str fromAccount: account to transfer from
:param str toAccount: account to transfer to
:param dict params: extra parameters specific to the ascendex api endpoint
:returns dict: a `transfer structure <https://docs.ccxt.com/en/latest/manual.html#transfer-structure>`
"""
self.load_markets()
self.load_accounts()
account = self.safe_value(self.accounts, 0, {})
accountGroup = self.safe_string(account, 'id')
currency = self.currency(code)
amount = self.currency_to_precision(code, amount)
accountsByType = self.safe_value(self.options, 'accountsByType', {})
fromId = self.safe_string(accountsByType, fromAccount, fromAccount)
toId = self.safe_string(accountsByType, toAccount, toAccount)
if fromId != 'cash' and toId != 'cash':
raise ExchangeError(self.id + ' transfer() only supports direct balance transfer between spot and future, spot and margin')
request = {
'account-group': accountGroup,
'amount': amount,
'asset': currency['id'],
'fromAccount': fromId,
'toAccount': toId,
}
response = self.v1PrivateAccountGroupPostTransfer(self.extend(request, params))
#
# {code: '0'}
#
transferOptions = self.safe_value(self.options, 'transfer', {})
fillResponseFromRequest = self.safe_value(transferOptions, 'fillResponseFromRequest', True)
transfer = self.parse_transfer(response, currency)
if fillResponseFromRequest:
transfer['fromAccount'] = fromAccount
transfer['toAccount'] = toAccount
transfer['amount'] = amount
transfer['currency'] = code
return transfer
def parse_transfer(self, transfer, currency=None):
#
# {code: '0'}
#
status = self.safe_integer(transfer, 'code')
currencyCode = self.safe_currency_code(None, currency)
timestamp = self.milliseconds()
return {
'info': transfer,
'id': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'currency': currencyCode,
'amount': None,
'fromAccount': None,
'toAccount': None,
'status': self.parse_transfer_status(status),
}
def parse_transfer_status(self, status):
if status == 0:
return 'ok'
return 'failed'
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
version = api[0]
access = api[1]
type = self.safe_string(api, 2)
url = ''
accountCategory = (type == 'accountCategory')
if accountCategory or (type == 'accountGroup'):
url += self.implode_params('/{account-group}', params)
params = self.omit(params, 'account-group')
request = self.implode_params(path, params)
url += '/api/pro/'
if version == 'v2':
request = version + '/' + request
else:
url += version + '/'
if accountCategory:
url += self.implode_params('{account-category}/', params)
params = self.omit(params, 'account-category')
url += request
        if (version == 'v1') and ((request == 'cash/balance') or (request == 'margin/balance')):
request = 'balance'
if (version == 'v1') and (request == 'spot/fee'):
request = 'fee'
if request.find('subuser') >= 0:
parts = request.split('/')
request = parts[2]
params = self.omit(params, self.extract_params(path))
if access == 'public':
if params:
url += '?' + self.urlencode(params)
else:
self.check_required_credentials()
timestamp = str(self.milliseconds())
payload = timestamp + '+' + request
hmac = self.hmac(self.encode(payload), self.encode(self.secret), hashlib.sha256, 'base64')
headers = {
'x-auth-key': self.apiKey,
'x-auth-timestamp': timestamp,
'x-auth-signature': hmac,
}
if method == 'GET':
if params:
url += '?' + self.urlencode(params)
else:
headers['Content-Type'] = 'application/json'
body = self.json(params)
url = self.urls['api']['rest'] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
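    #
    # signing sketch (comment added; hmac_sha256_base64 is a hypothetical helper standing in for
    # the self.hmac(...) call above): private endpoints sign "<timestamp>+<request>" with
    # HMAC-SHA256 over the API secret and send it base64-encoded, e.g.
    #
    #     payload = '1640995200000' + '+' + 'cash/balance'
    #     signature = hmac_sha256_base64(secret, payload)
    #     headers = {'x-auth-key': apiKey, 'x-auth-timestamp': '1640995200000', 'x-auth-signature': signature}
    #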
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
#
# {'code': 6010, 'message': 'Not enough balance.'}
# {'code': 60060, 'message': 'The order is already filled or canceled.'}
# {"code":2100,"message":"ApiKeyFailure"}
# {"code":300001,"message":"Price is too low from market price.","reason":"INVALID_PRICE","accountId":"cshrHKLZCjlZ2ejqkmvIHHtPmLYqdnda","ac":"CASH","action":"place-order","status":"Err","info":{"symbol":"BTC/USDT"}}
#
code = self.safe_string(response, 'code')
message = self.safe_string(response, 'message')
error = (code is not None) and (code != '0')
if error or (message is not None):
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], code, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)
raise ExchangeError(feedback) # unknown message
| avg_line_length: 43.945636 | max_line_length: 228 | alphanum_fraction: 0.472141 |
| hexsha: 7bbfa762b09967ef0845f9194a28774ebc7bd7ac | size: 904 | ext: py | lang: Python |
| max_stars_repo_path: tests/universal_functions_tests/divide_tests/inplace_broadcast.py | max_stars_repo_name: marcinz/cunumeric | max_stars_repo_head_hexsha: c40b038d4eb0611f7bb16d5bd11891a633ef7892 | max_stars_repo_licenses: ["Apache-2.0"] |
| max_stars_count: 118 (2021-04-12T18:06:59.000Z .. 2021-10-12T21:30:24.000Z) | max_issues_count: 51 (2021-04-21T10:40:13.000Z .. 2021-09-10T22:09:26.000Z) | max_forks_count: 9 (2021-04-14T03:07:42.000Z .. 2021-09-22T17:02:53.000Z) | issues/forks repo and path: same as stars |
# Copyright 2021 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
import numpy as np
import cunumeric as num
def test():
# test inplace divide
anp = np.random.randn(4, 5)
b = random.randint(1, 13)
a = num.array(anp)
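    # divide in place: the scalar divisor broadcasts across the (4, 5) array and out= writes
    # the quotient back into the input, for both the NumPy reference and cunumeric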
np.divide(anp, b, out=anp)
num.divide(a, b, out=a)
assert np.array_equal(a, anp)
if __name__ == "__main__":
test()
| avg_line_length: 24.432432 | max_line_length: 74 | alphanum_fraction: 0.710177 |
| hexsha: 4b51546e89cfe26aba153444cf2c66e7f5c35b1c | size: 17905 | ext: py | lang: Python |
| max_stars_repo_path: src/deep_dialog/agents/agent_dqn.py | max_stars_repo_name: YenChen-Wu/DDQ | max_stars_repo_head_hexsha: a17ae49e232ead4519863bb2836f8026babb38e0 | max_stars_repo_licenses: ["MIT"] |
| max_stars_count: null | max_issues_count: null | max_forks_count: null | issues/forks repo and path: same as stars |
'''
Created on Oct 30, 2017
A DQN agent modified for the DDQ agent.
Some methods are not consistent with the superclass Agent.
@author: Baolin Peng
'''
import random, copy, json
import cPickle as pickle
import numpy as np
from collections import namedtuple, deque
from deep_dialog import dialog_config
from agent import Agent
from deep_dialog.qlearning import DQN
import torch
import torch.optim as optim
import torch.nn.functional as F
DEVICE = torch.device('cpu')
Transition = namedtuple('Transition', ('state', 'action', 'reward', 'next_state', 'term'))
class AgentDQN(Agent):
def __init__(self, movie_dict=None, act_set=None, slot_set=None, params=None):
self.movie_dict = movie_dict
self.act_set = act_set
self.slot_set = slot_set
self.act_cardinality = len(act_set.keys())
self.slot_cardinality = len(slot_set.keys())
self.feasible_actions = dialog_config.feasible_actions
self.num_actions = len(self.feasible_actions)
self.epsilon = params['epsilon']
self.agent_run_mode = params['agent_run_mode']
self.agent_act_level = params['agent_act_level']
self.experience_replay_pool_size = params.get('experience_replay_pool_size', 5000)
self.experience_replay_pool = deque(
maxlen=self.experience_replay_pool_size) # experience replay pool <s_t, a_t, r_t, s_t+1>
self.experience_replay_pool_from_model = deque(
maxlen=self.experience_replay_pool_size) # experience replay pool <s_t, a_t, r_t, s_t+1>
self.running_expereince_pool = None # hold experience from both user and world model
self.hidden_size = params.get('dqn_hidden_size', 60)
self.gamma = params.get('gamma', 0.9)
self.predict_mode = params.get('predict_mode', False)
self.warm_start = params.get('warm_start', 0)
self.max_turn = params['max_turn'] + 5
print(self.max_turn)
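        # State vector layout (comment added; mirrors prepare_state_representation below):
        #   2 * act_cardinality  -> one-hot user act + one-hot last agent act
        #   7 * slot_cardinality -> user inform/request slots, agent inform/request slots,
        #                           current slots, and two KB vectors (each sized slot_cardinality + 1)
        #   3                    -> the two +1 paddings of the KB vectors plus the scalar turn feature
        #   max_turn             -> one-hot turn count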
self.state_dimension = 2 * self.act_cardinality + 7 * self.slot_cardinality + 3 + self.max_turn
self.dqn = DQN(self.state_dimension, self.hidden_size, self.num_actions).to(DEVICE)
self.target_dqn = DQN(self.state_dimension, self.hidden_size, self.num_actions).to(DEVICE)
self.target_dqn.load_state_dict(self.dqn.state_dict())
self.target_dqn.eval()
self.optimizer = optim.RMSprop(self.dqn.parameters(), lr=1e-3)
self.cur_bellman_err = 0
# Prediction Mode: load trained DQN model
if params['trained_model_path'] != None:
self.load(params['trained_model_path'])
self.predict_mode = True
self.warm_start = 2
def initialize_episode(self):
""" Initialize a new episode. This function is called every time a new episode is run. """
self.current_slot_id = 0
self.phase = 0
self.request_set = ['moviename', 'starttime', 'city', 'date', 'theater', 'numberofpeople']
def state_to_action(self, state):
""" DQN: Input state, output action """
# self.state['turn'] += 2
self.representation = self.prepare_state_representation(state)
self.action = self.run_policy(self.representation)
if self.warm_start == 1:
act_slot_response = copy.deepcopy(self.feasible_actions[self.action])
else:
act_slot_response = copy.deepcopy(self.feasible_actions[self.action[0]])
return {'act_slot_response': act_slot_response, 'act_slot_value_response': None}
def prepare_state_representation(self, state):
""" Create the representation for each state """
user_action = state['user_action']
current_slots = state['current_slots']
kb_results_dict = state['kb_results_dict']
agent_last = state['agent_action']
########################################################################
# Create one-hot of acts to represent the current user action
########################################################################
user_act_rep = np.zeros((1, self.act_cardinality))
user_act_rep[0, self.act_set[user_action['diaact']]] = 1.0
########################################################################
# Create bag of inform slots representation to represent the current user action
########################################################################
user_inform_slots_rep = np.zeros((1, self.slot_cardinality))
for slot in user_action['inform_slots'].keys():
user_inform_slots_rep[0, self.slot_set[slot]] = 1.0
########################################################################
# Create bag of request slots representation to represent the current user action
########################################################################
user_request_slots_rep = np.zeros((1, self.slot_cardinality))
for slot in user_action['request_slots'].keys():
user_request_slots_rep[0, self.slot_set[slot]] = 1.0
########################################################################
        # Create bag of filled-in slots based on the current_slots
########################################################################
current_slots_rep = np.zeros((1, self.slot_cardinality))
for slot in current_slots['inform_slots']:
current_slots_rep[0, self.slot_set[slot]] = 1.0
########################################################################
# Encode last agent act
########################################################################
agent_act_rep = np.zeros((1, self.act_cardinality))
if agent_last:
agent_act_rep[0, self.act_set[agent_last['diaact']]] = 1.0
########################################################################
# Encode last agent inform slots
########################################################################
agent_inform_slots_rep = np.zeros((1, self.slot_cardinality))
if agent_last:
for slot in agent_last['inform_slots'].keys():
agent_inform_slots_rep[0, self.slot_set[slot]] = 1.0
########################################################################
# Encode last agent request slots
########################################################################
agent_request_slots_rep = np.zeros((1, self.slot_cardinality))
if agent_last:
for slot in agent_last['request_slots'].keys():
agent_request_slots_rep[0, self.slot_set[slot]] = 1.0
# turn_rep = np.zeros((1,1)) + state['turn'] / 10.
turn_rep = np.zeros((1, 1))
########################################################################
# One-hot representation of the turn count?
########################################################################
turn_onehot_rep = np.zeros((1, self.max_turn))
turn_onehot_rep[0, state['turn']] = 1.0
# ########################################################################
# # Representation of KB results (scaled counts)
# ########################################################################
# kb_count_rep = np.zeros((1, self.slot_cardinality + 1)) + kb_results_dict['matching_all_constraints'] / 100.
# for slot in kb_results_dict:
# if slot in self.slot_set:
# kb_count_rep[0, self.slot_set[slot]] = kb_results_dict[slot] / 100.
#
# ########################################################################
# # Representation of KB results (binary)
# ########################################################################
# kb_binary_rep = np.zeros((1, self.slot_cardinality + 1)) + np.sum( kb_results_dict['matching_all_constraints'] > 0.)
# for slot in kb_results_dict:
# if slot in self.slot_set:
# kb_binary_rep[0, self.slot_set[slot]] = np.sum( kb_results_dict[slot] > 0.)
kb_count_rep = np.zeros((1, self.slot_cardinality + 1))
########################################################################
# Representation of KB results (binary)
########################################################################
kb_binary_rep = np.zeros((1, self.slot_cardinality + 1))
self.final_representation = np.hstack(
[user_act_rep, user_inform_slots_rep, user_request_slots_rep, agent_act_rep, agent_inform_slots_rep,
agent_request_slots_rep, current_slots_rep, turn_rep, turn_onehot_rep, kb_binary_rep, kb_count_rep])
return self.final_representation
def run_policy(self, representation):
""" epsilon-greedy policy """
if random.random() < self.epsilon:
return random.randint(0, self.num_actions - 1)
else:
if self.warm_start == 1:
                if len(self.experience_replay_pool) >= self.experience_replay_pool_size:
self.warm_start = 2
return self.rule_policy()
else:
return self.DQN_policy(representation)
def rule_policy(self):
""" Rule Policy """
act_slot_response = {}
if self.current_slot_id < len(self.request_set):
slot = self.request_set[self.current_slot_id]
self.current_slot_id += 1
act_slot_response = {}
act_slot_response['diaact'] = "request"
act_slot_response['inform_slots'] = {}
act_slot_response['request_slots'] = {slot: "UNK"}
elif self.phase == 0:
act_slot_response = {'diaact': "inform", 'inform_slots': {'taskcomplete': "PLACEHOLDER"},
'request_slots': {}}
self.phase += 1
elif self.phase == 1:
act_slot_response = {'diaact': "thanks", 'inform_slots': {}, 'request_slots': {}}
return self.action_index(act_slot_response)
def DQN_policy(self, state_representation):
""" Return action from DQN"""
with torch.no_grad():
#action = self.dqn.predict(torch.FloatTensor(state_representation))
#print (torch.FloatTensor(state_representation).size())
action = self.dqn.predict_prob(torch.FloatTensor(state_representation))
action = torch.argmax(action, 1)
#mask = self.user_planning.model.get_mask(torch.FloatTensor(state_representation))
return action
def action_index(self, act_slot_response):
""" Return the index of action """
for (i, action) in enumerate(self.feasible_actions):
if act_slot_response == action:
return i
print act_slot_response
raise Exception("action index not found")
return None
def register_experience_replay_tuple(self, s_t, a_t, reward, s_tplus1, episode_over, st_user, from_model=False):
""" Register feedback from either environment or world model, to be stored as future training data """
state_t_rep = self.prepare_state_representation(s_t)
action_t = self.action
reward_t = reward
state_tplus1_rep = self.prepare_state_representation(s_tplus1)
st_user = self.prepare_state_representation(st_user) # ???
training_example = (state_t_rep, action_t, reward_t, state_tplus1_rep, episode_over, st_user)
if self.predict_mode == False: # Training Mode
if self.warm_start == 1:
self.experience_replay_pool.append(training_example)
else: # Prediction Mode
if not from_model:
self.experience_replay_pool.append(training_example)
else:
self.experience_replay_pool_from_model.append(training_example)
def sample_from_buffer(self, batch_size):
"""Sample batch size examples from experience buffer and convert it to torch readable format"""
# type: (int, ) -> Transition
batch = [random.choice(self.running_expereince_pool) for i in xrange(batch_size)]
np_batch = []
for x in range(len(Transition._fields)):
v = []
for i in xrange(batch_size):
v.append(batch[i][x])
np_batch.append(np.vstack(v))
return Transition(*np_batch)
def train(self, batch_size=1, num_batches=100):
""" Train DQN with experience buffer that comes from both user and world model interaction."""
self.cur_bellman_err = 0.
self.cur_bellman_err_planning = 0.
self.running_expereince_pool = list(self.experience_replay_pool) + list(self.experience_replay_pool_from_model)
for iter_batch in range(num_batches):
for iter in range(len(self.running_expereince_pool) / (batch_size)):
self.optimizer.zero_grad()
batch = self.sample_from_buffer(batch_size)
state_value = self.dqn(torch.FloatTensor(batch.state)).gather(1, torch.tensor(batch.action))
next_state_value, _ = self.target_dqn(torch.FloatTensor(batch.next_state)).max(1)
next_state_value = next_state_value.unsqueeze(1)
term = np.asarray(batch.term, dtype=np.float32)
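                # TD target (comment added): reward + gamma * max_a' Q_target(s', a'), zeroed on terminal transitions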
expected_value = torch.FloatTensor(batch.reward) + self.gamma * next_state_value * (
1 - torch.FloatTensor(term))
loss = F.mse_loss(state_value, expected_value)
loss.backward()
self.optimizer.step()
self.cur_bellman_err += loss.item()
if len(self.experience_replay_pool) != 0:
print (
"cur bellman err %.4f, experience replay pool %s, model replay pool %s, cur bellman err for planning %.4f" % (
float(self.cur_bellman_err) / (len(self.experience_replay_pool) / (float(batch_size))),
len(self.experience_replay_pool), len(self.experience_replay_pool_from_model),
self.cur_bellman_err_planning))
# def train_one_iter(self, batch_size=1, num_batches=100, planning=False):
# """ Train DQN with experience replay """
# self.cur_bellman_err = 0
# self.cur_bellman_err_planning = 0
# running_expereince_pool = self.experience_replay_pool + self.experience_replay_pool_from_model
# for iter_batch in range(num_batches):
# batch = [random.choice(self.experience_replay_pool) for i in xrange(batch_size)]
# np_batch = []
# for x in range(5):
# v = []
# for i in xrange(len(batch)):
# v.append(batch[i][x])
# np_batch.append(np.vstack(v))
#
# batch_struct = self.dqn.singleBatch(np_batch)
# self.cur_bellman_err += batch_struct['cost']['total_cost']
# if planning:
# plan_step = 3
# for _ in xrange(plan_step):
# batch_planning = [random.choice(self.experience_replay_pool) for i in
# xrange(batch_size)]
# np_batch_planning = []
# for x in range(5):
# v = []
# for i in xrange(len(batch_planning)):
# v.append(batch_planning[i][x])
# np_batch_planning.append(np.vstack(v))
#
# s_tp1, r, t = self.user_planning.predict(np_batch_planning[0], np_batch_planning[1])
# s_tp1[np.where(s_tp1 >= 0.5)] = 1
# s_tp1[np.where(s_tp1 <= 0.5)] = 0
#
# t[np.where(t >= 0.5)] = 1
#
# np_batch_planning[2] = r
# np_batch_planning[3] = s_tp1
# np_batch_planning[4] = t
#
# batch_struct = self.dqn.singleBatch(np_batch_planning)
# self.cur_bellman_err_planning += batch_struct['cost']['total_cost']
#
# if len(self.experience_replay_pool) != 0:
# print ("cur bellman err %.4f, experience replay pool %s, cur bellman err for planning %.4f" % (
# float(self.cur_bellman_err) / (len(self.experience_replay_pool) / (float(batch_size))),
# len(self.experience_replay_pool), self.cur_bellman_err_planning))
################################################################################
# Debug Functions
################################################################################
def save_experience_replay_to_file(self, path):
""" Save the experience replay pool to a file """
try:
pickle.dump(self.experience_replay_pool, open(path, "wb"))
print 'saved model in %s' % (path,)
except Exception, e:
print 'Error: Writing model fails: %s' % (path,)
print e
def load_experience_replay_from_file(self, path):
""" Load the experience replay pool from a file"""
self.experience_replay_pool = pickle.load(open(path, 'rb'))
def load_trained_DQN(self, path):
""" Load the trained DQN from a file """
trained_file = pickle.load(open(path, 'rb'))
model = trained_file['model']
print "Trained DQN Parameters:", json.dumps(trained_file['params'], indent=2)
return model
def set_user_planning(self, user_planning):
self.user_planning = user_planning
def save(self, filename):
torch.save(self.dqn.state_dict(), filename)
def load(self, filename):
self.dqn.load_state_dict(torch.load(filename))
def reset_dqn_target(self):
self.target_dqn.load_state_dict(self.dqn.state_dict())
| avg_line_length: 45.444162 | max_line_length: 130 | alphanum_fraction: 0.556102 |
| hexsha: 42726c759473e42b254e756406c5702fd9edf1a3 | size: 2389 | ext: py | lang: Python |
| max_stars_repo_path: droidlet/memory/robot/tests/test_low_level_memory.py | max_stars_repo_name: CowherdChris/droidlet | max_stars_repo_head_hexsha: 8d965c1ebc38eceb6f8083c52b1146c1bc17d5e1 | max_stars_repo_licenses: ["MIT"] |
| max_stars_count: null | max_issues_count: null | max_forks_count: null | issues/forks repo and path: same as stars |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import unittest
from droidlet.memory.robot.loco_memory import LocoAgentMemory
from droidlet.memory.robot.loco_memory_nodes import DetectedObjectNode
from droidlet.base_util import Pos, Look, Player
class DO:
def __init__(self, eid, label, properties, color, xyz, bounds, feature_repr=None):
self.eid = eid
self.label = label
self.properties = properties
self.color = color
self.xyz = xyz
self.bounds = bounds
self.feature_repr = feature_repr
def get_bounds(self):
return self.bounds
def get_xyz(self):
return {"x": self.xyz[0], "y": self.xyz[1], "z": self.xyz[2]}
class BasicTest(unittest.TestCase):
def test_player_apis(self):
self.memory = LocoAgentMemory()
player_list = [
Player(20, "xyz", Pos(1, 1, 1), Look(1, 1)),
Player(10, "abc", Pos(0, 0, 3), Look(0, 0))
]
# test update_other_players
self.memory.update_other_players(player_list)
assert self.memory.get_player_by_name("xyz").pos == (1.0, 1.0, 1.0)
assert self.memory.get_player_by_eid(10).name == "abc"
def test_detected_object_apis(self):
self.memory = LocoAgentMemory()
d = DO(
eid=33,
label="smaug",
properties=["red_golden", "dragon", "lonely_mountain"],
color="mauve",
xyz=[-0.4, -0.08, 0.0],
bounds=[0, 0, 0, 0, 0, 0]
)
detected_object_mem_id = DetectedObjectNode.create(self.memory, d)
# test get_detected_objects_tagged
all_tags = ["red_golden", "dragon", "lonely_mountain", "mauve", "smaug"]
for t in all_tags:
assert len(self.memory.get_detected_objects_tagged(t)) == 1
assert self.memory.get_detected_objects_tagged(t).pop() == detected_object_mem_id
def test_dance_api(self):
self.memory = LocoAgentMemory()
def return_num():
return 10
self.memory.add_dance(return_num, "generate_num_10_dance", ["generate_num_10", "dance_with_numbers"])
assert len(self.memory.get_triples(obj_text="generate_num_10")) == 1
assert len(self.memory.get_triples(obj_text="dance_with_numbers")) == 1
if __name__ == "__main__":
unittest.main()
| avg_line_length: 34.128571 | max_line_length: 109 | alphanum_fraction: 0.615739 |
| hexsha: e7f9b9656ded1aa62bdd56ca74bf003068f4f806 | size: 3097 | ext: py | lang: Python |
| max_stars_repo_path: library/nsxt_fabric_compute_managers_facts.py | max_stars_repo_name: lxiaopei/ansible-for-nsxt | max_stars_repo_head_hexsha: b2e0eb23dbc50d0bf2b78aeb379db4262052ec9a | max_stars_repo_licenses: ["BSD-2-Clause"] |
| max_stars_count: 4 (2020-12-22T18:39:33.000Z .. 2021-08-05T00:35:57.000Z) | max_issues_count: null | max_forks_count: 1 (2021-05-21T15:14:12.000Z .. 2021-05-21T15:14:12.000Z) | issues/forks repo and path: same as stars |
#!/usr/bin/env python
#
# Copyright 2018 VMware, Inc.
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nsxt_fabric_compute_managers_facts
short_description: Return the List of Compute managers
description: Returns information about all compute managers.
version_added: "2.7"
author: Rahul Raghuvanshi
options:
hostname:
description: Deployed NSX manager hostname.
required: true
type: str
username:
description: The username to authenticate with the NSX manager.
required: true
type: str
password:
description: The password to authenticate with the NSX manager.
required: true
type: str
'''
EXAMPLES = '''
- name: Lists all compute managers
nsxt_fabric_compute_managers_facts:
hostname: "10.192.167.137"
username: "admin"
password: "Admin!23Admin"
validate_certs: False
'''
RETURN = '''# '''
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware_nsxt import vmware_argument_spec, request
from ansible.module_utils.urls import open_url, fetch_url
from ansible.module_utils._text import to_native
from ansible.module_utils.six.moves.urllib.error import HTTPError
def main():
argument_spec = vmware_argument_spec()
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
mgr_hostname = module.params['hostname']
mgr_username = module.params['username']
mgr_password = module.params['password']
validate_certs = module.params['validate_certs']
manager_url = 'https://{}/api/v1'.format(mgr_hostname)
changed = False
try:
(rc, resp) = request(manager_url+ '/fabric/compute-managers', headers=dict(Accept='application/json'),
url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True)
except Exception as err:
module.fail_json(msg='Error accessing fabric compute manager. Error [%s]' % (to_native(err)))
module.exit_json(changed=changed, **resp)
if __name__ == '__main__':
main()
| avg_line_length: 36.435294 | max_line_length: 136 | alphanum_fraction: 0.738779 |
| hexsha: 4d1860029c172b29cb0eaee7f7e8dc5617e42295 | size: 23439 | ext: py | lang: Python |
| max_stars_repo_path: jodconverter-web/src/main/office/program/python-core-2.7.6/lib/multiprocessing/pool.py | max_stars_repo_name: huleigithup/filepreview | max_stars_repo_head_hexsha: 815fac0e21547301604bb5fd623a91d885cb4437 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 2151 (2020-04-18T07:31:17.000Z .. 2022-03-31T08:39:18.000Z) |
| max_issues_repo_path: BitmessageKit/Vendor/static-python/Lib/multiprocessing/pool.py | max_issues_repo_name: VoluntaryLabs/BitmessageKit | max_issues_repo_head_hexsha: dd634977a629ab4dec184e12bb6324cc01149ba3 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 395 (2020-04-18T08:22:18.000Z .. 2021-12-08T13:04:49.000Z) |
| max_forks_repo_path: BitmessageKit/Vendor/static-python/Lib/multiprocessing/pool.py | max_forks_repo_name: VoluntaryLabs/BitmessageKit | max_forks_repo_head_hexsha: dd634977a629ab4dec184e12bb6324cc01149ba3 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 338 (2020-04-18T08:03:10.000Z .. 2022-03-29T12:33:22.000Z) |
#
# Module providing the `Pool` class for managing a process pool
#
# multiprocessing/pool.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
__all__ = ['Pool']
#
# Imports
#
import threading
import Queue
import itertools
import collections
import time
from multiprocessing import Process, cpu_count, TimeoutError
from multiprocessing.util import Finalize, debug
#
# Constants representing the state of a pool
#
RUN = 0
CLOSE = 1
TERMINATE = 2
#
# Miscellaneous
#
job_counter = itertools.count()
def mapstar(args):
return map(*args)
#
# Code run by worker processes
#
class MaybeEncodingError(Exception):
"""Wraps possible unpickleable errors, so they can be
safely sent through the socket."""
def __init__(self, exc, value):
self.exc = repr(exc)
self.value = repr(value)
super(MaybeEncodingError, self).__init__(self.exc, self.value)
def __str__(self):
return "Error sending result: '%s'. Reason: '%s'" % (self.value,
self.exc)
def __repr__(self):
return "<MaybeEncodingError: %s>" % str(self)
def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None):
assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0)
put = outqueue.put
get = inqueue.get
if hasattr(inqueue, '_writer'):
inqueue._writer.close()
outqueue._reader.close()
if initializer is not None:
initializer(*initargs)
completed = 0
while maxtasks is None or (maxtasks and completed < maxtasks):
try:
task = get()
except (EOFError, IOError):
debug('worker got EOFError or IOError -- exiting')
break
if task is None:
debug('worker got sentinel -- exiting')
break
job, i, func, args, kwds = task
try:
result = (True, func(*args, **kwds))
except Exception, e:
result = (False, e)
try:
put((job, i, result))
except Exception as e:
wrapped = MaybeEncodingError(e, result[1])
debug("Possible encoding error while sending result: %s" % (
wrapped))
put((job, i, (False, wrapped)))
completed += 1
debug('worker exiting after %d tasks' % completed)
#
# Class representing a process pool
#
class Pool(object):
'''
Class which supports an async version of the `apply()` builtin
'''
Process = Process
def __init__(self, processes=None, initializer=None, initargs=(),
maxtasksperchild=None):
self._setup_queues()
self._taskqueue = Queue.Queue()
self._cache = {}
self._state = RUN
self._maxtasksperchild = maxtasksperchild
self._initializer = initializer
self._initargs = initargs
if processes is None:
try:
processes = cpu_count()
except NotImplementedError:
processes = 1
if processes < 1:
raise ValueError("Number of processes must be at least 1")
if initializer is not None and not hasattr(initializer, '__call__'):
raise TypeError('initializer must be a callable')
self._processes = processes
self._pool = []
self._repopulate_pool()
self._worker_handler = threading.Thread(
target=Pool._handle_workers,
args=(self, )
)
self._worker_handler.daemon = True
self._worker_handler._state = RUN
self._worker_handler.start()
self._task_handler = threading.Thread(
target=Pool._handle_tasks,
args=(self._taskqueue, self._quick_put, self._outqueue, self._pool)
)
self._task_handler.daemon = True
self._task_handler._state = RUN
self._task_handler.start()
self._result_handler = threading.Thread(
target=Pool._handle_results,
args=(self._outqueue, self._quick_get, self._cache)
)
self._result_handler.daemon = True
self._result_handler._state = RUN
self._result_handler.start()
self._terminate = Finalize(
self, self._terminate_pool,
args=(self._taskqueue, self._inqueue, self._outqueue, self._pool,
self._worker_handler, self._task_handler,
self._result_handler, self._cache),
exitpriority=15
)
def _join_exited_workers(self):
"""Cleanup after any worker processes which have exited due to reaching
their specified lifetime. Returns True if any workers were cleaned up.
"""
cleaned = False
for i in reversed(range(len(self._pool))):
worker = self._pool[i]
if worker.exitcode is not None:
# worker exited
debug('cleaning up worker %d' % i)
worker.join()
cleaned = True
del self._pool[i]
return cleaned
def _repopulate_pool(self):
"""Bring the number of pool processes up to the specified number,
for use after reaping workers which have exited.
"""
for i in range(self._processes - len(self._pool)):
w = self.Process(target=worker,
args=(self._inqueue, self._outqueue,
self._initializer,
self._initargs, self._maxtasksperchild)
)
self._pool.append(w)
w.name = w.name.replace('Process', 'PoolWorker')
w.daemon = True
w.start()
debug('added worker')
def _maintain_pool(self):
"""Clean up any exited workers and start replacements for them.
"""
if self._join_exited_workers():
self._repopulate_pool()
def _setup_queues(self):
from .queues import SimpleQueue
self._inqueue = SimpleQueue()
self._outqueue = SimpleQueue()
self._quick_put = self._inqueue._writer.send
self._quick_get = self._outqueue._reader.recv
def apply(self, func, args=(), kwds={}):
'''
Equivalent of `apply()` builtin
'''
assert self._state == RUN
return self.apply_async(func, args, kwds).get()
def map(self, func, iterable, chunksize=None):
'''
Equivalent of `map()` builtin
'''
assert self._state == RUN
return self.map_async(func, iterable, chunksize).get()
def imap(self, func, iterable, chunksize=1):
'''
Equivalent of `itertools.imap()` -- can be MUCH slower than `Pool.map()`
'''
assert self._state == RUN
if chunksize == 1:
result = IMapIterator(self._cache)
self._taskqueue.put((((result._job, i, func, (x,), {})
for i, x in enumerate(iterable)), result._set_length))
return result
else:
assert chunksize > 1
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = IMapIterator(self._cache)
self._taskqueue.put((((result._job, i, mapstar, (x,), {})
for i, x in enumerate(task_batches)), result._set_length))
return (item for chunk in result for item in chunk)
def imap_unordered(self, func, iterable, chunksize=1):
'''
Like `imap()` method but ordering of results is arbitrary
'''
assert self._state == RUN
if chunksize == 1:
result = IMapUnorderedIterator(self._cache)
self._taskqueue.put((((result._job, i, func, (x,), {})
for i, x in enumerate(iterable)), result._set_length))
return result
else:
assert chunksize > 1
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = IMapUnorderedIterator(self._cache)
self._taskqueue.put((((result._job, i, mapstar, (x,), {})
for i, x in enumerate(task_batches)), result._set_length))
return (item for chunk in result for item in chunk)
def apply_async(self, func, args=(), kwds={}, callback=None):
'''
Asynchronous equivalent of `apply()` builtin
'''
assert self._state == RUN
result = ApplyResult(self._cache, callback)
self._taskqueue.put(([(result._job, None, func, args, kwds)], None))
return result
def map_async(self, func, iterable, chunksize=None, callback=None):
'''
Asynchronous equivalent of `map()` builtin
'''
assert self._state == RUN
if not hasattr(iterable, '__len__'):
iterable = list(iterable)
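        # default chunk size (comment added): aim for roughly four chunks per worker process, rounding up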
if chunksize is None:
chunksize, extra = divmod(len(iterable), len(self._pool) * 4)
if extra:
chunksize += 1
if len(iterable) == 0:
chunksize = 0
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = MapResult(self._cache, chunksize, len(iterable), callback)
self._taskqueue.put((((result._job, i, mapstar, (x,), {})
for i, x in enumerate(task_batches)), None))
return result
@staticmethod
def _handle_workers(pool):
thread = threading.current_thread()
# Keep maintaining workers until the cache gets drained, unless the pool
# is terminated.
while thread._state == RUN or (pool._cache and thread._state != TERMINATE):
pool._maintain_pool()
time.sleep(0.1)
# send sentinel to stop workers
pool._taskqueue.put(None)
debug('worker handler exiting')
@staticmethod
def _handle_tasks(taskqueue, put, outqueue, pool):
thread = threading.current_thread()
for taskseq, set_length in iter(taskqueue.get, None):
i = -1
for i, task in enumerate(taskseq):
if thread._state:
debug('task handler found thread._state != RUN')
break
try:
put(task)
except IOError:
debug('could not put task on queue')
break
else:
if set_length:
debug('doing set_length()')
set_length(i+1)
continue
break
else:
debug('task handler got sentinel')
try:
# tell result handler to finish when cache is empty
debug('task handler sending sentinel to result handler')
outqueue.put(None)
# tell workers there is no more work
debug('task handler sending sentinel to workers')
for p in pool:
put(None)
except IOError:
debug('task handler got IOError when sending sentinels')
debug('task handler exiting')
@staticmethod
def _handle_results(outqueue, get, cache):
thread = threading.current_thread()
while 1:
try:
task = get()
except (IOError, EOFError):
debug('result handler got EOFError/IOError -- exiting')
return
if thread._state:
assert thread._state == TERMINATE
debug('result handler found thread._state=TERMINATE')
break
if task is None:
debug('result handler got sentinel')
break
job, i, obj = task
try:
cache[job]._set(i, obj)
except KeyError:
pass
while cache and thread._state != TERMINATE:
try:
task = get()
except (IOError, EOFError):
debug('result handler got EOFError/IOError -- exiting')
return
if task is None:
debug('result handler ignoring extra sentinel')
continue
job, i, obj = task
try:
cache[job]._set(i, obj)
except KeyError:
pass
if hasattr(outqueue, '_reader'):
debug('ensuring that outqueue is not full')
# If we don't make room available in outqueue then
# attempts to add the sentinel (None) to outqueue may
# block. There is guaranteed to be no more than 2 sentinels.
try:
for i in range(10):
if not outqueue._reader.poll():
break
get()
except (IOError, EOFError):
pass
debug('result handler exiting: len(cache)=%s, thread._state=%s',
len(cache), thread._state)
@staticmethod
def _get_tasks(func, it, size):
it = iter(it)
while 1:
x = tuple(itertools.islice(it, size))
if not x:
return
yield (func, x)
def __reduce__(self):
raise NotImplementedError(
'pool objects cannot be passed between processes or pickled'
)
def close(self):
debug('closing pool')
if self._state == RUN:
self._state = CLOSE
self._worker_handler._state = CLOSE
def terminate(self):
debug('terminating pool')
self._state = TERMINATE
self._worker_handler._state = TERMINATE
self._terminate()
def join(self):
debug('joining pool')
assert self._state in (CLOSE, TERMINATE)
self._worker_handler.join()
self._task_handler.join()
self._result_handler.join()
for p in self._pool:
p.join()
@staticmethod
def _help_stuff_finish(inqueue, task_handler, size):
# task_handler may be blocked trying to put items on inqueue
debug('removing tasks from inqueue until task handler finished')
inqueue._rlock.acquire()
while task_handler.is_alive() and inqueue._reader.poll():
inqueue._reader.recv()
time.sleep(0)
@classmethod
def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool,
worker_handler, task_handler, result_handler, cache):
# this is guaranteed to only be called once
debug('finalizing pool')
worker_handler._state = TERMINATE
task_handler._state = TERMINATE
debug('helping task handler/workers to finish')
cls._help_stuff_finish(inqueue, task_handler, len(pool))
assert result_handler.is_alive() or len(cache) == 0
result_handler._state = TERMINATE
outqueue.put(None) # sentinel
# We must wait for the worker handler to exit before terminating
# workers because we don't want workers to be restarted behind our back.
debug('joining worker handler')
if threading.current_thread() is not worker_handler:
worker_handler.join(1e100)
# Terminate workers which haven't already finished.
if pool and hasattr(pool[0], 'terminate'):
debug('terminating workers')
for p in pool:
if p.exitcode is None:
p.terminate()
debug('joining task handler')
if threading.current_thread() is not task_handler:
task_handler.join(1e100)
debug('joining result handler')
if threading.current_thread() is not result_handler:
result_handler.join(1e100)
if pool and hasattr(pool[0], 'terminate'):
debug('joining pool workers')
for p in pool:
if p.is_alive():
# worker has not yet exited
debug('cleaning up worker %d' % p.pid)
p.join()
#
# Class whose instances are returned by `Pool.apply_async()`
#
class ApplyResult(object):
def __init__(self, cache, callback):
self._cond = threading.Condition(threading.Lock())
self._job = job_counter.next()
self._cache = cache
self._ready = False
self._callback = callback
cache[self._job] = self
def ready(self):
return self._ready
def successful(self):
assert self._ready
return self._success
def wait(self, timeout=None):
self._cond.acquire()
try:
if not self._ready:
self._cond.wait(timeout)
finally:
self._cond.release()
def get(self, timeout=None):
self.wait(timeout)
if not self._ready:
raise TimeoutError
if self._success:
return self._value
else:
raise self._value
def _set(self, i, obj):
self._success, self._value = obj
if self._callback and self._success:
self._callback(self._value)
self._cond.acquire()
try:
self._ready = True
self._cond.notify()
finally:
self._cond.release()
del self._cache[self._job]
AsyncResult = ApplyResult # create alias -- see #17805
#
# Class whose instances are returned by `Pool.map_async()`
#
class MapResult(ApplyResult):
def __init__(self, cache, chunksize, length, callback):
ApplyResult.__init__(self, cache, callback)
self._success = True
self._value = [None] * length
self._chunksize = chunksize
if chunksize <= 0:
self._number_left = 0
self._ready = True
del cache[self._job]
else:
self._number_left = length//chunksize + bool(length % chunksize)
def _set(self, i, success_result):
success, result = success_result
if success:
self._value[i*self._chunksize:(i+1)*self._chunksize] = result
self._number_left -= 1
if self._number_left == 0:
if self._callback:
self._callback(self._value)
del self._cache[self._job]
self._cond.acquire()
try:
self._ready = True
self._cond.notify()
finally:
self._cond.release()
else:
self._success = False
self._value = result
del self._cache[self._job]
self._cond.acquire()
try:
self._ready = True
self._cond.notify()
finally:
self._cond.release()
#
# Class whose instances are returned by `Pool.imap()`
#
class IMapIterator(object):
def __init__(self, cache):
self._cond = threading.Condition(threading.Lock())
self._job = job_counter.next()
self._cache = cache
self._items = collections.deque()
self._index = 0
self._length = None
self._unsorted = {}
cache[self._job] = self
def __iter__(self):
return self
def next(self, timeout=None):
self._cond.acquire()
try:
try:
item = self._items.popleft()
except IndexError:
if self._index == self._length:
raise StopIteration
self._cond.wait(timeout)
try:
item = self._items.popleft()
except IndexError:
if self._index == self._length:
raise StopIteration
raise TimeoutError
finally:
self._cond.release()
success, value = item
if success:
return value
raise value
__next__ = next # XXX
def _set(self, i, obj):
self._cond.acquire()
try:
if self._index == i:
self._items.append(obj)
self._index += 1
while self._index in self._unsorted:
obj = self._unsorted.pop(self._index)
self._items.append(obj)
self._index += 1
self._cond.notify()
else:
self._unsorted[i] = obj
if self._index == self._length:
del self._cache[self._job]
finally:
self._cond.release()
def _set_length(self, length):
self._cond.acquire()
try:
self._length = length
if self._index == self._length:
self._cond.notify()
del self._cache[self._job]
finally:
self._cond.release()
#
# Class whose instances are returned by `Pool.imap_unordered()`
#
class IMapUnorderedIterator(IMapIterator):
def _set(self, i, obj):
self._cond.acquire()
try:
self._items.append(obj)
self._index += 1
self._cond.notify()
if self._index == self._length:
del self._cache[self._job]
finally:
self._cond.release()
#
#
#
class ThreadPool(Pool):
from .dummy import Process
def __init__(self, processes=None, initializer=None, initargs=()):
Pool.__init__(self, processes, initializer, initargs)
def _setup_queues(self):
self._inqueue = Queue.Queue()
self._outqueue = Queue.Queue()
self._quick_put = self._inqueue.put
self._quick_get = self._outqueue.get
@staticmethod
def _help_stuff_finish(inqueue, task_handler, size):
# put sentinels at head of inqueue to make workers finish
inqueue.not_empty.acquire()
try:
inqueue.queue.clear()
inqueue.queue.extend([None] * size)
inqueue.not_empty.notify_all()
finally:
inqueue.not_empty.release()
| 32.020492
| 83
| 0.570118
|
03f599609aeb8cb19e858a6e978f51a9dd8c9cb1
| 8,757
|
py
|
Python
|
tests/test_compilejsx.py
|
virtalabs/django-jsx
|
ba8f77ca9b96c96328b44886b3c7389231570210
|
[
"BSD-2-Clause"
] | 63
|
2016-10-14T13:41:12.000Z
|
2022-03-11T22:13:10.000Z
|
tests/test_compilejsx.py
|
virtalabs/django-jsx
|
ba8f77ca9b96c96328b44886b3c7389231570210
|
[
"BSD-2-Clause"
] | 5
|
2017-01-27T16:39:16.000Z
|
2019-01-16T19:22:13.000Z
|
tests/test_compilejsx.py
|
virtalabs/django-jsx
|
ba8f77ca9b96c96328b44886b3c7389231570210
|
[
"BSD-2-Clause"
] | 6
|
2016-10-29T11:25:13.000Z
|
2019-01-07T02:14:29.000Z
|
from __future__ import unicode_literals
import hashlib
import io
import os
import sys
import tempfile
from django.core.management import call_command
from django.test import TestCase
from django_jsx.management.commands.compilejsx import compile_templates, END_JS, START_JS
class CompileJSXTest(TestCase):
"""
Tests for the compilejsx management command, which looks at all the
template files and emits a jsx_registry.jsx file with information about
the JSX blocks in the templates, and some JavaScript code to make use of the
information.
"""
@classmethod
def setUpClass(cls):
cls.files_to_delete = []
@classmethod
def tearDownClass(cls):
for fn in cls.files_to_delete:
try:
os.remove(fn)
except Exception as e:
print(e)
delattr(cls, 'files_to_delete')
@classmethod
def make_testfile(cls):
"""Returns name of the test file"""
(filehandle, filename) = tempfile.mkstemp()
os.close(filehandle)
cls.files_to_delete.append(filename)
return filename
def test_invoking_for_stdout(self):
# Default output is to stdout
out = io.StringIO()
orig_out = sys.stdout
try:
sys.stdout = out
call_command('compilejsx')
self.assertIn(START_JS, out.getvalue())
finally:
sys.stdout = orig_out
def test_invoking_to_output_file(self):
# --output sends output to named file
filename = type(self).make_testfile()
call_command('compilejsx', output=filename)
output = open(filename, "rb").read().decode('utf-8')
self.assertIn(START_JS, output)
def try_it(self, test_content, expected_result, raw=False):
# Make template file containing a jsx block whose body is `test_content`, run
# compilejsx, and make sure the output is `expected_result`. Or if `raw` is true,
# then `test_content` is the entire content of the test file to compile.
filename = type(self).make_testfile()
expected_result = expected_result.replace('{filename}', filename)
if raw:
test_text = test_content
else:
test_text = "{% jsx %}" + test_content + "{% endjsx %}"
with open(filename, "w") as f:
f.write(test_text)
# "Compile" it
output = io.StringIO()
compile_templates([filename], output)
# Strip boilerplate to simplify checking
start = len(START_JS) + 1
end = 0 - (len(END_JS) + 1)
result = output.getvalue()[start:end - 1]
self.maxDiff = None
self.assertEqual(expected_result, result)
def test_empty_template(self):
# If template is empty, output is just the boilerplate.
# Make empty file
filename = type(self).make_testfile()
with open(filename, "w"):
pass
# "Compile" it
output = io.StringIO()
compile_templates([filename], output)
# Check boilerplate
self.assertTrue(output.getvalue().startswith(START_JS + "\n"))
self.assertTrue(output.getvalue().endswith(END_JS + "\n"))
# Strip boilerplate to simplify checking what's not boilerplate
start = len(START_JS) + 1
end = 0 - (len(END_JS) + 1)
result = output.getvalue()[start:end - 1]
self.assertEqual('', result)
def test_template_with_empty_jsx_block(self):
# If the block is empty, the output is pretty minimal
test_content = ''
sha1 = hashlib.sha1(test_content.encode('utf-8')).hexdigest()
expected = '''/* {filename} */
jsx_registry["%s"] = (COMPONENTS, ctx) => {
return ();
}''' % sha1
self.try_it(test_content, expected)
def test_template_with_minimal_component(self):
# If the block just has a minimal React component, the output includes
# a jsx_registry entry for it.
test_content = '<NeatThing/>'
sha1 = hashlib.sha1(test_content.encode('utf-8')).hexdigest()
expected = '''/* {filename} */
jsx_registry["%s"] = (COMPONENTS, ctx) => {
if (Object.hasOwnProperty.call(COMPONENTS, 'NeatThing')) var {NeatThing} = COMPONENTS;
return (%s);
}''' % (sha1, test_content)
self.try_it(test_content, expected)
def test_template_with_component_with_static_property(self):
# Static properties don't change the output
test_content = '<NiftyFeature foo="bar"/>'
sha1 = hashlib.sha1(test_content.encode('utf-8')).hexdigest()
expected = '''/* {filename} */
jsx_registry["%s"] = (COMPONENTS, ctx) => {
if (Object.hasOwnProperty.call(COMPONENTS, 'NiftyFeature')) var {NiftyFeature} = COMPONENTS;
return (%s);
}''' % (sha1, test_content)
self.try_it(test_content, expected)
def test_template_with_component_with_variable_property(self):
# Variable properties don't change the output
test_content = '<WonderBar foo="{{ ctx.bar }}"/>'
sha1 = hashlib.sha1(test_content.encode('utf-8')).hexdigest()
expected = '''/* {filename} */
jsx_registry["%s"] = (COMPONENTS, ctx) => {
if (Object.hasOwnProperty.call(COMPONENTS, 'WonderBar')) var {WonderBar} = COMPONENTS;
return (%s);
}''' % (sha1, test_content)
self.try_it(test_content, expected)
def test_template_with_component_with_expression_property(self):
# Expressions in properties don't change the output
test_content = '<Component foo="{{ ctx.bar ? 3 : ctx.zip }}"/>'
sha1 = hashlib.sha1(test_content.encode('utf-8')).hexdigest()
expected = '''/* {filename} */
jsx_registry["%s"] = (COMPONENTS, ctx) => {
if (Object.hasOwnProperty.call(COMPONENTS, 'Component')) var {Component} = COMPONENTS;
return (%s);
}''' % (sha1, test_content)
self.try_it(test_content, expected)
def test_template_with_component_with_deep_variable(self):
# Variable properties don't change the output
test_content = '<Component foo="{{ ctx.foo.bar.baz }}"/>'
sha1 = hashlib.sha1(test_content.encode('utf-8')).hexdigest()
expected = '''/* {filename} */
jsx_registry["%s"] = (COMPONENTS, ctx) => {
if (Object.hasOwnProperty.call(COMPONENTS, 'Component')) var {Component} = COMPONENTS;
return (%s);
}''' % (sha1, test_content)
self.try_it(test_content, expected)
def test_template_with_nested_html(self):
# Each tag level contributes to the output. compilejsx doesn't know or care
# which tags are React components.
test_content = '''<div id="search-mnt" className="search-section-mnt">
<MobileModalSectionSearch
section="attraction"
sectionLabel="Attractions"
currentLocation={ctx.location ? ctx.location.full_name : null}
mapLink={ctx.map_link}
useDistance={ctx.location && ctx.location.kind === 'city'}
subCat={ctx.type_slug || ""}
/>
</div>'''
sha1 = hashlib.sha1(test_content.encode('utf-8')).hexdigest()
expected = '''/* {filename} */
jsx_registry["%s"] = (COMPONENTS, ctx) => {
if (Object.hasOwnProperty.call(COMPONENTS, 'MobileModalSectionSearch')) var {MobileModalSectionSearch} = COMPONENTS;
if (Object.hasOwnProperty.call(COMPONENTS, 'div')) var {div} = COMPONENTS;
return (%s);
}''' % (sha1, test_content) # noqa (long line hard to avoid here)
self.try_it(test_content, expected)
def test_duplicate_blocks_with_different_contexts(self):
# compilejsx comes up with the same jsx_registry entry repeatedly if there are multiple
# blocks with the same content but with different contexts. But this is okay, as
# the rendered template will have a tag for each occurrence of the block, each
# with its own unique context, and the JavaScript will render a component for
# each one using that context.
block_content = '<Component prop={ctx.foo}/>'
test_content = '''{% load jsx %}
{% with foo=1 %}
{% jsx %}BLOCK_CONTENT{% endjsx %}
{% endwith %}
{% with foo=2 %}
{% jsx %}BLOCK_CONTENT{% endjsx %}
{% endwith %}
'''.replace("BLOCK_CONTENT", block_content)
sha1 = hashlib.sha1(block_content.encode('utf-8')).hexdigest()
expected = '''/* {filename} */
jsx_registry["%s"] = (COMPONENTS, ctx) => {
if (Object.hasOwnProperty.call(COMPONENTS, 'Component')) var {Component} = COMPONENTS;
return (%s);
}
jsx_registry["%s"] = (COMPONENTS, ctx) => {
if (Object.hasOwnProperty.call(COMPONENTS, 'Component')) var {Component} = COMPONENTS;
return (%s);
}''' % (sha1, block_content, sha1, block_content)
self.try_it(test_content, expected, raw=True)
| 39.624434
| 116
| 0.639374
|
e297562190a1076047b5c62365b34c4169906407
| 291
|
py
|
Python
|
building/flowerTrail.py
|
cssidy/minecraft
|
84aeef7c4658a048babcd7c7f900775dd8db41fa
|
[
"MIT"
] | 1
|
2020-11-05T04:06:02.000Z
|
2020-11-05T04:06:02.000Z
|
building/flowerTrail.py
|
cssidy/minecraft
|
84aeef7c4658a048babcd7c7f900775dd8db41fa
|
[
"MIT"
] | null | null | null |
building/flowerTrail.py
|
cssidy/minecraft
|
84aeef7c4658a048babcd7c7f900775dd8db41fa
|
[
"MIT"
] | null | null | null |
# as seen in Learn To Program With Minecraft: Transform Your World With The Power of Python, written by Craig Richardson
from mcpi.minecraft import Minecraft
import time
mc = Minecraft.create()
while True:
pos = mc.player.getPos()
mc.setBlock(pos.x, pos.y, pos.z, 38)
time.sleep(0.2)
| 22.384615
| 120
| 0.745704
|
c34313eb74d5698dd48500c63d0d97f6b9cc2507
| 5,685
|
py
|
Python
|
skinnywms/fields/GRIBField.py
|
EduardRosert/skinnywms
|
c324428573e17c829a7b6c381f95feedfd0ccd9a
|
[
"Apache-2.0"
] | 32
|
2019-08-30T16:51:38.000Z
|
2022-03-22T13:30:02.000Z
|
skinnywms/fields/GRIBField.py
|
EduardRosert/skinnywms
|
c324428573e17c829a7b6c381f95feedfd0ccd9a
|
[
"Apache-2.0"
] | 26
|
2019-09-12T07:39:55.000Z
|
2022-02-16T17:30:39.000Z
|
skinnywms/fields/GRIBField.py
|
DeutscherWetterdienst/skinnywms
|
cf415547634864b699697cbbbe4ee4b6648405ac
|
[
"Apache-2.0"
] | 20
|
2019-08-21T07:29:59.000Z
|
2022-02-16T08:38:43.000Z
|
# (C) Copyright 2012-2019 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation nor
# does it submit to any jurisdiction.
from skinnywms import datatypes
import logging
from skinnywms import grib_bindings
companions = { "10u" : "10v" , "10v" : "10u" }
ucomponents = ["10u"]
vcomponents = ["10v"]
possible_matches = {}
class GRIBField(datatypes.Field):
log = logging.getLogger(__name__)
def __init__(self, context, path, grib, index):
self.path = path
self.index = index
self.mars = grib.mars_request
self.render = self.render_contour
self.time = grib.valid_date
self.levtype = grib.levtype
self.shortName = grib.shortName
if grib.levtype == "sfc":
self.name = grib.shortName
self.title = grib.name
else:
self.name = "%s_%s" % (grib.shortName, grib.levelist)
self.title = "%s at %s" % (grib.name, grib.levelist)
self.levelist = grib.levelist
if self.shortName in companions:
companion = companions[self.shortName]
matches = possible_matches.get(companion, [])
found = False
for match in matches:
found = self.match(match)
                if found:
                    break
if not found:
if self.name not in possible_matches:
possible_matches[self.name] = [self]
else:
possible_matches[self.name].append(self)
key = "style.grib.%s" % (self.name,)
# Optimisation
self.styles = context.stash.get(key)
if self.styles is None:
self.styles = context.stash[key] = context.styler.grib_styles(
self, grib, path, index
)
def match(self, companion):
if self.time != companion.time:
return False
if self.levtype != companion.levtype:
return False
if self.levtype != "sfc":
if self.levelist != companion.levelist:
return False
# Found a match WE have a vector
self.render = self.render_wind
if self.name in ucomponents:
self.ucomponent = self.index
self.vcomponent = companion.index
companion.ucomponent = self.index
companion.vcomponent = companion.index
            if self.levtype == "sfc":
                self.name = "{}_{}".format(self.shortName, companion.shortName)
                self.title = "{}/{}".format(self.shortName, companion.shortName)
            else:
                self.name = "{}_{}_{}".format(self.shortName, companion.shortName, self.levelist)
                self.title = "{}/{} at {}".format(self.shortName, companion.shortName, self.levelist)
else:
self.vcomponent = self.index
self.ucomponent = companion.index
companion.vcomponent = self.index
companion.ucomponent = companion.index
if self.levtype == "sfc":
self.name = "{}/{}".format(companion.shortName, self.shortName)
self.title = "{}/{}".format(companion.shortName, self.shortName)
else:
self.name = "{}_{}_{}".format(companion.shortName, self.shortName, self.levelist)
self.title = "{}/{} at {}".format(companion.shortName, self.shortName, self.levelist)
return True
def render_contour(self, context, driver, style, legend={}):
data = []
params = dict(
grib_input_file_name=self.path, grib_field_position=self.index + 1
)
if style:
style.adjust_grib_plotting(params)
data.append(driver.mgrib(**params))
data.append(context.styler.contours(self, driver, style, legend))
return data
def render_wind(self, context, driver, style, legend={}):
data = []
params = dict(
grib_input_file_name = self.path,
grib_wind_position_1 = self.ucomponent+1,
grib_wind_position_2 = self.vcomponent+1
)
if style:
style.adjust_grib_plotting(params)
data.append(driver.mgrib(**params))
data.append(context.styler.winds(self, driver, style, legend))
return data
def as_dict(self):
return dict(
_class=self.__class__.__module__ + "." + self.__class__.__name__,
name=self.name,
title=self.title,
path=self.path,
index=self.index,
mars=self.mars,
styles=[s.as_dict() for s in self.styles],
time=self.time.isoformat() if self.time is not None else None,
)
def __repr__(self):
return "GRIBField[%r,%r,%r]" % (self.path, self.index, self.mars)
class GRIBReader:
"""Get WMS layers from a GRIB file."""
log = logging.getLogger(__name__)
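    # A minimal usage sketch (illustrative only); `context` stands for the
    # application object that provides `stash` and `styler`, and the GRIB
    # path is hypothetical.
    #
    #     reader = GRIBReader(context, "/path/to/data.grib")
    #     fields = reader.get_fields()  # list of GRIBField instances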
def __init__(self, context, path):
self.path = path
self.context = context
def get_fields(self):
self.log.info("Scanning file: %s", self.path)
fields = []
for i, m in enumerate(grib_bindings.GribFile(self.path)):
fields.append(GRIBField(self.context, self.path, m, i))
if not fields:
            raise Exception("GRIBReader: no 2D fields found in %s" % self.path)
return fields
| 32.118644
| 101
| 0.574494
|
7f17aea4803f196a0aed8b0dd6201d89df52ca6d
| 2,785
|
py
|
Python
|
modules/action/exploit_msf_javarmi.py
|
mrpnkt/apt2
|
542fb0593069c900303421f3f24a499ce8f3a6a8
|
[
"MIT"
] | 2
|
2018-05-09T15:57:07.000Z
|
2018-11-20T10:11:51.000Z
|
modules/action/exploit_msf_javarmi.py
|
zu3s/apt2-1
|
67325052d2713a363183c23188a67e98a379eec7
|
[
"MIT"
] | null | null | null |
modules/action/exploit_msf_javarmi.py
|
zu3s/apt2-1
|
67325052d2713a363183c23188a67e98a379eec7
|
[
"MIT"
] | 1
|
2018-11-02T18:31:55.000Z
|
2018-11-02T18:31:55.000Z
|
import re
from core.actionModule import actionModule
from core.keystore import KeyStore as kb
from core.mymsf import myMsf
from core.utils import Utils
class exploit_msf_javarmi(actionModule):
def __init__(self, config, display, lock):
super(exploit_msf_javarmi, self).__init__(config, display, lock)
self.triggers = ["newPort_tcp_1099"]
self.requirements = ["msfconsole"]
self.title = "Attempt to Exploit A Java RMI Service"
self.shortName = "MSFJavaRMI"
self.description = "execute [exploit/multi/misc/java_rmi_server] on each target"
self.safeLevel = 5
def getTargets(self):
self.targets = kb.get('port/tcp/1099')
def process(self):
# load any targets we are interested in
self.getTargets()
if len(self.targets) > 0:
# connect to msfrpc
msf = myMsf(host=self.config['msfhost'], port=int(self.config['msfport']), user=self.config['msfuser'],
password=self.config['msfpass'])
if not msf.isAuthenticated():
return
            # If any results are successful, this becomes True and fire() is called at the end
callFire = False
# loop over each target
for t in self.targets:
# verify we have not tested this host before
if not self.seentarget(t):
# add the new IP to the already seen list
self.addseentarget(t)
myMsf.lock.acquire()
self.display.verbose(self.shortName + " - Connecting to " + t)
msf.execute("use exploit/multi/misc/java_rmi_server\n")
msf.execute("set RHOST %s\n" % t)
#msf.execute("set TARGET 0\n")
msf.execute("set TARGET 2\n")
                    msf.execute("set PAYLOAD linux/x86/meterpreter/reverse_tcp\n")
msf.execute("set LPORT 4445\n")
msf.execute("exploit -j\n")
msf.sleep(int(self.config['msfexploitdelay']))
outfile = self.config["proofsDir"] + self.shortName + "_" + t + "_" + Utils.getRandStr(10)
result = msf.getResult()
myMsf.lock.release()
Utils.writeFile(result, outfile)
parts = re.findall(".*Meterpreter session.*", result)
for part in parts:
callFire = True
self.addVuln(t, "JavaRMI", {"port": "1099", "output": outfile.replace("/", "%2F")})
if callFire:
self.fire("msfSession")
# clean up after ourselves
result = msf.cleanup()
return
| 37.635135
| 115
| 0.549013
|
46cbdf11402f4a5a2aea63692e9b628a02be83ae
| 2,414
|
py
|
Python
|
tools/config_validation/validate_fragment.py
|
lopter-dbx/envoy
|
d342e96e7ba2319329838e799021838354e88118
|
[
"Apache-2.0"
] | 5
|
2020-11-30T15:36:04.000Z
|
2022-02-28T00:30:28.000Z
|
tools/config_validation/validate_fragment.py
|
lopter-dbx/envoy
|
d342e96e7ba2319329838e799021838354e88118
|
[
"Apache-2.0"
] | 104
|
2021-10-03T11:09:20.000Z
|
2022-01-05T00:21:59.000Z
|
tools/config_validation/validate_fragment.py
|
lopter-dbx/envoy
|
d342e96e7ba2319329838e799021838354e88118
|
[
"Apache-2.0"
] | 12
|
2020-09-11T14:26:33.000Z
|
2022-03-24T12:26:30.000Z
|
# Validate a YAML fragment against an Envoy API proto3 type.
#
# Example usage:
#
# bazel run //tools/config_validation:validate_fragment -- \
# envoy.config.bootstrap.v3.Bootstrap $PWD/configs/envoyproxy_io_proxy.yaml
import json
import pathlib
import sys
import yaml
from google.protobuf import descriptor_pb2
from google.protobuf import descriptor_pool
from google.protobuf import json_format
from google.protobuf import message_factory
from google.protobuf import text_format
from bazel_tools.tools.python.runfiles import runfiles
import argparse
def ValidateFragment(type_name, fragment):
"""Validate a dictionary representing a JSON/YAML fragment against an Envoy API proto3 type.
Throws Protobuf errors on parsing exceptions, successful validations produce
no result.
Args:
type_name: a string providing the type name, e.g.
envoy.config.bootstrap.v3.Bootstrap.
fragment: a dictionary representing the parsed JSON/YAML configuration
fragment.
"""
json_fragment = json.dumps(fragment)
r = runfiles.Create()
all_protos_pb_text_path = r.Rlocation(
'envoy/tools/type_whisperer/all_protos_with_ext_pb_text.pb_text')
file_desc_set = descriptor_pb2.FileDescriptorSet()
text_format.Parse(pathlib.Path(all_protos_pb_text_path).read_text(),
file_desc_set,
allow_unknown_extension=True)
pool = descriptor_pool.DescriptorPool()
for f in file_desc_set.file:
pool.Add(f)
desc = pool.FindMessageTypeByName(type_name)
msg = message_factory.MessageFactory(pool=pool).GetPrototype(desc)()
json_format.Parse(json_fragment, msg, descriptor_pool=pool)
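# A minimal usage sketch (illustrative only) of calling ValidateFragment
# directly from Python rather than through the CLI below; the type name and
# file path reuse the example from the header comment.
#
#     import yaml
#     with open('configs/envoyproxy_io_proxy.yaml') as f:
#         ValidateFragment('envoy.config.bootstrap.v3.Bootstrap', yaml.safe_load(f))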
def ParseArgs():
parser = argparse.ArgumentParser(
description='Validate a YAML fragment against an Envoy API proto3 type.')
parser.add_argument(
'message_type',
help='a string providing the type name, e.g. envoy.config.bootstrap.v3.Bootstrap.')
parser.add_argument('fragment_path', nargs='?', help='Path to a YAML configuration fragment.')
parser.add_argument('-s', required=False, help='YAML configuration fragment.')
return parser.parse_args()
if __name__ == '__main__':
parsed_args = ParseArgs()
message_type = parsed_args.message_type
content = parsed_args.s if (parsed_args.fragment_path is None) else pathlib.Path(
parsed_args.fragment_path).read_text()
ValidateFragment(message_type, yaml.safe_load(content))
| 33.068493
| 96
| 0.764292
|
78e63abe4289b6190ed69c3d63de290aa0e03537
| 7,453
|
py
|
Python
|
preferences_color.py
|
techdragon/graph-explorer
|
729b4e19dbfdd0f2ba031db08e6b5563c4eb6613
|
[
"Apache-2.0"
] | 1
|
2019-06-27T13:05:16.000Z
|
2019-06-27T13:05:16.000Z
|
preferences_color.py
|
techdragon/graph-explorer
|
729b4e19dbfdd0f2ba031db08e6b5563c4eb6613
|
[
"Apache-2.0"
] | null | null | null |
preferences_color.py
|
techdragon/graph-explorer
|
729b4e19dbfdd0f2ba031db08e6b5563c4eb6613
|
[
"Apache-2.0"
] | null | null | null |
from colors import colors
from backend import get_action_on_rules_match
# convenience functions
def get_unique_tag_value(graph, target, tag):
'''
get a tag corresponding to a target, if it's clear the target "owns" the tag.
this makes sure, if you're looking at cpu graphs with group by server,
each cpu type (user, idle, etc) has a representative color
but if you group by type (and compare servers on one graph for e.g. 'idle') you don't want
all targets to have the same color... except if due to filtering only 1 server shows up, we
can apply the color again.
note that if the graph has 6 targets: 3 different servers, each 2 different types, then this
will proceed and you'll see 3 targets of each color.
this could be extended to search for the value in the variables of all other targets, to guarantee
uniqueness (and avoid a color showing up multiple times)
TLDR: color a target based on tag value, but avoid all targets having the same color on 1 graph
'''
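    # Illustrative example only (made-up data): a tag present in the target's
    # own variables is returned directly; a tag shared by all targets returns
    # None unless the graph has been filtered down to a single target.
    #
    #     target = {'variables': {'type': 'idle'}}
    #     graph = {'targets': [target, other], 'constants': {}, 'promoted_constants': {}}
    #     get_unique_tag_value(graph, target, 'type')    # -> 'idle'
    #     get_unique_tag_value(graph, target, 'server')  # -> None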
# the graph has other targets that have different values for this tag
if tag in target['variables']:
t = target['variables'][tag]
elif len(graph['targets']) == 1:
# there's no other targets in the graph, maybe due to a filter.
# so we can safely get the value from [promoted] constants
if tag in graph['constants']:
t = graph['constants'][tag]
elif tag in graph['promoted_constants']:
t = graph['promoted_constants'][tag]
else:
return None
else:
return None
# t can be a tuple if it's an aggregated tag
if isinstance(t, basestring):
return t
else:
return t[0]
def get_tag_value(graph, target, tag):
'''
get a tag, if it applies to the target. irrespective of other targets
i.e. color a target based on tag value, and don't try to avoid multiple targets with same color
on 1 graph.
'''
if tag in target['variables']:
t = target['variables'][tag]
elif tag in graph['constants']:
t = graph['constants'][tag]
elif tag in graph['promoted_constants']:
t = graph['promoted_constants'][tag]
else:
return None
if isinstance(t, basestring):
return t
else:
return t[0]
def bin_set_color(graph, target):
if 'bin_upper' not in target['tags']:
return
# later we could do a real green-to-red interpolation by looking at
# the total range (all bin_uppers in the entire class) and computing
# a color, maybe using something like color_variant("#FF0000", -150),
# for now, this will have to do
bin_upper = target['tags']['bin_upper']
colormap = {
'0.01': '#2FFF00',
'0.05': '#64DD0E',
'0.1': '#9CDD0E',
'0.5': '#DDCC0E',
'1': '#DDB70E',
'5': '#FF6200',
'10': '#FF3C00',
'50': '#FF1E00',
'inf': '#FF0000'
}
if bin_upper in colormap:
target['color'] = colormap[bin_upper]
def apply_colors(graph):
'''
update target colors in a clever, dynamic way. basically it's about defining
colors for certain metrics (such as cpu idle metric = green), but since you
can group by arbitrary things, you might have a graph comparing the idle
values for different servers, in which case they should not be all green.
# the graph will look something like:
{
'promoted_constants': {'type': 'update_time', 'plugin': 'carbon'},
'from': '-24hours',
'until': 'now',
'constants': {'unit': 'ms', 'target_type': 'gauge'},
'targets': [
{
'id': u'carbon.agents.dfvimeographite2-a.avgUpdateTime',
'variables': {'agent': u'dfvimeographite2-a'},
'target': u'carbon.agents.dfvimeographite2-a.avgUpdateTime'
},
(...)
]
}
'''
# color targets based on tags, even when due to grouping metrics with the same tags (colors)
# show up on the same graph
rules_tags = [
# http stuff, for swift and others
[
{},
{
'http_method': {
'GET': colors['blue'][0],
'HEAD': colors['yellow'][0],
'PUT': colors['green'][0],
'REPLICATE': colors['brown'][0],
'DELETE': colors['red'][0]
}
}
],
[
{'stat': ['upper', 'upper_90']},
{
'http_method': {
'GET': colors['blue'][1],
'HEAD': colors['yellow'][1],
'PUT': colors['green'][1],
'REPLICATE': colors['brown'][1],
'DELETE': colors['red'][1]
}
}
],
]
# color targets based on tags, except when due to grouping metrics
# with the same tags show up on the same graph
rules_unique_tags = [
[
{'unit': 'cpu_state'},
{
'type': {
'idle': colors['green'][0],
'user': colors['blue'][0],
'system': colors['blue'][1],
'nice': colors['purple'][0],
'softirq': colors['red'][0],
'irq': colors['red'][1],
'iowait': colors['orange'][0],
'guest': colors['white'],
'guest_nice': colors['white'],
'steal': '#FFA791' # brighter red
}
}
],
[
{},
{
'mountpoint': {
'_var': colors['red'][0],
'_lib': colors['orange'][1],
'_boot': colors['blue'][0],
'_tmp': colors['purple'][0],
'root': colors['green'][0]
}
}
],
[
{'plugin': 'load'},
{
'type': {
'01': colors['red'][1],
'05': colors['red'][0],
'15': '#FFA791' # brighter red
}
}
],
[
{'unit': 'ms'},
{
'type': {
'update_time': colors['turq'][0]
}
}
],
[
{'unit': 'freq_abs'},
bin_set_color
]
]
for target in graph['targets']:
tags = dict(graph['constants'].items() + graph['promoted_constants'].items() + target['variables'].items())
for action in get_action_on_rules_match(rules_unique_tags, tags):
if callable(action): # hasattr(action, '__call__'):
action(graph, target)
else:
for (tag_key, matches) in action.items():
t = get_unique_tag_value(graph, target, tag_key)
if t is not None and t in matches:
target['color'] = matches[t]
for action in get_action_on_rules_match(rules_tags, target):
for (tag_key, matches) in action.items():
t = get_tag_value(graph, target, tag_key)
if t is not None and t in matches:
target['color'] = matches[t]
return graph
| 34.50463
| 115
| 0.504897
|
6139f994853b9ed4a5acd291f17192fddfc324bd
| 15,188
|
py
|
Python
|
src/runtime/runtime-gdb.py
|
simonferquel/go
|
f2a4c139c1e0cff35f89e4b5a531d5dedc5ed8e0
|
[
"BSD-3-Clause"
] | 536
|
2015-01-13T08:43:17.000Z
|
2021-09-27T11:36:22.000Z
|
src/runtime/runtime-gdb.py
|
simonferquel/go
|
f2a4c139c1e0cff35f89e4b5a531d5dedc5ed8e0
|
[
"BSD-3-Clause"
] | 17
|
2015-01-11T20:42:38.000Z
|
2021-11-23T01:18:26.000Z
|
src/runtime/runtime-gdb.py
|
simonferquel/go
|
f2a4c139c1e0cff35f89e4b5a531d5dedc5ed8e0
|
[
"BSD-3-Clause"
] | 161
|
2015-01-12T10:02:58.000Z
|
2022-01-05T07:27:06.000Z
|
# Copyright 2010 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""GDB Pretty printers and convenience functions for Go's runtime structures.
This script is loaded by GDB when it finds a .debug_gdb_scripts
section in the compiled binary. The [68]l linkers emit this with a
path to this file based on the path to the runtime package.
"""
# Known issues:
# - pretty printing only works for the 'native' strings. E.g. 'type
# foo string' will make foo a plain struct in the eyes of gdb,
# circumventing the pretty print triggering.
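# A minimal usage sketch (illustrative only): once GDB has loaded this script
# (automatically via .debug_gdb_scripts, or manually with `source`), the
# commands and convenience functions registered at the bottom of this file
# can be used like so; the variable names are hypothetical.
#
#     (gdb) source src/runtime/runtime-gdb.py
#     (gdb) info goroutines
#     (gdb) goroutine 1 bt
#     (gdb) p $len(some_go_slice)
#     (gdb) iface some_interface_value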
from __future__ import print_function
import re
import sys
print("Loading Go Runtime support.", file=sys.stderr)
#http://python3porting.com/differences.html
if sys.version > '3':
xrange = range
# allow to manually reload while developing
goobjfile = gdb.current_objfile() or gdb.objfiles()[0]
goobjfile.pretty_printers = []
# G state (runtime2.go)
def read_runtime_const(varname, default):
try:
return int(gdb.parse_and_eval(varname))
except Exception:
return int(default)
G_IDLE = read_runtime_const("'runtime._Gidle'", 0)
G_RUNNABLE = read_runtime_const("'runtime._Grunnable'", 1)
G_RUNNING = read_runtime_const("'runtime._Grunning'", 2)
G_SYSCALL = read_runtime_const("'runtime._Gsyscall'", 3)
G_WAITING = read_runtime_const("'runtime._Gwaiting'", 4)
G_MORIBUND_UNUSED = read_runtime_const("'runtime._Gmoribund_unused'", 5)
G_DEAD = read_runtime_const("'runtime._Gdead'", 6)
G_ENQUEUE_UNUSED = read_runtime_const("'runtime._Genqueue_unused'", 7)
G_COPYSTACK = read_runtime_const("'runtime._Gcopystack'", 8)
G_SCAN = read_runtime_const("'runtime._Gscan'", 0x1000)
G_SCANRUNNABLE = G_SCAN+G_RUNNABLE
G_SCANRUNNING = G_SCAN+G_RUNNING
G_SCANSYSCALL = G_SCAN+G_SYSCALL
G_SCANWAITING = G_SCAN+G_WAITING
sts = {
G_IDLE: 'idle',
G_RUNNABLE: 'runnable',
G_RUNNING: 'running',
G_SYSCALL: 'syscall',
G_WAITING: 'waiting',
G_MORIBUND_UNUSED: 'moribund',
G_DEAD: 'dead',
G_ENQUEUE_UNUSED: 'enqueue',
G_COPYSTACK: 'copystack',
G_SCAN: 'scan',
G_SCANRUNNABLE: 'runnable+s',
G_SCANRUNNING: 'running+s',
G_SCANSYSCALL: 'syscall+s',
G_SCANWAITING: 'waiting+s',
}
#
# Value wrappers
#
class SliceValue:
"Wrapper for slice values."
def __init__(self, val):
self.val = val
@property
def len(self):
return int(self.val['len'])
@property
def cap(self):
return int(self.val['cap'])
def __getitem__(self, i):
if i < 0 or i >= self.len:
raise IndexError(i)
ptr = self.val["array"]
return (ptr + i).dereference()
#
# Pretty Printers
#
class StringTypePrinter:
"Pretty print Go strings."
pattern = re.compile(r'^struct string( \*)?$')
def __init__(self, val):
self.val = val
def display_hint(self):
return 'string'
def to_string(self):
l = int(self.val['len'])
return self.val['str'].string("utf-8", "ignore", l)
class SliceTypePrinter:
"Pretty print slices."
pattern = re.compile(r'^struct \[\]')
def __init__(self, val):
self.val = val
def display_hint(self):
return 'array'
def to_string(self):
return str(self.val.type)[6:] # skip 'struct '
def children(self):
sval = SliceValue(self.val)
if sval.len > sval.cap:
return
for idx, item in enumerate(sval):
yield ('[{0}]'.format(idx), item)
class MapTypePrinter:
"""Pretty print map[K]V types.
Map-typed go variables are really pointers. dereference them in gdb
to inspect their contents with this pretty printer.
"""
pattern = re.compile(r'^map\[.*\].*$')
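	# Illustrative gdb usage only; the variable name `m` is hypothetical.
	# Because a map-typed Go variable is really a pointer, dereference it
	# so that this printer is triggered:
	#
	#     (gdb) p *m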
def __init__(self, val):
self.val = val
def display_hint(self):
return 'map'
def to_string(self):
return str(self.val.type)
def children(self):
B = self.val['B']
buckets = self.val['buckets']
oldbuckets = self.val['oldbuckets']
flags = self.val['flags']
inttype = self.val['hash0'].type
cnt = 0
for bucket in xrange(2 ** int(B)):
bp = buckets + bucket
if oldbuckets:
oldbucket = bucket & (2 ** (B - 1) - 1)
oldbp = oldbuckets + oldbucket
oldb = oldbp.dereference()
if (oldb['overflow'].cast(inttype) & 1) == 0: # old bucket not evacuated yet
if bucket >= 2 ** (B - 1):
continue # already did old bucket
bp = oldbp
while bp:
b = bp.dereference()
for i in xrange(8):
if b['tophash'][i] != 0:
k = b['keys'][i]
v = b['values'][i]
if flags & 1:
k = k.dereference()
if flags & 2:
v = v.dereference()
yield str(cnt), k
yield str(cnt + 1), v
cnt += 2
bp = b['overflow']
class ChanTypePrinter:
"""Pretty print chan[T] types.
Chan-typed go variables are really pointers. dereference them in gdb
to inspect their contents with this pretty printer.
"""
pattern = re.compile(r'^struct hchan<.*>$')
def __init__(self, val):
self.val = val
def display_hint(self):
return 'array'
def to_string(self):
return str(self.val.type)
def children(self):
# see chan.c chanbuf(). et is the type stolen from hchan<T>::recvq->first->elem
et = [x.type for x in self.val['recvq']['first'].type.target().fields() if x.name == 'elem'][0]
ptr = (self.val.address + 1).cast(et.pointer())
for i in range(self.val["qcount"]):
j = (self.val["recvx"] + i) % self.val["dataqsiz"]
yield ('[{0}]'.format(i), (ptr + j).dereference())
#
# Register all the *Printer classes above.
#
def makematcher(klass):
def matcher(val):
try:
if klass.pattern.match(str(val.type)):
return klass(val)
except Exception:
pass
return matcher
goobjfile.pretty_printers.extend([makematcher(var) for var in vars().values() if hasattr(var, 'pattern')])
#
# Utilities
#
def pc_to_int(pc):
# python2 will not cast pc (type void*) to an int cleanly
# instead python2 and python3 work with the hex string representation
# of the void pointer which we can parse back into an int.
# int(pc) will not work.
try:
# python3 / newer versions of gdb
pc = int(pc)
except gdb.error:
# str(pc) can return things like
# "0x429d6c <runtime.gopark+284>", so
# chop at first space.
pc = int(str(pc).split(None, 1)[0], 16)
return pc
#
# For reference, this is what we're trying to do:
# eface: p *(*(struct 'runtime.rtype'*)'main.e'->type_->data)->string
# iface: p *(*(struct 'runtime.rtype'*)'main.s'->tab->Type->data)->string
#
# interface types can't be recognized by their name, instead we check
# if they have the expected fields. Unfortunately the mapping of
# fields to python attributes in gdb.py isn't complete: you can't test
# for presence other than by trapping.
def is_iface(val):
try:
return str(val['tab'].type) == "struct runtime.itab *" and str(val['data'].type) == "void *"
except gdb.error:
pass
def is_eface(val):
try:
return str(val['_type'].type) == "struct runtime._type *" and str(val['data'].type) == "void *"
except gdb.error:
pass
def lookup_type(name):
try:
return gdb.lookup_type(name)
except gdb.error:
pass
try:
return gdb.lookup_type('struct ' + name)
except gdb.error:
pass
try:
return gdb.lookup_type('struct ' + name[1:]).pointer()
except gdb.error:
pass
def iface_commontype(obj):
if is_iface(obj):
go_type_ptr = obj['tab']['_type']
elif is_eface(obj):
go_type_ptr = obj['_type']
else:
return
return go_type_ptr.cast(gdb.lookup_type("struct reflect.rtype").pointer()).dereference()
def iface_dtype(obj):
"Decode type of the data field of an eface or iface struct."
# known issue: dtype_name decoded from runtime.rtype is "nested.Foo"
# but the dwarf table lists it as "full/path/to/nested.Foo"
dynamic_go_type = iface_commontype(obj)
if dynamic_go_type is None:
return
dtype_name = dynamic_go_type['string'].dereference()['str'].string()
dynamic_gdb_type = lookup_type(dtype_name)
if dynamic_gdb_type is None:
return
type_size = int(dynamic_go_type['size'])
uintptr_size = int(dynamic_go_type['size'].type.sizeof) # size is itself an uintptr
if type_size > uintptr_size:
dynamic_gdb_type = dynamic_gdb_type.pointer()
return dynamic_gdb_type
def iface_dtype_name(obj):
"Decode type name of the data field of an eface or iface struct."
dynamic_go_type = iface_commontype(obj)
if dynamic_go_type is None:
return
return dynamic_go_type['string'].dereference()['str'].string()
class IfacePrinter:
"""Pretty print interface values
Casts the data field to the appropriate dynamic type."""
def __init__(self, val):
self.val = val
def display_hint(self):
return 'string'
def to_string(self):
if self.val['data'] == 0:
return 0x0
try:
dtype = iface_dtype(self.val)
except Exception:
return "<bad dynamic type>"
if dtype is None: # trouble looking up, print something reasonable
return "({typename}){data}".format(
typename=iface_dtype_name(self.val), data=self.val['data'])
try:
return self.val['data'].cast(dtype).dereference()
except Exception:
pass
return self.val['data'].cast(dtype)
def ifacematcher(val):
if is_iface(val) or is_eface(val):
return IfacePrinter(val)
goobjfile.pretty_printers.append(ifacematcher)
#
# Convenience Functions
#
class GoLenFunc(gdb.Function):
"Length of strings, slices, maps or channels"
how = ((StringTypePrinter, 'len'), (SliceTypePrinter, 'len'), (MapTypePrinter, 'count'), (ChanTypePrinter, 'qcount'))
def __init__(self):
gdb.Function.__init__(self, "len")
def invoke(self, obj):
typename = str(obj.type)
for klass, fld in self.how:
if klass.pattern.match(typename):
return obj[fld]
class GoCapFunc(gdb.Function):
"Capacity of slices or channels"
how = ((SliceTypePrinter, 'cap'), (ChanTypePrinter, 'dataqsiz'))
def __init__(self):
gdb.Function.__init__(self, "cap")
def invoke(self, obj):
typename = str(obj.type)
for klass, fld in self.how:
if klass.pattern.match(typename):
return obj[fld]
class DTypeFunc(gdb.Function):
"""Cast Interface values to their dynamic type.
For non-interface types this behaves as the identity operation.
"""
def __init__(self):
gdb.Function.__init__(self, "dtype")
def invoke(self, obj):
try:
return obj['data'].cast(iface_dtype(obj))
except gdb.error:
pass
return obj
#
# Commands
#
def linked_list(ptr, linkfield):
while ptr:
yield ptr
ptr = ptr[linkfield]
class GoroutinesCmd(gdb.Command):
"List all goroutines."
def __init__(self):
gdb.Command.__init__(self, "info goroutines", gdb.COMMAND_STACK, gdb.COMPLETE_NONE)
def invoke(self, _arg, _from_tty):
# args = gdb.string_to_argv(arg)
vp = gdb.lookup_type('void').pointer()
for ptr in SliceValue(gdb.parse_and_eval("'runtime.allgs'")):
if ptr['atomicstatus'] == G_DEAD:
continue
s = ' '
if ptr['m']:
s = '*'
pc = ptr['sched']['pc'].cast(vp)
pc = pc_to_int(pc)
blk = gdb.block_for_pc(pc)
status = int(ptr['atomicstatus'])
st = sts.get(status, "unknown(%d)" % status)
print(s, ptr['goid'], "{0:8s}".format(st), blk.function)
def find_goroutine(goid):
"""
find_goroutine attempts to find the goroutine identified by goid.
It returns a tuple of gdb.Value's representing the stack pointer
and program counter pointer for the goroutine.
@param int goid
@return tuple (gdb.Value, gdb.Value)
"""
vp = gdb.lookup_type('void').pointer()
for ptr in SliceValue(gdb.parse_and_eval("'runtime.allgs'")):
if ptr['atomicstatus'] == G_DEAD:
continue
if ptr['goid'] == goid:
break
else:
return None, None
# Get the goroutine's saved state.
pc, sp = ptr['sched']['pc'], ptr['sched']['sp']
status = ptr['atomicstatus']&~G_SCAN
# Goroutine is not running nor in syscall, so use the info in goroutine
if status != G_RUNNING and status != G_SYSCALL:
return pc.cast(vp), sp.cast(vp)
# If the goroutine is in a syscall, use syscallpc/sp.
pc, sp = ptr['syscallpc'], ptr['syscallsp']
if sp != 0:
return pc.cast(vp), sp.cast(vp)
# Otherwise, the goroutine is running, so it doesn't have
# saved scheduler state. Find G's OS thread.
m = ptr['m']
if m == 0:
return None, None
for thr in gdb.selected_inferior().threads():
if thr.ptid[1] == m['procid']:
break
else:
return None, None
# Get scheduler state from the G's OS thread state.
curthr = gdb.selected_thread()
try:
thr.switch()
pc = gdb.parse_and_eval('$pc')
sp = gdb.parse_and_eval('$sp')
finally:
curthr.switch()
return pc.cast(vp), sp.cast(vp)
class GoroutineCmd(gdb.Command):
"""Execute gdb command in the context of goroutine <goid>.
Switch PC and SP to the ones in the goroutine's G structure,
execute an arbitrary gdb command, and restore PC and SP.
Usage: (gdb) goroutine <goid> <gdbcmd>
You could pass "all" as <goid> to apply <gdbcmd> to all goroutines.
For example: (gdb) goroutine all <gdbcmd>
Note that it is ill-defined to modify state in the context of a goroutine.
Restrict yourself to inspecting values.
"""
def __init__(self):
gdb.Command.__init__(self, "goroutine", gdb.COMMAND_STACK, gdb.COMPLETE_NONE)
def invoke(self, arg, _from_tty):
goid_str, cmd = arg.split(None, 1)
goids = []
if goid_str == 'all':
for ptr in SliceValue(gdb.parse_and_eval("'runtime.allgs'")):
goids.append(int(ptr['goid']))
else:
goids = [int(gdb.parse_and_eval(goid_str))]
for goid in goids:
self.invoke_per_goid(goid, cmd)
def invoke_per_goid(self, goid, cmd):
pc, sp = find_goroutine(goid)
if not pc:
print("No such goroutine: ", goid)
return
pc = pc_to_int(pc)
save_frame = gdb.selected_frame()
gdb.parse_and_eval('$save_sp = $sp')
gdb.parse_and_eval('$save_pc = $pc')
# In GDB, assignments to sp must be done from the
# top-most frame, so select frame 0 first.
gdb.execute('select-frame 0')
gdb.parse_and_eval('$sp = {0}'.format(str(sp)))
gdb.parse_and_eval('$pc = {0}'.format(str(pc)))
try:
gdb.execute(cmd)
finally:
# In GDB, assignments to sp must be done from the
# top-most frame, so select frame 0 first.
gdb.execute('select-frame 0')
gdb.parse_and_eval('$pc = $save_pc')
gdb.parse_and_eval('$sp = $save_sp')
save_frame.select()
class GoIfaceCmd(gdb.Command):
"Print Static and dynamic interface types"
def __init__(self):
gdb.Command.__init__(self, "iface", gdb.COMMAND_DATA, gdb.COMPLETE_SYMBOL)
def invoke(self, arg, _from_tty):
for obj in gdb.string_to_argv(arg):
try:
#TODO fix quoting for qualified variable names
obj = gdb.parse_and_eval(str(obj))
except Exception as e:
print("Can't parse ", obj, ": ", e)
continue
if obj['data'] == 0:
dtype = "nil"
else:
dtype = iface_dtype(obj)
if dtype is None:
print("Not an interface: ", obj.type)
continue
print("{0}: {1}".format(obj.type, dtype))
# TODO: print interface's methods and dynamic type's func pointers thereof.
#rsc: "to find the number of entries in the itab's Fn field look at
# itab.inter->numMethods
# i am sure i have the names wrong but look at the interface type
# and its method count"
# so Itype will start with a commontype which has kind = interface
#
# Register all convenience functions and CLI commands
#
GoLenFunc()
GoCapFunc()
DTypeFunc()
GoroutinesCmd()
GoroutineCmd()
GoIfaceCmd()
| 25.104132
| 118
| 0.686924
|
6011c83df49cb446b152e504c5b16a5dc45fb9be
| 4,115
|
py
|
Python
|
python/envs/hackathon/lib/python3.7/site-packages/gensim/test/test_keywords.py
|
FISHackathon2020/RAN
|
cb5e1459f4d26bd619ba7244979fce277b44aba9
|
[
"MIT"
] | 2
|
2020-09-30T00:11:09.000Z
|
2021-10-04T13:00:38.000Z
|
python/envs/hackathon/lib/python3.7/site-packages/gensim/test/test_keywords.py
|
FISHackathon2020/RAN
|
cb5e1459f4d26bd619ba7244979fce277b44aba9
|
[
"MIT"
] | null | null | null |
python/envs/hackathon/lib/python3.7/site-packages/gensim/test/test_keywords.py
|
FISHackathon2020/RAN
|
cb5e1459f4d26bd619ba7244979fce277b44aba9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated test to reproduce the results of Mihalcea and Tarau (2004).
Mihalcea and Tarau (2004) introduces the TextRank summarization algorithm.
As a validation of the gensim implementation we reproduced its results
in this test.
"""
import os.path
import logging
import unittest
from gensim import utils
from gensim.summarization import keywords
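# A minimal usage sketch (illustrative only) of the keywords() call exercised
# by the tests below; the input path is hypothetical.
#
#     text = open('some_document.txt').read()
#     print(keywords(text, words=10, split=True))  # top TextRank keywords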
class TestKeywordsTest(unittest.TestCase):
def test_text_keywords(self):
pre_path = os.path.join(os.path.dirname(__file__), 'test_data')
with utils.open(os.path.join(pre_path, "mihalcea_tarau.txt"), mode="r") as f:
text = f.read()
# calculate keywords
generated_keywords = keywords(text, split=True)
# To be compared to the reference.
with utils.open(os.path.join(pre_path, "mihalcea_tarau.kw.txt"), mode="r") as f:
kw = f.read().strip().split("\n")
self.assertEqual({str(x) for x in generated_keywords}, {str(x) for x in kw})
def test_text_keywords_words(self):
pre_path = os.path.join(os.path.dirname(__file__), 'test_data')
with utils.open(os.path.join(pre_path, "mihalcea_tarau.txt"), mode="r") as f:
text = f.read()
        # request 15 keywords (the returned list may be slightly longer)
generated_keywords = keywords(text, words=15, split=True)
self.assertEqual(len(generated_keywords), 16)
def test_text_keywords_pos(self):
pre_path = os.path.join(os.path.dirname(__file__), 'test_data')
with utils.open(os.path.join(pre_path, "mihalcea_tarau.txt"), mode="r") as f:
text = f.read()
# calculate keywords using only certain parts of speech
generated_keywords_nnvbjj = keywords(text, pos_filter=['NN', 'VB', 'JJ'], ratio=0.3, split=True)
# To be compared to the reference.
with utils.open(os.path.join(pre_path, "mihalcea_tarau.kwpos.txt"), mode="r") as f:
kw = f.read().strip().split("\n")
self.assertEqual({str(x) for x in generated_keywords_nnvbjj}, {str(x) for x in kw})
def test_text_summarization_raises_exception_on_short_input_text(self):
pre_path = os.path.join(os.path.dirname(__file__), 'test_data')
with utils.open(os.path.join(pre_path, "testsummarization_unrelated.txt"), mode="r") as f:
text = f.read()
# Keeps the first 8 sentences to make the text shorter.
text = "\n".join(text.split('\n')[:8])
self.assertTrue(keywords(text) is not None)
def test_keywords_ratio(self):
pre_path = os.path.join(os.path.dirname(__file__), 'test_data')
with utils.open(os.path.join(pre_path, "mihalcea_tarau.txt"), mode="r") as f:
text = f.read()
# Check ratio parameter is well behaved. Because length is taken on tokenized clean text
# we just check that ratio 20% is twice as long as ratio 10%
# Values of 10% and 20% were carefully selected for this test to avoid
# numerical instabilities when several keywords have almost the same score
selected_docs_12 = keywords(text, ratio=0.1, split=True)
selected_docs_21 = keywords(text, ratio=0.2, split=True)
self.assertAlmostEqual(float(len(selected_docs_21)) / len(selected_docs_12), float(21) / 12, places=1)
def test_text_keywords_with_small_graph(self):
# regression test, we get graph 2x2 on this text
text = 'IT: Utilities A look at five utilities to make your PCs more, efficient, effective, and efficacious'
kwds = keywords(text, words=1, split=True)
self.assertTrue(len(kwds))
def test_text_keywords_without_graph_edges(self):
# regression test, we get graph with no edges on this text
text = 'Sitio construcción. Estaremos línea.'
kwds = keywords(text, deacc=False, scores=True)
self.assertFalse(len(kwds))
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main()
| 38.101852
| 116
| 0.670717
|
53569f4b983a939d121273e7302500db05b9b037
| 366
|
py
|
Python
|
tests/test_exceptions.py
|
Roynecro97/easypy
|
9f36732b558477557b8a57cfad2840767eff0d12
|
[
"BSD-3-Clause"
] | 7
|
2020-03-23T08:30:29.000Z
|
2020-12-05T14:51:49.000Z
|
tests/test_exceptions.py
|
Roynecro97/easypy
|
9f36732b558477557b8a57cfad2840767eff0d12
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_exceptions.py
|
Roynecro97/easypy
|
9f36732b558477557b8a57cfad2840767eff0d12
|
[
"BSD-3-Clause"
] | 6
|
2020-04-28T12:20:14.000Z
|
2022-02-15T15:01:42.000Z
|
from easypy.exceptions import TException
from easypy.bunch import Bunch
class T(TException):
template = "The happened: {what}"
def test_pickle_texception():
import pickle
t1 = T(what="happened", a=1, b=Bunch(x=[1, 2, 3], y=range(5)))
t2 = pickle.loads(pickle.dumps(t1))
assert t1.render() == t2.render()
assert t1._params == t2._params
| 21.529412
| 66
| 0.666667
|
b830bc36c24bab7853e81f4fe55a1231e90d9390
| 110
|
py
|
Python
|
src/pipupgrade/jobs/__init__.py
|
brian6932/pipupgrade
|
daea6fe5a4e5300a4e8299e079e3d1d5e56b8a18
|
[
"MIT"
] | null | null | null |
src/pipupgrade/jobs/__init__.py
|
brian6932/pipupgrade
|
daea6fe5a4e5300a4e8299e079e3d1d5e56b8a18
|
[
"MIT"
] | null | null | null |
src/pipupgrade/jobs/__init__.py
|
brian6932/pipupgrade
|
daea6fe5a4e5300a4e8299e079e3d1d5e56b8a18
|
[
"MIT"
] | null | null | null |
jobs = [
{
"name": "build_dependency_tree"
},
{
"name": "build_proxy_list"
}
]
| 13.75
| 39
| 0.445455
|
987b746a2d9822f86ca2fb11200ea9434e3ebe1b
| 3,667
|
py
|
Python
|
generate_instagram_session.py
|
Soebb/Instagram-Bot
|
ab5d830498ef3dccdcc26a728cd212b11258d291
|
[
"MIT"
] | 195
|
2021-06-02T05:53:27.000Z
|
2022-03-30T20:41:30.000Z
|
generate_instagram_session.py
|
Soebb/Instagram-Bot
|
ab5d830498ef3dccdcc26a728cd212b11258d291
|
[
"MIT"
] | 11
|
2021-06-06T13:39:17.000Z
|
2022-03-25T19:47:26.000Z
|
generate_instagram_session.py
|
Soebb/Instagram-Bot
|
ab5d830498ef3dccdcc26a728cd212b11258d291
|
[
"MIT"
] | 295
|
2021-06-03T13:56:01.000Z
|
2022-03-29T05:22:54.000Z
|
#MIT License
#Copyright (c) 2021 subinps
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
from pyrogram import Client
from pyrogram.errors.exceptions.bad_request_400 import PeerIdInvalid
from pyrogram.errors.exceptions.bad_request_400 import UserIsBlocked
import asyncio
import os
from instaloader import Instaloader, TwoFactorAuthRequiredException
L = Instaloader()
async def generate():
print("Enter your Telegram API_ID")
API_ID = input()
print("Enter API_HASH")
API_HASH = input()
print("Enter Your BOT_TOKEN from Botfather")
BOT_TOKEN = input()
bot = Client("INSTASESSION", API_ID, API_HASH, bot_token=BOT_TOKEN)
await bot.start()
print("Now Enter your Instagram username")
id = input()
print("Enter Your Instagram Password")
pwd = input()
try:
L.login(id, pwd)
L.save_session_to_file(filename=f"./{id}")
except TwoFactorAuthRequiredException:
        print(
            "Your account has Two Factor Authentication enabled.\nNow enter the code received on your mobile."
        )
code = input()
L.two_factor_login(code)
L.save_session_to_file(filename=f"./{id}")
except Exception as e:
print(e)
return
    print("Successfully logged into Instagram")
while True:
        print("To send your session file, enter your Telegram ID as an integer")
tg_id = input()
try:
owner = int(tg_id)
break
except:
            print("Oops, that's invalid. Enter the ID as an integer")
try:
f = await bot.send_document(
chat_id=owner,
document=f"./{id}",
file_name=tg_id,
caption=
"⚠️ KEEP THIS SESSION FILE SAFE AND DO NOT SHARE WITH ANYBODY",
)
file_id = f.document.file_id
await bot.send_message(
chat_id=owner,
text=
f"Here is Your <code>INSTA_SESSIONFILE_ID</code>\n\n<code>{file_id}</code>\n\n\n⚠️ KEEP THIS SESSION FILE SAFE AND DO NOT SHARE WITH ANYBODY"
)
print(
"I have messaged you the INSTA_SESSIONFILE_ID. Check your telegram messages"
)
except PeerIdInvalid:
print(
"It seems you have not yet started the bot or Telegram ID given is invalid. Send /start to your bot first and try again"
)
except UserIsBlocked:
print(
"It seems you have BLOCKED the Bot. Unblock the bot and try again."
)
except Exception as e:
print(e)
await bot.stop()
os.remove(f"./{id}")
os.remove("INSTASESSION.session")
loop = asyncio.get_event_loop()
loop.run_until_complete(generate())
| 35.601942
| 153
| 0.67712
|
e00b2d9b2b9b94e8a9b26245a0dcf91822b48ca3
| 797
|
py
|
Python
|
src/Not in use/proxie_check.py
|
Data-is-life/apt-get-home
|
77a212c19a90f201c70759fd9e99493657247ae7
|
[
"Unlicense"
] | null | null | null |
src/Not in use/proxie_check.py
|
Data-is-life/apt-get-home
|
77a212c19a90f201c70759fd9e99493657247ae7
|
[
"Unlicense"
] | null | null | null |
src/Not in use/proxie_check.py
|
Data-is-life/apt-get-home
|
77a212c19a90f201c70759fd9e99493657247ae7
|
[
"Unlicense"
] | null | null | null |
import time
import sys
import urllib
import random
from urllib.request import urlopen, Request
from urllib.error import HTTPError
from urllib.error import URLError
import requests
import string
def proxie_check(proxies):
default_list = []
url = 'https://httpbin.org/ip'
for i in range(0, len(proxies)):
proxy = proxies[i]
print(i+1)
start_time = time.time()
try:
response = requests.get(
url, proxies={"http": proxy, "https": proxy})
print(response.json())
print(time.time() - start_time)
except:
            print("Skipping. Connection error")
default_list.append(i+1)
print(time.time() - start_time)
print(default_list)
return default_list
| 25.709677
| 61
| 0.61857
|
ef79750d2f6f31e6cc59122d98d5bf69fd395549
| 8,122
|
py
|
Python
|
pyquil/tests/test_noise.py
|
nicolasochem/pyquil
|
3d2d5439d6773681e12fbafcf40bddc37cea18c2
|
[
"Apache-2.0"
] | 3
|
2021-11-08T11:46:42.000Z
|
2021-12-27T10:13:38.000Z
|
pyquil/tests/test_noise.py
|
nicolasochem/pyquil
|
3d2d5439d6773681e12fbafcf40bddc37cea18c2
|
[
"Apache-2.0"
] | 2
|
2021-11-09T14:57:09.000Z
|
2022-01-12T12:35:58.000Z
|
artifacts/old_dataset_versions/original_commits_v02/pyquil/pyquil#384/after/test_noise.py
|
MattePalte/Bugs-Quantum-Computing-Platforms
|
0c1c805fd5dfce465a8955ee3faf81037023a23e
|
[
"MIT"
] | null | null | null |
from collections import OrderedDict
import numpy as np
import pytest
from mock import Mock
from pyquil.api import QPUConnection
from pyquil.gates import CZ, RZ, RX, I, H
from pyquil.noise import (damping_kraus_map, dephasing_kraus_map, tensor_kraus_maps,
_get_program_gates, _decoherence_noise_model,
add_decoherence_noise, combine_kraus_maps, damping_after_dephasing,
INFINITY, apply_noise_model, _noise_model_program_header, KrausModel,
NoiseModel, corrupt_bitstring_probs, correct_bitstring_probs,
estimate_bitstring_probs, bitstring_probs_to_z_moments,
estimate_assignment_probs, NO_NOISE)
from pyquil.quil import Pragma, Program
from pyquil.quilbase import DefGate, Gate
def test_damping_kraus_map():
p = 0.05
k1, k2 = damping_kraus_map(p=p)
assert k1[1, 1] == np.sqrt(1 - p)
assert k2[0, 1] == np.sqrt(p)
def test_dephasing_kraus_map():
p = 0.05
k1, k2 = dephasing_kraus_map(p=p)
np.testing.assert_allclose(np.diag(k1), [np.sqrt(1 - p)] * 2)
np.testing.assert_allclose(np.abs(np.diag(k2)), [np.sqrt(p)] * 2)
def test_tensor_kraus_maps():
damping = damping_kraus_map()
k1, k2, k3, k4 = tensor_kraus_maps(damping, damping)
assert k1.shape == (4, 4)
assert k2.shape == (4, 4)
assert k3.shape == (4, 4)
assert k4.shape == (4, 4)
np.testing.assert_allclose(k1[-1, -1], 1 - 0.1)
def test_combine_kraus_maps():
damping = damping_kraus_map()
dephasing = dephasing_kraus_map()
k1, k2, k3, k4 = combine_kraus_maps(damping, dephasing)
assert k1.shape == (2, 2)
assert k2.shape == (2, 2)
assert k3.shape == (2, 2)
assert k4.shape == (2, 2)
def test_damping_after_dephasing():
damping = damping_kraus_map()
dephasing = dephasing_kraus_map()
ks_ref = combine_kraus_maps(damping, dephasing)
ks_actual = damping_after_dephasing(10, 10, 1)
np.testing.assert_allclose(ks_actual, ks_ref)
def test_noise_helpers():
gates = RX(np.pi / 2)(0), RX(-np.pi / 2)(1), I(1), CZ(0, 1)
prog = Program(*gates)
inferred_gates = _get_program_gates(prog)
assert set(inferred_gates) == set(gates)
def test_decoherence_noise():
prog = Program(RX(np.pi / 2)(0), CZ(0, 1), RZ(np.pi)(0))
gates = _get_program_gates(prog)
m1 = _decoherence_noise_model(gates, T1=INFINITY, T2=INFINITY, ro_fidelity=1.)
# with no readout error, assignment_probs = identity matrix
assert np.allclose(m1.assignment_probs[0], np.eye(2))
assert np.allclose(m1.assignment_probs[1], np.eye(2))
for g in m1.gates:
# with infinite coherence time all kraus maps should only have a single, unitary kraus op
assert len(g.kraus_ops) == 1
k0, = g.kraus_ops
# check unitarity
k0dk0 = k0.dot(k0.conjugate().transpose())
assert np.allclose(k0dk0, np.eye(k0dk0.shape[0]))
# verify that selective (by qubit) dephasing and readout infidelity is working
m2 = _decoherence_noise_model(gates, T1=INFINITY, T2={0: 30e-6}, ro_fidelity={0: .95, 1: 1.0})
assert np.allclose(m2.assignment_probs[0], [[.95, 0.05],
[.05, .95]])
assert np.allclose(m2.assignment_probs[1], np.eye(2))
for g in m2.gates:
if 0 in g.targets:
# single dephasing (no damping) channel on qc 0, no noise on qc1 -> 2 Kraus ops
assert len(g.kraus_ops) == 2
else:
assert len(g.kraus_ops) == 1
# verify that combined T1 and T2 will lead to 4 outcome Kraus map.
m3 = _decoherence_noise_model(gates, T1={0: 30e-6}, T2={0: 30e-6})
for g in m3.gates:
if 0 in g.targets:
# damping (implies dephasing) channel on qc 0, no noise on qc1 -> 4 Kraus ops
assert len(g.kraus_ops) == 4
else:
assert len(g.kraus_ops) == 1
# verify that gate names are translated
new_prog = apply_noise_model(prog, m3)
new_gates = _get_program_gates(new_prog)
# check that headers have been embedded
headers = _noise_model_program_header(m3)
assert all((isinstance(i, Pragma) and i.command in ["ADD-KRAUS", "READOUT-POVM"]) or
isinstance(i, DefGate) for i in headers)
assert headers.out() in new_prog.out()
# verify that high-level add_decoherence_noise reproduces new_prog
new_prog2 = add_decoherence_noise(prog, T1={0: 30e-6}, T2={0: 30e-6})
assert new_prog == new_prog2
def test_kraus_model():
km = KrausModel('I', (5.,), (0, 1), [np.array([[1 + 1j]])], 1.0)
d = km.to_dict()
assert d == OrderedDict([
('gate', km.gate),
('params', km.params),
('targets', (0, 1)),
('kraus_ops', [[[[1.]], [[1.0]]]]),
('fidelity', 1.0)
])
assert KrausModel.from_dict(d) == km
def test_noise_model():
km1 = KrausModel('I', (5.,), (0, 1), [np.array([[1 + 1j]])], 1.0)
km2 = KrausModel('RX', (np.pi / 2,), (0,), [np.array([[1 + 1j]])], 1.0)
nm = NoiseModel([km1, km2], {0: np.eye(2), 1: np.eye(2)})
assert nm == NoiseModel.from_dict(nm.to_dict())
assert nm.gates_by_name("I") == [km1]
assert nm.gates_by_name("RX") == [km2]
def test_readout_compensation():
np.random.seed(1234124)
p = np.random.rand(2, 2, 2, 2, 2, 2)
p /= p.sum()
aps = [np.eye(2) + .2 * (np.random.rand(2, 2) - 1) for _ in range(p.ndim)]
for ap in aps:
ap.flat[ap.flat < 0] = 0.
ap /= ap.sum()
assert np.alltrue(ap >= 0)
assert np.alltrue(p >= 0)
p_corrupted = corrupt_bitstring_probs(p, aps)
p_restored = correct_bitstring_probs(p_corrupted, aps)
assert np.allclose(p, p_restored)
results = [[0, 0, 0]] * 100 + [[0, 1, 1]] * 200
p1 = estimate_bitstring_probs(results)
assert np.isclose(p1[0, 0, 0], 1. / 3.)
assert np.isclose(p1[0, 1, 1], 2. / 3.)
assert np.isclose(p1.sum(), 1.)
zm = bitstring_probs_to_z_moments(p1)
assert np.isclose(zm[0, 0, 0], 1)
assert np.isclose(zm[1, 0, 0], 1)
assert np.isclose(zm[0, 1, 0], -1. / 3)
assert np.isclose(zm[0, 0, 1], -1. / 3)
assert np.isclose(zm[0, 1, 1], 1.)
assert np.isclose(zm[1, 1, 0], -1. / 3)
assert np.isclose(zm[1, 0, 1], -1. / 3)
assert np.isclose(zm[1, 1, 1], 1.)
def test_estimate_assignment_probs():
cxn = Mock(spec=QPUConnection)
trials = 100
p00 = .8
p11 = .75
cxn.run.side_effect = [
[[0]] * int(round(p00 * trials)) + [[1]] * int(round((1 - p00) * trials)),
[[1]] * int(round(p11 * trials)) + [[0]] * int(round((1 - p11) * trials))
]
ap_target = np.array([[p00, 1 - p11],
[1 - p00, p11]])
povm_pragma = Pragma("READOUT-POVM", (0, "({} {} {} {})".format(*ap_target.flatten())))
ap = estimate_assignment_probs(0, trials, cxn, Program(povm_pragma))
assert np.allclose(ap, ap_target)
for call in cxn.run.call_args_list:
args, kwargs = call
prog = args[0]
assert prog._instructions[0] == povm_pragma
def test_apply_noise_model():
p = Program(RX(np.pi / 2)(0), RX(np.pi / 2)(1), CZ(0, 1), RX(np.pi / 2)(1))
noise_model = _decoherence_noise_model(_get_program_gates(p))
pnoisy = apply_noise_model(p, noise_model)
for i in pnoisy:
if isinstance(i, DefGate):
pass
elif isinstance(i, Pragma):
assert i.command in ['ADD-KRAUS', 'READOUT-POVM']
elif isinstance(i, Gate):
assert i.name in NO_NOISE or not i.params
def test_apply_noise_model_perturbed_angles():
eps = 1e-15
p = Program(RX(np.pi / 2 + eps)(0), RX(np.pi / 2 - eps)(1), CZ(0, 1), RX(np.pi / 2 + eps)(1))
noise_model = _decoherence_noise_model(_get_program_gates(p))
pnoisy = apply_noise_model(p, noise_model)
for i in pnoisy:
if isinstance(i, DefGate):
pass
elif isinstance(i, Pragma):
assert i.command in ['ADD-KRAUS', 'READOUT-POVM']
elif isinstance(i, Gate):
assert i.name in NO_NOISE or not i.params
| 36.097778
| 98
| 0.610933
|
c36f0a4973cb4b962ff502526a8ca99ae068ddf6
| 1,632
|
py
|
Python
|
translate-google.py
|
sgbalogh/nlu-winograd
|
5c067ae9b03dffddfb5627d4764e771f7f2470d2
|
[
"MIT"
] | 1
|
2018-12-22T08:34:02.000Z
|
2018-12-22T08:34:02.000Z
|
translate-google.py
|
sgbalogh/nlu-winograd
|
5c067ae9b03dffddfb5627d4764e771f7f2470d2
|
[
"MIT"
] | null | null | null |
translate-google.py
|
sgbalogh/nlu-winograd
|
5c067ae9b03dffddfb5627d4764e771f7f2470d2
|
[
"MIT"
] | 2
|
2018-04-18T04:13:43.000Z
|
2020-01-30T23:33:15.000Z
|
import json
import requests
def translate(api_key, text, source, target):
api_endpoint = "https://translation.googleapis.com/language/translate/v2?key=" + api_key
headers = {'Content-Type': 'application/json'}
query = {
'q': text,
'source': source,
'target': target,
'format': 'text'
}
response = requests.post(api_endpoint, headers=headers, json=query)
return response.json()
def paraphrase(api_key, text, via_language):
translated_result = translate(api_key, text, "en", via_language)
paraphrase = translate(api_key, translated_result['data']['translations'][0]['translatedText'], via_language, "en")
return paraphrase['data']['translations'][0]['translatedText']
def create_paraphrases(api_key, path_to_input_text, path_to_output_text):
f_out = open(path_to_output_text, "w")
f = open(path_to_input_text, "r")
for line in f:
if line == "\n":
continue
else:
wino_id = line.strip()
premise = f.readline().strip()
hypothesis = f.readline().strip()
label = f.readline().strip()
            out = generate_paraphrase_output(api_key, wino_id, premise, hypothesis, label)
f_out.write(out)
f_out.close()
def generate_paraphrase_output(api_key, id, premise, hypothesis, label):
lines = []
languages = ['es','zh','hu','fr', 'eu', 'ja']
for language in languages:
lines.append(id)
lines.append(premise)
lines.append(paraphrase(api_key, hypothesis, language))
lines.append(label)
lines.append("")
return "\n".join(lines)
| 35.478261
| 119
| 0.635417
|
704138347a03ef22919264aafcc5a464eaab1963
| 871
|
py
|
Python
|
tests/common/datatypes.py
|
ssato/ansibl-lint-custom-rules
|
bdef2bab13bdb08509aed45a274f9f1a681542ec
|
[
"MIT"
] | 7
|
2020-05-26T16:01:19.000Z
|
2022-02-11T15:12:39.000Z
|
tests/common/datatypes.py
|
ansible-middleware/ansible-lint-custom-rules
|
181ec6b8e562fc136caf6fe67a488c9e996686a3
|
[
"MIT"
] | 6
|
2021-05-02T15:28:38.000Z
|
2022-02-26T15:22:54.000Z
|
tests/common/datatypes.py
|
ansible-middleware/ansible-lint-custom-rules
|
181ec6b8e562fc136caf6fe67a488c9e996686a3
|
[
"MIT"
] | 3
|
2021-05-11T03:13:20.000Z
|
2022-02-04T08:44:51.000Z
|
# Copyright (C) 2020, 2021 Satoru SATOH <satoru.satoh@gmail.com>
# SPDX-License-Identifier: MIT
#
# pylint: disable=inherit-non-class
"""Common utility functios and classes - datatypes.
"""
import pathlib
import typing
class SubCtx(typing.NamedTuple):
"""A namedtuple object to keep sub context info, conf and env.
"""
conf: typing.Dict[str, typing.Any]
env: typing.Dict[str, str]
os_env: typing.Dict[str, str]
class Context(typing.NamedTuple):
"""A namedtuple object to keep context info.
"""
workdir: pathlib.Path
lintables: typing.List[typing.Any] # TBD
conf: typing.Dict[str, typing.Any]
env: typing.Dict[str, str]
os_env: typing.Dict[str, str]
class Result(typing.NamedTuple):
"""A namedtuple object to keep lint result and context info.
"""
result: typing.Any
ctx: Context
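
# Minimal usage sketch (values are illustrative only): Context and Result are
# plain NamedTuples, so building them is ordinary keyword construction.
#
#   ctx = Context(workdir=pathlib.Path("/tmp/work"), lintables=[],
#                 conf={}, env={}, os_env={})
#   res = Result(result=None, ctx=ctx)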
# vim:sw=4:ts=4:et:
| 24.194444
| 66
| 0.681975
|
ddef13a94367bc32ea7f52dba0b0bcf748f88186
| 3,135
|
py
|
Python
|
structure/bridge.py
|
GustavoBoaz/design_patterns
|
b46c6dd6e355fce8f769b76c432ac8a00f236438
|
[
"MIT"
] | null | null | null |
structure/bridge.py
|
GustavoBoaz/design_patterns
|
b46c6dd6e355fce8f769b76c432ac8a00f236438
|
[
"MIT"
] | null | null | null |
structure/bridge.py
|
GustavoBoaz/design_patterns
|
b46c6dd6e355fce8f769b76c432ac8a00f236438
|
[
"MIT"
] | null | null | null |
"""
Bridge é um padrão de design estrutural que permite dividir uma classe grande
ou um conjunto de classes estreitamente relacionadas em duas hierarquias
separadas - abstração e implementação - que podem ser desenvolvidas
independentemente uma da outra.
COMO IMPLEMENTAR:
1. Identifique as dimensões ortogonais em suas aulas. Esses conceitos
independentes podem ser: abstração / plataforma, domínio / infraestrutura,
front-end / back-end ou interface / implementação.
2. Veja quais operações o cliente precisa e defina-as na classe de abstração
básica.
3. Determine as operações disponíveis em todas as plataformas. Declare os que
a abstração precisa na interface de implementação geral.
4. Para todas as plataformas do seu domínio, crie classes de implementação
concretas, mas verifique se todas elas seguem a interface de implementação.
5. Dentro da classe de abstração, adicione um campo de referência para o tipo
de implementação. A abstração delega a maior parte do trabalho ao objeto de
implementação mencionado nesse campo.
6. Se você tiver várias variantes da lógica de alto nível, crie abstrações
refinadas para cada variante estendendo a classe de abstração básica.
7. O código do cliente deve passar um objeto de implementação ao construtor da
abstração para associar um ao outro. Depois disso, o cliente pode esquecer a
implementação e trabalhar apenas com o objeto de abstração.
"""
from abc import ABC, abstractmethod
#====================================== Abstract class definition (implementation)
class Implementation(ABC):
@abstractmethod
def method1(self) -> None:
pass
@abstractmethod
def method2(self) -> None:
pass
@abstractmethod
def method3(self) -> None:
pass
#========================================================== Concrete implementation definition
class ConcreteImplementation(Implementation):
def method1(self):
print("Executado Metodo 1")
def method2(self):
print("Executado Metodo 2")
def method3(self):
print("Executado Metodo 3")
#=============================================================== Abstraction definition
class Abstraction():
def __init__(self, implements: Implementation):
self._implements = implements
def feature1(self) -> None:
self._implements.method1()
def feature2(self) -> None:
self._implements.method3()
self._implements.method2()
#================================================================= Client definition
def main_b():
while True:
try:
option = int(input("Caracteristica [1][2] | Exit[0]: "))
if(option == 1):
Abstraction(ConcreteImplementation()).feature1()
continue
elif(option == 2):
Abstraction(ConcreteImplementation()).feature2()
continue
elif(option == 0):
break
except:
print("Option false")
continue
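
# Non-interactive usage sketch: main_b() above drives the same calls through an
# interactive loop; the lines below show them directly. The abstraction only
# talks to the Implementation interface, so swapping ConcreteImplementation for
# another implementation does not require touching Abstraction.
if __name__ == "__main__":
    remote = Abstraction(ConcreteImplementation())
    remote.feature1()   # prints "Executado Metodo 1"
    remote.feature2()   # prints "Executado Metodo 3" then "Executado Metodo 2"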
| 36.034483
| 86
| 0.622648
|
135ac1b73f078b8a6c74184606b375879699d9ef
| 2,284
|
py
|
Python
|
nand_io/__main__.py
|
Noltari/nand-io
|
23442ed57950360cd4104d77959bd3001170b333
|
[
"MIT"
] | null | null | null |
nand_io/__main__.py
|
Noltari/nand-io
|
23442ed57950360cd4104d77959bd3001170b333
|
[
"MIT"
] | null | null | null |
nand_io/__main__.py
|
Noltari/nand-io
|
23442ed57950360cd4104d77959bd3001170b333
|
[
"MIT"
] | null | null | null |
# SPDX-License-Identifier: MIT
"""NAND IO main."""
import argparse
from .common import auto_int
from .const import SERIAL_DEF_SPEED
from .interface import NandIO
from .logger import INFO
def main():
"""NAND IO."""
parser = argparse.ArgumentParser(description="")
parser.add_argument(
"--bootloader",
dest="bootloader",
action="store_true",
help="Force device bootloader",
)
parser.add_argument(
"--pull-up",
dest="pull_up",
action="store_true",
help="Enable pull-up resistors",
)
parser.add_argument(
"--read",
dest="nand_read",
action="store",
type=str,
help="NAND read",
)
parser.add_argument(
"--restart",
dest="restart",
action="store_true",
help="Force device restart",
)
parser.add_argument(
"--serial-device",
dest="serial_device",
action="store",
type=str,
help="Serial device",
)
parser.add_argument(
"--serial-speed",
dest="serial_speed",
action="store",
type=auto_int,
help="Serial speed",
)
parser.add_argument(
"--write",
dest="nand_write",
action="store",
type=str,
help="NAND write",
)
args = parser.parse_args()
if not args.serial_device:
parser.print_help()
return
if not args.pull_up:
args.pull_up = False
if not args.serial_speed:
args.serial_speed = SERIAL_DEF_SPEED
nand = NandIO(
logger_level=INFO,
pull_up=args.pull_up,
serial_device=args.serial_device,
serial_speed=args.serial_speed,
)
if nand:
if nand.open():
if nand.ping():
if args.bootloader:
nand.bootloader()
elif args.restart:
nand.restart()
elif args.nand_read:
nand.show_info()
nand.read(file=args.nand_read)
elif args.nand_write:
nand.show_info()
nand.write(file=args.nand_write)
else:
nand.show_info()
nand.close()
if __name__ == "__main__":
    main()
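
# Example invocation sketch (the serial device path is an assumption for
# illustration; adjust it to your board):
#   python -m nand_io --serial-device /dev/ttyACM0 --serial-speed 115200 --read dump.bin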
| 21.961538
| 52
| 0.528021
|
037fc222c9c75e55678bcdeca335ee450699b1be
| 2,978
|
py
|
Python
|
mips_revisit/sync.py
|
vlad17/mips_revisit
|
0777b37d9727d785487379e5b1f6dcd146d8b17c
|
[
"Apache-2.0"
] | null | null | null |
mips_revisit/sync.py
|
vlad17/mips_revisit
|
0777b37d9727d785487379e5b1f6dcd146d8b17c
|
[
"Apache-2.0"
] | null | null | null |
mips_revisit/sync.py
|
vlad17/mips_revisit
|
0777b37d9727d785487379e5b1f6dcd146d8b17c
|
[
"Apache-2.0"
] | null | null | null |
"""
Directory syncer, taken from track, which took it from ray.
https://github.com/richardliaw/track
"""
import distutils.spawn
import os
import shutil
import subprocess
import time
from urllib.parse import urlparse
import hashlib
from . import log
from .utils import timeit
try: # py3
from shlex import quote
except ImportError: # py2
from pipes import quote
S3_PREFIX = "s3://"
GCS_PREFIX = "gs://"
ALLOWED_REMOTE_PREFIXES = (S3_PREFIX, GCS_PREFIX)
def simplehash(s):
return hashlib.md5(s.encode('utf-8')).hexdigest()
def _check_remote(remote_dir):
if not any(
remote_dir.startswith(prefix) for prefix in ALLOWED_REMOTE_PREFIXES
):
return False
if remote_dir.startswith(
S3_PREFIX
) and not distutils.spawn.find_executable("aws"):
        raise RuntimeError(
"Upload uri starting with '{}' requires awscli tool"
" to be installed".format(S3_PREFIX)
)
elif remote_dir.startswith(
GCS_PREFIX
) and not distutils.spawn.find_executable("gsutil"):
raise TrackError("Upload uri starting with '{}' requires gsutil tool")
return True
def sync(src, dst, *args):
with timeit() as t:
_sync(src, dst, args)
log.debug("sync from {} to {} in {:.2f} sec", src, dst, t.seconds)
def _sync(src, dst, args):
if _check_remote(dst):
remote_dir = dst
elif _check_remote(src):
remote_dir = src
else:
shutil.copy(src, dst)
return
local_to_remote_sync_cmd = None
if remote_dir.startswith(S3_PREFIX):
local_to_remote_sync_cmd = "aws s3 sync {} {} {}".format(
quote(src), quote(dst), ' '.join(map(quote, args))
)
elif remote_dir.startswith(GCS_PREFIX):
local_to_remote_sync_cmd = "gsutil rsync -r {} {} {}".format(
quote(src), quote(dst), ' '.join(map(quote, args))
)
if local_to_remote_sync_cmd:
final_cmd = local_to_remote_sync_cmd
sync_process = subprocess.Popen(final_cmd, shell=True)
ret = sync_process.wait() # fail gracefully
if ret != 0:
log.info(
"sync from {} to {} failed with return code {}", src, dst, ret
)
def exists(remote):
if not _check_remote(remote):
        return os.path.exists(remote)
if remote.startswith(S3_PREFIX):
from boto3 import client
c = client("s3")
parsed = urlparse(remote, allow_fragments=False)
bucket = parsed.netloc
path = parsed.path
while path.startswith("/"):
path = path[1:]
response = c.list_objects_v2(
Bucket=bucket, Prefix=path, Delimiter="/", MaxKeys=1
)
for obj in response.get("Contents", []):
if obj["Key"] == path:
return True
return False
if remote.startswith(GCS_PREFIX):
import tensorflow as tf
        return tf.gfile.Exists(remote)
raise ValueError("unhandled file type")
| 27.072727
| 78
| 0.619208
|
2191bfff36fb8bf97a5ce64f3687722cd59d0947
| 1,891
|
py
|
Python
|
packages/micropython-official/v1.11/esp8266/stubs/ssd1306.py
|
TheVinhLuong102/micropy-stubs
|
55ff1773008f7c4dfc3d70a403986486226eb6b3
|
[
"MIT"
] | 18
|
2019-07-11T13:31:09.000Z
|
2022-01-27T06:38:40.000Z
|
packages/micropython-official/v1.11/esp8266/stubs/ssd1306.py
|
TheVinhLuong102/micropy-stubs
|
55ff1773008f7c4dfc3d70a403986486226eb6b3
|
[
"MIT"
] | 9
|
2019-09-01T21:44:49.000Z
|
2022-02-04T20:55:08.000Z
|
packages/micropython-official/v1.11/esp8266/stubs/ssd1306.py
|
TheVinhLuong102/micropy-stubs
|
55ff1773008f7c4dfc3d70a403986486226eb6b3
|
[
"MIT"
] | 6
|
2019-10-08T05:31:21.000Z
|
2021-04-22T10:21:01.000Z
|
"""
Module: 'ssd1306' on esp8266 v1.11
"""
# MCU: (sysname='esp8266', nodename='esp8266', release='2.2.0-dev(9422289)', version='v1.11-8-g48dcbbe60 on 2019-05-29', machine='ESP module with ESP8266')
# Stubber: 1.2.0
SET_CHARGE_PUMP = 141
SET_COL_ADDR = 33
SET_COM_OUT_DIR = 192
SET_COM_PIN_CFG = 218
SET_CONTRAST = 129
SET_DISP = 174
SET_DISP_CLK_DIV = 213
SET_DISP_OFFSET = 211
SET_DISP_START_LINE = 64
SET_ENTIRE_ON = 164
SET_MEM_ADDR = 32
SET_MUX_RATIO = 168
SET_NORM_INV = 166
SET_PAGE_ADDR = 34
SET_PRECHARGE = 217
SET_SEG_REMAP = 160
SET_VCOM_DESEL = 219
class SSD1306:
''
def blit():
pass
def contrast():
pass
def fill():
pass
def fill_rect():
pass
def hline():
pass
def init_display():
pass
def invert():
pass
def line():
pass
def pixel():
pass
def poweroff():
pass
def poweron():
pass
def rect():
pass
def scroll():
pass
def show():
pass
def text():
pass
def vline():
pass
class SSD1306_I2C:
''
def blit():
pass
def contrast():
pass
def fill():
pass
def fill_rect():
pass
def hline():
pass
def init_display():
pass
def invert():
pass
def line():
pass
def pixel():
pass
def poweroff():
pass
def poweron():
pass
def rect():
pass
def scroll():
pass
def show():
pass
def text():
pass
def vline():
pass
def write_cmd():
pass
def write_data():
pass
class SSD1306_SPI:
''
def blit():
pass
def contrast():
pass
def fill():
pass
def fill_rect():
pass
def hline():
pass
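
# Usage sketch for the real driver these stubs mirror (wiring and constructor
# arguments are assumptions for illustration; the stubs above do not record
# signatures):
#
#   from machine import Pin, I2C
#   import ssd1306
#   i2c = I2C(scl=Pin(5), sda=Pin(4))
#   oled = ssd1306.SSD1306_I2C(128, 64, i2c)
#   oled.fill(0)
#   oled.text("hello", 0, 0)
#   oled.show()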
| 12.691275
| 155
| 0.518244
|
89bf058e812e95129d664ccceb0964df0b176be3
| 7,204
|
py
|
Python
|
client/python/easemlclient/easemlclient/model/dataset.py
|
xzyaoi/easeml
|
50e028c278013bf35f6682b2f97aa5cdc81382e2
|
[
"MIT"
] | null | null | null |
client/python/easemlclient/easemlclient/model/dataset.py
|
xzyaoi/easeml
|
50e028c278013bf35f6682b2f97aa5cdc81382e2
|
[
"MIT"
] | null | null | null |
client/python/easemlclient/easemlclient/model/dataset.py
|
xzyaoi/easeml
|
50e028c278013bf35f6682b2f97aa5cdc81382e2
|
[
"MIT"
] | null | null | null |
"""
Implementation of the `Dataset` class.
"""
import pyrfc3339
from copy import deepcopy
from datetime import datetime
from enum import Enum
from io import FileIO
from tusclient import client as tus_client
from typing import Dict, Optional, Any, Iterator, Tuple, List, IO
from .core import Connection
from .process import Process
from .user import User
from .type import ApiType, ApiQuery, ApiQueryOrder
class DatasetSource(Enum):
UPLOAD = "upload"
LOCAL = "local"
DOWNLOAD = "download"
class DatasetStatus(Enum):
CREATED = "created"
TRANSFERRED = "transferred"
UNPACKED = "unpacked"
VALIDATED = "validated"
ARCHIVED = "archived"
ERROR = "error"
class Dataset(ApiType['Dataset']):
"""The Dataset class contains information about datasets.
...
Attributes:
-----------
identifier: str
A unique identifier of the user (i.e. the username).
name: str
The full name of the user.
status: str
The current status of the user. Can be 'active' or 'archived'.
"""
def __init__(self, input: Dict[str, Any]) -> None:
if "id" not in input:
raise ValueError("Invalid input dictionary: It must contain an 'id' key.")
super().__init__(input)
@classmethod
def create(cls, id: str, source: Optional[DatasetSource] = None, source_address: Optional[str] = None,
name: Optional[str] = None, description: Optional[str] = None) -> 'Dataset':
init_dict: Dict[str, Any] = {"id": id}
if source is not None:
init_dict["source"] = source.value
if source_address is not None:
init_dict["source-address"] = source_address
if name is not None:
init_dict["name"] = name
if description is not None:
init_dict["description"] = description
return Dataset(init_dict)
@classmethod
def create_ref(cls, id: str) -> 'Dataset':
return Dataset({"id": id})
@property
def id(self) -> str:
return self._dict["id"]
@property
def user(self) -> Optional[User]:
value = self._dict.get("user")
return User({"id": value}) if value is not None else None
@property
def name(self) -> Optional[str]:
value = self._updates.get("name") or self._dict.get("name")
return str(value) if value is not None else None
@name.setter
def name(self, value: Optional[str] = None) -> None:
if value is not None:
self._updates["name"] = value
else:
self._updates.pop("name")
@property
def description(self) -> Optional[str]:
value = self._updates.get("description") or self._dict.get("description")
return str(value) if value is not None else None
@description.setter
def description(self, value: Optional[str] = None) -> None:
if value is not None:
self._updates["description"] = value
else:
self._updates.pop("description")
@property
def schema_in(self) -> Optional[str]:
value = self._dict.get("schema-in")
return str(value) if value is not None else None
@property
def schema_out(self) -> Optional[str]:
value = self._dict.get("schema-out")
return str(value) if value is not None else None
@property
def source(self) -> Optional[DatasetSource]:
value = self._dict.get("source")
return DatasetSource(value) if value is not None else None
@property
def source_address(self) -> Optional[str]:
value = self._dict.get("source-address")
return str(value) if value is not None else None
@property
def creation_time(self) -> Optional[datetime]:
value = self._dict.get("creation-time")
return pyrfc3339.parse(value) if value is not None else None
@property
def status(self) -> Optional[DatasetStatus]:
value = self._updates.get("status") or self._dict.get("status")
return DatasetStatus(value) if value is not None else None
@status.setter
def status(self, value: Optional[DatasetStatus] = None) -> None:
if value is not None:
self._updates["status"] = value.value
else:
self._updates.pop("status")
@property
def status_message(self) -> Optional[str]:
value = self._dict.get("status-message")
return str(value) if value is not None else None
@property
def process(self) -> Optional[Process]:
value = self._dict.get("process")
return Process({"id": value}) if value is not None else None
def __iter__(self) -> Iterator[Tuple[str, Any]]:
        for (k, v) in self._dict.items():
yield (k, v)
def post(self, connection: Connection) -> 'Dataset':
url = connection.url("datasets")
return self._post(connection, url)
def patch(self, connection: Connection) -> 'Dataset':
url = connection.url("datasets/" + self.id)
return self._patch(connection, url)
def get(self, connection: Connection) -> 'Dataset':
url = connection.url("datasets/" + self.id)
return self._get(connection, url)
def upload(self, connection: Connection, data: IO, file_name: Optional[str] = None) -> None:
url = connection.url("datasets/%s/upload" % self.id)
metadata = {"filename" : file_name} if file_name is not None else None
# Initialize the client for the TUS upload protocol. Apply the authentication header.
client = tus_client.TusClient(url)
connection.auth(client)
uploader = client.uploader(file_stream=data, chunk_size=201800, metadata=metadata)
uploader.upload()
class DatasetQuery(ApiQuery['Dataset', 'DatasetQuery']):
VALID_SORTING_FIELDS = ["id", "user", "source", "source-address", "creation-time", "status"]
def __init__(self, id: Optional[List[str]] = None, user: Optional[User] = None,
status: Optional[DatasetStatus] = None, source: Optional[DatasetSource] = None,
source_address: Optional[str] = None,
schema_in: Optional[str] = None, schema_out: Optional[str] = None,
order_by: Optional[str] = None, order: Optional[ApiQueryOrder] = None,
limit: Optional[int] = None, cursor: Optional[str] = None) -> None:
super().__init__(order_by, order, limit, cursor)
self.T = Dataset
if id is not None:
self._query["id"] = id
if user is not None:
self._query["user"] = user.id
if status is not None:
self._query["status"] = status.value
if source is not None:
self._query["source"] = source.value
if source_address is not None:
self._query["source-address"] = source_address
if schema_in is not None:
self._query["schema-in"] = schema_in
if schema_out is not None:
self._query["schema-out"] = schema_out
def run(self, connection: Connection) -> Tuple[List[Dataset], Optional['DatasetQuery']]:
url = connection.url("datasets")
return self._run(connection, url)
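
# Minimal usage sketch (the Connection construction is assumed for illustration;
# consult the easemlclient documentation for the real arguments):
#
#   connection = Connection("http://localhost:8080", ("user", "password"))
#   dataset = Dataset.create(id="mnist", source=DatasetSource.UPLOAD, name="MNIST")
#   dataset = dataset.post(connection)
#   with open("mnist.tar", "rb") as f:
#       dataset.upload(connection, f, file_name="mnist.tar")
#   validated, _ = DatasetQuery(status=DatasetStatus.VALIDATED).run(connection)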
| 34.14218
| 106
| 0.620905
|
5ef2a5ca2d7eb71d4e37a03b5b752b5870570d61
| 1,769
|
py
|
Python
|
test/functional/sapling_malleable_sigs.py
|
HunterCanimun/surgeofficial-surge-coin
|
663dc25517e9045a65a9b1e0993bbaa06d564284
|
[
"MIT"
] | null | null | null |
test/functional/sapling_malleable_sigs.py
|
HunterCanimun/surgeofficial-surge-coin
|
663dc25517e9045a65a9b1e0993bbaa06d564284
|
[
"MIT"
] | null | null | null |
test/functional/sapling_malleable_sigs.py
|
HunterCanimun/surgeofficial-surge-coin
|
663dc25517e9045a65a9b1e0993bbaa06d564284
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2018 The Zcash developers
# Copyright (c) 2020 The PIVX developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://www.opensource.org/licenses/mit-license.php .
from test_framework.test_framework import SurgeTestFramework
from test_framework.messages import (
CTransaction,
)
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
bytes_to_hex_str,
hex_str_to_bytes,
)
from decimal import Decimal
from io import BytesIO
class MalleableSigsTest(SurgeTestFramework):
def set_test_params(self):
self.num_nodes = 1
saplingUpgrade = ['-nuparams=v5_shield:201']
self.extra_args = [saplingUpgrade]
def run_test(self):
node = self.nodes[0]
node.generate(2)
assert_equal(node.getblockcount(), 202)
z_addr = node.getnewshieldaddress()
shield_to = [{"address": z_addr, "amount": Decimal('10')}]
# Create rawtx shielding 10 SRG
self.log.info("Shielding 10 SRG...")
rawtx_hex = node.rawshieldsendmany("from_transparent", shield_to)
self.log.info("Raw tx created")
# Creating malleated tx
self.log.info("Removing sapling data...")
new_tx = CTransaction()
new_tx.deserialize(BytesIO(hex_str_to_bytes(rawtx_hex)))
new_tx.sapData = b""
new_rawtx = bytes_to_hex_str(new_tx.serialize())
self.log.info("Sending malleated tx...")
assert_raises_rpc_error(-26, "mandatory-script-verify-flag-failed",
node.sendrawtransaction, new_rawtx, True)
self.log.info("Good. Tx NOT accepted in mempool")
if __name__ == '__main__':
MalleableSigsTest().main()
| 31.035088
| 75
| 0.677784
|
d42b6d9a88c3a65dd0d718006a37918cae13dba9
| 1,747
|
py
|
Python
|
misc/python/materialize/checks/having.py
|
guswynn/materialize
|
f433173ed71f511d91311769ec58c2d427dd6c3b
|
[
"MIT"
] | null | null | null |
misc/python/materialize/checks/having.py
|
guswynn/materialize
|
f433173ed71f511d91311769ec58c2d427dd6c3b
|
[
"MIT"
] | 157
|
2021-12-28T19:17:45.000Z
|
2022-03-31T17:44:27.000Z
|
misc/python/materialize/checks/having.py
|
guswynn/materialize
|
f433173ed71f511d91311769ec58c2d427dd6c3b
|
[
"MIT"
] | null | null | null |
# Copyright Materialize, Inc. and contributors. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.
from textwrap import dedent
from typing import List
from materialize.checks.actions import Testdrive
from materialize.checks.checks import Check
class Having(Check):
def initialize(self) -> Testdrive:
return Testdrive(
dedent(
"""
> CREATE TABLE having_table (f1 INTEGER, f2 INTEGER);
> INSERT INTO having_table VALUES (1, 1);
"""
)
)
def manipulate(self) -> List[Testdrive]:
return [
Testdrive(dedent(s))
for s in [
"""
> CREATE MATERIALIZED VIEW having_view1 AS
SELECT f1, SUM(f1) FROM having_table GROUP BY f1 HAVING SUM(f1) > 1 AND SUM(f1) < 3;
> INSERT INTO having_table VALUES (2, 2);
""",
"""
> CREATE MATERIALIZED VIEW having_view2 AS
SELECT f1, SUM(f1) FROM having_table GROUP BY f1 HAVING SUM(f1) > 1 AND SUM(f1) < 3;
> INSERT INTO having_table VALUES (3, 3);
""",
]
]
def validate(self) -> Testdrive:
return Testdrive(
dedent(
"""
> SELECT * FROM having_view1;
2 2
> SELECT * FROM having_view2;
2 2
"""
)
)
| 31.763636
| 102
| 0.544362
|
a136d7364bcd580e9cf3cfd3375a5be3ee06e662
| 63,303
|
py
|
Python
|
odps/df/backends/pd/compiler.py
|
alvinyeats/aliyun-odps-python-sdk
|
ae65e3f5efddb2e5fa85291844f2cc284c8b530a
|
[
"Apache-2.0"
] | null | null | null |
odps/df/backends/pd/compiler.py
|
alvinyeats/aliyun-odps-python-sdk
|
ae65e3f5efddb2e5fa85291844f2cc284c8b530a
|
[
"Apache-2.0"
] | null | null | null |
odps/df/backends/pd/compiler.py
|
alvinyeats/aliyun-odps-python-sdk
|
ae65e3f5efddb2e5fa85291844f2cc284c8b530a
|
[
"Apache-2.0"
] | 1
|
2017-06-27T08:18:29.000Z
|
2017-06-27T08:18:29.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import json
import re
import time
import uuid
from datetime import datetime
from ..core import Backend
from ...expr.expressions import *
from ...expr.arithmetic import Power
from ...expr.reduction import GroupedSequenceReduction, GroupedCount, Count, \
GroupedCat, Cat, NUnique, GroupedNUnique, ToList, GroupedToList, Quantile, \
GroupedQuantile
from ...expr.merge import JoinCollectionExpr
from ...expr.datetimes import DTScalar
from ...expr.collections import PivotCollectionExpr
from ...expr import arithmetic, element, composites
from ...utils import traverse_until_source
from ....dag import DAG
from ..errors import CompileError
from ..utils import refresh_dynamic
from . import types
from ... import types as df_types
from ....models import FileResource, TableResource, Schema
from .... import compat
from ....lib.xnamedtuple import xnamedtuple
from ....compat import lzip
try:
import numpy as np
import pandas as pd
except ImportError:
pd = None
np = None
BINARY_OP_TO_PANDAS = {
'Add': operator.add,
'Substract': operator.sub,
'Multiply': operator.mul,
'Divide': operator.div if six.PY2 else operator.truediv,
'Mod': operator.mod,
'FloorDivide': operator.floordiv,
'Power': operator.pow,
'Greater': operator.gt,
'GreaterEqual': operator.ge,
'Less': operator.lt,
'LessEqual': operator.le,
'Equal': operator.eq,
'NotEqual': operator.ne,
'And': operator.and_,
'Or': operator.or_
}
UNARY_OP_TO_PANDAS = {
'Negate': operator.neg,
'Invert': operator.inv,
'Abs': operator.abs
}
if pd:
SORT_CUM_WINDOW_OP_TO_PANDAS = {
'CumSum': lambda s: s.expanding(min_periods=1).sum(),
'CumMean': lambda s: s.expanding(min_periods=1).mean(),
'CumMedian': lambda s: s.expanding(min_periods=1).median(),
'CumStd': lambda s: s.expanding(min_periods=1).std(),
'CumMin': lambda s: s.expanding(min_periods=1).min(),
'CumMax': lambda s: s.expanding(min_periods=1).max(),
'CumCount': lambda s: s.expanding(min_periods=1).count(),
}
if np:
CUM_WINDOW_OP_TO_PANDAS = {
'CumSum': np.sum,
'CumMean': np.mean,
'CumMedian': np.median,
'CumStd': np.std,
'CumMin': np.min,
'CumMax': np.max,
'CumCount': lambda x: len(x),
}
JOIN_DICT = {
'INNER': 'inner',
'LEFT OUTER': 'left',
'RIGHT OUTER': 'right',
'FULL OUTER': 'outer'
}
def _explode(obj):
if obj and isinstance(obj, tuple):
obj = obj[0]
if obj is None:
return
if isinstance(obj, dict):
for k, v in six.iteritems(obj):
yield k, v
else:
for v in obj:
yield v
def _pos_explode(obj):
if obj and isinstance(obj, tuple):
obj = obj[0]
if obj is None:
return
for idx, v in enumerate(obj):
yield idx, v
def _filter_none(col):
import numpy as np
if hasattr(col, 'dropna'):
col = col.dropna()
else:
try:
col = col[~np.isnan(col)]
except TypeError:
col = col[np.fromiter((v is not None for v in col), np.bool_)]
return col
BUILTIN_FUNCS = {
'EXPLODE': _explode,
'POSEXPLODE': _pos_explode,
}
class PandasCompiler(Backend):
"""
PandasCompiler will compile an Expr into a DAG
in which each node is a pair of <expr, function>.
"""
def __init__(self, expr_dag):
self._dag = DAG()
self._expr_to_dag_node = dict()
self._expr_dag = expr_dag
self._callbacks = list()
def compile(self, expr):
try:
return self._compile(expr)
finally:
self._cleanup()
def _cleanup(self):
for callback in self._callbacks:
callback()
self._callbacks = list()
def _compile(self, expr, traversed=None):
if traversed is None:
traversed = set()
root = self._retrieve_until_find_root(expr)
if root is not None and id(root) not in traversed:
self._compile_join_node(root, traversed)
traversed.add(id(root))
for node in traverse_until_source(expr):
if id(node) not in traversed:
node.accept(self)
traversed.add(id(node))
return self._dag
def _compile_join_node(self, expr, traversed):
nodes = []
self._compile(expr._lhs, traversed)
nodes.append(expr._lhs)
self._compile(expr._rhs, traversed)
nodes.append(expr._rhs)
for node in expr._predicate:
nodes.append(node._lhs)
self._compile(node._lhs, traversed)
nodes.append(node._rhs)
self._compile(node._rhs, traversed)
expr.accept(self)
for node in nodes:
self._dag.add_edge(self._expr_to_dag_node[node], self._expr_to_dag_node[expr])
cached_args = expr.args
def cb():
for arg_name, arg in zip(expr._args, cached_args):
setattr(expr, arg_name, arg)
self._callbacks.append(cb)
for arg_name in expr._args:
setattr(expr, arg_name, None)
@classmethod
def _retrieve_until_find_root(cls, expr):
for node in traverse_until_source(expr, top_down=True, unique=True):
if isinstance(node, JoinCollectionExpr):
return node
def _add_node(self, expr, handle):
children = expr.children()
node = (expr, handle)
self._dag.add_node(node)
self._expr_to_dag_node[expr] = node
# the dependencies do not exist in self._expr_to_dag_node
predecessors = [self._expr_to_dag_node[child] for child in children
if child in self._expr_to_dag_node]
[self._dag.add_edge(p, node) for p in predecessors]
def visit_source_collection(self, expr):
df = next(expr.data_source())
if not isinstance(df, pd.DataFrame):
raise ValueError('Expr data must be a pandas DataFrame.')
# make a copy to avoid modify
handle = lambda _: df.rename(columns=dict(zip(df.columns, expr.schema.names)))
self._add_node(expr, handle)
@classmethod
def _get_children_vals(cls, kw, expr=None, children=None):
children = children or expr.children()
return [kw.get(child) for child in children]
@classmethod
def _merge_values(cls, exprs, kw):
fields = [kw.get(expr) for expr in exprs]
size = max(len(f) for f, e in zip(fields, exprs) if isinstance(e, SequenceExpr))
fields = [pd.Series([f] * size) if isinstance(e, Scalar) else f
for f, e in zip(fields, exprs)]
return pd.concat(fields, axis=1, keys=[e.name for e in exprs])
def visit_project_collection(self, expr):
def handle(kw):
children = expr.children()
fields = self._get_children_vals(kw, children=children)[1:]
names = expr.schema.names
if isinstance(expr, Summary):
size = 1
else:
size = max(len(f) for f, e in zip(fields, expr._fields)
if isinstance(e, SequenceExpr))
for i in range(len(fields)):
if not isinstance(fields[i], pd.Series):
fields[i] = pd.Series([fields[i]] * size)
return pd.concat(fields, axis=1, keys=names)
self._add_node(expr, handle)
def visit_filter_partition_collection(self, expr):
def handle(kw):
children_vals = self._get_children_vals(kw, expr)
df, predicate = children_vals[0:1]
return df[predicate][expr.schema.names]
self._add_node(expr, handle)
def visit_filter_collection(self, expr):
def handle(kw):
df, predicate = tuple(self._get_children_vals(kw, expr))
return df[predicate]
self._add_node(expr, handle)
def visit_slice_collection(self, expr):
def handle(kw):
children_vals = self._get_children_vals(kw, expr)
df = children_vals[0]
start, end, step = expr.start, expr.stop, expr.step
return df[start: end: step]
self._add_node(expr, handle)
def visit_element_op(self, expr):
def handle(kw):
children_vals = self._get_children_vals(kw, expr)
input, args = children_vals[0], children_vals[1:]
if isinstance(expr.input, Scalar):
input = pd.Series([input])
def run():
if isinstance(expr, element.IsNull):
return input.isnull()
elif isinstance(expr, element.NotNull):
return input.notnull()
elif isinstance(expr, element.FillNa):
return input.fillna(args[0])
elif isinstance(expr, element.IsIn):
if isinstance(expr._values[0], SequenceExpr):
return input.isin(list(args[0]))
else:
return input.isin(args)
elif isinstance(expr, element.NotIn):
if isinstance(expr._values[0], SequenceExpr):
return ~input.isin(list(args[0]))
else:
return ~input.isin(args)
elif isinstance(expr, element.IfElse):
return pd.Series(np.where(input, args[0], args[1]), name=expr.name, index=input.index)
elif isinstance(expr, element.Switch):
case = None if expr.case is None else kw.get(expr.case)
default = None if expr.default is None else kw.get(expr.default)
conditions = [kw.get(it) for it in expr.conditions]
thens = [kw.get(it) for it in expr.thens]
if case is not None:
conditions = [case == condition for condition in conditions]
condition_exprs = [expr.case == cond for cond in expr.conditions]
else:
condition_exprs = expr.conditions
size = max(len(val) for e, val in zip(condition_exprs + expr.thens, conditions + thens)
if isinstance(e, SequenceExpr))
curr = pd.Series([None] * size)
for condition, then in zip(conditions, thens):
curr = curr.where(-condition, then)
if default is not None:
return curr.fillna(default)
return curr
elif isinstance(expr, element.Between):
return input.between(*args)
elif isinstance(expr, element.Cut):
bins = [bin.value for bin in expr.bins]
if expr.include_under:
bins.insert(0, -float('inf'))
if expr.include_over:
bins.append(float('inf'))
labels = [l.value for l in expr.labels]
return pd.cut(input, bins, right=expr.right, labels=labels,
include_lowest=expr.include_lowest)
if isinstance(expr.input, Scalar):
return run()[0]
else:
return run()
self._add_node(expr, handle)
def visit_binary_op(self, expr):
def handle(kw):
children_vals = self._get_children_vals(kw, expr)
if expr.lhs.dtype == df_types.datetime and expr.rhs.dtype == df_types.datetime:
return ((pd.to_datetime(children_vals[0]) - pd.to_datetime(children_vals[1])) /
np.timedelta64(1, 'ms')).astype(np.int64)
op = BINARY_OP_TO_PANDAS[expr.node_name]
if isinstance(expr, Power) and isinstance(expr.dtype, df_types.Integer):
return op(*children_vals).astype(types.df_type_to_np_type(expr.dtype))
return op(*children_vals)
self._add_node(expr, handle)
def visit_unary_op(self, expr):
def handle(kw):
children_vals = self._get_children_vals(kw, expr)
op = UNARY_OP_TO_PANDAS[expr.node_name]
return op(*children_vals)
self._add_node(expr, handle)
def visit_math(self, expr):
def handle(kw):
children_vals = self._get_children_vals(kw, expr)
if isinstance(expr, math.Log) and expr._base is not None:
base = expr._base.value
return np.log(children_vals[0]) / np.log(base)
elif isinstance(expr, math.Trunc):
decimals = expr._decimals.value
order = 10 ** decimals
return np.trunc(children_vals[0] * order) / order
else:
op = getattr(np, expr.node_name.lower())
return op(*children_vals)
self._add_node(expr, handle)
def visit_string_op(self, expr):
def handle(kw):
children_vals = self._get_children_vals(kw, expr)
input = children_vals[0]
if isinstance(expr.input, Scalar):
input = pd.Series([input])
assert len(expr._args) == len(expr.args)
kv = dict((name.lstrip('_'), self._get(arg, kw))
for name, arg in zip(expr._args[1:], expr.args[1:]))
op = expr.node_name.lower()
if op == 'get':
res = getattr(getattr(input, 'str'), op)(children_vals[1])
elif op == 'strptime':
res = input.map(lambda x: datetime.strptime(x, children_vals[1]))
elif op == 'extract':
def extract(x, pat, flags, group):
regex = re.compile(pat, flags=flags)
m = regex.match(x)
if m:
return m.group(group)
df = self._merge_values([expr.input, expr._pat, expr._flags, expr._group], kw)
return pd.Series([extract(*r[1]) for r in df.iterrows()])
elif op == 'split':
return input.apply(lambda v: v.split(kv['pat'], kv['n']) if v is not None else None)
elif op == 'stringtodict':
def _parse_dict(x):
return dict(it.split(kv['kv_delim'], 1) for it in x.split(kv['item_delim']))
return input.apply(lambda v: _parse_dict(v) if v is not None else None)
else:
if op == 'slice':
kv['stop'] = kv.pop('end', None)
elif op == 'replace':
assert 'regex' in kv
if kv['regex']:
kv.pop('regex')
else:
kv['pat'] = re.escape(kv['pat'])
kv.pop('regex')
res = getattr(getattr(input, 'str'), op)(**kv)
if isinstance(expr.input, Scalar):
return res[0]
else:
return res
self._add_node(expr, handle)
def visit_datetime_op(self, expr):
def handle(kw):
children_vals = self._get_children_vals(kw, expr)
input = children_vals[0]
if isinstance(expr.input, Scalar):
input = pd.Series([input])
assert len(children_vals) == len(expr.args)
kv = dict(zip([arg.lstrip('_') for arg in expr._args[1:]],
children_vals[1:]))
op = expr.node_name.lower()
res = getattr(getattr(input, 'dt'), op)
if not isinstance(res, pd.Series):
res = res(**kv)
if isinstance(expr.input, Scalar):
return res[0]
else:
return res
self._add_node(expr, handle)
def visit_groupby(self, expr):
def handle(kw):
fields_exprs = expr._fields or expr._by + expr._aggregations
fields = [[kw.get(field), ] if isinstance(field, Scalar) else kw.get(field)
for field in fields_exprs]
length = max(len(it) for it in fields)
for i in range(len(fields)):
bys = self._get_compiled_bys(kw, expr._by, length)
if isinstance(fields_exprs[i], SequenceExpr):
is_reduction = False
for n in itertools.chain(*(fields_exprs[i].all_path(expr.input))):
if isinstance(n, GroupedSequenceReduction):
is_reduction = True
break
if not is_reduction:
fields[i] = fields[i].groupby(bys).first()
elif len(fields[i]) == 1:
fields[i] = pd.Series(fields[i] * length,
name=fields_exprs[i].name).groupby(bys).first()
df = pd.concat(fields, axis=1)
if expr._having is not None:
having = kw.get(expr._having)
if all(not isinstance(e, GroupedSequenceReduction)
for e in itertools.chain(*expr._having.all_path(expr.input))):
# the having comes from the by fields, we need to do Series.groupby explicitly.
bys = self._get_compiled_bys(kw, expr._by, len(having))
having = having.groupby(bys).first()
df = df[having]
return pd.DataFrame(
df.values, columns=[f.name for f in fields_exprs])[expr.schema.names]
self._add_node(expr, handle)
def visit_mutate(self, expr):
def handle(kw):
bys = self._get_compiled_bys(kw, expr._by, len(kw.get(expr.input)))
bys = pd.concat(bys)
bys.sort_values(inplace=True)
wins = [kw.get(f) for f in expr._window_fields]
return pd.DataFrame(pd.concat([bys] + wins, axis=1).values,
columns=expr.schema.names)
self._add_node(expr, handle)
def visit_value_counts(self, expr):
def handle(kw):
by = kw.get(expr._by)
sort = kw.get(expr._sort)
ascending = kw.get(expr._ascending)
dropna = kw.get(expr._dropna)
df = by.value_counts(sort=sort, ascending=ascending, dropna=dropna).to_frame()
df.reset_index(inplace=True)
return pd.DataFrame(df.values, columns=expr.schema.names)
self._add_node(expr, handle)
def visit_sort(self, expr):
def handle(kw):
input = kw.get(expr.input)
names = expr.schema.names
sorted_columns = OrderedDict()
for field in expr._sorted_fields:
name = str(uuid.uuid4())
sorted_columns[name] = kw.get(field)
input = input.assign(**sorted_columns)
return input.sort_values(list(six.iterkeys(sorted_columns)),
ascending=expr._ascending)[names]
self._add_node(expr, handle)
def visit_sort_column(self, expr):
def handle(kw):
input = kw.get(expr.input)
if isinstance(expr.input, CollectionExpr):
return input[expr._source_name]
else:
return input
self._add_node(expr, handle)
def visit_distinct(self, expr):
def handle(kw):
children_vals = self._get_children_vals(kw, expr)
fields = children_vals[1:]
ret = pd.concat(fields, axis=1, keys=expr.schema.names).drop_duplicates()
ret.reset_index(drop=True, inplace=True)
return ret
self._add_node(expr, handle)
def _get(self, item, kw):
if item is None:
return
if isinstance(item, (list, tuple, set)):
return type(item)(kw.get(it) for it in item)
return kw.get(item)
def visit_sample(self, expr):
def handle(kw):
input = self._get(expr.input, kw)
parts = self._get(expr._parts, kw)
i = self._get(expr._i, kw)
n = self._get(expr._n, kw)
frac = self._get(expr._frac, kw)
replace = self._get(expr._replace, kw)
weights = self._get(expr._weights, kw)
strata = self._get(expr._strata, kw)
random_state = self._get(expr._random_state, kw)
if expr._sampled_fields:
collection = pd.DataFrame(
pd.concat([kw.get(e) for e in expr._sampled_fields], axis=1).values,
columns=[str(uuid.uuid4()) for _ in expr._sampled_fields])
else:
collection = input
if parts is not None and frac is None:
frac = 1 / float(parts)
if i is not None and (len(i) != 1 or i[0] > 0):
raise NotImplementedError
if not strata:
sampled = collection.sample(n=n, frac=frac, replace=replace, weights=weights,
random_state=random_state)
else:
frames = []
frac = json.loads(frac) if expr._frac else dict()
n = json.loads(n) if expr._n else dict()
for val in itertools.chain(six.iterkeys(frac), six.iterkeys(n)):
v_frac = frac.get(val)
v_n = n.get(val)
filtered = collection[collection[strata].astype(str) == val]
sampled = filtered.sample(n=v_n, frac=v_frac, replace=replace, random_state=random_state)
frames.append(sampled)
if frames:
sampled = pd.concat(frames)
else:
sampled = pd.DataFrame(columns=collection.columns)
if expr._sampled_fields:
return pd.concat([input, sampled], axis=1, join='inner')[
[n for n in input.columns.tolist()]]
return sampled
self._add_node(expr, handle)
def _get_names(self, x, force_list=False):
if x is None:
return x
res = [it.name for it in x]
if not force_list and len(res) == 1:
return res[0]
return res
def _get_pivot_handler(self, expr):
def handle(kw):
df = self._merge_values(expr._group + expr._columns + expr._values, kw)
pivoted = df.pivot(index=self._get_names(expr._group),
columns=self._get_names(expr._columns))
columns = pivoted.columns.levels
pivoted.reset_index(inplace=True)
names = self._get_names(expr._group, True)
tps = [g.dtype for g in expr._group]
if len(columns[0]) == 1:
tp = expr._values[0].dtype
for name in columns[1]:
names.append(name)
tps.append(tp)
else:
for value_name, value_col in zip(columns[0], expr._values):
for name in columns[1]:
names.append('{0}_{1}'.format(name, value_name))
tps.append(value_col.dtype)
expr._schema = Schema.from_lists(names, tps)
res = pd.DataFrame(pivoted.values, columns=names)
to_sub = CollectionExpr(_source_data=res, _schema=expr._schema)
self._expr_dag.substitute(expr, to_sub)
# trigger refresh of dynamic operations
def func(expr):
for c in traverse_until_source(expr, unique=True):
if c not in self._expr_to_dag_node:
c.accept(self)
refresh_dynamic(to_sub, self._expr_dag, func=func)
return to_sub, res
return handle
def _get_pivot_table_handler(self, expr):
from ...expr.query import ExprVisitor
class WrappedNumpyFunction(object):
def __init__(self, fun):
self._fun = fun
def __call__(self, *args, **kwargs):
return self._fun(*args, **kwargs)
class AggFuncVisitor(ExprVisitor):
def __init__(self, np_object, env):
super(AggFuncVisitor, self).__init__(env)
self.np_object = np_object
def get_named_object(self, obj_name):
if obj_name == 'count':
return WrappedNumpyFunction(np.size)
elif obj_name == 'nunique':
return WrappedNumpyFunction(lambda x: np.size(np.unique(x)))
elif obj_name == 'quantile':
return WrappedNumpyFunction(lambda x, prob: np.percentile(x, prob * 100))
else:
return WrappedNumpyFunction(getattr(np, obj_name))
def visit_Call(self, node):
func = self.visit(node.func)
args = [self.visit(n) for n in node.args]
if isinstance(func, WrappedNumpyFunction):
args = [self.np_object] + args
kwargs = OrderedDict([(kw.arg, self.visit(kw.value)) for kw in node.keywords])
return func(*args, **kwargs)
def get_real_aggfunc(aggfunc):
if isinstance(aggfunc, six.string_types):
if aggfunc == 'count':
return getattr(np, 'size')
if aggfunc == 'nunique':
return lambda x: np.size(np.unique(x))
if hasattr(np, aggfunc):
return getattr(np, aggfunc)
def agg_eval(x):
visitor = AggFuncVisitor(x, {})
return visitor.eval(aggfunc, rewrite=False)
return agg_eval
if inspect.isclass(aggfunc):
aggfunc = aggfunc()
def func(x):
buffer = aggfunc.buffer()
for it in x:
aggfunc(buffer, it)
return aggfunc.getvalue(buffer)
return func
return aggfunc
def handle(kw):
columns = expr._columns if expr._columns else []
df = self._merge_values(expr._group + columns + expr._values, kw)
pivoted = df.pivot_table(index=self._get_names(expr._group),
columns=self._get_names(expr._columns),
values=self._get_names(expr._values),
aggfunc=[get_real_aggfunc(f) for f in expr._agg_func],
fill_value=expr.fill_value)
levels = pivoted.columns.levels if isinstance(pivoted.columns, pd.MultiIndex) \
else [pivoted.columns]
pivoted.reset_index(inplace=True)
names = self._get_names(expr._group, True)
tps = [g.dtype for g in expr._group]
columns_values = levels[-1] if expr._columns else [None, ]
for agg_func_name in expr._agg_func_names:
for value_col in expr._values:
for col in columns_values:
base = '{0}_'.format(col) if col is not None else ''
name = '{0}{1}_{2}'.format(base, value_col.name, agg_func_name)
names.append(name)
tps.append(value_col.dtype)
if expr._columns:
expr._schema = Schema.from_lists(names, tps)
res = pd.DataFrame(pivoted.values, columns=names)
to_sub = CollectionExpr(_source_data=res, _schema=expr._schema)
self._expr_dag.substitute(expr, to_sub)
# trigger refresh of dynamic operations
def func(expr):
for c in traverse_until_source(expr, unique=True):
if c not in self._expr_to_dag_node:
c.accept(self)
refresh_dynamic(to_sub, self._expr_dag, func=func)
return to_sub, res
return handle
def visit_pivot(self, expr):
if isinstance(expr, PivotCollectionExpr):
handle = self._get_pivot_handler(expr)
else:
handle = self._get_pivot_table_handler(expr)
self._add_node(expr, handle)
def _get_compiled_bys(self, kw, by_exprs, length):
bys = [[kw.get(by), ] if isinstance(by, Scalar) else kw.get(by)
for by in by_exprs]
if any(isinstance(e, SequenceExpr) for e in by_exprs):
size = max(len(by) for by, e in zip(bys, by_exprs)
if isinstance(e, SequenceExpr))
else:
size = length
return [(by * size if len(by) == 1 else by) for by in bys]
def _compile_grouped_reduction(self, kw, expr):
if isinstance(expr, GroupedCount) and isinstance(expr._input, CollectionExpr):
df = kw.get(expr.input)
bys = [[kw.get(by), ] if isinstance(by, Scalar) else kw.get(by)
for by in expr._by]
if any(isinstance(e, SequenceExpr) for e in expr._by):
size = max(len(by) for by, e in zip(bys, expr._by)
if isinstance(e, SequenceExpr))
else:
size = len(df)
bys = [(by * size if len(by) == 1 else by) for by in bys]
return df.groupby(bys).size()
if isinstance(expr, GroupedNUnique):
input_df = pd.concat([kw.get(ip) for ip in expr.inputs], axis=1)
bys = self._get_compiled_bys(kw, expr._by, len(input_df))
return input_df.groupby(bys).apply(lambda x: pd.Series([len(x.drop_duplicates())]))[0]
series = kw.get(expr.input) if isinstance(expr.input, SequenceExpr) \
else pd.Series([kw.get(expr.input)], name=expr.input.name)
bys = self._get_compiled_bys(kw, expr._by, len(series))
if isinstance(expr.input, Scalar):
series = pd.Series(series.repeat(len(bys[0])).values, index=bys[0].index)
if isinstance(expr, GroupedCat):
return series.groupby(bys).apply(lambda x: kw.get(expr._sep).join(x))
if isinstance(expr, GroupedToList):
if expr._unique:
return series.groupby(bys).apply(lambda x: list(set(x)))
else:
return series.groupby(bys).apply(list)
kv = dict()
if hasattr(expr, '_ddof'):
kv['ddof'] = expr._ddof
op = expr.node_name.lower()
op = 'size' if op == 'count' else op
return getattr(series.groupby(bys), op)(**kv)
def visit_reduction(self, expr):
def handle(kw):
if isinstance(expr, GroupedSequenceReduction):
return self._compile_grouped_reduction(kw, expr)
children_vals = self._get_children_vals(kw, expr)
kv = dict()
if hasattr(expr, '_ddof'):
kv['ddof'] = expr._ddof
op = expr.node_name.lower()
op = 'size' if op == 'count' else op
if isinstance(expr, NUnique):
inputs = children_vals[:len(expr.inputs)]
if len(expr.inputs) == 1:
inputs[0] = _filter_none(inputs[0])
return len(pd.concat(inputs, axis=1).drop_duplicates())
input = children_vals[0]
if getattr(expr, '_unique', False):
input = input.unique()
if isinstance(expr, Count):
if isinstance(expr.input, CollectionExpr):
return len(input)
elif isinstance(expr.input, SequenceExpr):
return len(_filter_none(input))
input = _filter_none(input)
if isinstance(expr, (Cat, GroupedCat)):
kv['sep'] = expr._sep.value if isinstance(expr._sep, Scalar) else expr._sep
kv['na_rep'] = expr._na_rep.value \
if isinstance(expr._na_rep, Scalar) else expr._na_rep
return getattr(getattr(input, 'str'), 'cat')(**kv)
elif isinstance(expr, (ToList, GroupedToList)):
return list(input)
elif isinstance(expr, (Quantile, GroupedQuantile)):
if isinstance(expr._prob, (list, set)):
return [np.percentile(input, p * 100) for p in expr._prob]
else:
return np.percentile(input, expr._prob * 100)
return getattr(input, op)(**kv)
self._add_node(expr, handle)
def visit_user_defined_aggregator(self, expr):
def handle(kw):
resources = self._get_resources(expr, kw)
input = self._merge_values(expr._inputs, kw)
func = expr._aggregator
args = expr._func_args
kwargs = expr._func_kwargs or dict()
if resources:
if not args and not kwargs:
agg = func(resources)
else:
kwargs['resources'] = resources
agg = func(*args, **kwargs)
else:
agg = func(*args, **kwargs)
if isinstance(expr, GroupedSequenceReduction):
bys = [[kw.get(by), ] if isinstance(by, Scalar) else kw.get(by)
for by in expr._by]
else:
bys = [[1, ]]
if expr._by and any(isinstance(e, SequenceExpr) for e in expr._by):
size = max(len(by) for by, e in zip(bys, expr._by)
if isinstance(e, SequenceExpr))
else:
size = len(input)
bys = [(by * size if len(by) == 1 else by) for by in bys]
def iterrows(x):
if getattr(expr, '_unique', False):
vset = set()
for it in x.iterrows():
if bytes(it[1].values.data) not in vset:
yield it
vset.add(bytes(it[1].values.data))
else:
for it in x.iterrows():
yield it
def f(x):
buffer = agg.buffer()
for it in iterrows(x):
agg(buffer, *it[1])
ret = agg.getvalue(buffer)
np_type = types.df_type_to_np_type(expr.dtype)
return np.array([ret,], dtype=np_type)[0]
res = input.groupby(bys).apply(f)
if isinstance(expr, Scalar):
return res.iloc[0]
return res
self._add_node(expr, handle)
def visit_column(self, expr):
def handle(kw):
children_vals = self._get_children_vals(kw, expr)
# FIXME: consider the name which is unicode
return children_vals[0][expr._source_name]
self._add_node(expr, handle)
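# Resolve the resources declared on a user-defined function: file resources are opened,
# table resources become generators of named tuples, and collection resources are read
# from their already-computed DataFrames.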
def _get_resources(self, expr, kw):
if not expr._resources:
return
res = []
collection_idx = 0
for resource in expr._resources:
if isinstance(resource, FileResource):
res.append(resource.open())
elif isinstance(resource, TableResource):
def gen():
table = resource.get_source_table()
named_args = xnamedtuple('NamedArgs', table.schema.names)
partition = resource.get_source_table_partition()
with table.open_reader(partition=partition) as reader:
for r in reader:
yield named_args(*r.values)
res.append(gen())
else:
resource = expr._collection_resources[collection_idx]
collection_idx += 1
df = kw.get(resource)
def gen():
named_args = xnamedtuple('NamedArgs', resource.schema.names)
for r in df.iterrows():
yield named_args(*r[1])
res.append(gen())
return res
def visit_function(self, expr):
def handle(kw):
resources = self._get_resources(expr, kw)
if not expr._multiple:
input = self._get_children_vals(kw, expr)[0]
if isinstance(expr.inputs[0], Scalar):
input = pd.Series([input])
func = expr._func
args = expr._func_args
kwargs = expr._func_kwargs
if args is not None and len(args) > 0:
raise NotImplementedError
if kwargs is not None and len(kwargs) > 0:
raise NotImplementedError
if inspect.isclass(func):
if resources:
func = func(resources)
else:
func = func()
else:
if resources:
func = func(resources)
res = input.map(func)
if isinstance(expr.inputs[0], Scalar):
return res[0]
return res
else:
input = self._merge_values(expr.inputs, kw)
def func(s):
names = [f.name for f in expr.inputs]
t = xnamedtuple('NamedArgs', names)
row = t(*s.tolist())
if not inspect.isfunction(expr._func):
if resources:
f = expr._func(resources)
else:
f = expr._func()
else:
if resources:
f = expr._func(resources)
else:
f = expr._func
res = f(row, *expr._func_args, **expr._func_kwargs)
if not inspect.isgeneratorfunction(f):
return res
return next(res)
return input.apply(func, axis=1, reduce=True,
args=expr._func_args, **expr._func_kwargs)
self._add_node(expr, handle)
def visit_reshuffle(self, expr):
def handle(kw):
if expr._sort_fields is not None:
input = kw.get(expr._input)
names = []
for sort in expr._sort_fields:
name = str(uuid.uuid4())
input[name] = kw.get(sort)
names.append(name)
input = input.sort_values(
names, ascending=[f._ascending for f in expr._sort_fields])
return input[expr.schema.names]
return kw.get(expr._input)
self._add_node(expr, handle)
def _check_output_types(self, pd_df, expect_df_types):
for field, expect_df_type in zip(pd_df.columns, expect_df_types):
arr = pd_df[field].values
try:
df_type = types.np_type_to_df_type(pd_df[field].dtype, arr=arr)
except TypeError:
# all elements are None
continue
if not expect_df_type.can_implicit_cast(df_type):
raise TypeError('Field(%s) has wrong type, expect %s, got %s' % (
field, expect_df_type, df_type
))
return pd_df
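# Row-wise apply of a user-defined (possibly generator) function over the selected fields;
# each input row may expand into zero or more output rows, which are collected into a
# DataFrame and type-checked against the expression schema.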
def visit_apply_collection(self, expr):
def conv(l):
if isinstance(l, tuple):
l = list(l)
elif not isinstance(l, list):
l = [l, ]
return l
def handle(kw):
resources = self._get_resources(expr, kw)
input = self._merge_values(expr.fields, kw)
names = [f.name for f in expr.fields]
t = xnamedtuple('NamedArgs', names)
expr._func_args = expr._func_args or ()
expr._func_kwargs = expr._func_kwargs or {}
func = expr._func
if isinstance(func, six.string_types) and func.upper() in BUILTIN_FUNCS:
func = BUILTIN_FUNCS[func.upper()]
if inspect.isfunction(func):
if resources:
func = func(resources)
is_generator_function = inspect.isgeneratorfunction(func)
close_func = None
is_close_generator_function = False
elif hasattr(func, '__call__'):
if resources:
func = func(resources)
else:
func = func()
is_generator_function = inspect.isgeneratorfunction(func.__call__)
close_func = getattr(func, 'close', None)
is_close_generator_function = inspect.isgeneratorfunction(close_func)
else:
raise NotImplementedError
rows = []
indices = []
idx = 0
for s in input.iterrows():
row = t(*s[1])
res = func(row, *expr._func_args, **expr._func_kwargs)
expand_num = 0
if is_generator_function:
for l in res:
rows.append(conv(l))
expand_num += 1
else:
if res:
rows.append(conv(res))
expand_num += 1
if expand_num == 0 and expr._keep_nulls:
rows.append([None] * len(names))
expand_num += 1
indices.extend([s[0]] * expand_num)
idx = max(idx, s[0] + 1)
if close_func:
expand_num = 0
if is_close_generator_function:
for l in close_func(*expr._func_args, **expr._func_kwargs):
rows.append(conv(l))
expand_num += 1
else:
rows.append(close_func(*expr._func_args, **expr._func_kwargs))
expand_num += 1
indices.extend([idx] * expand_num)
if expr._lateral_view:
out_df = pd.DataFrame(rows, columns=expr.schema.names,
index=pd.Int64Index(indices))
else:
out_df = pd.DataFrame(rows, columns=expr.schema.names)
return self._check_output_types(out_df, expr.schema.types)
self._add_node(expr, handle)
def visit_lateral_view(self, expr):
def handle(kw):
lv_sources = dict()
for lv in expr.lateral_views:
for col_name in lv.schema.names:
lv_sources[col_name] = lv
children = expr.children()
fields = self._get_children_vals(kw, children=children)[1:len(expr._fields) + 1]
names = expr.schema.names
idx = reduce(operator.and_, (set(f.index.tolist()) for f, e in zip(fields, expr._fields)
if isinstance(e, SequenceExpr)))
idx = pd.Int64Index(sorted(idx))
result = pd.DataFrame(index=idx)
lv_visited = set()
for i in range(len(fields)):
f = fields[i]
if names[i] in lv_sources:
lv_src = lv_sources[names[i]]
if lv_src in lv_visited:
continue
lv_visited.add(lv_src)
f = kw[lv_src]
elif not isinstance(f, pd.Series):
f = pd.Series([f] * len(idx), index=idx, name=names[i])
result = result.join(f)
return result
self._add_node(expr, handle)
def visit_composite_op(self, expr):
def handle(kw):
def _zip_args(fields):
zip_args = []
seq_index = None
for it in fields:
if isinstance(it, SequenceExpr):
zip_args.append(kw[it])
seq_index = kw[it].index
else:
zip_args.append(itertools.repeat(kw[it]))
return seq_index, zip_args
children_vals = self._get_children_vals(kw, expr)
_input = children_vals[0]
if isinstance(expr, composites.ListDictLength):
return _input.apply(lambda v: len(v) if v is not None else None)
elif isinstance(expr, composites.ListDictGetItem):
def _get_list_item(l, x):
try:
return l[x] if l is not None else None
except IndexError:
return None
_value = children_vals[1]
if isinstance(expr.input.dtype, df_types.List):
item_fun = _get_list_item
else:
item_fun = lambda s, k: s.get(k) if s is not None else None
if isinstance(expr, Scalar):
return item_fun(_input, _value)
else:
if isinstance(expr.input, Scalar):
return _value.apply(lambda v: item_fun(_input, v))
if isinstance(expr._key, Scalar):
return _input.apply(lambda v: item_fun(v, _value))
seq_values = [item_fun(k, v) for k, v in compat.izip(_input, _value)]
return pd.Series(seq_values, index=_input.index, name=expr.name)
elif isinstance(expr, composites.ListContains):
_value = children_vals[1]
contains_fun = lambda s, k: k in s if s is not None else None
if isinstance(expr, Scalar):
return contains_fun(_input, _value)
else:
if isinstance(expr.input, Scalar):
return _value.apply(lambda v: contains_fun(_input, v))
if isinstance(expr._value, Scalar):
return _input.apply(lambda v: contains_fun(v, _value))
seq_values = [contains_fun(k, v) for k, v in compat.izip(_input, _value)]
return pd.Series(seq_values, index=_input.index, name=expr.name)
elif isinstance(expr, composites.ListSort):
return _input.apply(lambda l: sorted(l) if l is not None else None)
elif isinstance(expr, composites.DictKeys):
return _input.apply(lambda d: list(six.iterkeys(d)) if d is not None else None)
elif isinstance(expr, composites.DictValues):
return _input.apply(lambda d: list(six.itervalues(d)) if d is not None else None)
elif isinstance(expr, composites.ListBuilder):
if isinstance(expr, Scalar):
return [kw[v] for v in expr._values]
else:
seq_index, zip_args = _zip_args(expr._values)
seq_values = []
for r in compat.izip(*zip_args):
seq_values.append(list(r))
return pd.Series(seq_values, index=seq_index, name=expr.name)
elif isinstance(expr, composites.DictBuilder):
if isinstance(expr, Scalar):
return OrderedDict((kw[k], kw[v]) for k, v in compat.izip(expr._keys, expr._values))
else:
seq_index, zip_args = _zip_args(expr._keys + expr._values)
seq_values = []
dict_len = len(expr._values)
for r in zip(*zip_args):
seq_values.append(OrderedDict((k, v) for k, v in compat.izip(r[:dict_len], r[dict_len:])))
return pd.Series(seq_values, index=seq_index, name=expr.name)
else:
raise NotImplementedError
self._add_node(expr, handle)
def visit_sequence(self, expr):
raise NotImplementedError
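# Cumulative window functions are emulated with groupby + apply: rows are optionally
# re-sorted inside each partition, then a pandas cumulative op (or the NthValue logic)
# is applied per group.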
def visit_cum_window(self, expr):
if expr.preceding is not None or expr.following is not None:
raise NotImplementedError
def handle(kw):
input = kw.get(expr.input)
bys = self._get_compiled_bys(kw, expr.partition_by, len(input))
grouped = input.groupby(bys)
if expr.order_by:
sort = [kw.get(e) for e in expr.order_by]
ascendings = [e._ascending for e in expr.order_by]
for s in sort:
sort_name = str(uuid.uuid4())
s.name = sort_name
else:
sort = None
ascendings = None
def f(x):
if sort:
df = pd.concat([x] + sort, join='inner', axis=1)
df.sort_values([s.name for s in sort], ascending=ascendings, inplace=True)
series = df[x.name]
if expr.node_name in SORT_CUM_WINDOW_OP_TO_PANDAS:
return SORT_CUM_WINDOW_OP_TO_PANDAS[expr.node_name](series)
elif expr.node_name == 'NthValue':
values = [None] * len(series)
if expr._skip_nulls:
new_series = _filter_none(series)
else:
new_series = series
if expr._nth < len(new_series):
values[expr._nth:] = [new_series.iloc[expr._nth]] * (len(series) - expr._nth)
return pd.Series(values, index=series.index)
else:
raise NotImplementedError
else:
if expr.distinct:
new_x = x.drop_duplicates()
else:
new_x = x
if expr.node_name in CUM_WINDOW_OP_TO_PANDAS:
val = CUM_WINDOW_OP_TO_PANDAS[expr.node_name](new_x)
elif expr.node_name == 'NthValue':
if expr._skip_nulls:
new_series = _filter_none(x)
else:
new_series = x
if expr._nth < len(new_series):
val = new_series.iloc[expr._nth]
else:
val = None
else:
raise NotImplementedError
return pd.Series([val] * len(x), index=x.index)
res = grouped.apply(f)
if sort:
for _ in bys:
res = res.reset_index(level=0, drop=True)
return res
self._add_node(expr, handle)
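# Ranking windows (rank, dense_rank, row_number, percent_rank, cume_dist, qcut) are
# computed per partition after sorting by the order-by keys, via pandas rank/qcut on a
# zipped sort key.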
def visit_rank_window(self, expr):
def handle(kw):
input = kw.get(expr.input)
sort = [kw.get(e) * (1 if e._ascending else -1)
for e in expr.order_by]
bys = self._get_compiled_bys(kw, expr.partition_by, len(input))
sort_names = [str(uuid.uuid4()) for _ in sort]
by_names = [str(uuid.uuid4()) for _ in bys]
input_names = [input.name] if isinstance(input, pd.Series) else input.columns.tolist()
df = pd.DataFrame(pd.concat([input] + sort + [pd.Series(b) for b in bys], axis=1).values,
columns=input_names + sort_names + by_names,
index=input.index)
df.sort_values(sort_names, inplace=True)
grouped = df.groupby(by_names)
try:
pd_fast_zip = pd._libs.lib.fast_zip
except AttributeError:
pd_fast_zip = pd.lib.fast_zip
def f(x):
s_df = pd.Series(pd_fast_zip([x[s].values for s in sort_names]), index=x.index)
if expr.node_name == 'Rank':
return s_df.rank(method='min')
elif expr.node_name == 'DenseRank':
return s_df.rank(method='dense')
elif expr.node_name == 'RowNumber':
return pd.Series(compat.lrange(1, len(s_df) + 1), index=s_df.index)
elif expr.node_name == 'PercentRank':
if len(s_df) == 1:
return pd.Series([0.0, ], index=s_df.index)
return (s_df.rank(method='min') - 1) / (len(s_df) - 1)
elif expr.node_name == 'CumeDist':
return pd.Series([v * 1.0 / len(s_df) for v in compat.irange(1, len(s_df) + 1)],
index=s_df.index)
elif expr.node_name == 'QCut':
if len(s_df) <= 1:
return pd.Series([0] * len(s_df), index=s_df.index, dtype=np.int64)
return pd.Series(pd.qcut(compat.irange(1, len(s_df) + 1), expr._bins, labels=False),
index=s_df.index, dtype=np.int64)
else:
raise NotImplementedError
res = grouped.apply(f)
if isinstance(res, pd.DataFrame):
res = res.iloc[0]
else:
for _ in bys:
res = res.reset_index(level=0, drop=True)
return res
self._add_node(expr, handle)
def visit_shift_window(self, expr):
def handle(kw):
input = kw.get(expr.input)
bys = self._get_compiled_bys(kw, expr.partition_by, len(input))
grouped = input.groupby(bys)
if expr.order_by:
sort = [kw.get(e) for e in expr.order_by]
ascendings = [e._ascending for e in expr.order_by]
for s in sort:
sort_name = str(uuid.uuid4())
s.name = sort_name
else:
sort = None
ascendings = None
if expr.node_name == 'Lag':
shift = kw.get(expr.offset)
else:
assert expr.node_name == 'Lead'
shift = -kw.get(expr.offset)
default = kw.get(expr.default)
def f(x):
if sort:
df = pd.concat([x] + sort, join='inner', axis=1)
df.sort_values([s.name for s in sort], ascending=ascendings, inplace=True)
series = df[x.name]
else:
series = x
res = series.shift(shift)
if default is not None:
return res.fillna(default)
return res
res = grouped.apply(f)
if sort:
for _ in bys:
res = res.reset_index(level=0, drop=True)
return res
self._add_node(expr, handle)
def visit_scalar(self, expr):
def handle(_):
if isinstance(expr, DTScalar):
arg_name = type(expr).__name__.lower()[:-6] + 's'
value = expr.value
if arg_name == 'milliseconds':
arg_name = 'microseconds'
value *= 1000
return pd.DateOffset(**{arg_name: value})
if expr.value is not None:
return expr.value
return None
self._add_node(expr, handle)
def visit_cast(self, expr):
def handle(kw):
dtype = types.df_type_to_np_type(expr.dtype)
input = self._get_children_vals(kw, expr)[0]
if isinstance(expr._input, Scalar):
return pd.Series([input]).astype(dtype)[0]
return input.astype(dtype)
self._add_node(expr, handle)
@classmethod
def _find_all_equalizations(cls, predicate, lhs, rhs):
return [eq for eq in traverse_until_source(predicate, top_down=True, unique=True)
if isinstance(eq, arithmetic.Equal) and
eq.is_ancestor(lhs) and eq.is_ancestor(rhs)]
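# Joins compile to pandas merge: equality predicates on same-named columns join directly,
# while other equality predicates get temporary key columns generated on both sides.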
def visit_join(self, expr):
def handle(kw):
left = kw.get(expr._lhs)
right = kw.get(expr._rhs)
eqs = expr._predicate
left_ons = []
right_ons = []
on_same_names = set()
for eq in eqs:
if isinstance(eq._lhs, Column) and isinstance(eq._rhs, Column) and \
eq._lhs.source_name == eq._rhs.source_name:
left_ons.append(eq._lhs.source_name)
right_ons.append(eq._rhs.source_name)
on_same_names.add(eq._lhs.source_name)
continue
left_name = str(uuid.uuid4())
left[left_name] = kw.get(eq._lhs)
left_ons.append(left_name)
right_name = str(uuid.uuid4())
right[right_name] = kw.get(eq._rhs)
right_ons.append(right_name)
for idx, collection in enumerate([left, right]):
collection_expr = (expr._lhs, expr._rhs)[idx]
for field_name in collection_expr.schema.names:
if field_name in expr._renamed_columns and field_name in on_same_names:
new_name = expr._renamed_columns[field_name][idx]
collection[new_name] = collection[field_name]
merged = left.merge(right, how=JOIN_DICT[expr._how], left_on=left_ons,
right_on=right_ons,
suffixes=(expr._left_suffix, expr._right_suffix))
cols = []
for name in expr.schema.names:
if name in merged:
cols.append(merged[name])
else:
cols.append(merged[expr._column_origins[name][1]])
return pd.concat(cols, axis=1, keys=expr.schema.names)
# Just add node, shouldn't add edge here
node = (expr, handle)
self._dag.add_node(node)
self._expr_to_dag_node[expr] = node
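# extract_kv expands delimited key-value string columns into one output column per
# observed key, filling missing keys with the default value and rebuilding the schema.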
def visit_extract_kv(self, expr):
def handle(kw):
from ... import types
_input = kw.get(expr._input)
columns = [getattr(_input, c.name) for c in expr._columns]
kv_delim = kw.get(expr._kv_delimiter)
item_delim = kw.get(expr._item_delimiter)
default = kw.get(expr._default)
kv_slot_map = dict()
app_col_names = []
def validate_kv(v):
parts = v.split(kv_delim)
if len(parts) != 2:
raise ValueError('Malformed KV pair: %s' % v)
return parts[0]
for col in columns:
kv_slot_map[col.name] = dict()
keys = col.apply(lambda s: [validate_kv(kv) for kv in s.split(item_delim)])
for k in sorted(compat.reduce(lambda a, b: set(a) | set(b), keys, set())):
app_col_names.append('%s_%s' % (col.name, k))
kv_slot_map[col.name][k] = len(app_col_names) - 1
type_adapter = None
if isinstance(expr._column_type, types.Float):
type_adapter = float
elif isinstance(expr._column_type, types.Integer):
type_adapter = int
append_grid = [[default] * len(app_col_names) for _ in compat.irange(len(_input))]
for col in columns:
series = getattr(_input, col.name)
for idx, v in enumerate(series):
for kv_item in v.split(item_delim):
k, v = kv_item.split(kv_delim)
if type_adapter:
v = type_adapter(v)
append_grid[idx][kv_slot_map[col.name][k]] = v
intact_names = [c.name for c in expr._intact]
intact_types = [c.dtype for c in expr._intact]
intact_df = _input[intact_names]
append_df = pd.DataFrame(append_grid, columns=app_col_names)
expr._schema = Schema.from_lists(
intact_names + app_col_names,
intact_types + [expr._column_type] * len(app_col_names),
)
res = pd.concat([intact_df, append_df], axis=1)
to_sub = CollectionExpr(_source_data=res, _schema=expr._schema)
self._expr_dag.substitute(expr, to_sub)
# trigger refresh of dynamic operations
def func(expr):
for c in traverse_until_source(expr, unique=True):
if c not in self._expr_to_dag_node:
c.accept(self)
refresh_dynamic(to_sub, self._expr_dag, func=func)
return to_sub, res
self._add_node(expr, handle)
def visit_union(self, expr):
if expr._distinct:
raise CompileError("Distinct union is not supported here.")
def handle(kw):
left = kw.get(expr._lhs)
right = kw.get(expr._rhs)
merged = pd.concat([left, right])
return merged[expr.schema.names]
self._add_node(expr, handle)
def visit_concat(self, expr):
def handle(kw):
left = kw.get(expr._lhs)
right = kw.get(expr._rhs)
merged = pd.concat([left, right], axis=1)
return merged[expr.schema.names]
self._add_node(expr, handle)
def visit_append_id(self, expr):
def handle(kw):
_input = kw.get(expr._input)
id_col = kw.get(expr._id_col)
id_seq = pd.DataFrame(compat.lrange(len(_input)), columns=[id_col])
return pd.concat([id_seq, _input], axis=1)
self._add_node(expr, handle)
def visit_split(self, expr):
def handle(kw):
_input = kw.get(expr._input)
frac = kw.get(expr._frac)
seed = kw.get(expr._seed) if expr._seed else None
split_id = kw.get(expr._split_id)
if seed is not None:
np.random.seed(seed)
cols = list(_input.columns)
factor_col = 'rand_factor_%d' % int(time.time())
factor_df = pd.DataFrame(np.random.rand(len(_input)), columns=[factor_col])
concated_df = pd.concat([factor_df, _input], axis=1)
if split_id == 0:
return concated_df[concated_df[factor_col] <= frac][cols]
else:
return concated_df[concated_df[factor_col] > frac][cols]
self._add_node(expr, handle)
| 38.788603
| 114
| 0.525299
|
66363b3ee0572170ff1c87c7c325ba8d1796d9e1
| 1,346
|
py
|
Python
|
c1_2_processes/s22_ex1_26.py
|
zhou-zhenyi/sicp
|
ee6475d79a486a3bdc458378c55d0721195ea7d6
|
[
"MIT"
] | null | null | null |
c1_2_processes/s22_ex1_26.py
|
zhou-zhenyi/sicp
|
ee6475d79a486a3bdc458378c55d0721195ea7d6
|
[
"MIT"
] | null | null | null |
c1_2_processes/s22_ex1_26.py
|
zhou-zhenyi/sicp
|
ee6475d79a486a3bdc458378c55d0721195ea7d6
|
[
"MIT"
] | null | null | null |
import sys
sys.setrecursionlimit(10000000)
from time import time_ns
from random import randint
from util import square
from util import even
def timed_prime_test(n):
return start_prime_test(n, time_ns())
def start_prime_test(n, start_time):
is_prime = fast_prime(n, 1)
if is_prime:
report_prime(n, time_ns() - start_time)
return is_prime
def report_prime(n, elapsed_time):
print(str(n) + " *** " + str(elapsed_time))
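# Fermat-based primality test. As exercise 1.26 points out, expmod below makes two full
# recursive calls instead of squaring a single one, so its running time grows linearly
# with exp rather than logarithmically.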
def fast_prime(n, times):
def fermat_test(n):
def try_it(a):
def expmod(base, exp, m):
if exp == 0:
return 1
elif even(exp):
return (expmod(base, exp / 2, m) * expmod(base, exp / 2, m)) % m
else:
return base * expmod(base, exp - 1, m) % m
return expmod(a, n, n) == a
return try_it(randint(1, n - 1))
if times == 0:
return True
elif fermat_test(n):
return fast_prime(n, times - 1)
else:
return False
def search_for_prime(n, i):
if i == 0:
return
elif even(n):
n += 1
else:
n += 2
if timed_prime_test(n):
i -= 1
search_for_prime(n, i)
search_for_prime(1000, 3)
search_for_prime(10000, 3)
search_for_prime(100000, 3)
search_for_prime(1000000, 3)
| 21.709677
| 84
| 0.567608
|
58480a64dd5f4715d41bf918c0a7e8c6254ad404
| 1,044
|
py
|
Python
|
clicommandhandler.py
|
farooq-teqniqly/command-line-experiements
|
b4997b23f51a073c6a3af28888132572e3387fa8
|
[
"MIT"
] | null | null | null |
clicommandhandler.py
|
farooq-teqniqly/command-line-experiements
|
b4997b23f51a073c6a3af28888132572e3387fa8
|
[
"MIT"
] | null | null | null |
clicommandhandler.py
|
farooq-teqniqly/command-line-experiements
|
b4997b23f51a073c6a3af28888132572e3387fa8
|
[
"MIT"
] | null | null | null |
import abc
import json
from typing import Any
import click
class CliCommandHandler(click.Command, abc.ABC):
def __init__(self, name):
super(CliCommandHandler, self).__init__(
name=name, params=[click.Option(["-o", "--output"])]
)
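# Render the handler's return value according to -o/--output: 'json' pretty-prints,
# 'tsv' joins the values with tabs (also the default when no option is given).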
@classmethod
def _on_output(cls, ctx, output):
if ctx.params["output"]:
output_type = str(ctx.params["output"])
if output_type.lower() == "json":
click.echo(json.dumps(output, indent=4))
elif output_type.lower() == "tsv":
click.echo("\t".join(str(v) for v in output.values()))
else:
raise click.UsageError(
"Valid values for the output parameter are 'json' and 'tsv'."
)
else:
click.echo("\t".join(str(v) for v in output.values()))
@abc.abstractmethod
def on_invoke(self, ctx) -> Any:
pass
def invoke(self, ctx):
output = self.on_invoke(ctx)
self._on_output(ctx, output)
| 28.216216
| 81
| 0.556513
|
fc437b6022209ef1f155aec489831b1a7a2bf84b
| 18,402
|
py
|
Python
|
tests/test_schedule_compute.py
|
hj424/heterocl
|
e51b8f7f65ae6ad55c0c2426ab7192c3d8f6702b
|
[
"Apache-2.0"
] | 7
|
2019-08-20T02:43:44.000Z
|
2019-12-13T14:26:05.000Z
|
tests/test_schedule_compute.py
|
hj424/heterocl
|
e51b8f7f65ae6ad55c0c2426ab7192c3d8f6702b
|
[
"Apache-2.0"
] | null | null | null |
tests/test_schedule_compute.py
|
hj424/heterocl
|
e51b8f7f65ae6ad55c0c2426ab7192c3d8f6702b
|
[
"Apache-2.0"
] | 2
|
2019-07-18T14:13:35.000Z
|
2020-01-04T01:45:34.000Z
|
import heterocl as hcl
import numpy as np
def test_pipeline():
hcl.init()
initiation_interval = 4
a = hcl.placeholder((10, 20))
b = hcl.placeholder((10, 20))
c = hcl.compute(a.shape, lambda i, j: a[i, j] + b[i, j])
s = hcl.create_schedule([a, b, c])
s[c].pipeline(c.axis[0], initiation_interval)
ir = hcl.lower(s)
pipeline_hint_str = "\"initiation_interval\"="+str(initiation_interval)
assert pipeline_hint_str in str(ir)
def test_pipeline_num_axis():
hcl.init()
initiation_interval = 4
a = hcl.placeholder((10, 20))
b = hcl.placeholder((10, 20))
c = hcl.compute(a.shape, lambda i, j: a[i, j] + b[i, j])
s = hcl.create_schedule([a, b, c])
s[c].pipeline(0, initiation_interval)
ir = hcl.lower(s)
pipeline_hint_str = "\"initiation_interval\"="+str(initiation_interval)
assert pipeline_hint_str in str(ir)
def test_unroll():
hcl.init()
factor = 4
a = hcl.placeholder((10, 20))
b = hcl.placeholder((10, 20))
c = hcl.compute(a.shape, lambda i, j: a[i, j] + b[i, j])
s = hcl.create_schedule([a, b, c])
s[c].unroll(c.axis[0], factor=factor)
ir = hcl.lower(s)
unroll_hint_str = "\"factor\"="+str(factor)
assert unroll_hint_str in str(ir)
def test_unroll_num_axis():
hcl.init()
factor = 4
a = hcl.placeholder((10, 20))
b = hcl.placeholder((10, 20))
c = hcl.compute(a.shape, lambda i, j: a[i, j] + b[i, j])
s = hcl.create_schedule([a, b, c])
s[c].unroll(0, factor=factor)
ir = hcl.lower(s)
unroll_hint_str = "\"factor\"="+str(factor)
assert unroll_hint_str in str(ir)
def test_fuse():
hcl.init()
a = hcl.placeholder((10, 20, 30, 40))
b = hcl.placeholder((10, 20, 30, 40))
c = hcl.compute(a.shape, lambda i, j, k, l: a[i, j, k, l] + b[i, j, k, l])
s = hcl.create_schedule([a, b, c])
s[c].fuse(c.axis[1], c.axis[2])
ir = hcl.lower(s)
assert "j.k.fused" in str(ir)
def test_fuse_num_axis():
hcl.init()
a = hcl.placeholder((10, 20, 30, 40))
b = hcl.placeholder((10, 20, 30, 40))
c = hcl.compute(a.shape, lambda i, j, k, l: a[i, j, k, l] + b[i, j, k, l])
s = hcl.create_schedule([a, b, c])
s[c].fuse(1, 2)
ir = hcl.lower(s)
assert "j.k.fused" in str(ir)
def test_reorder():
hcl.init()
a = hcl.placeholder((10, 20, 30, 40), name="a")
b = hcl.placeholder((10, 20, 30, 40), name="b")
c = hcl.compute(a.shape, lambda i, j, k, l: a[i, j, k, l] + b[i, j, k, l], name="c")
# axes are consecutive
def test_case_1():
s = hcl.create_schedule([a, b, c])
s[c].reorder(c.axis[2], c.axis[1])
ir = hcl.lower(s)
assert str(ir.body.body).startswith("for (i, 0, 10)")
assert str(ir.body.body.body).startswith("for (k, 0, 30)")
assert str(ir.body.body.body.body).startswith("for (j, 0, 20)")
assert str(ir.body.body.body.body.body).startswith("for (l, 0, 40)")
# axes are not consecutive
def test_case_2():
s = hcl.create_schedule([a, b, c])
s[c].reorder(c.axis[3], c.axis[0])
ir = hcl.lower(s)
assert str(ir.body.body).startswith("for (l, 0, 40)")
assert str(ir.body.body.body).startswith("for (j, 0, 20)")
assert str(ir.body.body.body.body).startswith("for (k, 0, 30)")
assert str(ir.body.body.body.body.body).startswith("for (i, 0, 10)")
test_case_1()
test_case_2()
def test_reorder_num_axis():
hcl.init()
a = hcl.placeholder((10, 20, 30, 40), name="a")
b = hcl.placeholder((10, 20, 30, 40), name="b")
c = hcl.compute(a.shape, lambda i, j, k, l: a[i, j, k, l] + b[i, j, k, l], name="c")
s = hcl.create_schedule([a, b, c])
s[c].reorder(2, 1)
ir = hcl.lower(s)
assert str(ir.body.body).startswith("for (i, 0, 10)")
assert str(ir.body.body.body).startswith("for (k, 0, 30)")
assert str(ir.body.body.body.body).startswith("for (j, 0, 20)")
assert str(ir.body.body.body.body.body).startswith("for (l, 0, 40)")
def test_split():
hcl.init()
a = hcl.placeholder((10, 20), name="a")
b = hcl.placeholder((10, 20), name="b")
c = hcl.compute(a.shape, lambda i, j: a[i, j] + b[i, j], name="c")
# without if condition
def test_transform_mode_1():
s = hcl.create_schedule([a, b, c])
s[c].split(c.axis[1], factor=4, mode="transform")
ir = hcl.lower(s)
assert str(ir.body.body).startswith("for (i, 0, 10)")
assert str(ir.body.body.body).startswith("for (j.outer, 0, 5)")
assert str(ir.body.body.body.body).startswith("for (j.inner, 0, 4)")
assert str(ir.body.body.body.body.body).startswith("c[")
# with if condition
def test_transform_mode_2():
s = hcl.create_schedule([a, b, c])
s[c].split(c.axis[1], factor=3, mode="transform")
ir = hcl.lower(s)
assert str(ir.body.body).startswith("for (i, 0, 10)")
assert str(ir.body.body.body).startswith("for (j.outer, 0, 7)")
assert str(ir.body.body.body.body).startswith("for (j.inner, 0, 3)")
assert str(ir.body.body.body.body.body).startswith(
"if ((j.inner < (20 - (j.outer*3))))")
def test_annotate_mode():
split_factor = 3
s = hcl.create_schedule([a, b, c])
s[c].split(c.axis[1], factor=split_factor, mode="annotate")
split_hint_str = "\"split_factor\"="+str(split_factor)
ir = hcl.lower(s)
assert split_hint_str in str(ir)
test_transform_mode_1()
test_transform_mode_2()
test_annotate_mode()
def test_split_num_axis():
hcl.init()
a = hcl.placeholder((10, 20), name="a")
b = hcl.placeholder((10, 20), name="b")
c = hcl.compute(a.shape, lambda i, j: a[i, j] + b[i, j], name="c")
s = hcl.create_schedule([a, b, c])
s[c].split(1, factor=4, mode="transform")
ir = hcl.lower(s)
assert str(ir.body.body).startswith("for (i, 0, 10)")
assert str(ir.body.body.body).startswith("for (j.outer, 0, 5)")
assert str(ir.body.body.body.body).startswith("for (j.inner, 0, 4)")
assert str(ir.body.body.body.body.body).startswith("c[")
def test_split_reorder():
hcl.init()
a = hcl.placeholder((10, 20), name="a")
b = hcl.placeholder((10, 20), name="b")
c = hcl.compute(a.shape, lambda i, j: a[i, j] + b[i, j], name="c")
def test_case_1():
s = hcl.create_schedule([a, b, c])
xo, xi = s[c].split(c.axis[0], factor=2, mode="transform")
yo, yi = s[c].split(c.axis[1], factor=5, mode="transform")
s[c].reorder(yo, xo, yi, xi)
ir = hcl.lower(s)
assert str(ir.body.body).startswith("for (j.outer, 0, 4)")
assert str(ir.body.body.body).startswith("for (i.outer, 0, 5)")
assert str(ir.body.body.body.body).startswith("for (j.inner, 0, 5)")
assert str(ir.body.body.body.body.body).startswith("for (i.inner, 0, 2)")
def test_case_2():
s = hcl.create_schedule([a, b, c])
xo, xi = s[c].split(c.axis[0], factor=3, mode="transform")
yo, yi = s[c].split(c.axis[1], factor=3, mode="transform")
s[c].reorder(yi, xi, yo, xo)
ir = hcl.lower(s)
assert str(ir.body.body).startswith("for (j.inner, 0, 3)")
assert str(ir.body.body.body).startswith("for (i.inner, 0, 3)")
assert str(ir.body.body.body.body).startswith("for (j.outer, 0, 7)")
assert str(ir.body.body.body.body.body).startswith("for (i.outer, 0, 4)")
assert str(ir.body.body.body.body.body.body).startswith(
"if ((j.inner < (20 - (j.outer*3))))")
assert str(ir.body.body.body.body.body.body.then_case).startswith(
"if ((i.inner < (10 - (i.outer*3)))")
test_case_1()
test_case_2()
def test_split_reorder_num_axis():
# note that this is not the recommended way
hcl.init()
a = hcl.placeholder((10, 20), name="a")
b = hcl.placeholder((10, 20), name="b")
c = hcl.compute(a.shape, lambda i, j: a[i, j] + b[i, j], name="c")
s = hcl.create_schedule([a, b, c])
xo, xi = s[c].split(0, factor=2, mode="transform")
yo, yi = s[c].split(2, factor=5, mode="transform")
s[c].reorder(2, 0, 3, 1)
ir = hcl.lower(s)
assert str(ir.body.body).startswith("for (j.outer, 0, 4)")
assert str(ir.body.body.body).startswith("for (i.outer, 0, 5)")
assert str(ir.body.body.body.body).startswith("for (j.inner, 0, 5)")
assert str(ir.body.body.body.body.body).startswith("for (i.inner, 0, 2)")
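# The compute_at tests below attach producer B inside consumer C's loop nest; the asserted
# allocate sizes show how much of B has to be buffered at each attachment axis.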
def test_compute_at():
def _build_kernel():
hcl.init()
A = hcl.placeholder((10, 20, 30), name="A")
B = hcl.compute(A.shape, lambda i, j, m: A[i, j, m] * 2, name="B")
C = hcl.compute(B.shape, lambda ii, jj, mm: B[ii, jj, mm] + 1, name="C")
return A, B, C
def _verify_build(sch):
f = hcl.build(sch)
a_np = np.random.randint(low=0, high=100, size=(10, 20, 30))
a_hcl = hcl.asarray(a_np)
c_hcl = hcl.asarray(np.zeros(a_np.shape), dtype="int32")
f(a_hcl, c_hcl)
c_np = a_np * 2 + 1
np.testing.assert_allclose(c_np, c_hcl.asnumpy())
def test_case_1():
# axis 0
A, B, C = _build_kernel()
s0 = hcl.create_schedule([A, C])
s0[B].compute_at(s0[C], C.axis[0])
ir0 = hcl.lower(s0)
assert "allocate B[int32 * 1 * 20 * 30]" in str(ir0)
_verify_build(s0)
# axis 1
A, B, C = _build_kernel()
s1 = hcl.create_schedule([A, C])
s1[B].compute_at(s1[C], C.axis[1])
ir1 = hcl.lower(s1)
assert "allocate B[int32 * 1 * 1 * 30]" in str(ir1)
_verify_build(s1)
# axis 2
A, B, C = _build_kernel()
s2 = hcl.create_schedule([A, C])
s2[B].compute_at(s2[C], C.axis[2])
ir2 = hcl.lower(s2)
assert "allocate B[int32 * 1 * 1 * 1]" in str(ir2)
_verify_build(s2)
def test_case_2():
A, B, C = _build_kernel()
s = hcl.create_schedule([A, C])
s[B].compute_at(s[C], C.axis[2])
s[C].fuse(C.axis[0], C.axis[1])
ir = hcl.lower(s)
assert "allocate B[int32 * 1 * 1 * 1]" in str(ir)
_verify_build(s)
def test_case_3():
A, B, C = _build_kernel()
s = hcl.create_schedule([A, C])
s[B].compute_at(s[C], C.axis[2])
s[C].split(C.axis[0], factor=3)
s[C].split(C.axis[1], factor=3)
ir = hcl.lower(s)
assert "allocate B[int32 * 1 * 1 * 1]" in str(ir)
_verify_build(s)
# compute_at and reorder, compute at an axis that is not reordered
# check both directions of reorder and compute_at
def test_case_4():
A, B, C = _build_kernel()
s0 = hcl.create_schedule([A, C])
s0[B].compute_at(s0[C], C.axis[2])
s0[C].reorder(C.axis[1], C.axis[0])
ir0 = hcl.lower(s0)
assert "allocate B[int32 * 1 * 1 * 1]" in str(ir0)
_verify_build(s0)
# compute_at and reorder, compute at an axis that has been reordered
# note that the results will be different
def test_case_5():
A, B, C = _build_kernel()
s0 = hcl.create_schedule([A, C])
s0[B].compute_at(s0[C], C.axis[1])
s0[C].reorder(C.axis[1], C.axis[0])
ir0 = hcl.lower(s0)
assert "allocate B[int32 * 1 * 1 * 30]" in str(ir0)
_verify_build(s0)
def test_case_6():
A, B, C = _build_kernel()
s = hcl.create_schedule([A, C])
s[B].compute_at(s[C], C.axis[2])
yo, yi = s[C].split(C.axis[0], factor=3)
xo, xi = s[C].split(C.axis[1], factor=3)
s[C].reorder(yo, xo, yi, xi)
ir = hcl.lower(s)
assert "allocate B[int32 * 1 * 1 * 1]" in str(ir)
_verify_build(s)
test_case_1()
test_case_2()
test_case_3()
test_case_4()
test_case_5()
test_case_6()
def test_compute_at_complex():
hcl.init()
A = hcl.placeholder((10, 20, 30), name="A")
B = hcl.compute(A.shape, lambda i, j, m: A[i, j, m] * 2, name="B")
C = hcl.compute(B.shape, lambda ii, jj, mm: B[ii, jj, mm] + 1, name="C")
D = hcl.compute(C.shape, lambda iii, jjj, mmm: C[iii, jjj, mmm] % 3, name="D")
s = hcl.create_schedule([A, D])
s[B].compute_at(s[C], C.axis[1])
s[C].compute_at(s[D], D.axis[2])
ir = hcl.lower(s)
assert "allocate B[int32 * 1 * 1 * 30]" in str(ir)
assert "allocate C[int32 * 1 * 1 * 1]" in str(ir)
f = hcl.build(s)
a_np = np.random.randint(low=0, high=100, size=A.shape)
a_hcl = hcl.asarray(a_np)
d_hcl = hcl.asarray(np.zeros(D.shape), dtype="int32")
f(a_hcl, d_hcl)
d_np = (a_np * 2 + 1) % 3
np.testing.assert_allclose(d_np, d_hcl.asnumpy())
def test_compute_at_complex_num_axis():
hcl.init()
A = hcl.placeholder((10, 20, 30), name="A")
B = hcl.compute(A.shape, lambda i, j, m: A[i, j, m] * 2, name="B")
C = hcl.compute(B.shape, lambda ii, jj, mm: B[ii, jj, mm] + 1, name="C")
D = hcl.compute(C.shape, lambda iii, jjj, mmm: C[iii, jjj, mmm] % 3, name="D")
s = hcl.create_schedule([A, D])
s[B].compute_at(s[C], 1)
s[C].compute_at(s[D], 2)
ir = hcl.lower(s)
assert "allocate B[int32 * 1 * 1 * 30]" in str(ir)
assert "allocate C[int32 * 1 * 1 * 1]" in str(ir)
f = hcl.build(s)
a_np = np.random.randint(low=0, high=100, size=A.shape)
a_hcl = hcl.asarray(a_np)
d_hcl = hcl.asarray(np.zeros(D.shape), dtype="int32")
f(a_hcl, d_hcl)
d_np = (a_np * 2 + 1) % 3
np.testing.assert_allclose(d_np, d_hcl.asnumpy())
def test_compute_at_with_reuse_1D():
hcl.init()
A = hcl.compute((10, 10), lambda y, x: x + y, "A")
B = hcl.compute((10, 8), lambda y, x: A[y, x] + A[y, x+1] + A[y, x+2], "B")
s = hcl.create_schedule([B])
s[A].compute_at(s[B], B.axis[1])
ir = hcl.lower(s)
assert "allocate A[int32 * 1 * 3]" in str(ir)
f = hcl.build(s)
a_np = np.fromfunction(lambda i, j: i + j, A.shape, dtype="int")
b_np = np.zeros(B.shape, dtype="int")
c_np = np.zeros(B.shape, dtype="int")
for y in range(0, 10):
for x in range(0, 8):
c_np[y][x] = a_np[y][x] + a_np[y][x+1] + a_np[y][x+2]
b_hcl = hcl.asarray(b_np)
f(b_hcl)
np.testing.assert_array_equal(c_np, b_hcl.asnumpy())
def test_compute_at_with_reuse_2D():
hcl.init()
A = hcl.compute((10, 10), lambda y, x: x + y, "A")
B = hcl.compute((8, 8), lambda y, x: A[y, x] + A[y+1, x+1] + A[y+2, x+2], "B")
s = hcl.create_schedule([B])
s[A].compute_at(s[B], B.axis[1])
ir = hcl.lower(s)
assert "allocate A[int32 * 3 * 3]" in str(ir)
f = hcl.build(s)
a_np = np.fromfunction(lambda i, j: i + j, A.shape, dtype="int")
b_np = np.zeros(B.shape, dtype="int")
c_np = np.zeros(B.shape, dtype="int")
for y in range(0, 8):
for x in range(0, 8):
c_np[y][x] = a_np[y][x] + a_np[y+1][x+1] + a_np[y+2][x+2]
b_hcl = hcl.asarray(b_np)
f(b_hcl)
np.testing.assert_array_equal(c_np, b_hcl.asnumpy())
def test_compute_at_with_reuse_2D_complex():
hcl.init()
A = hcl.compute((10, 10), lambda y, x: x + y, "A")
B = hcl.compute((8, 8), lambda y, x: A[y, x] + A[y+1, x+1] + A[y+2, x+2], "B")
s = hcl.create_schedule([B])
s[A].compute_at(s[B], B.axis[1])
s[B].split(B.axis[1], 4)
ir = hcl.lower(s)
assert "allocate A[int32 * 3 * 3]" in str(ir)
f = hcl.build(s)
a_np = np.fromfunction(lambda i, j: i + j, A.shape, dtype="int")
b_np = np.zeros(B.shape, dtype="int")
c_np = np.zeros(B.shape, dtype="int")
for y in range(0, 8):
for x in range(0, 8):
c_np[y][x] = a_np[y][x] + a_np[y+1][x+1] + a_np[y+2][x+2]
b_hcl = hcl.asarray(b_np)
f(b_hcl)
np.testing.assert_array_equal(c_np, b_hcl.asnumpy())
def test_compute_at_no_dep():
hcl.init()
A = hcl.compute((10, 10), lambda y, x: y + x, "A")
B = hcl.compute((10, 10), lambda y, x: y - x, "B")
s = hcl.create_schedule([A, B])
s[A].compute_at(s[B], B.axis[1])
f = hcl.build(s)
a_hcl = hcl.asarray(np.zeros(A.shape, dtype="int"))
b_hcl = hcl.asarray(np.zeros(B.shape, dtype="int"))
f(a_hcl, b_hcl)
a_np = np.fromfunction(lambda i, j: i + j, A.shape, dtype="int")
b_np = np.fromfunction(lambda i, j: i - j, B.shape, dtype="int")
np.testing.assert_array_equal(a_np, a_hcl.asnumpy())
np.testing.assert_array_equal(b_np, b_hcl.asnumpy())
def test_compute_at_no_dep_diff_shape_smaller():
hcl.init()
A = hcl.compute((8, 8), lambda y, x: y + x, "A")
B = hcl.compute((10, 10), lambda y, x: y - x, "B")
s = hcl.create_schedule([A, B])
s[A].compute_at(s[B], B.axis[1])
f = hcl.build(s)
a_hcl = hcl.asarray(np.zeros(A.shape, dtype="int"))
b_hcl = hcl.asarray(np.zeros(B.shape, dtype="int"))
f(a_hcl, b_hcl)
a_np = np.fromfunction(lambda i, j: i + j, A.shape, dtype="int")
b_np = np.fromfunction(lambda i, j: i - j, B.shape, dtype="int")
np.testing.assert_array_equal(a_np, a_hcl.asnumpy())
np.testing.assert_array_equal(b_np, b_hcl.asnumpy())
def test_compute_at_no_dep_diff_shape_larger():
hcl.init()
A = hcl.compute((12, 12), lambda y, x: y + x, "A")
B = hcl.compute((10, 10), lambda y, x: y - x, "B")
s = hcl.create_schedule([A, B])
# the outer one will be truncated
s[A].compute_at(s[B], B.axis[1])
f = hcl.build(s)
a_hcl = hcl.asarray(np.zeros(A.shape, dtype="int"))
b_hcl = hcl.asarray(np.zeros(B.shape, dtype="int"))
f(a_hcl, b_hcl)
a_np = np.fromfunction(lambda i, j: i + j, A.shape, dtype="int")
b_np = np.fromfunction(lambda i, j: i - j, B.shape, dtype="int")
for i in range(0, 12):
for j in range(0, 12):
if (i >= 10 or j >= 10):
a_np[i][j] = 0
np.testing.assert_array_equal(a_np, a_hcl.asnumpy())
np.testing.assert_array_equal(b_np, b_hcl.asnumpy())
def test_multi_stage():
hcl.init()
def test(A):
r = hcl.reduce_axis(0, 10)
B = hcl.compute((10,), lambda x: hcl.sum(A[x, r], axis=r), "B")
return B
A = hcl.placeholder((10, 10))
s = hcl.create_schedule([A], test)
s[test.B].split(test.B.axis[0], 5)
f = hcl.build(s)
a_np = np.random.randint(0, 10, size=(10, 10))
b_np = np.zeros(shape=(10,), dtype="int")
a_hcl = hcl.asarray(a_np)
b_hcl = hcl.asarray(b_np)
f(a_hcl, b_hcl)
d_np = np.sum(a_np, axis=1)
np.testing.assert_array_equal(d_np, b_hcl.asnumpy())
| 38.020661
| 88
| 0.574122
|
4b513df6a7c91858a3fc62f61a52d2811837b0b5
| 11,504
|
py
|
Python
|
.github/scripts/packager.py
|
NoMaY-jp/FreeRTOS
|
7e8023a0f5095a08ee61784b7c6e74f7430cfad3
|
[
"MIT"
] | null | null | null |
.github/scripts/packager.py
|
NoMaY-jp/FreeRTOS
|
7e8023a0f5095a08ee61784b7c6e74f7430cfad3
|
[
"MIT"
] | null | null | null |
.github/scripts/packager.py
|
NoMaY-jp/FreeRTOS
|
7e8023a0f5095a08ee61784b7c6e74f7430cfad3
|
[
"MIT"
] | 1
|
2021-08-15T04:05:49.000Z
|
2021-08-15T04:05:49.000Z
|
#!/usr/bin/env python3
import os, sys
from argparse import ArgumentParser
import shutil
from zipfile import ZipFile
import subprocess
FREERTOS_GIT_LINK = 'https://github.com/FreeRTOS/FreeRTOS.git'
LABS_GIT_LINK = 'https://github.com/FreeRTOS/FreeRTOS-Labs.git'
DIR_INTERMEDIATE_FILES = os.path.join(os.path.basename(__file__).replace('.py', '-tmp-output'))
DIR_INPUT_TREES = os.path.join(DIR_INTERMEDIATE_FILES, 'baseline')
DIR_OUTPUT_TREES = os.path.join(DIR_INTERMEDIATE_FILES, 'git-head-master')
RELATIVE_FILE_EXCLUDES = [
os.path.join('.git'),
os.path.join('.github'),
os.path.join('.gitignore'),
os.path.join('.gitmodules'),
os.path.join('CONTRIBUTING.md'),
os.path.join('LICENSE.md'),
os.path.join('README.md'),
os.path.join('FreeRTOS', 'Source', '.git'),
os.path.join('FreeRTOS', 'Source', '.github'),
os.path.join('FreeRTOS', 'Source', 'CONTRIBUTING.md'),
os.path.join('FreeRTOS', 'Source', 'GitHub-FreeRTOS-Kernel-Home.url'),
os.path.join('FreeRTOS', 'Source', 'History.txt'),
os.path.join('FreeRTOS', 'Source', 'LICENSE.md'),
os.path.join('FreeRTOS', 'Source', 'Quick_Start_Guide.url'),
os.path.join('FreeRTOS', 'Source', 'README.md'),
os.path.join('FreeRTOS', 'Source', 'SECURITY.md'),
]
LABS_RELATIVE_EXCLUDE_FILES = [
os.path.join('.git')
]
# -------------------------------------------------------------------------------------------------
# Helpers
# -------------------------------------------------------------------------------------------------
def info(msg):
print('[INFO]: %s' % str(msg))
def error(msg): # assumed counterpart to info(), used by sanitize_cmd_args for fatal input errors
print('[ERROR]: %s' % str(msg))
def authorize_filetree_diff():
'''
Presents the filetree diff between baseline zip and resulting zip contents.
Then queries a 'y/n' response from user, to verify file diff.
This does not consider files that were pruned from the result filetree; it is instead
meant to show files that were added or changed relative to the baseline.
Return boolean True if user authorizes the diff, else False
'''
info('TODO')
def get_file_bytesize_diff(path_newfile, path_basefile):
return os.path.getsize(path_newfile) - os.path.getsize(path_basefile)
# -------------------------------------------------------------------------------------------------
# Core
# -------------------------------------------------------------------------------------------------
def cleanup_intermediate_files(scratch_dir):
'''
Undo and cleanup actions done by 'setup_intermediate_files()'
'''
if os.path.exists(scratch_dir):
shutil.rmtree(scratch_dir)
def unzip_baseline_zip(path_inzip, path_outdir):
'''
Unzips baseline zip into intermediate files directory. The baseline zip is used to compare against
resulting output zip and its contents, to produce filetree diffs, size diffs, or other diagnostics
'''
with ZipFile(path_inzip, 'r') as inzip:
inzip.extractall(path_outdir)
return os.path.join(path_outdir, str(os.path.basename(path_inzip)).replace('.zip', ''))
def download_git_tree(git_link, root_dir, dir_name, ref='master', commit_id='HEAD', recurse=False):
'''
Download HEAD from Git Master. Place into working files dir
'''
args = ['git', '-C', root_dir, 'clone', '-b', ref, git_link, dir_name]
subprocess.run(args, check=True)
subprocess.run(['git', '-C', os.path.join(root_dir, dir_name), 'checkout', '-f', commit_id], check=True)
subprocess.run(['git', '-C', os.path.join(root_dir, dir_name), 'clean', '-fd'], check=True)
if recurse:
subprocess.run(['git', '-C', os.path.join(root_dir, dir_name), 'submodule', 'update', '--init', '--recursive'], check=True)
return os.path.join(root_dir, dir_name)
def commit_git_tree_changes(repo_dir, commit_message=''):
subprocess.run(['git', '-C', repo_dir, 'add', '-u'], check=True)
subprocess.run(['git', '-C', repo_dir, 'commit', '-m', commit_message], check=True)
return 0
def push_git_tree_changes(repo_dir, tag=None, force_tag=False):
subprocess.run(['git', '-C', repo_dir, 'push'], check=True)
if tag != None:
force_tag_arg = '-f' if force_tag else ''
subprocess.run(['git', '-C', repo_dir, 'tag', force_tag_arg, tag], check=True)
subprocess.run(['git', '-C', repo_dir, 'push', force_tag_arg, '--tags'], check=True)
return 0
def update_submodule_pointer(repo_dir, rel_submodule_path, new_submodule_ref):
subprocess.run(['git', '-C', repo_dir, 'submodule', 'update', '--init'], check=True)
subprocess.run(['git', '-C', os.path.join(repo_dir, rel_submodule_path), 'fetch'], check=True)
subprocess.run(['git', '-C', os.path.join(repo_dir, rel_submodule_path), 'checkout', new_submodule_ref], check=True)
subprocess.run(['git', '-C', repo_dir, 'add', rel_submodule_path], check=True)
return 0
def setup_intermediate_files(scratch_dir, intree_dir, outtree_dir):
cleanup_intermediate_files(scratch_dir)
os.mkdir(scratch_dir)
os.mkdir(intree_dir)
os.mkdir(outtree_dir)
def create_file_trees(intree_dir, baseline_zip, outtree_dir, git_link, outtree_name, git_ref='master', commit_id='HEAD'):
path_in_tree = None
path_out_tree = None
# Input baseline file tree
if baseline_zip != None:
print("Unzipping baseline: '%s'..." % baseline_zip)
path_in_tree = unzip_baseline_zip(baseline_zip, intree_dir)
print('Done.')
# Output file tree to be pruned and packaged
path_out_tree = download_git_tree(git_link, outtree_dir, outtree_name, commit_id=commit_id)
return (path_in_tree, path_out_tree)
def prune_result_tree(path_root, exclude_files=[], dry_run=False):
'''
Remove all files specifed in 'exclude_files' from intermediate result file tree.
Paths in 'exclude_files' are taken relative to path_root
'''
files_removed = []
for f in exclude_files:
path_full = os.path.join(path_root, f)
if os.path.exists(path_full):
if os.path.isfile(path_full):
if not dry_run:
os.remove(path_full)
files_removed.append(path_full)
else:
if not dry_run:
shutil.rmtree(path_full)
files_removed.append(path_full)
return files_removed
def zip_result_tree(path_tree, path_outzip):
'''
Zip file tree rooted at 'path_root', using same compression as 7z at max compression,
to zip at 'path_outzip'
'''
subprocess.run(['7z', 'a', '-tzip', '-mx=9', path_outzip, os.path.join('.', path_tree, '*')])
def show_package_diagnostics(path_newzip, path_basezip):
'''
Show various diagnostics about resulting package zip including Byte-size diff from baseline
and a path to
'''
if path_basezip:
size_diff_KB = get_file_bytesize_diff(path_newzip, path_basezip) / 1024
print('\nPackage growth from baseline:\n size(%s) - size(%s) = %s%.2f KB' %
(path_newzip,
path_basezip,
'+' if size_diff_KB >= 0 else '', size_diff_KB))
def create_package(path_ziproot, path_outtree, package_name, exclude_files=[]):
print("Packaging '%s'..." % package_name)
pruned_files = prune_result_tree(path_outtree, exclude_files)
print('Files removed:\n %s' % '\n '.join(pruned_files))
path_outzip = '%s.zip' % package_name
zip_result_tree(path_ziproot, path_outzip)
print('Done.')
return path_outzip
# -------------------------------------------------------------------------------------------------
# CLI
# -------------------------------------------------------------------------------------------------
def configure_argparser():
parser = ArgumentParser(description = 'Zip packaging tool for FreeRTOS release.')
parser.add_argument('--core-input-zip',
metavar = 'CORE-BASELINE.ZIP',
default = None,
help = 'FreeRTOS baseline zip to compare against new core zip')
parser.add_argument('--labs-input-zip',
metavar = 'LABS-BASELINE.ZIP',
default = None,
help = 'FreeRTOS-Labs baseline zip to compare against new labs zip')
parser.add_argument('--zip-version',
metavar = 'PACKAGE_VERSION_NUMBER',
type = str,
default = None,
help = 'Version number to be suffixed to FreeRTOS and FreeRTOS-Labs zips')
parser.add_argument('--freertos-commit',
metavar = 'FREERTOS_COMMIT_ID',
type = str,
default = 'HEAD',
help = 'Commit ID of FreeRTOS repo to package')
return parser
def sanitize_cmd_args(args):
# Check FreeRTOS Core options
if not args.core_input_zip:
info('No FreeRTOS baseline zip provided. Zip-comparison diagnostics will not be provided...')
args.core_input_zip = None
elif not os.path.exists(args.core_input_zip):
error('Input zip does not exist: %s' % args.core_input_zip)
exit(1)
# Check FreeRTOS Labs options
if not args.labs_input_zip:
info('No FreeRTOS-Labs baseline zip provided. Zip-comparison diagnostics will not be provided...')
args.labs_input_zip = None
elif not os.path.exists(args.labs_input_zip):
error('Input zip does not exist: %s' % args.labs_input_zip)
exit(1)
# Check version options
if args.zip_version == None:
info('No version string provided. Will use "XX.YY.ZZ" as version suffix...')
args.zip_version = 'XX.YY.ZZ'
def main():
# CLI
cmd = configure_argparser()
# Setup
args = cmd.parse_args()
sanitize_cmd_args(args)
setup_intermediate_files(DIR_INTERMEDIATE_FILES, DIR_INPUT_TREES, DIR_OUTPUT_TREES)
# Create FreeRTOS and FreeRTOS-Labs packages
core_package_name = 'FreeRTOSv%s' % args.zip_version
(path_core_in_tree, path_core_out_tree) = create_file_trees(DIR_INPUT_TREES,
args.core_input_zip,
DIR_OUTPUT_TREES,
FREERTOS_GIT_LINK,
core_package_name,
commit_id=args.freertos_commit)
if path_core_out_tree == None:
print('Failed to prepare repo for zipping')
exit(1);
core_outzip = create_package(path_core_out_tree, path_core_out_tree, core_package_name, RELATIVE_FILE_EXCLUDES)  # zip root assumed to be the pruned tree itself
# Create FreeRTOS-Labs package
labs_package_name = 'FreeRTOS-Labs'
(path_labs_in_tree, path_labs_out_tree) = create_file_trees(DIR_INPUT_TREES,
args.labs_input_zip,
DIR_OUTPUT_TREES,
LABS_GIT_LINK,
labs_package_name)
if path_labs_out_tree == None:
print('Failed to prepare repo for zipping')
exit(1);
labs_outzip = create_package(path_labs_out_tree, path_labs_out_tree, labs_package_name, LABS_RELATIVE_EXCLUDE_FILES)  # zip root assumed to be the pruned tree itself
# Package summaries
show_package_diagnostics(core_outzip, args.core_input_zip)
show_package_diagnostics(labs_outzip, args.labs_input_zip)
if __name__ == '__main__':
main()
| 40.507042
| 131
| 0.599096
|
931d2ee1f16fcc0b65231d862418fb54e2b4df84
| 1,925
|
py
|
Python
|
example/example_test.py
|
bsquizz/pysurge
|
f225f8518611bf742dfbc1919da80e08e7a31c1f
|
[
"MIT"
] | null | null | null |
example/example_test.py
|
bsquizz/pysurge
|
f225f8518611bf742dfbc1919da80e08e7a31c1f
|
[
"MIT"
] | 4
|
2020-07-09T02:06:47.000Z
|
2020-08-03T15:11:27.000Z
|
example/example_test.py
|
bsquizz/pysurge
|
f225f8518611bf742dfbc1919da80e08e7a31c1f
|
[
"MIT"
] | null | null | null |
import time
import uuid
import requests
from pysurge import TestCase
class ExampleTest(TestCase):
@classmethod
def startup(cls):
# A single requests session will be used for all tests fired in a
# child process' thread pool
cls.session = requests.Session()
@classmethod
def shutdown(cls):
pass
def __init__(self, **kwargs):
super().__init__(**kwargs)
# Create a unique request ID to send as a header so we can track the requests
# on the server side.
self.request_id = str(uuid.uuid4())
# Allow for a kwarg called 'path' to be specified which changes the URL that this
# test will send requests to.
self.path = str(kwargs.get("path", ""))
# Allow for a kwarg that defines the base url
self.url = kwargs.get("url", "http://localhost:80")
@property
def summary(self):
# A brief summary of this test -- used in logging and report printing
return "example test"
@property
def description(self):
# A brief description of this test -- used in logging -- usually useful to include
# "unique identifiers" in the test to help when analyzing logs.
return f"example test: request id {self.request_id}"
@property
def max_duration(self):
# How long we think each test instance takes to run at a maximum
return 180
def setup(self):
pass
def teardown(self):
pass
def run(self):
headers = {"request-id": self.request_id}
start_time = time.time()
r = self.session.get(f"{self.url}/{self.path}", headers=headers)
end_time = time.time()
# A metric called 'response_time' is stored for each test.
self.metrics["response_time"] = end_time - start_time
# If the test hits an exception, it will be marked as a failure.
r.raise_for_status()
| 31.048387
| 90
| 0.631169
|
d1bdf5d12343331d31148f699093dc841acdd7c1
| 2,654
|
py
|
Python
|
src/amqp-examples/fortune-cookie-client.py
|
vert-x3/vertx-amqp-service
|
4f9296b0f5771da2a0a14ef37b2abb469623d2b8
|
[
"Apache-2.0"
] | 3
|
2015-07-03T08:42:16.000Z
|
2015-12-04T23:23:45.000Z
|
src/amqp-examples/fortune-cookie-client.py
|
vert-x3/vertx-amqp-service
|
4f9296b0f5771da2a0a14ef37b2abb469623d2b8
|
[
"Apache-2.0"
] | 11
|
2015-06-04T16:59:37.000Z
|
2021-06-08T14:39:20.000Z
|
src/amqp-examples/fortune-cookie-client.py
|
vert-x3/vertx-amqp-service
|
4f9296b0f5771da2a0a14ef37b2abb469623d2b8
|
[
"Apache-2.0"
] | 4
|
2015-03-19T02:23:53.000Z
|
2017-08-07T23:25:48.000Z
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import optparse
import uuid
from proton import Message
from proton.handlers import MessagingHandler
from proton.reactor import Container, DynamicNodeProperties
class FortuneCookieClient(MessagingHandler):
def __init__(self, service_addr, resp_addr):
super(FortuneCookieClient, self).__init__()
self.service_addr = service_addr
self.reply_to = resp_addr + '/' + str(uuid.uuid1())
def on_start(self, event):
self.sender = event.container.create_sender(self.service_addr)
self.receiver = event.container.create_receiver(self.reply_to)
self.receiver.flow(1)
def on_sendable(self, event):
print "\n===================================="
print "fortune-cookie-service has granted a single request credit"
event.sender.send(Message(reply_to=self.reply_to));
print "Sent a request for a fortune cookie"
def on_accept(self, event):
print "fortune-cookie-service has received my request and has accepted it"
def on_message(self, event):
print "Received my fortune cookie : '%s'" % event.message.body
self.accept(event.delivery)
print "Accepted the cookie"
print "====================================\n"
parser = optparse.OptionParser(usage="usage: %prog [options]",
description="Send requests to the supplied address and print responses.")
parser.add_option("--service_addr", default="localhost:5673/fortune-cookie-service",
help="AMQP address for fortune-cookie-service(default %default)")
parser.add_option("--response_addr", default="localhost:5673",
help="address to which responses are sent by the service (default %default)")
opts, args = parser.parse_args()
Container(FortuneCookieClient(opts.service_addr, opts.response_addr)).run()
| 42.126984
| 104
| 0.701583
|