hexsha (string, length 40) | size (int64, 4 to 1.02M) | ext (string, 8 classes) | lang (string, 1 class) | max_stars_repo_path (string, length 4 to 209) | max_stars_repo_name (string, length 5 to 121) | max_stars_repo_head_hexsha (string, length 40) | max_stars_repo_licenses (list, length 1 to 10) | max_stars_count (int64, 1 to 191k, nullable) | max_stars_repo_stars_event_min_datetime (string, length 24, nullable) | max_stars_repo_stars_event_max_datetime (string, length 24, nullable) | max_issues_repo_path (string, length 4 to 209) | max_issues_repo_name (string, length 5 to 121) | max_issues_repo_head_hexsha (string, length 40) | max_issues_repo_licenses (list, length 1 to 10) | max_issues_count (int64, 1 to 67k, nullable) | max_issues_repo_issues_event_min_datetime (string, length 24, nullable) | max_issues_repo_issues_event_max_datetime (string, length 24, nullable) | max_forks_repo_path (string, length 4 to 209) | max_forks_repo_name (string, length 5 to 121) | max_forks_repo_head_hexsha (string, length 40) | max_forks_repo_licenses (list, length 1 to 10) | max_forks_count (int64, 1 to 105k, nullable) | max_forks_repo_forks_event_min_datetime (string, length 24, nullable) | max_forks_repo_forks_event_max_datetime (string, length 24, nullable) | content (string, length 4 to 1.02M) | avg_line_length (float64, 1.07 to 66.1k) | max_line_length (int64, 4 to 266k) | alphanum_fraction (float64, 0.01 to 1)
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
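The rows that follow conform to this schema, with the full file text carried in the `content` column. Below is a minimal sketch of iterating such rows, assuming the dump comes from a Hugging Face `datasets`-style table; the dataset path `bigcode/the-stack` is only an assumed placeholder.

```python
# Minimal sketch, assuming a Hugging Face datasets-style source with the
# columns listed in the header above; "bigcode/the-stack" is a placeholder.
from datasets import load_dataset

rows = load_dataset("bigcode/the-stack", split="train", streaming=True)
for row in rows.take(2):
    # Columns marked nullable in the schema may come back as None.
    print(row["hexsha"], row["ext"], row["size"])
    print(row["max_stars_repo_name"], row["max_stars_count"])
    print(row["content"][:60])  # the file text lives in the "content" column
```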
ff80adc369c47d43fabfa8cf7cbfcf649b9bfff8 | 8,538 | py | Python | tests/test_modeling_tf_auto.py | theainerd/transformers | f7328de46dbeda4992a093a0501932bf0fc7b76f | ["Apache-2.0"] | 34 | 2021-07-05T02:44:31.000Z | 2022-03-28T14:39:57.000Z | tests/test_modeling_tf_auto.py | theainerd/transformers | f7328de46dbeda4992a093a0501932bf0fc7b76f | ["Apache-2.0"] | 3 | 2021-07-22T15:49:44.000Z | 2022-03-19T08:46:27.000Z | tests/test_modeling_tf_auto.py | theainerd/transformers | f7328de46dbeda4992a093a0501932bf0fc7b76f | ["Apache-2.0"] | 6 | 2021-07-05T02:44:32.000Z | 2022-02-14T10:10:13.000Z | # coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_tf_available
from transformers.testing_utils import DUMMY_UNKWOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, require_tf, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPT2Config,
T5Config,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeq2SeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPT2LMHeadModel,
TFRobertaForMaskedLM,
TFT5ForConditionalGeneration,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
TF_MODEL_WITH_LM_HEAD_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
@require_tf
class TFAutoModelTest(unittest.TestCase):
@slow
def test_model_from_pretrained(self):
import h5py
self.assertTrue(h5py.version.hdf5_version.startswith("1.10"))
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = TFAutoModel.from_pretrained(model_name)
self.assertIsNotNone(model)
self.assertIsInstance(model, TFBertModel)
@slow
def test_model_for_pretraining_from_pretrained(self):
import h5py
self.assertTrue(h5py.version.hdf5_version.startswith("1.10"))
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = TFAutoModelForPreTraining.from_pretrained(model_name)
self.assertIsNotNone(model)
self.assertIsInstance(model, TFBertForPreTraining)
@slow
def test_model_for_causal_lm(self):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, GPT2Config)
model = TFAutoModelForCausalLM.from_pretrained(model_name)
model, loading_info = TFAutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, TFGPT2LMHeadModel)
@slow
def test_lmhead_model_from_pretrained(self):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = TFAutoModelWithLMHead.from_pretrained(model_name)
self.assertIsNotNone(model)
self.assertIsInstance(model, TFBertForMaskedLM)
@slow
def test_model_for_masked_lm(self):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = TFAutoModelForMaskedLM.from_pretrained(model_name)
model, loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, TFBertForMaskedLM)
@slow
def test_model_for_encoder_decoder_lm(self):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, T5Config)
model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name)
model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, TFT5ForConditionalGeneration)
@slow
def test_sequence_classification_model_from_pretrained(self):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
self.assertIsNotNone(model)
self.assertIsInstance(model, TFBertForSequenceClassification)
@slow
def test_question_answering_model_from_pretrained(self):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = TFAutoModelForQuestionAnswering.from_pretrained(model_name)
self.assertIsNotNone(model)
self.assertIsInstance(model, TFBertForQuestionAnswering)
def test_from_pretrained_identifier(self):
model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
self.assertIsInstance(model, TFBertForMaskedLM)
self.assertEqual(model.num_parameters(), 14410)
self.assertEqual(model.num_parameters(only_trainable=True), 14410)
def test_from_identifier_from_model_type(self):
model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKWOWN_IDENTIFIER)
self.assertIsInstance(model, TFRobertaForMaskedLM)
self.assertEqual(model.num_parameters(), 14410)
self.assertEqual(model.num_parameters(only_trainable=True), 14410)
def test_parents_and_children_in_mappings(self):
        # Test that the children are placed before the parents in the mappings, as the isinstance
        # checks would otherwise match the parents first and return the wrong configuration type when using auto models
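        # Illustrative note (added, not from the original test): in this era of
        # transformers, RobertaConfig subclasses BertConfig, so the Roberta
        # entries must come before the Bert entries or
        # isinstance(roberta_config, BertConfig) would match Bert first.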
mappings = (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_WITH_LM_HEAD_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
)
for mapping in mappings:
mapping = tuple(mapping.items())
for index, (child_config, child_model) in enumerate(mapping[1:]):
for parent_config, parent_model in mapping[: index + 1]:
with self.subTest(msg=f"Testing if {child_config.__name__} is child of {parent_config.__name__}"):
self.assertFalse(issubclass(child_config, parent_config))
self.assertFalse(issubclass(child_model, parent_model))
| 42.477612 | 118 | 0.715273 |
180eb60091293fb6ebc8ea2454a6d4ccf6e11be1 | 1,424 | py | Python | pyprobml-master/examples/patsyCategoricalDemo.py | storopoli/Machine-Learning-Probalistic | f8617e7b81f4d6c71e72edc40ba11ac746794a95 | ["MIT"] | 1 | 2019-03-04T05:43:10.000Z | 2019-03-04T05:43:10.000Z | Old/examples/patsyCategoricalDemo.py | tywang89/pyprobml | 82cfdcb8daea653cda8f77e8737e585418476ca7 | ["MIT"] | null | null | null | Old/examples/patsyCategoricalDemo.py | tywang89/pyprobml | 82cfdcb8daea653cda8f77e8737e585418476ca7 | ["MIT"] | null | null | null | from patsy import dmatrix, demo_data
# demo of how patsy handles categorical variables
# Patsy notation is described here
#http://statsmode#ls.sourceforge.net/devel/example_formulas.html
#http://patsy.readthedocs.org/en/latest/categorical-coding.html
data = demo_data("a", nlevels=3)
dmatrix("a", data)
'''
DesignMatrix with shape (6, 3)
Intercept a[T.a2] a[T.a3]
1 0 0
1 1 0
1 0 1
1 0 0
1 1 0
1 0 1
Terms:
'Intercept' (column 0)
'a' (columns 1:3)
'''
data = demo_data("a", nlevels=3)
dmatrix("a-1", data)
'''
DesignMatrix with shape (6, 3)
a[a1] a[a2] a[a3]
1 0 0
0 1 0
0 0 1
1 0 0
0 1 0
0 0 1
Terms:
'a' (columns 0:3)'''
data = demo_data("a", nlevels=2)
dmatrix("a", data)
'''
DesignMatrix with shape (6, 2)
Intercept a[T.a2]
1 0
1 1
1 0
1 1
1 0
1 1
Terms:
'Intercept' (column 0)
'a' (column 1)'''
data = demo_data("a", nlevels=2)
dmatrix("a-1", data)
'''
DesignMatrix with shape (6, 2)
a[a1] a[a2]
1 0
0 1
1 0
0 1
1 0
0 1
Terms:
'a' (columns 0:2)
'''
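# Illustrative extra example (not in the original demo): the coding scheme can
# also be requested explicitly with patsy's C() wrapper; Treatment(reference=1)
# makes level 'a2' the omitted baseline instead of the default first level.
data = demo_data("a", nlevels=3)
dmatrix("C(a, Treatment(reference=1))", data)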
| 20.342857 | 64 | 0.443118 |
dbc93155e9fd809ac724e91a8766b1c19d3b9c30 | 1,001 | py | Python | webdriver/tests/bidi/browsing_context/get_tree/invalid.py | BasixKOR/wpt | aa27d567c10dcdb2aea6884d5155dfaaa177a800 | ["BSD-3-Clause"] | null | null | null | webdriver/tests/bidi/browsing_context/get_tree/invalid.py | BasixKOR/wpt | aa27d567c10dcdb2aea6884d5155dfaaa177a800 | ["BSD-3-Clause"] | 112 | 2021-09-27T14:39:02.000Z | 2022-03-30T14:26:35.000Z | webdriver/tests/bidi/browsing_context/get_tree/invalid.py | clopez/wpt | 4ba8a4a1f41e166289c0a7feaa5665e1385e90f3 | ["BSD-3-Clause"] | null | null | null | import pytest
import webdriver.bidi.error as error
pytestmark = pytest.mark.asyncio
@pytest.mark.parametrize("value", [False, "foo", {}, []])
async def test_params_max_depth_invalid_type(bidi_session, value):
with pytest.raises(error.InvalidArgumentException):
await bidi_session.browsing_context.get_tree(max_depth=value)
@pytest.mark.parametrize("value", [-1, 1.1, 2**53])
async def test_params_max_depth_invalid_value(bidi_session, value):
with pytest.raises(error.InvalidArgumentException):
await bidi_session.browsing_context.get_tree(max_depth=value)
@pytest.mark.parametrize("value", [False, 42, {}, []])
async def test_params_root_invalid_type(bidi_session, value):
with pytest.raises(error.InvalidArgumentException):
await bidi_session.browsing_context.get_tree(root=value)
async def test_params_root_invalid_value(bidi_session):
with pytest.raises(error.NoSuchFrameException):
await bidi_session.browsing_context.get_tree(root="foo")
| 35.75 | 69 | 0.775225 |
818b1634563c001479063caf87731133dad76650 | 17,121 | py | Python | test/test_shared.py | facebookresearch/mpcfp | cb29797aa4f2ce524dd584ecf47c863fd9f414a6 | ["MIT"] | 5 | 2020-11-18T23:55:17.000Z | 2022-01-14T07:15:35.000Z | test/test_shared.py | facebookresearch/mpcfp | cb29797aa4f2ce524dd584ecf47c863fd9f414a6 | ["MIT"] | null | null | null | test/test_shared.py | facebookresearch/mpcfp | cb29797aa4f2ce524dd584ecf47c863fd9f414a6 | ["MIT"] | 2 | 2021-11-06T14:06:13.000Z | 2022-01-14T07:16:29.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# dependencies:
import itertools
import sys
import torch
import unittest
import torch.distributed as dist
sys.path.append("..")
from shared import SharedTensor
from multiprocess_test_case import MultiProcessTestCase
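# Note added for clarity: each process draws its own random tensor below, then
# rank 0 broadcasts its draw so every party in the multiprocess test starts
# from the same plaintext reference values.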
def get_random_test_tensor(max_value=6, size=(5, 5)):
tensor = (2 * torch.rand(*size, dtype=torch.float64) - 1) * max_value
if dist.is_initialized():
dist.broadcast(tensor, 0)
return tensor.type(torch.float64)
class TestShared(MultiProcessTestCase):
"""
This class tests all functions of the shared tensors.
"""
benchmarks_enabled = False
def setUp(self):
super().setUp()
def _check(self, encrypted_tensor, reference, msg, tol=1e-4):
tensor = encrypted_tensor.get_plain_text()
if self.rank != 0: # Do not check for non-0 rank
return
# Check sizes match
self.assertTrue(tensor.size() == reference.size(), msg)
test_passed = torch.allclose(
tensor, reference, rtol=tol, atol=tol)
self.assertTrue(test_passed, msg=msg)
def test_encrypt_decrypt(self):
"""
Tests tensor encryption and decryption for both positive
and negative values.
"""
reference = get_random_test_tensor()
encrypted_tensor = SharedTensor(reference)
self._check(encrypted_tensor, reference, 'en/decryption failed')
def test_clone(self):
reference = get_random_test_tensor()
encrypted_tensor = SharedTensor(reference)
cloned = encrypted_tensor.clone()
self._check(cloned, reference, 'cloning failed')
def test_arithmetic(self):
"""Tests arithmetic functions on encrypted tensor."""
arithmetic_functions = ['add', 'add_', 'sub', 'sub_', 'mul', 'mul_']
for func in arithmetic_functions:
for tensor_type in [lambda x: x, SharedTensor]:
tensor1 = get_random_test_tensor()
tensor2 = get_random_test_tensor()
encrypted = SharedTensor(tensor1)
encrypted2 = tensor_type(tensor2)
reference = getattr(tensor1, func)(tensor2)
encrypted_out = getattr(encrypted, func)(encrypted2)
msg = '%s %s failed' % (
'private' if tensor_type is SharedTensor else 'public',
func)
self._check(encrypted_out, reference, msg)
if '_' in func:
# Check in-place op worked
self._check(encrypted, reference, msg)
else:
# Check original is not modified
self._check(encrypted, tensor1, msg)
# Check encrypted vector with encrypted scalar works.
tensor1 = get_random_test_tensor()
tensor2 = get_random_test_tensor(size=(1, 1))
encrypted1 = SharedTensor(tensor1)
encrypted2 = SharedTensor(tensor2)
reference = getattr(tensor1, func)(tensor2)
encrypted_out = getattr(encrypted1, func)(encrypted2)
self._check(encrypted_out, reference, msg)
# Test radd, rsub, and rmul
tensor = get_random_test_tensor()
reference = 2 + tensor
encrypted = SharedTensor(tensor)
encrypted_out = 2 + encrypted
self._check(encrypted_out, reference, 'right add failed')
reference = 2 - tensor
encrypted_out = 2 - encrypted
self._check(encrypted_out, reference, 'right sub failed')
reference = 2 * tensor
encrypted_out = 2 * encrypted
self._check(encrypted_out, reference, 'right mul failed')
def test_broadcast(self):
"""Test broadcast functionality."""
arithmetic_functions = ['add', 'sub', 'mul']
sizes = [(2, 2), (2, 1), (1, 2), (1, 1)]
for func in arithmetic_functions:
for tensor_type in [lambda x: x, SharedTensor]:
for size1, size2 in itertools.combinations(sizes, 2):
tensor1 = get_random_test_tensor(size=size1)
tensor2 = get_random_test_tensor(size=size2)
encrypted = SharedTensor(tensor1)
encrypted2 = tensor_type(tensor2)
reference = getattr(tensor1, func)(tensor2)
encrypted_out = getattr(encrypted, func)(encrypted2)
self._check(
encrypted_out, reference,
'%s %s failed' % ('private' if tensor_type
is SharedTensor else 'public', func)
)
def test_transpose(self):
"""Tests transpose on encrypted tensor."""
funcs = ['transpose', 'transpose_']
for func in funcs:
tensor = get_random_test_tensor()
encrypted = SharedTensor(tensor)
reference = getattr(tensor, func)(0, 1)
encrypted_out = getattr(encrypted, func)(0, 1)
msg = 'private %s failed' % func
self._check(encrypted_out, reference, msg)
if '_' in func:
# Check in-place op worked
self._check(encrypted, reference, msg)
else:
# Check original is not modified
self._check(encrypted, tensor, msg)
# Check property
tensor = get_random_test_tensor()
encrypted = SharedTensor(tensor)
self._check(encrypted.T, tensor.T, msg)
def test_matmul(self):
"""Test matrix multiplication."""
for tensor_type in [lambda x: x, SharedTensor]:
tensor = get_random_test_tensor()
for width in range(2, tensor.shape[1]):
matrix_size = (tensor.shape[1], width)
matrix = get_random_test_tensor(size=matrix_size)
reference = tensor.matmul(matrix)
encrypted_tensor = SharedTensor(tensor)
matrix = tensor_type(matrix)
encrypted_tensor = encrypted_tensor.matmul(matrix)
self._check(
encrypted_tensor, reference,
'Private-%s matrix multiplication failed' %
('private' if tensor_type is SharedTensor else 'public')
)
def test_reductions(self):
"""Test reduction operations."""
funcs = ['sum']
for func in funcs:
tensor = get_random_test_tensor()
encrypted = SharedTensor(tensor)
reference = getattr(tensor, func)()
encrypted_out = getattr(encrypted, func)()
self._check(encrypted_out, reference, 'private %s failed' % func)
for dim in [0, 1]:
reference = getattr(tensor, func)(dim)
encrypted_out = getattr(encrypted, func)(dim)
self._check(
encrypted_out, reference, 'private %s failed' % func)
def test_get_set(self):
for size in range(1, 5):
# Test __getitem__
tensor = get_random_test_tensor(size=(size, size))
reference = tensor[:, 0]
encrypted_tensor = SharedTensor(tensor)
encrypted_out = encrypted_tensor[:, 0]
self._check(encrypted_out, reference, 'getitem failed')
reference = tensor[0, :]
encrypted_out = encrypted_tensor[0, :]
self._check(encrypted_out, reference, 'getitem failed')
for encrypted_type in [lambda x: x, SharedTensor]:
# Test __setitem__
tensor2 = get_random_test_tensor(size=(size,))
reference = tensor.clone()
reference[:, 0] = tensor2
encrypted_out = SharedTensor(tensor)
encrypted2 = encrypted_type(tensor2)
encrypted_out[:, 0] = encrypted2
self._check(
encrypted_out, reference,
'%s setitem failed' % type(encrypted2))
reference = tensor.clone()
reference[0, :] = tensor2
encrypted_out = SharedTensor(tensor)
encrypted2 = encrypted_type(tensor2)
encrypted_out[0, :] = encrypted2
self._check(
encrypted_out, reference,
'%s setitem failed' % type(encrypted2))
def test_cuda(self):
if not torch.cuda.is_available():
return
tensor1 = SharedTensor(get_random_test_tensor())
tensor2 = SharedTensor(get_random_test_tensor())
reference = (tensor1 * tensor2).get_plain_text()
tensor1 = tensor1.cuda()
tensor2 = tensor2.cuda()
out = tensor1 * tensor2
self._check(out.cpu(), reference, "CUDA op failed")
def test_conv(self):
"""Test convolution of encrypted tensor with public/private tensors."""
for kernel_type in [lambda x: x, SharedTensor]:
for matrix_width in range(2, 5):
for kernel_width in range(1, matrix_width):
for padding in range(kernel_width // 2 + 1):
matrix_size = (5, matrix_width)
matrix = get_random_test_tensor(size=matrix_size)
kernel_size = (kernel_width, kernel_width)
kernel = get_random_test_tensor(size=kernel_size)
matrix = matrix.unsqueeze(0).unsqueeze(0)
kernel = kernel.unsqueeze(0).unsqueeze(0)
reference = torch.nn.functional.conv2d(
matrix, kernel, padding=padding)
encrypted_matrix = SharedTensor(matrix)
encrypted_kernel = kernel_type(kernel)
encrypted_conv = encrypted_matrix.conv2d(
encrypted_kernel, padding=padding
)
self._check(encrypted_conv, reference, 'conv2d failed')
def test_pooling(self):
"""Test average pooling on encrypted tensor."""
for width in range(2, 5):
for width2 in range(1, width):
matrix_size = (4, 5, width)
matrix = get_random_test_tensor(size=matrix_size)
pool_size = width2
for stride in range(1, width2):
for padding in range(2):
reference = torch.nn.functional.avg_pool2d(
matrix.unsqueeze(0), pool_size,
stride=stride, padding=padding
)
encrypted_matrix = SharedTensor(matrix)
encrypted_pool = encrypted_matrix.avg_pool2d(
pool_size, stride=stride, padding=padding)
self._check(
encrypted_pool, reference[0], 'avg_pool2d failed')
def test_square(self):
"""Test square."""
funcs = ['square', 'square_']
for func in funcs:
tensor = get_random_test_tensor()
encrypted = SharedTensor(tensor)
reference = tensor * tensor
encrypted_out = getattr(encrypted, func)()
self._check(encrypted_out, reference, 'private %s failed' % func)
def test_div(self):
"""Tests division by numbers in [1, 2] on encrypted tensor."""
funcs = ['div', 'div_']
for func in funcs:
for tensor_type in [lambda x: x, SharedTensor]:
tensor1 = get_random_test_tensor()
tensor2 = get_random_test_tensor(max_value=0.5) + 1.5
encrypted = SharedTensor(tensor1)
encrypted2 = tensor_type(tensor2)
reference = getattr(tensor1, func)(tensor2)
encrypted_out = getattr(encrypted, func)(encrypted2)
msg = '%s %s failed' % (
'private' if tensor_type is SharedTensor else 'public',
func)
self._check(encrypted_out, reference, msg)
if '_' in func:
# Check in-place op worked
self._check(encrypted, reference, msg)
else:
# Check original is not modified
self._check(encrypted, tensor1, msg)
def test_sign(self):
"""Tests sign on encrypted tensor."""
funcs = ['sign', 'sign_', 'abs', 'abs_', 'relu', 'relu_']
for func in funcs:
tensor = get_random_test_tensor(max_value=1e4)
if func != 'sign' and func != 'sign_':
# Make sure we test with some entry, say entry (0, 0), being 0
tensor[0, 0] = 0
encrypted = SharedTensor(tensor)
reference = getattr(tensor, func)()
encrypted_out = getattr(encrypted, func)()
self._check(encrypted_out, reference, "%s failed" % func)
def test_exp(self):
"""Tests exp on encrypted tensor."""
funcs = ['exp', 'exp_']
for func in funcs:
tensor = get_random_test_tensor(max_value=2)
encrypted = SharedTensor(tensor)
reference = getattr(tensor, func)()
encrypted_out = getattr(encrypted, func)()
self._check(encrypted_out, reference, "%s failed" % func)
def test_tanh(self):
funcs = ['tanh', 'tanh_']
for func in funcs:
tensor = get_random_test_tensor(max_value=2)
encrypted = SharedTensor(tensor)
reference = getattr(tensor, func)()
encrypted_out = getattr(encrypted, func)()
self._check(encrypted_out, reference, "%s failed" % func)
def test_sigmoid(self):
funcs = ['sigmoid', 'sigmoid_']
for func in funcs:
tensor = get_random_test_tensor(max_value=2)
encrypted = SharedTensor(tensor)
reference = getattr(tensor, func)()
encrypted_out = getattr(encrypted, func)()
self._check(encrypted_out, reference, "%s failed" % func)
def test_softmax(self):
axes = [0, 1, 2]
funcs = ['softmax', 'softmax_']
for axis in axes:
for func in funcs:
tensor = get_random_test_tensor(max_value=5, size=(5, 5, 5))
encrypted = SharedTensor(tensor)
encrypted_out = getattr(encrypted, func)(axis)
# Calculate the plaintext reference.
x = tensor.clone()
x.relu_()
x = x.sum(axis, keepdim=True)
tensor.sub_(x)
tensor.exp_()
tensor.mul_(tensor.sum(axis, keepdim=True).reciprocal_())
reference = tensor
# Reduce the tolerance requested for softmax.
self._check(encrypted_out, reference, "%s failed" % func)
def test_erf(self):
funcs = ['erf', 'erf_']
for func in funcs:
tensor = get_random_test_tensor(max_value=2)
encrypted = SharedTensor(tensor)
reference = getattr(tensor, func)()
encrypted_out = getattr(encrypted, func)()
self._check(encrypted_out, reference, "%s failed" % func)
def test_reciprocal(self):
"""Test reciprocal."""
funcs = ['reciprocal', 'reciprocal_']
for func in funcs:
for scale in [2, 10]:
tensor = get_random_test_tensor(max_value=(scale - 1) / 2)
tensor += 1 + (scale - 1) / 2
encrypted = SharedTensor(tensor)
reference = getattr(tensor, func)()
encrypted_out = getattr(encrypted, func)(scale=scale)
self._check(
encrypted_out, reference, 'private %s failed' % func)
def test_invsqrt(self):
"""Test invsqrt."""
funcs = ['invsqrt', 'invsqrt_']
for func in funcs:
tensor = (get_random_test_tensor(max_value=1000) + 1001) / 2001
encrypted = SharedTensor(tensor)
reference = tensor.sqrt().reciprocal()
encrypted_out = getattr(encrypted, func)()
self._check(encrypted_out, reference, 'private %s failed' % func)
def test_inv8root(self):
"""Test inv8root."""
funcs = ['inv8root', 'inv8root_']
for func in funcs:
tensor = (get_random_test_tensor(max_value=1000) + 1001) / 2001
encrypted = SharedTensor(tensor)
reference = tensor.sqrt()
reference = reference.sqrt()
reference = reference.sqrt()
reference = reference.reciprocal()
encrypted_out = getattr(encrypted, func)()
self._check(encrypted_out, reference, 'private %s failed' % func)
# run all the tests:
if __name__ == '__main__':
unittest.main()
| 40.667458 | 79 | 0.560423 |
450285912cca3d72570ee9f09bfd66185fd0d474 | 489 | py | Python | data/scripts/templates/object/tangible/ship/crafted/repair/shared_repair_kit_weapon_capacitor.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | ["MIT"] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/templates/object/tangible/ship/crafted/repair/shared_repair_kit_weapon_capacitor.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | ["MIT"] | null | null | null | data/scripts/templates/object/tangible/ship/crafted/repair/shared_repair_kit_weapon_capacitor.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | ["MIT"] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/ship/crafted/repair/shared_repair_kit_weapon_capacitor.iff"
result.attribute_template_id = 8
result.stfName("space/space_item","repair_kit_capacitor_n")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | 28.764706 | 95 | 0.746421 |
b74f76f4568c3bbc215c3c2468a8833040029c2c | 511 | py | Python | mlfinlab/bet_sizing/__init__.py | scibol/mlfinlab | 3c80f269bc68b8cb9bcf863ceb3dc77fc14b6984 | ["BSD-3-Clause"] | 8 | 2020-04-19T08:09:34.000Z | 2022-03-30T20:49:40.000Z | mlfinlab/bet_sizing/__init__.py | scibol/mlfinlab | 3c80f269bc68b8cb9bcf863ceb3dc77fc14b6984 | ["BSD-3-Clause"] | 1 | 2019-07-24T17:52:30.000Z | 2019-07-24T17:52:30.000Z | mlfinlab/bet_sizing/__init__.py | scibol/mlfinlab | 3c80f269bc68b8cb9bcf863ceb3dc77fc14b6984 | ["BSD-3-Clause"] | 8 | 2020-08-09T02:25:04.000Z | 2022-03-20T15:08:11.000Z | """
Functions derived from Chapter 10: Bet Sizing
Only the highest-level user functions are included in the __init__ file.
"""
from mlfinlab.bet_sizing.bet_sizing import (bet_size_probability, bet_size_dynamic, bet_size_budget, bet_size_reserve,
confirm_and_cast_to_df, get_concurrent_sides, cdf_mixture,
single_bet_size_mixed)
from mlfinlab.bet_sizing.ef3m import M2N, centered_moment, raw_moment, most_likely_parameters
| 51.1 | 118 | 0.702544 |
5d50fbf53f1d59f1bfe6ce5e17de6e05982c38cf | 1,794 | py | Python | django_comments_xtd/management/commands/populate_xtdcomments.py | jgourmelen/django-comments-xtd | 2cd6dd0e5ab253643b1d4f05264a207a9255f0b0 | ["BSD-2-Clause"] | null | null | null | django_comments_xtd/management/commands/populate_xtdcomments.py | jgourmelen/django-comments-xtd | 2cd6dd0e5ab253643b1d4f05264a207a9255f0b0 | ["BSD-2-Clause"] | 1 | 2020-10-14T02:58:22.000Z | 2020-10-14T02:58:22.000Z | django_comments_xtd/management/commands/populate_xtdcomments.py | jgourmelen/django-comments-xtd | 2cd6dd0e5ab253643b1d4f05264a207a9255f0b0 | ["BSD-2-Clause"] | null | null | null | import sys
from django.db import connections
from django.db.utils import ConnectionDoesNotExist, IntegrityError
from django.core.management.base import BaseCommand
from django_comments.models import Comment
from django_comments_xtd.models import XtdComment
__all__ = ['Command']
class Command(BaseCommand):
help = "Load the xtdcomment table with valid data from django_comments."
def add_arguments(self, parser):
parser.add_argument('using', nargs='*', type=str)
def populate_db(self, cursor):
for comment in Comment.objects.all():
sql = ("INSERT INTO %(table)s "
" ('comment_ptr_id', 'thread_id', 'parent_id',"
" 'level', 'order', 'followup') "
"VALUES (%(id)d, %(id)d, %(id)d, 0, 1, FALSE)")
cursor.execute(sql % {'table': XtdComment._meta.db_table,
'id': comment.id})
def handle(self, *args, **options):
total = 0
using = options['using'] or ['default']
for db_conn in using:
try:
self.populate_db(connections[db_conn].cursor())
total += XtdComment.objects.using(db_conn).count()
except ConnectionDoesNotExist:
print("DB connection '%s' does not exist." % db_conn)
continue
except IntegrityError:
if db_conn != 'default':
print("Table '%s' (in '%s' DB connection) must be empty."
% (XtdComment._meta.db_table, db_conn))
else:
print("Table '%s' must be empty."
% XtdComment._meta.db_table)
sys.exit(1)
print("Added %d XtdComment object(s)." % total)
| 36.612245 | 77 | 0.557414 |
9a3aaadf8e98361d59e4c330e2878d055c06b628 | 165 | py | Python | python/programme/5-average-of-numbers/average.py | yacine-zitouni/codinasion | 7037234874fa18c900573a6e921f1119273bbfe5 | ["MIT"] | null | null | null | python/programme/5-average-of-numbers/average.py | yacine-zitouni/codinasion | 7037234874fa18c900573a6e921f1119273bbfe5 | ["MIT"] | null | null | null | python/programme/5-average-of-numbers/average.py | yacine-zitouni/codinasion | 7037234874fa18c900573a6e921f1119273bbfe5 | ["MIT"] | null | null | null | # Write a Python programme to calculate the average of numbers.
import statistics
n = list(map(int, input("Input: ").split()))
print('Output:', statistics.mean(n))
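# Example session (illustrative): entering "1 2 3 4" prints "Output: 2.5",
# since statistics.mean([1, 2, 3, 4]) == 2.5.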
| 27.5 | 63 | 0.721212 |
68107dce840d3ce4db1baef915c0b5f71a034008 | 3,130 | py | Python | displayingtime/settings.py | rmmo14/time_display | 117ace5e381b1bc64435d8b0ba9460d531d2af46 | ["MIT"] | null | null | null | displayingtime/settings.py | rmmo14/time_display | 117ace5e381b1bc64435d8b0ba9460d531d2af46 | ["MIT"] | null | null | null | displayingtime/settings.py | rmmo14/time_display | 117ace5e381b1bc64435d8b0ba9460d531d2af46 | ["MIT"] | null | null | null | """
Django settings for displayingtime project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'jmg=u^*fg!xh^%&r=nz3o)xsrsj395zukn5#iq9cx8is$7!71h'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'time_display',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'displayingtime.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'displayingtime.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| 25.655738 | 91 | 0.697764 |
e52aef7388d2e813d5c005a6416c13a2fcfde435 | 4,108 | py | Python | addModels.py | kursatbakis/cmpe492project | bb882ac3febada05824e407e7d573a08b4232ee8 | ["MIT"] | null | null | null | addModels.py | kursatbakis/cmpe492project | bb882ac3febada05824e407e7d573a08b4232ee8 | ["MIT"] | null | null | null | addModels.py | kursatbakis/cmpe492project | bb882ac3febada05824e407e7d573a08b4232ee8 | ["MIT"] | null | null | null | from scheduler.utils import get_db_collection
import random
def removeThreeBlockSlots():
collection = get_db_collection('time_slot')
collection.delete_many({'length': 3})
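# Note added for clarity: addSlots() below generates every (day, start, length)
# combination over 10 daily periods, skipping any block that would run past
# period 10 (the i + length - 1 > 10 check).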
def addSlots():
collection = get_db_collection('time_slot')
collection.delete_many({})
days = ['MONDAY', 'TUESDAY', 'WEDNESDAY', 'THURSDAY', 'FRIDAY']
for i in range(1, 11):
for length in range(1, 4):
for day in days:
if i + length - 1 > 10:
continue
slot = {
'day': day,
'length': length,
'slot': i
}
collection.insert_one(slot)
def addInstructors():
instructors = [{"full_name": "Emre Ugur"},
{"full_name": "Ali Akkaya"},
{"full_name": "Pinar Yolum"},
{"full_name": "Sadik Fikret Gurgen"},
{"full_name": "Arzucan Ozgur"},
{"full_name": "Can Ozturan"},
{"full_name": "Ethem Alpaydin"},
{"full_name": "Taylan Cemgil"},
{"full_name": "Arda Yurdakul"},
{"full_name": "Tuna Tugcu"},
{"full_name": "Suzan Uskudarli"},
{"full_name": "Cem Ersoy"},
{"full_name": "Cem Say"},
{"full_name": "Alper Şen"},
{"full_name": "Fatma Basak Aydemir"},
{"full_name": "Fatih Alagoz"},
{"full_name": "Haluk Bingol"},
]
collection = get_db_collection('instructor')
collection.delete_many({})
for instructor in instructors:
collection.insert_one(instructor)
def addClassrooms():
classrooms = [{"code": "BM-B5", "capacity": 30},
{"code": "BM-A3", "capacity": 60},
{"code": "BM-A6", "capacity": 25},
{"code": "BM-B4", "capacity": 80},
{"code": "NH401", "capacity": 200},
{"code": "NH405", "capacity": 250},
]
collection = get_db_collection('classroom')
for classroom in classrooms:
collection.insert_one(classroom)
def addCourses():
courses = [{"department": "CMPE", "code": 150, "section": 1, "quota": 240},
{"department": "CMPE", "code": 210, "section": 1, "quota": 70},
{"department": "CMPE", "code": 220, "section": 1, "quota": 230},
{"department": "CMPE", "code": 230, "section": 1, "quota": 126},
{"department": "CMPE", "code": 250, "section": 1, "quota": 75},
{"department": "CMPE", "code": 300, "section": 1, "quota": 100},
{"department": "CMPE", "code": 322, "section": 1, "quota": 115},
{"department": "CMPE", "code": 343, "section": 1, "quota": 95},
{"department": "CMPE", "code": 344, "section": 1, "quota": 80},
{"department": "CMPE", "code": 350, "section": 1, "quota": 50},
{"department": "CMPE", "code": 436, "section": 1, "quota": 40},
{"department": "CMPE", "code": 443, "section": 1, "quota": 131}
]
collection = get_db_collection('course')
collection.insert_many(courses)
def addHoursToCourses():
collection = get_db_collection('course')
collection.update_many({}, {"$set": {"hours": 2}})
def addAvailableSlots():
collection = get_db_collection('instructorAvailableSlots')
collection.delete_many({})
days = ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY"]
availableSlots = get_db_collection('instructorAvailableSlots')
slots = get_db_collection('time_slot')
instructors = get_db_collection('instructor')
for ins in instructors.find():
list = []
for s in slots.find({"day": {'$ne': days[random.randint(0, 4)]}}):
list.append(s["_id"])
availableSlots.insert_one({"instructor": ins, "slots": list})
| 41.494949 | 80 | 0.50073 |
043701a024bdab7614cdcaa1d50f8bcc0945028b | 30,082 | py | Python | toontown/toonbase/ToontownGlobals.py | MasterLoopyBM/Toontown | ebed7fc3f2ef06a529cf02eda7ab46361aceef9d | ["MIT"] | 1 | 2020-02-07T18:15:12.000Z | 2020-02-07T18:15:12.000Z | toontown/toonbase/ToontownGlobals.py | TrueBlueDogemon/Toontown | ebed7fc3f2ef06a529cf02eda7ab46361aceef9d | ["MIT"] | null | null | null | toontown/toonbase/ToontownGlobals.py | TrueBlueDogemon/Toontown | ebed7fc3f2ef06a529cf02eda7ab46361aceef9d | ["MIT"] | 2 | 2020-11-08T03:38:35.000Z | 2021-09-02T07:03:47.000Z | import TTLocalizer
from otp.otpbase.OTPGlobals import *
from direct.showbase.PythonUtil import Enum, invertDict
from pandac.PandaModules import BitMask32, Vec4
from toontown.estate import HouseGlobals
MapHotkeyOn = 'alt'
MapHotkeyOff = 'alt-up'
MapHotkey = 'alt'
AccountDatabaseChannelId = 4008
ToonDatabaseChannelId = 4021
DoodleDatabaseChannelId = 4023
DefaultDatabaseChannelId = AccountDatabaseChannelId
DatabaseIdFromClassName = {'Account': AccountDatabaseChannelId}
CogHQCameraFov = 60.0
BossBattleCameraFov = 72.0
MakeAToonCameraFov = 48.0
VPElevatorFov = 53.0
CFOElevatorFov = 43.0
CJElevatorFov = 59.0
CEOElevatorFov = 59.0
CBElevatorFov = 42.0
WantPromotion = 0
PendingPromotion = 1
CeilingBitmask = BitMask32(256)
FloorEventBitmask = BitMask32(16)
PieBitmask = BitMask32(256)
PetBitmask = BitMask32(8)
CatchGameBitmask = BitMask32(16)
CashbotBossObjectBitmask = BitMask32(16)
FurnitureSideBitmask = BitMask32(32)
FurnitureTopBitmask = BitMask32(64)
FurnitureDragBitmask = BitMask32(128)
PetLookatPetBitmask = BitMask32(256)
PetLookatNonPetBitmask = BitMask32(512)
BanquetTableBitmask = BitMask32(1024)
FullPies = 65535
CogHQCameraFar = 900.0
CogHQCameraNear = 1.0
CashbotHQCameraFar = 2000.0
CashbotHQCameraNear = 1.0
LawbotHQCameraFar = 3000.0
LawbotHQCameraNear = 1.0
BossbotHQCameraFar = 3000.0
BossbotHQCameraNear = 1.0
SpeedwayCameraFar = 8000.0
SpeedwayCameraNear = 1.0
DreamlandCameraNear = 1.0
DreamlandCameraFar = 2000.0
MaxMailboxContents = 30
MaxHouseItems = 45
MaxAccessories = 50
ExtraDeletedItems = 5
DeletedItemLifetime = 7 * 24 * 60
CatalogNumWeeksPerSeries = 13
CatalogNumWeeks = 78
PetFloorCollPriority = 5
PetPanelProximityPriority = 6
P_NoTrunk = -28
P_AlreadyOwnBiggerCloset = -27
P_ItemAlreadyRented = -26
P_OnAwardOrderListFull = -25
P_AwardMailboxFull = -24
P_ItemInPetTricks = -23
P_ItemInMyPhrases = -22
P_ItemOnAwardOrder = -21
P_ItemInAwardMailbox = -20
P_ItemAlreadyWorn = -19
P_ItemInCloset = -18
P_ItemOnGiftOrder = -17
P_ItemOnOrder = -16
P_ItemInMailbox = -15
P_PartyNotFound = -14
P_WillNotFit = -13
P_NotAGift = -12
P_OnOrderListFull = -11
P_MailboxFull = -10
P_NoPurchaseMethod = -9
P_ReachedPurchaseLimit = -8
P_NoRoomForItem = -7
P_NotShopping = -6
P_NotAtMailbox = -5
P_NotInCatalog = -4
P_NotEnoughMoney = -3
P_InvalidIndex = -2
P_UserCancelled = -1
P_ItemAvailable = 1
P_ItemOnOrder = 2
P_ItemUnneeded = 3
GIFT_user = 0
GIFT_admin = 1
GIFT_RAT = 2
GIFT_mobile = 3
GIFT_cogs = 4
GIFT_partyrefund = 5
FM_InvalidItem = -7
FM_NondeletableItem = -6
FM_InvalidIndex = -5
FM_NotOwner = -4
FM_NotDirector = -3
FM_RoomFull = -2
FM_HouseFull = -1
FM_MovedItem = 1
FM_SwappedItem = 2
FM_DeletedItem = 3
FM_RecoveredItem = 4
SPDonaldsBoat = 3
SPMinniesPiano = 4
CEVirtual = 14
MaxHpLimit = 145
MaxCarryLimit = 80
MaxQuestCarryLimit = 4
GravityValue = 32.174
MaxCogSuitLevel = 12 - 1
setInterfaceFont(TTLocalizer.InterfaceFont)
setSignFont(TTLocalizer.SignFont)
from toontown.toontowngui import TTDialog
setDialogClasses(TTDialog.TTDialog, TTDialog.TTGlobalDialog)
ToonFont = None
BuildingNametagFont = None
MinnieFont = None
SuitFont = None
def getToonFont():
global ToonFont
if ToonFont == None:
ToonFont = loader.loadFont(TTLocalizer.ToonFont, lineHeight=1.0)
return ToonFont
def getBuildingNametagFont():
global BuildingNametagFont
if BuildingNametagFont == None:
BuildingNametagFont = loader.loadFont(TTLocalizer.BuildingNametagFont)
return BuildingNametagFont
def getMinnieFont():
global MinnieFont
if MinnieFont == None:
MinnieFont = loader.loadFont(TTLocalizer.MinnieFont)
return MinnieFont
def getSuitFont():
global SuitFont
if SuitFont == None:
SuitFont = loader.loadFont(TTLocalizer.SuitFont, pixelsPerUnit=40, spaceAdvance=0.25, lineHeight=1.0)
return SuitFont
DonaldsDock = 1000
ToontownCentral = 2000
TheBrrrgh = 3000
MinniesMelodyland = 4000
DaisyGardens = 5000
OutdoorZone = 6000
FunnyFarm = 7000
GoofySpeedway = 8000
DonaldsDreamland = 9000
BarnacleBoulevard = 1100
SeaweedStreet = 1200
LighthouseLane = 1300
SillyStreet = 2100
LoopyLane = 2200
PunchlinePlace = 2300
WalrusWay = 3100
SleetStreet = 3200
PolarPlace = 3300
AltoAvenue = 4100
BaritoneBoulevard = 4200
TenorTerrace = 4300
ElmStreet = 5100
MapleStreet = 5200
OakStreet = 5300
LullabyLane = 9100
PajamaPlace = 9200
ToonHall = 2513
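# Note added for clarity: playground zone ids are multiples of 1000 and each
# street branch adds a multiple of 100 (ToontownCentral 2000 -> SillyStreet
# 2100); interiors such as ToonHall (2513) also fall inside their hood's block.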
HoodHierarchy = {ToontownCentral: (SillyStreet, LoopyLane, PunchlinePlace),
DonaldsDock: (BarnacleBoulevard, SeaweedStreet, LighthouseLane),
TheBrrrgh: (WalrusWay, SleetStreet, PolarPlace),
MinniesMelodyland: (AltoAvenue, BaritoneBoulevard, TenorTerrace),
DaisyGardens: (ElmStreet, MapleStreet, OakStreet),
DonaldsDreamland: (LullabyLane, PajamaPlace),
GoofySpeedway: ()}
WelcomeValleyToken = 0
BossbotHQ = 10000
BossbotLobby = 10100
BossbotCountryClubIntA = 10500
BossbotCountryClubIntB = 10600
BossbotCountryClubIntC = 10700
SellbotHQ = 11000
SellbotLobby = 11100
SellbotFactoryExt = 11200
SellbotFactoryInt = 11500
SellbotBrutalFactoryInt = 11600
CashbotHQ = 12000
CashbotLobby = 12100
CashbotMintIntA = 12500
CashbotMintIntB = 12600
CashbotMintIntC = 12700
LawbotHQ = 13000
LawbotLobby = 13100
LawbotOfficeExt = 13200
LawbotOfficeInt = 13300
LawbotStageIntA = 13300
LawbotStageIntB = 13400
LawbotStageIntC = 13500
LawbotStageIntD = 13600
Tutorial = 15000
MyEstate = 16000
GolfZone = 17000
PartyHood = 18000
HoodsAlwaysVisited = [17000, 18000]
WelcomeValleyBegin = 22000
WelcomeValleyEnd = 61000
DynamicZonesBegin = 61000
DynamicZonesEnd = 1 << 20
cogDept2index = {'c': 0,
'l': 1,
'm': 2,
's': 3}
cogIndex2dept = invertDict(cogDept2index)
HQToSafezone = {SellbotHQ: DaisyGardens,
CashbotHQ: DonaldsDreamland,
LawbotHQ: TheBrrrgh,
BossbotHQ: DonaldsDock}
CogDeptNames = [TTLocalizer.Bossbot,
TTLocalizer.Lawbot,
TTLocalizer.Cashbot,
TTLocalizer.Sellbot]
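# Note added for clarity: the cascading range checks below map a Cog HQ zone id
# to a department index: 13000-13999 is Lawbot (1), 12000+ is Cashbot (2),
# 11000+ is Sellbot (3), and lower ids (Bossbot HQ, 10xxx) fall through to 0.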
def cogHQZoneId2deptIndex(zone):
if zone >= 13000 and zone <= 13999:
return 1
elif zone >= 12000:
return 2
elif zone >= 11000:
return 3
else:
return 0
def cogHQZoneId2dept(zone):
return cogIndex2dept[cogHQZoneId2deptIndex(zone)]
def dept2cogHQ(dept):
dept2hq = {'c': BossbotHQ,
'l': LawbotHQ,
'm': CashbotHQ,
's': SellbotHQ}
return dept2hq[dept]
MockupFactoryId = 0
MintNumFloors = {CashbotMintIntA: 20,
CashbotMintIntB: 20,
CashbotMintIntC: 20}
CashbotMintCogLevel = 10
CashbotMintSkelecogLevel = 11
CashbotMintBossLevel = 12
MintNumBattles = {CashbotMintIntA: 4,
CashbotMintIntB: 6,
CashbotMintIntC: 8}
MintCogBuckRewards = {CashbotMintIntA: 8,
CashbotMintIntB: 14,
CashbotMintIntC: 20}
MintNumRooms = {CashbotMintIntA: 2 * (6,) + 5 * (7,) + 5 * (8,) + 5 * (9,) + 3 * (10,),
CashbotMintIntB: 3 * (8,) + 6 * (9,) + 6 * (10,) + 5 * (11,),
CashbotMintIntC: 4 * (10,) + 10 * (11,) + 6 * (12,)}
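# Note added for clarity: the repeated-tuple sums above (e.g. 2 * (6,) + 5 * (7,))
# flatten into one tuple of candidate room counts, so picking an entry at random
# yields a weighted distribution of mint lengths.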
BossbotCountryClubCogLevel = 11
BossbotCountryClubSkelecogLevel = 12
BossbotCountryClubBossLevel = 12
CountryClubNumRooms = {BossbotCountryClubIntA: (4,),
BossbotCountryClubIntB: 3 * (8,) + 6 * (9,) + 6 * (10,) + 5 * (11,),
BossbotCountryClubIntC: 4 * (10,) + 10 * (11,) + 6 * (12,)}
CountryClubNumBattles = {BossbotCountryClubIntA: 3,
BossbotCountryClubIntB: 2,
BossbotCountryClubIntC: 3}
CountryClubCogBuckRewards = {BossbotCountryClubIntA: 8,
BossbotCountryClubIntB: 14,
BossbotCountryClubIntC: 20}
LawbotStageCogLevel = 10
LawbotStageSkelecogLevel = 11
LawbotStageBossLevel = 12
StageNumBattles = {LawbotStageIntA: 0,
LawbotStageIntB: 0,
LawbotStageIntC: 0,
LawbotStageIntD: 0}
StageNoticeRewards = {LawbotStageIntA: 75,
LawbotStageIntB: 150,
LawbotStageIntC: 225,
LawbotStageIntD: 300}
StageNumRooms = {LawbotStageIntA: 2 * (6,) + 5 * (7,) + 5 * (8,) + 5 * (9,) + 3 * (10,),
LawbotStageIntB: 3 * (8,) + 6 * (9,) + 6 * (10,) + 5 * (11,),
LawbotStageIntC: 4 * (10,) + 10 * (11,) + 6 * (12,),
LawbotStageIntD: 4 * (10,) + 10 * (11,) + 6 * (12,)}
FT_FullSuit = 'fullSuit'
FT_Leg = 'leg'
FT_Arm = 'arm'
FT_Torso = 'torso'
factoryId2factoryType = {MockupFactoryId: FT_FullSuit,
SellbotFactoryInt: FT_FullSuit,
SellbotBrutalFactoryInt: FT_FullSuit,
LawbotOfficeInt: FT_FullSuit}
StreetNames = TTLocalizer.GlobalStreetNames
StreetBranchZones = StreetNames.keys()
Hoods = (DonaldsDock,
ToontownCentral,
TheBrrrgh,
MinniesMelodyland,
DaisyGardens,
OutdoorZone,
FunnyFarm,
GoofySpeedway,
DonaldsDreamland,
BossbotHQ,
SellbotHQ,
CashbotHQ,
LawbotHQ,
GolfZone)
HoodsForTeleportAll = (DonaldsDock,
ToontownCentral,
TheBrrrgh,
MinniesMelodyland,
DaisyGardens,
OutdoorZone,
GoofySpeedway,
DonaldsDreamland,
BossbotHQ,
SellbotHQ,
CashbotHQ,
LawbotHQ,
GolfZone)
BingoCardNames = {'normal': 0,
'corners': 1,
'diagonal': 2,
'threeway': 3,
'blockout': 4}
NoPreviousGameId = 0
RaceGameId = 1
CannonGameId = 2
TagGameId = 3
PatternGameId = 4
RingGameId = 5
MazeGameId = 6
TugOfWarGameId = 7
CatchGameId = 8
DivingGameId = 9
TargetGameId = 10
PairingGameId = 11
VineGameId = 12
IceGameId = 13
CogThiefGameId = 14
TwoDGameId = 15
PhotoGameId = 16
TravelGameId = 100
MinigameNames = {'race': RaceGameId,
'cannon': CannonGameId,
'tag': TagGameId,
'pattern': PatternGameId,
'minnie': PatternGameId,
'match': PatternGameId,
'matching': PatternGameId,
'ring': RingGameId,
'maze': MazeGameId,
'tug': TugOfWarGameId,
'catch': CatchGameId,
'diving': DivingGameId,
'target': TargetGameId,
'pairing': PairingGameId,
'vine': VineGameId,
'ice': IceGameId,
'thief': CogThiefGameId,
'2d': TwoDGameId,
'photo': PhotoGameId,
'travel': TravelGameId}
MinigameTemplateId = -1
MinigameIDs = (RaceGameId,
CannonGameId,
TagGameId,
PatternGameId,
RingGameId,
MazeGameId,
TugOfWarGameId,
CatchGameId,
DivingGameId,
TargetGameId,
PairingGameId,
VineGameId,
IceGameId,
CogThiefGameId,
TwoDGameId,
PhotoGameId,
TravelGameId)
MinigamePlayerMatrix = {
1: (CannonGameId, MazeGameId, TugOfWarGameId, RingGameId, VineGameId, CogThiefGameId, TwoDGameId, DivingGameId, PairingGameId, CatchGameId, TargetGameId, PhotoGameId),
2: (CannonGameId, MazeGameId, TugOfWarGameId, PatternGameId, TagGameId, RingGameId, VineGameId, IceGameId, CogThiefGameId, TwoDGameId, DivingGameId, PairingGameId, CatchGameId, TargetGameId, PhotoGameId),
3: (CannonGameId, MazeGameId, TugOfWarGameId, PatternGameId, RaceGameId, TagGameId, VineGameId, RingGameId, IceGameId, CogThiefGameId, TwoDGameId, DivingGameId, PairingGameId, CatchGameId, TargetGameId, PhotoGameId),
4: (CannonGameId, MazeGameId, TugOfWarGameId, PatternGameId, RaceGameId, TagGameId, VineGameId, RingGameId, IceGameId, CogThiefGameId, TwoDGameId, DivingGameId, PairingGameId, CatchGameId, TargetGameId, PhotoGameId),
}
MinigameReleaseDates = {IceGameId: (2008, 8, 5),
PhotoGameId: (2008, 8, 13),
TwoDGameId: (2008, 8, 20),
CogThiefGameId: (2008, 8, 27)}
KeyboardTimeout = 300
phaseMap = {Tutorial: 4,
ToontownCentral: 4,
MyEstate: 5.5,
DonaldsDock: 6,
MinniesMelodyland: 6,
GoofySpeedway: 6,
TheBrrrgh: 8,
DaisyGardens: 8,
FunnyFarm: 8,
DonaldsDreamland: 8,
OutdoorZone: 6,
BossbotHQ: 12,
SellbotHQ: 9,
CashbotHQ: 10,
LawbotHQ: 11,
GolfZone: 6,
PartyHood: 13}
streetPhaseMap = {ToontownCentral: 5,
DonaldsDock: 6,
MinniesMelodyland: 6,
GoofySpeedway: 6,
TheBrrrgh: 8,
DaisyGardens: 8,
FunnyFarm: 8,
DonaldsDreamland: 8,
OutdoorZone: 8,
BossbotHQ: 12,
SellbotHQ: 9,
CashbotHQ: 10,
LawbotHQ: 11,
PartyHood: 13}
dnaMap = {Tutorial: 'toontown_central',
ToontownCentral: 'toontown_central',
DonaldsDock: 'donalds_dock',
MinniesMelodyland: 'minnies_melody_land',
GoofySpeedway: 'goofy_speedway',
TheBrrrgh: 'the_burrrgh',
DaisyGardens: 'daisys_garden',
FunnyFarm: 'not done yet',
DonaldsDreamland: 'donalds_dreamland',
OutdoorZone: 'outdoor_zone',
BossbotHQ: 'cog_hq_bossbot',
SellbotHQ: 'cog_hq_sellbot',
CashbotHQ: 'cog_hq_cashbot',
LawbotHQ: 'cog_hq_lawbot',
GolfZone: 'golf_zone'}
hoodNameMap = {DonaldsDock: TTLocalizer.DonaldsDock,
ToontownCentral: TTLocalizer.ToontownCentral,
TheBrrrgh: TTLocalizer.TheBrrrgh,
MinniesMelodyland: TTLocalizer.MinniesMelodyland,
DaisyGardens: TTLocalizer.DaisyGardens,
OutdoorZone: TTLocalizer.OutdoorZone,
FunnyFarm: TTLocalizer.FunnyFarm,
GoofySpeedway: TTLocalizer.GoofySpeedway,
DonaldsDreamland: TTLocalizer.DonaldsDreamland,
BossbotHQ: TTLocalizer.BossbotHQ,
SellbotHQ: TTLocalizer.SellbotHQ,
CashbotHQ: TTLocalizer.CashbotHQ,
LawbotHQ: TTLocalizer.LawbotHQ,
Tutorial: TTLocalizer.Tutorial,
MyEstate: TTLocalizer.MyEstate,
GolfZone: TTLocalizer.GolfZone,
PartyHood: TTLocalizer.PartyHood}
safeZoneCountMap = {MyEstate: 8,
Tutorial: 6,
ToontownCentral: 6,
DonaldsDock: 10,
MinniesMelodyland: 5,
GoofySpeedway: 500,
TheBrrrgh: 8,
DaisyGardens: 9,
FunnyFarm: 500,
DonaldsDreamland: 5,
OutdoorZone: 500,
GolfZone: 500,
PartyHood: 500}
townCountMap = {MyEstate: 8,
Tutorial: 40,
ToontownCentral: 37,
DonaldsDock: 40,
MinniesMelodyland: 40,
GoofySpeedway: 40,
TheBrrrgh: 40,
DaisyGardens: 40,
FunnyFarm: 40,
DonaldsDreamland: 40,
OutdoorZone: 40,
PartyHood: 20}
hoodCountMap = {MyEstate: 2,
Tutorial: 2,
ToontownCentral: 2,
DonaldsDock: 2,
MinniesMelodyland: 2,
GoofySpeedway: 2,
TheBrrrgh: 2,
DaisyGardens: 2,
FunnyFarm: 2,
DonaldsDreamland: 2,
OutdoorZone: 2,
BossbotHQ: 2,
SellbotHQ: 43,
CashbotHQ: 2,
LawbotHQ: 2,
GolfZone: 2,
PartyHood: 2}
TrophyStarLevels = (10, 20, 30, 50, 75, 100)
TrophyStarColors = (Vec4(0.9, 0.6, 0.2, 1),
Vec4(0.9, 0.6, 0.2, 1),
Vec4(0.8, 0.8, 0.8, 1),
Vec4(0.8, 0.8, 0.8, 1),
Vec4(1, 1, 0, 1),
Vec4(1, 1, 0, 1))
MickeySpeed = 5.0
VampireMickeySpeed = 1.15
MinnieSpeed = 3.2
WitchMinnieSpeed = 1.8
DonaldSpeed = 3.68
FrankenDonaldSpeed = 0.9
DaisySpeed = 2.3
GoofySpeed = 5.2
SuperGoofySpeed = 1.6
PlutoSpeed = 5.5
WesternPlutoSpeed = 3.2
ChipSpeed = 3
DaleSpeed = 3.5
DaleOrbitDistance = 3
SuitWalkSpeed = 4.8
PieThrowArc = 0
PieThrowLinear = 1
PieCodeBossCog = 1
PieCodeNotBossCog = 2
PieCodeToon = 3
PieCodeBossInsides = 4
PieCodeDefensePan = 5
PieCodeProsecutionPan = 6
PieCodeLawyer = 7
PieCodeInvasionSuit = 8
PieCodeColors = {PieCodeBossCog: None,
 PieCodeNotBossCog: (0.8, 0.8, 0.8, 1),
 PieCodeToon: None}
suitIndex = {
'f' : 0,
'p' : 1,
'ym' : 2,
'mm' : 3,
'ds' : 4,
'hh' : 5,
'cr' : 6,
'tbc' : 7,
'bf' : 8,
'b' : 9,
'dt' : 10,
'ac' : 11,
'bs' : 12,
'sd' : 13,
'le' : 14,
'bw' : 15,
'sc' : 16,
'pp' : 17,
'tw' : 18,
'bc' : 19,
'nc' : 20,
'mb' : 21,
'ls' : 22,
'rb' : 23,
'cc' : 24,
'tm' : 25,
'nd' : 26,
'gh' : 27,
'ms' : 28,
'tf' : 29,
'm' : 30,
'mh' : 31
}
BossCogRollSpeed = 7.5
BossCogTurnSpeed = 20
BossCogTreadSpeed = 3.5
BossCogDizzy = 0
BossCogElectricFence = 1
BossCogSwatLeft = 2
BossCogSwatRight = 3
BossCogAreaAttack = 4
BossCogFrontAttack = 5
BossCogRecoverDizzyAttack = 6
BossCogDirectedAttack = 7
BossCogStrafeAttack = 8
BossCogNoAttack = 9
BossCogGoonZap = 10
BossCogSlowDirectedAttack = 11
BossCogDizzyNow = 12
BossCogGavelStomp = 13
BossCogGavelHandle = 14
BossCogLawyerAttack = 15
BossCogMoveAttack = 16
BossCogGolfAttack = 17
BossCogGolfAreaAttack = 18
BossCogGearDirectedAttack = 19
BossCogOvertimeAttack = 20
BossCogAttackTimes = {BossCogElectricFence: 0,
BossCogSwatLeft: 5.5,
BossCogSwatRight: 5.5,
BossCogAreaAttack: 4.21,
BossCogFrontAttack: 2.65,
BossCogRecoverDizzyAttack: 5.1,
BossCogDirectedAttack: 4.84,
BossCogNoAttack: 6,
BossCogSlowDirectedAttack: 7.84,
BossCogMoveAttack: 3,
BossCogGolfAttack: 6,
BossCogGolfAreaAttack: 7,
BossCogGearDirectedAttack: 4.84,
BossCogOvertimeAttack: 5}
BossCogDamageLevels = {BossCogElectricFence: 1,
BossCogSwatLeft: 5,
BossCogSwatRight: 5,
BossCogAreaAttack: 10,
BossCogFrontAttack: 3,
BossCogRecoverDizzyAttack: 3,
BossCogDirectedAttack: 3,
BossCogStrafeAttack: 2,
BossCogGoonZap: 5,
BossCogSlowDirectedAttack: 10,
BossCogGavelStomp: 20,
BossCogGavelHandle: 2,
BossCogLawyerAttack: 5,
BossCogMoveAttack: 20,
BossCogGolfAttack: 15,
BossCogGolfAreaAttack: 15,
BossCogGearDirectedAttack: 15,
BossCogOvertimeAttack: 10}
BossCogBattleAPosHpr = (0, -25, 0, 0, 0, 0)
BossCogBattleBPosHpr = (0, 25, 0, 180, 0, 0)
SellbotBossMaxDamage = 100
SellbotBossMaxDamageNerfed = 100
SellbotBossBattleOnePosHpr = (0, -35, 0, -90, 0, 0)
SellbotBossBattleTwoPosHpr = (0, 60, 18, -90, 0, 0)
SellbotBossBattleThreeHpr = (180, 0, 0)
SellbotBossBottomPos = (0, -110, -6.5)
SellbotBossDeathPos = (0, -175, -6.5)
SellbotBossDooberTurnPosA = (-20, -50, 0)
SellbotBossDooberTurnPosB = (20, -50, 0)
SellbotBossDooberTurnPosDown = (0, -50, 0)
SellbotBossDooberFlyPos = (0, -135, -6.5)
SellbotBossTopRampPosA = (-80, -35, 18)
SellbotBossTopRampTurnPosA = (-80, 10, 18)
SellbotBossP3PosA = (-50, 40, 18)
SellbotBossTopRampPosB = (80, -35, 18)
SellbotBossTopRampTurnPosB = (80, 10, 18)
SellbotBossP3PosB = (50, 60, 18)
CashbotBossMaxDamage = 500
BrutalCashbotBossMaxDamage = 1000
CashbotBossOffstagePosHpr = (120, -195, 0, 0, 0, 0)
CashbotBossBattleOnePosHpr = (120, -230, 0, 90, 0, 0)
CashbotRTBattleOneStartPosHpr = (94, -220, 0, 110, 0, 0)
CashbotBossBattleThreePosHpr = (120, -315, 0, 180, 0, 0)
CashbotToonsBattleThreeStartPosHpr = [(105, -285, 0, 208, 0, 0),
 (136, -342, 0, 398, 0, 0),
 (105, -342, 0, 333, 0, 0),
 (135, -292, 0, 146, 0, 0),
 (93, -303, 0, 242, 0, 0),
 (144, -327, 0, 64, 0, 0),
 (145, -302, 0, 117, 0, 0),
 (93, -327, 0, -65, 0, 0)]
CashbotBossSafePosHprs = [(120, -315, 30, 0, 0, 0),
 (77.2, -329.3, 0, -90, 0, 0),
 (77.1, -302.7, 0, -90, 0, 0),
 (165.7, -326.4, 0, 90, 0, 0),
 (165.5, -302.4, 0, 90, 0, 0),
 (107.8, -359.1, 0, 0, 0, 0),
 (133.9, -359.1, 0, 0, 0, 0),
 (107.0, -274.7, 0, 180, 0, 0),
 (134.2, -274.7, 0, 180, 0, 0)]
CashbotBossCranePosHprs = [(97.4, -337.6, 0, -45, 0, 0),
 (97.4, -292.4, 0, -135, 0, 0),
 (142.6, -292.4, 0, 135, 0, 0),
 (142.6, -337.6, 0, 45, 0, 0)]
CashbotBossToMagnetTime = 0.2
CashbotBossFromMagnetTime = 1
CashbotBossSafeKnockImpact = 0.5
CashbotBossSafeNewImpact = 0.0
CashbotBossGoonImpact = 0.1
CashbotBossKnockoutDamage = 15
TTWakeWaterHeight = -4.79
DDWakeWaterHeight = 1.669
EstateWakeWaterHeight = -.3
OZWakeWaterHeight = -0.5
WakeRunDelta = 0.1
WakeWalkDelta = 0.2
NoItems = 0
NewItems = 1
OldItems = 2
SuitInvasionBegin = 0
SuitInvasionEnd = 1
SuitInvasionUpdate = 2
SuitInvasionBulletin = 3
SkelecogInvasionBegin = 4
SkelecogInvasionEnd = 5
SkelecogInvasionBulletin = 6
WaiterInvasionBegin = 7
WaiterInvasionEnd = 8
WaiterInvasionBulletin = 9
V2InvasionBegin = 10
V2InvasionEnd = 11
V2InvasionBulletin = 12
VirtualInvasionBegin = 13
VirtualInvasionEnd = 14
VirtualInvasionBulletin = 15
RentalInvasionBegin = 16
RentalInvasionEnd = 17
RentalInvasionBulletin = 18
NO_HOLIDAY = 0
JULY4_FIREWORKS = 1
NEWYEARS_FIREWORKS = 2
HALLOWEEN = 3
WINTER_DECORATIONS = 4
SKELECOG_INVASION = 5
MR_HOLLYWOOD_INVASION = 6
FISH_BINGO_NIGHT = 7
BLACK_CAT_DAY = 9
RESISTANCE_EVENT = 10
KART_RECORD_DAILY_RESET = 11
KART_RECORD_WEEKLY_RESET = 12
TRICK_OR_TREAT = 13
CIRCUIT_RACING = 14
POLAR_PLACE_EVENT = 15
CIRCUIT_RACING_EVENT = 16
TROLLEY_HOLIDAY = 17
TROLLEY_WEEKEND = 18
SILLY_SATURDAY_BINGO = 19
SILLY_SATURDAY_CIRCUIT = 20
SILLY_SATURDAY_TROLLEY = 21
ROAMING_TRIALER_WEEKEND = 22
BOSSCOG_INVASION = 23
MARCH_INVASION = 24
MORE_XP_HOLIDAY = 25
HALLOWEEN_PROPS = 26
HALLOWEEN_COSTUMES = 27
DECEMBER_INVASION = 28
APRIL_FOOLS_COSTUMES = 29
CRASHED_LEADERBOARD = 30
OCTOBER31_FIREWORKS = 31
NOVEMBER19_FIREWORKS = 32
SELLBOT_SURPRISE_1 = 33
SELLBOT_SURPRISE_2 = 34
SELLBOT_SURPRISE_3 = 35
SELLBOT_SURPRISE_4 = 36
CASHBOT_CONUNDRUM_1 = 37
CASHBOT_CONUNDRUM_2 = 38
CASHBOT_CONUNDRUM_3 = 39
CASHBOT_CONUNDRUM_4 = 40
LAWBOT_GAMBIT_1 = 41
LAWBOT_GAMBIT_2 = 42
LAWBOT_GAMBIT_3 = 43
LAWBOT_GAMBIT_4 = 44
TROUBLE_BOSSBOTS_1 = 45
TROUBLE_BOSSBOTS_2 = 46
TROUBLE_BOSSBOTS_3 = 47
TROUBLE_BOSSBOTS_4 = 48
JELLYBEAN_DAY = 49
FEBRUARY14_FIREWORKS = 51
JULY14_FIREWORKS = 52
JUNE22_FIREWORKS = 53
BIGWIG_INVASION = 54
COLD_CALLER_INVASION = 53
BEAN_COUNTER_INVASION = 54
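# NOTE: the two ids above (53 and 54) collide with JUNE22_FIREWORKS and
# BIGWIG_INVASION earlier in this block; code that keys holidays by id
# should be aware of the duplicates.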
DOUBLE_TALKER_INVASION = 55
DOWNSIZER_INVASION = 56
WINTER_CAROLING = 57
HYDRANT_ZERO_HOLIDAY = 58
VALENTINES_DAY = 59
SILLYMETER_HOLIDAY = 60
MAILBOX_ZERO_HOLIDAY = 61
TRASHCAN_ZERO_HOLIDAY = 62
SILLY_SURGE_HOLIDAY = 63
HYDRANTS_BUFF_BATTLES = 64
MAILBOXES_BUFF_BATTLES = 65
TRASHCANS_BUFF_BATTLES = 66
SILLY_CHATTER_ONE = 67
SILLY_CHATTER_TWO = 68
SILLY_CHATTER_THREE = 69
SILLY_CHATTER_FOUR = 70
SILLY_TEST = 71
YES_MAN_INVASION = 72
TIGHTWAD_INVASION = 73
TELEMARKETER_INVASION = 74
HEADHUNTER_INVASION = 75
SPINDOCTOR_INVASION = 76
MONEYBAGS_INVASION = 77
TWOFACES_INVASION = 78
MINGLER_INVASION = 79
LOANSHARK_INVASION = 80
CORPORATE_RAIDER_INVASION = 81
ROBBER_BARON_INVASION = 82
LEGAL_EAGLE_INVASION = 83
BIG_WIG_INVASION = 84
BIG_CHEESE_INVASION = 85
DOWN_SIZER_INVASION = 86
MOVER_AND_SHAKER_INVASION = 87
DOUBLETALKER_INVASION = 88
PENNY_PINCHER_INVASION = 89
NAME_DROPPER_INVASION = 90
AMBULANCE_CHASER_INVASION = 91
MICROMANAGER_INVASION = 92
NUMBER_CRUNCHER_INVASION = 93
SILLY_CHATTER_FIVE = 94
VICTORY_PARTY_HOLIDAY = 95
SELLBOT_NERF_HOLIDAY = 96
JELLYBEAN_TROLLEY_HOLIDAY = 97
JELLYBEAN_FISHING_HOLIDAY = 98
JELLYBEAN_PARTIES_HOLIDAY = 99
BANK_UPGRADE_HOLIDAY = 100
TOP_TOONS_MARATHON = 101
SELLBOT_INVASION = 102
SELLBOT_FIELD_OFFICE = 103
SELLBOT_INVASION_MOVER_AND_SHAKER = 104
IDES_OF_MARCH = 105
EXPANDED_CLOSETS = 106
TAX_DAY_INVASION = 107
KARTING_TICKETS_HOLIDAY = 109
PRE_JULY_4_DOWNSIZER_INVASION = 110
PRE_JULY_4_BIGWIG_INVASION = 111
COMBO_FIREWORKS = 112
JELLYBEAN_TROLLEY_HOLIDAY_MONTH = 113
JELLYBEAN_FISHING_HOLIDAY_MONTH = 114
JELLYBEAN_PARTIES_HOLIDAY_MONTH = 115
SILLYMETER_EXT_HOLIDAY = 116
SPOOKY_BLACK_CAT = 117
SPOOKY_TRICK_OR_TREAT = 118
SPOOKY_PROPS = 119
SPOOKY_COSTUMES = 120
WACKY_WINTER_DECORATIONS = 121
WACKY_WINTER_CAROLING = 122
TOT_REWARD_JELLYBEAN_AMOUNT = 100
TOT_REWARD_END_OFFSET_AMOUNT = 0
LawbotBossMaxDamage = 2700
LawbotBossWinningTilt = 40
LawbotBossInitialDamage = 1350
LawbotBossBattleOnePosHpr = (-2.798, -60, 0, 0, 0, 0)
LawbotBossBattleTwoPosHpr = (-2.798, 89, 19.145, 0, 0, 0)
LawbotBossTopRampPosA = (-80, -35, 18)
LawbotBossTopRampTurnPosA = (-80, 10, 18)
LawbotBossP3PosA = (55, -9, 0)
LawbotBossTopRampPosB = (80, -35, 18)
LawbotBossTopRampTurnPosB = (80, 10, 18)
LawbotBossP3PosB = (55, -9, 0)
LawbotBossBattleThreePosHpr = LawbotBossBattleTwoPosHpr
LawbotBossBottomPos = (50, 39, 0)
LawbotBossDeathPos = (50, 40, 0)
LawbotBossGavelPosHprs = [(35, 78.328, 0, -135, 0, 0),
                          (68.5, 78.328, 0, 135, 0, 0),
                          (47, -33, 0, 45, 0, 0),
                          (-50, -39, 0, -45, 0, 0),
                          (-9, -37, 0, 0, 0, 0),
                          (-9, 49, 0, -180, 0, 0),
                          (32, 0, 0, 45, 0, 0),
                          (33, 56, 0, 135, 0, 0)]
LawbotBossGavelTimes = [(0.2, 0.9, 0.6),
(0.25, 1, 0.5),
(1.0, 6, 0.5),
(0.3, 3, 1),
(0.26, 0.9, 0.45),
(0.24, 1.1, 0.65),
(0.27, 1.2, 0.45),
(0.25, 0.95, 0.5)]
LawbotBossGavelHeadings = [(0, -15, 4, -70 - 45, 5, 45),
                           (0, -45, -4, -35, -45, -16, 32),
                           (0, -8, 19, -7, 5, 23),
                           (0, -4, 8, -16, 32, -45, 7, 7, -30, 19, -13, 25),
                           (0, -45, -90, 45, 90),
                           (0, -45, -90, 45, 90),
                           (0, -45, 45),
                           (0, -45, 45)]
LawbotBossCogRelBattleAPosHpr = (-25, -10, 0, 0, 0, 0)
LawbotBossCogRelBattleBPosHpr = (-25, 10, 0, 0, 0, 0)
LawbotBossCogAbsBattleAPosHpr = (-5, -2, 0, 0, 0, 0)
LawbotBossCogAbsBattleBPosHpr = (-5, 0, 0, 0, 0, 0)
LawbotBossWitnessStandPosHpr = (54, 100, 0, -90, 0, 0)
LawbotBossInjusticePosHpr = (-3, 12, 0, 90, 0, 0)
LawbotBossInjusticeScale = (1.75, 1.75, 1.5)
LawbotBossDefensePanDamage = 1
LawbotBossLawyerPosHprs = [(-57, -24, 0, -90, 0, 0),
                           (-57, -12, 0, -90, 0, 0),
                           (-57, 0, 0, -90, 0, 0),
                           (-57, 12, 0, -90, 0, 0),
                           (-57, 24, 0, -90, 0, 0),
                           (-57, 36, 0, -90, 0, 0),
                           (-57, 48, 0, -90, 0, 0),
                           (-57, 60, 0, -90, 0, 0),
                           (-3, -37.3, 0, 0, 0, 0),
                           (-3, 53, 0, -180, 0, 0)]
LawbotBossLawyerCycleTime = 6
LawbotBossLawyerToPanTime = 2.5
LawbotBossLawyerChanceToAttack = 50
LawbotBossLawyerHeal = 2
LawbotBossLawyerStunTime = 5
LawbotBossDifficultySettings = [(38, 4, 8, 1, 0, 0),
                                (36, 5, 8, 1, 0, 0),
                                (34, 5, 8, 1, 0, 0),
                                (32, 6, 8, 2, 0, 0),
                                (30, 6, 8, 2, 0, 0),
                                (28, 7, 8, 3, 0, 0),
                                (26, 7, 9, 3, 1, 1),
                                (24, 8, 9, 4, 1, 1),
                                (22, 8, 10, 4, 1, 0)]
LawbotBossCannonPosHprs = [(-40, -12, 0, -90, 0, 0),
                           (-40, 0, 0, -90, 0, 0),
                           (-40, 12, 0, -90, 0, 0),
                           (-40, 24, 0, -90, 0, 0),
                           (-40, 36, 0, -90, 0, 0),
                           (-40, 48, 0, -90, 0, 0),
                           (-40, 60, 0, -90, 0, 0),
                           (-40, 72, 0, -90, 0, 0)]
LawbotBossCannonPosA = (-80, -51.48, 0)
LawbotBossCannonPosB = (-80, 70.73, 0)
LawbotBossChairPosHprs = [(60, 72, 0, -90, 0, 0),
                          (60, 62, 0, -90, 0, 0),
                          (60, 52, 0, -90, 0, 0),
                          (60, 42, 0, -90, 0, 0),
                          (60, 32, 0, -90, 0, 0),
                          (60, 22, 0, -90, 0, 0),
                          (70, 72, 5, -90, 0, 0),
                          (70, 62, 5, -90, 0, 0),
                          (70, 52, 5, -90, 0, 0),
                          (70, 42, 5, -90, 0, 0),
                          (70, 32, 5, -90, 0, 0),
                          (70, 22, 5, -90, 0, 0)]
LawbotBossChairRow1PosB = (59.3, 48, 14.05)
LawbotBossChairRow1PosA = (59.3, -18.2, 14.05)
LawbotBossChairRow2PosB = (75.1, 48, 28.2)
LawbotBossChairRow2PosA = (75.1, -18.2, 28.2)
LawbotBossCannonBallMax = 12
LawbotBossJuryBoxStartPos = (94, -8, 5)
LawbotBossJuryBoxRelativeEndPos = (30, 0, 12.645)
LawbotBossJuryBoxMoveTime = 70
LawbotBossJurorsForBalancedScale = 8
LawbotBossDamagePerJuror = 68
LawbotBossCogJurorFlightTime = 10
LawbotBossCogJurorDistance = 75
LawbotBossBaseJurorNpcId = 2001
LawbotBossWitnessEpiloguePosHpr = (-3, 0, 0, 180, 0, 0)
LawbotBossChanceForTaunt = 25
LawbotBossBonusWaitTime = 60
LawbotBossBonusDuration = 20
LawbotBossBonusToonup = 10
LawbotBossBonusWeightMultiplier = 2
LawbotBossChanceToDoAreaAttack = 11
LOW_POP_JP = 0
MID_POP_JP = 100
HIGH_POP_JP = 200
LOW_POP_INTL = 399
MID_POP_INTL = 499
HIGH_POP_INTL = -1
LOW_POP = 100
MID_POP = 200
HIGH_POP = -1
PinballCannonBumper = 0
PinballCloudBumperLow = 1
PinballCloudBumperMed = 2
PinballCloudBumperHigh = 3
PinballTarget = 4
PinballRoof = 5
PinballHouse = 6
PinballFence = 7
PinballBridge = 8
PinballStatuary = 9
PinballScoring = [(100, 1), (150, 1), (200, 1), (250, 1), (350, 1),
                  (100, 1), (50, 1), (25, 1), (100, 1), (10, 1)]
PinballCannonBumperInitialPos = (0, -20, 40)
RentalCop = 0
RentalCannon = 1
RentalGameTable = 2
GlitchKillerZones = [13300, 13400, 13500, 13600]
ColorPlayer = (0.3, 0.7, 0.3, 1)
ColorAvatar = (0.3, 0.3, 0.7, 1)
ColorPet = (0.6, 0.4, 0.2, 1)
ColorFreeChat = (0.3, 0.3, 0.8, 1)
ColorSpeedChat = (0.2, 0.6, 0.4, 1)
ColorNoChat = (0.8, 0.5, 0.1, 1)
FactoryLaffMinimums = [(0, 31, 0), (0, 66, 71), (0, 81, 86, 96), (0, 101, 106)]
PICNIC_COUNTDOWN_TIME = 60
BossbotRTIntroStartPosHpr = (0, -64, 0, 180, 0, 0)
BossbotRTPreTwoPosHpr = (0, -20, 0, 180, 0, 0)
BossbotRTEpiloguePosHpr = (0, 90, 0, 180, 0, 0)
BossbotBossBattleOnePosHpr = (0, 355, 0, 0, 0, 0)
BossbotBossPreTwoPosHpr = (0, 20, 0, 0, 0, 0)
BossbotElevCamPosHpr = (0, -100.544, 7.18258, 0, 0, 0)
BossbotFoodModelScale = 0.75
BossbotNumFoodToExplode = 3
BossbotBossServingDuration = 300
BossbotPrepareBattleThreeDuration = 20
WaiterBattleAPosHpr = (20, -400, 0, 0, 0, 0)
WaiterBattleBPosHpr = (-20, -400, 0, 0, 0, 0)
BossbotBossBattleThreePosHpr = (0, 355, 0, 0, 0, 0)
DinerBattleAPosHpr = (20, -240, 0, 0, 0, 0)
DinerBattleBPosHpr = (-20, -240, 0, 0, 0, 0)
BossbotBossMaxDamage = 500
BossbotMaxSpeedDamage = 90
BossbotSpeedRecoverRate = 20
BossbotBossDifficultySettings = [(8, 4, 11, 3, 30, 25),
                                 (9, 5, 12, 6, 28, 26),
                                 (10, 6, 11, 7, 26, 27),
                                 (8, 8, 12, 8, 24, 28),
                                 (13, 5, 12, 9, 22, 29)]
BossbotRollSpeedMax = 22
BossbotRollSpeedMin = 7.5
BossbotTurnSpeedMax = 60
BossbotTurnSpeedMin = 20
BossbotTreadSpeedMax = 10.5
BossbotTreadSpeedMin = 3.5
CalendarFilterShowAll = 0
CalendarFilterShowOnlyHolidays = 1
CalendarFilterShowOnlyParties = 2
TTC = 1
DD = 2
MM = 3
GS = 4
DG = 5
BR = 6
OZ = 7
DL = 8
DefaultWantNewsPageSetting = 0
gmMagicWordList = ['restock', 'restockUber', 'autoRestock',
                   'resistanceRestock', 'restockSummons', 'uberDrop', 'rich',
                   'maxBankMoney', 'toonUp', 'rod', 'cogPageFull', 'pinkSlips',
                   'Tickets', 'newSummons', 'who', 'who all']
NewsPageScaleAdjust = 0.85
AnimPropTypes = Enum(('Unknown', 'Hydrant', 'Mailbox', 'Trashcan'), start=-1)
EmblemTypes = Enum(('Silver', 'Gold'))
NumEmblemTypes = 2
MaxBankMoney = 50000
DefaultBankItemId = 1350
ToonAnimStates = set(['off', 'neutral', 'victory', 'Happy', 'Sad', 'Catching',
                      'CatchEating', 'Sleep', 'walk', 'jumpSquat', 'jump',
                      'jumpAirborne', 'jumpLand', 'run', 'swim', 'swimhold',
                      'dive', 'cringe', 'OpenBook', 'ReadBook', 'CloseBook',
                      'TeleportOut', 'Died', 'TeleportedOut', 'TeleportIn',
                      'Emote', 'SitStart', 'Sit', 'Push', 'Squish', 'FallDown',
                      'GolfPuttLoop', 'GolfRotateLeft', 'GolfRotateRight',
                      'GolfPuttSwing', 'GolfGoodPutt', 'GolfBadPutt',
                      'Flattened', 'CogThiefRunning', 'ScientistJealous',
                      'ScientistEmcee', 'ScientistWork', 'ScientistLessWork',
                      'ScientistPlay'])
AV_FLAG_REASON_TOUCH = 1
AV_FLAG_HISTORY_LEN = 500
AV_TOUCH_CHECK_DELAY_AI = 3.0
AV_TOUCH_CHECK_DELAY_CL = 1.0
AV_TOUCH_CHECK_DIST = 2.0
AV_TOUCH_CHECK_DIST_Z = 5.0
AV_TOUCH_CHECK_TIMELIMIT_CL = 0.002
AV_TOUCH_COUNT_LIMIT = 5
AV_TOUCH_COUNT_TIME = 300
# Buffs...
BMovementSpeed = 0
BMovementSpeedMultiplier = 1.3
BGagAccuracy = 1
BGagAccuracyMultiplier = 1.3
BGagExperience = 2
BGagExperienceMultiplier = 1.5
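# Illustrative use of the buff indices/multipliers above (a sketch only --
# `buffs` and `base_speed` are assumed names, not part of this module):
#   buffs = {BMovementSpeed: BMovementSpeedMultiplier}
#   speed = base_speed * buffs.get(BMovementSpeed, 1.0)  # +30% when buffed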
# House catalog prices
housePrices = {
HouseGlobals.HOUSE_DEFAULT: 10000,
HouseGlobals.HOUSE_CABIN: 20000
}
def getHousePriceById(houseId):
return housePrices[houseId]
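# Hedged sketch, not original API: getHousePriceById raises KeyError for ids
# absent from housePrices; a tolerant variant could fall back to the default
# house price instead.
def getHousePriceByIdSafe(houseId):
    # dict.get avoids the KeyError; the choice of fallback is an assumption
    return housePrices.get(houseId, housePrices[HouseGlobals.HOUSE_DEFAULT])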
| 17.581531 | 220 | 0.704275 |
8b5f5eaff8daad3dcb40a1a1a9ed661f477e06cc | 253 | py | Python | manage.py | leviplj/militar | 1558a4909d86867058241595c77efd6a11cbae66 | ["MIT"] | null | null | null | manage.py | leviplj/militar | 1558a4909d86867058241595c77efd6a11cbae66 | ["MIT"] | null | null | null | manage.py | leviplj/militar | 1558a4909d86867058241595c77efd6a11cbae66 | ["MIT"] | null | null | null | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "aditamento.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 23 | 74 | 0.774704 |
1658eab4bd63a5ecebe0cfaf5a2fffede1dd6d82 | 15,781 | py | Python | tensorflow/python/kernel_tests/split_op_test.py | chenzhengda/tensorflow | 8debb698097670458b5f21d728bc6f734a7b5a53 | ["Apache-2.0"] | 74 | 2020-07-06T17:11:39.000Z | 2022-01-28T06:31:28.000Z | tensorflow/python/kernel_tests/split_op_test.py | chenzhengda/tensorflow | 8debb698097670458b5f21d728bc6f734a7b5a53 | ["Apache-2.0"] | 9 | 2020-10-13T23:25:29.000Z | 2022-02-10T06:54:48.000Z | tensorflow/python/kernel_tests/split_op_test.py | chenzhengda/tensorflow | 8debb698097670458b5f21d728bc6f734a7b5a53 | ["Apache-2.0"] | 12 | 2020-07-08T07:27:17.000Z | 2021-12-27T08:54:27.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Split Op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
_TEST_DTYPES = (dtypes.float32, dtypes.float64, dtypes.complex64,
dtypes.complex128)
class SplitOpTest(test.TestCase):
def _makeData(self, shape, dtype):
data = np.random.rand(*shape).astype(dtype.as_numpy_dtype)
if dtype.is_complex:
data -= 1j * data
return data
@test_util.run_deprecated_v1
def testShapeInference(self):
model_input = array_ops.placeholder(dtypes.float32, shape=(1, 10))
# check that we fail during static shape inference if sizes are known
with self.assertRaises(ValueError):
# pylint: disable=expression-not-assigned
array_ops.split(model_input, [4], axis=1)[0]
# pylint: enable=expression-not-assigned
model_input = array_ops.placeholder(dtypes.float32)
inp = np.zeros((1, 10))
# check that we still fail at runtime if the shapes were unknown
with self.cached_session(use_gpu=True) as sess:
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(array_ops.split(model_input, [4]), {model_input: inp})
# scalar Tensors are not permitted as num_splits
for axis in [0, -2]:
with self.cached_session(use_gpu=True) as sess:
with self.assertRaises(ValueError):
# pylint: disable=expression-not-assigned
sess.run(
array_ops.split(
array_ops.ones([4, 4]),
num_or_size_splits=constant_op.constant(2),
axis=axis))
# pylint: enable=expression-not-assigned
# test that none split dimensions remain, even if we don't know how
# the split_dim will be split, but we do know the axis
result = array_ops.split(
array_ops.ones([5, 2]), array_ops.constant([2, 1, 2]) * 1, axis=0)
self.assertEqual(result[0].shape[1], 2)
self.assertEqual(result[1].shape[1], 2)
self.assertEqual(result[2].shape[1], 2)
model_input2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
result = array_ops.split(model_input2, [2, 2], axis=0)[0]
with self.cached_session(use_gpu=True) as sess:
sess.run(result, feed_dict={model_input2: np.ones([4, 2])})
@test_util.run_deprecated_v1
def testFailWithoutExplicitNum(self):
size_splits = array_ops.placeholder(dtype=dtypes.int32, shape=[None])
value = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
with self.session(use_gpu=True) as sess:
with self.assertRaises(ValueError) as context:
sess.run(array_ops.split(value, size_splits), {size_splits: [2, 2, 6]})
self.assertTrue("Cannot infer num from shape" in str(context.exception))
@test_util.run_in_graph_and_eager_modes
def testExplicitNum(self):
size_splits = array_ops.constant([2, 2, 6], dtype=dtypes.int32)
value = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
# Eager and Graph modes raise different exceptions
with self.assertRaises((errors_impl.InvalidArgumentError, ValueError)):
array_ops.split(value, size_splits, num=4)
r = self.evaluate(array_ops.split(value, size_splits, num=3))
self.assertAllEqual(r[0], value[0:2])
self.assertAllEqual(r[1], value[2:4])
self.assertAllEqual(r[2], value[4:])
@test_util.run_in_graph_and_eager_modes
def testListOfScalarTensors(self):
a = math_ops.cast(5, dtypes.int32)
b = math_ops.cast(6, dtypes.int32)
value = np.random.rand(11, 11)
with test_util.device(use_gpu=True):
result = self.evaluate(array_ops.split(value, [a, b]))
self.assertAllEqual(result[0], value[0:5, :])
self.assertAllEqual(result[1], value[5:, :])
def _RunAndVerifyVariable(self, dtype, large_num_splits=False):
# Random dims of rank 5
shape = np.random.randint(1, 5, size=5)
split_dim = np.random.randint(-5, 5)
if large_num_splits:
num_split = np.random.randint(16, 25)
else:
num_split = np.random.randint(2, 8)
size_splits = np.random.randint(2, 8, num_split, dtype=np.int32)
shape[split_dim] = np.sum(size_splits)
inp = self._makeData(shape, dtype)
with test_util.device(use_gpu=True):
result = self.evaluate(array_ops.split(inp, size_splits, split_dim))
slices = [slice(0, x) for x in shape]
offset = 0
for i in range(num_split):
slices[split_dim] = slice(offset, offset + size_splits[i])
offset += size_splits[i]
self.assertAllEqual(result[i], inp[slices])
def _testSpecialCasesVariable(self):
inp = np.random.rand(4, 4).astype("f")
with test_util.device(use_gpu=True):
result = self.evaluate(array_ops.split(inp, [4], 0))
self.assertAllEqual(result[0], inp)
result = self.evaluate(array_ops.split(inp, [-1, 3], 0))
self.assertAllEqual(result[0], inp[0:1, :])
self.assertAllEqual(result[1], inp[1:4, :])
def _testHugeNumberOfTensorsVariable(self, dtype):
num_split = 1000
size_splits = np.random.randint(1, 3, num_split, dtype=np.int32)
shape = [3, np.sum(size_splits)]
split_dim = 1
inp = self._makeData(shape, dtype)
with test_util.device(use_gpu=True):
result = self.evaluate(array_ops.split(inp, size_splits, split_dim))
slices = [slice(0, x) for x in shape]
offset = 0
for i in range(num_split):
slices[split_dim] = slice(offset, offset + size_splits[i])
offset += size_splits[i]
self.assertAllEqual(result[i], inp[slices])
@test_util.run_in_graph_and_eager_modes
def testSpecialCasesVariable(self):
self._testSpecialCasesVariable()
for dtype in _TEST_DTYPES:
self._testHugeNumberOfTensorsVariable(dtype)
@test_util.run_in_graph_and_eager_modes
def testDegenerateVariable(self):
inp = np.random.rand(4, 4).astype("f")
with test_util.device(use_gpu=True):
result = self.evaluate(array_ops.split(inp, [-1, 4], 0))
self.assertAllEqual(result[0], inp[0:0, :])
self.assertAllEqual(result[1], inp[0:4, :])
result = self.evaluate(array_ops.split(inp, [4, -1], 0))
self.assertAllEqual(result[0], inp[0:4, :])
self.assertAllEqual(result[1], inp[4:4, :])
result = self.evaluate(array_ops.split(inp, [-1, 4], 1))
self.assertAllEqual(result[0], inp[:, 0:0])
self.assertAllEqual(result[1], inp[:, 0:4])
result = self.evaluate(array_ops.split(inp, [4, -1], 1))
self.assertAllEqual(result[0], inp[:, 0:4])
self.assertAllEqual(result[1], inp[:, 4:4])
def _testGradientsSimpleVariable(self, dtype):
inp = self._makeData((4, 4), dtype)
with test_util.device(use_gpu=True):
inp_tensor = ops.convert_to_tensor(inp)
s = array_ops.split(inp_tensor, [1, 3], 1)
inp_grads = [
self._makeData((4, 1), dtype), self._makeData((4, 3), dtype)
]
grad_tensors = [constant_op.constant(x) for x in inp_grads]
grad = gradients_impl.gradients(s, [inp_tensor], grad_tensors)[-1]
result = self.evaluate(grad)
self.assertAllEqual(result[:, 0:1], inp_grads[0])
self.assertAllEqual(result[:, 1:4], inp_grads[1])
@test_util.run_deprecated_v1
def testOutputShape(self):
for axis in [1, -1]:
with self.cached_session(use_gpu=True):
tensor = array_ops.placeholder(dtypes.float32, shape=[None, 12])
size_splits = [3, 7, 2]
outputs = array_ops.split(tensor, size_splits, axis)
for i, output in enumerate(outputs):
self.assertEqual(output.get_shape().as_list(), [None, size_splits[i]])
def _compare(self, x, dim, num):
np_ans = np.split(x, num, dim)
with test_util.device(use_gpu=True):
tf_ans = array_ops.split(value=x, num_or_size_splits=num, axis=dim)
out = self.evaluate(tf_ans)
    self.assertEqual(num, len(np_ans))
self.assertEqual(num, len(out))
for i in range(num):
self.assertAllEqual(np_ans[i], out[i])
self.assertShapeEqual(np_ans[i], tf_ans[i])
@test_util.run_in_graph_and_eager_modes
def testSplitRows(self):
for dtype in _TEST_DTYPES:
inp = self._makeData((4, 4), dtype)
self._compare(inp, 0, 4)
@test_util.run_in_graph_and_eager_modes
def testSplitCols(self):
for dtype in _TEST_DTYPES:
inp = self._makeData((4, 4), dtype)
self._compare(inp, 1, 4)
def _testEmpty(self, x, dim, num, expected_shape):
with test_util.device(use_gpu=True):
tf_ans = array_ops.split(value=x, num_or_size_splits=num, axis=dim)
out = self.evaluate(tf_ans)
self.assertEqual(x.size, 0)
self.assertEqual(len(out), num)
for i in range(num):
self.assertEqual(out[i].shape, expected_shape)
self.assertEqual(expected_shape, tf_ans[i].get_shape())
@test_util.run_in_graph_and_eager_modes
def testEmpty(self):
# Note: np.split returns a rank-0 empty ndarray
# if the input ndarray is empty.
for dtype in _TEST_DTYPES:
inp = self._makeData((8, 0, 21), dtype)
self._testEmpty(inp, 0, 2, (4, 0, 21))
self._testEmpty(inp, 0, 4, (2, 0, 21))
self._testEmpty(inp, 1, 4, (8, 0, 21))
self._testEmpty(inp, 2, 3, (8, 0, 7))
self._testEmpty(inp, 2, 7, (8, 0, 3))
@test_util.run_in_graph_and_eager_modes
def testIdentity(self):
for dtype in _TEST_DTYPES:
inp = self._makeData((2, 2, 2), dtype)
self._compare(inp, 0, 1)
self._compare(inp, 1, 1)
self._compare(inp, 2, 1)
@test_util.run_in_graph_and_eager_modes
def testSplitDim0(self):
for dtype in _TEST_DTYPES:
self._compare(self._makeData((6, 10, 18), dtype), 0, 3)
self._compare(self._makeData((6, 7, 18), dtype), 0, 3)
self._compare(self._makeData((6, 7, 9), dtype), 0, 3)
def _RunAndVerify(self, dtype, large_num_splits=False):
# Random dims of rank 5
shape = np.random.randint(0, 5, size=5)
split_dim = np.random.randint(-5, 5)
if large_num_splits:
num_split = np.random.randint(9, 15)
else:
num_split = np.random.randint(2, 8)
shape[split_dim] = np.random.randint(2, 5) * num_split
inp = self._makeData(shape, dtype)
with test_util.device(use_gpu=True):
result = self.evaluate(
array_ops.split(
value=inp, num_or_size_splits=num_split, axis=split_dim))
slices = [slice(0, x) for x in shape]
offset = 0
length = shape[split_dim] // num_split
for i in range(num_split):
slices[split_dim] = slice(offset, offset + length)
offset += length
self.assertAllEqual(result[i], inp[slices])
@test_util.run_in_graph_and_eager_modes
def testRandom(self):
for dtype in _TEST_DTYPES:
for _ in range(5):
self._RunAndVerify(dtype)
self._RunAndVerify(dtype, large_num_splits=True)
self._RunAndVerifyVariable(dtype)
self._RunAndVerifyVariable(dtype, large_num_splits=True)
def _testGradientsSimple(self, dtype):
inp = self._makeData((4, 4), dtype)
with self.cached_session(use_gpu=True):
inp_tensor = ops.convert_to_tensor(inp)
s = array_ops.split(value=inp_tensor, num_or_size_splits=4, axis=1)
      inp_grads = [self._makeData((4, 1), dtype) for _ in range(4)]
grad_tensors = [constant_op.constant(x) for x in inp_grads]
grad = gradients_impl.gradients(s, [inp_tensor], grad_tensors)[0]
result = self.evaluate(grad)
for i in range(4):
self.assertAllEqual(result[:, i:i + 1], inp_grads[i])
@test_util.run_deprecated_v1
def testGradientsAll(self):
for dtype in _TEST_DTYPES:
self._testGradientsSimple(dtype)
self._testGradientsSimpleVariable(dtype)
@test_util.run_deprecated_v1
def testShapeFunctionEdgeCases(self):
# split_dim greater than rank of input.
with self.assertRaises(ValueError):
array_ops.split(value=[[0, 1], [2, 3]], num_or_size_splits=4, axis=2)
# split dim less than -(rank of input)
with self.assertRaises(ValueError):
array_ops.split(value=[[0, 1], [2, 3]], num_or_size_splits=4, axis=-3)
# num_split does not evenly divide the size in split_dim.
with self.assertRaisesRegex(ValueError, "should evenly divide"):
array_ops.split(value=[0, 1, 2, 3], num_or_size_splits=3, axis=0)
# Unknown split_dim.
splits = array_ops.split(
value=[[0, 1, 2, 3]],
num_or_size_splits=4,
axis=array_ops.placeholder(dtypes.int32))
for s in splits:
self.assertEqual([None, None], s.get_shape().as_list())
# Unknown split_dim and input shape.
splits = array_ops.split(
value=array_ops.placeholder(dtypes.float32),
num_or_size_splits=4,
axis=array_ops.placeholder(dtypes.int32))
for s in splits:
self.assertEqual(None, s.get_shape().ndims)
@test_util.run_deprecated_v1
def testVariableShapeFunction(self):
# size_splits too big
with self.assertRaises(ValueError):
array_ops.split([0, 1], [3, -1], axis=0)
# Correct inference of variable dimension
s0, s1 = array_ops.split([0, 1, 2], [2, -1], axis=0)
assert s0.shape.as_list() == [2]
assert s1.shape.as_list() == [1]
@test_util.run_deprecated_v1
def testNonexistentDimTensor(self):
x = array_ops.placeholder(dtypes.int32)
values = np.zeros([5, 30])
splits = array_ops.placeholder(dtypes.int32)
with self.assertRaisesRegex(ValueError, "Cannot infer"):
y = array_ops.split(values, splits, axis=x)
splits = array_ops.placeholder(dtypes.int32, [3])
y = array_ops.split(values, splits, axis=x)
with self.session(use_gpu=True) as sess:
with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
"must have exactly one element"):
sess.run(y, {x: np.array([], dtype=np.int32), splits: [4, 11, 15]})
@test_util.run_in_graph_and_eager_modes
def testNegativeSizes(self):
x = constant_op.constant([1, 2, 3], dtypes.float32)
# A size of -1 signifies to determine size based on sum of other splits.
with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
"Split size at index 1 must be >= 0. Got: -2"):
splits = [-1, -2]
self.evaluate(array_ops.split(x, splits, axis=0))
@test_util.run_in_graph_and_eager_modes
def testBadSplitSizes(self):
x = constant_op.constant([1, 2], dtypes.float32)
with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
"Determined shape must either match input"
"|can't split axis"):
splits = [1, 2]
self.evaluate(array_ops.split(x, splits, axis=0))
if __name__ == "__main__":
test.main()
| 38.396594 | 80 | 0.673025 |
b363cac669558975725f1f4438e462bcb57a0fc1 | 1,811 | py | Python | aliyun-python-sdk-ehpc/aliyunsdkehpc/request/v20180412/AddSecurityGroupRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | ["Apache-2.0"] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-ehpc/aliyunsdkehpc/request/v20180412/AddSecurityGroupRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | ["Apache-2.0"] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-ehpc/aliyunsdkehpc/request/v20180412/AddSecurityGroupRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | ["Apache-2.0"] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkehpc.endpoint import endpoint_data
class AddSecurityGroupRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'EHPC', '2018-04-12', 'AddSecurityGroup')
self.set_method('GET')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ClientToken(self):
return self.get_query_params().get('ClientToken')
def set_ClientToken(self,ClientToken):
self.add_query_param('ClientToken',ClientToken)
def get_SecurityGroupId(self):
return self.get_query_params().get('SecurityGroupId')
def set_SecurityGroupId(self,SecurityGroupId):
self.add_query_param('SecurityGroupId',SecurityGroupId)
def get_ClusterId(self):
return self.get_query_params().get('ClusterId')
def set_ClusterId(self,ClusterId):
self.add_query_param('ClusterId',ClusterId) | 36.22 | 74 | 0.770845 |
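# Usage sketch (placeholders throughout; assumes the standard aliyunsdkcore
# client API rather than anything defined in this file):
#   from aliyunsdkcore.client import AcsClient
#   client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#   request = AddSecurityGroupRequest()
#   request.set_ClusterId('<cluster-id>')
#   request.set_SecurityGroupId('<security-group-id>')
#   response = client.do_action_with_exception(request)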
af47902753acaadd555731e6d584a5e083044f7e | 303 | py | Python | datamaps/admin.py | rochapps/django-datamaps | 74d50077d31c79095122968bc3c2fde9628da69a | ["BSD-2-Clause"] | 3 | 2016-07-13T17:19:12.000Z | 2017-09-07T01:49:48.000Z | datamaps/admin.py | rochapps/django-datamaps | 74d50077d31c79095122968bc3c2fde9628da69a | ["BSD-2-Clause"] | 22 | 2015-09-20T14:00:16.000Z | 2021-06-10T20:08:25.000Z | datamaps/admin.py | rochapps/django-datamaps | 74d50077d31c79095122968bc3c2fde9628da69a | ["BSD-2-Clause"] | 6 | 2015-12-14T21:05:01.000Z | 2019-11-02T19:35:24.000Z | from django.contrib import admin
from .models import Country, Scope
class CountryAdmin(admin.ModelAdmin):
list_display = ('name', 'scope', 'color', 'lat', 'lon', )
list_per_page = 10
search_fields = ["name", "code"]
admin.site.register(Country, CountryAdmin)
admin.site.register(Scope)
| 21.642857 | 61 | 0.706271 |
c2ac8bb71dde99d797db82cff33a8c8cafd5c35c | 5,156 | py | Python | bgx/families/settings/bgx_settings/processor/main.py | sparsov/DGT-Kawartha-demo | edfbc18f2c70e813805ec23c28fbc35bf7866ffc | ["Apache-2.0"] | null | null | null | bgx/families/settings/bgx_settings/processor/main.py | sparsov/DGT-Kawartha-demo | edfbc18f2c70e813805ec23c28fbc35bf7866ffc | ["Apache-2.0"] | 10 | 2020-05-12T06:58:15.000Z | 2022-02-26T23:59:35.000Z | bgx/families/settings/bgx_settings/processor/main.py | DGT-Network/DGT-Mississauga | 52b5f1f4015db2aa7196e727a25b399de5fbf3c3 | ["Apache-2.0"] | 1 | 2021-01-12T21:38:01.000Z | 2021-01-12T21:38:01.000Z | # Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import argparse
import logging
import os
import sys
import pkg_resources
from colorlog import ColoredFormatter
from sawtooth_sdk.processor.core import TransactionProcessor
from sawtooth_sdk.processor.log import init_console_logging
from sawtooth_sdk.processor.log import log_configuration
from sawtooth_sdk.processor.config import get_log_config
from sawtooth_sdk.processor.config import get_log_dir
from sawtooth_sdk.processor.config import get_config_dir
from bgx_settings.processor.handler import SettingsTransactionHandler
from bgx_settings.processor.config.settings import SettingsConfig
from bgx_settings.processor.config.settings import load_default_settings_config
from bgx_settings.processor.config.settings import load_toml_settings_config
from bgx_settings.processor.config.settings import merge_settings_config
DISTRIBUTION_NAME = 'bgx-settings'
def create_console_handler(verbose_level):
clog = logging.StreamHandler()
formatter = ColoredFormatter(
"%(log_color)s[%(asctime)s.%(msecs)03d "
"%(levelname)-8s %(module)s]%(reset)s "
"%(white)s%(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
reset=True,
log_colors={
'DEBUG': 'cyan',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red',
})
clog.setFormatter(formatter)
if verbose_level == 0:
clog.setLevel(logging.WARN)
elif verbose_level == 1:
clog.setLevel(logging.INFO)
else:
clog.setLevel(logging.DEBUG)
return clog
def setup_loggers(verbose_level, processor):
log_config = get_log_config(filename="settings_log_config.toml")
# If no toml, try loading yaml
if log_config is None:
log_config = get_log_config(filename="settings_log_config.yaml")
if log_config is not None:
log_configuration(log_config=log_config)
else:
log_dir = get_log_dir()
# use the transaction processor zmq identity for filename
log_configuration(
log_dir=log_dir,
name="settings-" + str(processor.zmq_id)[2:-1])
init_console_logging(verbose_level=verbose_level)
def create_parser(prog_name):
parser = argparse.ArgumentParser(
prog=prog_name,
description='Starts a Settings transaction processor (settings-tp).',
epilog='This process is required to apply any changes to on-chain '
'settings used by the Sawtooth platform.',
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
'-C', '--connect',
help='specify the endpoint for the validator connection (default: '
'tcp://localhost:4004) ')
parser.add_argument(
'-v', '--verbose',
action='count',
default=0,
help='enable more verbose output to stderr')
try:
version = pkg_resources.get_distribution(DISTRIBUTION_NAME).version
except pkg_resources.DistributionNotFound:
version = 'UNKNOWN'
parser.add_argument(
'-V', '--version',
action='version',
version=(DISTRIBUTION_NAME + ' (Hyperledger Sawtooth) version {}')
.format(version),
help='display version information')
return parser
def load_settings_config(first_config):
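    # Config sources are merged in this order: CLI-supplied config,
    # settings.toml, then built-in defaults (earlier sources take
    # precedence inside merge_settings_config).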
default_settings_config = \
load_default_settings_config()
conf_file = os.path.join(get_config_dir(), 'settings.toml')
toml_config = load_toml_settings_config(conf_file)
return merge_settings_config(
configs=[first_config, toml_config, default_settings_config])
def create_settings_config(args):
return SettingsConfig(connect=args.connect)
def main(prog_name=os.path.basename(sys.argv[0]), args=None,
with_loggers=True):
if args is None:
args = sys.argv[1:]
parser = create_parser(prog_name)
args = parser.parse_args(args)
arg_config = create_settings_config(args)
settings_config = load_settings_config(arg_config)
processor = TransactionProcessor(url=settings_config.connect)
if with_loggers is True:
if args.verbose is None:
verbose_level = 0
else:
verbose_level = args.verbose
setup_loggers(verbose_level=verbose_level, processor=processor)
handler = SettingsTransactionHandler()
processor.add_handler(handler)
try:
processor.start()
except KeyboardInterrupt:
pass
finally:
processor.stop()
| 31.439024 | 80 | 0.691234 |
2b19649b06249fd1a393927830b89b877a30bdd7 | 265 | py | Python | disassemble.py | JHU-PL-Lab/representation-types | 8805849eaa793692aada84c2c49fa227ed8c6387 | ["CC-BY-4.0"] | null | null | null | disassemble.py | JHU-PL-Lab/representation-types | 8805849eaa793692aada84c2c49fa227ed8c6387 | ["CC-BY-4.0"] | null | null | null | disassemble.py | JHU-PL-Lab/representation-types | 8805849eaa793692aada84c2c49fa227ed8c6387 | ["CC-BY-4.0"] | null | null | null |
import sys
import importlib.util
from dis import dis
src_file = sys.argv[1]
module_spec = importlib.util.spec_from_file_location("unknown.module", src_file)
module = importlib.util.module_from_spec(module_spec)
module_spec.loader.exec_module(module)
dis(module)
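# Usage sketch (the path below is an example): python disassemble.py target.py
# Note: exec_module runs the target file's top-level code before dis() prints
# its bytecode, so only point this at trusted files.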
| 22.083333 | 80 | 0.818868 |
9169f1de3c9221d2cbc054026129f2617571f0eb | 664 | py | Python | puskesmas_app/migrations/0012_auto_20180625_1500.py | kurniantoska/medicalwebapp_project | a2e36a44b598ad2989c207f950a89c02d987e00d | ["BSD-3-Clause"] | 1 | 2019-10-22T02:12:49.000Z | 2019-10-22T02:12:49.000Z | puskesmas_app/migrations/0012_auto_20180625_1500.py | kurniantoska/medicalwebapp_project | a2e36a44b598ad2989c207f950a89c02d987e00d | ["BSD-3-Clause"] | 3 | 2020-06-05T18:30:35.000Z | 2021-06-10T20:31:09.000Z | puskesmas_app/migrations/0012_auto_20180625_1500.py | kurniantoska/medicalwebapp_project | a2e36a44b598ad2989c207f950a89c02d987e00d | ["BSD-3-Clause"] | null | null | null | # Generated by Django 2.0.6 on 2018-06-25 07:00
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('puskesmas_app', '0011_auto_20180625_1458'),
]
operations = [
migrations.AlterField(
model_name='demografipenduduk',
name='tahun',
field=models.PositiveIntegerField(choices=[(2015, 2015), (2016, 2016), (2017, 2017), (2018, 2018)], help_text='Gunakan Format Tahun: <YYYY>', validators=[django.core.validators.MinValueValidator(2015), django.core.validators.MaxValueValidator(2018)]),
),
]
| 33.2 | 264 | 0.653614 |
7e287e9d8e1c08f66aedfdd5e3a6cb377ca3a4b5 | 993 | py | Python | src/read_csv.py | jmarca/initial-solution | 61766ef4dff6f1405fbaf9d13e271d13fda33ad7 | ["Apache-2.0"] | 6 | 2019-05-30T17:55:02.000Z | 2021-03-17T11:45:38.000Z | src/read_csv.py | jmarca/initial-solution | 61766ef4dff6f1405fbaf9d13e271d13fda33ad7 | ["Apache-2.0"] | null | null | null | src/read_csv.py | jmarca/initial-solution | 61766ef4dff6f1405fbaf9d13e271d13fda33ad7 | ["Apache-2.0"] | 7 | 2019-07-04T02:05:48.000Z | 2021-03-17T11:47:32.000Z | """Read in the problem configuration from CSV"""
import pandas as pd
import numpy as np
import re
def load_demand_from_csv(filename):
"""extract a usable data structure from a csv file
Args:
filename (str): the input csv file to read. will be read with pandas.read_csv(filename)
Returns: a pandas.DataFrame you can use, or just save as json for future runs
"""
demand = pd.read_csv(filename,names=['from_node','to_node','early','late'],header=0)
return demand
def load_matrix_from_csv(filename):
"""extract a usable data structure from a csv file
Args:
filename (str): the input csv file to read. will be read with pandas.read_csv(filename)
Returns: a pandas.DataFrame you can use, or just save as json for future runs
"""
matrix = pd.read_csv(filename,header=None)
return matrix
def travel_time(speed, matrix):
"""convert the distance matrix into a travel time matrix"""
return matrix.copy().floordiv(speed)
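if __name__ == '__main__':
    # Minimal illustrative check (synthetic numbers, not project data):
    # a distance of 25 at speed 10 floor-divides to a travel time of 2.
    demo_matrix = pd.DataFrame([[0, 25], [25, 0]])
    print(travel_time(10, demo_matrix))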
| 26.837838 | 95 | 0.703927 |
389407f598cb521f33869b2c89cff70a7d3468f5 | 10,253 | py | Python | docs/conf.py | SimeonSimjanovski/RP2018-19 | 10d548a28ab5883666b9fdf9f838665c384d86a4 | [
"MIT"
] | 3 | 2018-05-03T05:08:56.000Z | 2021-09-29T12:54:07.000Z | docs/conf.py | SimeonSimjanovski/RP2018-19 | 10d548a28ab5883666b9fdf9f838665c384d86a4 | [
"MIT"
] | null | null | null | docs/conf.py | SimeonSimjanovski/RP2018-19 | 10d548a28ab5883666b9fdf9f838665c384d86a4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# M-LOOP documentation build configuration file, created by
# sphinx-quickstart on Wed Aug 24 11:34:47 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.mathjax'
]
# Napoleon settings
napoleon_google_docstring = True
napoleon_include_private_with_doc = True
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'M-LOOP'
copyright = '2016, Michael R Hush'
author = 'Michael R Hush'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.1'
# The full version, including alpha/beta/rc tags.
release = '2.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
# exclude_patterns = ['_templates']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Custom sidebar templates, maps document names to template names.
html_sidebars = { '**': ['about.html','navigation.html','relations.html', 'searchbox.html'], }
#'globaltoc.html',
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'logo':'M-LOOP_logo.png',
'logo_name':True,
'description':'Machine-Learning Online Optimization Package',
'github_user':'michaelhush',
'github_repo':'M-LOOP',
'github_banner':True,
'font_family':"Arial, Helvetica, sans-serif",
'head_font_family':"Arial, Helvetica, sans-serif",
'analytics_id':'UA-83520804-1'}
#'github_button':True,
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#html_title = 'M-LOOP v2.0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = '_static/M-LOOP_logo.png'
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/M-LOOP_logo.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'M-LOOPdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'M-LOOP.tex', 'M-LOOP Documentation',
'Michael R Hush', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = 'M-LOOP_logo.pdf'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'm-loop', 'M-LOOP Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'M-LOOP', 'M-LOOP Documentation',
author, 'M-LOOP', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 32.446203 | 95 | 0.721545 |
04f3b997adaf2352fc40cf3e349b0ef870b9c40d | 278 | py | Python | Modulo-01/ex028/ex028.py | Matheus-Henrique-Burey/Curso-de-Python | 448aebaab96527affa1e45897a662bb0407c11c6 | [
"MIT"
] | null | null | null | Modulo-01/ex028/ex028.py | Matheus-Henrique-Burey/Curso-de-Python | 448aebaab96527affa1e45897a662bb0407c11c6 | [
"MIT"
] | null | null | null | Modulo-01/ex028/ex028.py | Matheus-Henrique-Burey/Curso-de-Python | 448aebaab96527affa1e45897a662bb0407c11c6 | [
"MIT"
] | null | null | null | from random import randint
pc = randint(0,5)
print('-=' * 15)
print('ESTOU PENSANDO EM UM NUMERO')
print('-=' * 15)
player = int(input('ADIVINHE QUAL É DE 0 A 5: '))
if pc == player:
print('PARABENS VOCCE ACERTOU!!!!')
else:
print(f'GANHEI!! estava pensando em {pc}')
| 21.384615 | 49 | 0.636691 |
0c855c79de8ae4e9c43a787cc0bb617f54bafb0f | 1,977 | py | Python | qa/rpc-tests/reject-version-bit.py | stakecom/stakework | a2110b0ba6aa9638a18c2e7ae12f0f229e074f35 | ["MIT"] | null | null | null | qa/rpc-tests/reject-version-bit.py | stakecom/stakework | a2110b0ba6aa9638a18c2e7ae12f0f229e074f35 | ["MIT"] | null | null | null | qa/rpc-tests/reject-version-bit.py | stakecom/stakework | a2110b0ba6aa9638a18c2e7ae12f0f229e074f35 | ["MIT"] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2018 The StakeWork Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test rejection of version bit votes
#
import time
from test_framework.test_framework import StakeWorkTestFramework
from test_framework.util import *
from test_framework.mininode import *
class RejectVersionBitTest(StakeWorkTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4)
def setup_network(self):
self.nodes = []
# Nodes 0/1 are "wallet" nodes
self.nodes.append(start_node(0, self.options.tmpdir, ["-rejectversionbit=6"]))
self.nodes.append(start_node(1, self.options.tmpdir, []))
connect_nodes(self.nodes[0], 1)
self.is_network_split = False
self.sync_all()
def run_test(self):
print("Mining blocks...")
        blocks_node_1 = slow_gen(self.nodes[0], 100)
self.sync_all()
assert(self.nodes[0].getblockchaininfo()["bip9_softforks"]["segwit"]["status"] == "started")
        blocks_node_1 = slow_gen(self.nodes[0], 10)
self.sync_all()
        blocks_node_2 = slow_gen(self.nodes[1], 90)
self.sync_all()
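        # Node 0 was started with -rejectversionbit=6, so its blocks must not
        # signal bit 6, while node 1 keeps signalling it for the deployment.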
assert(self.nodes[0].getblock(blocks_node_1[-1])["version"] & (1<<6) == 0)
assert(self.nodes[1].getblock(blocks_node_2[-1])["version"] & (1<<6) == (1<<6))
self.sync_all()
assert(self.nodes[0].getblockchaininfo()["bip9_softforks"]["segwit"]["status"] == "locked_in")
        blocks_node_1 = slow_gen(self.nodes[0], 50)
        blocks_node_2 = slow_gen(self.nodes[1], 50)
assert(self.nodes[0].getblock(blocks_node_1[-1])["version"] & (1<<6) == (1<<6))
assert(self.nodes[1].getblock(blocks_node_2[-1])["version"] & (1<<6) == (1<<6))
if __name__ == '__main__':
RejectVersionBitTest().main()
| 39.54 | 102 | 0.661608 |
b227632317341b3858cd7ef733b82cbc84ce666b | 1,217 | py | Python | test/test_count_commander.py | MushroomPoet/percheron | e49d4bb416a98dff1f7137834232df67fe7065fb | ["MIT"] | null | null | null | test/test_count_commander.py | MushroomPoet/percheron | e49d4bb416a98dff1f7137834232df67fe7065fb | ["MIT"] | 10 | 2022-03-30T12:49:39.000Z | 2022-03-30T12:57:48.000Z | test/test_count_commander.py | MushroomPoet/percheron | e49d4bb416a98dff1f7137834232df67fe7065fb | ["MIT"] | null | null | null | import io
from percheron.count_commander import CountCommander
from percheron.library import Library
PROGRAM = "percheron-test"
def test_run_help():
cmds = "help\n.\n"
assert CountCommander.HELP in _process_cmds(cmds)
def test_multiple_cards():
cmds = "set iko\na\n.\n"
assert "Multiple matches" in _process_cmds(cmds)
def test_no_cards():
cmds = "set iko\nno match\n.\n"
assert "Could not find match" in _process_cmds(cmds)
def test_report():
cmds = "set iko\nreport\n.\n"
assert "\tBrokkos, Apex of Forever" in _process_cmds(cmds)
def test_single_card():
cmds = "set iko\nbrok\n.\n"
assert 'Found "Brokkos, Apex of Forever"' in _process_cmds(cmds)
def test_single_card_with_count():
cmds = "set iko\n3 brok\n.\n"
expected = '"Brokkos, Apex of Forever" given the count 3'
assert expected in _process_cmds(cmds)
def test_single_explicit_card():
cmds = "4 Brokkos, Apex of Forever (IKO) 179\n.\n"
expected = '"Brokkos, Apex of Forever" given the count 4'
assert expected in _process_cmds(cmds)
def _process_cmds(cmds):
output = io.StringIO()
CountCommander(PROGRAM, Library()).run(io.StringIO(cmds), output)
return output.getvalue()
| 29.682927 | 69 | 0.709121 |
6bd649e1cac5a5ac7567d9c87dfe757143c34d57 | 263 | py | Python | BackEnd/manage.py | Dataproces/Open4Citizens | 239f971d1ed61a50ae565773e3ba8df308626065 | ["MIT"] | null | null | null | BackEnd/manage.py | Dataproces/Open4Citizens | 239f971d1ed61a50ae565773e3ba8df308626065 | ["MIT"] | null | null | null | BackEnd/manage.py | Dataproces/Open4Citizens | 239f971d1ed61a50ae565773e3ba8df308626065 | ["MIT"] | null | null | null | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "O4CService.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 23.909091 | 75 | 0.745247 |
62c9047b17f3de4a99d016ce737c9ada518281d9 | 30,915 | py | Python | pandas/tests/reshape/merge/test_join.py | the-nose-knows/pandas | dcf7137ccc81986091b6c76624855bb5c32185f7 | ["PSF-2.0", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "ECL-2.0", "BSD-3-Clause"] | 80 | 2015-01-01T17:32:11.000Z | 2022-01-24T07:17:47.000Z | pandas/tests/reshape/merge/test_join.py | stevenbw/pandas | 9c0f6a8d703b6bee48918f2c5d16418a7ff736e3 | ["BSD-3-Clause"] | 1 | 2018-04-04T16:46:41.000Z | 2018-04-04T16:46:41.000Z | pandas/tests/reshape/merge/test_join.py | stevenbw/pandas | 9c0f6a8d703b6bee48918f2c5d16418a7ff736e3 | ["BSD-3-Clause"] | 28 | 2015-01-30T16:07:48.000Z | 2022-02-11T18:41:13.000Z | # pylint: disable=E1103
import numpy as np
from numpy.random import randn
import pytest
from pandas._libs import join as libjoin
import pandas.compat as compat
from pandas.compat import lrange
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, concat, merge
from pandas.tests.reshape.merge.test_merge import NGROUPS, N, get_test_data
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal
a_ = np.array
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class TestJoin(object):
def setup_method(self, method):
# aggregate multiple columns
self.df = DataFrame({'key1': get_test_data(),
'key2': get_test_data(),
'data1': np.random.randn(N),
'data2': np.random.randn(N)})
# exclude a couple keys for fun
self.df = self.df[self.df['key2'] > 1]
self.df2 = DataFrame({'key1': get_test_data(n=N // 5),
'key2': get_test_data(ngroups=NGROUPS // 2,
n=N // 5),
'value': np.random.randn(N // 5)})
index, data = tm.getMixedTypeDict()
self.target = DataFrame(data, index=index)
# Join on string value
self.source = DataFrame({'MergedA': data['A'], 'MergedD': data['D']},
index=data['C'])
def test_cython_left_outer_join(self):
left = a_([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64)
right = a_([1, 1, 0, 4, 2, 2, 1], dtype=np.int64)
max_group = 5
ls, rs = libjoin.left_outer_join(left, right, max_group)
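        # left_outer_join returns parallel indexers into `left` and `right`,
        # ordered by key; -1 on the right marks left keys with no match.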
exp_ls = left.argsort(kind='mergesort')
exp_rs = right.argsort(kind='mergesort')
exp_li = a_([0, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
6, 6, 7, 7, 8, 8, 9, 10])
exp_ri = a_([0, 0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3,
4, 5, 4, 5, 4, 5, -1, -1])
exp_ls = exp_ls.take(exp_li)
exp_ls[exp_li == -1] = -1
exp_rs = exp_rs.take(exp_ri)
exp_rs[exp_ri == -1] = -1
tm.assert_numpy_array_equal(ls, exp_ls, check_dtype=False)
tm.assert_numpy_array_equal(rs, exp_rs, check_dtype=False)
def test_cython_right_outer_join(self):
left = a_([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64)
right = a_([1, 1, 0, 4, 2, 2, 1], dtype=np.int64)
max_group = 5
rs, ls = libjoin.left_outer_join(right, left, max_group)
exp_ls = left.argsort(kind='mergesort')
exp_rs = right.argsort(kind='mergesort')
# 0 1 1 1
exp_li = a_([0, 1, 2, 3, 4, 5, 3, 4, 5, 3, 4, 5,
# 2 2 4
6, 7, 8, 6, 7, 8, -1])
exp_ri = a_([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3,
4, 4, 4, 5, 5, 5, 6])
exp_ls = exp_ls.take(exp_li)
exp_ls[exp_li == -1] = -1
exp_rs = exp_rs.take(exp_ri)
exp_rs[exp_ri == -1] = -1
tm.assert_numpy_array_equal(ls, exp_ls, check_dtype=False)
tm.assert_numpy_array_equal(rs, exp_rs, check_dtype=False)
def test_cython_inner_join(self):
left = a_([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64)
right = a_([1, 1, 0, 4, 2, 2, 1, 4], dtype=np.int64)
max_group = 5
ls, rs = libjoin.inner_join(left, right, max_group)
exp_ls = left.argsort(kind='mergesort')
exp_rs = right.argsort(kind='mergesort')
exp_li = a_([0, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
6, 6, 7, 7, 8, 8])
exp_ri = a_([0, 0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3,
4, 5, 4, 5, 4, 5])
exp_ls = exp_ls.take(exp_li)
exp_ls[exp_li == -1] = -1
exp_rs = exp_rs.take(exp_ri)
exp_rs[exp_ri == -1] = -1
tm.assert_numpy_array_equal(ls, exp_ls, check_dtype=False)
tm.assert_numpy_array_equal(rs, exp_rs, check_dtype=False)
def test_left_outer_join(self):
joined_key2 = merge(self.df, self.df2, on='key2')
_check_join(self.df, self.df2, joined_key2, ['key2'], how='left')
joined_both = merge(self.df, self.df2)
_check_join(self.df, self.df2, joined_both, ['key1', 'key2'],
how='left')
def test_right_outer_join(self):
joined_key2 = merge(self.df, self.df2, on='key2', how='right')
_check_join(self.df, self.df2, joined_key2, ['key2'], how='right')
joined_both = merge(self.df, self.df2, how='right')
_check_join(self.df, self.df2, joined_both, ['key1', 'key2'],
how='right')
def test_full_outer_join(self):
joined_key2 = merge(self.df, self.df2, on='key2', how='outer')
_check_join(self.df, self.df2, joined_key2, ['key2'], how='outer')
joined_both = merge(self.df, self.df2, how='outer')
_check_join(self.df, self.df2, joined_both, ['key1', 'key2'],
how='outer')
def test_inner_join(self):
joined_key2 = merge(self.df, self.df2, on='key2', how='inner')
_check_join(self.df, self.df2, joined_key2, ['key2'], how='inner')
joined_both = merge(self.df, self.df2, how='inner')
_check_join(self.df, self.df2, joined_both, ['key1', 'key2'],
how='inner')
def test_handle_overlap(self):
joined = merge(self.df, self.df2, on='key2',
suffixes=['.foo', '.bar'])
assert 'key1.foo' in joined
assert 'key1.bar' in joined
def test_handle_overlap_arbitrary_key(self):
joined = merge(self.df, self.df2,
left_on='key2', right_on='key1',
suffixes=['.foo', '.bar'])
assert 'key1.foo' in joined
assert 'key2.bar' in joined
def test_join_on(self):
target = self.target
source = self.source
merged = target.join(source, on='C')
tm.assert_series_equal(merged['MergedA'], target['A'],
check_names=False)
tm.assert_series_equal(merged['MergedD'], target['D'],
check_names=False)
# join with duplicates (fix regression from DataFrame/Matrix merge)
df = DataFrame({'key': ['a', 'a', 'b', 'b', 'c']})
df2 = DataFrame({'value': [0, 1, 2]}, index=['a', 'b', 'c'])
joined = df.join(df2, on='key')
expected = DataFrame({'key': ['a', 'a', 'b', 'b', 'c'],
'value': [0, 0, 1, 1, 2]})
assert_frame_equal(joined, expected)
# Test when some are missing
df_a = DataFrame([[1], [2], [3]], index=['a', 'b', 'c'],
columns=['one'])
df_b = DataFrame([['foo'], ['bar']], index=[1, 2],
columns=['two'])
df_c = DataFrame([[1], [2]], index=[1, 2],
columns=['three'])
joined = df_a.join(df_b, on='one')
joined = joined.join(df_c, on='one')
assert np.isnan(joined['two']['c'])
assert np.isnan(joined['three']['c'])
        # merge column not present
with pytest.raises(KeyError, match="^'E'$"):
target.join(source, on='E')
# overlap
source_copy = source.copy()
source_copy['A'] = 0
msg = ("You are trying to merge on float64 and object columns. If"
" you wish to proceed you should use pd.concat")
with pytest.raises(ValueError, match=msg):
target.join(source_copy, on='A')
def test_join_on_fails_with_different_right_index(self):
df = DataFrame({'a': np.random.choice(['m', 'f'], size=3),
'b': np.random.randn(3)})
df2 = DataFrame({'a': np.random.choice(['m', 'f'], size=10),
'b': np.random.randn(10)},
index=tm.makeCustomIndex(10, 2))
msg = (r'len\(left_on\) must equal the number of levels in the index'
' of "right"')
with pytest.raises(ValueError, match=msg):
merge(df, df2, left_on='a', right_index=True)
def test_join_on_fails_with_different_left_index(self):
df = DataFrame({'a': np.random.choice(['m', 'f'], size=3),
'b': np.random.randn(3)},
index=tm.makeCustomIndex(3, 2))
df2 = DataFrame({'a': np.random.choice(['m', 'f'], size=10),
'b': np.random.randn(10)})
msg = (r'len\(right_on\) must equal the number of levels in the index'
' of "left"')
with pytest.raises(ValueError, match=msg):
merge(df, df2, right_on='b', left_index=True)
def test_join_on_fails_with_different_column_counts(self):
df = DataFrame({'a': np.random.choice(['m', 'f'], size=3),
'b': np.random.randn(3)})
df2 = DataFrame({'a': np.random.choice(['m', 'f'], size=10),
'b': np.random.randn(10)},
index=tm.makeCustomIndex(10, 2))
msg = r"len\(right_on\) must equal len\(left_on\)"
with pytest.raises(ValueError, match=msg):
merge(df, df2, right_on='a', left_on=['a', 'b'])
@pytest.mark.parametrize("wrong_type", [2, 'str', None, np.array([0, 1])])
def test_join_on_fails_with_wrong_object_type(self, wrong_type):
# GH12081 - original issue
# GH21220 - merging of Series and DataFrame is now allowed
# Edited test to remove the Series object from test parameters
df = DataFrame({'a': [1, 1]})
msg = ("Can only merge Series or DataFrame objects, a {} was passed"
.format(str(type(wrong_type))))
with pytest.raises(TypeError, match=msg):
merge(wrong_type, df, left_on='a', right_on='a')
with pytest.raises(TypeError, match=msg):
merge(df, wrong_type, left_on='a', right_on='a')
def test_join_on_pass_vector(self):
expected = self.target.join(self.source, on='C')
del expected['C']
join_col = self.target.pop('C')
result = self.target.join(self.source, on=join_col)
assert_frame_equal(result, expected)
def test_join_with_len0(self):
# nothing to merge
merged = self.target.join(self.source.reindex([]), on='C')
for col in self.source:
assert col in merged
assert merged[col].isna().all()
merged2 = self.target.join(self.source.reindex([]), on='C',
how='inner')
tm.assert_index_equal(merged2.columns, merged.columns)
assert len(merged2) == 0
def test_join_on_inner(self):
df = DataFrame({'key': ['a', 'a', 'd', 'b', 'b', 'c']})
df2 = DataFrame({'value': [0, 1]}, index=['a', 'b'])
joined = df.join(df2, on='key', how='inner')
expected = df.join(df2, on='key')
expected = expected[expected['value'].notna()]
tm.assert_series_equal(joined['key'], expected['key'],
check_dtype=False)
tm.assert_series_equal(joined['value'], expected['value'],
check_dtype=False)
tm.assert_index_equal(joined.index, expected.index)
def test_join_on_singlekey_list(self):
df = DataFrame({'key': ['a', 'a', 'b', 'b', 'c']})
df2 = DataFrame({'value': [0, 1, 2]}, index=['a', 'b', 'c'])
# corner cases
joined = df.join(df2, on=['key'])
expected = df.join(df2, on='key')
assert_frame_equal(joined, expected)
def test_join_on_series(self):
result = self.target.join(self.source['MergedA'], on='C')
expected = self.target.join(self.source[['MergedA']], on='C')
assert_frame_equal(result, expected)
def test_join_on_series_buglet(self):
# GH #638
df = DataFrame({'a': [1, 1]})
ds = Series([2], index=[1], name='b')
result = df.join(ds, on='a')
expected = DataFrame({'a': [1, 1],
'b': [2, 2]}, index=df.index)
tm.assert_frame_equal(result, expected)
def test_join_index_mixed(self, join_type):
# no overlapping blocks
df1 = DataFrame(index=np.arange(10))
df1['bool'] = True
df1['string'] = 'foo'
df2 = DataFrame(index=np.arange(5, 15))
df2['int'] = 1
df2['float'] = 1.
joined = df1.join(df2, how=join_type)
expected = _join_by_hand(df1, df2, how=join_type)
assert_frame_equal(joined, expected)
joined = df2.join(df1, how=join_type)
expected = _join_by_hand(df2, df1, how=join_type)
assert_frame_equal(joined, expected)
def test_join_index_mixed_overlap(self):
df1 = DataFrame({'A': 1., 'B': 2, 'C': 'foo', 'D': True},
index=np.arange(10),
columns=['A', 'B', 'C', 'D'])
assert df1['B'].dtype == np.int64
assert df1['D'].dtype == np.bool_
df2 = DataFrame({'A': 1., 'B': 2, 'C': 'foo', 'D': True},
index=np.arange(0, 10, 2),
columns=['A', 'B', 'C', 'D'])
# overlap
joined = df1.join(df2, lsuffix='_one', rsuffix='_two')
expected_columns = ['A_one', 'B_one', 'C_one', 'D_one',
'A_two', 'B_two', 'C_two', 'D_two']
df1.columns = expected_columns[:4]
df2.columns = expected_columns[4:]
expected = _join_by_hand(df1, df2)
assert_frame_equal(joined, expected)
def test_join_empty_bug(self):
# generated an exception in 0.4.3
x = DataFrame()
x.join(DataFrame([3], index=[0], columns=['A']), how='outer')
def test_join_unconsolidated(self):
# GH #331
a = DataFrame(randn(30, 2), columns=['a', 'b'])
c = Series(randn(30))
a['c'] = c
d = DataFrame(randn(30, 1), columns=['q'])
# it works!
a.join(d)
d.join(a)
def test_join_multiindex(self):
index1 = MultiIndex.from_arrays([['a', 'a', 'a', 'b', 'b', 'b'],
[1, 2, 3, 1, 2, 3]],
names=['first', 'second'])
index2 = MultiIndex.from_arrays([['b', 'b', 'b', 'c', 'c', 'c'],
[1, 2, 3, 1, 2, 3]],
names=['first', 'second'])
df1 = DataFrame(data=np.random.randn(6), index=index1,
columns=['var X'])
df2 = DataFrame(data=np.random.randn(6), index=index2,
columns=['var Y'])
df1 = df1.sort_index(level=0)
df2 = df2.sort_index(level=0)
joined = df1.join(df2, how='outer')
ex_index = Index(index1.values).union(Index(index2.values))
expected = df1.reindex(ex_index).join(df2.reindex(ex_index))
expected.index.names = index1.names
assert_frame_equal(joined, expected)
assert joined.index.names == index1.names
df1 = df1.sort_index(level=1)
df2 = df2.sort_index(level=1)
joined = df1.join(df2, how='outer').sort_index(level=0)
ex_index = Index(index1.values).union(Index(index2.values))
expected = df1.reindex(ex_index).join(df2.reindex(ex_index))
expected.index.names = index1.names
assert_frame_equal(joined, expected)
assert joined.index.names == index1.names
def test_join_inner_multiindex(self):
key1 = ['bar', 'bar', 'bar', 'foo', 'foo', 'baz', 'baz', 'qux',
'qux', 'snap']
key2 = ['two', 'one', 'three', 'one', 'two', 'one', 'two', 'two',
'three', 'one']
data = np.random.randn(len(key1))
data = DataFrame({'key1': key1, 'key2': key2,
'data': data})
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
to_join = DataFrame(np.random.randn(10, 3), index=index,
columns=['j_one', 'j_two', 'j_three'])
joined = data.join(to_join, on=['key1', 'key2'], how='inner')
expected = merge(data, to_join.reset_index(),
left_on=['key1', 'key2'],
right_on=['first', 'second'], how='inner',
sort=False)
expected2 = merge(to_join, data,
right_on=['key1', 'key2'], left_index=True,
how='inner', sort=False)
assert_frame_equal(joined, expected2.reindex_like(joined))
expected2 = merge(to_join, data, right_on=['key1', 'key2'],
left_index=True, how='inner', sort=False)
expected = expected.drop(['first', 'second'], axis=1)
expected.index = joined.index
assert joined.index.is_monotonic
assert_frame_equal(joined, expected)
# _assert_same_contents(expected, expected2.loc[:, expected.columns])
def test_join_hierarchical_mixed(self):
# GH 2024
df = DataFrame([(1, 2, 3), (4, 5, 6)], columns=['a', 'b', 'c'])
new_df = df.groupby(['a']).agg({'b': [np.mean, np.sum]})
other_df = DataFrame(
[(1, 2, 3), (7, 10, 6)], columns=['a', 'b', 'd'])
other_df.set_index('a', inplace=True)
# GH 9455, 12219
with tm.assert_produces_warning(UserWarning):
result = merge(new_df, other_df, left_index=True, right_index=True)
assert ('b', 'mean') in result
assert 'b' in result
def test_join_float64_float32(self):
a = DataFrame(randn(10, 2), columns=['a', 'b'], dtype=np.float64)
b = DataFrame(randn(10, 1), columns=['c'], dtype=np.float32)
joined = a.join(b)
assert joined.dtypes['a'] == 'float64'
assert joined.dtypes['b'] == 'float64'
assert joined.dtypes['c'] == 'float32'
a = np.random.randint(0, 5, 100).astype('int64')
b = np.random.random(100).astype('float64')
c = np.random.random(100).astype('float32')
df = DataFrame({'a': a, 'b': b, 'c': c})
xpdf = DataFrame({'a': a, 'b': b, 'c': c})
s = DataFrame(np.random.random(5).astype('float32'), columns=['md'])
rs = df.merge(s, left_on='a', right_index=True)
assert rs.dtypes['a'] == 'int64'
assert rs.dtypes['b'] == 'float64'
assert rs.dtypes['c'] == 'float32'
assert rs.dtypes['md'] == 'float32'
xp = xpdf.merge(s, left_on='a', right_index=True)
assert_frame_equal(rs, xp)
def test_join_many_non_unique_index(self):
df1 = DataFrame({"a": [1, 1], "b": [1, 1], "c": [10, 20]})
df2 = DataFrame({"a": [1, 1], "b": [1, 2], "d": [100, 200]})
df3 = DataFrame({"a": [1, 1], "b": [1, 2], "e": [1000, 2000]})
idf1 = df1.set_index(["a", "b"])
idf2 = df2.set_index(["a", "b"])
idf3 = df3.set_index(["a", "b"])
result = idf1.join([idf2, idf3], how='outer')
df_partially_merged = merge(df1, df2, on=['a', 'b'], how='outer')
expected = merge(df_partially_merged, df3, on=['a', 'b'], how='outer')
result = result.reset_index()
expected = expected[result.columns]
expected['a'] = expected.a.astype('int64')
expected['b'] = expected.b.astype('int64')
assert_frame_equal(result, expected)
df1 = DataFrame({"a": [1, 1, 1], "b": [1, 1, 1], "c": [10, 20, 30]})
df2 = DataFrame({"a": [1, 1, 1], "b": [1, 1, 2], "d": [100, 200, 300]})
df3 = DataFrame(
{"a": [1, 1, 1], "b": [1, 1, 2], "e": [1000, 2000, 3000]})
idf1 = df1.set_index(["a", "b"])
idf2 = df2.set_index(["a", "b"])
idf3 = df3.set_index(["a", "b"])
result = idf1.join([idf2, idf3], how='inner')
df_partially_merged = merge(df1, df2, on=['a', 'b'], how='inner')
expected = merge(df_partially_merged, df3, on=['a', 'b'], how='inner')
result = result.reset_index()
assert_frame_equal(result, expected.loc[:, result.columns])
# GH 11519
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three',
'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
s = Series(np.repeat(np.arange(8), 2),
index=np.repeat(np.arange(8), 2), name='TEST')
inner = df.join(s, how='inner')
outer = df.join(s, how='outer')
left = df.join(s, how='left')
right = df.join(s, how='right')
assert_frame_equal(inner, outer)
assert_frame_equal(inner, left)
assert_frame_equal(inner, right)
def test_join_sort(self):
left = DataFrame({'key': ['foo', 'bar', 'baz', 'foo'],
'value': [1, 2, 3, 4]})
right = DataFrame({'value2': ['a', 'b', 'c']},
index=['bar', 'baz', 'foo'])
joined = left.join(right, on='key', sort=True)
expected = DataFrame({'key': ['bar', 'baz', 'foo', 'foo'],
'value': [2, 3, 1, 4],
'value2': ['a', 'b', 'c', 'c']},
index=[1, 2, 0, 3])
assert_frame_equal(joined, expected)
# smoke test
joined = left.join(right, on='key', sort=False)
tm.assert_index_equal(joined.index, pd.Index(lrange(4)))
def test_join_mixed_non_unique_index(self):
# GH 12814, unorderable types in py3 with a non-unique index
df1 = DataFrame({'a': [1, 2, 3, 4]}, index=[1, 2, 3, 'a'])
df2 = DataFrame({'b': [5, 6, 7, 8]}, index=[1, 3, 3, 4])
result = df1.join(df2)
expected = DataFrame({'a': [1, 2, 3, 3, 4],
'b': [5, np.nan, 6, 7, np.nan]},
index=[1, 2, 3, 3, 'a'])
tm.assert_frame_equal(result, expected)
df3 = DataFrame({'a': [1, 2, 3, 4]}, index=[1, 2, 2, 'a'])
df4 = DataFrame({'b': [5, 6, 7, 8]}, index=[1, 2, 3, 4])
result = df3.join(df4)
expected = DataFrame({'a': [1, 2, 3, 4], 'b': [5, 6, 6, np.nan]},
index=[1, 2, 2, 'a'])
tm.assert_frame_equal(result, expected)
def test_join_non_unique_period_index(self):
# GH #16871
index = pd.period_range('2016-01-01', periods=16, freq='M')
df = DataFrame([i for i in range(len(index))],
index=index, columns=['pnum'])
df2 = concat([df, df])
result = df.join(df2, how='inner', rsuffix='_df2')
expected = DataFrame(
np.tile(np.arange(16, dtype=np.int64).repeat(2).reshape(-1, 1), 2),
columns=['pnum', 'pnum_df2'], index=df2.sort_index().index)
tm.assert_frame_equal(result, expected)
def test_mixed_type_join_with_suffix(self):
# GH #916
df = DataFrame(np.random.randn(20, 6),
columns=['a', 'b', 'c', 'd', 'e', 'f'])
df.insert(0, 'id', 0)
df.insert(5, 'dt', 'foo')
grouped = df.groupby('id')
mn = grouped.mean()
cn = grouped.count()
# it works!
mn.join(cn, rsuffix='_right')
def test_join_many(self):
df = DataFrame(np.random.randn(10, 6), columns=list('abcdef'))
df_list = [df[['a', 'b']], df[['c', 'd']], df[['e', 'f']]]
joined = df_list[0].join(df_list[1:])
tm.assert_frame_equal(joined, df)
df_list = [df[['a', 'b']][:-2],
df[['c', 'd']][2:], df[['e', 'f']][1:9]]
def _check_diff_index(df_list, result, exp_index):
reindexed = [x.reindex(exp_index) for x in df_list]
expected = reindexed[0].join(reindexed[1:])
tm.assert_frame_equal(result, expected)
# different join types
joined = df_list[0].join(df_list[1:], how='outer')
_check_diff_index(df_list, joined, df.index)
joined = df_list[0].join(df_list[1:])
_check_diff_index(df_list, joined, df_list[0].index)
joined = df_list[0].join(df_list[1:], how='inner')
_check_diff_index(df_list, joined, df.index[2:8])
msg = "Joining multiple DataFrames only supported for joining on index"
with pytest.raises(ValueError, match=msg):
df_list[0].join(df_list[1:], on='a')
def test_join_many_mixed(self):
df = DataFrame(np.random.randn(8, 4), columns=['A', 'B', 'C', 'D'])
df['key'] = ['foo', 'bar'] * 4
df1 = df.loc[:, ['A', 'B']]
df2 = df.loc[:, ['C', 'D']]
df3 = df.loc[:, ['key']]
result = df1.join([df2, df3])
assert_frame_equal(result, df)
def test_join_dups(self):
# joining dups
df = concat([DataFrame(np.random.randn(10, 4),
columns=['A', 'A', 'B', 'B']),
DataFrame(np.random.randint(0, 10, size=20)
.reshape(10, 2),
columns=['A', 'C'])],
axis=1)
expected = concat([df, df], axis=1)
result = df.join(df, rsuffix='_2')
result.columns = expected.columns
assert_frame_equal(result, expected)
# GH 4975, invalid join on dups
w = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
x = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
y = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
z = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
dta = x.merge(y, left_index=True, right_index=True).merge(
z, left_index=True, right_index=True, how="outer")
dta = dta.merge(w, left_index=True, right_index=True)
expected = concat([x, y, z, w], axis=1)
expected.columns = ['x_x', 'y_x', 'x_y',
'y_y', 'x_x', 'y_x', 'x_y', 'y_y']
assert_frame_equal(dta, expected)
def test_join_multi_to_multi(self, join_type):
# GH 20475
leftindex = MultiIndex.from_product([list('abc'), list('xy'), [1, 2]],
names=['abc', 'xy', 'num'])
left = DataFrame({'v1': range(12)}, index=leftindex)
rightindex = MultiIndex.from_product([list('abc'), list('xy')],
names=['abc', 'xy'])
right = DataFrame({'v2': [100 * i for i in range(1, 7)]},
index=rightindex)
result = left.join(right, on=['abc', 'xy'], how=join_type)
expected = (left.reset_index()
.merge(right.reset_index(),
on=['abc', 'xy'], how=join_type)
.set_index(['abc', 'xy', 'num'])
)
assert_frame_equal(expected, result)
msg = (r'len\(left_on\) must equal the number of levels in the index'
' of "right"')
with pytest.raises(ValueError, match=msg):
left.join(right, on='xy', how=join_type)
with pytest.raises(ValueError, match=msg):
right.join(left, on=['abc', 'xy'], how=join_type)
def test_join_on_tz_aware_datetimeindex(self):
# GH 23931
df1 = pd.DataFrame(
{
'date': pd.date_range(start='2018-01-01', periods=5,
tz='America/Chicago'),
'vals': list('abcde')
}
)
df2 = pd.DataFrame(
{
'date': pd.date_range(start='2018-01-03', periods=5,
tz='America/Chicago'),
'vals_2': list('tuvwx')
}
)
result = df1.join(df2.set_index('date'), on='date')
expected = df1.copy()
expected['vals_2'] = pd.Series([np.nan] * len(expected), dtype=object)
assert_frame_equal(result, expected)
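# Shared verification helpers for the tests above: `_check_join` groups the
# merged result by the join keys and checks each group against the matching
# left/right source groups, expecting all-NA columns wherever one side has
# no match for the given `how`.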
def _check_join(left, right, result, join_col, how='left',
lsuffix='_x', rsuffix='_y'):
# some smoke tests
for c in join_col:
assert(result[c].notna().all())
left_grouped = left.groupby(join_col)
right_grouped = right.groupby(join_col)
for group_key, group in result.groupby(join_col):
l_joined = _restrict_to_columns(group, left.columns, lsuffix)
r_joined = _restrict_to_columns(group, right.columns, rsuffix)
try:
lgroup = left_grouped.get_group(group_key)
except KeyError:
if how in ('left', 'inner'):
raise AssertionError('key %s should not have been in the join'
% str(group_key))
_assert_all_na(l_joined, left.columns, join_col)
else:
_assert_same_contents(l_joined, lgroup)
try:
rgroup = right_grouped.get_group(group_key)
except KeyError:
if how in ('right', 'inner'):
raise AssertionError('key %s should not have been in the join'
% str(group_key))
_assert_all_na(r_joined, right.columns, join_col)
else:
_assert_same_contents(r_joined, rgroup)
def _restrict_to_columns(group, columns, suffix):
found = [c for c in group.columns
if c in columns or c.replace(suffix, '') in columns]
# filter
group = group.loc[:, found]
# get rid of suffixes, if any
group = group.rename(columns=lambda x: x.replace(suffix, ''))
# put in the right order...
group = group.loc[:, columns]
return group
def _assert_same_contents(join_chunk, source):
NA_SENTINEL = -1234567 # drop_duplicates not so NA-friendly...
jvalues = join_chunk.fillna(NA_SENTINEL).drop_duplicates().values
svalues = source.fillna(NA_SENTINEL).drop_duplicates().values
rows = {tuple(row) for row in jvalues}
assert(len(rows) == len(source))
assert(all(tuple(row) in rows for row in svalues))
def _assert_all_na(join_chunk, source_columns, join_col):
for c in source_columns:
if c in join_col:
continue
assert(join_chunk[c].isna().all())
def _join_by_hand(a, b, how='left'):
join_index = a.index.join(b.index, how=how)
a_re = a.reindex(join_index)
b_re = b.reindex(join_index)
result_columns = a.columns.append(b.columns)
for col, s in compat.iteritems(b_re):
a_re[col] = s
return a_re.reindex(columns=result_columns)
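# A minimal illustrative check of what `_join_by_hand` reproduces: an
# index-aligned left join built by reindexing both frames onto the joined
# index. The helper name and toy frames below are illustrative additions,
# not part of the original test module.
def _example_join_by_hand():
    a = DataFrame({'x': [1, 2]}, index=['i', 'j'])
    b = DataFrame({'y': [10]}, index=['j'])
    by_hand = _join_by_hand(a, b, how='left')
    # the hand-built result should match DataFrame.join exactly
    assert_frame_equal(by_hand, a.join(b, how='left'))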
| 39.132911 | 79 | 0.523306 |
427443a77944837ca52068ef5b3ff19a520b852c | 748 | py | Python | core/middleware/ga.py | pascalopitz/microweb | b5c2c6351d814ebdee55f41505d9500404fd309a | [
"MIT"
] | 11 | 2015-01-02T12:19:14.000Z | 2019-09-25T13:31:09.000Z | core/middleware/ga.py | pascalopitz/microweb | b5c2c6351d814ebdee55f41505d9500404fd309a | [
"MIT"
] | 17 | 2015-01-16T23:15:05.000Z | 2020-06-05T16:58:49.000Z | core/middleware/ga.py | pascalopitz/microweb | b5c2c6351d814ebdee55f41505d9500404fd309a | [
"MIT"
] | 7 | 2015-10-29T14:40:45.000Z | 2021-06-03T10:44:55.000Z | from django.core import exceptions
from django.conf import settings
from pyga.requests import Tracker, Page, Session, Visitor
class GAMiddleware():
def process_request(self, request):
if settings.GA_ENABLED:
ip = request.META["REMOTE_ADDR"]
            # Django exposes request headers as HTTP_*-prefixed keys in META,
            # so the CloudFlare client-IP header is HTTP_CF_CONNECTING_IP.
            if "HTTP_CF_CONNECTING_IP" in request.META:
                ip = request.META["HTTP_CF_CONNECTING_IP"]
tracker = Tracker(settings.GA_KEY, request.META["HTTP_HOST"])
visitor = Visitor()
visitor.ip_address = ip
session = Session()
page = Page(request.path)
tracker.track_pageview(page, session, visitor)
return None
def process_response(self, request, response):
return response
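# A minimal illustrative sketch of the settings this middleware reads and the
# old-style (pre-Django 1.10) registration it needs. The dotted path follows
# the module's location; the key value is a placeholder assumption.
EXAMPLE_SETTINGS = {
    'GA_ENABLED': True,
    'GA_KEY': 'UA-000000-0',  # placeholder Google Analytics property id
    'MIDDLEWARE_CLASSES': ['core.middleware.ga.GAMiddleware'],
}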
| 29.92 | 73 | 0.629679 |
f164636867e8d3bfbef25ea488ce744628b0f14b | 58 | py | Python | gsd/data/splits.py | pzelasko/gsd | c4f0e62ee01f982f284632dd725590a4198de8c7 | [
"Apache-2.0"
] | null | null | null | gsd/data/splits.py | pzelasko/gsd | c4f0e62ee01f982f284632dd725590a4198de8c7 | [
"Apache-2.0"
] | null | null | null | gsd/data/splits.py | pzelasko/gsd | c4f0e62ee01f982f284632dd725590a4198de8c7 | [
"Apache-2.0"
] | null | null | null | # TODO:
# - split
# - combine
# - subset
# - copy
# - map
| 8.285714 | 11 | 0.5 |
44d4de159d1ca14d8c5f1f5f28eaacb7ced0a366 | 2,888 | py | Python | python/lsst/eotask_gen3/eoNonlinearityData.py | lsst-camera-dh/eotask-gen3 | 41e7de97c607c5a8b21c4f7164b3852e7d07359a | [
"BSD-3-Clause-LBNL"
] | null | null | null | python/lsst/eotask_gen3/eoNonlinearityData.py | lsst-camera-dh/eotask-gen3 | 41e7de97c607c5a8b21c4f7164b3852e7d07359a | [
"BSD-3-Clause-LBNL"
] | 33 | 2021-04-23T17:43:34.000Z | 2022-01-17T19:15:14.000Z | python/lsst/eotask_gen3/eoNonlinearityData.py | lsst-camera-dh/eotask-gen3 | 41e7de97c607c5a8b21c4f7164b3852e7d07359a | [
"BSD-3-Clause-LBNL"
] | null | null | null | # from lsst.ip.isr import IsrCalib
from .eoCalibTable import EoCalibField, EoCalibTableSchema, EoCalibTable, EoCalibTableHandle
from .eoCalib import EoCalibSchema, EoCalib, RegisterEoCalibSchema
from .eoPlotUtils import EoPlotMethod, nullFigure
__all__ = ["EoNonlinearityAmpRunData",
"EoNonlinearityData"]
class EoNonlinearityAmpRunDataSchemaV0(EoCalibTableSchema):
"""Schema definitions for output data for per-amp, per-run tables
for EoNonlinearityTask.
This are the 'profile' parameters of the non-linearity correction.
I.e., means and errors on the correction coefficients as at
given ADU values
"""
TABLELENGTH = 'nAmp'
profX = EoCalibField(name="PROF_X", dtype=float, unit='adu', shape=['nProf'])
profYCorr = EoCalibField(name="PROF_YCORR", dtype=float, unit='adu', shape=['nProf'])
profYErr = EoCalibField(name="PROF_YERR", dtype=float, unit='adu', shape=['nProf'])
class EoNonlinearityAmpRunData(EoCalibTable):
"""Container class and interface for per-amp, per-exposure-pair tables
for EoNonlinearityTask."""
SCHEMA_CLASS = EoNonlinearityAmpRunDataSchemaV0
def __init__(self, data=None, **kwargs):
"""C'tor, arguments are passed to base class.
Class specialization just associates class properties with columns
"""
super(EoNonlinearityAmpRunData, self).__init__(data=data, **kwargs)
self.profX = self.table[self.SCHEMA_CLASS.profX.name]
self.profYCorr = self.table[self.SCHEMA_CLASS.profYCorr.name]
self.profYErr = self.table[self.SCHEMA_CLASS.profYErr.name]
class EoNonlinearityDataSchemaV0(EoCalibSchema):
"""Schema definitions for output data for EoNonlinearityTask.
This defines correct versions of the sub-tables"""
amps = EoCalibTableHandle(tableName="amps",
tableClass=EoNonlinearityAmpRunData)
class EoNonlinearityData(EoCalib):
"""Container class and interface for EoNonlinearityTask outputs."""
SCHEMA_CLASS = EoNonlinearityDataSchemaV0
_OBSTYPE = 'flat'
_SCHEMA = SCHEMA_CLASS.fullName()
_VERSION = SCHEMA_CLASS.version()
def __init__(self, **kwargs):
"""C'tor, arguments are passed to base class.
Class specialization just associates instance properties with
sub-tables
"""
super(EoNonlinearityData, self).__init__(**kwargs)
self.amps = self['amps']
@EoPlotMethod(EoNonlinearityData, "curve", "slot", "nonlinearity", "Linearity")
def plotLinearity(obj):
return nullFigure()
@EoPlotMethod(EoNonlinearityData, "resids", "slot", "nonlinearity", "Linearity residual")
def plotLinearityResidual(obj):
return nullFigure()
RegisterEoCalibSchema(EoNonlinearityData)
AMPS = ["%02i" % i for i in range(16)]
NPROFILE = 20
EoNonlinearityData.testData = dict(testCtor=dict(nAmp=len(AMPS), nProf=NPROFILE))
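# A hedged usage sketch (illustrative addition): building the container with
# the same shape keywords recorded in testData['testCtor']. The keyword
# pass-through to the EoCalib base-class constructor is an assumption here,
# not something confirmed by this module alone.
def _example_build_nonlinearity_data():
    data = EoNonlinearityData(nAmp=len(AMPS), nProf=NPROFILE)
    # amps exposes the per-amp profile table (profX / profYCorr / profYErr)
    return data.amps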
| 32.818182 | 92 | 0.72126 |
a4c4a33184b64f064937c05e6886f6910ed34363 | 2,910 | py | Python | market_backend/apps/accounts/admin.py | muthukumar4999/market-backend | 61ccdba3bd77af76d47e0907c3d7c0833320d381 | [
"MIT"
] | null | null | null | market_backend/apps/accounts/admin.py | muthukumar4999/market-backend | 61ccdba3bd77af76d47e0907c3d7c0833320d381 | [
"MIT"
] | null | null | null | market_backend/apps/accounts/admin.py | muthukumar4999/market-backend | 61ccdba3bd77af76d47e0907c3d7c0833320d381 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from django.utils.translation import ugettext_lazy as _
from .models import User, AuthUser, Category, SubCategory, Media, Product # , ConsumerProduct, OrderCart
class CustomUserAdmin(UserAdmin):
fieldsets = (
(None, {'fields': ('username', 'password')}),
(_('Personal info'),
{'fields': (
'first_name', 'last_name', 'email', 'user_type', 'address', 'referral_code', 'referred_by')}),
(_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser',
'groups', 'user_permissions')}),
(_('Important dates'), {'fields': ('last_login', 'date_joined')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': (
'username', 'password1', 'password2', 'email', 'first_name', 'last_name', 'address',
'user_type',
)}
),
)
form = UserChangeForm
add_form = UserCreationForm
list_display = ['username', 'type', 'is_active']
list_filter = []
def type(self, obj):
if obj.user_type == User.ADMIN:
return 'Admin'
elif obj.user_type == User.CUSTOMER:
return 'Customer'
elif obj.user_type == User.DELIVERY_MAN:
return 'Delivery man'
elif obj.user_type == User.WHOLESALER:
return 'Whole saler'
else:
return 'User'
class AuthUserAdmin(admin.ModelAdmin):
model = AuthUser
list_display = ['user', 'token', 'is_expired']
class CategoryAdmin(admin.ModelAdmin):
model = Category
list_display = ['name', ]
class SubCategoryAdmin(admin.ModelAdmin):
model = SubCategory
list_display = ['name', 'category']
class ProductAdmin(admin.ModelAdmin):
model = Product
list_display = ['name', 'sub_category', 'wholesaler', 'is_out_of_stock']
class MediaAdmin(admin.ModelAdmin):
model = Media
list_display = ['key', 'file_name', 'uploaded_at']
#
# class ConsumerProductAdmin(admin.ModelAdmin):
# model = ConsumerProduct
# list_display = ['product', 'wholesaler_product', 'customer_price', 'is_drafted', 'is_published',]
#
# class OrderCartAdmin(admin.ModelAdmin):
# model = OrderCart
# list_display = ['consumer', 'consumer_product', 'quantity']
admin.site.register(User, CustomUserAdmin)
admin.site.register(AuthUser, AuthUserAdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(SubCategory, SubCategoryAdmin)
admin.site.register(Product, ProductAdmin)
admin.site.register(Media, MediaAdmin)
# admin.site.register(ConsumerProduct, ConsumerProductAdmin)
# admin.site.register(OrderCart, OrderCartAdmin)
admin.site.site_url = 'http://market-backend.herokuapp.com/api/v0/docs/'
| 32.333333 | 107 | 0.648797 |
8ff7579e66394800fbae9efe43a62fc23ea1dae8 | 185 | py | Python | microcosm/tests/test_api.py | Sinon/microcosm | e8bab13b19e873b9b097968feeb8c3cb84bca045 | [
"Apache-2.0"
] | 30 | 2016-04-05T18:37:57.000Z | 2021-06-21T18:58:43.000Z | microcosm/tests/test_api.py | Sinon/microcosm | e8bab13b19e873b9b097968feeb8c3cb84bca045 | [
"Apache-2.0"
] | 24 | 2016-03-08T17:33:00.000Z | 2020-04-26T06:55:48.000Z | microcosm/tests/test_api.py | Sinon/microcosm | e8bab13b19e873b9b097968feeb8c3cb84bca045 | [
"Apache-2.0"
] | 6 | 2016-12-19T22:39:20.000Z | 2020-11-15T15:27:58.000Z | """
Test high-level api
"""
def test_api_imports():
"""
Imports of the public API work.
"""
from microcosm.api import binding, create_object_graph, defaults # noqa
| 14.230769 | 76 | 0.648649 |
f51cfcb491d0e16973bca8b2ed2763b2e0a9448a | 11,264 | py | Python | src/rastervision/label_stores/classification_geojson_file_test.py | nholeman/raster-vision | f3e1e26c555feed6fa018183c3fa04d7858d91bd | [
"Apache-2.0"
] | null | null | null | src/rastervision/label_stores/classification_geojson_file_test.py | nholeman/raster-vision | f3e1e26c555feed6fa018183c3fa04d7858d91bd | [
"Apache-2.0"
] | null | null | null | src/rastervision/label_stores/classification_geojson_file_test.py | nholeman/raster-vision | f3e1e26c555feed6fa018183c3fa04d7858d91bd | [
"Apache-2.0"
] | null | null | null | import unittest
import tempfile
import os
import json
from shapely import geometry
from shapely.strtree import STRtree
from rastervision.label_stores.classification_geojson_file import (
ClassificationGeoJSONFile, infer_cell, infer_labels, read_labels,
to_geojson)
from rastervision.label_stores.utils import geojson_to_shapely_polygons
from rastervision.core.crs_transformer import CRSTransformer
from rastervision.core.box import Box
from rastervision.core.class_map import ClassMap, ClassItem
from rastervision.protos.label_store_pb2 import (
ClassificationGeoJSONFile as ClassificationGeoJSONFileConfig)
class DoubleCRSTransformer(CRSTransformer):
"""Mock CRSTransformer used for testing.
Assumes map coords are 2x pixels coords.
"""
def map_to_pixel(self, web_point):
return (web_point[0] * 2, web_point[1] * 2)
def pixel_to_map(self, pixel_point):
return (pixel_point[0] / 2, pixel_point[1] / 2)
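# A small illustrative sanity check (an addition, not from the original test
# module): the mock transformer is exactly invertible, which is what lets the
# tests below hand-compute pixel-space boxes from map-space GeoJSON
# coordinates.
def _example_double_crs_round_trip():
    transformer = DoubleCRSTransformer()
    assert transformer.map_to_pixel((1., 2.)) == (2., 4.)
    assert transformer.pixel_to_map((2., 4.)) == (1., 2.)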
class TestClassificationGeoJSONFile(unittest.TestCase):
def setUp(self):
self.crs_transformer = DoubleCRSTransformer()
self.geojson_dict = {
'type':
'FeatureCollection',
'features': [{
'type': 'Feature',
'geometry': {
'type':
'Polygon',
'coordinates': [[[0., 0.], [0., 1.], [1., 1.], [1., 0.],
[0., 0.]]]
},
'properties': {
'class_name': 'car',
'class_id': 1,
'score': 0.0
}
}, {
'type': 'Feature',
'geometry': {
'type':
'Polygon',
'coordinates': [[[1., 1.], [1., 2.], [2., 2.], [2., 1.],
[1., 1.]]]
},
'properties': {
'score': 0.0,
'class_name': 'house',
'class_id': 2
}
}]
}
self.class_map = ClassMap([ClassItem(1, 'car'), ClassItem(2, 'house')])
self.box1 = Box.make_square(0, 0, 2)
self.box2 = Box.make_square(2, 2, 2)
self.class_id1 = 1
self.class_id2 = 2
self.background_class_id = 3
self.polygons = geojson_to_shapely_polygons(self.geojson_dict,
self.crs_transformer)
self.str_tree = STRtree(self.polygons)
self.file_name = 'labels.json'
self.temp_dir = tempfile.TemporaryDirectory()
self.file_path = os.path.join(self.temp_dir.name, self.file_name)
with open(self.file_path, 'w') as label_file:
self.geojson_str = json.dumps(self.geojson_dict)
label_file.write(self.geojson_str)
def tearDown(self):
self.temp_dir.cleanup()
def test_get_str_tree(self):
# Check first box.
query_box = Box.make_square(0, 0, 1)
query_geom = geometry.Polygon(
[(p[0], p[1]) for p in query_box.geojson_coordinates()])
polygons = self.str_tree.query(query_geom)
self.assertEqual(len(polygons), 1)
self.assertEqual(Box.from_shapely(polygons[0]), self.box1)
self.assertEqual(polygons[0].class_id, self.class_id1)
# Check second box.
query_box = Box.make_square(3, 3, 1)
query_geom = geometry.Polygon(
[(p[0], p[1]) for p in query_box.geojson_coordinates()])
polygons = self.str_tree.query(query_geom)
self.assertEqual(len(polygons), 1)
self.assertEqual(Box.from_shapely(polygons[0]), self.box2)
self.assertEqual(polygons[0].class_id, self.class_id2)
def test_infer_cell1(self):
# More of box 1 is in cell.
cell = Box.make_square(0, 0, 3)
ioa_thresh = 0.5
use_intersection_over_cell = False
background_class_id = None
pick_min_class_id = False
class_id = infer_cell(self.str_tree, cell, ioa_thresh,
use_intersection_over_cell, background_class_id,
pick_min_class_id)
self.assertEqual(class_id, self.class_id1)
def test_infer_cell2(self):
# More of box 2 is in cell.
cell = Box.make_square(1, 1, 3)
ioa_thresh = 0.5
use_intersection_over_cell = False
background_class_id = None
pick_min_class_id = False
class_id = infer_cell(self.str_tree, cell, ioa_thresh,
use_intersection_over_cell, background_class_id,
pick_min_class_id)
self.assertEqual(class_id, self.class_id2)
def test_infer_cell3(self):
# Only box 2 is in cell, but IOA isn't high enough.
cell = Box.make_square(3, 3, 3)
ioa_thresh = 0.5
use_intersection_over_cell = False
background_class_id = None
pick_min_class_id = False
class_id = infer_cell(self.str_tree, cell, ioa_thresh,
use_intersection_over_cell, background_class_id,
pick_min_class_id)
self.assertEqual(class_id, None)
def test_infer_cell4(self):
# Both boxes inside cell, but using intersection_over_cell,
# the IOA isn't high enough.
cell = Box.make_square(0, 0, 10)
ioa_thresh = 0.5
use_intersection_over_cell = True
background_class_id = None
pick_min_class_id = False
class_id = infer_cell(self.str_tree, cell, ioa_thresh,
use_intersection_over_cell, background_class_id,
pick_min_class_id)
self.assertEqual(class_id, None)
def test_infer_cell5(self):
# More of box1 in cell, using intersection_over_cell with the
# IOA high enough.
cell = Box.make_square(0, 0, 3)
ioa_thresh = 0.4
use_intersection_over_cell = True
background_class_id = None
pick_min_class_id = False
class_id = infer_cell(self.str_tree, cell, ioa_thresh,
use_intersection_over_cell, background_class_id,
pick_min_class_id)
self.assertEqual(class_id, self.class_id1)
def test_infer_cell6(self):
# No boxes overlap enough, use background_class_id
cell = Box.make_square(0, 0, 10)
ioa_thresh = 0.5
use_intersection_over_cell = True
background_class_id = self.background_class_id
pick_min_class_id = False
class_id = infer_cell(self.str_tree, cell, ioa_thresh,
use_intersection_over_cell, background_class_id,
pick_min_class_id)
self.assertEqual(class_id, self.background_class_id)
def test_infer_cell7(self):
# Cell doesn't overlap with any boxes.
cell = Box.make_square(10, 10, 1)
ioa_thresh = 0.5
use_intersection_over_cell = True
background_class_id = None
pick_min_class_id = False
class_id = infer_cell(self.str_tree, cell, ioa_thresh,
use_intersection_over_cell, background_class_id,
pick_min_class_id)
self.assertEqual(class_id, None)
def test_infer_cell8(self):
        # box2 overlaps more than box1; box1 falls below ioa_thresh, so
        # box2's class is picked even though pick_min_class_id is set.
cell = Box.make_square(1, 1, 3)
ioa_thresh = 0.5
use_intersection_over_cell = False
background_class_id = None
pick_min_class_id = True
class_id = infer_cell(self.str_tree, cell, ioa_thresh,
use_intersection_over_cell, background_class_id,
pick_min_class_id)
self.assertEqual(class_id, self.class_id2)
def test_infer_labels(self):
extent = Box.make_square(0, 0, 4)
options = ClassificationGeoJSONFileConfig.Options()
options.ioa_thresh = 0.5
options.use_intersection_over_cell = False
options.background_class_id = self.background_class_id
options.pick_min_class_id = False
options.infer_cells = True
options.cell_size = 2
labels = infer_labels(self.geojson_dict, self.crs_transformer, extent,
options)
cells = labels.get_cells()
self.assertEqual(len(cells), 4)
class_id = labels.get_cell_class_id(self.box1)
self.assertEqual(class_id, self.class_id1)
class_id = labels.get_cell_class_id(self.box2)
self.assertEqual(class_id, self.class_id2)
class_id = labels.get_cell_class_id(Box.make_square(0, 2, 2))
self.assertEqual(class_id, self.background_class_id)
class_id = labels.get_cell_class_id(Box.make_square(2, 0, 2))
self.assertEqual(class_id, self.background_class_id)
def test_read_labels1(self):
# Extent only has enough of first box in it.
extent = Box.make_square(0, 0, 2.5)
labels = read_labels(self.geojson_dict, self.crs_transformer, extent)
cells = labels.get_cells()
self.assertEqual(len(cells), 1)
class_id = labels.get_cell_class_id(self.box1)
self.assertEqual(class_id, self.class_id1)
class_id = labels.get_cell_class_id(self.box2)
self.assertEqual(class_id, None)
def test_read_labels2(self):
# Extent contains both boxes.
extent = Box.make_square(0, 0, 4)
labels = read_labels(self.geojson_dict, self.crs_transformer, extent)
cells = labels.get_cells()
self.assertEqual(len(cells), 2)
class_id = labels.get_cell_class_id(self.box1)
self.assertEqual(class_id, self.class_id1)
class_id = labels.get_cell_class_id(self.box2)
self.assertEqual(class_id, self.class_id2)
def test_to_geojson(self):
extent = Box.make_square(0, 0, 4)
labels = read_labels(self.geojson_dict, self.crs_transformer, extent)
geojson_dict = to_geojson(labels, self.crs_transformer, self.class_map)
self.assertDictEqual(geojson_dict, self.geojson_dict)
def test_constructor_save(self):
# Read it, write it using label_store, read it again, and compare.
extent = Box.make_square(0, 0, 10)
options = ClassificationGeoJSONFileConfig.Options()
options.infer_cells = False
label_store = ClassificationGeoJSONFile(
self.file_path,
self.crs_transformer,
options,
self.class_map,
extent,
readable=True,
writable=True)
labels1 = label_store.get_labels()
label_store.save()
label_store = ClassificationGeoJSONFile(
self.file_path,
self.crs_transformer,
options,
self.class_map,
extent=None,
readable=True,
writable=True)
labels2 = label_store.get_labels()
self.assertDictEqual(labels1.cell_to_class_id,
labels2.cell_to_class_id)
if __name__ == '__main__':
unittest.main()
| 36.690554 | 79 | 0.60396 |
42997b3bde48df7618855b8c6b149eaeb8ed0cc1 | 49,751 | py | Python | gplearn/genetic.py | anshulrai/gplearn | 01ca44026630f56cacce13e0b022488554e4a1fc | [
"BSD-3-Clause"
] | null | null | null | gplearn/genetic.py | anshulrai/gplearn | 01ca44026630f56cacce13e0b022488554e4a1fc | [
"BSD-3-Clause"
] | null | null | null | gplearn/genetic.py | anshulrai/gplearn | 01ca44026630f56cacce13e0b022488554e4a1fc | [
"BSD-3-Clause"
] | null | null | null | """Genetic Programming in Python, with a scikit-learn inspired API
The :mod:`gplearn.genetic` module implements Genetic Programming. These
are supervised learning methods based on applying evolutionary operations on
computer programs.
"""
# Author: Trevor Stephens <trevorstephens.com>
#
# License: BSD 3 clause
import itertools
from abc import ABCMeta, abstractmethod
from time import time
from warnings import warn
import numpy as np
from scipy.stats import rankdata
from sklearn.base import BaseEstimator, RegressorMixin, TransformerMixin
from sklearn.externals import six
from sklearn.externals.joblib import Parallel, delayed
from sklearn.utils.validation import check_X_y, check_array
from ._program import _Program
from .fitness import _fitness_map, _Fitness
from .functions import _function_map, _Function
from .utils import _partition_estimators
from .utils import check_random_state, NotFittedError
__all__ = ['SymbolicRegressor', 'SymbolicTransformer']
MAX_INT = np.iinfo(np.int32).max
def _parallel_evolve(n_programs, parents, X, y, sample_weight, seeds, params):
"""Private function used to build a batch of programs within a job."""
n_samples, n_features = X.shape
# Unpack parameters
tournament_size = params['tournament_size']
function_set = params['function_set']
arities = params['arities']
init_depth = params['init_depth']
init_method = params['init_method']
const_range = params['const_range']
metric = params['_metric']
parsimony_coefficient = params['parsimony_coefficient']
method_probs = params['method_probs']
p_point_replace = params['p_point_replace']
max_samples = params['max_samples']
feature_names = params['feature_names']
max_samples = int(max_samples * n_samples)
def _tournament():
"""Find the fittest individual from a sub-population."""
contenders = random_state.randint(0, len(parents), tournament_size)
fitness = [parents[p].fitness_ for p in contenders]
if metric.greater_is_better:
parent_index = contenders[np.argmax(fitness)]
else:
parent_index = contenders[np.argmin(fitness)]
return parents[parent_index], parent_index
# Build programs
programs = []
for i in range(n_programs):
random_state = check_random_state(seeds[i])
if parents is None:
program = None
genome = None
else:
method = random_state.uniform()
parent, parent_index = _tournament()
if method < method_probs[0]:
# crossover
donor, donor_index = _tournament()
program, removed, remains = parent.crossover(donor.program,
random_state)
genome = {'method': 'Crossover',
'parent_idx': parent_index,
'parent_nodes': removed,
'donor_idx': donor_index,
'donor_nodes': remains}
elif method < method_probs[1]:
# subtree_mutation
program, removed, _ = parent.subtree_mutation(random_state)
genome = {'method': 'Subtree Mutation',
'parent_idx': parent_index,
'parent_nodes': removed}
elif method < method_probs[2]:
# hoist_mutation
program, removed = parent.hoist_mutation(random_state)
genome = {'method': 'Hoist Mutation',
'parent_idx': parent_index,
'parent_nodes': removed}
elif method < method_probs[3]:
# point_mutation
program, mutated = parent.point_mutation(random_state)
genome = {'method': 'Point Mutation',
'parent_idx': parent_index,
'parent_nodes': mutated}
else:
# reproduction
program = parent.reproduce()
genome = {'method': 'Reproduction',
'parent_idx': parent_index,
'parent_nodes': []}
program = _Program(function_set=function_set,
arities=arities,
init_depth=init_depth,
init_method=init_method,
n_features=n_features,
metric=metric,
const_range=const_range,
p_point_replace=p_point_replace,
parsimony_coefficient=parsimony_coefficient,
feature_names=feature_names,
random_state=random_state,
program=program)
program.parents = genome
# Draw samples, using sample weights, and then fit
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,))
else:
curr_sample_weight = sample_weight.copy()
oob_sample_weight = curr_sample_weight.copy()
indices, not_indices = program.get_all_indices(n_samples,
max_samples,
random_state)
curr_sample_weight[not_indices] = 0
oob_sample_weight[indices] = 0
program.raw_fitness_ = program.raw_fitness(X, y, curr_sample_weight)
if max_samples < n_samples:
# Calculate OOB fitness
program.oob_fitness_ = program.raw_fitness(X, y, oob_sample_weight)
programs.append(program)
return programs
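# An illustrative sketch (an addition) of the genetic-operation dispatch used
# in `_parallel_evolve` above: the four probabilities are cumulatively summed
# and a single uniform draw selects the operation, with any leftover
# probability mass (1 - their sum) falling through to plain reproduction.
# The values are the class defaults; the helper name is illustrative.
def _example_method_dispatch(u):
    probs = np.cumsum([0.9, 0.01, 0.01, 0.01])
    names = ['crossover', 'subtree_mutation', 'hoist_mutation',
             'point_mutation']
    for prob, name in zip(probs, names):
        if u < prob:
            return name
    return 'reproduction'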
class BaseSymbolic(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for symbolic regression / classification estimators.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
population_size=1000,
hall_of_fame=None,
n_components=None,
generations=20,
tournament_size=20,
stopping_criteria=0.0,
const_range=(-1., 1.),
init_depth=(2, 6),
init_method='half and half',
function_set=('add', 'sub', 'mul', 'div'),
metric='mean absolute error',
parsimony_coefficient=0.001,
p_crossover=0.9,
p_subtree_mutation=0.01,
p_hoist_mutation=0.01,
p_point_mutation=0.01,
p_point_replace=0.05,
max_samples=1.0,
feature_names=None,
warm_start=False,
low_memory=False,
n_jobs=1,
verbose=0,
random_state=None):
self.population_size = population_size
self.hall_of_fame = hall_of_fame
self.n_components = n_components
self.generations = generations
self.tournament_size = tournament_size
self.stopping_criteria = stopping_criteria
self.const_range = const_range
self.init_depth = init_depth
self.init_method = init_method
self.function_set = function_set
self.metric = metric
self.parsimony_coefficient = parsimony_coefficient
self.p_crossover = p_crossover
self.p_subtree_mutation = p_subtree_mutation
self.p_hoist_mutation = p_hoist_mutation
self.p_point_mutation = p_point_mutation
self.p_point_replace = p_point_replace
self.max_samples = max_samples
self.feature_names = feature_names
self.warm_start = warm_start
self.low_memory = low_memory
self.n_jobs = n_jobs
self.verbose = verbose
self.random_state = random_state
def _verbose_reporter(self, run_details=None):
"""A report of the progress of the evolution process.
Parameters
----------
run_details : dict
Information about the evolution.
"""
if run_details is None:
print(' |{:^25}|{:^42}|'.format('Population Average',
'Best Individual'))
print('-' * 4 + ' ' + '-' * 25 + ' ' + '-' * 42 + ' ' + '-' * 10)
line_format = '{:>4} {:>8} {:>16} {:>8} {:>16} {:>16} {:>10}'
print(line_format.format('Gen', 'Length', 'Fitness', 'Length',
'Fitness', 'OOB Fitness', 'Time Left'))
else:
# Estimate remaining time for run
gen = run_details['generation'][-1]
generation_time = run_details['generation_time'][-1]
remaining_time = (self.generations - gen - 1) * generation_time
if remaining_time > 60:
remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
else:
remaining_time = '{0:.2f}s'.format(remaining_time)
oob_fitness = 'N/A'
line_format = '{:4d} {:8.2f} {:16g} {:8d} {:16g} {:>16} {:>10}'
if self.max_samples < 1.0:
oob_fitness = run_details['best_oob_fitness'][-1]
line_format = '{:4d} {:8.2f} {:16g} {:8d} {:16g} {:16g} {:>10}'
print(line_format.format(run_details['generation'][-1],
run_details['average_length'][-1],
run_details['average_fitness'][-1],
run_details['best_length'][-1],
run_details['best_fitness'][-1],
oob_fitness,
remaining_time))
def fit(self, X, y, sample_weight=None):
"""Fit the Genetic Program according to X, y.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
# Check arrays
X, y = check_X_y(X, y, y_numeric=True)
if sample_weight is not None:
sample_weight = check_array(sample_weight, ensure_2d=False)
_, self.n_features_ = X.shape
hall_of_fame = self.hall_of_fame
if hall_of_fame is None:
hall_of_fame = self.population_size
if hall_of_fame > self.population_size or hall_of_fame < 1:
raise ValueError('hall_of_fame (%d) must be less than or equal to '
'population_size (%d).' % (self.hall_of_fame,
self.population_size))
n_components = self.n_components
if n_components is None:
n_components = hall_of_fame
if n_components > hall_of_fame or n_components < 1:
raise ValueError('n_components (%d) must be less than or equal to '
'hall_of_fame (%d).' % (self.n_components,
self.hall_of_fame))
self._function_set = []
for function in self.function_set:
if isinstance(function, six.string_types):
if function not in _function_map:
raise ValueError('invalid function name %s found in '
'`function_set`.' % function)
self._function_set.append(_function_map[function])
elif isinstance(function, _Function):
self._function_set.append(function)
else:
raise ValueError('invalid type %s found in `function_set`.'
% type(function))
if not self._function_set:
raise ValueError('No valid functions found in `function_set`.')
# For point-mutation to find a compatible replacement node
self._arities = {}
for function in self._function_set:
arity = function.arity
self._arities[arity] = self._arities.get(arity, [])
self._arities[arity].append(function)
if isinstance(self.metric, _Fitness):
self._metric = self.metric
elif isinstance(self, RegressorMixin):
if self.metric not in ('mean absolute error', 'mse', 'rmse',
'pearson', 'spearman'):
raise ValueError('Unsupported metric: %s' % self.metric)
else:
self._metric = _fitness_map[self.metric]
elif isinstance(self, TransformerMixin):
if self.metric not in ('pearson', 'spearman'):
raise ValueError('Unsupported metric: %s' % self.metric)
else:
self._metric = _fitness_map[self.metric]
self._method_probs = np.array([self.p_crossover,
self.p_subtree_mutation,
self.p_hoist_mutation,
self.p_point_mutation])
self._method_probs = np.cumsum(self._method_probs)
if self._method_probs[-1] > 1:
raise ValueError('The sum of p_crossover, p_subtree_mutation, '
'p_hoist_mutation and p_point_mutation should '
'total to 1.0 or less.')
if self.init_method not in ('half and half', 'grow', 'full'):
raise ValueError('Valid program initializations methods include '
'"grow", "full" and "half and half". Given %s.'
% self.init_method)
if not((isinstance(self.const_range, tuple) and
len(self.const_range) == 2) or self.const_range is None):
raise ValueError('const_range should be a tuple with length two, '
'or None.')
if (not isinstance(self.init_depth, tuple) or
len(self.init_depth) != 2):
raise ValueError('init_depth should be a tuple with length two.')
if self.init_depth[0] > self.init_depth[1]:
raise ValueError('init_depth should be in increasing numerical '
'order: (min_depth, max_depth).')
if self.feature_names is not None:
if self.n_features_ != len(self.feature_names):
raise ValueError('The supplied `feature_names` has different '
'length to n_features. Expected %d, got %d.'
% (self.n_features_, len(self.feature_names)))
for feature_name in self.feature_names:
if not isinstance(feature_name, six.string_types):
raise ValueError('invalid type %s found in '
'`feature_names`.' % type(feature_name))
params = self.get_params()
params['_metric'] = self._metric
params['function_set'] = self._function_set
params['arities'] = self._arities
params['method_probs'] = self._method_probs
if not self.warm_start or not hasattr(self, '_programs'):
# Free allocated memory, if any
self._programs = []
self.run_details_ = {'generation': [],
'average_length': [],
'average_fitness': [],
'best_length': [],
'best_fitness': [],
'best_oob_fitness': [],
'generation_time': []}
prior_generations = len(self._programs)
n_more_generations = self.generations - prior_generations
if n_more_generations < 0:
raise ValueError('generations=%d must be larger or equal to '
'len(_programs)=%d when warm_start==True'
% (self.generations, len(self._programs)))
elif n_more_generations == 0:
fitness = [program.raw_fitness_ for program in self._programs[-1]]
            warn('Warm-start fitting without increasing generations does '
                 'not fit new programs.')
if self.warm_start:
# Generate and discard seeds that would have been produced on the
# initial fit call.
for i in range(len(self._programs)):
_ = random_state.randint(MAX_INT, size=self.population_size)
if self.verbose:
# Print header fields
self._verbose_reporter()
for gen in range(prior_generations, self.generations):
start_time = time()
if gen == 0:
parents = None
else:
parents = self._programs[gen - 1]
# Parallel loop
n_jobs, n_programs, starts = _partition_estimators(
self.population_size, self.n_jobs)
seeds = random_state.randint(MAX_INT, size=self.population_size)
population = Parallel(n_jobs=n_jobs,
verbose=int(self.verbose > 1))(
delayed(_parallel_evolve)(n_programs[i],
parents,
X,
y,
sample_weight,
seeds[starts[i]:starts[i + 1]],
params)
for i in range(n_jobs))
# Reduce, maintaining order across different n_jobs
population = list(itertools.chain.from_iterable(population))
fitness = [program.raw_fitness_ for program in population]
length = [program.length_ for program in population]
parsimony_coefficient = None
if self.parsimony_coefficient == 'auto':
parsimony_coefficient = (np.cov(length, fitness)[1, 0] /
np.var(length))
for program in population:
program.fitness_ = program.fitness(parsimony_coefficient)
self._programs.append(population)
# Remove old programs that didn't make it into the new population.
if not self.low_memory:
for old_gen in np.arange(gen, 0, -1):
indices = []
for program in self._programs[old_gen]:
if program is not None:
for idx in program.parents:
if 'idx' in idx:
indices.append(program.parents[idx])
indices = set(indices)
for idx in range(self.population_size):
if idx not in indices:
self._programs[old_gen - 1][idx] = None
elif gen > 0:
# Remove old generations
self._programs[gen - 1] = None
# Record run details
if self._metric.greater_is_better:
best_program = population[np.argmax(fitness)]
else:
best_program = population[np.argmin(fitness)]
self.run_details_['generation'].append(gen)
self.run_details_['average_length'].append(np.mean(length))
self.run_details_['average_fitness'].append(np.mean(fitness))
self.run_details_['best_length'].append(best_program.length_)
self.run_details_['best_fitness'].append(best_program.raw_fitness_)
oob_fitness = np.nan
if self.max_samples < 1.0:
oob_fitness = best_program.oob_fitness_
self.run_details_['best_oob_fitness'].append(oob_fitness)
generation_time = time() - start_time
self.run_details_['generation_time'].append(generation_time)
if self.verbose:
self._verbose_reporter(self.run_details_)
# Check for early stopping
if self._metric.greater_is_better:
best_fitness = fitness[np.argmax(fitness)]
if best_fitness >= self.stopping_criteria:
break
else:
best_fitness = fitness[np.argmin(fitness)]
if best_fitness <= self.stopping_criteria:
break
if isinstance(self, RegressorMixin):
# Find the best individual in the final generation
if self._metric.greater_is_better:
self._program = self._programs[-1][np.argmax(fitness)]
else:
self._program = self._programs[-1][np.argmin(fitness)]
if isinstance(self, TransformerMixin):
# Find the best individuals in the final generation
fitness = np.array(fitness)
if self._metric.greater_is_better:
hall_of_fame = fitness.argsort()[::-1][:self.hall_of_fame]
else:
hall_of_fame = fitness.argsort()[:self.hall_of_fame]
evaluation = np.array([gp.execute(X) for gp in
[self._programs[-1][i] for
i in hall_of_fame]])
if self.metric == 'spearman':
evaluation = np.apply_along_axis(rankdata, 1, evaluation)
with np.errstate(divide='ignore', invalid='ignore'):
correlations = np.abs(np.corrcoef(evaluation))
np.fill_diagonal(correlations, 0.)
components = list(range(self.hall_of_fame))
indices = list(range(self.hall_of_fame))
# Iteratively remove least fit individual of most correlated pair
while len(components) > self.n_components:
most_correlated = np.unravel_index(np.argmax(correlations),
correlations.shape)
# The correlation matrix is sorted by fitness, so identifying
# the least fit of the pair is simply getting the higher index
worst = max(most_correlated)
components.pop(worst)
indices.remove(worst)
correlations = correlations[:, indices][indices, :]
indices = list(range(len(components)))
self._best_programs = [self._programs[-1][i] for i in
hall_of_fame[components]]
return self
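# A hedged usage sketch (illustrative addition): fitting the regressor
# defined below on toy data. Only the scikit-learn-style `fit` shown above is
# relied on; the toy target, run sizes, and helper name are illustrative
# choices, not taken from the library's documentation.
def _example_fit_symbolic_regressor():
    rng = np.random.RandomState(0)
    X = rng.uniform(-1., 1., (100, 2))
    y = X[:, 0] ** 2 - X[:, 1] + 0.5
    est = SymbolicRegressor(population_size=200, generations=5,
                            random_state=0)
    est.fit(X, y)
    # the fittest program of the final generation, set by fit above
    return est._program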
class SymbolicRegressor(BaseSymbolic, RegressorMixin):
"""A Genetic Programming symbolic regressor.
A symbolic regressor is an estimator that begins by building a population
of naive random formulas to represent a relationship. The formulas are
represented as tree-like structures with mathematical functions being
recursively applied to variables and constants. Each successive generation
of programs is then evolved from the one that came before it by selecting
the fittest individuals from the population to undergo genetic operations
such as crossover, mutation or reproduction.
Parameters
----------
    population_size : integer, optional (default=1000)
        The number of programs in each generation.
    generations : integer, optional (default=20)
The number of generations to evolve.
tournament_size : integer, optional (default=20)
The number of programs that will compete to become part of the next
generation.
stopping_criteria : float, optional (default=0.0)
        The metric value required in order to stop evolution early.
const_range : tuple of two floats, or None, optional (default=(-1., 1.))
The range of constants to include in the formulas. If None then no
constants will be included in the candidate programs.
init_depth : tuple of two ints, optional (default=(2, 6))
The range of tree depths for the initial population of naive formulas.
Individual trees will randomly choose a maximum depth from this range.
When combined with `init_method='half and half'` this yields the well-
known 'ramped half and half' initialization method.
init_method : str, optional (default='half and half')
- 'grow' : Nodes are chosen at random from both functions and
terminals, allowing for smaller trees than `init_depth` allows. Tends
to grow asymmetrical trees.
- 'full' : Functions are chosen until the `init_depth` is reached, and
then terminals are selected. Tends to grow 'bushy' trees.
- 'half and half' : Trees are grown through a 50/50 mix of 'full' and
'grow', making for a mix of tree shapes in the initial population.
function_set : iterable, optional (default=('add', 'sub', 'mul', 'div'))
The functions to use when building and evolving programs. This iterable
can include strings to indicate either individual functions as outlined
below, or you can also include your own functions as built using the
``make_function`` factory from the ``functions`` module.
Available individual functions are:
- 'add' : addition, arity=2.
- 'sub' : subtraction, arity=2.
- 'mul' : multiplication, arity=2.
- 'div' : protected division where a denominator near-zero returns 1.,
arity=2.
- 'sqrt' : protected square root where the absolute value of the
argument is used, arity=1.
- 'log' : protected log where the absolute value of the argument is
used and a near-zero argument returns 0., arity=1.
- 'abs' : absolute value, arity=1.
- 'neg' : negative, arity=1.
- 'inv' : protected inverse where a near-zero argument returns 0.,
arity=1.
- 'max' : maximum, arity=2.
- 'min' : minimum, arity=2.
- 'sin' : sine (radians), arity=1.
- 'cos' : cosine (radians), arity=1.
- 'tan' : tangent (radians), arity=1.
metric : str, optional (default='mean absolute error')
The name of the raw fitness metric. Available options include:
- 'mean absolute error'.
- 'mse' for mean squared error.
- 'rmse' for root mean squared error.
- 'pearson', for Pearson's product-moment correlation coefficient.
- 'spearman' for Spearman's rank-order correlation coefficient.
Note that 'pearson' and 'spearman' will not directly predict the target
but could be useful as value-added features in a second-step estimator.
        This would allow the user to generate one engineered feature at a time;
        using the SymbolicTransformer would allow creation of multiple features
        at once.
parsimony_coefficient : float or "auto", optional (default=0.001)
This constant penalizes large programs by adjusting their fitness to
be less favorable for selection. Larger values penalize the program
more which can control the phenomenon known as 'bloat'. Bloat is when
evolution is increasing the size of programs without a significant
increase in fitness, which is costly for computation time and makes for
a less understandable final result. This parameter may need to be tuned
over successive runs.
        If "auto", the parsimony coefficient is recalculated for each generation
        using c = Cov(l,f)/Var(l), where Cov(l,f) is the covariance between
program size l and program fitness f in the population, and Var(l) is
the variance of program sizes.
p_crossover : float, optional (default=0.9)
The probability of performing crossover on a tournament winner.
Crossover takes the winner of a tournament and selects a random subtree
from it to be replaced. A second tournament is performed to find a
donor. The donor also has a subtree selected at random and this is
inserted into the original parent to form an offspring in the next
generation.
p_subtree_mutation : float, optional (default=0.01)
The probability of performing subtree mutation on a tournament winner.
Subtree mutation takes the winner of a tournament and selects a random
subtree from it to be replaced. A donor subtree is generated at random
and this is inserted into the original parent to form an offspring in
the next generation.
p_hoist_mutation : float, optional (default=0.01)
The probability of performing hoist mutation on a tournament winner.
Hoist mutation takes the winner of a tournament and selects a random
subtree from it. A random subtree of that subtree is then selected
        and this is 'hoisted' into the original subtree's location to form an
offspring in the next generation. This method helps to control bloat.
p_point_mutation : float, optional (default=0.01)
The probability of performing point mutation on a tournament winner.
Point mutation takes the winner of a tournament and selects random
nodes from it to be replaced. Terminals are replaced by other terminals
and functions are replaced by other functions that require the same
number of arguments as the original node. The resulting tree forms an
offspring in the next generation.
Note : The above genetic operation probabilities must sum to less than
one. The balance of probability is assigned to 'reproduction', where a
tournament winner is cloned and enters the next generation unmodified.
p_point_replace : float, optional (default=0.05)
For point mutation only, the probability that any given node will be
mutated.
max_samples : float, optional (default=1.0)
The fraction of samples to draw from X to evaluate each program on.
feature_names : list, optional (default=None)
Optional list of feature names, used purely for representations in
the `print` operation or `export_graphviz`. If None, then X0, X1, etc
will be used for representations.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more generations to the evolution, otherwise, just fit a new
evolution.
low_memory : bool, optional (default=False)
When set to ``True``, only the current generation is retained. Parent
information is discarded. For very large populations or runs with many
generations, this can result in substantial memory use reduction.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for `fit`. If -1, then the number
of jobs is set to the number of cores.
verbose : int, optional (default=0)
Controls the verbosity of the evolution building process.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
run_details_ : dict
Details of the evolution process. Includes the following elements:
- 'generation' : The generation index.
- 'average_length' : The average program length of the generation.
- 'average_fitness' : The average program fitness of the generation.
- 'best_length' : The length of the best program in the generation.
- 'best_fitness' : The fitness of the best program in the generation.
- 'best_oob_fitness' : The out of bag fitness of the best program in
the generation (requires `max_samples` < 1.0).
- 'generation_time' : The time it took for the generation to evolve.
See Also
--------
SymbolicTransformer
References
----------
.. [1] J. Koza, "Genetic Programming", 1992.
.. [2] R. Poli, et al. "A Field Guide to Genetic Programming", 2008.
"""
def __init__(self,
population_size=1000,
generations=20,
tournament_size=20,
stopping_criteria=0.0,
const_range=(-1., 1.),
init_depth=(2, 6),
init_method='half and half',
function_set=('add', 'sub', 'mul', 'div'),
metric='mean absolute error',
parsimony_coefficient=0.001,
p_crossover=0.9,
p_subtree_mutation=0.01,
p_hoist_mutation=0.01,
p_point_mutation=0.01,
p_point_replace=0.05,
max_samples=1.0,
feature_names=None,
warm_start=False,
low_memory=False,
n_jobs=1,
verbose=0,
random_state=None):
super(SymbolicRegressor, self).__init__(
population_size=population_size,
generations=generations,
tournament_size=tournament_size,
stopping_criteria=stopping_criteria,
const_range=const_range,
init_depth=init_depth,
init_method=init_method,
function_set=function_set,
metric=metric,
parsimony_coefficient=parsimony_coefficient,
p_crossover=p_crossover,
p_subtree_mutation=p_subtree_mutation,
p_hoist_mutation=p_hoist_mutation,
p_point_mutation=p_point_mutation,
p_point_replace=p_point_replace,
max_samples=max_samples,
feature_names=feature_names,
warm_start=warm_start,
low_memory=low_memory,
n_jobs=n_jobs,
verbose=verbose,
random_state=random_state)
def __str__(self):
"""Overloads `print` output of the object to resemble a LISP tree."""
if not hasattr(self, '_program'):
return self.__repr__()
return self._program.__str__()
def predict(self, X):
"""Perform regression on test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
y : array, shape = [n_samples]
Predicted values for X.
"""
if not hasattr(self, '_program'):
raise NotFittedError('SymbolicRegressor not fitted.')
X = check_array(X)
_, n_features = X.shape
if self.n_features_ != n_features:
raise ValueError('Number of features of the model must match the '
'input. Model n_features is %s and input '
'n_features is %s.'
% (self.n_features_, n_features))
y = self._program.execute(X)
return y
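# A minimal usage sketch (added illustration; the data names are assumed, not
# part of this module):
#
#   est = SymbolicRegressor(population_size=2000, generations=10,
#                           stopping_criteria=0.01, random_state=0)
#   est.fit(X_train, y_train)
#   print(est)                    # best program, rendered as a LISP-style tree
#   y_pred = est.predict(X_test)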
class SymbolicTransformer(BaseSymbolic, TransformerMixin):
"""A Genetic Programming symbolic transformer.
A symbolic transformer is a supervised transformer that begins by building
a population of naive random formulas to represent a relationship. The
formulas are represented as tree-like structures with mathematical
functions being recursively applied to variables and constants. Each
successive generation of programs is then evolved from the one that came
before it by selecting the fittest individuals from the population to
undergo genetic operations such as crossover, mutation or reproduction.
The final population is searched for the fittest individuals with the least
correlation to one another.
Parameters
----------
    population_size : integer, optional (default=1000)
        The number of programs in each generation.
hall_of_fame : integer, or None, optional (default=100)
The number of fittest programs to compare from when finding the
least-correlated individuals for the n_components. If `None`, the
entire final generation will be used.
n_components : integer, or None, optional (default=10)
The number of best programs to return after searching the hall_of_fame
for the least-correlated individuals. If `None`, the entire
hall_of_fame will be used.
    generations : integer, optional (default=20)
        The number of generations to evolve.
tournament_size : integer, optional (default=20)
The number of programs that will compete to become part of the next
generation.
    stopping_criteria : float, optional (default=1.0)
        The metric value required in order to stop evolution early.
const_range : tuple of two floats, or None, optional (default=(-1., 1.))
The range of constants to include in the formulas. If None then no
constants will be included in the candidate programs.
init_depth : tuple of two ints, optional (default=(2, 6))
The range of tree depths for the initial population of naive formulas.
Individual trees will randomly choose a maximum depth from this range.
When combined with `init_method='half and half'` this yields the well-
known 'ramped half and half' initialization method.
init_method : str, optional (default='half and half')
- 'grow' : Nodes are chosen at random from both functions and
terminals, allowing for smaller trees than `init_depth` allows. Tends
to grow asymmetrical trees.
- 'full' : Functions are chosen until the `init_depth` is reached, and
then terminals are selected. Tends to grow 'bushy' trees.
- 'half and half' : Trees are grown through a 50/50 mix of 'full' and
'grow', making for a mix of tree shapes in the initial population.
function_set : iterable, optional (default=('add', 'sub', 'mul', 'div'))
The functions to use when building and evolving programs. This iterable
can include strings to indicate either individual functions as outlined
below, or you can also include your own functions as built using the
``make_function`` factory from the ``functions`` module.
Available individual functions are:
- 'add' : addition, arity=2.
- 'sub' : subtraction, arity=2.
- 'mul' : multiplication, arity=2.
- 'div' : protected division where a denominator near-zero returns 1.,
arity=2.
- 'sqrt' : protected square root where the absolute value of the
argument is used, arity=1.
- 'log' : protected log where the absolute value of the argument is
used and a near-zero argument returns 0., arity=1.
- 'abs' : absolute value, arity=1.
- 'neg' : negative, arity=1.
- 'inv' : protected inverse where a near-zero argument returns 0.,
arity=1.
- 'max' : maximum, arity=2.
- 'min' : minimum, arity=2.
- 'sin' : sine (radians), arity=1.
- 'cos' : cosine (radians), arity=1.
- 'tan' : tangent (radians), arity=1.
metric : str, optional (default='pearson')
The name of the raw fitness metric. Available options include:
- 'pearson', for Pearson's product-moment correlation coefficient.
- 'spearman' for Spearman's rank-order correlation coefficient.
parsimony_coefficient : float or "auto", optional (default=0.001)
This constant penalizes large programs by adjusting their fitness to
be less favorable for selection. Larger values penalize the program
more which can control the phenomenon known as 'bloat'. Bloat is when
evolution is increasing the size of programs without a significant
increase in fitness, which is costly for computation time and makes for
a less understandable final result. This parameter may need to be tuned
over successive runs.
        If "auto", the parsimony coefficient is recalculated for each generation
        using c = Cov(l,f)/Var(l), where Cov(l,f) is the covariance between
program size l and program fitness f in the population, and Var(l) is
the variance of program sizes.
p_crossover : float, optional (default=0.9)
The probability of performing crossover on a tournament winner.
Crossover takes the winner of a tournament and selects a random subtree
from it to be replaced. A second tournament is performed to find a
donor. The donor also has a subtree selected at random and this is
inserted into the original parent to form an offspring in the next
generation.
p_subtree_mutation : float, optional (default=0.01)
The probability of performing subtree mutation on a tournament winner.
Subtree mutation takes the winner of a tournament and selects a random
subtree from it to be replaced. A donor subtree is generated at random
and this is inserted into the original parent to form an offspring in
the next generation.
p_hoist_mutation : float, optional (default=0.01)
The probability of performing hoist mutation on a tournament winner.
Hoist mutation takes the winner of a tournament and selects a random
subtree from it. A random subtree of that subtree is then selected
        and this is 'hoisted' into the original subtree's location to form an
offspring in the next generation. This method helps to control bloat.
p_point_mutation : float, optional (default=0.01)
The probability of performing point mutation on a tournament winner.
Point mutation takes the winner of a tournament and selects random
nodes from it to be replaced. Terminals are replaced by other terminals
and functions are replaced by other functions that require the same
number of arguments as the original node. The resulting tree forms an
offspring in the next generation.
Note : The above genetic operation probabilities must sum to less than
one. The balance of probability is assigned to 'reproduction', where a
tournament winner is cloned and enters the next generation unmodified.
p_point_replace : float, optional (default=0.05)
For point mutation only, the probability that any given node will be
mutated.
max_samples : float, optional (default=1.0)
The fraction of samples to draw from X to evaluate each program on.
feature_names : list, optional (default=None)
Optional list of feature names, used purely for representations in
the `print` operation or `export_graphviz`. If None, then X0, X1, etc
will be used for representations.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more generations to the evolution, otherwise, just fit a new
evolution.
low_memory : bool, optional (default=False)
When set to ``True``, only the current generation is retained. Parent
information is discarded. For very large populations or runs with many
generations, this can result in substantial memory use reduction.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for `fit`. If -1, then the number
of jobs is set to the number of cores.
verbose : int, optional (default=0)
Controls the verbosity of the evolution building process.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
run_details_ : dict
Details of the evolution process. Includes the following elements:
- 'generation' : The generation index.
- 'average_length' : The average program length of the generation.
- 'average_fitness' : The average program fitness of the generation.
- 'best_length' : The length of the best program in the generation.
- 'best_fitness' : The fitness of the best program in the generation.
- 'best_oob_fitness' : The out of bag fitness of the best program in
the generation (requires `max_samples` < 1.0).
- 'generation_time' : The time it took for the generation to evolve.
See Also
--------
SymbolicRegressor
References
----------
.. [1] J. Koza, "Genetic Programming", 1992.
.. [2] R. Poli, et al. "A Field Guide to Genetic Programming", 2008.
"""
def __init__(self,
population_size=1000,
hall_of_fame=100,
n_components=10,
generations=20,
tournament_size=20,
stopping_criteria=1.0,
const_range=(-1., 1.),
init_depth=(2, 6),
init_method='half and half',
function_set=('add', 'sub', 'mul', 'div'),
metric='pearson',
parsimony_coefficient=0.001,
p_crossover=0.9,
p_subtree_mutation=0.01,
p_hoist_mutation=0.01,
p_point_mutation=0.01,
p_point_replace=0.05,
max_samples=1.0,
feature_names=None,
warm_start=False,
low_memory=False,
n_jobs=1,
verbose=0,
random_state=None):
super(SymbolicTransformer, self).__init__(
population_size=population_size,
hall_of_fame=hall_of_fame,
n_components=n_components,
generations=generations,
tournament_size=tournament_size,
stopping_criteria=stopping_criteria,
const_range=const_range,
init_depth=init_depth,
init_method=init_method,
function_set=function_set,
metric=metric,
parsimony_coefficient=parsimony_coefficient,
p_crossover=p_crossover,
p_subtree_mutation=p_subtree_mutation,
p_hoist_mutation=p_hoist_mutation,
p_point_mutation=p_point_mutation,
p_point_replace=p_point_replace,
max_samples=max_samples,
feature_names=feature_names,
warm_start=warm_start,
low_memory=low_memory,
n_jobs=n_jobs,
verbose=verbose,
random_state=random_state)
def __len__(self):
"""Overloads `len` output to be the number of fitted components."""
if not hasattr(self, '_best_programs'):
return 0
return self.n_components
def __getitem__(self, item):
"""Return the ith item of the fitted components."""
if item >= len(self):
raise IndexError
return self._best_programs[item]
def __str__(self):
"""Overloads `print` output of the object to resemble LISP trees."""
if not hasattr(self, '_best_programs'):
return self.__repr__()
output = str([gp.__str__() for gp in self])
return output.replace("',", ",\n").replace("'", "")
def transform(self, X):
"""Transform X according to the fitted transformer.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape = [n_samples, n_components]
Transformed array.
"""
if not hasattr(self, '_best_programs'):
raise NotFittedError('SymbolicTransformer not fitted.')
X = check_array(X)
_, n_features = X.shape
if self.n_features_ != n_features:
raise ValueError('Number of features of the model must match the '
'input. Model n_features is %s and input '
'n_features is %s.'
% (self.n_features_, n_features))
X_new = np.array([gp.execute(X) for gp in self._best_programs]).T
return X_new
def fit_transform(self, X, y, sample_weight=None):
"""Fit to data, then transform it.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples.
Returns
-------
X_new : array-like, shape = [n_samples, n_components]
Transformed array.
"""
return self.fit(X, y, sample_weight).transform(X)
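# A minimal usage sketch (illustrative; X and y are assumed numpy arrays):
#
#   trans = SymbolicTransformer(n_components=5, generations=10, random_state=0)
#   features = trans.fit_transform(X, y)   # shape (n_samples, 5)
#   X_aug = np.hstack((X, features))       # feed to a downstream estimator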
| 43.488636 | 79 | 0.601897 |
3258e0a85be1ce1a7ce74d1e58b5ab2a9ffeca3e | 11,544 | py | Python | samples/openapi3/client/petstore/python/petstore_api/model/legs.py | JigarJoshi/openapi-generator | 785535b8d6881b358463994823abbda2b26ff42e | [
"Apache-2.0"
] | 1 | 2021-11-07T18:53:43.000Z | 2021-11-07T18:53:43.000Z | samples/openapi3/client/petstore/python/petstore_api/model/legs.py | JigarJoshi/openapi-generator | 785535b8d6881b358463994823abbda2b26ff42e | [
"Apache-2.0"
] | 28 | 2021-04-07T07:38:36.000Z | 2022-03-31T03:10:56.000Z | samples/openapi3/client/petstore/python/petstore_api/model/legs.py | JigarJoshi/openapi-generator | 785535b8d6881b358463994823abbda2b26ff42e | [
"Apache-2.0"
] | 2 | 2021-11-03T10:07:15.000Z | 2021-12-17T13:00:53.000Z | """
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from petstore_api.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from petstore_api.exceptions import ApiAttributeError
class Legs(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('legs',): {
'2': "2",
'4': "4",
},
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'legs': (str,), # noqa: E501
'name': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'legs': 'legs', # noqa: E501
'name': 'name', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""Legs - a model defined in OpenAPI
Args:
Keyword Args:
legs (str): defaults to "4", must be one of ["2", "4", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
                When traveling through a discriminator, the composed
                schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
name (str): [optional] # noqa: E501
"""
legs = kwargs.get('legs', "4")
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.legs = legs
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""Legs - a model defined in OpenAPI
Args:
Keyword Args:
legs (str): defaults to "4", must be one of ["2", "4", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
                When traveling through a discriminator, the composed
                schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
name (str): [optional] # noqa: E501
"""
legs = kwargs.get('legs', "4")
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.legs = legs
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
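# Usage sketch (values illustrative; enum checking is handled by the generated
# model_utils validators, so a value outside ["2", "4"] is expected to be
# rejected):
#
#   legs = Legs(legs="2", name="ostrich")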
| 42.441176 | 174 | 0.561071 |
41464cbcbc5bb63a1d1f78e77301003d4ad85ee9 | 2,533 | py | Python | projects/oldProyects/workStress/configuration/defineOccupancy.py | GGP00/soba | c193f323f26eccf579a454b8bb4bec4e80644444 | [
"MIT"
] | 1 | 2017-03-06T12:33:02.000Z | 2017-03-06T12:33:02.000Z | projects/oldProyects/workStress/configuration/defineOccupancy.py | GGP00/soba | c193f323f26eccf579a454b8bb4bec4e80644444 | [
"MIT"
] | 3 | 2017-04-26T08:57:35.000Z | 2019-04-24T08:28:24.000Z | projects/oldProyects/workStress/configuration/defineOccupancy.py | GGP00/soba | c193f323f26eccf579a454b8bb4bec4e80644444 | [
"MIT"
] | 1 | 2019-01-20T17:39:00.000Z | 2019-01-20T17:39:00.000Z | import random
def init():
global occupancy_json
#Store the occupancy
occupancy_json = []
#Workers
#Number of Occupants
NWorkers = 10
#Define states: name (str), position: str or ditc
statesWorkers = [
{'name':'leave', 'position': 'outBuilding'}, #initial state (the first)
{'name':'working in my workplace', 'position': {'Lab1.3': 1, 'Lab1.4': 1, 'Lab1.6': 1, 'Lab1.7': 1, 'Lab1.8': 1, 'Lab2.3': 1, 'Lab2.4': 1, 'Lab2.6': 1, 'Lab2.7': 1, 'Lab2.8': 1}},
{'name':'resting', 'position':'Hall.4'},
{'name':'lunch', 'position': 'outBuilding'}
]
#Define initial markov matrix
markov_matrixWorkers = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
controlBehaviourWorkers = {'arriveTime': 9.00, 'lunchTime': 15.00, 'backLunchTime': 16.00, 'leaveWorkTime': 19.00}
WorkersOccupants = {'type':'workers' , 'N':NWorkers, 'states': statesWorkers ,'matrix': markov_matrixWorkers, 'lifeWay': controlBehaviourWorkers}
occupancy_json.append(WorkersOccupants)
def returnMatrix(agent, time):
new_matrix = False
behaviour = agent.behaviour
if agent.type == 'workers':
if time < behaviour['arriveTime']:
new_matrix = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
elif behaviour['lunchTime'] >= time >= behaviour['arriveTime']:
            new_matrix = [[55, 35, 0, 0], [0, 50, 50, 0], [0, 100, 0, 0], [0, 0, 0, 0]]
elif behaviour['backLunchTime'] >= time >= behaviour['lunchTime']:
new_matrix = [[0, 0, 0, 0], [0, 70, 0, 30], [0, 100, 0, 0], [0, 0, 0, 0]]
elif behaviour['leaveWorkTime'] >= time >= behaviour['backLunchTime']:
new_matrix = [[0, 0, 0, 0], [0, 50, 50, 0], [0, 100, 0, 0], [0, 100, 0, 0]]
elif time >= behaviour['leaveWorkTime']:
new_matrix = [[100, 0, 0, 0], [70, 30, 0, 0], [0, 100, 0, 0], [0, 0, 0, 0]]
return new_matrix
else:
return new_matrix
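# The rows above are per-state transition weights in percent for the four
# worker states (an assumption based on the state list defined in init()). A
# quick structural check, sketched:
#
#   assert all(len(row) == 4 for row in new_matrix)   # one column per state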
def getTimeInState(agent, time): #Hours.Minutes
timeActivity_matrix = False
behaviour = agent.behaviour
if agent.type == 'workers':
if time < behaviour['arriveTime']:
timeActivity_matrix = [8.0, 0, 0, 0]
elif behaviour['lunchTime'] >= time >= behaviour['arriveTime']:
timeActivity_matrix = [0.30, 1.00, 0.30, 0]
elif behaviour['backLunchTime'] >= time >= behaviour['lunchTime']:
timeActivity_matrix = [0, 0.05, 0, 1.0]
elif behaviour['leaveWorkTime'] >= time >= behaviour['backLunchTime']:
timeActivity_matrix = [0, 1.00, 0.30, 0.1]
elif time >= behaviour['leaveWorkTime']:
timeActivity_matrix = [5, 0.30, 0.10, 0]
return timeActivity_matrix
else:
        return timeActivity_matrix
| 37.25 | 181 | 0.630083 |
fd9f3a5b3230bea5967150f17199088ace78f4d8 | 23,898 | py | Python | django/http/multipartparser.py | sublime1809/django | 9a5fe5b29fd431431a53da63ad8825d878ee5878 | [
"BSD-3-Clause"
] | 1 | 2019-01-31T17:16:56.000Z | 2019-01-31T17:16:56.000Z | django/http/multipartparser.py | rmutter/django | 5d044339037be879a11b03fe8bd8c3ef1d520b1a | [
"BSD-3-Clause"
] | null | null | null | django/http/multipartparser.py | rmutter/django | 5d044339037be879a11b03fe8bd8c3ef1d520b1a | [
"BSD-3-Clause"
] | null | null | null | """
Multi-part parsing for file uploads.
Exposes one class, ``MultiPartParser``, which feeds chunks of uploaded data to
file upload handlers for processing.
"""
from __future__ import unicode_literals
import base64
import binascii
import cgi
import sys
from django.conf import settings
from django.core.exceptions import SuspiciousMultipartForm
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_text
from django.utils import six
from django.utils.six.moves.urllib.parse import unquote
from django.utils.text import unescape_entities
from django.core.files.uploadhandler import StopUpload, SkipFile, StopFutureHandlers
__all__ = ('MultiPartParser', 'MultiPartParserError', 'InputStreamExhausted')
class MultiPartParserError(Exception):
pass
class InputStreamExhausted(Exception):
"""
No more reads are allowed from this device.
"""
pass
RAW = "raw"
FILE = "file"
FIELD = "field"
_BASE64_DECODE_ERROR = TypeError if six.PY2 else binascii.Error
class MultiPartParser(object):
"""
A rfc2388 multipart/form-data parser.
``MultiValueDict.parse()`` reads the input stream in ``chunk_size`` chunks
and returns a tuple of ``(MultiValueDict(POST), MultiValueDict(FILES))``.
"""
def __init__(self, META, input_data, upload_handlers, encoding=None):
"""
Initialize the MultiPartParser object.
:META:
The standard ``META`` dictionary in Django request objects.
:input_data:
The raw post data, as a file-like object.
:upload_handlers:
A list of UploadHandler instances that perform operations on the uploaded
data.
:encoding:
The encoding with which to treat the incoming data.
"""
#
# Content-Type should contain multipart and the boundary information.
#
content_type = META.get('HTTP_CONTENT_TYPE', META.get('CONTENT_TYPE', ''))
if not content_type.startswith('multipart/'):
raise MultiPartParserError('Invalid Content-Type: %s' % content_type)
# Parse the header to get the boundary to split the parts.
ctypes, opts = parse_header(content_type.encode('ascii'))
boundary = opts.get('boundary')
if not boundary or not cgi.valid_boundary(boundary):
raise MultiPartParserError('Invalid boundary in multipart: %s' % boundary)
# Content-Length should contain the length of the body we are about
# to receive.
try:
content_length = int(META.get('HTTP_CONTENT_LENGTH', META.get('CONTENT_LENGTH', 0)))
except (ValueError, TypeError):
content_length = 0
if content_length < 0:
# This means we shouldn't continue...raise an error.
raise MultiPartParserError("Invalid content length: %r" % content_length)
if isinstance(boundary, six.text_type):
boundary = boundary.encode('ascii')
self._boundary = boundary
self._input_data = input_data
# For compatibility with low-level network APIs (with 32-bit integers),
# the chunk size should be < 2^31, but still divisible by 4.
possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
self._chunk_size = min([2 ** 31 - 4] + possible_sizes)
self._meta = META
self._encoding = encoding or settings.DEFAULT_CHARSET
self._content_length = content_length
self._upload_handlers = upload_handlers
def parse(self):
"""
Parse the POST data and break it into a FILES MultiValueDict and a POST
MultiValueDict.
Returns a tuple containing the POST and FILES dictionary, respectively.
"""
# We have to import QueryDict down here to avoid a circular import.
from django.http import QueryDict
encoding = self._encoding
handlers = self._upload_handlers
# HTTP spec says that Content-Length >= 0 is valid
# handling content-length == 0 before continuing
if self._content_length == 0:
return QueryDict('', encoding=self._encoding), MultiValueDict()
# See if any of the handlers take care of the parsing.
# This allows overriding everything if need be.
for handler in handlers:
result = handler.handle_raw_input(self._input_data,
self._meta,
self._content_length,
self._boundary,
encoding)
# Check to see if it was handled
if result is not None:
return result[0], result[1]
# Create the data structures to be used later.
self._post = QueryDict('', mutable=True)
self._files = MultiValueDict()
# Instantiate the parser and stream:
stream = LazyStream(ChunkIter(self._input_data, self._chunk_size))
# Whether or not to signal a file-completion at the beginning of the loop.
old_field_name = None
counters = [0] * len(handlers)
try:
for item_type, meta_data, field_stream in Parser(stream, self._boundary):
if old_field_name:
# We run this at the beginning of the next loop
# since we cannot be sure a file is complete until
# we hit the next boundary/part of the multipart content.
self.handle_file_complete(old_field_name, counters)
old_field_name = None
try:
disposition = meta_data['content-disposition'][1]
field_name = disposition['name'].strip()
except (KeyError, IndexError, AttributeError):
continue
transfer_encoding = meta_data.get('content-transfer-encoding')
if transfer_encoding is not None:
transfer_encoding = transfer_encoding[0].strip()
field_name = force_text(field_name, encoding, errors='replace')
if item_type == FIELD:
# This is a post field, we can just set it in the post
if transfer_encoding == 'base64':
raw_data = field_stream.read()
try:
data = base64.b64decode(raw_data)
except _BASE64_DECODE_ERROR:
data = raw_data
else:
data = field_stream.read()
self._post.appendlist(field_name,
force_text(data, encoding, errors='replace'))
elif item_type == FILE:
# This is a file, use the handler...
file_name = disposition.get('filename')
if not file_name:
continue
file_name = force_text(file_name, encoding, errors='replace')
file_name = self.IE_sanitize(unescape_entities(file_name))
content_type, content_type_extra = meta_data.get('content-type', ('', {}))
content_type = content_type.strip()
charset = content_type_extra.get('charset')
try:
content_length = int(meta_data.get('content-length')[0])
except (IndexError, TypeError, ValueError):
content_length = None
counters = [0] * len(handlers)
try:
for handler in handlers:
try:
handler.new_file(field_name, file_name,
content_type, content_length,
charset, content_type_extra)
except StopFutureHandlers:
break
for chunk in field_stream:
if transfer_encoding == 'base64':
# We only special-case base64 transfer encoding
# We should always read base64 streams by multiple of 4
over_bytes = len(chunk) % 4
if over_bytes:
over_chunk = field_stream.read(4 - over_bytes)
chunk += over_chunk
try:
chunk = base64.b64decode(chunk)
except Exception as e:
# Since this is only a chunk, any error is an unfixable error.
msg = "Could not decode base64 data: %r" % e
six.reraise(MultiPartParserError, MultiPartParserError(msg), sys.exc_info()[2])
for i, handler in enumerate(handlers):
chunk_length = len(chunk)
chunk = handler.receive_data_chunk(chunk,
counters[i])
counters[i] += chunk_length
if chunk is None:
# If the chunk received by the handler is None, then don't continue.
break
except SkipFile:
self._close_files()
# Just use up the rest of this file...
exhaust(field_stream)
else:
# Handle file upload completions on next iteration.
old_field_name = field_name
else:
# If this is neither a FIELD or a FILE, just exhaust the stream.
exhaust(stream)
except StopUpload as e:
self._close_files()
if not e.connection_reset:
exhaust(self._input_data)
else:
# Make sure that the request data is all fed
exhaust(self._input_data)
# Signal that the upload has completed.
for handler in handlers:
retval = handler.upload_complete()
if retval:
break
return self._post, self._files
def handle_file_complete(self, old_field_name, counters):
"""
Handle all the signaling that takes place when a file is complete.
"""
for i, handler in enumerate(self._upload_handlers):
file_obj = handler.file_complete(counters[i])
if file_obj:
# If it returns a file object, then set the files dict.
self._files.appendlist(
force_text(old_field_name, self._encoding, errors='replace'),
file_obj)
break
def IE_sanitize(self, filename):
"""Cleanup filename from Internet Explorer full paths."""
return filename and filename[filename.rfind("\\") + 1:].strip()
def _close_files(self):
# Free up all file handles.
# FIXME: this currently assumes that upload handlers store the file as 'file'
# We should document that... (Maybe add handler.free_file to complement new_file)
for handler in self._upload_handlers:
if hasattr(handler, 'file'):
handler.file.close()
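# Typical entry point, sketched (META values and `body` are illustrative; in
# practice Django's request machinery constructs this):
#
#   from io import BytesIO
#   parser = MultiPartParser(
#       {'CONTENT_TYPE': 'multipart/form-data; boundary=abc',
#        'CONTENT_LENGTH': str(len(body))},
#       BytesIO(body), upload_handlers)
#   post, files = parser.parse()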
class LazyStream(six.Iterator):
"""
The LazyStream wrapper allows one to get and "unget" bytes from a stream.
Given a producer object (an iterator that yields bytestrings), the
LazyStream object will support iteration, reading, and keeping a "look-back"
variable in case you need to "unget" some bytes.
"""
def __init__(self, producer, length=None):
"""
Every LazyStream must have a producer when instantiated.
A producer is an iterable that returns a string each time it
is called.
"""
self._producer = producer
self._empty = False
self._leftover = b''
self.length = length
self.position = 0
self._remaining = length
self._unget_history = []
def tell(self):
return self.position
def read(self, size=None):
def parts():
remaining = self._remaining if size is None else size
# do the whole thing in one shot if no limit was provided.
if remaining is None:
yield b''.join(self)
return
# otherwise do some bookkeeping to return exactly enough
# of the stream and stashing any extra content we get from
# the producer
while remaining != 0:
assert remaining > 0, 'remaining bytes to read should never go negative'
chunk = next(self)
emitting = chunk[:remaining]
self.unget(chunk[remaining:])
remaining -= len(emitting)
yield emitting
out = b''.join(parts())
return out
def __next__(self):
"""
Used when the exact number of bytes to read is unimportant.
This procedure just returns whatever is chunk is conveniently returned
from the iterator instead. Useful to avoid unnecessary bookkeeping if
performance is an issue.
"""
if self._leftover:
output = self._leftover
self._leftover = b''
else:
output = next(self._producer)
self._unget_history = []
self.position += len(output)
return output
def close(self):
"""
Used to invalidate/disable this lazy stream.
Replaces the producer with an empty list. Any leftover bytes that have
already been read will still be reported upon read() and/or next().
"""
self._producer = []
def __iter__(self):
return self
def unget(self, bytes):
"""
Places bytes back onto the front of the lazy stream.
Future calls to read() will return those bytes first. The
stream position and thus tell() will be rewound.
"""
if not bytes:
return
self._update_unget_history(len(bytes))
self.position -= len(bytes)
self._leftover = b''.join([bytes, self._leftover])
def _update_unget_history(self, num_bytes):
"""
Updates the unget history as a sanity check to see if we've pushed
back the same number of bytes in one chunk. If we keep ungetting the
        same number of bytes many times (here, 50), we're most likely in an
infinite loop of some sort. This is usually caused by a
maliciously-malformed MIME request.
"""
self._unget_history = [num_bytes] + self._unget_history[:49]
number_equal = len([current_number for current_number in self._unget_history
if current_number == num_bytes])
if number_equal > 40:
raise SuspiciousMultipartForm(
"The multipart parser got stuck, which shouldn't happen with"
" normal uploaded files. Check for malicious upload activity;"
" if there is none, report this to the Django developers."
)
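# LazyStream behaviour, sketched (illustrative):
#
#   ls = LazyStream(iter([b'abcdef']))
#   ls.read(2)        # -> b'ab'; ls.tell() == 2
#   ls.unget(b'ab')   # rewinds; ls.tell() == 0
#   ls.read()         # -> b'abcdef'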
class ChunkIter(six.Iterator):
"""
An iterable that will yield chunks of data. Given a file-like object as the
constructor, this object will yield chunks of read operations from that
object.
"""
def __init__(self, flo, chunk_size=64 * 1024):
self.flo = flo
self.chunk_size = chunk_size
def __next__(self):
try:
data = self.flo.read(self.chunk_size)
except InputStreamExhausted:
raise StopIteration()
if data:
return data
else:
raise StopIteration()
def __iter__(self):
return self
class InterBoundaryIter(six.Iterator):
"""
A Producer that will iterate over boundaries.
"""
def __init__(self, stream, boundary):
self._stream = stream
self._boundary = boundary
def __iter__(self):
return self
def __next__(self):
try:
return LazyStream(BoundaryIter(self._stream, self._boundary))
except InputStreamExhausted:
raise StopIteration()
class BoundaryIter(six.Iterator):
"""
A Producer that is sensitive to boundaries.
Will happily yield bytes until a boundary is found. Will yield the bytes
before the boundary, throw away the boundary bytes themselves, and push the
post-boundary bytes back on the stream.
    Future calls to next() after locating the boundary will raise a
StopIteration exception.
"""
def __init__(self, stream, boundary):
self._stream = stream
self._boundary = boundary
self._done = False
# rollback an additional six bytes because the format is like
# this: CRLF<boundary>[--CRLF]
self._rollback = len(boundary) + 6
        # Read one byte up front to make sure the stream isn't already
        # exhausted; if it isn't, push the byte back.
unused_char = self._stream.read(1)
if not unused_char:
raise InputStreamExhausted()
self._stream.unget(unused_char)
def __iter__(self):
return self
def __next__(self):
if self._done:
raise StopIteration()
stream = self._stream
rollback = self._rollback
bytes_read = 0
chunks = []
for bytes in stream:
bytes_read += len(bytes)
chunks.append(bytes)
if bytes_read > rollback:
break
if not bytes:
break
else:
self._done = True
if not chunks:
raise StopIteration()
chunk = b''.join(chunks)
boundary = self._find_boundary(chunk, len(chunk) < self._rollback)
if boundary:
end, next = boundary
stream.unget(chunk[next:])
self._done = True
return chunk[:end]
else:
# make sure we don't treat a partial boundary (and
# its separators) as data
if not chunk[:-rollback]: # and len(chunk) >= (len(self._boundary) + 6):
# There's nothing left, we should just return and mark as done.
self._done = True
return chunk
else:
stream.unget(chunk[-rollback:])
return chunk[:-rollback]
def _find_boundary(self, data, eof=False):
"""
Finds a multipart boundary in data.
        Should no boundary exist in the data, None is returned. Otherwise,
        a tuple containing the indices of the following is returned:
* the end of current encapsulation
* the start of the next encapsulation
"""
index = data.find(self._boundary)
if index < 0:
return None
else:
end = index
next = index + len(self._boundary)
# backup over CRLF
last = max(0, end - 1)
if data[last:last + 1] == b'\n':
end -= 1
last = max(0, end - 1)
if data[last:last + 1] == b'\r':
end -= 1
return end, next
def exhaust(stream_or_iterable):
"""
Completely exhausts an iterator or stream.
Raise a MultiPartParserError if the argument is not a stream or an iterable.
"""
iterator = None
try:
iterator = iter(stream_or_iterable)
except TypeError:
iterator = ChunkIter(stream_or_iterable, 16384)
if iterator is None:
raise MultiPartParserError('multipartparser.exhaust() was passed a non-iterable or stream parameter')
for __ in iterator:
pass
def parse_boundary_stream(stream, max_header_size):
"""
Parses one and exactly one stream that encapsulates a boundary.
"""
# Stream at beginning of header, look for end of header
# and parse it if found. The header must fit within one
# chunk.
chunk = stream.read(max_header_size)
# 'find' returns the top of these four bytes, so we'll
# need to munch them later to prevent them from polluting
# the payload.
header_end = chunk.find(b'\r\n\r\n')
def _parse_header(line):
main_value_pair, params = parse_header(line)
try:
name, value = main_value_pair.split(':', 1)
except ValueError:
raise ValueError("Invalid header: %r" % line)
return name, (value, params)
if header_end == -1:
# we find no header, so we just mark this fact and pass on
# the stream verbatim
stream.unget(chunk)
return (RAW, {}, stream)
header = chunk[:header_end]
# here we place any excess chunk back onto the stream, as
# well as throwing away the CRLFCRLF bytes from above.
stream.unget(chunk[header_end + 4:])
TYPE = RAW
outdict = {}
# Eliminate blank lines
for line in header.split(b'\r\n'):
# This terminology ("main value" and "dictionary of
# parameters") is from the Python docs.
try:
name, (value, params) = _parse_header(line)
except ValueError:
continue
if name == 'content-disposition':
TYPE = FIELD
if params.get('filename'):
TYPE = FILE
outdict[name] = value, params
if TYPE == RAW:
stream.unget(chunk)
return (TYPE, outdict, stream)
class Parser(object):
def __init__(self, stream, boundary):
self._stream = stream
self._separator = b'--' + boundary
def __iter__(self):
boundarystream = InterBoundaryIter(self._stream, self._separator)
for sub_stream in boundarystream:
# Iterate over each part
yield parse_boundary_stream(sub_stream, 1024)
def parse_header(line):
""" Parse the header into a key-value.
Input (line): bytes, output: unicode for key/name, bytes for value which
will be decoded later
"""
plist = _parse_header_params(b';' + line)
key = plist.pop(0).lower().decode('ascii')
pdict = {}
for p in plist:
i = p.find(b'=')
if i >= 0:
has_encoding = False
name = p[:i].strip().lower().decode('ascii')
if name.endswith('*'):
# Lang/encoding embedded in the value (like "filename*=UTF-8''file.ext")
# http://tools.ietf.org/html/rfc2231#section-4
name = name[:-1]
has_encoding = True
value = p[i + 1:].strip()
if has_encoding:
encoding, lang, value = value.split(b"'")
if six.PY3:
value = unquote(value.decode(), encoding=encoding.decode())
else:
value = unquote(value).decode(encoding)
if len(value) >= 2 and value[:1] == value[-1:] == b'"':
value = value[1:-1]
value = value.replace(b'\\\\', b'\\').replace(b'\\"', b'"')
pdict[name] = value
return key, pdict
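# Example (worked from the rules above):
#
#   parse_header(b'content-disposition: form-data; name="f"; filename="a.txt"')
#   # -> ('content-disposition: form-data',
#   #     {'name': b'f', 'filename': b'a.txt'})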
def _parse_header_params(s):
plist = []
while s[:1] == b';':
s = s[1:]
end = s.find(b';')
while end > 0 and s.count(b'"', 0, end) % 2:
end = s.find(b';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
plist.append(f.strip())
s = s[end:]
return plist
| 35.721973 | 115 | 0.56557 |
859b2626c3a9beb21595997f25dad0c091677ebe | 2,219 | py | Python | losses.py | zitorelova/airbus-ship-detection | 137270222dd7074bc6dcb8bd35e5011ac286c9bd | [
"MIT"
] | 2 | 2020-03-31T14:18:18.000Z | 2021-02-12T19:01:24.000Z | losses.py | zitorelova/airbus-ship-detection | 137270222dd7074bc6dcb8bd35e5011ac286c9bd | [
"MIT"
] | null | null | null | losses.py | zitorelova/airbus-ship-detection | 137270222dd7074bc6dcb8bd35e5011ac286c9bd | [
"MIT"
] | null | null | null | from inclusion import *
def dice_loss(input, target):
input = torch.sigmoid(input)
smooth = 1.0
iflat = input.view(-1)
tflat = target.view(-1)
intersection = (iflat * tflat).sum()
return ((2.0 * intersection + smooth) / (iflat.sum() + tflat.sum() + smooth))
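# Note: despite its name, this returns the smoothed Dice *coefficient*
# (higher is better); MixedLoss below turns it into a loss via -log(dice).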
class FocalLoss(nn.Module):
def __init__(self, gamma):
super().__init__()
self.gamma = gamma
def forward(self, input, target):
if not (target.size() == input.size()):
raise ValueError("Target size ({}) must be the same as input size ({})"
.format(target.size(), input.size()))
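        # Numerically stable BCE-with-logits: equivalent to
        # softplus(input) - input * target, with max_val = max(-input, 0)
        # keeping every exponent <= 0.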
max_val = (-input).clamp(min=0)
loss = input - input * target + max_val + \
((-max_val).exp() + (-input - max_val).exp()).log()
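        # Focal modulation (1 - p_t)**gamma: logsigmoid(-input * (2t - 1))
        # equals log(1 - p_t), the log-probability of the wrong class.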
invprobs = F.logsigmoid(-input * (target * 2.0 - 1.0))
loss = (invprobs * self.gamma).exp() * loss
return loss.mean()
class MixedLoss(nn.Module):
def __init__(self, alpha, gamma):
super().__init__()
self.alpha = alpha
self.focal = FocalLoss(gamma)
def forward(self, input, target):
loss = self.alpha*self.focal(input, target) - torch.log(dice_loss(input, target))
return loss.mean()
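# Usage sketch (shapes illustrative):
#
#   criterion = MixedLoss(alpha=10.0, gamma=2.0)
#   logits = torch.randn(4, 1, 256, 256)
#   target = torch.randint(0, 2, (4, 1, 256, 256)).float()
#   loss = criterion(logits, target)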
def dice(pred, targs):
pred = (pred>0).float()
return 2.0 * (pred*targs).sum() / ((pred+targs).sum() + 1.0)
def IoU(pred, targs):
pred = (pred>0).float()
intersection = (pred*targs).sum()
return intersection / ((pred+targs).sum() - intersection + 1.0)
class LossBinary:
def __init__(self, jaccard_weight=0):
self.nll_loss = nn.BCEWithLogitsLoss()
self.jaccard_weight = jaccard_weight
def __call__(self, outputs, targets):
loss = self.nll_loss(outputs, targets)
if self.jaccard_weight:
eps = 1e-15
jaccard_target = (targets == 1.0).float()
jaccard_output = torch.sigmoid(outputs)
intersection = (jaccard_output * jaccard_target).sum()
union = jaccard_output.sum() + jaccard_target.sum()
loss -= self.jaccard_weight * torch.log((intersection + eps) / (union - intersection + eps))
return loss
| 32.15942 | 104 | 0.581343 |
c1f774ff05a23eb51557df160386264f4c68cd57 | 3,482 | py | Python | test/test_lookup_rotation.py | Nick-Singstock/qiskit-aqua | 8c2bc57b78dec447faec3adbc966471a3206c2ef | [
"Apache-2.0"
] | 1 | 2020-11-06T01:09:28.000Z | 2020-11-06T01:09:28.000Z | test/test_lookup_rotation.py | Nick-Singstock/qiskit-aqua | 8c2bc57b78dec447faec3adbc966471a3206c2ef | [
"Apache-2.0"
] | null | null | null | test/test_lookup_rotation.py | Nick-Singstock/qiskit-aqua | 8c2bc57b78dec447faec3adbc966471a3206c2ef | [
"Apache-2.0"
] | 1 | 2020-11-06T01:09:43.000Z | 2020-11-06T01:09:43.000Z | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM Corp. 2017 and later.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
import unittest
from parameterized import parameterized
from qiskit import QuantumRegister, QuantumCircuit
from test.common import QiskitAquaTestCase
from qiskit.aqua.components.reciprocals.lookup_rotation import LookupRotation
from qiskit import execute
from qiskit import BasicAer
import numpy as np
from qiskit.quantum_info import state_fidelity, basis_state
class TestLookupRotation(QiskitAquaTestCase):
"""Lookup Rotation tests."""
#def setUp(self):
@parameterized.expand([[3, 1/2], [5, 1/4], [7, 1/8], [9, 1/16], [11, 1/32]])
def test_lookup_rotation(self, reg_size, ref_rot):
self.log.debug('Testing Lookup Rotation with positive eigenvalues')
ref_sv_ampl = ref_rot**2
ref_size = reg_size + 3 # add work, msq and anc qubits
ref_dim = 2**ref_size
ref_sv = np.zeros(ref_dim, dtype=complex)
ref_sv[int(ref_dim/2)+1] = ref_sv_ampl+0j
ref_sv[1] = np.sqrt(1-ref_sv_ampl**2)+0j
state = basis_state('1', reg_size)
a = QuantumRegister(reg_size, name='a')
init_circuit = QuantumCircuit(a)
init_circuit.initialize(state, a)
lrot = LookupRotation(negative_evals=False)
lrot_circuit = init_circuit + lrot.construct_circuit('', a)
lrot_sv = sim_statevec(lrot_circuit)
fidelity = state_fidelity(lrot_sv, ref_sv)
np.testing.assert_approx_equal(fidelity, 1, significant=5)
self.log.debug('Lookup rotation register size: {}'.format(reg_size))
self.log.debug('Lookup rotation fidelity: {}'.format(fidelity))
@parameterized.expand([[3, 0], [5, 1/4], [7, 1/8], [9, 1/16], [11, 1/32]])
def test_lookup_rotation_neg(self, reg_size, ref_rot):
self.log.debug('Testing Lookup Rotation with support for negative '
'eigenvalues')
ref_sv_ampl = ref_rot**2
ref_size = reg_size + 3 # add work, msq and anc qubits
ref_dim = 2**ref_size
ref_sv = np.zeros(ref_dim, dtype=complex)
ref_sv[int(ref_dim/2)+1] = -ref_sv_ampl+0j
ref_sv[1] = -np.sqrt(1-ref_sv_ampl**2)+0j
state = basis_state('1', reg_size)
a = QuantumRegister(reg_size, name='a')
init_circuit = QuantumCircuit(a)
init_circuit.initialize(state, a)
lrot = LookupRotation(negative_evals=True)
lrot_circuit = init_circuit + lrot.construct_circuit('', a)
lrot_sv = sim_statevec(lrot_circuit)
fidelity = state_fidelity(lrot_sv, ref_sv)
np.testing.assert_approx_equal(fidelity, 1, significant=5)
self.log.debug('Lookup rotation register size: {}'.format(reg_size))
self.log.debug('Lookup rotation fidelity: {}'.format(fidelity))
def sim_statevec(qc):
backend = BasicAer.get_backend('statevector_simulator')
job = execute(qc, backend)
result = job.result()
state_vec = result.get_statevector(qc)
return state_vec
if __name__ == '__main__':
unittest.main()
| 39.123596 | 80 | 0.682079 |
7ea4220542cb45fc1cd4d49700549e3d3955ee46 | 992 | py | Python | sitch/sitchlib/ocid_csv.py | codecuisine/feed_builder | d63f543bdb306a5e25ceaa346e3ba465e735731c | [
"Apache-2.0"
] | null | null | null | sitch/sitchlib/ocid_csv.py | codecuisine/feed_builder | d63f543bdb306a5e25ceaa346e3ba465e735731c | [
"Apache-2.0"
] | null | null | null | sitch/sitchlib/ocid_csv.py | codecuisine/feed_builder | d63f543bdb306a5e25ceaa346e3ba465e735731c | [
"Apache-2.0"
] | null | null | null | import csv
import gzip
class OcidCsv(object):
""" This wraps the OpenCellID CSV dataset. """
def __init__(self, data_bundle):
self.data_bundle = data_bundle
def __iter__(self):
with gzip.open(self.data_bundle, 'r') as bolus:
consumer = csv.DictReader(bolus)
for row in consumer:
yield row
def get_mcc_list(self):
mcc_list = []
with gzip.open(self.data_bundle, 'r') as bolus:
consumer = csv.DictReader(bolus)
for row in consumer:
if row["mcc"] not in mcc_list:
mcc_list.append(row["mcc"])
return mcc_list
def get_all_for_mcc(self, radio, mcc):
results = []
with gzip.open(self.data_bundle, 'r') as bolus:
consumer = csv.DictReader(bolus)
for row in consumer:
if (row["mcc"] == mcc and row["radio"] == radio):
results.append(row)
return results
| 30.060606 | 65 | 0.554435 |
2e24be13ed8e64362503ed23131b638f1cfa7d1b | 3,978 | py | Python | vigranumpy/examples/grid_graph_shortestpath.py | ThomasWalter/vigra | e92c892aae38c3977dc3f6400f46377b0cb61799 | [
"MIT"
] | null | null | null | vigranumpy/examples/grid_graph_shortestpath.py | ThomasWalter/vigra | e92c892aae38c3977dc3f6400f46377b0cb61799 | [
"MIT"
] | null | null | null | vigranumpy/examples/grid_graph_shortestpath.py | ThomasWalter/vigra | e92c892aae38c3977dc3f6400f46377b0cb61799 | [
"MIT"
] | null | null | null | import vigra
import vigra.graphs as vigraph
import pylab
import numpy
np=numpy
import sys
import matplotlib
import pylab as plt
import math
from matplotlib.widgets import Slider, Button, RadioButtons
def makeWeights(gamma):
global hessian,gradmag,gridGraph
print "hessian",hessian.min(),hessian.max()
print "raw ",raw.min(),raw.max()
wImg= numpy.exp((gradmag**0.5)*gamma*-1.0)#**0.5
wImg = numpy.array(wImg).astype(numpy.float32)
w=vigra.graphs.implicitMeanEdgeMap(gridGraph,wImg)
return w
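# Larger gamma makes the edge weights decay faster with gradient magnitude,
# so the shortest path hugs image edges more tightly; the gamma slider below
# recomputes these weights interactively.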
def makeVisuImage(path,img):
coords = (path[:,0],path[:,1])
visuimg =img.copy()
iR=visuimg[:,:,0]
iG=visuimg[:,:,1]
iB=visuimg[:,:,2]
iR[coords]=255
iG[coords]=0
iB[coords]=0
visuimg-=visuimg.min()
visuimg/=visuimg.max()
return visuimg
f = '100075.jpg'
f = '69015.jpg'
#f = "/media/tbeier/GSP1RMCPRFR/iso.03530.png"
img = vigra.impex.readImage(f)
print img.shape
if(img.shape[2]==1):
img = numpy.concatenate([img]*3,axis=2)
imgLab = img
imgLab = vigra.taggedView(imgLab,'xyc')
else:
imgLab = vigra.colors.transform_RGB2Lab(img)
sigma = 1.0
imgLab-=imgLab.min()
imgLab/=imgLab.max()
imgLab*=255
img-=img.min()
img/=img.max()
img*=255
print imgLab.shape
print "interpolate image"
imgLabSmall = imgLab
# make a few edge weights
gradmag = numpy.squeeze(vigra.filters.gaussianGradientMagnitude(imgLabSmall,sigma))
hessian = numpy.squeeze(vigra.filters.hessianOfGaussianEigenvalues(imgLabSmall[:,:,0],sigma))[:,:,0]
hessian-=hessian.min()
raw = 256-imgLabSmall[:,:,0].copy()
gridGraph = vigraph.gridGraph(imgLab.shape[:2],False)
weights = makeWeights(3.0)
pathFinder = vigraph.ShortestPathPathDijkstra(gridGraph)
visuimg =img.copy()
ax = plt.gca()
fig = plt.gcf()
visuimg-=visuimg.min()
visuimg/=visuimg.max()
implot = ax.imshow(numpy.swapaxes(visuimg,0,1),cmap='gray')
clickList=[]
frozen = False
axslider = plt.axes([0.0, 0.00, 0.4, 0.075])
axfreeze = plt.axes([0.6, 0.00, 0.1, 0.075])
axunfreeze = plt.axes([0.8, 0.00, 0.1, 0.075])
bfreeze = Button(axfreeze, 'freeze')
bunfreeze = Button(axunfreeze, 'unfreeze and clear')
sgamma = Slider(axslider, 'gamma', 0.01, 5.0, valinit=1.0)
def onclick(event):
global clickList
global weights
global img
if event.xdata != None and event.ydata != None:
xRaw,yRaw = event.xdata,event.ydata
if not frozen and xRaw >=0.0 and yRaw>=0.0 and xRaw<img.shape[0] and yRaw<img.shape[1]:
x,y = long(math.floor(event.xdata)),long(math.floor(event.ydata))
clickList.append((x,y))
if len(clickList)==2:
source = gridGraph.coordinateToNode(clickList[0])
target = gridGraph.coordinateToNode(clickList[1])
weights = makeWeights(sgamma.val)
#path = pathFinder.run(weights, source,target).path(pathType='coordinates')
path = pathFinder.run(weights, source).path(pathType='coordinates',target=target)
visuimg = makeVisuImage(path,img)
implot.set_data(numpy.swapaxes(visuimg,0,1))
plt.draw()
def freeze(event):
global frozen
frozen=True
def unfreeze(event):
global frozen,clickList
frozen=False
clickList = []
def onslide(event):
global img,gradmag,weights,clickList,sgamma
weights = makeWeights(sgamma.val)
print "onslide",clickList
if len(clickList)>=2:
print "we have path"
source = gridGraph.coordinateToNode(clickList[0])
target = gridGraph.coordinateToNode(clickList[1])
path = pathFinder.run(weights, source,target).path(pathType='coordinates')
visuimg = makeVisuImage(path,img)
implot.set_data(numpy.swapaxes(visuimg,0,1))
plt.draw()
bfreeze.on_clicked(freeze)
bunfreeze.on_clicked(unfreeze)
sgamma.on_changed(onslide)
cid = fig.canvas.mpl_connect('button_press_event', onclick)
plt.show()
| 26.344371 | 100 | 0.661639 |
c009465cb17fa9fc2d5aa18f09754b3b7e6d58a3 | 1,747 | py | Python | sympy/solvers/tests/test_numeric.py | goodok/sympy | de84ed2139125a755ea7b6ba91d945d9fbbe5ed9 | [
"BSD-3-Clause"
] | 2 | 2015-05-11T12:26:38.000Z | 2016-08-19T00:11:03.000Z | sympy/solvers/tests/test_numeric.py | goodok/sympy | de84ed2139125a755ea7b6ba91d945d9fbbe5ed9 | [
"BSD-3-Clause"
] | null | null | null | sympy/solvers/tests/test_numeric.py | goodok/sympy | de84ed2139125a755ea7b6ba91d945d9fbbe5ed9 | [
"BSD-3-Clause"
] | null | null | null | from sympy import Eq, Matrix, pi, sin, sqrt, Symbol
from sympy.mpmath import mnorm, mpf
from sympy.solvers import nsolve
from sympy.utilities.lambdify import lambdify
from sympy.utilities.pytest import raises
def test_nsolve():
    # one-dimensional
x = Symbol('x')
assert nsolve(sin(x), 2) - pi.evalf() < 1e-15
assert nsolve(Eq(2*x, 2), x, -10) == nsolve(2*x - 2, -10)
# Testing checks on number of inputs
raises(TypeError, "nsolve(Eq(2*x,2))")
raises(TypeError, "nsolve(Eq(2*x,2),x,1,2)")
# Issue 1730
assert nsolve(x**2/(1-x)/(1-2*x)**2-100, x, 0) # doesn't fail
# multidimensional
x1 = Symbol('x1')
x2 = Symbol('x2')
f1 = 3 * x1**2 - 2 * x2**2 - 1
f2 = x1**2 - 2 * x1 + x2**2 + 2 * x2 - 8
f = Matrix((f1, f2)).T
F = lambdify((x1, x2), f.T, modules='mpmath')
for x0 in [(-1, 1), (1, -2), (4, 4), (-4, -4)]:
x = nsolve(f, (x1, x2), x0, tol=1.e-8)
assert mnorm(F(*x),1) <= 1.e-10
# The Chinese mathematician Zhu Shijie was the very first to solve this
# nonlinear system 700 years ago (z was added to make it 3-dimensional)
x = Symbol('x')
y = Symbol('y')
z = Symbol('z')
f1 = -x + 2*y
f2 = (x**2 + x*(y**2 - 2) - 4*y) / (x + 4)
f3 = sqrt(x**2 + y**2)*z
f = Matrix((f1, f2, f3)).T
F = lambdify((x, y, z), f.T, modules='mpmath')
def getroot(x0):
root = nsolve(f, (x, y, z), x0)
assert mnorm(F(*root),1) <= 1.e-8
return root
assert map(round, getroot((1, 1, 1))) == [2.0, 1.0, 0.0]
assert nsolve([Eq(f1), Eq(f2), Eq(f3)], [x, y, z], (1, 1, 1)) # just see that it works
a = Symbol('a')
assert nsolve(1/(0.001 + a)**3 - 6/(0.9 - a)**3, a, 0.3).ae(
mpf('0.31883011387318591'))
| 37.978261 | 90 | 0.543217 |
6d870364dcb64107caff92ceffbaef6264244e63 | 2,352 | py | Python | homeassistant/components/device_tracker/bbox.py | robin13/home-assistant | 4976569e304c23975d34ec88e2dfb94e84ab1f1c | [
"Apache-2.0"
] | 37 | 2018-05-22T07:17:26.000Z | 2022-03-03T13:14:46.000Z | homeassistant/components/device_tracker/bbox.py | robin13/home-assistant | 4976569e304c23975d34ec88e2dfb94e84ab1f1c | [
"Apache-2.0"
] | 125 | 2018-12-11T07:31:20.000Z | 2021-07-27T08:20:03.000Z | homeassistant/components/device_tracker/bbox.py | robin13/home-assistant | 4976569e304c23975d34ec88e2dfb94e84ab1f1c | [
"Apache-2.0"
] | 8 | 2018-05-30T20:05:26.000Z | 2021-02-19T14:17:05.000Z | """
Support for the Bbox routers of the French ISP (FAI) Bouygues.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.bbox/
"""
from collections import namedtuple
import logging
from datetime import timedelta
import homeassistant.util.dt as dt_util
from homeassistant.components.device_tracker import DOMAIN, DeviceScanner
from homeassistant.util import Throttle
REQUIREMENTS = ['pybbox==0.0.5-alpha']
_LOGGER = logging.getLogger(__name__)
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=60)
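# The @Throttle(MIN_TIME_BETWEEN_SCANS) decorator below makes _update_info a
# no-op (returning None) when called again within 60 seconds, so repeated
# scan_devices() calls hit the Bbox API at most once per minute.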
def get_scanner(hass, config):
"""Validate the configuration and return a Bbox scanner."""
scanner = BboxDeviceScanner(config[DOMAIN])
return scanner if scanner.success_init else None
Device = namedtuple('Device', ['mac', 'name', 'ip', 'last_update'])
class BboxDeviceScanner(DeviceScanner):
"""This class scans for devices connected to the bbox."""
def __init__(self, config):
"""Initialize the scanner."""
self.last_results = [] # type: List[Device]
self.success_init = self._update_info()
_LOGGER.info("Scanner initialized")
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self._update_info()
return [device.mac for device in self.last_results]
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
filter_named = [result.name for result in self.last_results if
result.mac == device]
if filter_named:
return filter_named[0]
return None
@Throttle(MIN_TIME_BETWEEN_SCANS)
def _update_info(self):
"""Check the Bbox for devices.
Returns boolean if scanning successful.
"""
_LOGGER.info("Scanning...")
import pybbox
box = pybbox.Bbox()
result = box.get_all_connected_devices()
now = dt_util.now()
last_results = []
for device in result:
if device['active'] != 1:
continue
last_results.append(
Device(device['macaddress'], device['hostname'],
device['ipaddress'], now))
self.last_results = last_results
_LOGGER.info("Scan successful")
return True
| 28.337349 | 75 | 0.655612 |
e97258a9c3cf3c6a3199ed6280c6a1ddfab6ee1a | 308 | py | Python | my-project/app/main.py | athiranair2000/Log-Me | eb6cce52116beb20955a5f6d19eb2d510f663c8a | [
"MIT"
] | null | null | null | my-project/app/main.py | athiranair2000/Log-Me | eb6cce52116beb20955a5f6d19eb2d510f663c8a | [
"MIT"
] | 1 | 2019-10-07T13:21:40.000Z | 2019-10-07T13:42:56.000Z | my-project/app/main.py | athiranair2000/Log-Me | eb6cce52116beb20955a5f6d19eb2d510f663c8a | [
"MIT"
] | null | null | null | from flask import Blueprint, render_template
from . import db
main = Blueprint('main', __name__)
auth = Blueprint('auth', __name__)  # `auth` was referenced below but never defined
@main.route('/')
def index():
    return render_template('index.html')
@auth.route('/login')
def login():  # renamed from a duplicate `def index()` that shadowed the first
    return render_template('login.html')
@auth.route('/signup')
def signup():
return render_template('SignUp.html')
| 17.111111 | 41 | 0.698052 |
63a7134e0ebbcc28b1cd1dcef523ea47918d7ff5 | 16,917 | py | Python | git/cmd.py | bu-ist/GitPython | 2fc864356ef1c4a9112dcefbae02a606df59840c | [
"BSD-3-Clause"
] | 1 | 2017-03-03T05:42:29.000Z | 2017-03-03T05:42:29.000Z | git/cmd.py | bu-ist/GitPython | 2fc864356ef1c4a9112dcefbae02a606df59840c | [
"BSD-3-Clause"
] | null | null | null | git/cmd.py | bu-ist/GitPython | 2fc864356ef1c4a9112dcefbae02a606df59840c | [
"BSD-3-Clause"
] | null | null | null | # cmd.py
# Copyright (C) 2008, 2009 Michael Trier (mtrier@gmail.com) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
import os, sys
from util import (
LazyMixin,
stream_copy
)
from exc import GitCommandError
from subprocess import (
call,
Popen,
PIPE
)
execute_kwargs = ('istream', 'with_keep_cwd', 'with_extended_output',
'with_exceptions', 'as_process',
'output_stream' )
__all__ = ('Git', )
def dashify(string):
return string.replace('_', '-')
class Git(LazyMixin):
"""
The Git class manages communication with the Git binary.
It provides a convenient interface to calling the Git binary, such as in::
g = Git( git_dir )
g.init() # calls 'git init' program
rval = g.ls_files() # calls 'git ls-files' program
``Debugging``
    Set the GIT_PYTHON_TRACE environment variable to print each invocation
of the command to stdout.
Set its value to 'full' to see details about the returned values.
"""
__slots__ = ("_working_dir", "cat_file_all", "cat_file_header", "_version_info")
# CONFIGURATION
# The size in bytes read from stdout when copying git's output to another stream
max_chunk_size = 1024*64
# Enables debugging of GitPython's git commands
GIT_PYTHON_TRACE = os.environ.get("GIT_PYTHON_TRACE", False)
# Provide the full path to the git executable. Otherwise it assumes git is in the path
GIT_PYTHON_GIT_EXECUTABLE = os.environ.get("GIT_PYTHON_GIT_EXECUTABLE", 'git')
class AutoInterrupt(object):
"""Kill/Interrupt the stored process instance once this instance goes out of scope. It is
used to prevent processes piling up in case iterators stop reading.
Besides all attributes are wired through to the contained process object.
The wait method was overridden to perform automatic status code checking
and possibly raise."""
__slots__= ("proc", "args")
def __init__(self, proc, args ):
self.proc = proc
self.args = args
def __del__(self):
# did the process finish already so we have a return code ?
if self.proc.poll() is not None:
return
# can be that nothing really exists anymore ...
if os is None:
return
# try to kill it
try:
os.kill(self.proc.pid, 2) # interrupt signal
except AttributeError:
# try windows
# for some reason, providing None for stdout/stderr still prints something. This is why
                # we simply use the shell and redirect to nul. It's slower than CreateProcess; the question
                # is whether we really want to see all these messages. It's annoying no matter what.
call(("TASKKILL /F /T /PID %s 2>nul 1>nul" % str(self.proc.pid)), shell=True)
# END exception handling
def __getattr__(self, attr):
return getattr(self.proc, attr)
def wait(self):
"""Wait for the process and return its status code.
:raise GitCommandError: if the return status is not 0"""
status = self.proc.wait()
if status != 0:
raise GitCommandError(self.args, status, self.proc.stderr.read())
# END status handling
return status
# END auto interrupt
class CatFileContentStream(object):
"""Object representing a sized read-only stream returning the contents of
an object.
It behaves like a stream, but counts the data read and simulates an empty
stream once our sized content region is empty.
If not all data is read to the end of the objects's lifetime, we read the
rest to assure the underlying stream continues to work"""
__slots__ = ('_stream', '_nbr', '_size')
def __init__(self, size, stream):
self._stream = stream
self._size = size
self._nbr = 0 # num bytes read
# special case: if the object is empty, has null bytes, get the
# final newline right away.
if size == 0:
stream.read(1)
# END handle empty streams
def read(self, size=-1):
bytes_left = self._size - self._nbr
if bytes_left == 0:
return ''
if size > -1:
# assure we don't try to read past our limit
size = min(bytes_left, size)
else:
# they try to read all, make sure its not more than what remains
size = bytes_left
# END check early depletion
data = self._stream.read(size)
self._nbr += len(data)
# check for depletion, read our final byte to make the stream usable by others
if self._size - self._nbr == 0:
self._stream.read(1) # final newline
# END finish reading
return data
def readline(self, size=-1):
if self._nbr == self._size:
return ''
# clamp size to lowest allowed value
bytes_left = self._size - self._nbr
if size > -1:
size = min(bytes_left, size)
else:
size = bytes_left
# END handle size
data = self._stream.readline(size)
self._nbr += len(data)
# handle final byte
if self._size - self._nbr == 0:
self._stream.read(1)
# END finish reading
return data
def readlines(self, size=-1):
if self._nbr == self._size:
return list()
# leave all additional logic to our readline method, we just check the size
out = list()
nbr = 0
while True:
line = self.readline()
if not line:
break
out.append(line)
if size > -1:
nbr += len(line)
if nbr > size:
break
# END handle size constraint
# END readline loop
return out
def __iter__(self):
return self
def next(self):
line = self.readline()
if not line:
raise StopIteration
return line
def __del__(self):
bytes_left = self._size - self._nbr
if bytes_left:
# read and discard - seeking is impossible within a stream
# includes terminating newline
self._stream.read(bytes_left + 1)
# END handle incomplete read
def __init__(self, working_dir=None):
"""Initialize this instance with:
:param working_dir:
Git directory we should work in. If None, we always work in the current
directory as returned by os.getcwd().
It is meant to be the working tree directory if available, or the
.git directory in case of bare repositories."""
super(Git, self).__init__()
self._working_dir = working_dir
# cached command slots
self.cat_file_header = None
self.cat_file_all = None
def __getattr__(self, name):
"""A convenience method as it allows to call the command as if it was
an object.
:return: Callable object that will execute call _call_process with your arguments."""
if name[0] == '_':
return LazyMixin.__getattr__(self, name)
return lambda *args, **kwargs: self._call_process(name, *args, **kwargs)
def _set_cache_(self, attr):
if attr == '_version_info':
            # We only use the first 4 numbers, as everything else could be strings in fact (on windows)
version_numbers = self._call_process('version').split(' ')[2]
self._version_info = tuple(int(n) for n in version_numbers.split('.')[:4])
else:
super(Git, self)._set_cache_(attr)
#END handle version info
@property
def working_dir(self):
""":return: Git directory we are working on"""
return self._working_dir
@property
def version_info(self):
"""
:return: tuple(int, int, int, int) tuple with integers representing the major, minor
and additional version numbers as parsed from git version.
This value is generated on demand and is cached"""
return self._version_info
def execute(self, command,
istream=None,
with_keep_cwd=False,
with_extended_output=False,
with_exceptions=True,
as_process=False,
output_stream=None,
**subprocess_kwargs
):
"""Handles executing the command on the shell and consumes and returns
the returned information (stdout)
:param command:
The command argument list to execute.
It should be a string, or a sequence of program arguments. The
program to execute is the first item in the args sequence or string.
:param istream:
Standard input filehandle passed to subprocess.Popen.
:param with_keep_cwd:
Whether to use the current working directory from os.getcwd().
The cmd otherwise uses its own working_dir that it has been initialized
with if possible.
:param with_extended_output:
Whether to return a (status, stdout, stderr) tuple.
:param with_exceptions:
Whether to raise an exception when git returns a non-zero status.
:param as_process:
Whether to return the created process instance directly from which
streams can be read on demand. This will render with_extended_output and
with_exceptions ineffective - the caller will have
to deal with the details himself.
It is important to note that the process will be placed into an AutoInterrupt
wrapper that will interrupt the process once it goes out of scope. If you
use the command in iterators, you should pass the whole process instance
instead of a single stream.
:param output_stream:
If set to a file-like object, data produced by the git command will be
output to the given stream directly.
This feature only has any effect if as_process is False. Processes will
always be created with a pipe due to issues with subprocess.
This merely is a workaround as data will be copied from the
output pipe to the given output stream directly.
:param subprocess_kwargs:
Keyword arguments to be passed to subprocess.Popen. Please note that
some of the valid kwargs are already set by this method, the ones you
specify may not be the same ones.
:return:
* str(output) if extended_output = False (Default)
* tuple(int(status), str(stdout), str(stderr)) if extended_output = True
            if output_stream is True, the stdout value will be your output stream:
* output_stream if extended_output = False
* tuple(int(status), output_stream, str(stderr)) if extended_output = True
:raise GitCommandError:
:note:
If you add additional keyword arguments to the signature of this method,
you must update the execute_kwargs tuple housed in this module."""
if self.GIT_PYTHON_TRACE and not self.GIT_PYTHON_TRACE == 'full':
print ' '.join(command)
# Allow the user to have the command executed in their working dir.
if with_keep_cwd or self._working_dir is None:
cwd = os.getcwd()
else:
cwd=self._working_dir
# Start the process
proc = Popen(command,
cwd=cwd,
stdin=istream,
stderr=PIPE,
stdout=PIPE,
                     close_fds=(os.name=='posix'),# unsupported on windows
**subprocess_kwargs
)
if as_process:
return self.AutoInterrupt(proc, command)
# Wait for the process to return
status = 0
stdout_value = ''
stderr_value = ''
try:
if output_stream is None:
stdout_value, stderr_value = proc.communicate()
# strip trailing "\n"
if stdout_value.endswith("\n"):
stdout_value = stdout_value[:-1]
if stderr_value.endswith("\n"):
stderr_value = stderr_value[:-1]
status = proc.returncode
else:
stream_copy(proc.stdout, output_stream, self.max_chunk_size)
stdout_value = output_stream
stderr_value = proc.stderr.read()
# strip trailing "\n"
if stderr_value.endswith("\n"):
stderr_value = stderr_value[:-1]
status = proc.wait()
# END stdout handling
finally:
proc.stdout.close()
proc.stderr.close()
if self.GIT_PYTHON_TRACE == 'full':
cmdstr = " ".join(command)
if stderr_value:
print "%s -> %d; stdout: '%s'; stderr: '%s'" % (cmdstr, status, stdout_value, stderr_value)
elif stdout_value:
print "%s -> %d; stdout: '%s'" % (cmdstr, status, stdout_value)
else:
print "%s -> %d" % (cmdstr, status)
# END handle debug printing
if with_exceptions and status != 0:
raise GitCommandError(command, status, stderr_value)
# Allow access to the command's status code
if with_extended_output:
return (status, stdout_value, stderr_value)
else:
return stdout_value
def transform_kwargs(self, **kwargs):
"""Transforms Python style kwargs into git command line options."""
args = list()
for k, v in kwargs.items():
if len(k) == 1:
if v is True:
args.append("-%s" % k)
elif type(v) is not bool:
args.append("-%s%s" % (k, v))
else:
if v is True:
args.append("--%s" % dashify(k))
elif type(v) is not bool:
args.append("--%s=%s" % (dashify(k), v))
return args
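    # Illustrative mapping (not from the original docs):
    #   transform_kwargs(n=True, max_count=10) -> ['-n', '--max-count=10']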
@classmethod
def __unpack_args(cls, arg_list):
if not isinstance(arg_list, (list,tuple)):
return [ str(arg_list) ]
outlist = list()
for arg in arg_list:
            if isinstance(arg, (list, tuple)):  # check the element, not the enclosing list
outlist.extend(cls.__unpack_args( arg ))
# END recursion
else:
outlist.append(str(arg))
# END for each arg
return outlist
def _call_process(self, method, *args, **kwargs):
"""Run the given git command with the specified arguments and return
the result as a String
:param method:
is the command. Contained "_" characters will be converted to dashes,
such as in 'ls_files' to call 'ls-files'.
:param args:
is the list of arguments. If None is included, it will be pruned.
This allows your commands to call git more conveniently as None
is realized as non-existent
:param kwargs:
is a dict of keyword arguments.
This function accepts the same optional keyword arguments
as execute().
``Examples``::
git.rev_list('master', max_count=10, header=True)
:return: Same as ``execute``"""
# Handle optional arguments prior to calling transform_kwargs
# otherwise these'll end up in args, which is bad.
_kwargs = dict()
for kwarg in execute_kwargs:
try:
_kwargs[kwarg] = kwargs.pop(kwarg)
except KeyError:
pass
# Prepare the argument list
opt_args = self.transform_kwargs(**kwargs)
ext_args = self.__unpack_args([a for a in args if a is not None])
args = opt_args + ext_args
call = [self.GIT_PYTHON_GIT_EXECUTABLE, dashify(method)]
call.extend(args)
return self.execute(call, **_kwargs)
def _parse_object_header(self, header_line):
"""
:param header_line:
<hex_sha> type_string size_as_int
:return: (hex_sha, type_string, size_as_int)
:raise ValueError: if the header contains indication for an error due to
incorrect input sha"""
tokens = header_line.split()
if len(tokens) != 3:
if not tokens:
raise ValueError("SHA could not be resolved, git returned: %r" % (header_line.strip()))
else:
raise ValueError("SHA %s could not be resolved, git returned: %r" % (tokens[0], header_line.strip()))
# END handle actual return value
# END error handling
if len(tokens[0]) != 40:
raise ValueError("Failed to parse header: %r" % header_line)
return (tokens[0], tokens[1], int(tokens[2]))
def __prepare_ref(self, ref):
# required for command to separate refs on stdin
refstr = str(ref) # could be ref-object
if refstr.endswith("\n"):
return refstr
return refstr + "\n"
def __get_persistent_cmd(self, attr_name, cmd_name, *args,**kwargs):
cur_val = getattr(self, attr_name)
if cur_val is not None:
return cur_val
options = { "istream" : PIPE, "as_process" : True }
options.update( kwargs )
cmd = self._call_process( cmd_name, *args, **options )
setattr(self, attr_name, cmd )
return cmd
def __get_object_header(self, cmd, ref):
cmd.stdin.write(self.__prepare_ref(ref))
cmd.stdin.flush()
return self._parse_object_header(cmd.stdout.readline())
def get_object_header(self, ref):
""" Use this method to quickly examine the type and size of the object behind
the given ref.
:note: The method will only suffer from the costs of command invocation
once and reuses the command in subsequent calls.
:return: (hexsha, type_string, size_as_int)"""
cmd = self.__get_persistent_cmd("cat_file_header", "cat_file", batch_check=True)
return self.__get_object_header(cmd, ref)
def get_object_data(self, ref):
""" As get_object_header, but returns object data as well
:return: (hexsha, type_string, size_as_int,data_string)
:note: not threadsafe"""
hexsha, typename, size, stream = self.stream_object_data(ref)
data = stream.read(size)
del(stream)
return (hexsha, typename, size, data)
def stream_object_data(self, ref):
"""As get_object_header, but returns the data as a stream
:return: (hexsha, type_string, size_as_int, stream)
:note: This method is not threadsafe, you need one independent Command instance
per thread to be safe !"""
cmd = self.__get_persistent_cmd("cat_file_all", "cat_file", batch=True)
hexsha, typename, size = self.__get_object_header(cmd, ref)
return (hexsha, typename, size, self.CatFileContentStream(size, cmd.stdout))
def clear_cache(self):
"""Clear all kinds of internal caches to release resources.
Currently persistent commands will be interrupted.
:return: self"""
self.cat_file_all = None
self.cat_file_header = None
return self
| 31.212177 | 105 | 0.693385 |
aa981dfa79733c3174c0ae5a76aab6557b73cc33 | 3,053 | py | Python | src/explainer/metrics.py | saromanov/explainer | aaa5eeb0316802779891119ed80e946b1b1b39a6 | [
"MIT"
] | null | null | null | src/explainer/metrics.py | saromanov/explainer | aaa5eeb0316802779891119ed80e946b1b1b39a6 | [
"MIT"
] | null | null | null | src/explainer/metrics.py | saromanov/explainer | aaa5eeb0316802779891119ed80e946b1b1b39a6 | [
"MIT"
] | null | null | null | from typing import List, Dict
import pandas as pd
from parse import Analyzer
from task import Task
from serializer import Serializer
RawMetrics = List[Analyzer]
Tasks = List[Task]
class Metrics:
    ''' consumes analyzer objects
    to construct the metrics output
'''
def __init__(self, raw_metrics:RawMetrics):
self._dframes, self._data = self._to_data_frame_and_dict(raw_metrics)
def _to_data_frame_and_dict(self, data:RawMetrics):
        ''' convert a list of raw metrics
        to a pandas data frame and a dictionary
'''
frames = []
data_resp = {}
for tasks in data:
result = {}
name = 'default'
for t in tasks:
names = t.report_names()
report = t.report()
name = t.title_name()
for r in names:
if r not in result:
result[r] = [report[r]]
else:
result[r].append(report[r])
df = pd.DataFrame(result)
df.name = name
frames.append(df)
data_resp[name] = df
return frames, data_resp
def __getitem__(self, name) -> pd.DataFrame:
return self._data[name]
def __str__(self) -> str:
return 'Number of tasks: {0}'.format(len(self._data))
    def stat(self, name, *args, **kwargs) -> Dict[str, float]:
        '''
        return basic statistics about execution
        (forwards the required `task` keyword through to _apply_stat)
        '''
        return {'mean': self.mean(name, *args, **kwargs),
                'median': self.median(name, *args, **kwargs)}
def _apply_stat(self, name, func, *args, **kwargs) -> float:
'''
general method for applying stats methods
from pandas data frame
'''
task = kwargs.get('task')
if not task:
raise Exception('task name is not defined')
return getattr(self._data[task][name], func)()
def median(self, name, *args, **kwargs) -> float:
''' return median value from results
'''
return self._apply_stat(name, 'median', *args, **kwargs)
def mean(self, name, *args, **kwargs) -> float:
''' return mean value from results
'''
return self._apply_stat(name, 'mean', *args, **kwargs)
def std(self, name, *args, **kwargs) -> float:
''' return std value from results
'''
return self._apply_stat(name, 'std', *args, **kwargs)
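# Typical use -- a sketch; the task/metric names here are assumptions:
#   metrics = Metrics(raw_metrics)              # raw_metrics: List[Analyzer]
#   df = metrics['classification']              # one DataFrame per task
#   metrics.mean('accuracy', task='classification')
#   metrics.stat('accuracy', task='classification')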
class MetricsStore(Serializer):
def __init__(self, task_name, metric_names, method_names, metric:Metrics, task:Task, /):
self.task_name = task_name
self.metrics = self._set_metrics(metric_names, method_names, metric, task)
def _set_metrics(self, metric_names, method_names, metrics:Metrics, task:Task):
result = {}
for m in metric_names:
for method_name in method_names:
                # key on metric *and* method so results for one metric don't
                # overwrite another's (the original keyed on method name alone)
                result[f'{m}_{method_name}'] = getattr(metrics, method_name)(m, task=task)
return result
def from_csv(path) -> pd.DataFrame:
return pd.read_csv(path) | 32.136842 | 92 | 0.572224 |
6bad296316b09c7766e2b6d8aad090b445ad3925 | 2,433 | py | Python | tests/tests.py | sanketsaurav/django-redis-ratelimit | 4c901704dd59a7a6058c5b27b7490e0dc939c897 | [
"MIT"
] | null | null | null | tests/tests.py | sanketsaurav/django-redis-ratelimit | 4c901704dd59a7a6058c5b27b7490e0dc939c897 | [
"MIT"
] | null | null | null | tests/tests.py | sanketsaurav/django-redis-ratelimit | 4c901704dd59a7a6058c5b27b7490e0dc939c897 | [
"MIT"
] | null | null | null | from django.conf.urls import url
from django.test import RequestFactory, TestCase
from django.test.utils import override_settings
from django.views import View
from redis_ratelimit import ratelimit
from redis_ratelimit.exceptions import RateLimited
from redis_ratelimit.utils import parse_rate
from redis_ratelimit.decorators import ignore_redis_errors
from redis.exceptions import TimeoutError
factory = RequestFactory()
class RateParsingTests(TestCase):
def test_rate_parsing(self):
tests = (
('100/s', (100, 1)),
('100/10s', (100, 10)),
('100/m', (100, 60)),
('400/10m', (400, 10 * 60)),
('600/h', (600, 60 * 60)),
('800/d', (800, 24 * 60 * 60)),
)
for input, output in tests:
assert output == parse_rate(input)
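# For reference, a minimal parser sketch that satisfies the table above
# (illustrative only; the real redis_ratelimit.utils.parse_rate may differ):
#   import re
#   def parse_rate_sketch(rate):
#       count, period = rate.split('/')
#       num, unit = re.match(r'(\d*)([smhd])', period).groups()
#       return int(count), int(num or 1) * {'s': 1, 'm': 60, 'h': 3600, 'd': 86400}[unit]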
class DecoratorTests(TestCase):
def test_no_rate(self):
@ratelimit()
def view(request):
return True
req = factory.get('/')
assert view(req)
class RateLimitTests(TestCase):
def test_method_decorator(self):
@ratelimit(rate='5/s')
def view(request):
return True
class DynamicUrlPattern:
urlpatterns = [url(r'', view)]
with override_settings(ROOT_URLCONF=DynamicUrlPattern):
for _ in range(5):
req = factory.get('/')
view(req)
with self.assertRaises(RateLimited):
req = factory.get('/')
view(req)
def test_cbv_decorator(self):
class Cbv(View):
@ratelimit(rate='5/s')
def get(self, request):
return True
class DynamicUrlPattern:
urlpatterns = [url(r'', Cbv.as_view())]
with override_settings(ROOT_URLCONF=DynamicUrlPattern):
for _ in range(5):
req = factory.get('/')
Cbv.as_view()(req)
with self.assertRaises(RateLimited):
req = factory.get('/')
Cbv.as_view()(req)
class IgnoreRedisErrorsTest(TestCase):
def test_invokes_function(self):
@ignore_redis_errors
def fake_rate_limited():
return True
assert fake_rate_limited()
def test_error(self):
@ignore_redis_errors
def fake_rate_limited():
raise TimeoutError
assert fake_rate_limited() == False
| 26.736264 | 63 | 0.58282 |
a9f3d24d64a2bee5e7fc574baf7724b0df8ff5e6 | 1,490 | py | Python | algorithm/association_rules.py | Sirius207/Apriori-Algorithm | d299575c46ae4eee28ee5ccc3d9cadefbd21c8d3 | [
"MIT"
] | 5 | 2019-03-06T02:15:48.000Z | 2021-10-01T20:01:34.000Z | algorithm/association_rules.py | Sirius207/Frequent-pattern-Algorithm | d299575c46ae4eee28ee5ccc3d9cadefbd21c8d3 | [
"MIT"
] | null | null | null | algorithm/association_rules.py | Sirius207/Frequent-pattern-Algorithm | d299575c46ae4eee28ee5ccc3d9cadefbd21c8d3 | [
"MIT"
] | null | null | null | # pattern format: apple-egg-pork
def str_to_set(set_string):
return set(set_string.split('-'))
def find_subset_string(pattern):
subset = find_subset(list(pattern))
# pop origin set
subset.pop()
return [
'-'.join(items)
for items in subset
if len(items)
]
def find_subset(old_set):
if len(old_set) == 0:
        return [[]]  # the empty set has exactly one subset; was `return[set]`, a bug
elif len(old_set) == 1:
return [[]] + [old_set]
else:
rest = find_subset(old_set[1:])
a_list = []
for item in rest:
b_list = [old_set[0]]
b_list += item
a_list.append(b_list)
return rest + a_list
def find_rules(fp_dict, min_confidence):
rules = []
pattern_string_list = fp_dict.keys()
for pattern_string in fp_dict:
pattern_set = str_to_set(pattern_string)
subsets_string = find_subset_string(pattern_set)
for subset_string in subsets_string:
if subset_string in pattern_string_list:
pattern_support = fp_dict[pattern_string]
subset_support = fp_dict[subset_string]
confidence = pattern_support / subset_support
if confidence >= min_confidence:
difference_set = str_to_set(pattern_string) - str_to_set(subset_string)
rule = '{}->{}'.format(subset_string, '-'.join(difference_set))
rules.append([rule,confidence,pattern_support])
return rules
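if __name__ == '__main__':
    # Minimal self-check (not part of the original module). Supports as mined
    # by e.g. Apriori; with min_confidence 0.7 this prints the two rules
    # ['apple->egg', 0.75, 3] and ['egg->apple', 1.0, 3] (in either order).
    demo_fp = {'apple': 4, 'egg': 3, 'apple-egg': 3}
    for rule in find_rules(demo_fp, 0.7):
        print(rule)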
| 27.090909 | 91 | 0.6 |
1090d06af511d0c369e841770251735d919f31a1 | 12,168 | py | Python | cifar10_modelwb_defensewb_table1.py | swadeykgp/SymDNN | c489d3d313fe7b35f5a2747ed9705423f5492725 | [
"MIT"
] | null | null | null | cifar10_modelwb_defensewb_table1.py | swadeykgp/SymDNN | c489d3d313fe7b35f5a2747ed9705423f5492725 | [
"MIT"
] | null | null | null | cifar10_modelwb_defensewb_table1.py | swadeykgp/SymDNN | c489d3d313fe7b35f5a2747ed9705423f5492725 | [
"MIT"
] | null | null | null | from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import json
import os
import sys
import time
import warnings
warnings.filterwarnings('ignore')
import torchvision.utils
from torchvision import models
import torchattacks
from torchattacks import *
from torchvision import datasets, transforms
import numpy as np
#import matplotlib.pyplot as plt
import random
import faiss
import sys
#sys.path.insert(1, './cifar10')
sys.path.insert(1, './core')
from patchutils_new import symdnn_purify
import math
class BPDAattack(object):
def __init__(self, model=None, defense=None, device=None, epsilon=None, learning_rate=0.5,
max_iterations=100, clip_min=0, clip_max=1):
self.model = model
self.epsilon = epsilon
self.loss_fn = nn.CrossEntropyLoss(reduction='sum')
self.defense = defense
self.clip_min = clip_min
self.clip_max = clip_max
self.LEARNING_RATE = learning_rate
self.MAX_ITERATIONS = max_iterations
self.device = device
def generate(self, x, y):
"""
Given examples (X_nat, y), returns their adversarial
counterparts with an attack length of epsilon.
"""
adv = x.detach().clone()
adv_purified = x.detach().clone()
lower = np.clip(x.detach().cpu().numpy() - self.epsilon, self.clip_min, self.clip_max)
upper = np.clip(x.detach().cpu().numpy() + self.epsilon, self.clip_min, self.clip_max)
for i in range(self.MAX_ITERATIONS):
#adv_purified = self.defense(adv)
xsym = symdnn_purify(adv, n_clusters, index, centroid_lut, patch_size, stride, channel_count, ana=False, multi=False, instr=False, randomize=True, rlevel=25, rbalance=True, pdf=None)
            # BPDA forward pass must run on the purified input; the stray
            # re-assignment `adv_purified = adv.detach()` that followed here
            # discarded the purification, so we keep the purified tensor
            adv_purified = xsym.detach()
adv_purified.requires_grad_()
adv_purified.retain_grad()
scores = self.model(adv_purified)
loss = self.loss_fn(scores, y)
loss.backward()
grad_sign = adv_purified.grad.data.sign()
# early stop, only for batch_size = 1
# p = torch.argmax(F.softmax(scores), 1)
# if y != p:
# break
adv += self.LEARNING_RATE * grad_sign
adv_img = np.clip(adv.detach().cpu().numpy(), lower, upper)
adv = torch.Tensor(adv_img).to(self.device)
return adv
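# BPDA (Athalye et al., 2018): the non-differentiable purification runs only
# in the forward pass, while the backward pass approximates it by the
# identity so gradients reach the raw adversarial input.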
np.random.seed(0)
use_cuda=False
device='cpu'
batch_size = 1
#batch_size_vanilla = 64
batch_size_vanilla = 1
torch.manual_seed(0)
np.random.seed(0)
random.seed(0)
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
#CHANGEME - put the dataset location
testset = torchvision.datasets.CIFAR10(root='../../dataset', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False)
testloader_vanilla = torch.utils.data.DataLoader(testset, batch_size=batch_size_vanilla, shuffle=False)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
channel_count = 3
stride = 0
n_clusters = 2048
patch_size = (2, 2)
location=False
from modeldefs_wb import *
# Base model for Cifar-10 (data [0,1])
pretrained_base_clampled_gradinit = './cifar10/cifar10_resnet_gradinit_sc_232.pt'
net_std = resnet20()
net_std.load_state_dict(torch.load(pretrained_base_clampled_gradinit))
net_std.eval()
index = faiss.read_index('./cifar10/kmeans_img_k2_s0_c2048_v1_softclamp.index')
centroid_lut = index.reconstruct_n(0, n_clusters)
# Lets check the kind of prediction the net_std is doing
correct = 0
total = 0
net_std.eval()
# Define a custom function that will clamp the images between 0 & 1 , without being too harsh as torch.clamp
def softclamp01(image_tensor):
image_tensor_shape = image_tensor.shape
image_tensor = image_tensor.view(image_tensor.size(0), -1)
image_tensor -= image_tensor.min(1, keepdim=True)[0]
image_tensor /= image_tensor.max(1, keepdim=True)[0]
image_tensor = image_tensor.view(image_tensor_shape)
return image_tensor
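# Unlike torch.clamp, this rescales each image in the batch linearly so its
# own min maps to 0 and its max to 1 (e.g. values spanning [-3, 5] end up
# spanning [0, 1] exactly).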
print("PyTorch", torch.__version__)
print("Torchvision", torchvision.__version__)
print("Torchattacks", torchattacks.__version__)
print("Numpy", np.__version__)
bpda_adversary2 = BPDAattack(net_std, None, None, epsilon=2/255, learning_rate=0.5, max_iterations=100)
bpda_adversary4 = BPDAattack(net_std, None, None, epsilon=4/255, learning_rate=0.5, max_iterations=100)
bpda_adversary8 = BPDAattack(net_std, None, None, epsilon=8/255, learning_rate=0.5, max_iterations=100)
bpda_adversary16 = BPDAattack(net_std, None, None, epsilon=16/255, learning_rate=0.5, max_iterations=100)
atks = [
bpda_adversary4.generate,
bpda_adversary8.generate,
bpda_adversary16.generate,
TIFGSM(net_std, eps=8/255, alpha=2/255, steps=100, diversity_prob=0.5),
AutoAttack(net_std, eps=8/255, n_classes=10, version='standard'), # take this at last if time permits
DIFGSM(net_std, eps=8/255, alpha=2/255, steps=100, diversity_prob=0.5, resize_rate=0.9),
MIFGSM(net_std, eps=8/255, alpha=2/255, steps=100, decay=0.1),
RFGSM(net_std, eps=8/255, alpha=2/255, steps=100),
EOTPGD(net_std, eps=8/255, alpha=2/255, steps=100, eot_iter=2),
APGD(net_std, eps=8/255, steps=100, eot_iter=1, n_restarts=1, loss='ce'),
APGD(net_std, eps=8/255, steps=100, eot_iter=1, n_restarts=1, loss='dlr'),
APGDT(net_std, eps=8/255, steps=100, eot_iter=1, n_restarts=1),
Jitter(net_std, eps=8/255, alpha=2/255, steps=40, scale=10, std=0.1, random_start=True),
CW(net_std, c=1, lr=0.01, steps=100, kappa=0),
FAB(net_std, eps=8/255, steps=100, n_classes=10, n_restarts=1, targeted=False),
FAB(net_std, eps=8/255, steps=100, n_classes=10, n_restarts=1, targeted=True),
Square(net_std, eps=8/255, n_queries=5000, n_restarts=1, loss='ce'),
DeepFool(net_std, steps=100),
TIFGSM(net_std, eps=4/255, alpha=2/255, steps=100, diversity_prob=0.5),
AutoAttack(net_std, eps=4/255, n_classes=10, version='standard'), # take this at last if time permits
DIFGSM(net_std, eps=4/255, alpha=2/255, steps=100, diversity_prob=0.5, resize_rate=0.9),
MIFGSM(net_std, eps=4/255, alpha=2/255, steps=100, decay=0.1),
RFGSM(net_std, eps=4/255, alpha=2/255, steps=100),
EOTPGD(net_std, eps=4/255, alpha=2/255, steps=100, eot_iter=2),
APGD(net_std, eps=4/255, steps=100, eot_iter=1, n_restarts=1, loss='dlr'),
APGDT(net_std, eps=4/255, steps=100, eot_iter=1, n_restarts=1),
Jitter(net_std, eps=4/255, alpha=2/255, steps=40, scale=10, std=0.1, random_start=True),
APGD(net_std, eps=4/255, steps=100, eot_iter=1, n_restarts=1, loss='ce')
]
atk_id = 0
#CHANGEME - select the number of examples to use - 10 means 1000 images, set 5 for 2000 images
#random_indices = list(range(0, len(testset), 5))
random_indices = list(range(0, len(testset), 10))
print(len(random_indices))
#test_subset = torch.utils.data.Subset(testset, random_indices)
#sub_indices = list(range(5))
testset_subset = torch.utils.data.Subset(testset, random_indices)
testloader_subset = torch.utils.data.DataLoader(testset_subset, batch_size=1, shuffle=False)
testloader_subset_vanilla = torch.utils.data.DataLoader(testset_subset, batch_size=batch_size_vanilla, shuffle=False)
print("Adversarial Image & Predicted Label for Symbolic inference")
def analyse_internals(atk, atk_id, rlevel1, rlevel2):
print("-"*70)
#print(atk)
atk_name = str(atk).split('(')[0]
print("Attack params:",atk)
#exit()
correct_base_clean = 0
base_clean = 0
correct_base_perturbed = 0
base_perturbed = 0
correct_sym_clean = 0
sym_clean = 0
correct_sym_robust = 0
sym_robust = 0
multisym_robust = 0
randomized_robust = 0
randomized_fixed_robust = 0
total = 0
net_std.eval()
for images, labels in testloader_subset_vanilla:
#for images, labels in testloader_subset:
#for images, labels in testloader:
start = time.time()
#print(" ******++++++++++++++============= Start of Test image ================+++++++++++******")
#print(" ******++++++++++++++============= Start of Test image ================+++++++++++******")
X1 = images
X1 = softclamp01(X1)
y = labels.to(device)
output = net_std.forward(X1)
for idx, i in enumerate(output):
if torch.argmax(i) == y[idx]:
base_clean += 1
#print("classification: Model: clean base gradinit success. Test Image #: {}, Mispredicted label: {}".format(total+1, torch.argmax(i)))
#else:
#print("Misclassification: Model: clean base gradinit. Test Image #: {}, Mispredicted label: {}".format(total+1, torch.argmax(i)))
# Attacked base gradinit inference
X3_in = softclamp01(images)
X3 = atk(X3_in, labels)
output = net_std.forward(X3)
for idx, i in enumerate(output):
if torch.argmax(i) == y[idx]:
base_perturbed += 1
#else:
# Whenever there is an error, print the image
#print("Misclassification: Model: perturbed base gradinit model. Test Image #: {}, Mispredicted label: {}".format(total+1, torch.argmax(i)))
## Attacked symbolic inference
#pfm = X3.data.cpu().numpy().copy()
##print(images.shape)
##print(X3.shape)
#Xsym = symdnn_purify(pfm, n_clusters, index, centroid_lut, patch_size, stride, channel_count)
##print(Xsym.shape)
#output = net_std.forward(Xsym)
#for idx, i in enumerate(output):
# if torch.argmax(i) == y[idx]:
# sym_robust += 1
# #else:
# # Whenever there is an error, print the image
# #print("Misclassification: Model: Perturbed symbolic. Test Image #: {}, Mispredicted label: {}".format(total+1, torch.argmax(i)))
## randomized purification
#pfm = X3.data.cpu().numpy().copy()
#Xsym = symdnn_purify(pfm, n_clusters, index, centroid_lut, patch_size, stride, channel_count,ana=False, multi=False, instr=False, randomize=True, rlevel=rlevel1, rbalance=True, pdf=None)
#output = net_std.forward(Xsym)
#for idx, i in enumerate(output):
# if torch.argmax(i) == y[idx]:
# randomized_robust += 1
# #else:
# # Whenever there is an error, print the image
# #print("Misclassification: Model: Perturbed randomized symbolic. Test Image #: {}, Mispredicted label: {}".format(total+1, torch.argmax(i)))
## randomized fixed purification
#pfm = X3.data.cpu().numpy().copy()
#Xsym = symdnn_purify(pfm, n_clusters, index, centroid_lut, patch_size, stride, channel_count,ana=False, multi=False, instr=False, randomize=True, rlevel=rlevel2, rbalance=False, pdf=None)
#output = net_std.forward(Xsym)
#for idx, i in enumerate(output):
# if torch.argmax(i) == y[idx]:
# randomized_fixed_robust += 1
# #else:
total += batch_size_vanilla
print("WB defense Gradinit model accuracy:{}".format(100 * float(base_clean) / total))
print("WB defense Gradinit model accuracy after attack :{}".format(100 * float(base_perturbed) / total))
##print(" ******++++++++++++++============= End of Test image:{} ================+++++++++++******".format(total))
#print('Attack Name: {}'.format(atk))
    #print('Attack params: {}'.format(atk))
    print('Defense params: {},{}'.format(rlevel1, rlevel2))
print("Final WB defense Gradinit model accuracy:{}".format(100 * float(base_clean) / total))
print("Final WB defense Gradinit model accuracy after attack :{}".format(100 * float(base_perturbed) / total))
for aattkk in atks:
analyse_internals(aattkk, atk_id, 25, 25)
atk_id +=1
| 40.832215 | 208 | 0.655243 |
3057751eefd99124f474e8611465daf1a33b24f4 | 5,548 | py | Python | examples/vds3.py | triumphyuan/idapython | 081b988a03b88867786ad4131269db6930637a5b | [
"BSD-3-Clause"
] | 25 | 2016-06-07T15:41:57.000Z | 2021-12-17T11:03:42.000Z | examples/vds3.py | triumphyuan/idapython | 081b988a03b88867786ad4131269db6930637a5b | [
"BSD-3-Clause"
] | 1 | 2018-01-23T05:39:50.000Z | 2018-01-23T05:39:50.000Z | examples/vds3.py | triumphyuan/idapython | 081b988a03b88867786ad4131269db6930637a5b | [
"BSD-3-Clause"
] | 30 | 2016-01-27T22:47:30.000Z | 2022-03-11T19:56:59.000Z | """ Invert the then and else blocks of a cif_t.
Author: EiNSTeiN_ <einstein@g3nius.org>
This is a rewrite in Python of the vds3 example that comes with hexrays sdk.
The main difference with the original C code is that when we create the inverted
condition object, the newly created cexpr_t instance is given to the hexrays and
must not be freed by swig. To achieve this, we have to change the 'thisown' flag
when appropriate. See http://www.swig.org/Doc1.3/Python.html#Python_nn35
"""
import idautils
import idaapi
import idc
import traceback  # used by the except handlers in hexrays_callback_info
NETNODE_NAME = '$ hexrays-inverted-if'
inverter_actname = "vds3:invert"
class invert_action_handler_t(idaapi.action_handler_t):
def __init__(self, inverter):
idaapi.action_handler_t.__init__(self)
self.inverter = inverter
def activate(self, ctx):
vdui = idaapi.get_tform_vdui(ctx.form)
self.inverter.invert_if_event(vdui)
return 1
def update(self, ctx):
vdui = idaapi.get_tform_vdui(ctx.form)
if vdui:
return idaapi.AST_ENABLE_FOR_FORM
else:
return idaapi.AST_DISABLE_FOR_FORM
class hexrays_callback_info(object):
def __init__(self):
self.vu = None
self.node = idaapi.netnode()
if not self.node.create(NETNODE_NAME):
# node exists
self.load()
else:
self.stored = []
return
def load(self):
self.stored = []
try:
data = self.node.getblob(0, 'I')
if data:
self.stored = eval(data)
print 'Invert-if: Loaded %s' % (repr(self.stored), )
except:
print 'Failed to load invert-if locations'
traceback.print_exc()
return
return
def save(self):
try:
self.node.setblob(repr(self.stored), 0, 'I')
except:
print 'Failed to save invert-if locations'
traceback.print_exc()
return
return
def invert_if(self, cfunc, insn):
if insn.opname != 'if':
return False
cif = insn.details
if not cif.ithen or not cif.ielse:
return False
idaapi.qswap(cif.ithen, cif.ielse)
cond = idaapi.cexpr_t(cif.expr)
notcond = idaapi.lnot(cond)
cond.thisown = 0 # the new wrapper 'notcond' now holds the reference to the cexpr_t
cif.expr.swap(notcond)
return True
def add_location(self, ea):
if ea in self.stored:
self.stored.remove(ea)
else:
self.stored.append(ea)
self.save()
return
def find_if_statement(self, vu):
vu.get_current_item(idaapi.USE_KEYBOARD)
item = vu.item
if item.is_citem() and item.it.op == idaapi.cit_if and item.it.to_specific_type.cif.ielse is not None:
return item.it.to_specific_type
if vu.tail.citype == idaapi.VDI_TAIL and vu.tail.loc.itp == idaapi.ITP_ELSE:
# for tail marks, we know only the corresponding ea,
# not the pointer to if-statement
# find it by walking the whole ctree
class if_finder_t(idaapi.ctree_visitor_t):
def __init__(self, ea):
idaapi.ctree_visitor_t.__init__(self, idaapi.CV_FAST | idaapi.CV_INSNS)
self.ea = ea
self.found = None
return
def visit_insn(self, i):
if i.op == idaapi.cit_if and i.ea == self.ea:
self.found = i
return 1 # stop enumeration
return 0
iff = if_finder_t(vu.tail.loc.ea)
if iff.apply_to(vu.cfunc.body, None):
return iff.found
return
def invert_if_event(self, vu):
cfunc = vu.cfunc.__deref__()
i = self.find_if_statement(vu)
if not i:
return False
if self.invert_if(cfunc, i):
vu.refresh_ctext()
self.add_location(i.ea)
return True
def restore(self, cfunc):
class visitor(idaapi.ctree_visitor_t):
def __init__(self, inverter, cfunc):
idaapi.ctree_visitor_t.__init__(self, idaapi.CV_FAST | idaapi.CV_INSNS)
self.inverter = inverter
self.cfunc = cfunc
return
def visit_insn(self, i):
try:
if i.op == idaapi.cit_if and i.ea in self.inverter.stored:
self.inverter.invert_if(self.cfunc, i)
except:
traceback.print_exc()
return 0 # continue enumeration
visitor(self, cfunc).apply_to(cfunc.body, None)
return
def event_callback(self, event, *args):
if event == idaapi.hxe_populating_popup:
form, phandle, vu = args
res = idaapi.attach_action_to_popup(vu.ct, None, inverter_actname)
elif event == idaapi.hxe_maturity:
cfunc, maturity = args
if maturity == idaapi.CMAT_FINAL:
self.restore(cfunc)
return 0
if idaapi.init_hexrays_plugin():
i = hexrays_callback_info()
idaapi.register_action(
idaapi.action_desc_t(
inverter_actname,
"Invert then/else",
invert_action_handler_t(i),
"I"))
idaapi.install_hexrays_callback(i.event_callback)
else:
print 'invert-if: hexrays is not available.'
| 27.60199 | 110 | 0.576424 |
b92643da0b06c09937b7e848c6190c92c44754f2 | 28,477 | py | Python | pynitrokey/start/gnuk_token.py | fayrlight/pynitrokey | c6a93da7a811d34213746b60fab22affb3616a88 | [
"Apache-2.0",
"MIT"
] | 15 | 2020-08-05T14:37:37.000Z | 2022-02-20T13:47:41.000Z | pynitrokey/start/gnuk_token.py | fayrlight/pynitrokey | c6a93da7a811d34213746b60fab22affb3616a88 | [
"Apache-2.0",
"MIT"
] | 153 | 2020-06-22T13:09:41.000Z | 2022-03-31T10:25:14.000Z | pynitrokey/start/gnuk_token.py | fayrlight/pynitrokey | c6a93da7a811d34213746b60fab22affb3616a88 | [
"Apache-2.0",
"MIT"
] | 4 | 2021-04-06T07:08:59.000Z | 2022-02-14T14:26:38.000Z | """
gnuk_token.py - a library for Gnuk Token
Copyright (C) 2011, 2012, 2013, 2015, 2017, 2018
Free Software Initiative of Japan
Author: NIIBE Yutaka <gniibe@fsij.org>
This file is a part of Gnuk, a GnuPG USB Token implementation.
Gnuk is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Gnuk is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import logging
from struct import *
import binascii
import usb, time
from array import array
# Possible Gnuk Token products
from pynitrokey.start.usb_strings import get_dict_for_device
USB_PRODUCT_LIST=[
{ 'vendor' : 0x234b, 'product' : 0x0000 }, # FSIJ Gnuk Token
{ 'vendor' : 0x20a0, 'product' : 0x4211 }, # Nitrokey Start
{ 'vendor' : 0x1209, 'product' : 0x2440 }, # GnuPG e.V.
]
USB_PRODUCT_LIST_TUP = [
(0x234b, 0x0000), # FSIJ Gnuk Token
(0x20a0, 0x4211), # Nitrokey Start
(0x1209, 0x2440), # GnuPG e.V.
]
# USB class, subclass, protocol
CCID_CLASS = 0x0B
CCID_SUBCLASS = 0x00
CCID_PROTOCOL_0 = 0x00
HID_CLASS = 0x03
HID_SUBCLASS_NO_BOOT = 0x00
HID_PROTOCOL_0 = 0x00
def icc_compose(msg_type, data_len, slot, seq, param, data):
return pack('<BiBBBH', msg_type, data_len, slot, seq, 0, param) + data
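# CCID header layout: bMessageType, dwLength (little-endian int32), bSlot,
# bSeq, one reserved zero byte, a 16-bit parameter, then the abData payload.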
def iso7816_compose(ins, p1, p2, data, cls=0x00, le=None):
data_len = len(data)
if data_len == 0:
if not le:
return pack('>BBBB', cls, ins, p1, p2)
else:
return pack('>BBBBB', cls, ins, p1, p2, le)
else:
if not le:
return pack('>BBBBB', cls, ins, p1, p2, data_len) + data
else:
return pack('>BBBBB', cls, ins, p1, p2, data_len) \
+ data + pack('>B', le)
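# For instance (illustrative): iso7816_compose(0x20, 0x00, 0x81, b'123456')
# yields b'\x00\x20\x00\x81\x06' + b'123456' -- CLA INS P1 P2 Lc, then data.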
# This class only supports Gnuk (for now)
class gnuk_token(object):
def __init__(self, device, configuration, interface):
"""
__init__(device, configuration, interface) -> None
Initialize the device.
device: usb.Device object.
configuration: configuration number.
        interface: usb.Interface object representing the interface and alternate setting.
"""
if interface.interfaceClass != CCID_CLASS:
raise ValueError("Wrong interface class")
if interface.interfaceSubClass != CCID_SUBCLASS:
raise ValueError("Wrong interface sub class")
self.__devhandle = device.open()
self.__devhandle.claimInterface(interface)
self.__devhandle.setAltInterface(interface)
self.__intf = interface.interfaceNumber
self.__alt = interface.alternateSetting
self.__conf = configuration
self.__hid_intf = None
for intf in configuration.interfaces:
for alt in intf:
if alt.interfaceClass == HID_CLASS and \
alt.interfaceSubClass == HID_SUBCLASS_NO_BOOT and \
alt.interfaceProtocol == HID_PROTOCOL_0:
self.__hid_intf = alt.interfaceNumber
self.__bulkout = 1
self.__bulkin = 0x81
self.__timeout = 10000
self.__seq = 0
self.logger = logging.getLogger('gnuk_token')
def set_logger(self, logger: logging.Logger):
self.logger = logger.getChild('gnuk_token')
def local_print(self, message: str, verbose=False):
self.logger.debug('print: {}'.format(message))
if verbose:
print(message)
def get_string(self, num):
return self.__devhandle.getString(num, 512)
def increment_seq(self):
self.__seq = (self.__seq + 1) & 0xff
def reset_device(self):
try:
self.__devhandle.reset()
except:
pass
def release_gnuk(self):
self.__devhandle.releaseInterface()
def stop_gnuk(self):
self.__devhandle.releaseInterface()
if self.__hid_intf:
self.__devhandle.detachKernelDriver(self.__hid_intf)
self.__devhandle.setConfiguration(0)
return
def mem_info(self):
mem = self.__devhandle.controlMsg(requestType = 0xc0, request = 0,
buffer = 8, value = 0, index = 0,
timeout = 10)
start = ((mem[3]*256 + mem[2])*256 + mem[1])*256 + mem[0]
end = ((mem[7]*256 + mem[6])*256 + mem[5])*256 + mem[4]
return (start, end)
def download(self, start, data, verbose=False, progress_func=None):
addr = start
addr_end = (start + len(data)) & 0xffffff00
i = int((addr - 0x20000000) / 0x100)
j = 0
self.local_print("start %08x" % addr, verbose)
self.local_print("end %08x" % addr_end)
if progress_func:
progress_func(0)
while addr < addr_end:
if progress_func:
progress_func((addr-start)/(addr_end-start))
self.local_print("# %08x: %d : %d" % (addr, i, 256), verbose)
self.__devhandle.controlMsg(requestType = 0x40, request = 1,
buffer = data[j*256:j*256+256],
value = i, index = 0, timeout = 10)
i = i+1
j = j+1
addr = addr + 256
residue = len(data) % 256
if residue != 0:
self.local_print("# %08x: %d : %d" % (addr, i, residue), verbose)
self.__devhandle.controlMsg(requestType = 0x40, request = 1,
buffer = data[j*256:],
value = i, index = 0, timeout = 10)
def execute(self, last_addr):
i = int((last_addr - 0x20000000) / 0x100)
o = (last_addr - 0x20000000) % 0x100
self.__devhandle.controlMsg(requestType = 0x40, request = 2,
buffer = None, value = i, index = o,
timeout = 10)
def icc_get_result(self):
usbmsg = self.__devhandle.bulkRead(self.__bulkin, 1024, self.__timeout)
if len(usbmsg) < 10:
self.local_print(usbmsg, True)
raise ValueError("icc_get_result")
msg = array('B', usbmsg)
msg_type = msg[0]
data_len = msg[1] + (msg[2]<<8) + (msg[3]<<16) + (msg[4]<<24)
slot = msg[5]
seq = msg[6]
status = msg[7]
error = msg[8]
chain = msg[9]
data = msg[10:]
# XXX: check msg_type, data_len, slot, seq, error
return (status, chain, data)
def icc_get_status(self):
msg = icc_compose(0x65, 0, 0, self.__seq, 0, b"")
self.__devhandle.bulkWrite(self.__bulkout, msg, self.__timeout)
self.increment_seq()
status, chain, data = self.icc_get_result()
# XXX: check chain, data
return status
def icc_power_on(self):
msg = icc_compose(0x62, 0, 0, self.__seq, 0, b"")
self.__devhandle.bulkWrite(self.__bulkout, msg, self.__timeout)
self.increment_seq()
status, chain, data = self.icc_get_result()
# XXX: check status, chain
self.atr = data
return self.atr
def icc_power_off(self):
msg = icc_compose(0x63, 0, 0, self.__seq, 0, b"")
self.__devhandle.bulkWrite(self.__bulkout, msg, self.__timeout)
self.increment_seq()
status, chain, data = self.icc_get_result()
# XXX: check chain, data
return status
def icc_send_data_block(self, data):
msg = icc_compose(0x6f, len(data), 0, self.__seq, 0, data)
self.__devhandle.bulkWrite(self.__bulkout, msg, self.__timeout)
self.increment_seq()
return self.icc_get_result()
def icc_send_cmd(self, data):
status, chain, data_rcv = self.icc_send_data_block(data)
if chain == 0:
while status == 0x80:
status, chain, data_rcv = self.icc_get_result()
return data_rcv
elif chain == 1:
d = data_rcv
while True:
msg = icc_compose(0x6f, 0, 0, self.__seq, 0x10, b"")
self.__devhandle.bulkWrite(self.__bulkout, msg, self.__timeout)
self.increment_seq()
status, chain, data_rcv = self.icc_get_result()
# XXX: check status
d += data_rcv
if chain == 2:
break
elif chain == 3:
continue
else:
raise ValueError("icc_send_cmd chain")
return d
else:
raise ValueError("icc_send_cmd")
def cmd_get_response(self, expected_len):
result = array('B')
while True:
cmd_data = iso7816_compose(0xc0, 0x00, 0x00, b'') + pack('>B', expected_len)
response = self.icc_send_cmd(cmd_data)
result += response[:-2]
sw = response[-2:]
if sw[0] == 0x90 and sw[1] == 0x00:
return result
elif sw[0] != 0x61:
raise ValueError("%02x%02x" % (sw[0], sw[1]))
else:
expected_len = sw[1]
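    # Status-word convention used throughout (ISO 7816-4): sw1 sw2 of
    # 0x90 0x00 means success; sw1 of 0x61 means "sw2 more response bytes
    # are available", which cmd_get_response above fetches with repeated
    # GET RESPONSE (INS 0xc0) APDUs.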
def cmd_verify(self, who, passwd):
cmd_data = iso7816_compose(0x20, 0x00, 0x80+who, passwd)
sw = self.icc_send_cmd(cmd_data)
if len(sw) != 2:
raise ValueError(sw)
if not (sw[0] == 0x90 and sw[1] == 0x00):
raise ValueError("%02x%02x" % (sw[0], sw[1]))
return True
def cmd_read_binary(self, fileid):
cmd_data = iso7816_compose(0xb0, 0x80+fileid, 0x00, b'')
sw = self.icc_send_cmd(cmd_data)
if len(sw) != 2:
raise ValueError(sw)
if sw[0] != 0x61:
raise ValueError("%02x%02x" % (sw[0], sw[1]))
return self.cmd_get_response(sw[1])
def cmd_write_binary(self, fileid, data, is_update):
count = 0
data_len = len(data)
if is_update: # overwrite existing file -> update
ins = 0xd6
else:
ins = 0xd0 # write file, and break if exist already
while count*256 < data_len:
if count == 0:
if len(data) < 128:
cmd_data0 = iso7816_compose(ins, 0x80+fileid, 0x00, data[:128])
cmd_data1 = None
else:
cmd_data0 = iso7816_compose(ins, 0x80+fileid, 0x00, data[:128], 0x10)
cmd_data1 = iso7816_compose(ins, 0x80+fileid, 0x00, data[128:256])
else:
if len(data[256*count:256*count+128]) < 128:
cmd_data0 = iso7816_compose(ins, count, 0x00, data[256*count:256*count+128])
cmd_data1 = None
else:
cmd_data0 = iso7816_compose(ins, count, 0x00, data[256*count:256*count+128], 0x10)
cmd_data1 = iso7816_compose(ins, count, 0x00, data[256*count+128:256*(count+1)])
sw = self.icc_send_cmd(cmd_data0)
if len(sw) != 2:
raise ValueError("cmd_write_binary 0")
if not (sw[0] == 0x90 and sw[1] == 0x00):
raise ValueError("cmd_write_binary 0", "%02x%02x" % (sw[0], sw[1]))
if cmd_data1:
sw = self.icc_send_cmd(cmd_data1)
if len(sw) != 2:
raise ValueError("cmd_write_binary 1", sw)
if not (sw[0] == 0x90 and sw[1] == 0x00):
raise ValueError("cmd_write_binary 1", "%02x%02x" % (sw[0], sw[1]))
count += 1
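    # Each 256-byte page above is split into two APDUs of at most 128
    # bytes; the first carries class byte 0x10 (command chaining, as
    # assumed elsewhere in this file) so the card knows a second half
    # follows.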
def cmd_select_openpgp(self):
cmd_data = iso7816_compose(0xa4, 0x04, 0x00, b"\xD2\x76\x00\x01\x24\x01")
r = self.icc_send_cmd(cmd_data)
if len(r) < 2:
raise ValueError(r)
sw = r[-2:]
r = r[0:-2]
if sw[0] == 0x61:
self.cmd_get_response(sw[1])
return True
elif sw[0] == 0x90 and sw[1] == 0x00:
return True
else:
raise ValueError("%02x%02x" % (sw[0], sw[1]))
def cmd_get_data(self, tagh, tagl):
cmd_data = iso7816_compose(0xca, tagh, tagl, b"")
sw = self.icc_send_cmd(cmd_data)
if len(sw) != 2:
raise ValueError(sw)
if sw[0] == 0x90 and sw[1] == 0x00:
return array('B')
elif sw[0] != 0x61:
raise ValueError("%02x%02x" % (sw[0], sw[1]))
return self.cmd_get_response(sw[1])
def cmd_set_identity(self, ident):
cmd_data = iso7816_compose(0x85, 0x00, ident, b"")
sw = self.icc_send_cmd(cmd_data)
if len(sw) != 2:
raise ValueError(sw)
if not (sw[0] == 0x90 and sw[1] == 0x00):
raise ValueError("%02x%02x" % (sw[0], sw[1]))
return True
def cmd_change_reference_data(self, who, data):
cmd_data = iso7816_compose(0x24, 0x00, 0x80+who, data)
sw = self.icc_send_cmd(cmd_data)
if len(sw) != 2:
raise ValueError(sw)
if not (sw[0] == 0x90 and sw[1] == 0x00):
raise ValueError("%02x%02x" % (sw[0], sw[1]))
return True
def cmd_put_data(self, tagh, tagl, content):
cmd_data = iso7816_compose(0xda, tagh, tagl, content)
sw = self.icc_send_cmd(cmd_data)
if len(sw) != 2:
raise ValueError(sw)
if not (sw[0] == 0x90 and sw[1] == 0x00):
raise ValueError("%02x%02x" % (sw[0], sw[1]))
return True
def cmd_put_data_odd(self, tagh, tagl, content):
cmd_data0 = iso7816_compose(0xdb, tagh, tagl, content[:128], 0x10)
cmd_data1 = iso7816_compose(0xdb, tagh, tagl, content[128:])
sw = self.icc_send_cmd(cmd_data0)
if len(sw) != 2:
raise ValueError(sw)
if not (sw[0] == 0x90 and sw[1] == 0x00):
raise ValueError("%02x%02x" % (sw[0], sw[1]))
sw = self.icc_send_cmd(cmd_data1)
if len(sw) != 2:
raise ValueError(sw)
if not (sw[0] == 0x90 and sw[1] == 0x00):
raise ValueError("%02x%02x" % (sw[0], sw[1]))
return True
def cmd_reset_retry_counter(self, how, who, data):
cmd_data = iso7816_compose(0x2c, how, who, data)
sw = self.icc_send_cmd(cmd_data)
if len(sw) != 2:
raise ValueError(sw)
if not (sw[0] == 0x90 and sw[1] == 0x00):
raise ValueError("%02x%02x" % (sw[0], sw[1]))
return True
def cmd_pso(self, p1, p2, data):
cmd_data = iso7816_compose(0x2a, p1, p2, data)
sw = self.icc_send_cmd(cmd_data)
if len(sw) != 2:
raise ValueError(sw)
if sw[0] == 0x90 and sw[1] == 0x00:
return array('B')
elif sw[0] != 0x61:
raise ValueError("%02x%02x" % (sw[0], sw[1]))
return self.cmd_get_response(sw[1])
def cmd_pso_longdata(self, p1, p2, data):
cmd_data0 = iso7816_compose(0x2a, p1, p2, data[:128], 0x10)
cmd_data1 = iso7816_compose(0x2a, p1, p2, data[128:])
sw = self.icc_send_cmd(cmd_data0)
if len(sw) != 2:
raise ValueError(sw)
if not (sw[0] == 0x90 and sw[1] == 0x00):
raise ValueError("%02x%02x" % (sw[0], sw[1]))
sw = self.icc_send_cmd(cmd_data1)
if len(sw) != 2:
raise ValueError(sw)
elif sw[0] != 0x61:
raise ValueError("%02x%02x" % (sw[0], sw[1]))
return self.cmd_get_response(sw[1])
def cmd_internal_authenticate(self, data):
cmd_data = iso7816_compose(0x88, 0, 0, data)
sw = self.icc_send_cmd(cmd_data)
if len(sw) != 2:
raise ValueError(sw)
if sw[0] == 0x90 and sw[1] == 0x00:
return array('B')
elif sw[0] != 0x61:
raise ValueError("%02x%02x" % (sw[0], sw[1]))
return self.cmd_get_response(sw[1])
def cmd_genkey(self, keyno):
if keyno == 1:
data = b'\xb6\x00'
elif keyno == 2:
data = b'\xb8\x00'
else:
data = b'\xa4\x00'
cmd_data = iso7816_compose(0x47, 0x80, 0, data)
sw = self.icc_send_cmd(cmd_data)
if len(sw) != 2:
raise ValueError(sw)
if sw[0] == 0x90 and sw[1] == 0x00:
return array('B')
elif sw[0] != 0x61:
raise ValueError("%02x%02x" % (sw[0], sw[1]))
pk = self.cmd_get_response(sw[1])
return (pk[9:9+256], pk[9+256+2:9+256+2+3])
def cmd_get_public_key(self, keyno):
if keyno == 1:
data = b'\xb6\x00'
elif keyno == 2:
data = b'\xb8\x00'
else:
data = b'\xa4\x00'
cmd_data = iso7816_compose(0x47, 0x81, 0, data)
sw = self.icc_send_cmd(cmd_data)
if len(sw) != 2:
raise ValueError(sw)
elif sw[0] != 0x61:
raise ValueError("%02x%02x" % (sw[0], sw[1]))
pk = self.cmd_get_response(sw[1])
return (pk[9:9+256], pk[9+256+2:9+256+2+3])
def cmd_put_data_remove(self, tagh, tagl):
cmd_data = iso7816_compose(0xda, tagh, tagl, b"")
sw = self.icc_send_cmd(cmd_data)
        if not (sw[0] == 0x90 and sw[1] == 0x00):
raise ValueError("%02x%02x" % (sw[0], sw[1]))
def cmd_put_data_key_import_remove(self, keyno):
if keyno == 1:
keyspec = b"\xb6\x00" # SIG
elif keyno == 2:
keyspec = b"\xb8\x00" # DEC
else:
keyspec = b"\xa4\x00" # AUT
cmd_data = iso7816_compose(0xdb, 0x3f, 0xff, b"\x4d\x02" + keyspec)
sw = self.icc_send_cmd(cmd_data)
        if not (sw[0] == 0x90 and sw[1] == 0x00):
raise ValueError("%02x%02x" % (sw[0], sw[1]))
def cmd_get_challenge(self):
        cmd_data = iso7816_compose(0x84, 0x00, 0x00, b'')
sw = self.icc_send_cmd(cmd_data)
if len(sw) != 2:
raise ValueError(sw)
if sw[0] != 0x61:
raise ValueError("%02x%02x" % (sw[0], sw[1]))
return self.cmd_get_response(sw[1])
def cmd_external_authenticate(self, keyno, signed):
cmd_data = iso7816_compose(0x82, 0x00, keyno, signed[0:128], cls=0x10)
sw = self.icc_send_cmd(cmd_data)
if len(sw) != 2:
raise ValueError(sw)
if not (sw[0] == 0x90 and sw[1] == 0x00):
raise ValueError("%02x%02x" % (sw[0], sw[1]))
cmd_data = iso7816_compose(0x82, 0x00, keyno, signed[128:])
sw = self.icc_send_cmd(cmd_data)
if len(sw) != 2:
raise ValueError(sw)
if not (sw[0] == 0x90 and sw[1] == 0x00):
raise ValueError("%02x%02x" % (sw[0], sw[1]))
class regnual(object):
def __init__(self, dev):
conf = dev.configurations[0]
intf_alt = conf.interfaces[0]
intf = intf_alt[0]
if intf.interfaceClass != 0xff:
raise ValueError("Wrong interface class")
self.__devhandle = dev.open()
self.__devhandle.claimInterface(intf)
self.logger = logging.getLogger('regnual')
def set_logger(self, logger: logging.Logger):
self.logger = logger.getChild('regnual')
def local_print(self, message: str, verbose=False):
self.logger.debug('print: {}'.format(message))
if verbose:
print(message)
def mem_info(self):
mem = self.__devhandle.controlMsg(requestType = 0xc0, request = 0,
buffer = 8, value = 0, index = 0,
timeout = 10000)
start = ((mem[3]*256 + mem[2])*256 + mem[1])*256 + mem[0]
end = ((mem[7]*256 + mem[6])*256 + mem[5])*256 + mem[4]
return (start, end)
def download(self, start, data, verbose=False, progress_func = None):
addr = start
addr_end = (start + len(data)) & 0xffffff00
i = int((addr - 0x08000000) / 0x100)
j = 0
self.local_print("start %08x" % addr, verbose)
self.local_print("end %08x" % addr_end, verbose)
if progress_func:
progress_func(0)
while addr < addr_end:
if progress_func:
progress_func((addr-start)/(addr_end-start))
self.local_print("# %08x: %d: %d : %d" % (addr, i, j, 256), verbose)
self.__devhandle.controlMsg(requestType = 0x40, request = 1,
buffer = data[j*256:j*256+256],
value = 0, index = 0, timeout = 10000)
crc32code = crc32(data[j*256:j*256+256])
res = self.__devhandle.controlMsg(requestType = 0xc0, request = 2,
buffer = 4, value = 0, index = 0,
timeout = 10000)
r_value = ((res[3]*256 + res[2])*256 + res[1])*256 + res[0]
if (crc32code ^ r_value) != 0xffffffff:
self.local_print("failure")
self.__devhandle.controlMsg(requestType = 0x40, request = 3,
buffer = None,
value = i, index = 0, timeout = 10000)
time.sleep(0.010)
res = self.__devhandle.controlMsg(requestType = 0xc0, request = 2,
buffer = 4, value = 0, index = 0,
timeout = 10000)
r_value = ((res[3]*256 + res[2])*256 + res[1])*256 + res[0]
if r_value == 0:
self.local_print("failure")
i = i+1
j = j+1
addr = addr + 256
residue = len(data) % 256
if residue != 0:
self.local_print("# %08x: %d : %d" % (addr, i, residue), verbose)
self.__devhandle.controlMsg(requestType = 0x40, request = 1,
buffer = data[j*256:],
value = 0, index = 0, timeout = 10000)
crc32code = crc32(data[j*256:].ljust(256,b'\xff'))
res = self.__devhandle.controlMsg(requestType = 0xc0, request = 2,
buffer = 4, value = 0, index = 0,
timeout = 10000)
r_value = ((res[3]*256 + res[2])*256 + res[1])*256 + res[0]
if (crc32code ^ r_value) != 0xffffffff:
self.local_print("failure")
self.__devhandle.controlMsg(requestType = 0x40, request = 3,
buffer = None,
value = i, index = 0, timeout = 10000)
time.sleep(0.010)
res = self.__devhandle.controlMsg(requestType = 0xc0, request = 2,
buffer = 4, value = 0, index = 0,
timeout = 10000)
r_value = ((res[3]*256 + res[2])*256 + res[1])*256 + res[0]
if r_value == 0:
self.local_print("failure")
def protect(self):
self.__devhandle.controlMsg(requestType = 0x40, request = 4,
buffer = None, value = 0, index = 0,
timeout = 10000)
time.sleep(0.100)
res = self.__devhandle.controlMsg(requestType = 0xc0, request = 2,
buffer = 4, value = 0, index = 0,
timeout = 10000)
r_value = ((res[3]*256 + res[2])*256 + res[1])*256 + res[0]
if r_value == 0:
self.local_print("protection failure")
def finish(self):
self.__devhandle.controlMsg(requestType = 0x40, request = 5,
buffer = None, value = 0, index = 0,
timeout = 10000)
def reset_device(self):
try:
self.__devhandle.reset()
except:
pass
def compare(data_original, data_in_device):
if data_original == data_in_device:
return True
raise ValueError("verify failed")
def gnuk_devices():
busses = usb.busses()
for bus in busses:
devices = bus.devices
for dev in devices:
for config in dev.configurations:
for intf in config.interfaces:
for alt in intf:
if alt.interfaceClass == CCID_CLASS and \
alt.interfaceSubClass == CCID_SUBCLASS and \
alt.interfaceProtocol == CCID_PROTOCOL_0 and \
(dev.idVendor,dev.idProduct) in USB_PRODUCT_LIST_TUP:
yield dev, config, alt
def gnuk_devices_by_vidpid():
try:
busses = usb.busses()
except usb.core.NoBackendError:
print("Warning: no backend was found to use for communication. "
"Please refer to documentation how to install additional libraries.")
return []
for bus in busses:
devices = bus.devices
for dev in devices:
for cand in USB_PRODUCT_LIST:
if dev.idVendor != cand['vendor']:
continue
if dev.idProduct != cand['product']:
continue
yield dev
break
def get_gnuk_device(verbose=True, logger: logging.Logger=None):
icc = None
for (dev, config, intf) in gnuk_devices():
try:
icc = gnuk_token(dev, config, intf)
icc.set_logger(logger)
if logger:
logger.debug('{} {} {}'.format(dev.filename, config.value, intf.interfaceNumber))
if verbose:
try:
d = get_dict_for_device(dev)
print(f'Device: {d["Product"]} {d["Serial"]}')
except:
print(f'Device: name: "{dev.filename}", c/i: {config.value}/{intf.interfaceNumber}')
break
except:
pass
if not icc:
raise ValueError("No ICC present")
status = icc.icc_get_status()
if status == 0:
pass # It's ON already
elif status == 1:
icc.icc_power_on()
else:
raise ValueError("Unknown ICC status", status)
return icc
SHA256_OID_PREFIX = "3031300d060960864801650304020105000420"
def UNSIGNED(n):
return n & 0xffffffff
def crc32(bytestr):
crc = binascii.crc32(bytestr)
return UNSIGNED(crc)
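# Illustrative helper (a sketch, not part of the original flow):
# regnual.download above expects the device to report the bitwise
# complement of the CRC-32 of each page it received, so a successful
# transfer satisfies crc32(page) ^ reported == 0xffffffff.
def _crc32_readback_ok(page, reported):
    """Return True if `reported` is the bitwise complement of crc32(page)."""
    return (crc32(page) ^ reported) == 0xffffffff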
def parse_kdf_data(kdf_data):
if len(kdf_data) == 90:
single_salt = True
elif len(kdf_data) == 110:
single_salt = False
else:
raise ValueError("length does not much", kdf_data)
if kdf_data[0:2] != b'\x81\x01':
raise ValueError("data does not much")
algo = kdf_data[2]
if kdf_data[3:5] != b'\x82\x01':
raise ValueError("data does not much")
subalgo = kdf_data[5]
if kdf_data[6:8] != b'\x83\x04':
raise ValueError("data does not much")
iters = unpack(">I", kdf_data[8:12])[0]
if kdf_data[12:14] != b'\x84\x08':
raise ValueError("data does not much")
salt = kdf_data[14:22]
if single_salt:
salt_reset = None
salt_admin = None
if kdf_data[22:24] != b'\x87\x20':
raise ValueError("data does not much")
hash_user = kdf_data[24:56]
if kdf_data[56:58] != b'\x88\x20':
raise ValueError("data does not much")
hash_admin = kdf_data[58:90]
else:
if kdf_data[22:24] != b'\x85\x08':
raise ValueError("data does not much")
salt_reset = kdf_data[24:32]
if kdf_data[32:34] != b'\x86\x08':
raise ValueError("data does not much")
salt_admin = kdf_data[34:42]
if kdf_data[42:44] != b'\x87\x20':
raise ValueError("data does not much")
hash_user = kdf_data[44:76]
if kdf_data[76:78] != b'\x88\x20':
raise ValueError("data does not much")
hash_admin = kdf_data[78:110]
return ( algo, subalgo, iters, salt, salt_reset, salt_admin,
hash_user, hash_admin )
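# Minimal usage sketch (illustration only; the bytes are made up):
# a 90-byte single-salt KDF data object has the TLV layout
#   81 01 <algo>  82 01 <subalgo>  83 04 <iters, big-endian>
#   84 08 <salt>  87 20 <hash_user>  88 20 <hash_admin>
# so, for example:
#     kdf = (b'\x81\x01\x03' + b'\x82\x01\x08' +
#            b'\x83\x04\x00\x01\x00\x00' +
#            b'\x84\x08' + bytes(8) +
#            b'\x87\x20' + bytes(32) +
#            b'\x88\x20' + bytes(32))
#     parse_kdf_data(kdf)   # iters == 65536; salt_reset/salt_admin are None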
| 38.586721 | 104 | 0.535766 |
a5415919b334f2f7f1baaec17e5eefe8c34b8c25 | 1054 | py | Python | ibpy_native/error.py | Devtography/ibpy_native | e3e2a406a8db9bb338953be6dc195b8099379acb | [
"Apache-2.0"
] | 6 | 2020-07-09T20:55:41.000Z | 2022-01-22T15:43:29.000Z | ibpy_native/error.py | Devtography/ibpy_native | e3e2a406a8db9bb338953be6dc195b8099379acb | [
"Apache-2.0"
] | 1 | 2021-02-28T13:37:43.000Z | 2021-02-28T13:37:43.000Z | ibpy_native/error.py | Devtography/ibpy_native | e3e2a406a8db9bb338953be6dc195b8099379acb | [
"Apache-2.0"
] | 5 | 2020-05-24T19:15:06.000Z | 2022-01-22T15:43:35.000Z | """Code implementation of error-related classes."""
import enum
from typing import Any
class IBErrorCode(enum.IntEnum):
"""Error codes."""
# Error codes defined by IB
DUPLICATE_TICKER_ID = 102
DUPLICATE_ORDER_ID = 103
INVALID_CONTRACT = 200
ORDER_REJECTED = 201
ORDER_MESSAGE = 399
NOT_CONNECTED = 504
# Self-defined error codes
REQ_TIMEOUT = 50504
RES_NO_CONTENT = 50204
RES_UNEXPECTED = 50214
QUEUE_IN_USE = 50400
RES_NOT_FOUND = 50404
UNKNOWN = 50500
class IBError(Exception):
"""Error object to handle the error retruns from IB."""
def __init__(self, rid: int, err_code: int, err_str: str,
err_extra: Any=None):
self.rid = rid
self.err_code = err_code
self.err_str = err_str
self.err_extra = err_extra
super().__init__(err_str)
def __str__(self):
# override method
error_msg = ("IB error - ID %d: code %d - %s"
% (self.rid, self.err_code, self.err_str))
return error_msg
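# Minimal usage sketch (illustration only): raising and printing an
# IBError with one of the self-defined codes above.
#
#     try:
#         raise IBError(rid=1, err_code=IBErrorCode.REQ_TIMEOUT,
#                       err_str="request timed out")
#     except IBError as err:
#         print(err)  # IB error - ID 1: code 50504 - request timed out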
| 26.35 | 63 | 0.63093 |
7e57b4a16efe3de48d7d2c19d3dc32d2faa1a9f8 | 997 | py | Python | src/sentry/api/endpoints/organization_config_integrations.py | Ali-Tahir/sentry | aa7b306c5ea671ac002a3524982563679557cb31 | [
"BSD-3-Clause"
] | null | null | null | src/sentry/api/endpoints/organization_config_integrations.py | Ali-Tahir/sentry | aa7b306c5ea671ac002a3524982563679557cb31 | [
"BSD-3-Clause"
] | null | null | null | src/sentry/api/endpoints/organization_config_integrations.py | Ali-Tahir/sentry | aa7b306c5ea671ac002a3524982563679557cb31 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
from rest_framework.response import Response
from django.conf import settings
from sentry import integrations, features
from sentry.api.bases.organization import OrganizationEndpoint
from sentry.api.serializers import serialize, IntegrationProviderSerializer
class OrganizationConfigIntegrationsEndpoint(OrganizationEndpoint):
def get(self, request, organization):
has_catchall = features.has(
"organizations:internal-catchall", organization, actor=request.user
)
providers = []
for provider in integrations.all():
if not has_catchall and provider.key in settings.SENTRY_INTERNAL_INTEGRATIONS:
continue
providers.append(provider)
providers.sort(key=lambda i: i.key)
serialized = serialize(
providers, organization=organization, serializer=IntegrationProviderSerializer()
)
return Response({"providers": serialized})
| 31.15625 | 92 | 0.725176 |
5928d71b5ff638df0407f8aa232d6e7aa04460d4 | 4817 | py | Python | alarms/migrations/0001_initial.py | fcurella/clock-api | 57c16e83cdb405feea268c6a03959207a12cb4d0 | [
"MIT"
] | 5 | 2020-05-26T20:03:44.000Z | 2020-09-13T19:51:41.000Z | alarms/migrations/0001_initial.py | fcurella/clock-api | 57c16e83cdb405feea268c6a03959207a12cb4d0 | [
"MIT"
] | null | null | null | alarms/migrations/0001_initial.py | fcurella/clock-api | 57c16e83cdb405feea268c6a03959207a12cb4d0 | [
"MIT"
] | 1 | 2021-11-20T01:34:45.000Z | 2021-11-20T01:34:45.000Z | # Generated by Django 3.0.5 on 2020-05-26 15:58
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import django_lifecycle.mixins
import model_utils.fields
import timezone_field.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
("django_celery_beat", "0012_periodictask_expire_seconds"),
]
operations = [
migrations.CreateModel(
name="Alarm",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"created",
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now,
editable=False,
verbose_name="created",
),
),
(
"modified",
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now,
editable=False,
verbose_name="modified",
),
),
("schedule", models.TextField()),
(
"schedule_type",
models.CharField(
choices=[("crontab", "crontab")],
default="crontab",
max_length=20,
),
),
("timezone", timezone_field.fields.TimeZoneField(default="UTC")),
("active", models.BooleanField(default=False)),
(
"custom_attributes",
django.contrib.postgres.fields.jsonb.JSONField(
blank=True, default=dict
),
),
("pre_fire", models.DurationField(blank=True, null=True)),
("post_fire", models.DurationField(blank=True, null=True)),
],
options={"abstract": False,},
bases=(django_lifecycle.mixins.LifecycleModelMixin, models.Model),
),
migrations.CreateModel(
name="Sound",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"created",
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now,
editable=False,
verbose_name="created",
),
),
(
"modified",
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now,
editable=False,
verbose_name="modified",
),
),
("name", models.CharField(default="", max_length=100)),
("audio", models.FileField(upload_to="")),
],
options={"abstract": False,},
),
migrations.CreateModel(
name="Interval",
fields=[
(
"alarm_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="alarms.Alarm",
),
),
("duration", models.DurationField()),
],
options={"abstract": False,},
bases=("alarms.alarm",),
),
migrations.AddField(
model_name="alarm",
name="sound",
field=models.ForeignKey(
default=1,
on_delete=django.db.models.deletion.SET_DEFAULT,
to="alarms.Sound",
),
),
migrations.AddField(
model_name="alarm",
name="task",
field=models.OneToOneField(
blank=True,
on_delete=django.db.models.deletion.PROTECT,
to="django_celery_beat.PeriodicTask",
),
),
]
| 33.685315 | 81 | 0.419763 |
4829a9638f3663b158b09c061df56fccaf260551 | 66193 | py | Python | src/rosegraphics.py | LiChen2000/01-IntroductionToPython | cdb363d4d90f4800f0f9c38b33c50b49109a41a3 | [
"MIT"
] | null | null | null | src/rosegraphics.py | LiChen2000/01-IntroductionToPython | cdb363d4d90f4800f0f9c38b33c50b49109a41a3 | [
"MIT"
] | null | null | null | src/rosegraphics.py | LiChen2000/01-IntroductionToPython | cdb363d4d90f4800f0f9c38b33c50b49109a41a3 | [
"MIT"
] | null | null | null | """
rosegraphics.py - a simple Graphics library for Python.
Its key feature is:
-- USING this library provides a simple introduction to USING objects.
Other key features include:
-- It has a rich set of classes, methods and instance variables.
In addition to classes like Circles that are natural for students,
it has other kinds of classes like RoseWindow and SimpleTurtle
to provide a richer set of examples than "just" a graphics library.
-- It allows one to do a reasonable set of graphics operations
with reasonable efficiency. The API mimics Java's Shape API
for the most part.
-- It is built on top of tkinter and its extension ttk
(the standard graphics libraries that come with Python).
-- Unlike tkinter, it is NOT event-driven and hence can be used
before students see that paradigm. (There is a behind-the-scenes
facility for listening for and responding to events,
for those who want to do so.)
-- It attempts to be as bullet-proof as possible, to make it easy
for beginners to use it. In particular, it attempts to provide
reasonable error messages when a student misuses the API.
-- It was inspired by zellegraphics but is a complete re-implementation
that attempts to:
-- Be more bullet-proof.
-- Provide a richer set of examples for using objects.
-- Have an API that is more like Java's Shape API than tkinter's
(older) API.
-- While it can serve as an example for defining classes,
it is NOT intended to do so for beginners.
It is excellent for helping students learn to USE objects;
it is NOT perfect for helping students learn to WRITE CLASSES.
See the MAIN function below for typical examples of its use.
Authors: David Mutchler, Mark Hays, Michael Wollowski, Matt Boutell,
Chandan Rupakheti, Claude Anderson and their colleagues,
with thanks to John Zelle for inspiration and hints.
First completed version: September 2014.
"""
import tkinter
from tkinter import font as tkinter_font
import time
import turtle
# ----------------------------------------------------------------------
# All the windows that are constructed during a run share the single
# _master_Tk (a tkinter.Tk object)
# as their common root. The first construction of a RoseWindow
# sets this _master_Tk to a Tkinter.Tk object.
# ----------------------------------------------------------------------
_master_Tk = None
# ----------------------------------------------------------------------
# RoseWindow is the top-level object. It starts with a single RoseCanvas.
# ----------------------------------------------------------------------
class RoseWindow(object):
"""
A RoseWindow is a window that pops up when constructed.
It can have RoseWidgets on it and starts by default with
a single RoseCanvas upon which one can draw shapes.
To construct a RoseWindow, use:
- rg.RoseWindow()
or use any of its optional arguments, as in these examples:
window = rg.RoseWindow(400, 300) # 400 wide by 300 tall
window = rg.RoseWindow(400, 300, 'Funny window') # with a title
Instance variables include:
width: width of this window (in pixels)
height: width of this window (in pixels)
title: displayed on the window's bar
widgets: the things attached to this window
"""
def __init__(self, width=400, height=300, title='Rose Graphics',
color='black', canvas_color=None,
make_initial_canvas=True):
"""
Pops up a tkinter.Toplevel window with (by default)
a RoseCanvas (and associated tkinter.Canvas) on it.
Arguments are:
-- width, height: dimensions of the window (in pixels).
        -- title: title displayed on the window.
-- color: background color of the window
-- canvas_color: background color of the canvas
displayed on the window by default
-- make_initial_canvas:
-- If True, a default canvas is placed on the window.
-- Otherwise, no default canvas is placed on the window.
If this is the first RoseWindow constructed, then a
hidden Tk object is constructed to control the event loop.
Preconditions:
:type width: int
:type height: int
:type title: str
:type color: Color
:type canvas_color: Color
:type make_initial_canvas: bool
"""
# check_types([(width, (int, float)),
# (height, (int, float)),
# (title, (Color, str)
# --------------------------------------------------------------
# The _master_Tk controls the mainloop for ALL the RoseWindows.
# If this is the first RoseWindow constructed in this run,
# then construct the _master_Tk object.
# --------------------------------------------------------------
global _master_Tk
if not _master_Tk:
_master_Tk = tkinter.Tk()
_master_Tk.withdraw()
else:
time.sleep(0.1) # Helps the window appear on TOP of Eclipse
# --------------------------------------------------------------
# Has a tkinter.Toplevel, and a tkinter.Canvas on the Toplevel.
# --------------------------------------------------------------
self.toplevel = tkinter.Toplevel(_master_Tk,
background=color,
width=width, height=height)
self.toplevel.title(title)
self._is_closed = False
self.toplevel.protocol("WM_DELETE_WINDOW", self.close)
# FIXME: The next two need to be properties to have
# setting happen correctly. Really belongs to RoseCanvas.
# See comments elsewhere on this.
self.width = width
self.height = height
if make_initial_canvas:
self.initial_canvas = RoseCanvas(self, width, height,
canvas_color)
else:
self.initial_canvas = None
self.widgets = [self.initial_canvas]
# FIXME: Do any other tailoring of the toplevel as desired,
# e.g. borderwidth and style...
# --------------------------------------------------------------
# Catch mouse clicks and key presses.
# --------------------------------------------------------------
self.mouse = Mouse()
self.keyboard = Keyboard()
self.toplevel.bind('<Button>', self._on_mouse_click)
self.toplevel.bind('<KeyPress>', self._on_key_press)
self.update()
def close(self):
""" Closes this RoseWindow. """
if self.toplevel:
self.toplevel.destroy()
self.toplevel = None
self.update()
self._is_closed = True
def update(self):
"""
Checks for and handles events that has happened
in this RoseWindow (e.g. mouse clicks, drawing shapes).
"""
global _master_Tk
_master_Tk.update()
def render(self, seconds_to_pause=None):
"""
Updates all the Shapes attached to RoseCanvas objects associated
with this RoseWindow, then draws all those Shapes.
After doing so, pauses the given number of seconds.
:type seconds_to_pause: float
"""
for widget in self.widgets:
if type(widget) == RoseCanvas:
widget.render()
self.update()
if seconds_to_pause:
time.sleep(seconds_to_pause)
def close_on_mouse_click(self):
"""
Displays a message at the bottom center of the window and waits
for the user to click the mouse anywhere in the window.
Then closes this RoseWindow.
Returns an rg.Point that specifies where the user clicked the mouse.
"""
message = 'To exit, click anywhere in this window'
click_position = self.continue_on_mouse_click(message=message,
close_it=True)
return click_position
def continue_on_mouse_click(self,
message='To continue, click anywhere in this window',
x_position=None,
y_position=None,
close_it=False,
erase_it=True):
"""
Displays a message at the bottom center of the window
and waits for the user to click the mouse, then erases the message.
Optional parameters let you:
-- Display a different message
-- Place the message at a different place in the window
(xpos and ypos are as in Text)
-- Close the window after the mouse is clicked (and ignore
the GraphicsError that results if the user instead chooses
to click the X in the window)
-- NOT erase the message when done
"""
if self._is_closed:
return
if x_position is None:
x_position = self.width / 2
if y_position is None:
y_position = self.height - 20
anchor_point = Point(x_position, y_position)
text = Text(anchor_point, message)
# FIXME: Really should do all this on a per-RoseCanvas basis.
if self.initial_canvas:
text.attach_to(self.initial_canvas)
self.initial_canvas._renderShape(text, render_NOW=True)
click_position = self.get_next_mouse_click()
if erase_it and self.initial_canvas:
text.detach_from(self.initial_canvas)
if close_it:
self.close() # then close the window
return click_position
def get_next_mouse_click(self):
"""
Waits for the user to click in the window. Then returns the rg.Point
that represents the point where the user clicked.
Example:
If this method is called and then the user clicks near
          the upper-right corner of a 300 x 500 window,
this function would return something like rg.Point(295, 5).
"""
self.mouse.position = None
while True:
if self._is_closed:
return None
if self.mouse.position is not None:
break
self.update()
time.sleep(.05) # allow time for other events to be handled
click_point = self.mouse.position
self.mouse.position = None
return click_point
def _on_mouse_click(self, event):
self.mouse._update(event)
def _on_key_press(self, event):
self.keyboard._update(event)
# def add_canvas(self, width=None, height=None, background_color=0):
# FIXME: Set defaults based on the main canvas.
# new_canvas = RoseCanvas(self, background_color='white')
# self.widgets.append(new_canvas)
#
# _root.update()
def __serialize_shapes(self):
"""
Returns a list of strings representing the shapes in sorted order.
"""
return _serialize_shapes(self)
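# Minimal usage sketch (illustration only), assuming this module is
# imported as rg (import rosegraphics as rg):
#
#     window = rg.RoseWindow(400, 300, 'Demo')
#     click = window.get_next_mouse_click()  # an rg.Point, or None
#     print(click)
#     window.close_on_mouse_click()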
class RoseWidget(object):
"""
A Widget is a thing that one can put on a Window,
e.g. a Canvas, FortuneTeller, etc.
"""
def __init__(self, window):
self._window = window
def get_window(self):
return self._window
class RoseCanvas(RoseWidget):
defaults = {'colors': [None, 'yellow', 'light blue', 'dark grey']}
count = 0
"""
A RoseCanvas is a RoseWidget (i.e., a thing on a RoseWindow)
upon which one can draw shapes and other Drawable things.
"""
def __init__(self, window, width=200, height=200,
background_color=0):
super().__init__(window)
RoseCanvas.count = RoseCanvas.count + 1
# FIXME: Deal with default background colors.
# FIXME: Store background color as a property
# so that modifying it changes the tkinter canvas.
# Ditto width and height.
# if background_color == 0:
# index = RoseCanvas.count % len(defaults['colors'])
# self.background_color = defaults['colors'][index]
# else:
# self.background_color = background_color
tk_canvas = tkinter.Canvas(window.toplevel,
width=width, height=height,
background=background_color)
self._tkinter_canvas = tk_canvas
# FIXME: Automate gridding better.
self._tkinter_canvas.grid(padx=5, pady=5)
self.shapes = []
def render(self, seconds_to_pause=None):
"""
Updates all the Shapes attached to this RoseCanvas, then draws
all those Shapes. After doing so, pauses the given number of seconds.
:type seconds_to_pause: float
"""
self._update_shapes()
self._window.update()
if seconds_to_pause:
time.sleep(seconds_to_pause)
def _renderShape(self, shape, render_NOW=False):
"""Renders a shape."""
coordinates = shape._get_coordinates_for_drawing()
options = shape._get_options_for_drawing()
if shape.shape_id_by_canvas[self] is None:
shape.shape_id_by_canvas[self] = \
shape._method_for_drawing(self._tkinter_canvas, *coordinates)
try:
self._tkinter_canvas.coords(shape.shape_id_by_canvas[self],
*coordinates)
except tkinter.TclError:
msg = 'Could not place the shape\n'
msg += 'on the given window.\n'
msg += 'Did you accidentally close a window\n'
msg += 'that later needed to be rendered again?'
raise Exception(msg) from None
self._tkinter_canvas.itemconfigure(shape.shape_id_by_canvas[self],
options)
if render_NOW:
# redraw NOW
self._window.update()
def _draw(self, shape):
"""Queues a shape for being drawn. Does NOT draw it just yet."""
shapeInList = False
for listShape in self.shapes:
if listShape is shape:
shapeInList = True
break
if not shapeInList:
shape.shape_id_by_canvas[self] = None
self.shapes.append(shape)
def _undraw(self, shape):
if shape in self.shapes:
for i in range(len(self.shapes)):
if self.shapes[i] is shape:
self._tkinter_canvas.delete(shape.shape_id_by_canvas[self])
del self.shapes[i]
break
def _update_shapes(self):
for shape in self.shapes:
self._renderShape(shape)
class Mouse(object):
def __init__(self):
self.position = None
def _update(self, event):
self.position = Point(event.x, event.y)
class Keyboard(object):
def __init__(self):
self.key_pressed = None
def _update(self, event):
pass
class __FreezeClass__ (type):
"""Prevents class variable assignment."""
def __setattr__(self, name, _ignored): # last parameter is the value
err = "You tried to set the instance variable '" + name + "'\n"
err += " on the CLASS '" + self.__name__ + "'"
err += ", which is not an OBJECT.\n"
err += " Did you forget the () after the word "
err += self.__name__ + ",\n"
err += " on the line where you constructed the object?"
raise SyntaxError(err)
class _Shape(object, metaclass=__FreezeClass__):
"""
A Shape is a thing that can be drawn on a RoseCanvas
(which itself draws on a tkinter Canvas).
Its constructor provides the tkinter method to be used to
draw this Shape.
This abstract type has concrete subclasses that include:
Arc, Bitmap, Circle, Ellipse, Image, Line, Path, Polygon,
Rectangle, RoundedRectangle, Square, Text and Window.
Public data attributes: None.
Public methods: attach_to.
"""
def __init__(self, method_for_drawing):
""" Arguments:
-- the tkinter method for drawing the Shape.
"""
self._method_for_drawing = method_for_drawing
self.shape_id_by_canvas = {}
def __eq__(self, other):
"""
Two Shape objects are equal (==) if all their attributes
are equal to each other.
"""
# check before we go deleting keys that may or may not exist
if(not isinstance(other, self.__class__)):
return False
self_dict = self.__dict__.copy()
other_dict = other.__dict__.copy()
del self_dict["shape_id_by_canvas"]
del other_dict["shape_id_by_canvas"]
return (self_dict == other_dict)
def __ne__(self, other):
return not self.__eq__(other)
def attach_to(self, window_or_canvas):
"""
'draws' this Shape. More precisely:
Attaches this Shape to the given
RoseWindow or RoseCanvas. When that
RoseWindow/RoseCanvas is rendered, this shape
will appear on that RoseWindow/RoseCanvas.
"""
if isinstance(window_or_canvas, RoseWindow):
window_or_canvas = window_or_canvas.initial_canvas
window_or_canvas._draw(self)
def detach_from(self, rose_canvas):
"""
'undraws' this Shape. More precisely:
Detaches this Shape from the given
RoseWindow or RoseCanvas. When that
RoseWindow/RoseCanvas is rendered,
this shape will no longer appear
on that RoseWindow/RoseCanvas.
"""
if type(rose_canvas) == RoseWindow:
rose_canvas = rose_canvas.initial_canvas
rose_canvas._undraw(self)
class _ShapeWithOutline(object):
"""
A Shape that has an interior (which can be filled with a color)
and an outline (which has a color and thickness).
This abstract type has concrete subclasses that include:
Arc, Circle, Ellipse, Image, Line, Path,
Polygon, Rectangle, Square, Text and Window.
Public data attributes: fill_color, outline_color, outline_thickness.
Public methods: _initialize_options.
"""
defaults = {'fill_color': None,
'outline_color': 'black',
'outline_thickness': 1}
def _initialize_options(self):
self.fill_color = _ShapeWithOutline.defaults['fill_color']
self.outline_color = _ShapeWithOutline.defaults['outline_color']
self.outline_thickness = _ShapeWithOutline.defaults[
'outline_thickness']
def _get_options_for_drawing(self):
options = {'fill': self.fill_color,
'outline': self.outline_color,
'width': self.outline_thickness}
# If a color is None, that means transparent here:
for option in ('fill', 'outline'):
if not options[option]:
options[option] = ''
return options
class _ShapeWithThickness(object):
"""
A Shape that can be (and almost always is) filled with a color
and has a thickness but no outline.
This abstract type has concrete subclasses that include:
Line and Path.
Public data attributes: color, thickness.
Public methods: _initialize_options.
"""
defaults = {'color': 'black',
'thickness': 1,
'arrow': None}
def _initialize_options(self):
self.color = _ShapeWithThickness.defaults['color']
self.thickness = _ShapeWithThickness.defaults['thickness']
self.arrow = _ShapeWithThickness.defaults['arrow']
def _get_options_for_drawing(self):
options = {'fill': self.color,
'width': self.thickness,
'arrow': self.arrow}
# If a color is None, that means 'black' here:
if options['fill'] is None:
options['fill'] = 'black'
return options
class _ShapeWithText(object):
"""
A Shape that has text and a font for displaying that text.
This abstract type has concrete subclasses that include:
Text.
Public data attributes: font_family, font_size,
is_bold, is_italic, is_underline, is_overstrike.
Public methods: _initialize_options.
"""
# FIXME: Add more to the above docstring.
defaults = {'font_family': 'helvetica',
'font_size': 14,
'weight': 'normal',
'slant': 'roman',
'underline': 0,
'overstrike': 0,
'justify': tkinter.CENTER,
'text_box_width': None,
'text_color': 'black',
'text': ''}
def _initialize_options(self):
self.font_family = _ShapeWithText.defaults['font_family']
self.font_size = _ShapeWithText.defaults['font_size']
self.is_bold = _ShapeWithText.defaults['weight'] == 'bold'
self.is_italic = _ShapeWithText.defaults['slant'] == 'italic'
self.is_underline = _ShapeWithText.defaults['underline'] == 1
self.is_overstrike = _ShapeWithText.defaults['overstrike'] == 1
self.justify = _ShapeWithText.defaults['justify']
self.text_box_width = _ShapeWithText.defaults['text_box_width']
self.text_color = _ShapeWithText.defaults['text_color']
self.text = _ShapeWithText.defaults['text']
def _get_options_for_drawing(self):
weight = 'bold' if self.is_bold else 'normal'
slant = 'italic' if self.is_italic else 'roman'
underline = 1 if self.is_underline else 0
overstrike = 1 if self.is_overstrike else 0
font = tkinter_font.Font(family=self.font_family,
size=self.font_size,
weight=weight,
slant=slant,
underline=underline,
overstrike=overstrike)
options = {'font': font,
'justify': self.justify,
'fill': self.text_color,
'text': self.text}
if self.text_box_width:
options['width'] = self.text_box_width
return options
class _ShapeWithCenter(_Shape):
"""
A Shape that has a center (and for which moving its center
moves the entire Shape). Its constructor provides the center
of the Shape along with its method for drawing this Shape.
This abstract type has concrete subclasses that include:
Arc, Bitmap, Circle, Ellipse, Image,
Rectangle, RoundedRectangle, Square, Text and Window.
Public data attributes: center.
Public methods: move_by, move_center_to.
"""
def __init__(self, center, method_for_drawing):
"""
Arguments:
-- the Point that is the center of the Shape
(the Shape stores a CLONE of that Point)
-- the tkinter method for drawing the Shape.
"""
# Clone the center argument, so that if the caller
# mutates the argument, it does NOT affect this Shape.
super().__init__(method_for_drawing)
self.center = center.clone()
def move_by(self, dx, dy):
"""
Moves this _Shape to the right by dx and down by dy.
Negative values move it to the left/up instead.
Does NOT return a value; instead, it mutates this shape.
:type dx: float
:type dy: float
"""
self.center.move_by(dx, dy)
def move_center_to(self, x, y):
"""
Moves this _Shape's center to (x, y),
thus translating the entire Shape
by however much its center moved.
:type x: float
:type y: float
"""
self.center.move_to(x, y)
class _RectangularShape(_Shape):
"""
A _Shape determined by its rectangular bounding box (plus possibly
other information).
Concrete sub-classes include: rg.Ellipse, rg.Rectangle.
Examples:
These all assume that the variable shape is a _RectangularShape
(e.g. an rg.Ellipse or a rg.Rectangle):
The methods in these examples all return rg.Point objects that are
copies of a corner/center of the _RectangularShape:
ul = shape.get_upper_left_corner()
ur = shape.get_upper_right_corner()
ll = shape.get_lower_left_corner()
lr = shape.get_lower_right_corner()
center = shape.get_center()
The methods in these examples return a positive number:
h = shape.get_height()
w = shape.get_width()
The method in this example returns an rg.Rectangle that encloses
this _RectangularShape:
bbox = shape.get_bounding_box()
This example moves this _RectangularShape right 100 and up 50:
shape.move_by(100, -50)
This example does the same thing another way:
        shape.corner_1.move_by(100, -50)
        shape.corner_2.move_by(100, -50)
"""
def __init__(self, corner_1, corner_2, method_for_drawing):
"""
:type corner_1: Point
:type corner_2: Point
:type method_for_drawing: callable(int, int, int, int) -> int
"""
super().__init__(method_for_drawing)
self.corner_1 = corner_1.clone()
self.corner_2 = corner_2.clone()
self._update_corners()
def __repr__(self):
""" Returns a string representation of this shape. """
f_string = ''
f_string += '{}: corner_1=({}, {}), corner_2=({}, {}),'
f_string += ' fill_color={},'
f_string += ' outline_color={}, outline_thickness={}.'
return f_string.format(self.__class__.__name__,
self.corner_1.x, self.corner_1.y,
self.corner_2.x, self.corner_2.y,
self.fill_color, self.outline_color,
self.outline_thickness)
def move_by(self, dx, dy):
"""
Moves this _Shape to the right by dx and down by dy.
Negative values move it to the left/up instead.
Does NOT return a value; instead, it mutates this shape.
:type dx: float
:type dy: float
"""
self.corner_1.x += dx
self.corner_1.y += dy
self.corner_2.x += dx
self.corner_2.y += dy
def clone(self):
"""
Returns a copy of this _RectangularShape.
"""
return self.__class__(self.corner_1.clone(),
self.corner_2.clone())
def get_upper_left_corner(self):
"""
Returns a copy of the ** upper-left **
        corner of this _RectangularShape.
The returned value is an rg.Point.
"""
self._update_corners()
return self._upper_left_corner
def get_lower_left_corner(self):
"""
Returns a copy of the ** lower-left **
        corner of this _RectangularShape.
The returned value is an rg.Point.
"""
self._update_corners()
return self._lower_left_corner
def get_upper_right_corner(self):
"""
Returns a copy of the ** upper-right **
        corner of this _RectangularShape.
The returned value is an rg.Point.
"""
self._update_corners()
return self._upper_right_corner
def get_lower_right_corner(self):
"""
Returns a copy of the ** lower-right **
        corner of this _RectangularShape.
The returned value is an rg.Point.
"""
self._update_corners()
return self._lower_right_corner
def get_center(self):
"""
        Returns a copy of the ** center ** of this _RectangularShape.
The returned value is an rg.Point.
"""
return Point((self.corner_1.x + self.corner_2.x) / 2,
(self.corner_1.y + self.corner_2.y) / 2)
def get_height(self):
"""
Returns the height (i.e., the size in
the y-direction) of this _RectangularShape.
The returned value is always positive.
"""
return abs(self.corner_1.y - self.corner_2.y)
def get_width(self):
"""
Returns the width (i.e., the size in
the x-direction) of this _RectangularShape.
The returned value is always positive.
"""
return abs(self.corner_1.x - self.corner_2.x)
def get_bounding_box(self):
"""
Returns an rg.Rectangle that encloses this _RectangularShape.
"""
return Rectangle(self.corner_1, self.corner_2)
def _update_corners(self):
min_x = min(self.corner_1.x, self.corner_2.x)
min_y = min(self.corner_1.y, self.corner_2.y)
max_x = max(self.corner_1.x, self.corner_2.x)
max_y = max(self.corner_1.y, self.corner_2.y)
self._upper_left_corner = Point(min_x, min_y)
self._upper_right_corner = Point(max_x, min_y)
self._lower_left_corner = Point(min_x, max_y)
self._lower_right_corner = Point(max_x, max_y)
def _get_coordinates_for_drawing(self):
return [self.get_upper_left_corner().x,
self.get_upper_left_corner().y,
self.get_lower_right_corner().x,
self.get_lower_right_corner().y]
class Arc(_RectangularShape, _ShapeWithOutline):
""" Not yet implemented. """
class Bitmap(_Shape):
""" Not yet implemented. """
class Circle(_ShapeWithCenter, _ShapeWithOutline):
"""
    A Shape that is a circle.
To construct a Circle, use:
- rg.Circle(center, radius)
where center is an rg.Point object
and radius is a positive integer.
For example:
- rg.Circle(rg.Point(100, 75), 30)
specifies the circle whose center
is at (100, 75) and whose radius is 30.
Instance variables include:
center: An rg.Point that specifies
the center of the Circle.
radius: The radius of the Circle.
fill_color:
The Circle is filled with this color.
Example: circle.fill_color = 'green'
outline_color:
The outline of the Circle is this color.
Example: circle.outline_color = 'blue'
outline_thickness: The thickness (in pixels)
of the outline of the Circle.
Examples:
circle = rg.Circle(rg.Point(100, 75), 30)
print(circle.center, circle.radius)
circle.fill_color = 'blue'
circle.outline_color = 'black'
circle.outline_thickness = 5
window = rg.RoseWindow()
circle.attach_to(window)
circle.move_center_to(300, 200)
circle.move_by(-50, 60)
# Another way to move the Circle:
x = circle.center.x
y = circle.center.y
circle.center = rg.Point(x - 50, y + 60)
"""
def __init__(self, center, radius):
"""
:type center: rg.Point
:type radius: int
"""
# The following sets instance variable
# self.center
# to a clone (copy) of the given rg.Point.
super().__init__(center, tkinter.Canvas.create_oval)
# The following sets default values for:
# self.fill_color
# self.outline_color
# self.outline_thickness
super()._initialize_options()
# The radius is also stored in an instance variable:
self.radius = radius
def __repr__(self):
""" Returns a string representation of this Circle. """
f_string = ''
f_string += 'Circle: center=({}, {}), radius={}, fill_color={}, '
f_string += 'outline_color={}, outline_thickness={}.'
return f_string.format(self.center.x, self.center.y,
self.radius,
self.fill_color, self.outline_color,
self.outline_thickness)
def clone(self):
""" Returns a copy of this Circle. """
return Circle(self.center, self.radius)
def get_bounding_box(self):
"""
Returns an rg.Rectangle that encloses this Circle.
"""
c1 = Point(self.center.x - self.radius,
self.center.y - self.radius)
c2 = Point(self.center.x + self.radius,
self.center.y + self.radius)
return Rectangle(c1, c2)
def _get_coordinates_for_drawing(self):
return self.get_bounding_box()._get_coordinates_for_drawing()
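# Usage sketch (illustration only): draw a Circle and animate it by
# re-rendering the window after each small move.
#
#     window = rg.RoseWindow(400, 300)
#     circle = rg.Circle(rg.Point(50, 50), 20)
#     circle.fill_color = 'blue'
#     circle.attach_to(window)
#     for _ in range(30):
#         circle.move_by(5, 3)
#         window.render(0.05)  # redraw, then pause 0.05 seconds
#     window.close_on_mouse_click()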
class Ellipse(_RectangularShape, _ShapeWithOutline):
"""
A Shape that is an ellipse (aka oval).
To construct an Ellipse, use:
- rg.Ellipse(corner1, corner2)
where corner1 and corner2 are
rg.Point objects that specify opposite
        corners of the imaginary rectangle that
encloses the Ellipse.
For example:
- rg.Ellipse(rg.Point(100, 50),
- rg.Point(300, 200))
        specifies the ellipse whose imaginary
        enclosing rectangle:
          - has upper-left corner (100, 50) and
          - has lower-right corner (300, 200).
Another example:
- rg.Ellipse(rg.Point(300, 50),
- rg.Point(100, 200))
specifies the same ellipse.
Any two opposite corners can be used.
Instance variables include:
corner_1: An rg.Point that specifies
        one corner of the imaginary rectangle
that encloses the Ellipse.
corner_2: An rg.Point that specifies an
        opposite corner of the imaginary rectangle
that encloses the Ellipse.
fill_color:
The Ellipse is filled with this color.
Example: ellipse.fill_color = 'green'
outline_color:
The outline of the Ellipse is this color.
Example: ellipse.outline_color = 'blue'
outline_thickness: The thickness (in pixels)
of the outline of the Ellipse.
Examples:
p1 = rg.Point(100, 50)
p2 = rg.Point(300, 200)
       ellipse = rg.Ellipse(p1, p2)
print(ellipse.corner_1, ellipse.corner_2)
ellipse.fill_color = 'blue'
ellipse.outline_color = 'black'
ellipse.outline_thickness = 5
window = rg.RoseWindow()
ellipse.attach_to(window)
ellipse.move_by(-50, 60)
# Another way to move the Ellipse:
       ellipse.corner_1.move_by(-50, 60)
       ellipse.corner_2.move_by(-50, 60)
# To get rg.Points for the corners/center:
ul = ellipse.get_upper_left_corner()
ur = ellipse.get_upper_right_corner()
ll = ellipse.get_lower_left_corner()
lr = ellipse.get_lower_right_corner()
center = ellipse.get_center()
# To get the width/height (always positive):
h = ellipse.get_height()
w = ellipse.get_width()
"""
def __init__(self, corner_1, corner_2):
"""
:type corner_1: rg.Point
:type corner_2: rg.Point
"""
# The following sets instance variables
# self.corner_1
# self.corner_2
# to clones (copies) of the given rg.Points.
super().__init__(corner_1, corner_2,
tkinter.Canvas.create_oval)
# The following sets default values for:
# self.fill_color
# self.outline_color
# self.outline_thickness
super()._initialize_options()
class Line(_Shape, _ShapeWithThickness):
"""
A Shape that is a line segment.
To construct a Line, use:
- rg.Line(start, end)
where start and end are rg.Point objects
that specify the endpoints of the Line.
For example:
- rg.Line(rg.Point(100, 50),
- rg.Point(200, 30)
specifies the Line that starts at (100, 50)
and ends at (200, 30).
Another example:
- rg.Line(rg.Point(200, 30),
- rg.Point(100, 50)
specifies the Line that is the same as the
previous example except that the start and
end points are reversed. This is important
if the Line's "arrow" type is not None.
Instance variables include:
start:
The rg.Point that is one end of the Line.
end:
The rg.Point that is the other end of the Line.
color: The Line is drawn with this color.
thickness: The thickness (in pixels) of the Line.
arrow: Specifies whether or not the Line
is drawn as an arrow. Possible values are:
- None draw the Line without arrow-heads
- 'first' draw an arrow-head at the start
- 'last' draw an arrow-head at the end
- 'both' draw an arrow-head at both
For example, if my_line is a Line, then
- my_line.arrow = 'last'
makes the Line be drawn as an arrow
from its start point to its end point.
Examples:
start = rg.Point(100, 50)
end = rg.Point(200, 30)
line = rg.Line(start, end)
line.color = 'blue'
line.thickness = 3
line.arrow = 'both' # A double-sided arrow
line.arrow = None # Just a line (no arrow)
line.arrow = 'first' # Arrow from end to start
line.arrow = 'last' # Arrow from start to end
window = rg.RoseWindow()
line.attach_to(window)
line.move_by(-50, 60)
"""
def __init__(self, start, end):
"""
:type start: rg.Point
:type end: rg.Point
"""
super().__init__(tkinter.Canvas.create_line)
# The following sets default values for:
# self.color
# self.thickness
# self.arrow
super()._initialize_options()
# The other instance variables are the endpoints:
self.start = start.clone()
self.end = end.clone()
def __repr__(self):
""" Returns a string representation of this Line. """
f_string = ''
f_string += 'Line: start=({}, {}), end=({}, {}), color={}, '
f_string += 'thickness={}, arrow={}.'
return f_string.format(self.start.x, self.start.y,
self.end.x, self.end.y,
self.color, self.thickness, self.arrow)
def clone(self):
""" Returns a copy of this Line. """
return Line(self.start, self.end)
def move_by(self, dx, dy):
"""
Moves both endpoints of this Line
(and hence the entire Line as well)
to the right by dx and down by dy.
Negative values move it to the left/up instead.
Does NOT return a value; instead, it mutates this Line.
:type dx: float
:type dy: float
"""
self.start.move_by(dx, dy)
self.end.move_by(dx, dy)
def get_midpoint(self):
"""
Returns an rg.Point at the midpoint (center) of this Line.
"""
return Point((self.start.x + self.end.x) / 2,
(self.start.y + self.end.y) / 2)
def _get_coordinates_for_drawing(self):
return [self.start.x,
self.start.y,
self.end.x,
self.end.y]
class Path(_Shape, _ShapeWithThickness):
""" Not yet implemented. """
class Point(_Shape, _ShapeWithOutline):
"""
A Shape that is a point in two-dimensional space.
It is drawn as a small circle (dot).
To construct a Point, use:
- rg.Point(x, y)
where x and y are the Point's coordinates.
For example:
- rg.Point(100, 50)
specifies the point whose x value is 100
and whose y value is 50.
Instance variables include the following:
x: The x-coordinate of the Point.
y: The y-coordinate of the Point.
fill_color:
The Point is filled with this color.
Note that a Point is drawn as a small, filled
circle, which is why it has a fill_color, etc.
Example: p.fill_color = 'green'
outline_color:
The outline of the Point is this color.
Example: p.outline_color = 'blue'
outline_thickness: The thickness (in pixels)
of the outline of the Point.
Examples:
p = rg.Point(100, 50)
print(p.x, p.y)
window = rg.RoseWindow()
p.attach_to(window)
p.move_to(300, 200)
p.move_by(-50, 60)
# Another way to move the Point:
p.x = p.x - 50
p.y = p.y + 60
p.fill_color = 'blue'
p.outline_color = 'black'
p.outline_thickness = 1
"""
defaults = {'width_for_drawing': 5,
'height_for_drawing': 5,
'fill_color': 'black',
'outline_color': 'black',
'outline_thickness': 1}
def __init__(self, x, y):
"""
:type x: float
:type y: float
"""
super().__init__(tkinter.Canvas.create_oval)
self.fill_color = Point.defaults['fill_color']
self.outline_color = Point.defaults['outline_color']
self.outline_thickness = Point.defaults['outline_thickness']
self.x = x
self.y = y
self.width_for_drawing = Point.defaults['width_for_drawing']
self.height_for_drawing = Point.defaults['height_for_drawing']
def __repr__(self):
""" Returns a string representation of this Point. """
return 'Point({:.1f}, {:.1f})'.format(self.x, self.y)
def clone(self):
""" Returns a copy of this Point. """
return Point(self.x, self.y)
def move_by(self, dx, dy):
"""
Moves this Point to the right by dx and down by dy.
Negative values move it to the left/up instead.
Does NOT return a value; instead, it mutates this Point.
:type dx: float
:type dy: float
"""
self.x = self.x + dx
self.y = self.y + dy
def move_to(self, x, y):
"""
Moves this Point to (x, y).
Does NOT return a value; instead, it mutates this Point.
:type x: float
:type y: float
"""
self.x = x
self.y = y
def get_bounding_box(self):
"""
Returns an rg.Rectangle that encloses
this Point (viewing it as a dot).
"""
        c1 = Point(self.x - self.width_for_drawing / 2,
                   self.y - self.height_for_drawing / 2)
        c2 = Point(self.x + self.width_for_drawing / 2,
                   self.y + self.height_for_drawing / 2)
return Rectangle(c1, c2)
def _get_coordinates_for_drawing(self):
return self.get_bounding_box()._get_coordinates_for_drawing()
class Polygon(_Shape, _ShapeWithOutline):
""" Not yet implemented. """
class Rectangle(_RectangularShape, _ShapeWithOutline):
"""
A Shape that is a rectangle.
To construct a Rectangle, use:
- rg.Rectangle(corner1, corner2)
where corner1 and corner2 are
rg.Point objects that specify opposite
corners of the rectangle.
For example:
- rg.Rectangle(rg.Point(100, 50),
- rg.Point(300, 200))
specifies the rectangle:
- whose upper-left corner is (100, 50) and
- whose lower-right corner is (300, 200).
Another example:
- rg.Rectangle(rg.Point(300, 50),
- rg.Point(100, 200))
specifies the same rectangle.
Any two opposite corners can be used.
Instance variables include:
corner_1: An rg.Point that specifies
one corner of the Rectangle.
corner_2: An rg.Point that specifies
an opposite corner of the Rectangle.
fill_color:
The Rectangle is filled with this color.
Example: rect.fill_color = 'green'
outline_color:
The outline of the Rectangle is this color.
Example: rect.outline_color = 'blue'
outline_thickness: The thickness (in pixels)
of the outline of the Rectangle.
Examples:
p1 = rg.Point(100, 50)
p2 = rg.Point(300, 200)
rect = rg.Rectangle(p1, p2)
print(rect.corner_1, rect.corner_2)
rect.fill_color = 'blue'
rect.outline_color = 'black'
rect.outline_thickness = 5
window = rg.RoseWindow()
rect.attach_to(window)
rect.move_by(-50, 60)
# Another way to move the Rectangle:
       rect.corner_1.move_by(-50, 60)
       rect.corner_2.move_by(-50, 60)
# To get rg.Points for the corners/center:
ul = rect.get_upper_left_corner()
ur = rect.get_upper_right_corner()
ll = rect.get_lower_left_corner()
lr = rect.get_lower_right_corner()
center = rect.get_center()
# To get the width/height (always positive):
h = rect.get_height()
w = rect.get_width()
"""
def __init__(self, corner_1, corner_2):
"""
:type corner_1: rg.Point
:type corner_2: rg.Point
"""
# The following sets instance variables
# self.corner_1
# self.corner_2
# to clones (copies) of the given rg.Points.
super().__init__(corner_1, corner_2,
tkinter.Canvas.create_rectangle)
# The following sets default values for:
# self.fill_color
# self.outline_color
# self.outline_thickness
super()._initialize_options()
def get_bounding_box(self):
"""
Returns a new rg.Rectangle with the same corners as this one.
"""
return self.clone()
class RoundedRectangle(_RectangularShape, _ShapeWithOutline):
""" Not yet implemented. """
class Square(_ShapeWithCenter, _ShapeWithOutline):
"""
A Shape that is a square.
To construct a Square, use:
- rg.Square(center, length_of_each_side)
where center is an rg.Point object
and length_of_each_side is a positive integer.
For example:
- rg.Square(rg.Point(100, 75), 60)
specifies the square whose center
is at (100, 75) and whose length of
each side is 60. Its corners are at:
(70, 45), (70, 105), (130, 45), (130, 105).
Instance variables include:
center: An rg.Point that specifies
the center of the Square.
length_of_each_side: The length of each side of the Square.
fill_color:
The Square is filled with this color.
Example: square.fill_color = 'green'
outline_color:
The outline of the Square is this color.
Example: square.outline_color = 'blue'
outline_thickness: The thickness (in pixels)
of the outline of the Square.
Examples:
square = rg.Square(rg.Point(100, 75), 60)
print(square.center, square.length_of_each_side)
square.fill_color = 'blue'
square.outline_color = 'black'
square.outline_thickness = 5
window = rg.RoseWindow()
square.attach_to(window)
square.move_center_to(300, 200)
square.move_by(-50, 60)
# Another way to move the Square:
x = square.center.x
y = square.center.y
square.center = rg.Point(x - 50, y + 60)
"""
def __init__(self, center, length_of_each_side):
"""
:type center: rg.Point
:type length_of_each_side: int
"""
# The following sets instance variable
# self.center
# to a clone (copy) of the given rg.Point.
super().__init__(center, tkinter.Canvas.create_rectangle)
# The following sets default values for:
# self.fill_color
# self.outline_color
# self.outline_thickness
super()._initialize_options()
# The length of each side is also stored in an instance variable
self.length_of_each_side = length_of_each_side
def __repr__(self):
""" Returns a string representation of this Square. """
f_string = ''
f_string += 'Square: center=({}, {}), side-lengths={}, '
f_string += 'fill_color={}, outline_color={}, outline_thickness={}.'
return f_string.format(self.center.x, self.center.y,
self.length_of_each_side,
self.fill_color, self.outline_color,
self.outline_thickness)
def clone(self):
""" Returns a copy of this Square. """
return Square(self.center, self.length_of_each_side)
def get_bounding_box(self):
"""
Returns an rg.Rectangle with the same corners as this Square.
"""
c1 = Point(self.center.x - self.length_of_each_side / 2,
self.center.y - self.length_of_each_side / 2)
c2 = Point(self.center.x + self.length_of_each_side / 2,
self.center.y + self.length_of_each_side / 2)
return Rectangle(c1, c2)
def _get_coordinates_for_drawing(self):
return self.get_bounding_box()._get_coordinates_for_drawing()
class Text(_ShapeWithCenter, _ShapeWithText):
"""
A Shape that has a string of text on it, displayed horizontally.
Its constructor specifies the rg.Point at which the text
is centered and the string that is to be displayed.
Public data attributes: center (an rg.Point),
font_size (an integer, 5 to 80 or so are reasonable values),
is_bold (True if the text is to be displayed in BOLD, else False),
is_italic (True or False),
is_underline (True or False),
is_overstrike (True or False),
text_color (color used to display the text, default is 'black')
text (the string to be displayed).
Public methods: attach_to, move_by, move_center_to.
"""
def __init__(self, center, text):
"""
The first argument must be an rg.Point.
The second argument must be a string.
When this Text object is rendered on a window,
the string (2nd argument) is drawn horizontally on the window,
centered at the rg.Point that is the 1st argument.
Preconditions:
:type center: rg.Point
:type text: str
"""
super().__init__(center, tkinter.Canvas.create_text)
super()._initialize_options()
self.text = text
# FIXME: Allow __init__ to set the options.
def __repr__(self):
return "Text displaying '{}' at position {}".format(self.text,
self.center)
# FIXME: Have repr include characteristics??
# FIXME: Do a clone?
# def clone(self):
# return Square(self.center, self.length_of_each_side)
# def get_bounding_box(self):
# return Rectangle(self.center,
# 2 * self.length_of_each_side,
# 2 * self.length_of_each_side)
# FIXME: Implement bounding_box using the tkinter function for it.
def _get_coordinates_for_drawing(self):
return [self.center.x, self.center.y]
# Mark: Window/RoseWindow naming collision is causing mass confusion.
# class Window(_Shape):
# """ Not yet implemented. """
# default_options = {}
# CONSIDER: Are these right for here?
class Button(_Shape):
""" Not yet implemented. """
default_options = {}
class Entry(_Shape):
""" Not yet implemented. """
default_options = {}
class Color(object):
"""
A Color represents a fill or outline color created from custom
amounts of red, green, and blue light. The arguments are:
- The RED component (0-255),
- the GREEN component (0-255),
- the BLUE component (0-255).
This Color can be passed to RoseGraphics colors
such as fill_color and outline_color.
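Example (values and the rect variable are illustrative):
pink = rg.Color(255, 150, 150)
rect.fill_color = pink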
"""
def __init__(self, red, green=None, blue=None):
self.red = red
self.green = green
self.blue = blue
def __repr__(self):
return "#{:02x}{:02x}{:02x}".format(self.red, self.green, self.blue)
# begin STUB code for testing
class _RoseWindowStub(RoseWindow):
def __init__(self, width=400, height=300, title='Rose Graphics',
color='black', canvas_color=None,
make_initial_canvas=True):
canvas_color = "white" # FIXME
self._is_closed = False
self.width = width
self.height = height
self.initial_canvas = _RoseCanvasStub(
self, width, height, canvas_color)
def render(self, seconds_to_pause=None):
pass
def get_next_mouse_click(self):
return Point(0, 0)
def close_on_mouse_click(self):
return None
def continue_on_mouse_click(self,
message=('To continue, ' +
'click anywhere in this window'),
x_position=None,
y_position=None,
close_it=False,
erase_it=True):
return None
def _serialize_shapes(self):
"""
Returns a list of strings representing the shapes in sorted order.
"""
return _serialize_shapes(self)
class _RoseCanvasStub(RoseCanvas):
def __init__(self, window, width, height, canvas_color):
# super().__init__(window, width, height, canvas_color)
# canvases.append(self)
self.shapes = []
def _draw(self, shape):
# super()._draw(shape)
self.shapes.append(shape)
def render(self, seconds_to_pause=None):
# super().render() # don't pause
pass
class TurtleWindow(object):
def __init__(self):
self._screen = turtle.Screen()
turtle.Turtle._screen = self._screen
def close_on_mouse_click(self):
message = 'To exit, click anywhere in this window'
self.display_message(message, Point(0, 280))
self._screen.exitonclick()
# We may need the statement:
# turtle.TurtleScreen._RUNNING = True
# in case we open a subsequent TurtleWindow during this run.
# The turtle library seems not to allow for that possibility
# (it uses a CLASS variable _RUNNING where I would have expected
# an INSTANCE variable).
# The next statement appeared to have a visible effect
# (something flashed) but nothing worse. At this time
# it is commented-out, since we need only a single TurtleWindow.
# turtle.TurtleScreen._RUNNING = True
def display_message(self, message, point):
""" Displays the given message at the given Point. """
self._screen._canvas.create_text(point.x, point.y, text=message)
def delay(self, milliseconds=None):
self._screen.delay(milliseconds)
def tracer(self, n=None, delay=None):
self._screen.tracer(n, delay)
def update(self):
self._screen.update()
class ShapesWindow(RoseWindow):
pass
class SimpleTurtle(object):
"""
A SimpleTurtle is a Turtle with restricted (simpler) functionality.
It can move forward/backward (units are pixels), turn (spin)
left/right (units are degrees), and more.
To construct a SimpleTurtle, use:
rg.SimpleTurtle(shape)
where shape is OPTIONAL and can be any of: 'turtle'
'arrow' 'classic' 'square' 'circle' 'triangle' 'blank'
Instance variables include:
speed: An integer from 1 (slowest) to 10 (fastest) that
determines how fast the SimpleTurtle moves.
pen: an rg.Pen object (see example below) that determines
the color and thickness of the line
that the SimpleTurtle draws when moving
paint_bucket: an rg.PaintBucket object (see example below)
that determines the color with which the SimpleTurtle
"fills" shapes indicated by using the begin_fill and
end_fill methods.
Examples:
natacha = rg.SimpleTurtle()
natacha.forward(100)
boris = rg.SimpleTurtle('turtle')
boris.speed = 8
boris.pen = rg.Pen('blue', 5) # blue line 5 pixels thick
boris.paint_bucket = rg.PaintBucket('red')
# Moves with pen down, then with pen up, then with pen down again:
boris.left(90)
boris.forward(-300)
boris.pen_up()
boris.go_to(rg.Point(100, -50))
boris.pen_down()
boris.backward(75)
# Moves with the enclosed space "filled" with the paint_bucket
boris.begin_fill()
... movements ...
boris.end_fill()
"""
def __init__(self, shape='classic'):
"""
What comes in:
A turtle.Shape that determines how the Turtle looks.
Defaults to a Bitmap of the "classic" Turtle (an arrowhead) from
early Turtle Graphics. See above for other shapes that are allowed.
Side effects: Constructs and stores in self._turtle the "real" Turtle
to do all the work on behalf of this SimpleTurtle. This (purposely)
restricts what this SimpleTurtle knows and can do.
:type shape: str
"""
self.speed = 1
self.pen = Pen('black', 1)
self.paint_bucket = PaintBucket('black')
self._turtle = turtle.Turtle(shape)
self._update_real_turtle()
def forward(self, distance):
"""
Makes this SimpleTurtle go forward the given distance
(in pixels). Example (assuming sally is an rg.SimpleTurtle):
sally.forward(200)
"""
self._update_real_turtle()
self._turtle.forward(distance)
def backward(self, distance):
"""
Makes this SimpleTurtle go backward the given distance
(in pixels). Example (assuming sally is an rg.SimpleTurtle):
sally.backward(200)
"""
self._update_real_turtle()
self._turtle.backward(distance)
def left(self, angle):
"""
Makes this SimpleTurtle turn (i.e. spin) left by the given angle
(in degrees). Example (assuming sally is an rg.SimpleTurtle):
sally.left(45)
"""
self._update_real_turtle()
self._turtle.left(angle)
def right(self, angle):
"""
Makes this SimpleTurtle turn (i.e. spin) right by the given angle
(in degrees). Example (assuming sally is an rg.SimpleTurtle):
sally.right(45)
"""
self._update_real_turtle()
self._turtle.right(angle)
def go_to(self, point):
"""
Makes this SimpleTurtle go to the given rg.Point.
(0, 0) is at the center of the window.
Example (assuming sally is an rg.SimpleTurtle):
sally.go_to(rg.Point(100, -50))
"""
self._update_real_turtle()
self._turtle.goto(point.x, point.y)
def set_heading(self, to_angle):
self._update_real_turtle()
self._turtle.setheading(to_angle)
def draw_circle(self, radius):
"""
Makes this SimpleTurtle draw a circle with the given radius.
Example (assuming sally is an rg.SimpleTurtle):
sally.draw_circle(40)
"""
self._update_real_turtle()
self._turtle.circle(radius)
def draw_square(self, length_of_sides):
"""
Makes this SimpleTurtle draw a square with the given value
for the length of each of its sides.
Example (assuming sally is an rg.SimpleTurtle):
sally.draw_square(100)
"""
for _ in range(4):
self.forward(length_of_sides)
self.left(90)
def draw_regular_polygon(self, number_of_sides, length_of_sides):
"""
Makes this SimpleTurtle draw a regular polygon with the given
number of sides and the given length for each of its sides.
Example (assuming sally is an rg.SimpleTurtle):
sally.draw_regular_polygon(8, 75) # octagon
sally.draw_regular_polygon(3, 75) # triangle
"""
for _ in range(number_of_sides):
self.forward(length_of_sides)
self.left(360 / number_of_sides)
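# (Each turn above is the polygon's exterior angle, 360 / number_of_sides
# degrees, which is exactly what closes the shape after the last side.)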
def pen_up(self):
"""
Lifts up this SimpleTurtle's pen. Subsequent movements
will NOT draw a line (until pen_down is called).
Example (assuming sally is an rg.SimpleTurtle):
sally.pen_up()
"""
self._update_real_turtle()
self._turtle.penup()
def pen_down(self):
"""
Puts down this SimpleTurtle's pen. Subsequent movements
WILL draw a line using this SimpleTurtle's pen (until pen_up
is called). Example (assuming sally is an rg.SimpleTurtle):
sally.pen_down()
"""
self._update_real_turtle()
self._turtle.pendown()
def x_cor(self):
"""
Returns the x-coordinate of this SimpleTurtle's current position.
Example (assuming sally is an rg.SimpleTurtle):
x = sally.x_cor()
"""
return self._turtle.xcor()
def y_cor(self):
"""
Returns the y-coordinate of this SimpleTurtle's current position.
Example (assuming sally is an rg.SimpleTurtle):
y = sally.y_cor()
"""
return self._turtle.ycor()
def begin_fill(self):
"""
Begins "filling" the shape that this SimpleTurtle draws,
using this SimpleTurtle's paint_bucket as the fill.
Example (assuming sally is an rg.SimpleTurtle) that fills
a triangle with green:
sally.paint_bucket = rg.PaintBucket('green')
sally.begin_fill()
sally.forward(100)
sally.left(120)
sally.forward(100)
sally.left(120)
sally.forward(100)
sally.end_fill()
"""
self._update_real_turtle()
self._turtle.begin_fill()
def end_fill(self):
"""
Completes "filling" the shape that this SimpleTurtle draws,
using this SimpleTurtle's paint_bucket as the fill.
Example (assuming sally is an rg.SimpleTurtle) that fills
a triangle with green:
sally.paint_bucket = rg.PaintBucket('green')
sally.begin_fill()
sally.forward(100)
sally.left(120)
sally.forward(100)
sally.left(120)
sally.forward(100)
sally.end_fill()
"""
self._update_real_turtle()
self._turtle.end_fill()
def clear(self):
""" Not yet implemented. """
def clone(self):
""" Not yet implemented. """
pass
def write_text(self):
""" Not yet implemented. """
pass
def _update_real_turtle(self):
self._turtle.pencolor(self.pen.color)
self._turtle.pensize(self.pen.thickness)
self._turtle.fillcolor(self.paint_bucket.color)
self._turtle.speed(self.speed)
class Pen(object):
"""
A Pen has a color and thickness.
SimpleTurtles use a Pen for drawing lines.
To construct a Pen, use:
rg.Pen(color, thickness)
where color is a color (e.g. 'red')
and thickness is a small positive integer.
Instance variables are:
color: The color of the Pen
thickness: The thickness of the Pen
Examples:
thick_blue = rg.Pen('blue', 14)
thin_red = rg.Pen('red', 1)
"""
def __init__(self, color, thickness):
self.thickness = thickness
self.color = color
class PaintBucket(object):
"""
A PaintBucket has a color.
SimpleTurtles use a PaintBucket for filling shapes with color.
To construct a PaintBucket, use:
rg.PaintBucket(color)
where color is a color (e.g. 'red').
Instance variables are:
color: The color of the PaintBucket
Example:
paint = rg.PaintBucket('green')
"""
def __init__(self, color):
self.color = color
# ----------------------------------------------------------------------
# At the risk of not being Pythonic, we provide a simple type-checking
# facility that attempts to provide meaningful error messages to
# students when they pass arguments that are not of the expected type.
# ----------------------------------------------------------------------
class WrongTypeException(Exception):
""" Not yet implemented. """
pass
def check_types(pairs):
""" Not yet implemented fully. """
for pair in pairs:
value = pair[0]
expected_type = pair[1]
if not isinstance(value, expected_type):
raise WrongTypeException(pair)
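# Example use (illustrative): a constructor might call
# check_types([(corner_1, Point), (corner_2, Point)])
# which raises WrongTypeException((value, expected_type)) on the first
# argument whose value is not an instance of the expected type.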
# ----------------------------------------------------------------------
# Serialization facility
# ----------------------------------------------------------------------
def _serialize_shapes(self):
""" Returns a list of strings representing the shapes in sorted order. """
# Idea: dump all the stats on all shapes,
# then return a sorted list for easy comparison.
# Problem: the order in which keys appear in dictionaries is random!
# Solution: sort keys and manually print
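# e.g. one serialized shape ends up as a string roughly like (illustrative):
# "['fill_color:black', 'outline_color:black', 'x:100', 'y:50', ...]"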
shapes = [shape.__dict__ for shape in self.initial_canvas.shapes]
keys_by_shape = [sorted(shape) for shape in shapes]
for k in range(len(shapes)):
shapes[k]['_method_for_drawing'] = None
shapes[k]['shape_id_by_canvas'] = None
result = []
for k in range(len(keys_by_shape)):
shape = shapes[k]
result.append([])
for key in keys_by_shape[k]:
result[-1].append(str(key) + ":" + str(shape[key]))
result[-1] = str(result[-1])
return "\n".join(sorted(result))
# FIXME (errors):
# -- clone() does not really make a copy; it just makes a new one
# but without cloning all the attributes.
# -- _ShapeWithCenter claims that things like Ellipse are subclasses,
# but they are not at this point, I think. In general, need to
# deal with overlap between _ShapeWithCenter and _RectangularShape.
# KEEP both of them to have some classes have corner_1 and corner_2
# while others have center and ...
# FIXME (things that have yet to be implemented):
# -- Allow multiple canvasses.
# -- Better close_on ... ala zellegraphics.
# -- Keyboard.
# -- Better Mouse.
# -- Add type hints.
# -- Catch all Exceptions and react appropriately.
# -- Implement unimplemented classes.
# -- Add and allow FortuneTellers and other non-canvas classes.
| 31.884875 | 85 | 0.597344 |
f9f2114d4ca03eae96b5dce76803bfc217d40654 | 11,185 | py | Python | plugin/hover.py | quangbuule/LSP | 3137a455ed04f8809bd8e85941786fb25826d1ea | [
"MIT"
] | null | null | null | plugin/hover.py | quangbuule/LSP | 3137a455ed04f8809bd8e85941786fb25826d1ea | [
"MIT"
] | null | null | null | plugin/hover.py | quangbuule/LSP | 3137a455ed04f8809bd8e85941786fb25826d1ea | [
"MIT"
] | null | null | null | import mdpopups
import sublime
import sublime_plugin
import webbrowser
import os
from html import escape
from .code_actions import actions_manager, run_code_action_or_command
from .code_actions import CodeActionOrCommand
from .core.configurations import is_supported_syntax
from .core.popups import popups
from .core.protocol import Request, DiagnosticSeverity, Diagnostic, DiagnosticRelatedInformation, Point
from .core.registry import session_for_view, LspTextCommand, windows
from .core.settings import client_configs, settings
from .core.typing import List, Optional, Any, Dict
from .core.views import text_document_position_params
from .diagnostics import filter_by_point, view_diagnostics
SUBLIME_WORD_MASK = 515
class HoverHandler(sublime_plugin.ViewEventListener):
def __init__(self, view: sublime.View) -> None:
self.view = view
@classmethod
def is_applicable(cls, view_settings: dict) -> bool:
if 'hover' in settings.disabled_capabilities:
return False
syntax = view_settings.get('syntax')
if syntax:
return is_supported_syntax(syntax, client_configs.all)
else:
return False
def on_hover(self, point: int, hover_zone: int) -> None:
if hover_zone != sublime.HOVER_TEXT or self.view.is_popup_visible():
return
self.view.run_command("lsp_hover", {"point": point})
_test_contents = [] # type: List[str]
class_for_severity = {
DiagnosticSeverity.Error: 'errors',
DiagnosticSeverity.Warning: 'warnings',
DiagnosticSeverity.Information: 'info',
DiagnosticSeverity.Hint: 'hints'
}
class GotoKind:
__slots__ = ("lsp_name", "label", "subl_cmd_name")
def __init__(self, lsp_name: str, label: str, subl_cmd_name: str) -> None:
self.lsp_name = lsp_name
self.label = label
self.subl_cmd_name = subl_cmd_name
goto_kinds = [
GotoKind("definition", "Definition", "definition"),
GotoKind("typeDefinition", "Type Definition", "type_definition"),
GotoKind("declaration", "Declaration", "declaration"),
GotoKind("implementation", "Implementation", "implementation")
]
class LspHoverCommand(LspTextCommand):
def __init__(self, view: sublime.View) -> None:
super().__init__(view)
self._base_dir = None # type: Optional[str]
def is_likely_at_symbol(self, point: int) -> bool:
word_at_sel = self.view.classify(point)
return bool(word_at_sel & SUBLIME_WORD_MASK)
def run(self, edit: sublime.Edit, point: Optional[int] = None) -> None:
hover_point = point if point is not None else self.view.sel()[0].begin()  # 'point or ...' would wrongly discard point 0
self._base_dir = windows.lookup(self.view.window()).get_project_path(self.view.file_name() or "")
self._hover = None # type: Optional[Any]
self._actions_by_config = {} # type: Dict[str, List[CodeActionOrCommand]]
self._diagnostics_by_config = {} # type: Dict[str, List[Diagnostic]]
if self.is_likely_at_symbol(hover_point):
self.request_symbol_hover(hover_point)
self._diagnostics_by_config = filter_by_point(view_diagnostics(self.view),
Point(*self.view.rowcol(hover_point)))
if self._diagnostics_by_config:
self.request_code_actions(hover_point)
self.request_show_hover(hover_point)
def request_symbol_hover(self, point: int) -> None:
# todo: session_for_view looks up windowmanager twice (config and for sessions)
# can we memoize some part (eg. where no point is provided?)
session = session_for_view(self.view, 'hoverProvider', point)
if session:
document_position = text_document_position_params(self.view, point)
if session.client:
session.client.send_request(
Request.hover(document_position),
lambda response: self.handle_response(response, point))
def request_code_actions(self, point: int) -> None:
actions_manager.request(self.view, point, lambda response: self.handle_code_actions(response, point),
self._diagnostics_by_config)
def handle_code_actions(self, responses: Dict[str, List[CodeActionOrCommand]], point: int) -> None:
self._actions_by_config = responses
self.request_show_hover(point)
def handle_response(self, response: Optional[Any], point: int) -> None:
self._hover = response
self.request_show_hover(point)
def symbol_actions_content(self) -> str:
actions = []
for goto_kind in goto_kinds:
if self.has_client_with_capability(goto_kind.lsp_name + "Provider"):
actions.append("<a href='{}'>{}</a>".format(goto_kind.lsp_name, goto_kind.label))
if self.has_client_with_capability('referencesProvider'):
actions.append("<a href='{}'>{}</a>".format('references', 'References'))
if self.has_client_with_capability('renameProvider'):
actions.append("<a href='{}'>{}</a>".format('rename', 'Rename'))
return "<p class='actions'>" + " | ".join(actions) + "</p>"
def format_diagnostic_related_info(self, info: DiagnosticRelatedInformation) -> str:
file_path = info.location.file_path
if self._base_dir and file_path.startswith(self._base_dir):
file_path = os.path.relpath(file_path, self._base_dir)
location = "{}:{}:{}".format(file_path, info.location.range.start.row+1, info.location.range.start.col+1)
return "<a href='location:{}'>{}</a>: {}".format(location, location, escape(info.message))
def format_diagnostic(self, diagnostic: 'Diagnostic') -> str:
diagnostic_message = escape(diagnostic.message, False).replace('\n', '<br>')
related_infos = [self.format_diagnostic_related_info(info) for info in diagnostic.related_info]
related_content = "<pre class='related_info'>" + "<br>".join(related_infos) + "</pre>" if related_infos else ""
if diagnostic.source:
return "<pre class=\"{}\">[{}] {}{}</pre>".format(class_for_severity[diagnostic.severity],
diagnostic.source, diagnostic_message, related_content)
else:
return "<pre class=\"{}\">{}{}</pre>".format(class_for_severity[diagnostic.severity], diagnostic_message,
related_content)
def diagnostics_content(self) -> str:
formatted = []
for config_name in self._diagnostics_by_config:
by_severity = {} # type: Dict[int, List[str]]
formatted.append("<div class='diagnostics'>")
for diagnostic in self._diagnostics_by_config[config_name]:
by_severity.setdefault(diagnostic.severity, []).append(self.format_diagnostic(diagnostic))
for severity, items in by_severity.items():
formatted.append("<div>")
formatted.extend(items)
formatted.append("</div>")
if config_name in self._actions_by_config:
action_count = len(self._actions_by_config[config_name])
if action_count > 0:
formatted.append("<div class=\"actions\"><a href='{}:{}'>{} ({})</a></div>".format(
'code-actions', config_name, 'Code Actions', action_count))
formatted.append("</div>")
return "".join(formatted)
def hover_content(self) -> str:
contents = [] # type: List[Any]
if isinstance(self._hover, dict):
response_content = self._hover.get('contents')
if response_content:
if isinstance(response_content, list):
contents = response_content
else:
contents = [response_content]
formatted = []
for item in contents:
value = ""
language = None
if isinstance(item, str):
value = item
else:
value = item.get("value")
language = item.get("language")
if language:
formatted.append("```{}\n{}\n```\n".format(language, value))
else:
formatted.append(value)
if formatted:
frontmatter_config = mdpopups.format_frontmatter({'allow_code_wrap': True})
return mdpopups.md2html(self.view, frontmatter_config + "\n".join(formatted))
return ""
def request_show_hover(self, point: int) -> None:
sublime.set_timeout(lambda: self.show_hover(point), 50)
def show_hover(self, point: int) -> None:
contents = self.diagnostics_content() + self.hover_content()
if contents and settings.show_symbol_action_links:
contents += self.symbol_actions_content()
_test_contents.clear()
_test_contents.append(contents) # for testing only
if contents:
mdpopups.show_popup(
self.view,
contents,
css=popups.stylesheet,
md=False,
flags=sublime.HIDE_ON_MOUSE_MOVE_AWAY,
location=point,
wrapper_class=popups.classname,
max_width=800,
on_navigate=lambda href: self.on_hover_navigate(href, point))
def on_hover_navigate(self, href: str, point: int) -> None:
for goto_kind in goto_kinds:
if href == goto_kind.lsp_name:
self.run_command_from_point(point, "lsp_symbol_" + goto_kind.subl_cmd_name)
return
if href == 'references':
self.run_command_from_point(point, "lsp_symbol_references")
elif href == 'rename':
self.run_command_from_point(point, "lsp_symbol_rename")
elif href.startswith('code-actions'):
_, config_name = href.split(":")
titles = [command["title"] for command in self._actions_by_config[config_name]]
sel = self.view.sel()
sel.clear()
sel.add(sublime.Region(point, point))
self.view.show_popup_menu(titles, lambda i: self.handle_code_action_select(config_name, i))
elif href.startswith('location'):
_, file_path, location = href.split(":", 2)
file_path = os.path.join(self._base_dir, file_path) if self._base_dir else file_path
window = self.view.window()
if window:
window.open_file(file_path + ":" + location, sublime.ENCODED_POSITION | sublime.TRANSIENT)
else:
webbrowser.open_new_tab(href)
def handle_code_action_select(self, config_name: str, index: int) -> None:
if index > -1:
selected = self._actions_by_config[config_name][index]
run_code_action_or_command(self.view, config_name, selected)
def run_command_from_point(self, point: int, command_name: str, args: Optional[Any] = None) -> None:
sel = self.view.sel()
sel.clear()
sel.add(sublime.Region(point, point))
self.view.run_command(command_name, args)
| 42.528517 | 119 | 0.630755 |
4d1e5fc06539122ad7e47b936e473d1e4b92bc67 | 2,122 | py | Python | src/server.py | sawshep/spit-legacy | 9a2838aeef453e84a087df971970f513ee5df6bf | [
"MIT"
] | 2 | 2020-03-24T23:32:25.000Z | 2020-03-25T04:03:48.000Z | src/server.py | sawshep/spit-legacy | 9a2838aeef453e84a087df971970f513ee5df6bf | [
"MIT"
] | 1 | 2020-03-14T03:19:27.000Z | 2020-04-01T19:33:33.000Z | src/server.py | sawshep/spit-legacy | 9a2838aeef453e84a087df971970f513ee5df6bf | [
"MIT"
] | null | null | null | '''server.py
This module holds the Server class,
which is used to establish a connection between two clients using TCP sockets.'''
# From Python standard library
import threading
import pickle
import socket
# My modules
import config
import gamedata
class Server:
'''TCP socket server for game clients to connect to.'''
def __init__(self):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.bind(('', config.SERVER_PORT))
#This is a dictionary of connected client socket objects
self.clients = {0: None, 1: None}
self.ready = False
self.deck = gamedata.make_deck()
self.listen()
def listen(self):
'''Listens for up to 2 connections and creates a new I/O thread for each one.'''
self.socket.listen(2)
print('Open to connections...')
while True:
client, address = self.socket.accept()
client_id = 0 if not self.clients[0] else 1
client.send(pickle.dumps(client_id))
print(f'Sent ID to {address}')
client.send(pickle.dumps(self.deck))
print(f'Sent deck to {address}')
self.clients[client_id] = client
print(f'{address} connected to the server')
threading.Thread(target=self.io_thread, args=(client, address, client_id,)).start()
# The idea for threading input/output for each connected user came from the official socket documentation
def io_thread(self, client, address, client_id):
'''Controls the I/O of information for each client socket, and handles disconnects'''
connected = True
while connected:
if self.clients[int(not client_id)]:
try:
data = client.recv(2048)
if data:
self.clients[int(not client_id)].send(data)
else:
connected = False
except Exception: # any recv/send failure counts as a disconnect
connected = False
client.close()
self.clients[client_id] = None
print(f'{address} disconnected')
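# A minimal client-side handshake sketch (illustrative; the names below are
# assumptions, but the order mirrors what listen() sends above):
# import pickle, socket
# s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# s.connect((server_address, config.SERVER_PORT)) # server_address assumed
# client_id = pickle.loads(s.recv(2048)) # 0 or 1, sent first by the server
# deck = pickle.loads(s.recv(2048)) # the shared deck, sent second
# s.send(pickle.dumps(data)) # anything sent is relayed to the other client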
Server()
| 35.966102 | 109 | 0.604618 |
3f6fa63a08233d8ddb817c891f813882cf245e76 | 254 | py | Python | setup.py | awans/k | 9327d89d246855c3daa6d470304089e08a93df5f | [
"MIT"
] | 4 | 2016-10-09T03:03:07.000Z | 2020-07-03T09:25:16.000Z | setup.py | awans/k | 9327d89d246855c3daa6d470304089e08a93df5f | [
"MIT"
] | null | null | null | setup.py | awans/k | 9327d89d246855c3daa6d470304089e08a93df5f | [
"MIT"
] | 1 | 2022-03-29T01:11:43.000Z | 2022-03-29T01:11:43.000Z | from setuptools import setup
setup(
name="key",
version="0.4",
description="Short getters for python",
url="https://github.com/awans/key",
author="Andrew Wansley",
author_email="andrew.wansley@gmail.com",
license="MIT",
packages=["k"]
)
| 19.538462 | 42 | 0.685039 |
97d1757a9cefd59c24d8bef33d54543f0b1c0311 | 1,524 | py | Python | setup.py | pituganov/telelog | b80ad94c77297eb8a0004bfd0f018ad3584fe32d | [
"MIT"
] | 3 | 2019-08-27T12:35:52.000Z | 2020-06-03T14:42:46.000Z | setup.py | pituganov/telelog | b80ad94c77297eb8a0004bfd0f018ad3584fe32d | [
"MIT"
] | null | null | null | setup.py | pituganov/telelog | b80ad94c77297eb8a0004bfd0f018ad3584fe32d | [
"MIT"
] | null | null | null | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup, find_packages
from os import path
from io import open
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='telelog',
version='0.1.2',
description='Extension for tqdm progressbar in Telegram',
license='MPLv2.0, MIT Licences',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/ermakovpetr/tg_tqdm',
author='Petr Ermakov',
author_email='ermakov.pd+github@gmail.com',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords='progressbar progressmeter progress bar meter'
' rate eta console terminal time telegram',
packages=['telelog'] + ['telelog.' + i for i in find_packages('telelog')],
install_requires=['tqdm', 'telepot', 'python-dotenv'],
project_urls={
'Source': 'https://github.com/ermakovpetr/tg_tqdm/',
},
)
| 33.866667 | 78 | 0.656824 |
0e239ac5b5a90f5ef7e3933ebc2797e64439856b | 553 | py | Python | tests/test_points.py | annettekurian/python-taiga | 5d5897fe3be01f06d434d649bc7dd7dc76fe28a1 | [
"MIT"
] | null | null | null | tests/test_points.py | annettekurian/python-taiga | 5d5897fe3be01f06d434d649bc7dd7dc76fe28a1 | [
"MIT"
] | 1 | 2018-05-27T11:37:47.000Z | 2018-05-27T11:41:49.000Z | tests/test_points.py | annettekurian/python-taiga | 5d5897fe3be01f06d434d649bc7dd7dc76fe28a1 | [
"MIT"
] | null | null | null | import unittest
from mock import patch
from taiga.models import Point, Points
from taiga.requestmaker import RequestMaker
class TestPoints(unittest.TestCase):
@patch('taiga.models.base.ListResource._new_resource')
def test_create_point(self, mock_new_resource):
rm = RequestMaker('/api/v1', 'fakehost', 'faketoken')
mock_new_resource.return_value = Point(rm)
Points(rm).create(1, 'Point 1', 4)
mock_new_resource.assert_called_with(
payload={'project': 1, 'name': 'Point 1', 'value': 4}
)
| 29.105263 | 65 | 0.688969 |
4b91ed327f29db2328645585185d874ed4c29417 | 118,112 | py | Python | OpenModal/gui/widgets/geometry.py | MonashSmartStructures/OpenModal | a76a258c420954eab4a8b4ef37b487616c9f6c62 | [
"CNRI-Python"
] | 85 | 2016-12-04T10:34:08.000Z | 2022-03-26T18:03:47.000Z | OpenModal/gui/widgets/geometry.py | MonashSmartStructures/OpenModal | a76a258c420954eab4a8b4ef37b487616c9f6c62 | [
"CNRI-Python"
] | 55 | 2016-12-02T15:01:15.000Z | 2022-01-07T11:10:26.000Z | OpenModal/gui/widgets/geometry.py | gusshmn/OpenModal | a76a258c420954eab4a8b4ef37b487616c9f6c62 | [
"CNRI-Python"
] | 51 | 2016-12-30T16:33:36.000Z | 2021-11-13T11:05:34.000Z |
# Copyright (C) 2014-2017 Matjaž Mršnik, Miha Pirnat, Janko Slavič, Blaž Starc (in alphabetic order)
#
# This file is part of OpenModal.
#
# OpenModal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# OpenModal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenModal. If not, see <http://www.gnu.org/licenses/>.
from OpenModal.gui.widgets.animation import TableModel,Model
from PyQt5 import QtCore, QtGui,QtWidgets
import pyqtgraph as pg
import numpy as np
from numpy.core.umath_tests import inner1d
import time
import pandas as pd
from pyqtgraph.parametertree import Parameter, ParameterTree
from OpenModal.anim_tools import AnimWidgBase
import os
from OpenModal.keys import keys
import qtawesome as qta
from OpenGL.GL import *
from functools import partial
from OpenModal.gui.templates import COLOR_PALETTE, LIST_FONT_FAMILY, LIST_FONT_SIZE, MENUBAR_WIDTH
from string import Template
SHADER='OpenModal'
GLOPTS= {
GL_DEPTH_TEST: True,
GL_BLEND: False,
GL_ALPHA_TEST: False,
GL_CULL_FACE: False}
#'glLightModeli':(GL_LIGHT_MODEL_TWO_SIDE, GL_TRUE)}
SMOOTH=True
COMPUTENORMALS=True
DRAW_EDGES_NODES=False
DRAW_EDGES_ELEMENTS=True
DRAW_EDGES_GCS=False
# ## Switch to using white background and black foreground
pg.setConfigOption('background', 'w')
pg.setConfigOption('foreground', 'k')
class CustomQTableView(QtWidgets.QTableView):
def __init__(self,parent):
super(self.__class__, self).__init__(parent)
self.catch=False #for catching right/left arrow keypress events in editor mode
self.keys = [QtCore.Qt.Key_Left,
QtCore.Qt.Key_Right]
def focusInEvent(self, event):
self.catch = False
return QtWidgets.QTableView.focusInEvent(self, event)
def focusOutEvent(self, event):
self.catch = True
return QtWidgets.QTableView.focusOutEvent(self, event)
def event(self, event):
if self.catch and event.type() == QtCore.QEvent.KeyRelease and event.key() in self.keys:
self._moveCursor(event.key())
return QtWidgets.QTableView.event(self,event)
def keyPressEvent(self, event):
if not self.catch:
return QtWidgets.QTableView.keyPressEvent(self, event)
self._moveCursor(event.key())
def _moveCursor(self, key):
row = self.currentIndex().row()
col = self.currentIndex().column()
if key == QtCore.Qt.Key_Left and col > 0:
col -= 1
elif key == QtCore.Qt.Key_Right and col < (self.model().columnCount()-1):
col += 1
elif key == QtCore.Qt.Key_Up and row > 0:
row -= 1
elif key == QtCore.Qt.Key_Down and row < (self.model().rowCount()-1):
row += 1
else:
return
self.setCurrentIndex(self.model().createIndex(row,col))
self.edit(self.currentIndex())
def mousePressEvent(self,event):
"""
Reimplement mousePressEvent in order to deselect rows when clicking into blank space
"""
if self.indexAt(event.pos()).isValid():
super(self.__class__, self).mousePressEvent(event)
else:
#user has clicked into blank space...clear selection and send signal
self.selectionModel().clearSelection()
class GeometryWidget(AnimWidgBase):
# def __init__(self, modaldata_object,status_bar,language, preferences=dict(), desktop_widget=None, parent=None):
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
self.setContentsMargins(0, 0, 0, 0)
self.selection=[] # list of nodes clicked with mouse in 3d view
self.selection_ind=[] # list of indicies of selected nodes
self.selection_color=[] # original model color
self.selected_elem_ids=[] #element ids of elements selected in element table view
self.selected_elem_col=None #color of selected elements
self.activated_models=[] # model_ids of currently activated models
#default widget mode!
self.widget_mode = 'nodes'
#connect cliked signal from 3d view
self.model_view.clicked_signal.clicked.connect(self.mouse_clicked)
def color_selected_node(self,test,nodes_index,nodes):
#index of the node in the original dataframe
ind=nodes_index[test]
#check if node was already selected, if it was...deselect it
if ind in self.selection_ind:
#index already selected -> deselect it
loc=self.selection_ind.index(ind)
del self.selection_ind[loc]
del self.selection[loc]
#if ind already in selection -> set default color
self.modaldata.tables['geometry'].ix[ind, 'clr_r']=self.selection_color[0]
self.modaldata.tables['geometry'].ix[ind, 'clr_g']=self.selection_color[1]
self.modaldata.tables['geometry'].ix[ind, 'clr_b']=self.selection_color[2]
self.modaldata.tables['geometry'].ix[ind, 'clr_a']=self.selection_color[3]
else:
#index not yet selected -> add it to selection
self.selection.append(nodes.iloc[test][['node_nums','x','y','z','model_id','color']].values[0])
self.selection_ind.append(ind)
self.selection_color=[self.modaldata.tables['geometry'].ix[ind, 'clr_r'].values[0],
self.modaldata.tables['geometry'].ix[ind, 'clr_g'].values[0],
self.modaldata.tables['geometry'].ix[ind, 'clr_b'].values[0],
self.modaldata.tables['geometry'].ix[ind, 'clr_a'].values[0]]
self.modaldata.tables['geometry'].ix[ind, 'clr_r']=1
self.modaldata.tables['geometry'].ix[ind, 'clr_g']=0
self.modaldata.tables['geometry'].ix[ind, 'clr_b']=0
self.modaldata.tables['geometry'].ix[ind, 'clr_a']=1
def handle_node_clicked(self):
'''
Check if click was near a node, if it was then add it to selection, if coincident with previously selected node,
deselect it. Also node is colored if selected.
:return:
'''
#get cube size for determining selection sphere size
for model_id, model_obj in self.models.items():
if model_obj.activated:
cube_scale, lcs_scale=model_obj.get_cube_and_lcs_scale()
#look only among activated models
act_mod=[]
for model_id, model_obj in self.models.items():
if model_obj.activated:
act_mod.append(model_obj.model_id)
nodes=self.modaldata.tables['geometry'][self.modaldata.tables['geometry']['x'].notnull()]
nodes=nodes[nodes['model_id'].isin(act_mod)]
nodes_index=nodes.index.values
ind=-1
node_data=nodes.ix[:,['x','y','z']].values
# CHECK if nodes are near clicked point
start_point=self.model_view.ray[0] #get ray data from 3d view widget
ray_dir=self.model_view.ray[1]
#sel_sph_r=0.05 # selection sphere radius
sel_sph_r=cube_scale*3
aux_1=-node_data+start_point
aux_1=aux_1.astype(np.float64)
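# Ray-sphere hit test (a sketch of the math behind b and c below): points on
# the ray are p(t) = start_point + t*ray_dir with ray_dir assumed normalized;
# substituting into |p(t) - node|**2 = r**2 gives t**2 + 2*b*t + c = 0 where
# b = dot(ray_dir, start_point - node), c = |start_point - node|**2 - r**2,
# so a node lies under the click exactly when the discriminant b**2 - c >= 0.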
b=inner1d(ray_dir,aux_1)
c=inner1d(aux_1,aux_1)-sel_sph_r**2
#get boolean array - true means that node is under mouse
test=(b**2-c)>=0
#check for coincident nodes!
coincident_nodes=np.sum(test)-1 # =0 for a single node, >0 for coincident nodes, -1 when nothing was hit
if coincident_nodes==0:
self.color_selected_node(test,nodes_index,nodes)
self.plot_activated_models()
elif coincident_nodes>0:
#TODO: handle this!
print('multiple nodes clicked! - NOT HANDLED YET')
elif coincident_nodes==-1:
#TODO: handle this!
print('blank space clicked')
def clear_node_selection(self):
"""
Clear selected nodes and restore node colors to default
:return:
"""
self.selection=[]
for ind in self.selection_ind:
self.modaldata.tables['geometry'].ix[ind, 'clr_r']=self.selection_color[0]
self.modaldata.tables['geometry'].ix[ind, 'clr_g']=self.selection_color[1]
self.modaldata.tables['geometry'].ix[ind, 'clr_b']=self.selection_color[2]
self.modaldata.tables['geometry'].ix[ind, 'clr_a']=self.selection_color[3]
self.selection_ind=[]
self.selection_color=[]
self.plot_activated_models()
def mouse_clicked(self):
"""
For 2D plot cross hair selection
:param evt:
:return:
"""
#only select nodes if widget is not in geometry mode
if self.widget_mode!='nodes':
self.handle_node_clicked()
if self.widget_mode=='lines':
nr_of_nodes=2
if len(self.selection)==nr_of_nodes:
self.addElement(nr_of_nodes)
self.clear_node_selection()
if self.widget_mode=='elements':
nr_of_nodes=3
if len(self.selection)==nr_of_nodes:
self.addElement(nr_of_nodes)
self.clear_node_selection()
def addElement(self,nr_of_nodes):
"""
Add selection data to modal_data object as new element
:return:
"""
#get next pandas index
if len(self.modaldata.tables['elements_index'].index)==0:
ind=0
node_ind=0
element_id=1
else:
ind= self.modaldata.tables['elements_index'].index.max() + 1
element_id= self.modaldata.tables['elements_index']['element_id'].max() + 1
node_ind= self.modaldata.tables['elements_values'].index.max() + 1
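# e.g. the first element in an empty project gets ind=0, element_id=1 and
# node_ind=0; later elements continue from the current maxima (illustrative).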
#store data from selection
#store element
model_id=self.selection[0][4]
def get_color(id,elem_type):
for model_id, model_obj in self.models.items():
if model_id==id:
if elem_type=='triangle':
color=model_obj.cur_triangle_color
elif elem_type=='line':
color=model_obj.cur_line_color
return color
aux_color=self.selection[0][5]
if nr_of_nodes==3:
element_descriptor='triangle'
color=get_color(model_id,element_descriptor)
self.modaldata.tables['elements_index'].loc[ind]=[model_id, element_id, element_descriptor, aux_color, nr_of_nodes,
color.red() / 255., color.green() / 255., color.blue() / 255., color.alpha() / 255.]
#store nodes
for i in range(nr_of_nodes):
node_id=self.selection[i][0]
node_pos=i
self.modaldata.tables['elements_values'].loc[node_ind]=[model_id, element_id, node_id, node_pos]
node_ind=node_ind+1
if nr_of_nodes==2:
element_descriptor='line'
color=get_color(model_id,element_descriptor)
self.modaldata.tables['elements_index'].loc[ind]=[model_id, element_id, element_descriptor, aux_color, nr_of_nodes,
color.red() / 255., color.green() / 255., color.blue() / 255., color.alpha() / 255.]
#store nodes
for i in range(nr_of_nodes):
node_id=self.selection[i][0]
node_pos=i
self.modaldata.tables['elements_values'].loc[node_ind]=[model_id, element_id, node_id, node_pos]
node_ind=node_ind+1
## for line the third node is same as second
#update table model
self.populate_elem_table_view([model_id])
def delete_selection_aux(self):
"""
Delete selection in table view via context menu
:return:
"""
if self.gcs_type==0:
self.delete_selection(self.geom_table_view,self.geom_table_model)
if self.gcs_type==1:
self.delete_selection(self.cyl_geom_table_view,self.cyl_geom_table_model)
def delete_selection(self,geom_table_view,geom_table_model):
if self.widget_mode=='nodes':
cells=geom_table_view.selectedIndexes()
cells.sort()
# start index is where first cell is selected (caution: table view shows only a view into table model,
# selections indexes are relative to current view!)
curr_row=cells[0].model().datatable.index.values[0]
cols=[]
rows=[]
for cell in cells:
rows.append(curr_row+cell.row())
cols.append(cells[0].model().datatable.columns[cell.column()])
geom_table_model.datatable.ix[rows,cols]=np.nan
geom_table_model.dataIn.ix[rows,cols]=np.nan # this is necessary as update method does not work with NANs
geom_table_model.dataIn.update(geom_table_model.datatable)
geom_table_model.dataChanged.emit(geom_table_model.createIndex(0, 0),
geom_table_model.createIndex(geom_table_model.rowCount(0),
geom_table_model.columnCount(0)))
geom_table_model.layoutChanged.emit()
if self.widget_mode=='lines' or self.widget_mode=='elements':
rows=self.elem_table_view.selectionModel().selectedRows()
rows.sort()
el_id_list=[]
for row in rows:
el_id_list.append(self.elem_table_model.datatable['element_id'].iloc[[row.row()]].values[0])
element_id_mask=self.modaldata.tables['elements_values']['element_id'].isin(el_id_list)
self.modaldata.tables['elements_values'].drop(self.modaldata.tables['elements_values']['element_id'].index[element_id_mask], inplace=True)
element_id_mask=self.elem_table_model.datatable['element_id'].isin(el_id_list)
self.elem_table_model.datatable.drop(self.elem_table_model.datatable['element_id'].index[element_id_mask],inplace=True) # change stuff in GUI
self.elem_table_model.dataIn.update(self.elem_table_model.datatable)
element_id_mask=self.elem_table_model.dataIn['element_id'].isin(el_id_list)
self.elem_table_model.dataIn.drop(self.elem_table_model.dataIn['element_id'].index[element_id_mask],inplace=True) # change stuff directly in modal data obj
#PyQt
self.elem_table_model.dataChanged.emit(self.elem_table_model.createIndex(0, 0),
self.elem_table_model.createIndex(self.elem_table_model.rowCount(0),
self.elem_table_model.columnCount(0)))
self.elem_table_model.layoutChanged.emit()
def copy_selection(self):
if self.gcs_type==0:
cells=self.geom_table_view.selectedIndexes()
elif self.gcs_type==1:
cells=self.cyl_geom_table_view.selectedIndexes()
cells.sort()
curr_row=cells[0].row()
text=''
for cell in cells:
if len(text)==0:
text=str(cell.data())
else:
if cell.row()!=curr_row:
#text=text+' \\n '
text=text+os.linesep # os independent newline seperator
curr_row=curr_row+1
else:
text=text+'\t'
text=text+str(cell.data())
QtCore.QCoreApplication.instance().clipboard().setText(text)
def paste_selection(self):
text=QtCore.QCoreApplication.instance().clipboard().text()
lines=text.splitlines()
if self.gcs_type==0:
cells=self.geom_table_view.selectedIndexes()
elif self.gcs_type==1:
cells=self.cyl_geom_table_view.selectedIndexes()
cells.sort()
# start index is where first cell is selected (caution: table view shows only a view into table model,
# selections indexes are relative to current view!)
curr_row=cells[0].model().datatable.index.values[0]+cells[0].row()
curr_col=cells[0].column()
# get selection dimensions
num_of_cols=len(lines[0].split('\t'))
num_of_rows=len(lines)
# expand table if number of rows in clipboard is larger than current table size
for model_id in self.activated_models:
#get node index corresponding to the existing geometry table
model_mask=self.modaldata.tables['geometry'].ix[:,'model_id']==model_id
node_index=self.modaldata.tables['geometry'].ix[model_mask].index
if (curr_row+num_of_rows)>len(node_index):
# add rows for selected model
rows_to_add=curr_row+num_of_rows-len(node_index)
self.add_geom_rows(rows_to_add=rows_to_add)
# duplicate stuff from clipboard based on the selection size
# we want to copy rows
if num_of_cols==(cells[-1].column()-cells[0].column()+1):
copy_rows=(cells[-1].row()-cells[0].row()+1)/num_of_rows
if copy_rows>1:
lines=lines*int(copy_rows) # int() floors the positive ratio; a list cannot be multiplied by a numpy float
# we want to copy columns
elif num_of_rows==(cells[-1].row()-cells[0].row()+1):
copy_cols=(cells[-1].column()-cells[0].column()-num_of_cols+1)/num_of_cols
if copy_cols>0:
lines=[(i+('\t'+i)*int(copy_cols)) for i in lines] # int(): str repetition needs an int, not a numpy float
for line in lines:
data=line.split('\t')
for val in data:
if val=='':
#skip empty cell
curr_col=curr_col+1
else:
try:
if self.gcs_type==0:
self.geom_table_model.datatable.set_value(curr_row, cells[0].model().datatable.columns[curr_col], float(val))
if self.gcs_type==1:
self.cyl_geom_table_model.datatable.set_value(curr_row, cells[0].model().datatable.columns[curr_col], float(val))
except ValueError:
if self.gcs_type==0:
self.geom_table_model.datatable.set_value(curr_row, cells[0].model().datatable.columns[curr_col], float(val.replace(',', '.')))
if self.gcs_type==1:
self.cyl_geom_table_model.datatable.set_value(curr_row, cells[0].model().datatable.columns[curr_col], float(val.replace(',', '.')))
curr_col=curr_col+1
curr_col=cells[0].column() #restart column index
curr_row=curr_row+1
if self.gcs_type==0:
self.geom_table_model.dataIn.update(self.geom_table_model.datatable)
self.geom_table_model.dataChanged.emit(self.geom_table_model.createIndex(0, 0),
self.geom_table_model.createIndex(self.geom_table_model.rowCount(0),
self.geom_table_model.columnCount(0)))
self.geom_table_model.layoutChanged.emit()
if self.gcs_type==1:
self.cyl_geom_table_model.dataIn.update(self.geom_table_model.datatable)
self.cyl_geom_table_model.dataChanged.emit(self.cyl_geom_table_model.createIndex(0, 0),
self.cyl_geom_table_model.createIndex(self.cyl_geom_table_model.rowCount(0),
self.cyl_geom_table_model.columnCount(0)))
self.cyl_geom_table_model.layoutChanged.emit()
def keyPressEvent(self,evt):
"""
Catch Ctrl+C and Ctrl+V to handle copying from clipboard
Catch Delete to delete values in selected cells
:param evt:
:return:
"""
if evt.key()==QtCore.Qt.Key_C and evt.modifiers()==QtCore.Qt.ControlModifier:
self.copy_selection()
if evt.key()==QtCore.Qt.Key_V and evt.modifiers()==QtCore.Qt.ControlModifier:
self.paste_selection()
if evt.key()==QtCore.Qt.Key_Delete:
self.delete_selection_aux()
super(self.__class__,self).keyPressEvent(evt)
def create_toolbar_actions(self):
super(self.__class__,self).create_toolbar_actions()
self.act_new_model = QtWidgets.QAction('New model', self,
statusTip='Create new model', triggered=self.new_model)
self.act_delete_model = QtWidgets.QAction('Delete model', self,
statusTip='Delete model', triggered=self.delete_model_dialog)
self.act_nodes_mode = QtWidgets.QAction('Nodes', self,
statusTip='Geometry input mode', triggered=self.nodes_data_mode)
self.act_lines_mode = QtWidgets.QAction('Lines', self,
statusTip='Lines input mode', triggered=self.lines_data_mode)
self.act_elements_mode = QtWidgets.QAction('Elements', self,
statusTip='Elements input mode', triggered=self.elements_data_mode)
def create_model_view_actions(self):
super(self.__class__,self).create_model_view_actions()
self.elem_desel_act = QtWidgets.QAction('Deselect elements', self, checkable=False,
statusTip='Clear element selection', triggered=partial(self.handle_elem_select,True))
def nodes_data_mode(self):
self.elem_table_view.hide()
if self.gcs_type==0:
self.geom_table_view.show()
self.cyl_geom_table_view.hide()
#cartesian gcs
self.geom_table_model.update(self.modaldata.tables['geometry'], self.activated_models, self.fields)
elif self.gcs_type==1:
self.cyl_geom_table_view.show()
self.geom_table_view.hide()
#cylindrical csys
self.cyl_geom_table_model.update(self.modaldata.tables['geometry'], self.activated_models, self.cyl_fields)
self.widget_mode = 'nodes'
self._button3.setChecked(True)
self._button6.setChecked(False)
self._button4.setChecked(False)
def lines_data_mode(self):
self.elem_table_view.show()
self.geom_table_view.hide()
self.cyl_geom_table_view.hide()
self.widget_mode = 'lines'
self._button3.setChecked(False)
self._button6.setChecked(True)
self._button4.setChecked(False)
def elements_data_mode(self):
self.elem_table_view.show()
self.geom_table_view.hide()
self.cyl_geom_table_view.hide()
self.widget_mode = 'elements'
self._button3.setChecked(False)
self._button6.setChecked(False)
self._button4.setChecked(True)
def model_view_context_menu(self, pos):
menu = QtWidgets.QMenu()
menu.addAction(self.act_fit_view)
menu.addAction(self.elem_desel_act)
display_menu = menu.addMenu('Display')
display_menu.addAction(self.plot_nodes_act)
display_menu.addAction(self.plot_lines_act)
display_menu.addAction(self.plot_elements_act)
display_menu.addAction(self.plot_node_lcs_act)
display_menu.addAction(self.plot_node_labels_act)
display_menu.addAction(self.plot_gcs_act)
#display_menu.addMenu('Trace lines')
color_menu = menu.addMenu('Colors')
color_menu.addAction(self.node_color_act)
color_menu.addAction(self.line_color_act)
color_menu.addAction(self.elem_color_act)
csys_menu = menu.addMenu('Change csys')
csys_menu.addAction(self.cart_csys_act)
csys_menu.addAction(self.cyl_csys_act)
menu.exec_(QtGui.QCursor.pos())
def paintEvent(self, event):
# button sizes
w = 140 #this is overridden by css
h = 33
border_thk=1
# app window size
window_width=self.rect().width()
window_height=self.rect().height()
# global positioning of buttons
x_margin=20
x = (window_width - w - x_margin-2*border_thk)
y = 0.2*window_height
offset=h+5
# relative positioning of buttons
self._button.setGeometry(x,y,w,h)
self._button5.setGeometry(x,y+offset,w,h)
self._button2.setGeometry(x,y+2*offset,w,h)
self._b_geom_prim.setGeometry(x,y+3*offset,w,h)
# positioning of elements/geometry table
table_width=window_width*0.6
table_height=window_height*0.3
table_x=window_width/2-table_width/2
table_y=0.68*window_height
x_btn=window_width/2-1.5*w-5
y_btn=40
self._button3.setGeometry(x_btn,table_y-y_btn,w,h)
self._button6.setGeometry(x_btn+w+5,table_y-y_btn,w,h)
self._button4.setGeometry(x_btn+2*w+10,table_y-y_btn,w,h)
self.cyl_geom_table_view.setGeometry(table_x,table_y,table_width,table_height)
self.cyl_geom_table_view.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)
self.geom_table_view.setGeometry(table_x,table_y,table_width,table_height)
self.geom_table_view.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)
self.elem_table_view.setGeometry(table_x,table_y,table_width,table_height)
self.elem_table_view.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)
# selected model label
#self._label.setGeometry(window_width/2-self._label.width()/2,0.1*window_height,200,20)
# create buttons for available models
offset=0
x = x_margin
y = 0.2*window_height
for model_id,button in self.model_buttons.items():
width = self._button.width()+20
height = self._button.height()
button.setGeometry(x,y+offset,width,height)
offset=offset+height+5
def table_view_context_menu(self, pos):
menu = QtWidgets.QMenu()
menu.addAction(self.act_delete)
menu.addAction(self.act_copy)
menu.addAction(self.act_paste)
menu.addAction(self.elem_desel_act)
menu.addAction(self.add_rows_act)
menu.exec_(QtGui.QCursor.pos())
def model_btn_context_menu(self, pos):
#get model button which was right clicked
self.sending_button = self.sender()
menu = QtWidgets.QMenu()
menu.addAction(self.act_model_rename)
menu.exec_(QtGui.QCursor.pos())
def model_btn_context_menu_act(self):
self.act_model_rename = QtWidgets.QAction('Rename', self, statusTip='Rename model', triggered=self.rename_model)
def context_menu_act(self):
self.act_delete = QtWidgets.QAction('Delete', self, statusTip='Delete selection', triggered=self.delete_selection_aux)
self.act_copy = QtWidgets.QAction('Copy', self, statusTip='Copy selection', triggered=self.copy_selection)
self.act_paste = QtWidgets.QAction('Paste', self, statusTip='Paste selection', triggered=self.paste_selection)
self.add_rows_act = QtWidgets.QAction('Add 100 rows', self, checkable=False,
statusTip='Add 100 blank rows', triggered=partial(self.add_geom_rows,rows_to_add=100))
def create_layout(self):
"""
Create layout of the central Qwidget and add widgets
:return:
"""
super(self.__class__,self).create_layout()
self._button = QtWidgets.QPushButton(qta.icon('fa.plus-circle', color='white'),'New model', self)
self._button.setObjectName('medium')
self._button.clicked.connect(self.new_model)
self._button5 = QtWidgets.QPushButton(qta.icon('fa.trash-o', color='white'),'Delete model', self)
self._button5.setObjectName('medium')
self._button5.clicked.connect(self.delete_model_dialog)
self._button2 = QtWidgets.QPushButton(qta.icon('fa.search', color='white'),'Fit view', self)
self._button2.setObjectName('medium')
self._button2.clicked.connect(self.autofit_3d_view)
self._b_geom_prim= QtWidgets.QPushButton(qta.icon('fa.industry', color='white'),'Create geometry', self)
self._b_geom_prim.setObjectName('medium')
self._b_geom_prim.clicked.connect(self.create_geom_primitive)
self._button3 = QtWidgets.QPushButton('Add nodes', self)
self._button3.setObjectName('table_button')
self._button3.setCheckable(True)
self._button3.setChecked(True)
self._button3.clicked.connect(self.nodes_data_mode)
self._button6 = QtWidgets.QPushButton('Add lines', self)
self._button6.setObjectName('table_button')
self._button6.setCheckable(True)
self._button6.clicked.connect(self.lines_data_mode)
self._button4 = QtWidgets.QPushButton('Add triangles', self)
self._button4.setObjectName('table_button')
self._button4.setCheckable(True)
self._button4.clicked.connect(self.elements_data_mode)
# common for both tables
self.context_menu_act() #create actions for table context menu
# Context menu actions for model buttons
self.model_btn_context_menu_act()
# geometry Table (cartesian coordinate system)
self.geom_table_model = TableModel(self)
self.fields = ['node_nums','x', 'y', 'z','thz', 'thy', 'thx' , 'model_id']
self.geom_table_model.update(self.modaldata.tables['geometry'], [0], self.fields) # show some data
self.geom_table_view = CustomQTableView(self)
self.geom_table_view.setModel(self.geom_table_model)
self.geom_table_model.dataChanged.connect(self.geometry_changed)
self.geom_table_view.setSortingEnabled(False)
self.geom_table_view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.geom_table_view.customContextMenuRequested.connect(self.table_view_context_menu)
#replace header from dataframe with custom one
self.geom_table_model.header_labels=[keys['node_nums']['15'],
keys['x']['15'],
keys['y']['15'],
keys['z']['15'],
keys['thz']['15'],
keys['thy']['15'],
keys['thx']['15'] ,
keys['model_id']['15']]
# geometry Table (cylindrical coordinate system)
self.cyl_geom_table_model = TableModel(self)
self.cyl_fields = ['node_nums','r', 'phi', 'z','cyl_thz', 'thy', 'thx' , 'model_id']
self.cyl_geom_table_model.update(self.modaldata.tables['geometry'], [0], self.cyl_fields) # show some data
self.cyl_geom_table_view = CustomQTableView(self)
self.cyl_geom_table_view.setModel(self.cyl_geom_table_model)
self.cyl_geom_table_model.dataChanged.connect(self.geometry_changed)
self.cyl_geom_table_view.hide()
self.cyl_geom_table_view.setSortingEnabled(False)
self.cyl_geom_table_view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.cyl_geom_table_view.customContextMenuRequested.connect(self.table_view_context_menu)
#replace header from dataframe with custom one
self.cyl_geom_table_model.header_labels=[keys['node_nums']['15'],
keys['r']['15'],
keys['phi']['15'],
keys['z']['15'],
keys['cyl_thz']['15'],
keys['thy']['15'],
keys['thx']['15'] ,
keys['model_id']['15']]
# elements Table
self.elem_table_model = TableModel(self)
#print(self.modal_data.tables['analysis_index'])
self.elem_fields = ['model_id', 'element_id', 'element_descriptor', 'color',
'nr_of_nodes']
self.elem_table_model.update(self.modaldata.tables['elements_index'], [0], self.elem_fields) # show some data
self.elem_table_view = CustomQTableView(self)
self.elem_table_view.setModel(self.elem_table_model)
self.elem_table_view.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.elem_table_model.dataChanged.connect(self.plot_activated_models)
self.elem_table_view.setMinimumHeight(150)
self.elem_table_view.setSortingEnabled(True)
self.elem_table_view.hide()
self.elem_table_view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.elem_table_view.customContextMenuRequested.connect(self.table_view_context_menu)
#replace header from dataframe with custom one
self.elem_table_model.header_labels=[keys['model_id']['15'],
keys['element_id']['15'],
keys['element_descriptor']['15'],
keys['color']['15'],
keys['nr_of_nodes']['15']]
self.elem_table_view.setColumnHidden(3,True) #hide color
self.elem_table_view.setColumnHidden(4,True) #hide nr_of_nodes
selection = self.elem_table_view.selectionModel()
selection.selectionChanged.connect(self.handle_elem_select)
def restore_elem_color(self,elem_ids):
"""
Change element color to original (before selection)
:param elem_ids:
:return:
"""
#restore color
element_id_mask=self.modaldata.tables['elements_index']['element_id'].isin(elem_ids)
self.modaldata.tables['elements_index'].ix[element_id_mask, 'clr_r']=self.selected_elem_col[0] # RGB values 0-1
self.modaldata.tables['elements_index'].ix[element_id_mask, 'clr_g']=self.selected_elem_col[1] # RGB values 0-1
self.modaldata.tables['elements_index'].ix[element_id_mask, 'clr_b']=self.selected_elem_col[2] # RGB values 0-1
self.modaldata.tables['elements_index'].ix[element_id_mask, 'clr_a']=self.selected_elem_col[3] # alpha values 0-1
def change_elem_color(self,elem_ids,selection_color):
#change color of selected elements
element_id_mask=self.modaldata.tables['elements_index']['element_id'].isin(elem_ids)
#store current element color
self.selected_elem_col= self.modaldata.tables['elements_index'][element_id_mask][['clr_r', 'clr_g', 'clr_b', 'clr_a']].values[0, :]
self.modaldata.tables['elements_index'].ix[element_id_mask, 'clr_r']= selection_color[0] / 255. # RGB values 0-1
self.modaldata.tables['elements_index'].ix[element_id_mask, 'clr_g']= selection_color[1] / 255. # RGB values 0-1
self.modaldata.tables['elements_index'].ix[element_id_mask, 'clr_b']= selection_color[2] / 255. # RGB values 0-1
self.modaldata.tables['elements_index'].ix[element_id_mask, 'clr_a']= selection_color[3] / 255. # alpha values 0-1
def handle_elem_select(self,deselect=False):
"""
Change color of elements selected in element table
:return:
"""
#element selection color
#TODO: move this to color palette?
#TODO: fix - selecting multiple lines and triangles changes their color - element color change must be element-type sensitive
rgba_color = QtGui.QColor(255, 0, 0, 255)
rgba_color = pg.colorTuple(rgba_color)
rows=self.elem_table_view.selectionModel().selectedRows()
rows.sort()
new_elem_ids=[] # newly selected elements
for row in rows:
new_elem_ids.append(self.elem_table_model.datatable['element_id'].iloc[[row.row()]].values[0])
if deselect==True: # '==True' is deliberate: when connected to selectionChanged this receives a (truthy) QItemSelection
self.restore_elem_color(self.selected_elem_ids)
self.selected_elem_ids=[]
else:
#restore color of previously selected elements
if len(self.selected_elem_ids)!=0:
self.restore_elem_color(self.selected_elem_ids)
#change color of selected elements
if len(new_elem_ids)!=0:
self.change_elem_color(new_elem_ids,rgba_color)
# store current selection
self.selected_elem_ids=new_elem_ids
self.plot_activated_models(wheel_event=True)
def create_geom_primitive(self):
"""
Create geometry primitives (nodes + line/triangle elements) for currently active model
:return:
"""
response,input_data=dialog_geom_primitives.return_data()
if response==1:
if input_data['geom_type']=='line':
self.create_line_geom(input_data)
if input_data['geom_type']=='plane':
self.create_plane_geom(input_data)
if input_data['geom_type']=='box':
self.create_box_geom(input_data)
if input_data['geom_type']=='cylinder':
self.create_cyl_geom(input_data)
def create_line_geom(self,line_data):
"""
Create line geometry based on user input (for currently active model)
:return:
"""
xs=float(line_data['xs']) # s = start point
ys=float(line_data['ys'])
zs=float(line_data['zs'])
xe=float(line_data['xe']) # e = end point
ye=float(line_data['ye'])
ze=float(line_data['ze'])
num_of_points=int(line_data['num_of_points'])
start_num=float(line_data['start_num'])
s_point=np.array((xs,ys,zs))
e_point=np.array((xe,ye,ze))
for model_id in self.activated_models:
node_nums=np.arange(start_num,start_num+num_of_points)
line_vec=(e_point-s_point)
dir_arr=np.tile(line_vec,(num_of_points,1))
div_arr=np.linspace(0,1,num_of_points)
div_arr_rshp=np.tile(div_arr.reshape(num_of_points,1),3)
nodes=np.tile(s_point,(num_of_points,1))+div_arr_rshp*dir_arr
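# The block above evaluates the parametric line p(t) = s + t*(e - s) at
# num_of_points equally spaced values of t in [0, 1]. A minimal standalone
# sketch of the same idea (illustrative only, not part of this class):
#
#   import numpy as np
#   s, e = np.array([0., 0., 0.]), np.array([3., 0., 0.])
#   t = np.linspace(0, 1, 4).reshape(-1, 1)  # 4 points -> t = 0, 1/3, 2/3, 1
#   nodes = s + t * (e - s)                  # rows: [0 0 0], [1 0 0], [2 0 0], [3 0 0]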
#realign index in order to prevent duplicate node names (geometry data frame starts with 1 by default)
#node_index=node_nums-1
#get node index corresponding to the existing geometry table
model_mask=self.modaldata.tables['geometry'].ix[:,'model_id']==model_id
node_mask=self.modaldata.tables['geometry'].ix[:,'node_nums'].isin(node_nums)
final_mask=model_mask & node_mask
node_index=self.modaldata.tables['geometry'].ix[final_mask].index
if len(node_nums)>len(node_index):
# add rows for selected model
rows_to_add=len(node_nums)-len(node_index)
self.add_geom_rows(rows_to_add=rows_to_add)
# get index
model_mask=self.modaldata.tables['geometry'].ix[:,'model_id']==model_id
node_mask=self.modaldata.tables['geometry'].ix[:,'node_nums'].isin(node_nums)
final_mask=model_mask*node_mask
node_index=self.modaldata.tables['geometry'].ix[final_mask].index
#create node data
df=pd.DataFrame(index=node_index, columns=self.modaldata.tables['geometry'].columns)
df['model_id']=model_id
df['node_nums']=node_nums
df['x']=nodes[:,0]
df['y']=nodes[:,1]
df['z']=nodes[:,2]
#TODO: orient LCS according to line orientation
df['thx']=0
df['thy']=0
df['thz']=0
rgba_color = pg.colorTuple(QtGui.QColor(0,255,0,255))
df['clr_r']=rgba_color[0]/ 255. # RGB values 0-1
df['clr_g']=rgba_color[1]/ 255. # RGB values 0-1
df['clr_b']=rgba_color[2]/ 255. # RGB values 0-1
df['clr_a']=rgba_color[3]/ 255. # alpha values 0-1
#calculate r,phi from x,y (arctan2 covers all four quadrants and is safe at r=0, unlike arcsin(y/r))
df['r'] = np.sqrt(df['x']**2 + df['y']**2)
df['phi'] = np.degrees(np.arctan2(df['y'].astype(np.float64), df['x'].astype(np.float64)))
df['cyl_thz']= 0
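# Quick sanity check for the conversion above (illustrative values only):
#
#   import numpy as np
#   x, y = -1.0, 1.0
#   r = np.hypot(x, y)                  # sqrt(2)
#   phi = np.degrees(np.arctan2(y, x))  # 135.0 (arcsin(y/r) would wrongly give 45.0)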
#update geometry table with new data
self.modaldata.tables['geometry'].update(df)#,overwrite=True)
#self.modal_data.tables['geometry']=pd.concat([self.modal_data.tables['geometry'],df])
#create element data
#get next pandas index
if len(self.modaldata.tables['elements_index'].index)==0:
ind=0
elem_node_ind=0
element_id=0
else:
ind= self.modaldata.tables['elements_index'].index.max() + 1
element_id= self.modaldata.tables['elements_index']['element_id'].max() + 1
elem_node_ind= self.modaldata.tables['elements_values'].index.max() + 1
for model_id_aux, model_obj_aux in self.models.items():
if model_obj_aux.model_id==model_id:
color=model_obj_aux.cur_triangle_color
element_descriptor='line'
nr_of_nodes=2
tot_num_of_elem=num_of_points-1 #total number of elements
elem_nums=np.arange(ind,ind+tot_num_of_elem)
elem_ids=np.arange(element_id,element_id+tot_num_of_elem)
df_elem=pd.DataFrame(index=elem_nums, columns=self.modaldata.tables['elements_index'].columns)
df_elem['model_id']=model_id
df_elem['element_id']=elem_ids
df_elem['element_descriptor']=element_descriptor
df_elem['color']=color
df_elem['nr_of_nodes']=nr_of_nodes
df_elem['clr_r']=color.red()/255.
df_elem['clr_g']=color.green()/255.
df_elem['clr_b']=color.blue()/255.
df_elem['clr_a']=color.alpha()/255.
if len(self.modaldata.tables['elements_index'].index)==0:
self.modaldata.tables['elements_index']=df_elem
else:
#self.modal_data.tables['elements_index'].update(df_elem)#,overwrite=True)
self.modaldata.tables['elements_index']=pd.concat([self.modaldata.tables['elements_index'], df_elem])
#store nodes
#tot_elem_nums=circ_div*(height_div-1)*2 #total number of elements
#elem_nums=np.arange(element_id,element_id+tot_elem_nums+1)
#walk through nodes and store elements
pos_1=[]
pos_2=[]
node_number=start_num
for i in range(1,int(num_of_points)):
pos_1.append(node_number)
pos_2.append(node_number+1)
node_number=node_number+1
df_elem_index=np.arange(elem_node_ind,elem_node_ind+len(np.tile(elem_ids,2)))
df_elem_nodes=pd.DataFrame(index=df_elem_index, columns=self.modaldata.tables['elements_values'].columns)
#df_elem_nodes['model_id']=model_id
df_elem_nodes['element_id']=np.tile(elem_ids,2)
df_elem_nodes['node_id']=np.asarray(pos_1+pos_2) #node numbers
df_elem_nodes['node_pos']=np.repeat([1,2],len(pos_1)) #node position in element
if len(self.modaldata.tables['elements_values'].index)==0:
self.modaldata.tables['elements_values']=df_elem_nodes
else:
#self.modal_data.tables['elements_values'].update(df_elem_nodes)#,overwrite=True)
self.modaldata.tables['elements_values']=pd.concat([self.modaldata.tables['elements_values'], df_elem_nodes])
#refresh
self.calc_node_lcs()
self.populate_table_view(self.activated_models)
self.populate_elem_table_view(self.activated_models)
self.plot_activated_models()
def add_geom_rows(self,rows_to_add=100):
"""
Add 'rows_to_add' blank rows (default 100) to the geometry table of each activated model
:return:
"""
for model_id in self.activated_models:
#get node index corresponding to the existing geometry table
model_mask=self.modaldata.tables['geometry'].ix[:,'model_id']==model_id
if len(self.modaldata.tables['geometry'][model_mask].index)==0:
ind=0
node_num=0
else:
ind= self.modaldata.tables['geometry'][model_mask].index.max() + 1
node_num = self.modaldata.tables['geometry'].ix[model_mask,'node_nums'].max() + 1
node_nums=np.arange(node_num,node_num+rows_to_add)
node_index=np.arange(ind,ind+rows_to_add)
#create node data
df=pd.DataFrame(index=node_index, columns=self.modaldata.tables['geometry'].columns)
df['model_id']=model_id
df['node_nums']=node_nums
rgba_color = pg.colorTuple(QtGui.QColor(0,255,0,255))
df['clr_r']=rgba_color[0]/ 255. # RGB values 0-1
df['clr_g']=rgba_color[1]/ 255. # RGB values 0-1
df['clr_b']=rgba_color[2]/ 255. # RGB values 0-1
df['clr_a']=rgba_color[3]/ 255. # alpha values 0-1
if len(self.modaldata.tables['geometry'].index)==0:
self.modaldata.tables['geometry']=df
else:
#self.modal_data.tables['elements_values'].update(df_elem_nodes)#,overwrite=True)
self.modaldata.tables['geometry']=pd.concat([self.modaldata.tables['geometry'], df])
#refresh
self.populate_table_view(self.activated_models)
def create_plane_nodes_df(self,plane_orient,len1,len2,div1,div2,start_num,model_id,x_offset,y_offset,z_offset):
maximum_number_of_nodes=div1*div2
node_nums=np.arange(start_num,start_num+maximum_number_of_nodes)
len1_arr = np.linspace(0, len1, div1)
len2_arr = np.linspace(0, len2, div2)
if plane_orient=='XY':
xx, yy = np.meshgrid(len1_arr, len2_arr)
zz=np.zeros((maximum_number_of_nodes))
if plane_orient=='YZ':
yy, zz = np.meshgrid(len1_arr, len2_arr)
xx=np.zeros((maximum_number_of_nodes))
if plane_orient=='ZX':
zz, xx = np.meshgrid(len1_arr, len2_arr)
yy=np.zeros((maximum_number_of_nodes))
#realign index in order to prevent duplicate node names (geometry data frame starts with 1 by default)
#node_index=node_nums-1
#get node index corresponding to the existing geometry table
model_mask=self.modaldata.tables['geometry'].ix[:,'model_id']==model_id
node_mask=self.modaldata.tables['geometry'].ix[:,'node_nums'].isin(node_nums)
final_mask=model_mask*node_mask
node_index=self.modaldata.tables['geometry'].ix[final_mask].index
if len(node_nums)>len(node_index):
# add rows for selected model
rows_to_add=len(node_nums)-len(node_index)
self.add_geom_rows(rows_to_add=rows_to_add)
# get index
model_mask=self.modaldata.tables['geometry'].ix[:,'model_id']==model_id
node_mask=self.modaldata.tables['geometry'].ix[:,'node_nums'].isin(node_nums)
final_mask=model_mask*node_mask
node_index=self.modaldata.tables['geometry'].ix[final_mask].index
#create node data
df=pd.DataFrame(index=node_index, columns=self.modaldata.tables['geometry'].columns)
df['model_id']=model_id
df['node_nums']=node_nums
df['x']=xx.flatten()+x_offset
df['y']=yy.flatten()+y_offset
df['z']=zz.flatten()+z_offset
df['thx']=0
df['thy']=0
df['thz']=0
rgba_color = pg.colorTuple(QtGui.QColor(0,255,0,255))
df['clr_r']=rgba_color[0]/ 255. # RGB values 0-1
df['clr_g']=rgba_color[1]/ 255. # RGB values 0-1
df['clr_b']=rgba_color[2]/ 255. # RGB values 0-1
df['clr_a']=rgba_color[3]/ 255. # alpha values 0-1
#calculate r,phi from x,y (arctan2 covers all four quadrants and is safe at r=0)
df['r'] = np.sqrt(df['x']**2 + df['y']**2)
df['phi'] = np.degrees(np.arctan2(df['y'].astype(np.float64), df['x'].astype(np.float64)))
df['cyl_thz']= 0
return df
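# Note on node ordering: np.meshgrid(len1_arr, len2_arr) followed by .flatten()
# is row-major, i.e. the first direction varies fastest. A small sketch,
# independent of this class:
#
#   import numpy as np
#   xx, yy = np.meshgrid(np.linspace(0, 1, 3), np.linspace(0, 2, 2))
#   # xx.flatten() -> [0. , 0.5, 1. , 0. , 0.5, 1. ]
#   # yy.flatten() -> [0. , 0. , 0. , 2. , 2. , 2. ]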
def create_plane_elem_df(self,div1,div2,start_num,model_id,custom_num=None):
#create element data
#get next pandas index
if custom_num is None:
if len(self.modaldata.tables['elements_index'].index)==0:
ind=0
elem_node_ind=0
element_id=0
else:
ind= self.modaldata.tables['elements_index'].index.max() + 1
element_id= self.modaldata.tables['elements_index']['element_id'].max() + 1
elem_node_ind= self.modaldata.tables['elements_values'].index.max() + 1
else:
ind=custom_num['ind']
element_id=custom_num['element_id']
elem_node_ind=custom_num['elem_node_ind']
for model_id_aux, model_obj_aux in self.models.items():
if model_obj_aux.model_id==model_id:
color=model_obj_aux.cur_triangle_color
element_descriptor='triangle'
nr_of_nodes=3
tot_num_of_elem=(div1-1)*(div2-1)*2 #total number of elements
elem_nums=np.arange(ind,ind+tot_num_of_elem)
elem_ids=np.arange(element_id,element_id+tot_num_of_elem)
df_elem=pd.DataFrame(index=elem_nums, columns=self.modaldata.tables['elements_index'].columns)
df_elem['model_id']=model_id
df_elem['element_id']=elem_ids
df_elem['element_descriptor']=element_descriptor
df_elem['color']=color
df_elem['nr_of_nodes']=nr_of_nodes
df_elem['clr_r']=color.red()/255.
df_elem['clr_g']=color.green()/255.
df_elem['clr_b']=color.blue()/255.
df_elem['clr_a']=color.alpha()/255.
#store element nodes
#walk through nodes and store elements
pos_1=[]
pos_2=[]
pos_3=[]
node_number=start_num
k=0
for i in range(1,int(div2+1)): # second direction (rows)
for j in range(1,int(div1+1)): # first direction (columns)
if j==div1:
#last column
pass
else:
if i==(div2): #vertical
#last row/last column
pass
else:
pos_1.append(node_number)
pos_2.append(node_number+1)
pos_3.append(node_number+1+div1)
pos_1.append(node_number)
pos_2.append(node_number+div1)
pos_3.append(node_number+1+div1)
node_number=node_number+1
df_elem_index=np.arange(elem_node_ind,elem_node_ind+len(np.tile(elem_ids,3)))
df_elem_nodes=pd.DataFrame(index=df_elem_index, columns=self.modaldata.tables['elements_values'].columns)
#df_elem_nodes['model_id']=model_id
df_elem_nodes['element_id']=np.tile(elem_ids,3)
df_elem_nodes['node_id']=np.asarray(pos_1+pos_2+pos_3) #node numbers
df_elem_nodes['node_pos']=np.repeat([1,2,3],len(pos_1)) #node position in element
return df_elem,df_elem_nodes
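# The loop above splits every grid cell into two triangles,
# (n, n+1, n+1+div1) and (n, n+div1, n+1+div1), where n is the lower-left node
# of the cell and div1 is the row stride. E.g. with div1 = 3 and start_num = 1,
# the first cell (nodes 1, 2, 4, 5) yields the triangles (1, 2, 5) and (1, 4, 5).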
def create_plane_geom(self,plane_data):
"""
Create plane geometry based on user input (for currently active model)
:return:
"""
plane_orient=plane_data['plane_orient']
len1=float(plane_data['len1'])
len2=float(plane_data['len2'])
div1=float(plane_data['div1'])
div2=float(plane_data['div2'])
x_offset=float(plane_data['x_offset'])
y_offset=float(plane_data['y_offset'])
z_offset=float(plane_data['z_offset'])
start_num=float(plane_data['start_num'])
for model_id in self.activated_models:
# get nodes
df=self.create_plane_nodes_df(plane_orient,len1,len2,div1,div2,start_num,model_id,x_offset,y_offset,z_offset)
#update geometry table with new data
self.modaldata.tables['geometry'].update(df)#,overwrite=True)
# get elements and element connectivity
df_elem,df_elem_nodes=self.create_plane_elem_df(div1,div2,start_num,model_id)
# update modal_data object with new geometry
if len(self.modaldata.tables['elements_index'].index)==0:
self.modaldata.tables['elements_index']=df_elem
else:
self.modaldata.tables['elements_index']=pd.concat([self.modaldata.tables['elements_index'], df_elem])
if len(self.modaldata.tables['elements_values'].index)==0:
self.modaldata.tables['elements_values']=df_elem_nodes
else:
self.modaldata.tables['elements_values']=pd.concat([self.modaldata.tables['elements_values'], df_elem_nodes])
#refresh
self.calc_node_lcs()
self.populate_table_view(self.activated_models)
self.populate_elem_table_view(self.activated_models)
self.plot_activated_models()
def create_box_geom(self,box_data):
"""
Create box geometry based on user input (for currently active model)
:return:
"""
lenx=float(box_data['lenx'])
leny=float(box_data['leny'])
lenz=float(box_data['lenz'])
divx=float(box_data['divx'])
divy=float(box_data['divy'])
divz=float(box_data['divz'])
x_offset=float(box_data['x_offset'])
y_offset=float(box_data['y_offset'])
z_offset=float(box_data['z_offset'])
start_num=float(box_data['start_num'])
for model_id in self.activated_models:
maximum_number_of_nodes=2*divx*divy+(divz-2)*(divy+(divx-1)+(divy-1)+(divx-2))
node_nums=np.arange(start_num,start_num+maximum_number_of_nodes)
# xz plane
spc_x=np.linspace(0,lenx,divx)
x_arr_1=np.repeat(spc_x,divz)
y_arr_1=np.zeros((divx*divz))
spc_z=np.linspace(0,lenz,divz)
z_arr_1=np.tile(spc_z[::-1],divx)
# far side yz plane
x_arr_2=np.repeat([lenx],divy*divz)
spc_y=np.linspace(0,leny,divy)
y_arr_2=np.repeat(spc_y,divz)
z_arr_2=np.tile(spc_z[::-1],divy)
# far side xz plane
spc_x=np.linspace(0,lenx,divx)
x_arr_3=np.repeat(spc_x[::-1],divz)
y_arr_3=np.repeat([leny],divx*divz)
spc_z=np.linspace(0,lenz,divz)
z_arr_3=np.tile(spc_z[::-1],divx)
# yz plane
x_arr_4=np.repeat([0],divy*divz)
spc_y=np.linspace(0,leny,divy)
y_arr_4=np.repeat(spc_y[::-1],divz)
z_arr_4=np.tile(spc_z[::-1],divy)
# xy plane (top)
x_arr_5=np.tile(spc_x,divy)
spc_y=np.linspace(0,leny,divy)
y_arr_5=np.repeat(spc_y,divx)
z_arr_5=np.repeat(lenz,divy*divx)
#remove corner nodes
x_mask=(x_arr_5!=lenx)*(x_arr_5!=0) # True where x coordinate is not on edge
y_mask=(y_arr_5!=leny)*(y_arr_5!=0) # True where y coordinate is not on edge
fin_mask=x_mask*y_mask
x_arr_5=x_arr_5[fin_mask]
y_arr_5=y_arr_5[fin_mask]
z_arr_5=z_arr_5[fin_mask]
# xy plane (bottom)
x_arr_6=np.tile(spc_x,divy)
spc_y=np.linspace(0,leny,divy)
y_arr_6=np.repeat(spc_y,divx)
z_arr_6=np.repeat(0,divy*divx)
#remove corner nodes
x_mask=(x_arr_6!=lenx)*(x_arr_6!=0) # True where x coordinate is not on edge
y_mask=(y_arr_6!=leny)*(y_arr_6!=0) # True where y coordinate is not on edge
fin_mask=x_mask*y_mask
x_arr_6=x_arr_6[fin_mask]
y_arr_6=y_arr_6[fin_mask]
z_arr_6=z_arr_6[fin_mask]
x_arr=np.concatenate((x_arr_1[:-divz],x_arr_2[:-divz],x_arr_3[:-divz],x_arr_4[:-divz],x_arr_5,x_arr_6))
y_arr=np.concatenate((y_arr_1[:-divz],y_arr_2[:-divz],y_arr_3[:-divz],y_arr_4[:-divz],y_arr_5,y_arr_6))
z_arr=np.concatenate((z_arr_1[:-divz],z_arr_2[:-divz],z_arr_3[:-divz],z_arr_4[:-divz],z_arr_5,z_arr_6))
#realign index in order to prevent duplicate node names (geometry data frame starts with 1 by default)
#node_index=node_nums-1
#get node index corresponding to the existing geometry table
model_mask=self.modaldata.tables['geometry'].ix[:,'model_id']==model_id
node_mask=self.modaldata.tables['geometry'].ix[:,'node_nums'].isin(node_nums)
final_mask=model_mask*node_mask
node_index=self.modaldata.tables['geometry'].ix[final_mask].index
if len(node_nums)>len(node_index):
# add rows for selected model
rows_to_add=len(node_nums)-len(node_index)
self.add_geom_rows(rows_to_add=rows_to_add)
# get index
model_mask=self.modaldata.tables['geometry'].ix[:,'model_id']==model_id
node_mask=self.modaldata.tables['geometry'].ix[:,'node_nums'].isin(node_nums)
final_mask=model_mask*node_mask
node_index=self.modaldata.tables['geometry'].ix[final_mask].index
#create node data
df=pd.DataFrame(index=node_index, columns=self.modaldata.tables['geometry'].columns)
df['model_id']=model_id
df['node_nums']=node_nums
df['x']=x_arr+x_offset
df['y']=y_arr+y_offset
df['z']=z_arr+z_offset
df['thx']=0
df['thy']=0
df['thz']=0
rgba_color = pg.colorTuple(QtGui.QColor(0,255,0,255))
df['clr_r']=rgba_color[0]/ 255. # RGB values 0-1
df['clr_g']=rgba_color[1]/ 255. # RGB values 0-1
df['clr_b']=rgba_color[2]/ 255. # RGB values 0-1
df['clr_a']=rgba_color[3]/ 255. # alpha values 0-1
#calculate r,phi from x,y (arctan2 covers all four quadrants and is safe at r=0)
df['r'] = np.sqrt(df['x']**2 + df['y']**2)
df['phi'] = np.degrees(np.arctan2(df['y'].astype(np.float64), df['x'].astype(np.float64)))
df['cyl_thz']= 0
#update geometry table with new data
self.modaldata.tables['geometry'].update(df)#,overwrite=True)
#self.modal_data.tables['geometry']=pd.concat([self.modal_data.tables['geometry'],df])
#
#create element data
#get next pandas index
if len(self.modaldata.tables['elements_index'].index)==0:
ind=0
elem_node_ind=0
element_id=0
else:
ind= self.modaldata.tables['elements_index'].index.max() + 1
element_id= self.modaldata.tables['elements_index']['element_id'].max() + 1
elem_node_ind= self.modaldata.tables['elements_values'].index.max() + 1
for model_id_aux, model_obj_aux in self.models.items():
if model_obj_aux.model_id==model_id:
color=model_obj_aux.cur_triangle_color
element_descriptor='triangle'
nr_of_nodes=3
tot_num_of_elem=4*(divx-1)*(divz-1)+4*(divy-1)*(divz-1)+4*(divy-1)*(divx-1) #total number of elements
elem_nums=np.arange(ind,ind+tot_num_of_elem)
elem_ids=np.arange(element_id,element_id+tot_num_of_elem)
df_elem=pd.DataFrame(index=elem_nums, columns=self.modaldata.tables['elements_index'].columns)
df_elem['model_id']=model_id
df_elem['element_id']=elem_ids
df_elem['element_descriptor']=element_descriptor
df_elem['color']=color
df_elem['nr_of_nodes']=nr_of_nodes
df_elem['clr_r']=color.red()/255.
df_elem['clr_g']=color.green()/255.
df_elem['clr_b']=color.blue()/255.
df_elem['clr_a']=color.alpha()/255.
if len(self.modaldata.tables['elements_index'].index)==0:
self.modaldata.tables['elements_index']=df_elem
else:
#self.modal_data.tables['elements_index'].update(df_elem)#,overwrite=True)
self.modaldata.tables['elements_index']=pd.concat([self.modaldata.tables['elements_index'], df_elem])
#store nodes
#walk through nodes and store elements
pos_1=[]
pos_2=[]
pos_3=[]
node_number=start_num
num_of_divs=int(2*divx+2*(divy-2)) # number of verticals along z
k=0
for i in range(1,num_of_divs+1):
for j in range(1,int(divz+1)):
if i==num_of_divs:
#last vertical line - elements have nodes also from first vertical line
if j==(divz):
#last row
pass
else:
pos_1.append(node_number)
pos_2.append(node_number+1)
pos_3.append(start_num+k+1)
pos_1.append(node_number)
pos_2.append(start_num+k)
pos_3.append(start_num+k+1)
k=k+1
else:
if j==(divz): #vertical
#last row/last column
pass
else:
pos_1.append(node_number)
pos_2.append(node_number+1)
pos_3.append(node_number+1+divz)
pos_1.append(node_number)
pos_2.append(node_number+divz)
pos_3.append(node_number+1+divz)
node_number=node_number+1
def get_nnum(x,y,z):
# get node number based on known location
x_mask = x_arr == x
y_mask = y_arr == y
z_mask = z_arr == z
fin_mask=x_mask*y_mask*z_mask
nnum=node_nums[fin_mask]
return nnum
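# Example use of the helper above (coordinates are compared before the offsets
# are applied): get_nnum(0, 0, lenz) returns the node number of the box corner
# at x = y = 0, z = lenz as a one-element array, since node_nums is filtered
# through a boolean mask.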
x_cord=np.linspace(0,lenx,divx)
y_cord=np.linspace(0,leny,divy)
# Top plane
z_cord=lenz
for i in range(0,int(divy-1)):
for j in range(0,int(divx-1)):
pos_1.append(get_nnum(x_cord[j],y_cord[i],z_cord))
pos_2.append(get_nnum(x_cord[j+1],y_cord[i],z_cord))
pos_3.append(get_nnum(x_cord[j+1],y_cord[i+1],z_cord))
pos_1.append(get_nnum(x_cord[j],y_cord[i],z_cord))
pos_2.append(get_nnum(x_cord[j],y_cord[i+1],z_cord))
pos_3.append(get_nnum(x_cord[j+1],y_cord[i+1],z_cord))
# Bottom plane
z_cord=0
for i in range(0,int(divy-1)):
for j in range(0,int(divx-1)):
pos_1.append(get_nnum(x_cord[j],y_cord[i],z_cord))
pos_2.append(get_nnum(x_cord[j+1],y_cord[i],z_cord))
pos_3.append(get_nnum(x_cord[j+1],y_cord[i+1],z_cord))
pos_1.append(get_nnum(x_cord[j],y_cord[i],z_cord))
pos_2.append(get_nnum(x_cord[j],y_cord[i+1],z_cord))
pos_3.append(get_nnum(x_cord[j+1],y_cord[i+1],z_cord))
df_elem_index=np.arange(elem_node_ind,elem_node_ind+len(np.tile(elem_ids,3)))
df_elem_nodes=pd.DataFrame(index=df_elem_index, columns=self.modaldata.tables['elements_values'].columns)
df_elem_nodes['element_id']=np.tile(elem_ids,3)
df_elem_nodes['node_id']=np.asarray(pos_1+pos_2+pos_3) #node numbers
df_elem_nodes['node_pos']=np.repeat([1,2,3],len(pos_1)) #node position in element
if len(self.modaldata.tables['elements_values'].index)==0:
self.modaldata.tables['elements_values']=df_elem_nodes
else:
#self.modal_data.tables['elements_values'].update(df_elem_nodes)#,overwrite=True)
self.modaldata.tables['elements_values']=pd.concat([self.modaldata.tables['elements_values'], df_elem_nodes])
#refresh
self.calc_node_lcs()
self.populate_table_view(self.activated_models)
self.populate_elem_table_view(self.activated_models)
self.plot_activated_models()
def create_cyl_geom(self,cylinder_data):
"""
Create cylinder geometry based on user input (for currently active model)
:return:
"""
cyl_r=float(cylinder_data['radius'])
cyl_h=float(cylinder_data['height'])
start_num=float(cylinder_data['start_num'])
num_orient=cylinder_data['num_orient']
z_offset=float(cylinder_data['z_offset'])
height_div=float(cylinder_data['height_div'])
circ_div=float(cylinder_data['circ_div'])
for model_id in self.activated_models:
maximum_number_of_nodes=height_div*circ_div
node_nums=np.arange(start_num,start_num+maximum_number_of_nodes)
cyl_r_array=np.repeat(cyl_r,maximum_number_of_nodes)
phi_div=360./circ_div
cyl_phi_single_row=np.arange(0,360.,phi_div)
if num_orient=='Vertical':
cyl_phi_array=np.repeat(cyl_phi_single_row,height_div) # VERTICAL NUMBERING
else:
cyl_phi_array=np.tile(cyl_phi_single_row,height_div) # HORIZONTAL NUMBERING
##bottom->up numbering
#cyl_z_array_single_row=np.arange(0,cyl_h+z_div,z_div)
#top->down numbering
cyl_z_array_single_row=np.linspace(0,cyl_h,height_div)
cyl_z_array_single_row=cyl_z_array_single_row[::-1]
if num_orient=='Vertical':
cyl_z_array=np.tile(cyl_z_array_single_row,circ_div) # VERTICAL NUMBERING
else:
cyl_z_array=np.repeat(cyl_z_array_single_row,circ_div) # HORIZONTAL NUMBERING
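# Numbering sketch for a small cylinder (circ_div = 3, height_div = 2):
# 'Vertical'   pairs np.repeat(phi) with np.tile(z), walking down each vertical
#              line first:  phi = 0, 0, 120, 120, 240, 240 / z = h, 0, h, 0, h, 0
# 'Horizontal' pairs np.tile(phi) with np.repeat(z), walking around each ring
#              first:       phi = 0, 120, 240, 0, 120, 240 / z = h, h, h, 0, 0, 0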
#realign index in order to prevent duplicate node names (geometry data frame starts with 1 by default)
#node_index=node_nums-1
#get node index corresponding to the existing geometry table
model_mask=self.modaldata.tables['geometry'].ix[:,'model_id']==model_id
node_mask=self.modaldata.tables['geometry'].ix[:,'node_nums'].isin(node_nums)
final_mask=model_mask*node_mask
node_index=self.modaldata.tables['geometry'].ix[final_mask].index
if len(node_nums)>len(node_index):
# add rows for selected model
rows_to_add=len(node_nums)-len(node_index)
self.add_geom_rows(rows_to_add=rows_to_add)
# get index
model_mask=self.modaldata.tables['geometry'].ix[:,'model_id']==model_id
node_mask=self.modaldata.tables['geometry'].ix[:,'node_nums'].isin(node_nums)
final_mask=model_mask*node_mask
node_index=self.modaldata.tables['geometry'].ix[final_mask].index
#create node data
df=pd.DataFrame(index=node_index, columns=self.modaldata.tables['geometry'].columns)
df['model_id']=model_id
df['node_nums']=node_nums
df['r']=cyl_r_array
df['phi']=cyl_phi_array
df['z']=cyl_z_array+z_offset
df['thx']=0
df['thy']=0
df['cyl_thz']=0
rgba_color = pg.colorTuple(QtGui.QColor(0,255,0,255))
df['clr_r']=rgba_color[0]/ 255. # RGB values 0-1
df['clr_g']=rgba_color[1]/ 255. # RGB values 0-1
df['clr_b']=rgba_color[2]/ 255. # RGB values 0-1
df['clr_a']=rgba_color[3]/ 255. # alpha values 0-1
#calculate x,y from r,phi
df['x'] = df['r'] * np.cos(df['phi'].astype(np.float64)*np.pi/180)
df['y'] = df['r'] * np.sin(df['phi'].astype(np.float64)*np.pi/180)
df['thz']= df['cyl_thz'] + df['phi']
#update geometry table with new data
self.modaldata.tables['geometry'].update(df)#,overwrite=True)
#self.modaldata.tables['geometry']=pd.concat([self.modaldata.tables['geometry'],df])
#create element data
#get next pandas index
if len(self.modaldata.tables['elements_index'].index)==0:
ind=0
elem_node_ind=0
element_id=0
else:
ind= self.modaldata.tables['elements_index'].index.max() + 1
element_id= self.modaldata.tables['elements_index']['element_id'].max() + 1
elem_node_ind= self.modaldata.tables['elements_values'].index.max() + 1
for model_id_aux, model_obj_aux in self.models.items():
if model_obj_aux.model_id==model_id:
color=model_obj_aux.cur_triangle_color
element_descriptor='triangle'
nr_of_nodes=3
tot_num_of_elem=circ_div*(height_div-1)*2 #total number of elements
elem_nums=np.arange(ind,ind+tot_num_of_elem)
elem_ids=np.arange(element_id,element_id+tot_num_of_elem)
df_elem=pd.DataFrame(index=elem_nums, columns=self.modaldata.tables['elements_index'].columns)
df_elem['model_id']=model_id
df_elem['element_id']=elem_ids
df_elem['element_descriptor']=element_descriptor
df_elem['color']=color
df_elem['nr_of_nodes']=nr_of_nodes
df_elem['clr_r']=color.red()/255.
df_elem['clr_g']=color.green()/255.
df_elem['clr_b']=color.blue()/255.
df_elem['clr_a']=color.alpha()/255.
if len(self.modaldata.tables['elements_index'].index)==0:
self.modaldata.tables['elements_index']=df_elem
else:
#self.modal_data.tables['elements_index'].update(df_elem)#,overwrite=True)
self.modaldata.tables['elements_index']=pd.concat([self.modaldata.tables['elements_index'], df_elem])
#store nodes
#tot_elem_nums=circ_div*(height_div-1)*2 #total number of elements
#elem_nums=np.arange(element_id,element_id+tot_elem_nums+1)
#walk through nodes and store elements
pos_1=[]
pos_2=[]
pos_3=[]
node_number=start_num
if num_orient=='Vertical':
k=0
for i in range(1,int(circ_div+1)): # circumference division
for j in range(1,int(height_div+1)): # height divisions
if i==circ_div:
#last circumference division - elements have nodes also from first division
if j==(height_div):
#last row
pass
else:
pos_1.append(node_number)
pos_2.append(node_number+1)
pos_3.append(start_num+k+1)
pos_1.append(node_number)
pos_2.append(start_num+k)
pos_3.append(start_num+k+1)
k=k+1
else:
if j==(height_div): #vertical
#last row/last column
pass
else:
pos_1.append(node_number)
pos_2.append(node_number+1)
pos_3.append(node_number+1+height_div)
pos_1.append(node_number)
pos_2.append(node_number+height_div)
pos_3.append(node_number+1+height_div)
node_number=node_number+1
else:
k=0
for i in range(1,int(height_div+1)): # height division
for j in range(1,int(circ_div+1)): # circumference divisions
if j==circ_div:
#last circumference division - elements have nodes also from first division
if i==(height_div):
#last row
pass
else:
pos_1.append((start_num-1)+i*circ_div) # 4, 8
pos_2.append(start_num+k*circ_div) # 1, 5
pos_3.append(start_num+i*circ_div) # 5, 9
pos_1.append((start_num-1)+i*circ_div) # 4, 8
pos_2.append((start_num-1)+(i+1)*circ_div) # 8, 12
pos_3.append(start_num+i*circ_div) # 5, 9
k=k+1
else:
if i==(height_div):
#last row
pass
else:
pos_1.append(node_number) # 1,2
pos_2.append(node_number+1) # 2,3
pos_3.append(node_number+circ_div) # 5,6
pos_1.append(node_number+1) # 1, 2
pos_2.append(node_number+circ_div) # 5, 6
pos_3.append(node_number+1+circ_div) # 6, 7
node_number=node_number+1
df_elem_index=np.arange(elem_node_ind,elem_node_ind+len(np.tile(elem_ids,3)))
df_elem_nodes=pd.DataFrame(index=df_elem_index, columns=self.modaldata.tables['elements_values'].columns)
#df_elem_nodes['model_id']=model_id
df_elem_nodes['element_id']=np.tile(elem_ids,3)
df_elem_nodes['node_id']=np.asarray(pos_1+pos_2+pos_3) #node numbers
df_elem_nodes['node_pos']=np.repeat([1,2,3],len(pos_1)) #node position in element
if len(self.modaldata.tables['elements_values'].index)==0:
self.modaldata.tables['elements_values']=df_elem_nodes
else:
#self.modal_data.tables['elements_values'].update(df_elem_nodes)#,overwrite=True)
self.modaldata.tables['elements_values']=pd.concat([self.modaldata.tables['elements_values'], df_elem_nodes])
#refresh
self.calc_node_lcs()
self.populate_table_view(self.activated_models)
self.populate_elem_table_view(self.activated_models)
self.plot_activated_models()
def new_model(self,description=None):
"""
Open dialog for new model creation
:param description:
:return:
"""
response,model_name=dialog_new_model.return_data()
if response==1:
#check for available model_ids
current_model_ids=self.modaldata.tables['info']['model_id'].values
# increment model_id
if len(current_model_ids)==0:
model_id=0
else:
model_id=np.max(np.unique(current_model_ids))+1
fields = {'db_app': 'ModalData', 'time_db_created': time.strftime("%d-%b-%y %H:%M:%S"),
'time_db_saved': time.strftime("%d-%b-%y %H:%M:%S"), 'program': 'modaldata.py',
'model_name': model_name, 'description': description, 'units_code': 9,
'temp': 1.0, 'temp_mode': 1, 'temp_offset': 1.0, 'length': 1.0, 'force': 1.0,
'units_description': 'User unit system'}
self.modaldata.new_model(model_id, entries=fields)
self.build_geometry([model_id])
#select new model
#self.modaldata.current_model_id=model_id
self.preferences['selected_model_id']=model_id
# rebuild model buttons/tree so the new model shows up
self.build_uff_tree(self.modaldata,refresh=True)
def rename_model(self):
"""
Open dialog for renaming the selected model
:return:
"""
response,model_name=dialog_rename_model.return_data()
if response==1:
mask=self.modaldata.tables['info']['model_id']==self.sending_button.model_id
#change model name in modaldata object
self.modaldata.tables['info']['model_name'][mask]=model_name
self.sending_button.setText(model_name+ ' ' + str(self.sending_button.model_id))
def delete_model_dialog(self):
"""
Ask for confirmation, then delete the current model from the modal data object
:return:
"""
response=dialog_delete_model.return_data()
if response==1:
self.delete_model()
def build_geometry(self,model_id_list):
#generate space for 1000 nodes for new models
for model_id in model_id_list:
if self.modaldata.tables['geometry'][self.modaldata.tables['geometry']['model_id'] == model_id].empty:
maximum_number_of_nodes=100
node_nums=np.arange(1,maximum_number_of_nodes+1)
#create data
df=pd.DataFrame(index=node_nums, columns=self.modaldata.tables['geometry'].columns)
df['model_id']=model_id
df['node_nums']=node_nums
rgba_color = pg.colorTuple(QtGui.QColor(0,255,0,255))
df['clr_r']=rgba_color[0]/ 255. # RGB values 0-1
df['clr_g']=rgba_color[1]/ 255. # RGB values 0-1
df['clr_b']=rgba_color[2]/ 255. # RGB values 0-1
df['clr_a']=rgba_color[3]/ 255. # alpha values 0-1
#create empty rows in geometry
self.modaldata.tables['geometry']=self.modaldata.tables['geometry'].append(df, ignore_index=True)
def populate_table_view(self,model_id_list):
#generate nodes if geometry is empty
self.build_geometry(model_id_list)
#cartesian gcs
self.geom_table_model.update(self.modaldata.tables['geometry'], model_id_list, self.fields)
#cylindrical csys
self.cyl_geom_table_model.update(self.modaldata.tables['geometry'], model_id_list, self.cyl_fields)
def populate_elem_table_view(self,model_id_list):
self.elem_table_model.update(self.modaldata.tables['elements_index'], model_id_list, self.elem_fields)
def build_uff_tree(self, modal_data,refresh):
"""
Check available data in uff and load it into data tree widget
:param modal_data:
:return:
"""
def on_activate(inp_model_id):
#first deactivate all other models
for model_id, model_obj in self.models.items():
model_obj.activated=False
self.model_buttons[int(model_id)].setChecked(False)
#activate only clicked model
self.models[inp_model_id].activated = True
self.model_buttons[int(inp_model_id)].setChecked(True)
self.activated_models=[]
for model_id, model_obj in self.models.items():
if model_obj.activated:
self.activated_models.append(int(model_obj.model_id))
self.populate_table_view(self.activated_models)
self.populate_elem_table_view(self.activated_models)
#currently selected model_id
self.preferences['selected_model_id']=inp_model_id
self.plot_activated_models()
params = []
if not refresh:
#reload was called, drop previous models
print('Clearing previous models')
self.delete_model(delete_all=True)
uff_tree_index = 0
#for i in range(len(available_model_ids)):
for index, row in self.modaldata.tables['info'].iterrows():
value = index
#model_id=int(available_model_ids[i])
model_id = row['model_id']
model_name = row['model_name'] + ' ' + str(model_id)
description = row['description']
units_code = row['units_code']
if model_id in self.models.keys():
print('model %f already stored - skipping' % model_id)
else:
print('Storing model id:',model_id)
self.models[model_id] = Model(model_id, model_name, modal_data, None, self.model_view,
None, None, uff_tree_index,None)
button=QtWidgets.QPushButton(qta.icon('fa.database', color='white'),str(model_name), self)
button.setObjectName('medium')
button.setCheckable(True)
button.clicked.connect(partial(on_activate, model_id))
button.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
button.customContextMenuRequested.connect(self.model_btn_context_menu)
button.model_name=model_name
button.model_id=model_id
button.show()
self.model_buttons[int(model_id)]=button
#deactivate all models and model buttons
self.deactivate_all()
#activate first model automatically
#TODO: implement 'current and previously selected models'
try:
on_activate(self.preferences['selected_model_id'])
except Exception:
try:
#if current model_id was not yet set
keys=list(self.models.keys())
on_activate(self.models[keys[0]].model_id)
except Exception:
print('There is no model to show.')
def build_uff_tree_OLD(self, modal_data):
"""
Check available data in uff and load it into data tree widget
:param modal_data:
:return:
"""
#TODO: localization via python gettext
params = []
uff_tree_index = 0
#for i in range(len(available_model_ids)):
for index, row in self.modaldata.tables['info'].iterrows():
value = index
#model_id=int(available_model_ids[i])
model_id = row['model_id']
model_name = row['model_name'] + ' ' + str(model_id)
description = row['description']
units_code = row['units_code']
if model_id in self.models.keys():
print('model %f already stored - skipping' % model_id)
else:
self.models[model_id] = Model(model_id, model_name, modal_data, None, self.model_view,
None, None, uff_tree_index)
params.append({'name': model_name, 'type': 'group', 'extra': 6, 'children': [
{'name': 'Activate:', 'type': 'bool', 'value': False, 'tip': "Click to activate model",
'model': model_id},
{'name': 'Model name:', 'type': 'str', 'value': str(model_id), 'tip': "Click to change model name",
'model': model_id},
{'name': 'Analysis data available:', 'type': 'str',
'value': not self.modaldata.tables['analysis_index'][
self.modaldata.tables['analysis_index']['model_id'] == model_id].empty,
'tip': "Indicates if processed modes are available", 'model': model_id},
{'name': 'Measurement data available:', 'type': 'str',
'value': not self.modaldata.tables['measurement_index'][
self.modaldata.tables['measurement_index']['model_id'] == model_id].empty,
'tip': "Indicates if measurement data is available", 'model': model_id},
]})
#TODO: coordinate system selection (cartesian, cylindrical, ...)
if not self.modaldata.tables['geometry'][self.modaldata.tables['geometry']['model_id'] == model_id].empty:
params[value]['children'].append(
{'name': 'View settings', 'type': 'group', 'expanded': False, 'children': [
{'name': 'Cube scale:', 'type': 'float', 'value': 0.01, 'model': model_id},
{'name': 'Node color:', 'type': 'color', 'value': "00FF00", 'model': model_id},
{'name': 'Element color:', 'type': 'color', 'value': "0000FF", 'model': model_id},
{'name': 'Units code:', 'type': 'int', 'value': units_code, 'model': model_id},
{'name': 'Description:', 'type': 'str', 'value': description, 'model': model_id},
{'name': 'Offset x:', 'type': 'float', 'value': 0, 'model': model_id},
{'name': 'Offset y:', 'type': 'float', 'value': 0, 'model': model_id},
{'name': 'Offset z:', 'type': 'float', 'value': 0, 'model': model_id},
]})
if not self.modaldata.tables['info'][self.modaldata.tables['info']['model_id'] == model_id].empty:
params[value]['children'].append({'name': 'Info', 'type': 'group', 'expanded': False, 'children': [
{'name': 'Model name:', 'type': 'str', 'value': model_name},
#{'name': 'Date created:', 'type': 'str', 'value': info_data.ix[model_id].ix['date_db_created'].value},
]})
uff_tree_index = uff_tree_index + 1
## Create tree of Parameter objects
self.tree_params = Parameter.create(name='params', type='group', children=params)
## If anything changes in the tree, print a message
def change(param, changes):
for param, change, data in changes:
if param.name() == 'Activate:' and change == 'value':
model_id = param.opts['model']
self.models[model_id].activated = data
activated_models=[]
for model_id, model_obj in self.models.items():
if model_obj.activated:
activated_models.append(int(model_obj.model_id))
self.populate_table_view(activated_models)
self.populate_elem_table_view(activated_models)
if param.name() == 'Offset x:' and change == 'value':
model_id = param.opts['model']
self.models[model_id].offset['x'] = data
if param.name() == 'Offset y:' and change == 'value':
model_id = param.opts['model']
self.models[model_id].offset['y'] = data
if param.name() == 'Offset z:' and change == 'value':
model_id = param.opts['model']
self.models[model_id].offset['z'] = data
if param.name() == 'Node color:' and change == 'value':
model_id = param.opts['model']
self.models[model_id].set_node_color(data)
if param.name() == 'Element color:' and change == 'value':
model_id = param.opts['model']
self.models[model_id].set_elem_color(data)
if param.name() == 'Cube scale:' and change == 'value':
model_id = param.opts['model']
self.activated_models[model_id]['needs_refresh'] = True
self.plot_activated_models()
#empty out table when new model is created
self.populate_table_view([])
self.tree_params.sigTreeStateChanged.connect(change)
self.t.setParameters(self.tree_params, showTop=False)
def geometry_changed(self):
"""
Method called when data in geometry table changes
:return:
"""
if self.gcs_type==1:
#calculate x,y from r,phi
nan_mask = self.modaldata.tables['geometry'][['node_nums', 'r', 'phi', 'z', 'cyl_thz', 'thy', 'thx' , 'model_id']].notnull().all(axis=1)
self.modaldata.tables['geometry'].ix[nan_mask, 'x']= \
self.modaldata.tables['geometry'].ix[nan_mask, 'r'] * \
np.cos(self.modaldata.tables['geometry'].ix[nan_mask, 'phi'].astype(np.float64) * np.pi / 180)
self.modaldata.tables['geometry'].ix[nan_mask, 'y']= \
self.modaldata.tables['geometry'].ix[nan_mask, 'r'] * \
np.sin(self.modaldata.tables['geometry'].ix[nan_mask, 'phi'].astype(np.float64) * np.pi / 180)
self.modaldata.tables['geometry'].ix[nan_mask, 'thz']= \
self.modaldata.tables['geometry'].ix[nan_mask, 'cyl_thz'] + \
self.modaldata.tables['geometry'].ix[nan_mask, 'phi']
if self.gcs_type==0:
#calculate r,phi from x,y
nan_mask = self.modaldata.tables['geometry'][['node_nums', 'x', 'y', 'z', 'thz', 'thy', 'thx', 'model_id']].notnull().all(axis=1)
self.modaldata.tables['geometry'].ix[nan_mask, 'r']=\
np.sqrt((self.modaldata.tables['geometry'].ix[nan_mask, 'x'] ** 2 +
self.modaldata.tables['geometry'].ix[nan_mask, 'y'] ** 2).astype(np.float64))
self.modaldata.tables['geometry'].ix[nan_mask, 'phi']=\
np.degrees(np.arctan2(self.modaldata.tables['geometry'].ix[nan_mask, 'y'].astype(np.float64),
self.modaldata.tables['geometry'].ix[nan_mask, 'x'].astype(np.float64))) # arctan2 covers all quadrants; degrees, to match the cylindrical table
#fill NaNs with zero (e.g. freshly added rows)
# this is also necessary so that cyl_thz is changed to 0 for the later subtraction
aux_df=self.modaldata.tables['geometry'].ix[nan_mask,['r','phi','cyl_thz']]
aux_df.fillna(0,inplace=True)
self.modaldata.tables['geometry'].update(aux_df, overwrite=False)
self.modaldata.tables['geometry'].ix[nan_mask, 'cyl_thz']= \
self.modaldata.tables['geometry'].ix[nan_mask, 'thz'] - \
self.modaldata.tables['geometry'].ix[nan_mask, 'phi']
self.calc_node_lcs()
self.plot_activated_models()
def clear_all_views(self):
'''
Clear everything on 3D and 2D view
:return:
'''
# clear 3D view
self.model_view.items.clear()
self.model_view.updateGL()
def reload(self, refresh=False):
"""Update interface -- read modaldata object again.
Added by Matjaz!
"""
#Calculate local coordinate systems and add them to 'geometry' table
if self.modaldata.tables['info'].empty:
self.status_bar.setBusy('geometry', 'Modal data object empty! (info table)')
else:
#self.calc_node_lcs()
self.clear_all_views()
self.build_uff_tree(self.modaldata,refresh=refresh)
self.status_bar.setNotBusy('geometry')
def refresh(self):
'''Is called on tab-change (subwidget change) and on preferences change.'''
self.reload(refresh=True)
class dialog_new_model(QtWidgets.QDialog):
def __init__(self, parent=None):
super(dialog_new_model, self).__init__(parent)
self.setWindowTitle('Create new model')
with open('gui/styles/style_template.css', 'r', encoding='utf-8') as fh:
src = Template(fh.read())
src = src.substitute(COLOR_PALETTE)
self.setStyleSheet(src)
# Create widgets
self.model_name = QtWidgets.QLineEdit("Enter model name")
self.button = QtWidgets.QPushButton("Done")
self.button.setObjectName('small')
self.cancel_button = QtWidgets.QPushButton("Cancel")
self.cancel_button.setObjectName('small')
# Create layout and add widgets
layout = QtWidgets.QVBoxLayout()
layout.addWidget(self.model_name)
button_layout= QtWidgets.QHBoxLayout()
button_layout.addWidget(self.button)
button_layout.addWidget(self.cancel_button)
layout.addLayout(button_layout)
# Set dialog layout
self.setLayout(layout)
self.button.clicked.connect(self.accept)
self.cancel_button.clicked.connect(self.reject)
@staticmethod
def return_data(parent = None):
dialog = dialog_new_model(parent)
result = dialog.exec_()
model_name=dialog.model_name.text()
return (result,model_name)
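# Typical call site (a sketch; any QWidget or None may serve as parent):
#
#   response, model_name = dialog_new_model.return_data()
#   if response == 1:  # QtWidgets.QDialog.Accepted
#       ...            # create the model under 'model_name'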
class dialog_rename_model(QtWidgets.QDialog):
def __init__(self, parent=None):
super(dialog_rename_model, self).__init__(parent)
self.setWindowTitle('Rename model')
with open('gui/styles/style_template.css', 'r', encoding='utf-8') as fh:
src = Template(fh.read())
src = src.substitute(COLOR_PALETTE)
self.setStyleSheet(src)
# Create widgets
self.model_name = QtWidgets.QLineEdit("Enter new model name")
self.button = QtWidgets.QPushButton("Done")
self.button.setObjectName('small')
self.cancel_button = QtWidgets.QPushButton("Cancel")
self.cancel_button.setObjectName('small')
# Create layout and add widgets
layout = QtWidgets.QVBoxLayout()
layout.addWidget(self.model_name)
button_layout= QtWidgets.QHBoxLayout()
button_layout.addWidget(self.button)
button_layout.addWidget(self.cancel_button)
layout.addLayout(button_layout)
# Set dialog layout
self.setLayout(layout)
self.button.clicked.connect(self.accept)
self.cancel_button.clicked.connect(self.reject)
@staticmethod
def return_data(parent = None):
dialog = dialog_rename_model(parent)
result = dialog.exec_()
model_name=dialog.model_name.text()
return (result,model_name)
class dialog_delete_model(QtWidgets.QDialog):
def __init__(self, parent=None):
super(dialog_delete_model, self).__init__(parent)
self.setWindowTitle('Delete model')
with open('gui/styles/style_template.css', 'r', encoding='utf-8') as fh:
src = Template(fh.read())
src = src.substitute(COLOR_PALETTE)
self.setStyleSheet(src)
# Create widgets
self.question = QtWidgets.QLabel("Are you sure you want to delete current model?")
self.delete_button = QtWidgets.QPushButton("Yes")
self.delete_button.setObjectName('small')
self.cancel_button = QtWidgets.QPushButton("No")
self.cancel_button.setObjectName('small')
# Create layout and add widgets
layout = QtWidgets.QVBoxLayout()
layout.addWidget(self.question)
button_layout= QtWidgets.QHBoxLayout()
button_layout.addWidget(self.cancel_button)
button_layout.addWidget(self.delete_button)
layout.addLayout(button_layout)
# Set dialog layout
self.setLayout(layout)
self.delete_button.clicked.connect(self.accept)
self.cancel_button.clicked.connect(self.reject)
@staticmethod
def return_data(parent = None):
dialog = dialog_delete_model(parent)
result = dialog.exec_()
return (result)
class dialog_geom_primitives(QtWidgets.QDialog):
def __init__(self, parent=None):
super(dialog_geom_primitives, self).__init__(parent)
self.leftlist = QtWidgets.QListWidget ()
self.leftlist.insertItem (0, 'Line' )
self.leftlist.insertItem (1, 'Plane' )
self.leftlist.insertItem (2, 'Box' )
self.leftlist.insertItem (3, 'Cylinder' )
self.stack1 = QtWidgets.QWidget()
self.stack2 = QtWidgets.QWidget()
self.stack3 = QtWidgets.QWidget()
self.stack4 = QtWidgets.QWidget()
self.stack1UI()
self.stack2UI()
self.stack3UI()
self.stack4UI()
self.Stack = QtWidgets.QStackedWidget(self)
self.Stack.addWidget(self.stack1)
self.Stack.addWidget(self.stack2)
self.Stack.addWidget(self.stack3)
self.Stack.addWidget(self.stack4)
with open('gui/styles/style_template.css', 'r', encoding='utf-8') as fh:
src = Template(fh.read())
src = src.substitute(COLOR_PALETTE)
self.setStyleSheet(src)
base_layout = QtWidgets.QVBoxLayout()
layout = QtWidgets.QHBoxLayout()
layout.addWidget(self.leftlist)
layout.addWidget(self.Stack)
base_layout.addLayout(layout)
self.ok_button = QtWidgets.QPushButton("Ok")
self.ok_button.setObjectName('small')
self.cancel_button = QtWidgets.QPushButton("Cancel")
self.cancel_button.setObjectName('small')
button_layout= QtWidgets.QHBoxLayout()
button_layout.addWidget(self.ok_button)
button_layout.addWidget(self.cancel_button)
base_layout.addLayout(button_layout)
# Set dialog layout (the outer vertical layout holds the stack and the button row)
self.setLayout(base_layout)
self.ok_button.clicked.connect(self.accept)
self.cancel_button.clicked.connect(self.reject)
self.leftlist.currentRowChanged.connect(self.display)
self.setGeometry(300, 200, 100,100)
self.setWindowTitle('Create geometry')
#set default selection
self.Stack.setCurrentIndex(0)
self.leftlist.setCurrentItem(self.leftlist.item(0))
self.show()
def stack1UI(self):
"""
Input data for creating line
:return:
"""
self.line_title = QtWidgets.QLabel("Create line")
self.line_label_s = QtWidgets.QLabel("Start point coordinates: [m]")
self.line_xs_str = QtWidgets.QLabel("X")
self.line_xs_str.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.line_xs = QtWidgets.QDoubleSpinBox(self)
self.line_xs.setRange(-100000,100000)
self.line_ys_str = QtWidgets.QLabel("Y")
self.line_ys_str.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.line_ys = QtWidgets.QDoubleSpinBox(self)
self.line_ys.setRange(-100000,100000)
self.line_zs_str = QtWidgets.QLabel("Z")
self.line_zs_str.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.line_zs = QtWidgets.QDoubleSpinBox(self)
self.line_zs.setRange(-100000,100000)
self.line_label_e = QtWidgets.QLabel("End point coordinates: [m]")
self.line_xe_str = QtWidgets.QLabel("X")
self.line_xe_str.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.line_xe = QtWidgets.QDoubleSpinBox(self)
self.line_xe.setRange(-100000,100000)
self.line_xe.setValue(1)
self.line_ye_str = QtWidgets.QLabel("Y")
self.line_ye_str.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.line_ye = QtWidgets.QDoubleSpinBox(self)
self.line_ye.setRange(-100000,100000)
self.line_ye.setValue(0)
self.line_ze_str = QtWidgets.QLabel("Z")
self.line_ze_str.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.line_ze = QtWidgets.QDoubleSpinBox(self)
self.line_ze.setRange(-100000,100000)
self.line_ze.setValue(0)
self.line_start_num_str = QtWidgets.QLabel("Start number:")
self.line_start_num = QtWidgets.QDoubleSpinBox(self)
self.line_start_num.setDecimals(0)
self.line_start_num.setRange(1,100000)
self.line_div_str = QtWidgets.QLabel("Number of points:")
self.line_div = QtWidgets.QDoubleSpinBox(self)
self.line_div.setDecimals(0)
self.line_div.setRange(2,1000)
left=0
top=0
first_top=0 #first widget from top
right=0
bottom=20
# Create layout and add widgets
main_layout = QtWidgets.QVBoxLayout()
layout = QtWidgets.QGridLayout()
layout.setContentsMargins(left, first_top, right, bottom)
layout.addWidget(self.line_label_s,0,1)
layout.addWidget(self.line_xs_str,1,0)
layout.addWidget(self.line_ys_str,1,1)
layout.addWidget(self.line_zs_str,1,2)
layout.addWidget(self.line_xs,2,0)
layout.addWidget(self.line_ys,2,1)
layout.addWidget(self.line_zs,2,2)
layout2 = QtWidgets.QGridLayout()
layout2.setContentsMargins(left, top, right, bottom)
layout2.addWidget(self.line_label_e,0,1)
layout2.addWidget(self.line_xe_str,1,0)
layout2.addWidget(self.line_ye_str,1,1)
layout2.addWidget(self.line_ze_str,1,2)
layout2.addWidget(self.line_xe,2,0)
layout2.addWidget(self.line_ye,2,1)
layout2.addWidget(self.line_ze,2,2)
main_layout.addLayout(layout)
main_layout.addLayout(layout2)
main_layout.addWidget(self.line_start_num_str)
main_layout.addWidget(self.line_start_num)
main_layout.addWidget(self.line_div_str)
main_layout.addWidget(self.line_div)
main_layout.addStretch()
self.stack1.setLayout(main_layout)
def stack2UI(self):
"""
Input data for creating plane
:return:
"""
self.plane_title = QtWidgets.QLabel("Create plane")
self.plane_orient_str= QtWidgets.QLabel("Plane orientation:")
self._r_plane_group=QtWidgets.QButtonGroup(self) # Number group
self._r_plane_xy=QtWidgets.QRadioButton("XY")
self._r_plane_xy.setChecked(True)
self._r_plane_group.addButton(self._r_plane_xy)
self._r_plane_yz=QtWidgets.QRadioButton("YZ")
self._r_plane_group.addButton(self._r_plane_yz)
self._r_plane_zx=QtWidgets.QRadioButton("ZX")
self._r_plane_group.addButton(self._r_plane_zx)
self.plane_len1_str = QtWidgets.QLabel("Length along first direction: [m]")
self.plane_len1 = QtWidgets.QDoubleSpinBox(self)
self.plane_len1.setRange(0,100000)
self.plane_len1.setValue(1)
self.plane_len2_str = QtWidgets.QLabel("Length along second direction: [m]")
self.plane_len2 = QtWidgets.QDoubleSpinBox(self)
self.plane_len2.setRange(0,100000)
self.plane_len2.setValue(1)
self.plane_div1_str = QtWidgets.QLabel("Num. of points in first direction: ")
self.plane_div1 = QtWidgets.QDoubleSpinBox(self)
self.plane_div1.setRange(2,1000)
self.plane_div1.setDecimals(0)
self.plane_div2_str = QtWidgets.QLabel("Num. of points in second direction: ")
self.plane_div2 = QtWidgets.QDoubleSpinBox(self)
self.plane_div2.setRange(2,1000)
self.plane_div2.setDecimals(0)
self.plane_x_off_str = QtWidgets.QLabel("X axis offset: [m]")
self.plane_x_off = QtWidgets.QDoubleSpinBox(self)
self.plane_x_off.setRange(-100000,100000)
self.plane_y_off_str = QtWidgets.QLabel("Y axis offset: [m]")
self.plane_y_off = QtWidgets.QDoubleSpinBox(self)
self.plane_y_off.setRange(-100000,100000)
self.plane_z_off_str = QtWidgets.QLabel("Z axis offset: [m]")
self.plane_z_off = QtWidgets.QDoubleSpinBox(self)
self.plane_z_off.setRange(-100000,100000)
self.plane_start_num_str = QtWidgets.QLabel("Start numbering with:")
self.plane_start_num = QtWidgets.QDoubleSpinBox(self)
self.plane_start_num.setDecimals(0)
self.plane_start_num.setRange(1,100000)
left=0
top=0
first_top=0 #first widget from top
right=0
bottom=20
# Create layout and add widgets
main_layout = QtWidgets.QVBoxLayout()
layout=QtWidgets.QGridLayout()
layout.setContentsMargins(left, first_top, right, bottom)
#layout.addWidget(self.plane_title,0,1)
layout.addWidget(self.plane_orient_str,1,1)
layout.addWidget(self._r_plane_xy,2,0)
layout.addWidget(self._r_plane_yz,2,1)
layout.addWidget(self._r_plane_zx,2,2)
layout2=QtWidgets.QGridLayout()
layout2.setContentsMargins(left, top, right, bottom)
layout2.addWidget(self.plane_len1_str,0,0)
layout2.addWidget(self.plane_len1,1,0)
layout2.addWidget(self.plane_len2_str,0,1)
layout2.addWidget(self.plane_len2,1,1)
layout2.addWidget(self.plane_div1_str,2,0)
layout2.addWidget(self.plane_div1,3,0)
layout2.addWidget(self.plane_div2_str,2,1)
layout2.addWidget(self.plane_div2,3,1)
        layout3 = QtWidgets.QGridLayout()
layout3.setContentsMargins(left, top, right, bottom)
layout3.addWidget(self.plane_x_off_str,0,0)
layout3.addWidget(self.plane_x_off,1,0)
layout3.addWidget(self.plane_y_off_str,0,1)
layout3.addWidget(self.plane_y_off,1,1)
layout3.addWidget(self.plane_z_off_str,0,2)
layout3.addWidget(self.plane_z_off,1,2)
main_layout.addLayout(layout)
main_layout.addLayout(layout2)
main_layout.addLayout(layout3)
main_layout.addWidget(self.plane_start_num_str)
main_layout.addWidget(self.plane_start_num)
main_layout.addStretch()
self.stack2.setLayout(main_layout)
def stack3UI(self):
"""
Input data for creating box
:return:
"""
self.box_title = QtWidgets.QLabel("Create box")
self.box_len_str = QtWidgets.QLabel("Length: [m]")
self.box_len_str.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.box_lenx_str = QtWidgets.QLabel("X")
self.box_lenx_str.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.box_leny_str = QtWidgets.QLabel("Y")
self.box_leny_str.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.box_lenz_str = QtWidgets.QLabel("Z")
self.box_lenz_str.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.box_lenx = QtWidgets.QDoubleSpinBox(self)
self.box_lenx.setRange(0,100000)
self.box_lenx.setValue(1)
self.box_leny = QtWidgets.QDoubleSpinBox(self)
self.box_leny.setRange(0,100000)
self.box_leny.setValue(1)
self.box_lenz = QtWidgets.QDoubleSpinBox(self)
self.box_lenz.setRange(0,100000)
self.box_lenz.setValue(1)
self.box_div_str = QtWidgets.QLabel("Num. of points: ")
self.box_div_str.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.box_divx_str = QtWidgets.QLabel("X")
self.box_divx_str.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.box_divy_str = QtWidgets.QLabel("Y")
self.box_divy_str.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.box_divz_str = QtWidgets.QLabel("Z")
self.box_divz_str.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.box_divx = QtWidgets.QDoubleSpinBox(self)
self.box_divx.setRange(2,1000)
self.box_divx.setDecimals(0)
self.box_divy = QtWidgets.QDoubleSpinBox(self)
self.box_divy.setRange(2,1000)
self.box_divy.setDecimals(0)
self.box_divz = QtWidgets.QDoubleSpinBox(self)
self.box_divz.setRange(2,1000)
self.box_divz.setDecimals(0)
self.box_off_str = QtWidgets.QLabel("Offset: [m]")
self.box_off_str.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.box_offx_str = QtWidgets.QLabel("X")
self.box_offx_str.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.box_offy_str = QtWidgets.QLabel("Y")
self.box_offy_str.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.box_offz_str = QtWidgets.QLabel("Z")
self.box_offz_str.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.box_x_off = QtWidgets.QDoubleSpinBox(self)
self.box_x_off.setRange(-100000,100000)
self.box_y_off = QtWidgets.QDoubleSpinBox(self)
self.box_y_off.setRange(-100000,100000)
self.box_z_off = QtWidgets.QDoubleSpinBox(self)
self.box_z_off.setRange(-100000,100000)
self.box_start_num_str = QtWidgets.QLabel("Start numbering with:")
self.box_start_num = QtWidgets.QDoubleSpinBox(self)
self.box_start_num.setDecimals(0)
self.box_start_num.setRange(1,100000)
        left = 0
        top = 0
        first_top = 0  # first widget from top
        right = 0
        bottom = 20
# Create layout and add widgets
main_layout = QtWidgets.QVBoxLayout()
#layout.addWidget(self.box_title)
        layout = QtWidgets.QGridLayout()
layout.setContentsMargins(left, first_top, right, bottom)
layout.addWidget(self.box_len_str,0,1)
layout.addWidget(self.box_lenx_str,1,0)
layout.addWidget(self.box_leny_str,1,1)
layout.addWidget(self.box_lenz_str,1,2)
layout.addWidget(self.box_lenx,2,0)
layout.addWidget(self.box_leny,2,1)
layout.addWidget(self.box_lenz,2,2)
        layout2 = QtWidgets.QGridLayout()
layout2.setContentsMargins(left, top, right, bottom)
layout2.addWidget(self.box_div_str,0,1)
layout2.addWidget(self.box_divx_str,1,0)
layout2.addWidget(self.box_divy_str,1,1)
layout2.addWidget(self.box_divz_str,1,2)
layout2.addWidget(self.box_divx,2,0)
layout2.addWidget(self.box_divy,2,1)
layout2.addWidget(self.box_divz,2,2)
        layout3 = QtWidgets.QGridLayout()
layout3.setContentsMargins(left, top, right, bottom)
layout3.addWidget(self.box_off_str,0,1)
layout3.addWidget(self.box_offx_str,1,0)
layout3.addWidget(self.box_offy_str,1,1)
layout3.addWidget(self.box_offz_str,1,2)
layout3.addWidget(self.box_x_off,2,0)
layout3.addWidget(self.box_y_off,2,1)
layout3.addWidget(self.box_z_off,2,2)
main_layout.addLayout(layout)
main_layout.addLayout(layout2)
main_layout.addLayout(layout3)
main_layout.addWidget(self.box_start_num_str)
main_layout.addWidget(self.box_start_num)
main_layout.addStretch()
self.stack3.setLayout(main_layout)
def stack4UI(self):
"""
Input data for creating cylinder
:return:
"""
self.title = QtWidgets.QLabel("Create cylinder")
self.title.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.input_cyl_r_str = QtWidgets.QLabel("Radius: [m]")
self.input_cyl_r_str.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.input_cyl_r = QtWidgets.QDoubleSpinBox(self)
self.input_cyl_r.setRange(0,100000)
self.input_cyl_r.setValue(1)
self.input_cyl_h_str = QtWidgets.QLabel("Height: [m]")
self.input_cyl_h_str.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.input_cyl_h = QtWidgets.QDoubleSpinBox(self)
self.input_cyl_h.setRange(0,100000)
self.input_cyl_h.setValue(1)
self.input_cyl_z_off_str = QtWidgets.QLabel("Z axis offset: [m]")
self.input_cyl_z_off_str.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.input_cyl_z_off = QtWidgets.QDoubleSpinBox(self)
self.input_cyl_z_off.setRange(-100000,100000)
self.input_start_num_str = QtWidgets.QLabel("Start numbering with:")
self.input_start_num = QtWidgets.QDoubleSpinBox(self)
self.input_start_num.setDecimals(0)
self.input_start_num.setRange(1,100000)
        self.input_num_orient_str = QtWidgets.QLabel("Numbering orientation:")
        self._r_orient_group = QtWidgets.QButtonGroup(self)  # radio group for numbering orientation
        self._r_vert_orient = QtWidgets.QRadioButton("Vertical")
        self._r_vert_orient.setChecked(True)
        self._r_orient_group.addButton(self._r_vert_orient)
        self._r_horiz_orient = QtWidgets.QRadioButton("Horizontal")
        self._r_orient_group.addButton(self._r_horiz_orient)
        self.input_num_orient = 0
self.input_main_div_str = QtWidgets.QLabel("Number of points:")
self.input_main_div_str.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.input_height_div_str = QtWidgets.QLabel("Along height:")
self.input_height_div_str.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.input_height_div = QtWidgets.QDoubleSpinBox(self)
self.input_height_div.setDecimals(0)
self.input_height_div.setRange(2,100)
self.input_circ_div_str = QtWidgets.QLabel("Along circumference:")
self.input_circ_div_str.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.input_circ_div = QtWidgets.QDoubleSpinBox(self)
self.input_circ_div.setDecimals(0)
self.input_circ_div.setRange(3,100)
        left = 0
        top = 0
        first_top = 0  # first widget from top
        right = 0
        bottom = 20
# Create layout and add widgets
main_layout = QtWidgets.QVBoxLayout()
        layout = QtWidgets.QGridLayout()
layout.setContentsMargins(left, first_top, right, bottom)
#layout.addWidget(self.title,0,1)
layout.addWidget(self.input_cyl_r_str,1,0)
layout.addWidget(self.input_cyl_h_str,1,1)
layout.addWidget(self.input_cyl_z_off_str,1,2)
layout.addWidget(self.input_cyl_r,2,0)
layout.addWidget(self.input_cyl_h,2,1)
layout.addWidget(self.input_cyl_z_off,2,2)
        layout2 = QtWidgets.QGridLayout()
layout2.setContentsMargins(left, top, right, bottom)
layout2.addWidget(self.input_main_div_str,0,0,1,2) # row, column, rowSpan, columnSpan
layout2.addWidget(self.input_height_div_str,1,0)
layout2.addWidget(self.input_circ_div_str,1,1)
layout2.addWidget(self.input_height_div,2,0)
layout2.addWidget(self.input_circ_div,2,1)
main_layout.addLayout(layout)
main_layout.addLayout(layout2)
main_layout.addWidget(self.input_start_num_str)
main_layout.addWidget(self.input_start_num)
main_layout.addWidget(self.input_num_orient_str)
main_layout.addWidget(self._r_horiz_orient)
main_layout.addWidget(self._r_vert_orient)
main_layout.addStretch()
self.stack4.setLayout(main_layout)
def display(self,i):
self.Stack.setCurrentIndex(i)
def get_data(self):
"""
Gather user input for geometry creation
:return:
"""
if self.Stack.currentIndex()==0:
data={'geom_type':'line',
'xs':self.line_xs.value(),
'ys':self.line_ys.value(),
'zs':self.line_zs.value(),
'xe':self.line_xe.value(),
'ye':self.line_ye.value(),
'ze':self.line_ze.value(),
'num_of_points':self.line_div.value(),
'start_num':self.line_start_num.value()
}
        elif self.Stack.currentIndex()==1:
data={'geom_type':'plane',
'plane_orient':self._r_plane_group.checkedButton().text(),
'len1':self.plane_len1.value(),
'len2':self.plane_len2.value(),
'div1':self.plane_div1.value(),
'div2':self.plane_div2.value(),
'x_offset':self.plane_x_off.value(),
'y_offset':self.plane_y_off.value(),
'z_offset':self.plane_z_off.value(),
'start_num':self.plane_start_num.value()
}
        elif self.Stack.currentIndex()==2:
data={'geom_type':'box',
'lenx':self.box_lenx.value(),
'leny':self.box_leny.value(),
'lenz':self.box_lenz.value(),
'divx':self.box_divx.value(),
'divy':self.box_divy.value(),
'divz':self.box_divz.value(),
'x_offset':self.box_x_off.value(),
'y_offset':self.box_y_off.value(),
'z_offset':self.box_z_off.value(),
'start_num':self.box_start_num.value()
}
        elif self.Stack.currentIndex()==3:
data={'geom_type':'cylinder',
'radius':self.input_cyl_r.value(),
'height':self.input_cyl_h.value(),
'start_num':self.input_start_num.value(),
'num_orient':self._r_orient_group.checkedButton().text(),
'z_offset':self.input_cyl_z_off.value(),
'height_div':self.input_height_div.value(),
'circ_div':self.input_circ_div.value()
}
return data
@staticmethod
    def return_data(parent=None):
dialog = dialog_geom_primitives(parent)
result = dialog.exec_()
input_data = dialog.get_data()
return (result, input_data)
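# Usage sketch (illustrative, not part of the original source): `return_data`
# above wraps the standard modal-dialog pattern, so a caller holding some
# parent QWidget (here the assumed name `parent_widget`) would do roughly:
#
#     accepted, geom = dialog_geom_primitives.return_data(parent_widget)
#     if accepted:
#         print(geom['geom_type'], geom['start_num'])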
if __name__ == '__main__':
import sys
app = QtWidgets.QApplication(sys.argv)
main_window = GeometryWidget()
main_window.setGeometry(100, 100, 640, 480)
main_window.show()
sys.exit(app.exec_())
| 41.574094 | 171 | 0.605129 |
ad94af299d5f7b25c6d8285732805a2ccf4a83b7 | 27,599 | py | Python | tensorflow2/tf2cv/models/resnet.py | naviocean/imgclsmob | f2993d3ce73a2f7ddba05da3891defb08547d504 | [
"MIT"
] | 2,649 | 2018-08-03T14:18:00.000Z | 2022-03-31T08:08:17.000Z | tensorflow2/tf2cv/models/resnet.py | naviocean/imgclsmob | f2993d3ce73a2f7ddba05da3891defb08547d504 | [
"MIT"
] | 95 | 2018-08-13T01:46:03.000Z | 2022-03-13T08:38:14.000Z | tensorflow2/tf2cv/models/resnet.py | naviocean/imgclsmob | f2993d3ce73a2f7ddba05da3891defb08547d504 | [
"MIT"
] | 549 | 2018-08-06T08:09:22.000Z | 2022-03-31T08:08:21.000Z | """
ResNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
"""
__all__ = ['ResNet', 'resnet10', 'resnet12', 'resnet14', 'resnetbc14b', 'resnet16', 'resnet18_wd4', 'resnet18_wd2',
'resnet18_w3d4', 'resnet18', 'resnet26', 'resnetbc26b', 'resnet34', 'resnetbc38b', 'resnet50', 'resnet50b',
'resnet101', 'resnet101b', 'resnet152', 'resnet152b', 'resnet200', 'resnet200b', 'ResBlock', 'ResBottleneck',
'ResUnit', 'ResInitBlock', 'get_resnet']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1_block, conv3x3_block, conv7x7_block, MaxPool2d, SimpleSequential, flatten, is_channels_first
class ResBlock(nn.Layer):
"""
Simple ResNet block for residual path in ResNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
use_bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
in_channels,
out_channels,
strides,
use_bias=False,
use_bn=True,
data_format="channels_last",
**kwargs):
super(ResBlock, self).__init__(**kwargs)
self.conv1 = conv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
use_bias=use_bias,
use_bn=use_bn,
data_format=data_format,
name="conv1")
self.conv2 = conv3x3_block(
in_channels=out_channels,
out_channels=out_channels,
use_bias=use_bias,
use_bn=use_bn,
activation=None,
data_format=data_format,
name="conv2")
def call(self, x, training=None):
x = self.conv1(x, training=training)
x = self.conv2(x, training=training)
return x
class ResBottleneck(nn.Layer):
"""
ResNet bottleneck block for residual path in ResNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int, default 1
Padding value for the second convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for the second convolution layer.
conv1_stride : bool, default False
Whether to use stride in the first or the second convolution layer of the block.
bottleneck_factor : int, default 4
Bottleneck factor.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
in_channels,
out_channels,
strides,
padding=1,
dilation=1,
conv1_stride=False,
bottleneck_factor=4,
data_format="channels_last",
**kwargs):
super(ResBottleneck, self).__init__(**kwargs)
mid_channels = out_channels // bottleneck_factor
self.conv1 = conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels,
strides=(strides if conv1_stride else 1),
data_format=data_format,
name="conv1")
self.conv2 = conv3x3_block(
in_channels=mid_channels,
out_channels=mid_channels,
strides=(1 if conv1_stride else strides),
padding=padding,
dilation=dilation,
data_format=data_format,
name="conv2")
self.conv3 = conv1x1_block(
in_channels=mid_channels,
out_channels=out_channels,
activation=None,
data_format=data_format,
name="conv3")
def call(self, x, training=None):
x = self.conv1(x, training=training)
x = self.conv2(x, training=training)
x = self.conv3(x, training=training)
return x
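# Worked example of the bottleneck arithmetic above (illustrative only): with
# out_channels=256 and the default bottleneck_factor=4, the residual path runs
# 1x1 -> 3x3 -> 1x1 through mid_channels = 256 // 4 = 64, so the costly 3x3
# convolution only ever sees a quarter of the output channels.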
class ResUnit(nn.Layer):
"""
ResNet unit with residual connection.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int, default 1
Padding value for the second convolution layer in bottleneck.
dilation : int or tuple/list of 2 int, default 1
Dilation value for the second convolution layer in bottleneck.
use_bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
bottleneck : bool, default True
Whether to use a bottleneck or simple block in units.
conv1_stride : bool, default False
Whether to use stride in the first or the second convolution layer of the block.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
in_channels,
out_channels,
strides,
padding=1,
dilation=1,
use_bias=False,
use_bn=True,
bottleneck=True,
conv1_stride=False,
data_format="channels_last",
**kwargs):
super(ResUnit, self).__init__(**kwargs)
self.resize_identity = (in_channels != out_channels) or (strides != 1)
if bottleneck:
self.body = ResBottleneck(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
padding=padding,
dilation=dilation,
conv1_stride=conv1_stride,
data_format=data_format,
name="body")
else:
self.body = ResBlock(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
use_bias=use_bias,
use_bn=use_bn,
data_format=data_format,
name="body")
if self.resize_identity:
self.identity_conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
use_bias=use_bias,
use_bn=use_bn,
activation=None,
data_format=data_format,
name="identity_conv")
self.activ = nn.ReLU()
def call(self, x, training=None):
if self.resize_identity:
identity = self.identity_conv(x, training=training)
else:
identity = x
x = self.body(x, training=training)
x = x + identity
x = self.activ(x)
return x
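# Note on ResUnit above: the 1x1 `identity_conv` projection is built only when
# the unit changes the channel count or the stride, so shortcut connections
# stay parameter-free whenever input and output shapes already match.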
class ResInitBlock(nn.Layer):
"""
ResNet specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
in_channels,
out_channels,
data_format="channels_last",
**kwargs):
super(ResInitBlock, self).__init__(**kwargs)
self.conv = conv7x7_block(
in_channels=in_channels,
out_channels=out_channels,
strides=2,
data_format=data_format,
name="conv")
self.pool = MaxPool2d(
pool_size=3,
strides=2,
padding=1,
data_format=data_format,
name="pool")
def call(self, x, training=None):
x = self.conv(x, training=training)
x = self.pool(x)
return x
class ResNet(tf.keras.Model):
"""
ResNet model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
conv1_stride : bool
Whether to use stride in the first or the second convolution layer in units.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
channels,
init_block_channels,
bottleneck,
conv1_stride,
in_channels=3,
in_size=(224, 224),
classes=1000,
data_format="channels_last",
**kwargs):
super(ResNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
self.data_format = data_format
self.features = SimpleSequential(name="features")
self.features.add(ResInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
data_format=data_format,
name="init_block"))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = SimpleSequential(name="stage{}".format(i + 1))
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != 0) else 1
stage.add(ResUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bottleneck=bottleneck,
conv1_stride=conv1_stride,
data_format=data_format,
name="unit{}".format(j + 1)))
in_channels = out_channels
self.features.add(stage)
self.features.add(nn.AveragePooling2D(
pool_size=7,
strides=1,
data_format=data_format,
name="final_pool"))
self.output1 = nn.Dense(
units=classes,
input_dim=in_channels,
name="output1")
def call(self, x, training=None):
x = self.features(x, training=training)
x = flatten(x, self.data_format)
x = self.output1(x)
return x
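# Concrete shape of the `channels` argument above (derived from get_resnet()
# below, shown here for illustration): plain ResNet-18 uses layers = [2, 2, 2, 2]
# with per-stage widths [64, 128, 256, 512], giving
#     channels = [[64, 64], [128, 128], [256, 256], [512, 512]]
# i.e. one inner list per stage and one entry per unit in that stage.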
def get_resnet(blocks,
bottleneck=None,
conv1_stride=True,
width_scale=1.0,
model_name=None,
pretrained=False,
root=os.path.join("~", ".tensorflow", "models"),
**kwargs):
"""
Create ResNet model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
bottleneck : bool, default None
Whether to use a bottleneck or simple block in units.
conv1_stride : bool, default True
Whether to use stride in the first or the second convolution layer in units.
width_scale : float, default 1.0
Scale factor for width of layers.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
if bottleneck is None:
bottleneck = (blocks >= 50)
if blocks == 10:
layers = [1, 1, 1, 1]
elif blocks == 12:
layers = [2, 1, 1, 1]
elif blocks == 14 and not bottleneck:
layers = [2, 2, 1, 1]
elif (blocks == 14) and bottleneck:
layers = [1, 1, 1, 1]
elif blocks == 16:
layers = [2, 2, 2, 1]
elif blocks == 18:
layers = [2, 2, 2, 2]
elif (blocks == 26) and not bottleneck:
layers = [3, 3, 3, 3]
elif (blocks == 26) and bottleneck:
layers = [2, 2, 2, 2]
elif blocks == 34:
layers = [3, 4, 6, 3]
elif (blocks == 38) and bottleneck:
layers = [3, 3, 3, 3]
elif blocks == 50:
layers = [3, 4, 6, 3]
elif blocks == 101:
layers = [3, 4, 23, 3]
elif blocks == 152:
layers = [3, 8, 36, 3]
elif blocks == 200:
layers = [3, 24, 36, 3]
else:
raise ValueError("Unsupported ResNet with number of blocks: {}".format(blocks))
if bottleneck:
assert (sum(layers) * 3 + 2 == blocks)
else:
assert (sum(layers) * 2 + 2 == blocks)
init_block_channels = 64
channels_per_layers = [64, 128, 256, 512]
if bottleneck:
bottleneck_factor = 4
channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers]
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
if width_scale != 1.0:
channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij
for j, cij in enumerate(ci)] for i, ci in enumerate(channels)]
init_block_channels = int(init_block_channels * width_scale)
net = ResNet(
channels=channels,
init_block_channels=init_block_channels,
bottleneck=bottleneck,
conv1_stride=conv1_stride,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
(1,) + net.in_size + (in_channels,)
net.build(input_shape=input_shape)
net.load_weights(
filepath=get_model_file(
model_name=model_name,
local_model_store_dir_path=root))
return net
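def _example_get_resnet():
    # Hedged usage sketch (not part of the original module): build a plain
    # ResNet-18 with get_resnet() and push a dummy channels-last batch through
    # it, mirroring what _test() below does for the whole model zoo.
    net = get_resnet(blocks=18)
    x = tf.random.normal((2, 224, 224, 3))
    y = net(x)
    assert tuple(y.shape.as_list()) == (2, 1000)
    return y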
def resnet10(**kwargs):
"""
ResNet-10 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=10, model_name="resnet10", **kwargs)
def resnet12(**kwargs):
"""
ResNet-12 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=12, model_name="resnet12", **kwargs)
def resnet14(**kwargs):
"""
ResNet-14 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=14, model_name="resnet14", **kwargs)
def resnetbc14b(**kwargs):
"""
ResNet-BC-14b model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
It's an experimental model (bottleneck compressed).
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=14, bottleneck=True, conv1_stride=False, model_name="resnetbc14b", **kwargs)
def resnet16(**kwargs):
"""
ResNet-16 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=16, model_name="resnet16", **kwargs)
def resnet18_wd4(**kwargs):
"""
ResNet-18 model with 0.25 width scale from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385. It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=18, width_scale=0.25, model_name="resnet18_wd4", **kwargs)
def resnet18_wd2(**kwargs):
"""
ResNet-18 model with 0.5 width scale from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385. It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=18, width_scale=0.5, model_name="resnet18_wd2", **kwargs)
def resnet18_w3d4(**kwargs):
"""
ResNet-18 model with 0.75 width scale from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385. It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=18, width_scale=0.75, model_name="resnet18_w3d4", **kwargs)
def resnet18(**kwargs):
"""
ResNet-18 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=18, model_name="resnet18", **kwargs)
def resnet26(**kwargs):
"""
ResNet-26 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=26, bottleneck=False, model_name="resnet26", **kwargs)
def resnetbc26b(**kwargs):
"""
ResNet-BC-26b model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
It's an experimental model (bottleneck compressed).
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=26, bottleneck=True, conv1_stride=False, model_name="resnetbc26b", **kwargs)
def resnet34(**kwargs):
"""
ResNet-34 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=34, model_name="resnet34", **kwargs)
def resnetbc38b(**kwargs):
"""
ResNet-BC-38b model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
It's an experimental model (bottleneck compressed).
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=38, bottleneck=True, conv1_stride=False, model_name="resnetbc38b", **kwargs)
def resnet50(**kwargs):
"""
ResNet-50 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=50, model_name="resnet50", **kwargs)
def resnet50b(**kwargs):
"""
ResNet-50 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image
Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=50, conv1_stride=False, model_name="resnet50b", **kwargs)
def resnet101(**kwargs):
"""
ResNet-101 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=101, model_name="resnet101", **kwargs)
def resnet101b(**kwargs):
"""
ResNet-101 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image
Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=101, conv1_stride=False, model_name="resnet101b", **kwargs)
def resnet152(**kwargs):
"""
ResNet-152 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=152, model_name="resnet152", **kwargs)
def resnet152b(**kwargs):
"""
ResNet-152 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image
Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=152, conv1_stride=False, model_name="resnet152b", **kwargs)
def resnet200(**kwargs):
"""
ResNet-200 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=200, model_name="resnet200", **kwargs)
def resnet200b(**kwargs):
"""
ResNet-200 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image
Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=200, conv1_stride=False, model_name="resnet200b", **kwargs)
def _test():
import numpy as np
import tensorflow.keras.backend as K
data_format = "channels_last"
# data_format = "channels_first"
pretrained = False
models = [
resnet10,
resnet12,
resnet14,
resnetbc14b,
resnet16,
resnet18_wd4,
resnet18_wd2,
resnet18_w3d4,
resnet18,
resnet26,
resnetbc26b,
resnet34,
resnetbc38b,
resnet50,
resnet50b,
resnet101,
resnet101b,
resnet152,
resnet152b,
resnet200,
resnet200b,
]
for model in models:
net = model(pretrained=pretrained, data_format=data_format)
batch = 4
x = tf.random.normal((batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3))
y = net(x)
assert (tuple(y.shape.as_list()) == (batch, 1000))
weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
print("m={}, {}".format(model.__name__, weight_count))
assert (model != resnet10 or weight_count == 5418792)
assert (model != resnet12 or weight_count == 5492776)
assert (model != resnet14 or weight_count == 5788200)
assert (model != resnetbc14b or weight_count == 10064936)
assert (model != resnet16 or weight_count == 6968872)
assert (model != resnet18_wd4 or weight_count == 3937400)
assert (model != resnet18_wd2 or weight_count == 5804296)
assert (model != resnet18_w3d4 or weight_count == 8476056)
assert (model != resnet18 or weight_count == 11689512)
assert (model != resnet26 or weight_count == 17960232)
assert (model != resnetbc26b or weight_count == 15995176)
assert (model != resnet34 or weight_count == 21797672)
assert (model != resnetbc38b or weight_count == 21925416)
assert (model != resnet50 or weight_count == 25557032)
assert (model != resnet50b or weight_count == 25557032)
assert (model != resnet101 or weight_count == 44549160)
assert (model != resnet101b or weight_count == 44549160)
assert (model != resnet152 or weight_count == 60192808)
assert (model != resnet152b or weight_count == 60192808)
assert (model != resnet200 or weight_count == 64673832)
assert (model != resnet200b or weight_count == 64673832)
if __name__ == "__main__":
_test()
| 33.947109 | 120 | 0.615022 |
db65c355a026c8a2c275b9c2fce52cd2c42396ca | 13,306 | py | Python | test/functional/feature_config_args.py | widecoin-project/widecoin | 143b190a61f95a4b7d40c5da484cdde8f0c5ac3f | [
"MIT"
] | 8 | 2021-04-17T16:11:50.000Z | 2021-06-23T05:30:39.000Z | test/functional/feature_config_args.py | widecoin-project/widecoin | 143b190a61f95a4b7d40c5da484cdde8f0c5ac3f | [
"MIT"
] | 1 | 2021-04-18T11:57:59.000Z | 2021-04-18T11:57:59.000Z | test/functional/feature_config_args.py | widecoin-project/widecoin | 143b190a61f95a4b7d40c5da484cdde8f0c5ac3f | [
"MIT"
] | 7 | 2021-04-17T16:04:12.000Z | 2021-06-10T00:54:53.000Z | #!/usr/bin/env python3
# Copyright (c) 2017-2020 The Widecoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test various command line arguments and configuration file parameters."""
import os
import time
from test_framework.test_framework import WidecoinTestFramework
from test_framework import util
class ConfArgsTest(WidecoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.supports_cli = False
self.wallet_names = []
def test_config_file_parser(self):
self.stop_node(0)
inc_conf_file_path = os.path.join(self.nodes[0].datadir, 'include.conf')
with open(os.path.join(self.nodes[0].datadir, 'widecoin.conf'), 'a', encoding='utf-8') as conf:
conf.write('includeconf={}\n'.format(inc_conf_file_path))
self.nodes[0].assert_start_raises_init_error(
expected_msg='Error: Error parsing command line arguments: Invalid parameter -dash_cli=1',
extra_args=['-dash_cli=1'],
)
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('dash_conf=1\n')
with self.nodes[0].assert_debug_log(expected_msgs=['Ignoring unknown configuration value dash_conf']):
self.start_node(0)
self.stop_node(0)
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('-dash=1\n')
self.nodes[0].assert_start_raises_init_error(expected_msg='Error: Error reading configuration file: parse error on line 1: -dash=1, options in configuration file must be specified without leading -')
if self.is_wallet_compiled():
with open(inc_conf_file_path, 'w', encoding='utf8') as conf:
conf.write("wallet=foo\n")
self.nodes[0].assert_start_raises_init_error(expected_msg='Error: Config setting for -wallet only applied on %s network when in [%s] section.' % (self.chain, self.chain))
main_conf_file_path = os.path.join(self.options.tmpdir, 'node0', 'widecoin_main.conf')
util.write_config(main_conf_file_path, n=0, chain='', extra_config='includeconf={}\n'.format(inc_conf_file_path))
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('acceptnonstdtxn=1\n')
self.nodes[0].assert_start_raises_init_error(extra_args=["-conf={}".format(main_conf_file_path)], expected_msg='Error: acceptnonstdtxn is not currently supported for main chain')
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('nono\n')
self.nodes[0].assert_start_raises_init_error(expected_msg='Error: Error reading configuration file: parse error on line 1: nono, if you intended to specify a negated option, use nono=1 instead')
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('server=1\nrpcuser=someuser\nrpcpassword=some#pass')
self.nodes[0].assert_start_raises_init_error(expected_msg='Error: Error reading configuration file: parse error on line 3, using # in rpcpassword can be ambiguous and should be avoided')
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('server=1\nrpcuser=someuser\nmain.rpcpassword=some#pass')
self.nodes[0].assert_start_raises_init_error(expected_msg='Error: Error reading configuration file: parse error on line 3, using # in rpcpassword can be ambiguous and should be avoided')
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('server=1\nrpcuser=someuser\n[main]\nrpcpassword=some#pass')
self.nodes[0].assert_start_raises_init_error(expected_msg='Error: Error reading configuration file: parse error on line 4, using # in rpcpassword can be ambiguous and should be avoided')
inc_conf_file2_path = os.path.join(self.nodes[0].datadir, 'include2.conf')
with open(os.path.join(self.nodes[0].datadir, 'widecoin.conf'), 'a', encoding='utf-8') as conf:
conf.write('includeconf={}\n'.format(inc_conf_file2_path))
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('testnot.datadir=1\n')
with open(inc_conf_file2_path, 'w', encoding='utf-8') as conf:
conf.write('[testnet]\n')
self.restart_node(0)
self.nodes[0].stop_node(expected_stderr='Warning: ' + inc_conf_file_path + ':1 Section [testnot] is not recognized.' + os.linesep + inc_conf_file2_path + ':1 Section [testnet] is not recognized.')
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('') # clear
with open(inc_conf_file2_path, 'w', encoding='utf-8') as conf:
conf.write('') # clear
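    # For reference (a sketch, not exercised by the asserts above): the section
    # rules tested here mean a regtest-scoped widecoin.conf looks roughly like
    #
    #     regtest=1
    #     [regtest]
    #     server=1
    #     wallet=foo
    #
    # i.e. network-specific settings such as `wallet` must sit under the
    # matching section header to take effect on that network.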
def test_invalid_command_line_options(self):
self.nodes[0].assert_start_raises_init_error(
expected_msg='Error: No proxy server specified. Use -proxy=<ip> or -proxy=<ip:port>.',
extra_args=['-proxy'],
)
def test_log_buffer(self):
self.stop_node(0)
with self.nodes[0].assert_debug_log(expected_msgs=['Warning: parsed potentially confusing double-negative -connect=0\n']):
self.start_node(0, extra_args=['-noconnect=0'])
def test_args_log(self):
self.stop_node(0)
self.log.info('Test config args logging')
with self.nodes[0].assert_debug_log(
expected_msgs=[
'Command-line arg: addnode="some.node"',
'Command-line arg: rpcauth=****',
'Command-line arg: rpcbind=****',
'Command-line arg: rpcpassword=****',
'Command-line arg: rpcuser=****',
'Command-line arg: torpassword=****',
'Config file arg: %s="1"' % self.chain,
'Config file arg: [%s] server="1"' % self.chain,
],
unexpected_msgs=[
'alice:f7efda5c189b999524f151318c0c86$d5b51b3beffbc0',
'127.1.1.1',
'secret-rpcuser',
'secret-torpassword',
]):
self.start_node(0, extra_args=[
'-addnode=some.node',
'-rpcauth=alice:f7efda5c189b999524f151318c0c86$d5b51b3beffbc0',
'-rpcbind=127.1.1.1',
'-rpcpassword=',
'-rpcuser=secret-rpcuser',
'-torpassword=secret-torpassword',
])
def test_networkactive(self):
self.log.info('Test -networkactive option')
self.stop_node(0)
with self.nodes[0].assert_debug_log(expected_msgs=['SetNetworkActive: true\n']):
self.start_node(0)
self.stop_node(0)
with self.nodes[0].assert_debug_log(expected_msgs=['SetNetworkActive: true\n']):
self.start_node(0, extra_args=['-networkactive'])
self.stop_node(0)
with self.nodes[0].assert_debug_log(expected_msgs=['SetNetworkActive: true\n']):
self.start_node(0, extra_args=['-networkactive=1'])
self.stop_node(0)
with self.nodes[0].assert_debug_log(expected_msgs=['SetNetworkActive: false\n']):
self.start_node(0, extra_args=['-networkactive=0'])
self.stop_node(0)
with self.nodes[0].assert_debug_log(expected_msgs=['SetNetworkActive: false\n']):
self.start_node(0, extra_args=['-nonetworkactive'])
self.stop_node(0)
with self.nodes[0].assert_debug_log(expected_msgs=['SetNetworkActive: false\n']):
self.start_node(0, extra_args=['-nonetworkactive=1'])
def test_seed_peers(self):
self.log.info('Test seed peers')
default_data_dir = self.nodes[0].datadir
# Only regtest has no fixed seeds. To avoid connections to random
# nodes, regtest is the only network where it is safe to enable
# -fixedseeds in tests
        util.assert_equal(self.nodes[0].getblockchaininfo()['chain'], 'regtest')
self.stop_node(0)
# No peers.dat exists and -dnsseed=1
# We expect the node will use DNS Seeds, but Regtest mode has 0 DNS seeds
# So after 60 seconds, the node should fallback to fixed seeds (this is a slow test)
assert not os.path.exists(os.path.join(default_data_dir, "peers.dat"))
start = int(time.time())
with self.nodes[0].assert_debug_log(expected_msgs=[
"Loaded 0 addresses from peers.dat",
"0 addresses found from DNS seeds",
"opencon thread start", # Ensure ThreadOpenConnections::start time is properly set
]):
self.start_node(0, extra_args=['-dnsseed=1', '-fixedseeds=1', f'-mocktime={start}'])
with self.nodes[0].assert_debug_log(expected_msgs=[
"Adding fixed seeds as 60 seconds have passed and addrman is empty",
]):
self.nodes[0].setmocktime(start + 65)
self.stop_node(0)
# No peers.dat exists and -dnsseed=0
# We expect the node will fallback immediately to fixed seeds
assert not os.path.exists(os.path.join(default_data_dir, "peers.dat"))
start = time.time()
with self.nodes[0].assert_debug_log(expected_msgs=[
"Loaded 0 addresses from peers.dat",
"DNS seeding disabled",
"Adding fixed seeds as -dnsseed=0, -addnode is not provided and all -seednode(s) attempted\n",
]):
self.start_node(0, extra_args=['-dnsseed=0', '-fixedseeds=1'])
assert time.time() - start < 60
self.stop_node(0)
# No peers.dat exists and dns seeds are disabled.
# We expect the node will not add fixed seeds when explicitly disabled.
assert not os.path.exists(os.path.join(default_data_dir, "peers.dat"))
start = time.time()
with self.nodes[0].assert_debug_log(expected_msgs=[
"Loaded 0 addresses from peers.dat",
"DNS seeding disabled",
"Fixed seeds are disabled",
]):
self.start_node(0, extra_args=['-dnsseed=0', '-fixedseeds=0'])
assert time.time() - start < 60
self.stop_node(0)
# No peers.dat exists and -dnsseed=0, but a -addnode is provided
# We expect the node will allow 60 seconds prior to using fixed seeds
assert not os.path.exists(os.path.join(default_data_dir, "peers.dat"))
start = int(time.time())
with self.nodes[0].assert_debug_log(expected_msgs=[
"Loaded 0 addresses from peers.dat",
"DNS seeding disabled",
"opencon thread start", # Ensure ThreadOpenConnections::start time is properly set
]):
self.start_node(0, extra_args=['-dnsseed=0', '-fixedseeds=1', '-addnode=fakenodeaddr', f'-mocktime={start}'])
with self.nodes[0].assert_debug_log(expected_msgs=[
"Adding fixed seeds as 60 seconds have passed and addrman is empty",
]):
self.nodes[0].setmocktime(start + 65)
def run_test(self):
self.test_log_buffer()
self.test_args_log()
self.test_seed_peers()
self.test_networkactive()
self.test_config_file_parser()
self.test_invalid_command_line_options()
# Remove the -datadir argument so it doesn't override the config file
self.nodes[0].args = [arg for arg in self.nodes[0].args if not arg.startswith("-datadir")]
default_data_dir = self.nodes[0].datadir
new_data_dir = os.path.join(default_data_dir, 'newdatadir')
new_data_dir_2 = os.path.join(default_data_dir, 'newdatadir2')
# Check that using -datadir argument on non-existent directory fails
self.nodes[0].datadir = new_data_dir
self.nodes[0].assert_start_raises_init_error(['-datadir=' + new_data_dir], 'Error: Specified data directory "' + new_data_dir + '" does not exist.')
# Check that using non-existent datadir in conf file fails
conf_file = os.path.join(default_data_dir, "widecoin.conf")
# datadir needs to be set before [chain] section
conf_file_contents = open(conf_file, encoding='utf8').read()
with open(conf_file, 'w', encoding='utf8') as f:
f.write("datadir=" + new_data_dir + "\n")
f.write(conf_file_contents)
self.nodes[0].assert_start_raises_init_error(['-conf=' + conf_file], 'Error: Error reading configuration file: specified data directory "' + new_data_dir + '" does not exist.')
# Create the directory and ensure the config file now works
os.mkdir(new_data_dir)
self.start_node(0, ['-conf='+conf_file])
self.stop_node(0)
assert os.path.exists(os.path.join(new_data_dir, self.chain, 'blocks'))
# Ensure command line argument overrides datadir in conf
os.mkdir(new_data_dir_2)
self.nodes[0].datadir = new_data_dir_2
self.start_node(0, ['-datadir='+new_data_dir_2, '-conf='+conf_file])
assert os.path.exists(os.path.join(new_data_dir_2, self.chain, 'blocks'))
if __name__ == '__main__':
ConfArgsTest().main()
| 50.401515 | 207 | 0.640914 |
1efdc607fe8915a38a3ded1085c71d27ab0ee136 | 4,287 | py | Python | webvtt/segmenter.py | gana-dimsum/webvtt-py | 6818ab363b057b8d0928ce395683ea2e95e08dea | [
"MIT"
] | null | null | null | webvtt/segmenter.py | gana-dimsum/webvtt-py | 6818ab363b057b8d0928ce395683ea2e95e08dea | [
"MIT"
] | null | null | null | webvtt/segmenter.py | gana-dimsum/webvtt-py | 6818ab363b057b8d0928ce395683ea2e95e08dea | [
"MIT"
] | null | null | null | import os
from math import ceil, floor
from .errors import InvalidCaptionsError
from .webvtt import WebVTT
from .structures import Caption
MPEGTS = 900000
SECONDS = 10 # default number of seconds per segment
__all__ = ['WebVTTSegmenter']
class WebVTTSegmenter(object):
"""
Provides segmentation of WebVTT captions for HTTP Live Streaming (HLS).
"""
def __init__(self):
self._total_segments = 0
self._output_folder = ''
self._seconds = 0
self._mpegts = 0
self._segments = []
def _validate_webvtt(self, webvtt):
        # Validate that webvtt is a WebVTT instance and that every caption it holds is a Caption.
if not isinstance(webvtt, WebVTT):
return False
for c in webvtt.captions:
if not isinstance(c, Caption):
return False
return True
def _slice_segments(self, captions):
self._segments = [[] for _ in range(self.total_segments)]
for c in captions:
segment_index_start = floor(c.start_in_seconds / self.seconds)
self.segments[segment_index_start].append(c)
# Also include a caption in other segments based on the end time.
segment_index_end = floor(c.end_in_seconds / self.seconds)
if segment_index_end > segment_index_start:
for i in range(segment_index_start + 1, segment_index_end + 1):
self.segments[i].append(c)
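    # Worked example of the slicing above (illustrative only): with seconds=10,
    # a caption spanning 8.0s -> 12.5s has floor(8 / 10) = 0 and
    # floor(12.5 / 10) = 1, so it is appended to both segment 0 and segment 1
    # and keeps displaying across the segment boundary.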
def _write_segments(self):
for index in range(self.total_segments):
segment_file = os.path.join(self._output_folder, '{}-{}.webvtt'.format(self._webvttname, index))
with open(segment_file, 'w', encoding='utf-8') as f:
f.write('WEBVTT\n')
f.write('X-TIMESTAMP-MAP=MPEGTS:{},LOCAL:00:00:00.000\n'.format(self._mpegts))
for caption in self.segments[index]:
f.write('\n{} --> {}\n'.format(caption.start, caption.end))
f.writelines(['{}\n'.format(l) for l in caption.lines])
def _write_manifest(self):
manifest_file = os.path.join(self._output_folder, '{}.m3u8'.format(self._webvttname))
with open(manifest_file, 'w', encoding='utf-8') as f:
f.write('#EXTM3U\n')
f.write('#EXT-X-VERSION:3\n')
f.write('#EXT-X-MEDIA-SEQUENCE:0\n')
f.write('#EXT-X-TARGETDURATION:{}\n'.format(self.seconds))
f.write('#EXT-X-PLAYLIST-TYPE:VOD\n')
for i in range(self.total_segments):
f.write('#EXTINF:{}.00000\n'.format(self.seconds))
f.write('{}-{}.webvtt\n'.format(os.path.basename(self._webvttname), i))
f.write('#EXT-X-ENDLIST\n')
def segment(self, webvtt, output='', seconds=SECONDS, mpegts=MPEGTS):
"""Segments the captions based on a number of seconds."""
if isinstance(webvtt, str):
# if a string is supplied we parse the file
captions = WebVTT().read(webvtt).captions
elif not self._validate_webvtt(webvtt):
raise InvalidCaptionsError('The captions provided are invalid')
else:
# we expect to have a webvtt object
captions = webvtt.captions
self._total_segments = 0 if not captions else int(ceil(captions[-1].end_in_seconds / seconds))
self._output_folder = output
self._seconds = seconds
self._mpegts = mpegts
        # Base name for the segment files; ``os.path.splitext`` only works when
        # the caller passed a file path, so fall back to a generic name (an
        # assumed default, not from the original library) for WebVTT objects.
        webvtt_name = os.path.splitext(webvtt)[0] if isinstance(webvtt, str) else 'segment'
        self._webvttname = webvtt_name
output_folder = os.path.join(os.getcwd(), output)
if not os.path.exists(output_folder):
os.makedirs(output_folder)
self._slice_segments(captions)
self._write_segments()
self._write_manifest()
@property
def seconds(self):
"""Returns the number of seconds used for segmenting captions."""
return self._seconds
@property
def total_segments(self):
"""Returns the total of segments."""
return self._total_segments
@property
def segments(self):
"""Return the list of segments."""
return self._segments
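# Usage sketch (illustrative, not part of the module): assuming a local file
# named captions.vtt,
#
#     WebVTTSegmenter().segment('captions.vtt', output='subs', seconds=10)
#
# writes subs/captions-0.webvtt, subs/captions-1.webvtt, ... plus a
# subs/captions.m3u8 playlist referencing them for HLS delivery.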
| 37.278261 | 109 | 0.598554 |
6d3947c863071783b04607f068bf0738e003639c | 1,184 | py | Python | Python/997.find-the-town-judge.py | Mo-Shakib/LeetCode | a3f1cfda648d9abf504e9d79697f1ca433c48460 | [
"Apache-2.0"
] | 1 | 2022-01-10T01:10:03.000Z | 2022-01-10T01:10:03.000Z | Python/997.find-the-town-judge.py | Mo-Shakib/LeetCode | a3f1cfda648d9abf504e9d79697f1ca433c48460 | [
"Apache-2.0"
] | 3 | 2022-01-10T18:24:21.000Z | 2022-01-10T22:38:38.000Z | Python/997.find-the-town-judge.py | Mo-Shakib/LeetCode | a3f1cfda648d9abf504e9d79697f1ca433c48460 | [
"Apache-2.0"
] | 2 | 2022-01-10T05:06:22.000Z | 2022-01-14T06:20:09.000Z | #
# @lc app=leetcode id=997 lang=python3
#
# [997] Find the Town Judge
#
# @lc code=start
from typing import List
class Solution:
def findJudge(self, n: int, trust: List[List[int]]) -> int:
        trusted = {}  # person -> net trust score
        # a lone person (n == 1 with an empty trust list) is trivially the judge
if len(trust) == 0 and n == 1:
return 1
        # more than one person but nobody trusts anyone, so there can be no judge
if n > 1 and len(trust) == 0:
return -1
        # Score every person from the trust pairs: trusting someone lowers a's score by one, while being trusted raises b's score by one.
for (a, b) in trust:
if a not in trusted:
trusted[a] = -1
else:
trusted[a] -= 1
if b not in trusted:
trusted[b] = 1
else:
trusted[b] += 1
        # the judge is trusted by all n-1 others and trusts nobody, so their score is exactly n - 1
        for key, value in trusted.items():
            if value == n - 1:
return key
return -1
# @lc code=end
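# Quick check (a sketch outside the LeetCode harness): with n=3 and
# trust=[[1, 3], [2, 3]], person 3 is trusted twice and trusts nobody, so the
# score reaches n - 1 = 2 and findJudge returns 3.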
| 29.6 | 161 | 0.513514 |
e056a25f4263cfc7aee896cca0e0254a88d89670 | 543 | py | Python | edgelm/fairseq/model_parallel/modules/__init__.py | guotao0628/DeepNet | 1ae74d8b44d715bf67c7d64a8efafff4b7c7937a | [
"MIT"
] | 1 | 2021-11-07T00:30:05.000Z | 2021-11-07T00:30:05.000Z | edgelm/fairseq/model_parallel/modules/__init__.py | guotao0628/DeepNet | 1ae74d8b44d715bf67c7d64a8efafff4b7c7937a | [
"MIT"
] | null | null | null | edgelm/fairseq/model_parallel/modules/__init__.py | guotao0628/DeepNet | 1ae74d8b44d715bf67c7d64a8efafff4b7c7937a | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
from .multihead_attention import ModelParallelMultiheadAttention
from .transformer_layer import (
ModelParallelTransformerEncoderLayer,
ModelParallelTransformerDecoderLayer,
)
__all__ = [
"ModelParallelMultiheadAttention",
"ModelParallelTransformerEncoderLayer",
"ModelParallelTransformerDecoderLayer",
]
| 30.166667 | 66 | 0.767956 |
ecf905625038e4e26c1595fd7ee933f6ee73c483 | 1,569 | py | Python | 463.island-perimeter.py | Lonitch/hackerRank | 84991b8340e725422bc47eec664532cc84a3447e | [
"MIT"
] | null | null | null | 463.island-perimeter.py | Lonitch/hackerRank | 84991b8340e725422bc47eec664532cc84a3447e | [
"MIT"
] | null | null | null | 463.island-perimeter.py | Lonitch/hackerRank | 84991b8340e725422bc47eec664532cc84a3447e | [
"MIT"
] | null | null | null | #
# @lc app=leetcode id=463 lang=python3
#
# [463] Island Perimeter
#
# https://leetcode.com/problems/island-perimeter/description/
#
# algorithms
# Easy (62.13%)
# Likes: 1346
# Dislikes: 95
# Total Accepted: 159.6K
# Total Submissions: 255.9K
# Testcase Example: '[[0,1,0,0],[1,1,1,0],[0,1,0,0],[1,1,0,0]]'
#
# You are given a map in form of a two-dimensional integer grid where 1
# represents land and 0 represents water.
#
# Grid cells are connected horizontally/vertically (not diagonally). The grid
# is completely surrounded by water, and there is exactly one island (i.e., one
# or more connected land cells).
#
# The island doesn't have "lakes" (water inside that isn't connected to the
# water around the island). One cell is a square with side length 1. The grid
# is rectangular, width and height don't exceed 100. Determine the perimeter of
# the island.
#
#
#
# Example:
#
#
# Input:
# [[0,1,0,0],
# [1,1,1,0],
# [0,1,0,0],
# [1,1,0,0]]
#
# Output: 16
#
# Explanation: The perimeter is the 16 yellow stripes in the image below:
#
#
#
#
#
# @lc code=start
from typing import List
class Solution:
def islandPerimeter(self, grid: List[List[int]]) -> int:
ans = 0
m = len(grid)
n = len(grid[0])
        for i in range(m):
            for j in range(n):
                if grid[i][j] == 1:
                    ans += 4
                    # every land neighbour hides one side of this cell
                    for a, b in zip([1, -1, 0, 0], [0, 0, 1, -1]):
                        if 0 <= i + a < m and 0 <= j + b < n:
                            ans -= (grid[i + a][j + b] == 1)
return ans
# @lc code=end
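# Quick check (a sketch outside the LeetCode harness): the 4x4 grid in the
# docstring has 7 land cells (7 * 4 = 28 raw sides) and 6 land-land
# adjacencies, each hiding one side of both neighbours, so 28 - 2 * 6 = 16.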
| 24.515625 | 79 | 0.583811 |
5a833ebd1c0d7a6e6405f9bbcb480edcd0445186 | 27,917 | py | Python | samples/client/petstore/python-tornado/petstore_api/api_client.py | sensorario/openapi-generator | bf68e9b7d2d9a27fab481fe6bab3f57bc135b94c | [
"Apache-2.0"
] | 1 | 2022-01-03T04:40:07.000Z | 2022-01-03T04:40:07.000Z | samples/client/petstore/python-tornado/petstore_api/api_client.py | sensorario/openapi-generator | bf68e9b7d2d9a27fab481fe6bab3f57bc135b94c | [
"Apache-2.0"
] | 28 | 2021-04-07T07:38:36.000Z | 2022-03-31T03:10:56.000Z | samples/client/petstore/python-tornado/petstore_api/api_client.py | sensorario/openapi-generator | bf68e9b7d2d9a27fab481fe6bab3f57bc135b94c | [
"Apache-2.0"
] | 2 | 2021-11-03T10:07:15.000Z | 2021-12-17T13:00:53.000Z | # coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import atexit
import datetime
from dateutil.parser import parse
import json
import mimetypes
from multiprocessing.pool import ThreadPool
import os
import re
import tempfile
# python 2 and python 3 compatibility library
import six
from six.moves.urllib.parse import quote
import tornado.gen
from petstore_api.configuration import Configuration
import petstore_api.models
from petstore_api import rest
from petstore_api.exceptions import ApiValueError, ApiException
class ApiClient(object):
"""Generic API client for OpenAPI client library builds.
OpenAPI generic API client. This client handles the client-
server communication, and is invariant across implementations. Specifics of
the methods and models for each application are generated from the OpenAPI
templates.
NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
:param configuration: .Configuration object for this client
:param header_name: a header to pass when making calls to the API.
:param header_value: a header value to pass when making calls to
the API.
:param cookie: a cookie to include in the header when making calls
to the API
:param pool_threads: The number of threads to use for async requests
to the API. More threads means more concurrent API requests.
"""
PRIMITIVE_TYPES = (float, bool, bytes, six.text_type) + six.integer_types
NATIVE_TYPES_MAPPING = {
'int': int,
'long': int if six.PY3 else long, # noqa: F821
'float': float,
'str': str,
'bool': bool,
'date': datetime.date,
'datetime': datetime.datetime,
'object': object,
}
_pool = None
def __init__(self, configuration=None, header_name=None, header_value=None,
cookie=None, pool_threads=1):
if configuration is None:
configuration = Configuration.get_default_copy()
self.configuration = configuration
self.pool_threads = pool_threads
self.rest_client = rest.RESTClientObject(configuration)
self.default_headers = {}
if header_name is not None:
self.default_headers[header_name] = header_value
self.cookie = cookie
# Set default User-Agent.
self.user_agent = 'OpenAPI-Generator/1.0.0/python'
self.client_side_validation = configuration.client_side_validation
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
if self._pool:
self._pool.close()
self._pool.join()
self._pool = None
if hasattr(atexit, 'unregister'):
atexit.unregister(self.close)
@property
def pool(self):
"""Create thread pool on first request
avoids instantiating unused threadpool for blocking clients.
"""
if self._pool is None:
atexit.register(self.close)
self._pool = ThreadPool(self.pool_threads)
return self._pool
@property
def user_agent(self):
"""User agent for this API client"""
return self.default_headers['User-Agent']
@user_agent.setter
def user_agent(self, value):
self.default_headers['User-Agent'] = value
def set_default_header(self, header_name, header_value):
self.default_headers[header_name] = header_value
@tornado.gen.coroutine
def __call_api(
self, resource_path, method, path_params=None,
query_params=None, header_params=None, body=None, post_params=None,
files=None, response_types_map=None, auth_settings=None,
_return_http_data_only=None, collection_formats=None,
_preload_content=True, _request_timeout=None, _host=None,
_request_auth=None):
config = self.configuration
# header parameters
header_params = header_params or {}
header_params.update(self.default_headers)
if self.cookie:
header_params['Cookie'] = self.cookie
if header_params:
header_params = self.sanitize_for_serialization(header_params)
header_params = dict(self.parameters_to_tuples(header_params,
collection_formats))
# path parameters
if path_params:
path_params = self.sanitize_for_serialization(path_params)
path_params = self.parameters_to_tuples(path_params,
collection_formats)
for k, v in path_params:
# specified safe chars, encode everything
resource_path = resource_path.replace(
'{%s}' % k,
quote(str(v), safe=config.safe_chars_for_path_param)
)
# query parameters
if query_params:
query_params = self.sanitize_for_serialization(query_params)
query_params = self.parameters_to_tuples(query_params,
collection_formats)
# post parameters
if post_params or files:
post_params = post_params if post_params else []
post_params = self.sanitize_for_serialization(post_params)
post_params = self.parameters_to_tuples(post_params,
collection_formats)
post_params.extend(self.files_parameters(files))
# auth setting
self.update_params_for_auth(
header_params, query_params, auth_settings,
request_auth=_request_auth)
# body
if body:
body = self.sanitize_for_serialization(body)
# request url
if _host is None:
url = self.configuration.host + resource_path
else:
# use server/host defined in path or operation instead
url = _host + resource_path
try:
# perform request and return response
response_data = yield self.request(
method, url, query_params=query_params, headers=header_params,
post_params=post_params, body=body,
_preload_content=_preload_content,
_request_timeout=_request_timeout)
except ApiException as e:
e.body = e.body.decode('utf-8') if six.PY3 else e.body
raise e
self.last_response = response_data
return_data = response_data
if not _preload_content:
raise tornado.gen.Return(return_data)
response_type = response_types_map.get(response_data.status, None)
if six.PY3 and response_type not in ["file", "bytes"]:
match = None
content_type = response_data.getheader('content-type')
if content_type is not None:
match = re.search(r"charset=([a-zA-Z\-\d]+)[\s\;]?", content_type)
encoding = match.group(1) if match else "utf-8"
response_data.data = response_data.data.decode(encoding)
# deserialize response data
if response_type:
return_data = self.deserialize(response_data, response_type)
else:
return_data = None
if _return_http_data_only:
raise tornado.gen.Return(return_data)
else:
raise tornado.gen.Return((return_data, response_data.status,
response_data.getheaders()))
def sanitize_for_serialization(self, obj):
"""Builds a JSON POST object.
If obj is None, return None.
If obj is str, int, long, float, bool, return directly.
If obj is datetime.datetime, datetime.date
convert to string in iso8601 format.
If obj is list, sanitize each element in the list.
        If obj is dict, sanitize each value in the dict.
If obj is OpenAPI model, return the properties dict.
:param obj: The data to serialize.
:return: The serialized form of data.
"""
if obj is None:
return None
elif isinstance(obj, self.PRIMITIVE_TYPES):
return obj
elif isinstance(obj, list):
return [self.sanitize_for_serialization(sub_obj)
for sub_obj in obj]
elif isinstance(obj, tuple):
return tuple(self.sanitize_for_serialization(sub_obj)
for sub_obj in obj)
elif isinstance(obj, (datetime.datetime, datetime.date)):
return obj.isoformat()
if isinstance(obj, dict):
obj_dict = obj
else:
            # Convert a model object to a dict, excluding the attributes
            # `openapi_types` and `attribute_map` and skipping any
            # attribute whose value is None. Attribute names are mapped
            # to the JSON keys defined in the model's `attribute_map`
            # for the request.
obj_dict = {obj.attribute_map[attr]: getattr(obj, attr)
for attr, _ in six.iteritems(obj.openapi_types)
if getattr(obj, attr) is not None}
return {key: self.sanitize_for_serialization(val)
for key, val in six.iteritems(obj_dict)}
def deserialize(self, response, response_type):
"""Deserializes response into an object.
:param response: RESTResponse object to be deserialized.
:param response_type: class literal for
deserialized object, or string of class name.
:return: deserialized object.
"""
# handle file downloading
# save response body into a tmp file and return the instance
if response_type == "file":
return self.__deserialize_file(response)
# fetch data from response object
try:
data = json.loads(response.data)
except ValueError:
data = response.data
return self.__deserialize(data, response_type)
def __deserialize(self, data, klass):
"""Deserializes dict, list, str into an object.
:param data: dict, list or str.
:param klass: class literal, or string of class name.
:return: object.
"""
if data is None:
return None
        if isinstance(klass, str):
if klass.startswith('list['):
sub_kls = re.match(r'list\[(.*)\]', klass).group(1)
return [self.__deserialize(sub_data, sub_kls)
for sub_data in data]
if klass.startswith('dict('):
sub_kls = re.match(r'dict\(([^,]*), (.*)\)', klass).group(2)
return {k: self.__deserialize(v, sub_kls)
for k, v in six.iteritems(data)}
# convert str to class
if klass in self.NATIVE_TYPES_MAPPING:
klass = self.NATIVE_TYPES_MAPPING[klass]
else:
klass = getattr(petstore_api.models, klass)
if klass in self.PRIMITIVE_TYPES:
return self.__deserialize_primitive(data, klass)
elif klass == object:
return self.__deserialize_object(data)
elif klass == datetime.date:
return self.__deserialize_date(data)
elif klass == datetime.datetime:
return self.__deserialize_datetime(data)
else:
return self.__deserialize_model(data, klass)
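    # Illustrative type strings handled above: 'list[Pet]' deserializes to a
    # list of Pet model instances, 'dict(str, int)' to a dict of int values,
    # and 'datetime' to a datetime.datetime parsed from an ISO-8601 string
    # ('Pet' here stands for any model class in petstore_api.models).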
def call_api(self, resource_path, method,
path_params=None, query_params=None, header_params=None,
body=None, post_params=None, files=None,
response_types_map=None, auth_settings=None,
async_req=None, _return_http_data_only=None,
                 collection_formats=None, _preload_content=True,
_request_timeout=None, _host=None, _request_auth=None):
"""Makes the HTTP request (synchronous) and returns deserialized data.
To make an async_req request, set the async_req parameter.
:param resource_path: Path to method endpoint.
:param method: Method to call.
:param path_params: Path parameters in the url.
:param query_params: Query parameters in the url.
:param header_params: Header parameters to be
placed in the request header.
:param body: Request body.
:param post_params dict: Request post form parameters,
for `application/x-www-form-urlencoded`, `multipart/form-data`.
:param auth_settings list: Auth Settings names for the request.
        :param response_types_map: dict of response data types,
            keyed by HTTP status code.
:param files dict: key -> filename, value -> filepath,
for `multipart/form-data`.
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param collection_formats: dict of collection formats for path, query,
header, and post parameters.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
                              request; this effectively ignores the
                              authentication in the spec for that request.
        :type _request_auth: dict, optional
:return:
If async_req parameter is True,
the request will be called asynchronously.
The method will return the request thread.
If parameter async_req is False or missing,
then the method will return the response directly.
"""
if not async_req:
return self.__call_api(resource_path, method,
path_params, query_params, header_params,
body, post_params, files,
response_types_map, auth_settings,
_return_http_data_only, collection_formats,
_preload_content, _request_timeout, _host,
_request_auth)
return self.pool.apply_async(self.__call_api, (resource_path,
method, path_params,
query_params,
header_params, body,
post_params, files,
response_types_map,
auth_settings,
_return_http_data_only,
collection_formats,
_preload_content,
_request_timeout,
_host, _request_auth))
def request(self, method, url, query_params=None, headers=None,
post_params=None, body=None, _preload_content=True,
_request_timeout=None):
"""Makes the HTTP request using RESTClient."""
if method == "GET":
return self.rest_client.GET(url,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
headers=headers)
elif method == "HEAD":
return self.rest_client.HEAD(url,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
headers=headers)
elif method == "OPTIONS":
return self.rest_client.OPTIONS(url,
query_params=query_params,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout)
elif method == "POST":
return self.rest_client.POST(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "PUT":
return self.rest_client.PUT(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "PATCH":
return self.rest_client.PATCH(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "DELETE":
return self.rest_client.DELETE(url,
query_params=query_params,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
else:
raise ApiValueError(
"http method must be `GET`, `HEAD`, `OPTIONS`,"
" `POST`, `PATCH`, `PUT` or `DELETE`."
)
def parameters_to_tuples(self, params, collection_formats):
"""Get parameters as list of tuples, formatting collections.
:param params: Parameters as dict or list of two-tuples
:param dict collection_formats: Parameter collection formats
:return: Parameters as list of tuples, collections formatted
"""
new_params = []
if collection_formats is None:
collection_formats = {}
for k, v in six.iteritems(params) if isinstance(params, dict) else params: # noqa: E501
if k in collection_formats:
collection_format = collection_formats[k]
if collection_format == 'multi':
new_params.extend((k, value) for value in v)
else:
if collection_format == 'ssv':
delimiter = ' '
elif collection_format == 'tsv':
delimiter = '\t'
elif collection_format == 'pipes':
delimiter = '|'
else: # csv is the default
delimiter = ','
new_params.append(
(k, delimiter.join(str(value) for value in v)))
else:
new_params.append((k, v))
return new_params
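    # Illustrative: with collection_formats={'ids': 'csv'} and
    # params={'ids': [3, 4, 5]} this returns [('ids', '3,4,5')]; with the
    # 'multi' format it would return [('ids', 3), ('ids', 4), ('ids', 5)].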
def files_parameters(self, files=None):
"""Builds form parameters.
:param files: File parameters.
:return: Form parameters with files.
"""
params = []
if files:
for k, v in six.iteritems(files):
if not v:
continue
                file_names = v if isinstance(v, list) else [v]
for n in file_names:
with open(n, 'rb') as f:
filename = os.path.basename(f.name)
filedata = f.read()
mimetype = (mimetypes.guess_type(filename)[0] or
'application/octet-stream')
params.append(
tuple([k, tuple([filename, filedata, mimetype])]))
return params
def select_header_accept(self, accepts):
"""Returns `Accept` based on an array of accepts provided.
:param accepts: List of headers.
:return: Accept (e.g. application/json).
"""
if not accepts:
return
accepts = [x.lower() for x in accepts]
if 'application/json' in accepts:
return 'application/json'
else:
return ', '.join(accepts)
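    # Illustrative: ['application/xml', 'application/json'] yields
    # 'application/json', while ['application/xml', 'text/plain'] yields
    # 'application/xml, text/plain'.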
def select_header_content_type(self, content_types, method=None, body=None):
"""Returns `Content-Type` based on an array of content_types provided.
:param content_types: List of content-types.
:param method: http method (e.g. POST, PATCH).
:param body: http body to send.
:return: Content-Type (e.g. application/json).
"""
if not content_types:
return 'application/json'
content_types = [x.lower() for x in content_types]
if (method == 'PATCH' and
'application/json-patch+json' in content_types and
isinstance(body, list)):
return 'application/json-patch+json'
if 'application/json' in content_types or '*/*' in content_types:
return 'application/json'
else:
return content_types[0]
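    # Illustrative: an empty list falls back to 'application/json'; a PATCH
    # with a list body picks 'application/json-patch+json' when offered;
    # otherwise 'application/json' (or '*/*') wins, else the first entry.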
def update_params_for_auth(self, headers, queries, auth_settings,
request_auth=None):
"""Updates header and query params based on authentication setting.
:param headers: Header parameters dict to be updated.
:param queries: Query parameters tuple list to be updated.
:param auth_settings: Authentication setting identifiers list.
:param request_auth: if set, the provided settings will
override the token in the configuration.
"""
if not auth_settings:
return
if request_auth:
self._apply_auth_params(headers, queries, request_auth)
return
for auth in auth_settings:
auth_setting = self.configuration.auth_settings().get(auth)
if auth_setting:
self._apply_auth_params(headers, queries, auth_setting)
def _apply_auth_params(self, headers, queries, auth_setting):
"""Updates the request parameters based on a single auth_setting
:param headers: Header parameters dict to be updated.
:param queries: Query parameters tuple list to be updated.
:param auth_setting: auth settings for the endpoint
"""
if auth_setting['in'] == 'cookie':
headers['Cookie'] = auth_setting['value']
elif auth_setting['in'] == 'header':
headers[auth_setting['key']] = auth_setting['value']
elif auth_setting['in'] == 'query':
queries.append((auth_setting['key'], auth_setting['value']))
else:
raise ApiValueError(
                'Authentication token must be in `query`, `header` or `cookie`'
)
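    # Illustrative auth_setting shape consumed above:
    #     {'in': 'header', 'key': 'api_key', 'value': 'SECRET'}
    # which results in headers['api_key'] = 'SECRET'; 'query' settings are
    # appended to the query tuple list instead.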
def __deserialize_file(self, response):
"""Deserializes body to file
Saves response body into a file in a temporary folder,
using the filename from the `Content-Disposition` header if provided.
:param response: RESTResponse.
:return: file path.
"""
fd, path = tempfile.mkstemp(dir=self.configuration.temp_folder_path)
os.close(fd)
os.remove(path)
content_disposition = response.getheader("Content-Disposition")
if content_disposition:
filename = re.search(r'filename=[\'"]?([^\'"\s]+)[\'"]?',
content_disposition).group(1)
path = os.path.join(os.path.dirname(path), filename)
with open(path, "wb") as f:
f.write(response.data)
return path
def __deserialize_primitive(self, data, klass):
"""Deserializes string to primitive type.
:param data: str.
:param klass: class literal.
:return: int, long, float, str, bool.
"""
try:
return klass(data)
except UnicodeEncodeError:
return six.text_type(data)
except TypeError:
return data
def __deserialize_object(self, value):
"""Return an original value.
:return: object.
"""
return value
def __deserialize_date(self, string):
"""Deserializes string to date.
:param string: str.
:return: date.
"""
try:
return parse(string).date()
except ImportError:
return string
except ValueError:
raise rest.ApiException(
status=0,
reason="Failed to parse `{0}` as date object".format(string)
)
def __deserialize_datetime(self, string):
"""Deserializes string to datetime.
The string should be in iso8601 datetime format.
:param string: str.
:return: datetime.
"""
try:
return parse(string)
except ImportError:
return string
except ValueError:
raise rest.ApiException(
status=0,
reason=(
"Failed to parse `{0}` as datetime object"
.format(string)
)
)
def __deserialize_model(self, data, klass):
"""Deserializes list or dict to model.
:param data: dict, list.
:param klass: class literal.
:return: model object.
"""
has_discriminator = False
if (hasattr(klass, 'get_real_child_model')
and klass.discriminator_value_class_map):
has_discriminator = True
if not klass.openapi_types and has_discriminator is False:
return data
kwargs = {}
if (data is not None and
klass.openapi_types is not None and
isinstance(data, (list, dict))):
for attr, attr_type in six.iteritems(klass.openapi_types):
if klass.attribute_map[attr] in data:
value = data[klass.attribute_map[attr]]
kwargs[attr] = self.__deserialize(value, attr_type)
instance = klass(**kwargs)
if has_discriminator:
klass_name = instance.get_real_child_model(data)
if klass_name:
instance = self.__deserialize(data, klass_name)
return instance
| 39.711238 | 174 | 0.557653 |
144ff40bec50ef1f05b013319b985a575aea9a2a | 975 | py | Python | setup.py | bernardosabatinilab/face-rhythm | ea4b5213827beecc174a0f510574d81346b2f07e | [
"MIT"
] | null | null | null | setup.py | bernardosabatinilab/face-rhythm | ea4b5213827beecc174a0f510574d81346b2f07e | [
"MIT"
] | null | null | null | setup.py | bernardosabatinilab/face-rhythm | ea4b5213827beecc174a0f510574d81346b2f07e | [
"MIT"
] | null | null | null | from setuptools import find_packages, setup
setup(
name='face_rhythm',
packages=find_packages(),
version='0.1.0',
description="Project structure for Face Rhythms",
author='Rich Hakim',
license='MIT',
install_requires=['numpy==1.18.3',
'torch',
'torchvision',
'torchaudio',
'jupyterlab',
'tensorly',
'opencv-python==4.5.1.48',
'imageio==2.9.0',
'matplotlib',
'scikit-learn',
'scikit-image',
'librosa',
'pyyaml',
'imageio-ffmpeg',
'tqdm',
'h5py==2.10.0',
'pynwb',
'ipywidgets',
'pytest',
'Pillow==7.2.0'
]
)
| 30.46875 | 53 | 0.36 |
097de645e230633d82089907f67ba65b5ad79f5a | 9,517 | py | Python | tests/connection/test_fsm.py | workfloworchestrator/SuPA | 75c34a446e7133ac3f9378810db749a7df2c21a3 | [
"Apache-2.0"
] | null | null | null | tests/connection/test_fsm.py | workfloworchestrator/SuPA | 75c34a446e7133ac3f9378810db749a7df2c21a3 | [
"Apache-2.0"
] | 6 | 2021-12-01T13:05:28.000Z | 2022-03-07T12:40:10.000Z | tests/connection/test_fsm.py | workfloworchestrator/SuPA | 75c34a446e7133ac3f9378810db749a7df2c21a3 | [
"Apache-2.0"
] | null | null | null | from supa.connection.fsm import (
DataPlaneStateMachine,
LifecycleStateMachine,
ProvisionStateMachine,
ReservationStateMachine,
)
from supa.db.model import Reservation
def test_reservation_state_machine() -> None: # noqa: D103
reservation = Reservation()
rsm = ReservationStateMachine(reservation, state_field="reservation_state")
#
# reserve_request -> reserve_failed -> reserve_abort_request -> reserve_abort_confirmed
#
assert reservation.reservation_state == ReservationStateMachine.ReserveStart.value
rsm.reserve_request()
assert reservation.reservation_state == ReservationStateMachine.ReserveChecking.value
rsm.reserve_failed()
assert reservation.reservation_state == ReservationStateMachine.ReserveFailed.value
rsm.reserve_abort_request()
assert reservation.reservation_state == ReservationStateMachine.ReserveAborting.value
rsm.reserve_abort_confirmed()
assert reservation.reservation_state == ReservationStateMachine.ReserveStart.value
#
# reserve_request -> reserve_confirmed -> reserve_abort_request -> reserve_abort_confirmed
#
assert reservation.reservation_state == ReservationStateMachine.ReserveStart.value
rsm.reserve_request()
assert reservation.reservation_state == ReservationStateMachine.ReserveChecking.value
rsm.reserve_confirmed()
assert reservation.reservation_state == ReservationStateMachine.ReserveHeld.value
rsm.reserve_abort_request()
assert reservation.reservation_state == ReservationStateMachine.ReserveAborting.value
rsm.reserve_abort_confirmed()
assert reservation.reservation_state == ReservationStateMachine.ReserveStart.value
#
# reserve_request -> reserve_confirmed -> reserve_commit_request -> reserve_commit_confirmed
#
assert reservation.reservation_state == ReservationStateMachine.ReserveStart.value
rsm.reserve_request()
assert reservation.reservation_state == ReservationStateMachine.ReserveChecking.value
rsm.reserve_confirmed()
assert reservation.reservation_state == ReservationStateMachine.ReserveHeld.value
rsm.reserve_commit_request()
assert reservation.reservation_state == ReservationStateMachine.ReserveCommitting.value
rsm.reserve_commit_confirmed()
assert reservation.reservation_state == ReservationStateMachine.ReserveStart.value
#
# reserve_request -> reserve_confirmed -> reserve_commit_request -> reserve_commit_failed
#
assert reservation.reservation_state == ReservationStateMachine.ReserveStart.value
rsm.reserve_request()
assert reservation.reservation_state == ReservationStateMachine.ReserveChecking.value
rsm.reserve_confirmed()
assert reservation.reservation_state == ReservationStateMachine.ReserveHeld.value
rsm.reserve_commit_request()
assert reservation.reservation_state == ReservationStateMachine.ReserveCommitting.value
rsm.reserve_commit_failed()
assert reservation.reservation_state == ReservationStateMachine.ReserveStart.value
#
# reserve_request -> reserve_confirmed -> reserve_timeout_notification -> reserve_commit_request ->
# reserve_commit_confirmed
#
assert reservation.reservation_state == ReservationStateMachine.ReserveStart.value
rsm.reserve_request()
assert reservation.reservation_state == ReservationStateMachine.ReserveChecking.value
rsm.reserve_confirmed()
assert reservation.reservation_state == ReservationStateMachine.ReserveHeld.value
rsm.reserve_timeout_notification()
assert reservation.reservation_state == ReservationStateMachine.ReserveTimeout.value
rsm.reserve_commit_request()
assert reservation.reservation_state == ReservationStateMachine.ReserveCommitting.value
rsm.reserve_commit_confirmed()
assert reservation.reservation_state == ReservationStateMachine.ReserveStart.value
#
# reserve_request -> reserve_confirmed -> reserve_timeout_notification -> reserve_abort_request ->
# reserve_abort_confirmed
#
assert reservation.reservation_state == ReservationStateMachine.ReserveStart.value
rsm.reserve_request()
assert reservation.reservation_state == ReservationStateMachine.ReserveChecking.value
rsm.reserve_confirmed()
assert reservation.reservation_state == ReservationStateMachine.ReserveHeld.value
rsm.reserve_timeout_notification()
assert reservation.reservation_state == ReservationStateMachine.ReserveTimeout.value
rsm.reserve_abort_request()
assert reservation.reservation_state == ReservationStateMachine.ReserveAborting.value
rsm.reserve_abort_confirmed()
assert reservation.reservation_state == ReservationStateMachine.ReserveStart.value
def test_provision_state_machine() -> None: # noqa: D103
reservation = Reservation()
psm = ProvisionStateMachine(reservation, state_field="provision_state")
#
# provision_request -> provision_confirmed -> release_request -> release_confirmed
#
assert reservation.provision_state == ProvisionStateMachine.Released.value
psm.provision_request()
assert reservation.provision_state == ProvisionStateMachine.Provisioning.value
psm.provision_confirmed()
assert reservation.provision_state == ProvisionStateMachine.Provisioned.value
psm.release_request()
assert reservation.provision_state == ProvisionStateMachine.Releasing.value
psm.release_confirmed()
assert reservation.provision_state == ProvisionStateMachine.Released.value
def test_lifecycle_state_machine() -> None: # noqa: D103
reservation = Reservation()
lsm = LifecycleStateMachine(reservation, state_field="lifecycle_state")
#
# terminate_request -> terminate_confirmed
#
assert reservation.lifecycle_state == LifecycleStateMachine.Created.value
lsm.terminate_request()
assert reservation.lifecycle_state == LifecycleStateMachine.Terminating.value
lsm.terminate_confirmed()
assert reservation.lifecycle_state == LifecycleStateMachine.Terminated.value
#
# forced_end_notification -> terminate_request -> terminate_confirmed
#
reservation = Reservation()
lsm = LifecycleStateMachine(reservation, state_field="lifecycle_state")
assert reservation.lifecycle_state == LifecycleStateMachine.Created.value
lsm.forced_end_notification()
assert reservation.lifecycle_state == LifecycleStateMachine.Failed.value
lsm.terminate_request()
assert reservation.lifecycle_state == LifecycleStateMachine.Terminating.value
lsm.terminate_confirmed()
assert reservation.lifecycle_state == LifecycleStateMachine.Terminated.value
#
# endtime_event -> terminate_request -> terminate_confirmed
#
reservation = Reservation()
lsm = LifecycleStateMachine(reservation, state_field="lifecycle_state")
assert reservation.lifecycle_state == LifecycleStateMachine.Created.value
lsm.endtime_event()
assert reservation.lifecycle_state == LifecycleStateMachine.PassedEndTime.value
lsm.terminate_request()
assert reservation.lifecycle_state == LifecycleStateMachine.Terminating.value
lsm.terminate_confirmed()
assert reservation.lifecycle_state == LifecycleStateMachine.Terminated.value
def test_data_plane_state_machine() -> None: # noqa: D103
reservation = Reservation()
dpsm = DataPlaneStateMachine(reservation, state_field="data_plane_state")
#
# auto_start_request -> deactivate_request
#
assert reservation.data_plane_state == DataPlaneStateMachine.Deactivated.value
dpsm.auto_start_request()
assert reservation.data_plane_state == DataPlaneStateMachine.AutoStart.value
dpsm.deactivate_request()
assert reservation.data_plane_state == DataPlaneStateMachine.Deactivated.value
#
# auto_start_request -> activate_request -> activate_failed
#
dpsm = DataPlaneStateMachine(reservation, state_field="data_plane_state")
assert reservation.data_plane_state == DataPlaneStateMachine.Deactivated.value
dpsm.auto_start_request()
assert reservation.data_plane_state == DataPlaneStateMachine.AutoStart.value
dpsm.activate_request()
assert reservation.data_plane_state == DataPlaneStateMachine.Activating.value
dpsm.activate_failed()
assert reservation.data_plane_state == DataPlaneStateMachine.ActivateFailed.value
#
# activate_request -> activate_confirmed
#
reservation.data_plane_state = DataPlaneStateMachine.Deactivated.value
dpsm.activate_request()
assert reservation.data_plane_state == DataPlaneStateMachine.Activating.value
dpsm.activate_confirmed()
assert reservation.data_plane_state == DataPlaneStateMachine.Activated.value
#
# auto_end_request -> deactivate_request -> deactivate_failed
#
reservation.data_plane_state = DataPlaneStateMachine.Activated.value
dpsm.auto_end_request()
assert reservation.data_plane_state == DataPlaneStateMachine.AutoEnd.value
dpsm.deactivate_request()
assert reservation.data_plane_state == DataPlaneStateMachine.Deactivating.value
dpsm.deactivate_failed()
assert reservation.data_plane_state == DataPlaneStateMachine.DeactivateFailed.value
#
# deactivate_request -> deactivate_confirm
#
reservation.data_plane_state = DataPlaneStateMachine.Activated.value
dpsm.deactivate_request()
assert reservation.data_plane_state == DataPlaneStateMachine.Deactivating.value
dpsm.deactivate_confirm()
assert reservation.data_plane_state == DataPlaneStateMachine.Deactivated.value
| 48.805128 | 103 | 0.794788 |
f2a6709b3d3ba733569487447a61fae1a53bb4ca | 15,078 | py | Python | data_generator_reptile.py | ryanbrand/mil | 6524047febe35fa59c356794f1649946332c4e7f | [
"MIT"
] | null | null | null | data_generator_reptile.py | ryanbrand/mil | 6524047febe35fa59c356794f1649946332c4e7f | [
"MIT"
] | null | null | null | data_generator_reptile.py | ryanbrand/mil | 6524047febe35fa59c356794f1649946332c4e7f | [
"MIT"
] | null | null | null | """ Code for loading data and generating data batches during training """
from __future__ import division
import copy
import logging
import os
import glob
import tempfile
import pickle
from datetime import datetime
from collections import OrderedDict
import numpy as np
import random
import tensorflow as tf
from utils import extract_demo_dict, Timer
from tensorflow.python.platform import flags
from natsort import natsorted
from random import shuffle
FLAGS = flags.FLAGS
class DataGenerator(object):
def __init__(self, config={}):
# Hyperparameters
self.update_batch_size = FLAGS.update_batch_size
self.test_batch_size = FLAGS.train_update_batch_size if FLAGS.train_update_batch_size != -1 else self.update_batch_size
self.meta_batch_size = FLAGS.meta_batch_size
self.T = FLAGS.T
self.demo_gif_dir = FLAGS.demo_gif_dir
self.gif_prefix = FLAGS.gif_prefix
self.restore_iter = FLAGS.restore_iter
# Scale and bias for data normalization
self.scale, self.bias = None, None
demo_file = FLAGS.demo_file
demo_file = natsorted(glob.glob(demo_file + '/*pkl'))
self.dataset_size = len(demo_file)
if FLAGS.train and FLAGS.training_set_size != -1:
tmp = demo_file[:FLAGS.training_set_size]
tmp.extend(demo_file[-FLAGS.val_set_size:])
demo_file = tmp
self.extract_supervised_data(demo_file)
if FLAGS.use_noisy_demos:
self.noisy_demo_gif_dir = FLAGS.noisy_demo_gif_dir
noisy_demo_file = FLAGS.noisy_demo_file
self.extract_supervised_data(noisy_demo_file, noisy=True)
def extract_supervised_data(self, demo_file, noisy=False):
"""
Load the states and actions of the demos into memory.
Args:
demo_file: list of demo files where each file contains expert's states and actions of one task.
"""
demos = extract_demo_dict(demo_file)
# We don't need the whole dataset of simulated pushing.
if FLAGS.experiment == 'sim_push':
for key in demos.keys():
demos[key]['demoX'] = demos[key]['demoX'][6:-6, :, :].copy()
demos[key]['demoU'] = demos[key]['demoU'][6:-6, :, :].copy()
n_folders = len(demos.keys())
        # use the builtin sum: np.sum on a generator is deprecated in numpy
        N_demos = sum(demo['demoX'].shape[0] for i, demo in demos.iteritems())
self.state_idx = range(demos[0]['demoX'].shape[-1])
self._dU = demos[0]['demoU'].shape[-1]
print "Number of demos: %d" % N_demos
idx = np.arange(n_folders)
if FLAGS.train:
n_val = FLAGS.val_set_size # number of demos for testing
if not hasattr(self, 'train_idx'):
if n_val != 0:
if not FLAGS.shuffle_val:
self.val_idx = idx[-n_val:]
self.train_idx = idx[:-n_val]
else:
self.val_idx = np.sort(np.random.choice(idx, size=n_val, replace=False))
mask = np.array([(i in self.val_idx) for i in idx])
self.train_idx = np.sort(idx[~mask])
else:
self.train_idx = idx
self.val_idx = []
# Normalize the states if it's training.
with Timer('Normalizing states'):
if self.scale is None or self.bias is None:
states = np.vstack((demos[i]['demoX'] for i in self.train_idx)) # hardcoded here to solve the memory issue
states = states.reshape(-1, len(self.state_idx))
# 1e-3 to avoid infs if some state dimensions don't change in the
# first batch of samples
self.scale = np.diag(
1.0 / np.maximum(np.std(states, axis=0), 1e-3))
self.bias = - np.mean(
states.dot(self.scale), axis=0)
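                # Net effect (illustrative): a raw state value x in a
                # dimension with mean mu and std sigma is mapped to
                # (x - mu) / max(sigma, 1e-3), i.e. roughly zero-mean,
                # unit-variance input features.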
# Save the scale and bias.
with open('data/scale_and_bias_%s.pkl' % FLAGS.experiment, 'wb') as f:
pickle.dump({'scale': self.scale, 'bias': self.bias}, f)
for key in demos.keys():
demos[key]['demoX'] = demos[key]['demoX'].reshape(-1, len(self.state_idx))
demos[key]['demoX'] = demos[key]['demoX'].dot(self.scale) + self.bias
demos[key]['demoX'] = demos[key]['demoX'].reshape(-1, self.T, len(self.state_idx))
if not noisy:
self.demos = demos
else:
self.noisy_demos = demos
def generate_batches(self, noisy=False):
with Timer('Generating batches for each iteration'):
if FLAGS.training_set_size != -1:
offset = self.dataset_size - FLAGS.training_set_size - FLAGS.val_set_size
else:
offset = 0
train_img_folders = {i: os.path.join(self.demo_gif_dir, self.gif_prefix + '_%d' % i) for i in self.train_idx}
val_img_folders = {i: os.path.join(self.demo_gif_dir, self.gif_prefix + '_%d' % (i+offset)) for i in self.val_idx}
if noisy:
noisy_train_img_folders = {i: os.path.join(self.noisy_demo_gif_dir, self.gif_prefix + '_%d' % i) for i in self.train_idx}
noisy_val_img_folders = {i: os.path.join(self.noisy_demo_gif_dir, self.gif_prefix + '_%d' % (i+offset)) for i in self.val_idx}
TEST_PRINT_INTERVAL = 500
TOTAL_ITERS = FLAGS.metatrain_iterations
self.all_training_filenames = []
self.all_val_filenames = []
self.training_batch_idx = {i: OrderedDict() for i in xrange(TOTAL_ITERS)}
self.val_batch_idx = {i: OrderedDict() for i in TEST_PRINT_INTERVAL*np.arange(1, int(TOTAL_ITERS/TEST_PRINT_INTERVAL))}
if noisy:
self.noisy_training_batch_idx = {i: OrderedDict() for i in xrange(TOTAL_ITERS)}
self.noisy_val_batch_idx = {i: OrderedDict() for i in TEST_PRINT_INTERVAL*np.arange(1, TOTAL_ITERS/TEST_PRINT_INTERVAL)}
for itr in xrange(TOTAL_ITERS):
sampled_train_idx = random.sample(self.train_idx, self.meta_batch_size)
for idx in sampled_train_idx:
sampled_folder = train_img_folders[idx]
image_paths = natsorted(os.listdir(sampled_folder))
if FLAGS.experiment == 'sim_push':
image_paths = image_paths[6:-6]
try:
assert len(image_paths) == self.demos[idx]['demoX'].shape[0]
except AssertionError:
import pdb; pdb.set_trace()
if noisy:
noisy_sampled_folder = noisy_train_img_folders[idx]
noisy_image_paths = natsorted(os.listdir(noisy_sampled_folder))
assert len(noisy_image_paths) == self.noisy_demos[idx]['demoX'].shape[0]
if not noisy:
sampled_image_idx = np.random.choice(range(len(image_paths)), size=self.update_batch_size+self.test_batch_size, replace=False) # True
sampled_images = [os.path.join(sampled_folder, image_paths[i]) for i in sampled_image_idx]
else:
noisy_sampled_image_idx = np.random.choice(range(len(noisy_image_paths)), size=self.update_batch_size, replace=False) #True
sampled_image_idx = np.random.choice(range(len(image_paths)), size=self.test_batch_size, replace=False) #True
sampled_images = [os.path.join(noisy_sampled_folder, noisy_image_paths[i]) for i in noisy_sampled_image_idx]
sampled_images.extend([os.path.join(sampled_folder, image_paths[i]) for i in sampled_image_idx])
self.all_training_filenames.extend(sampled_images)
self.training_batch_idx[itr][idx] = sampled_image_idx
if noisy:
self.noisy_training_batch_idx[itr][idx] = noisy_sampled_image_idx
if itr != 0 and itr % TEST_PRINT_INTERVAL == 0:
sampled_val_idx = random.sample(self.val_idx, self.meta_batch_size)
for idx in sampled_val_idx:
sampled_folder = val_img_folders[idx]
image_paths = natsorted(os.listdir(sampled_folder))
if FLAGS.experiment == 'sim_push':
image_paths = image_paths[6:-6]
assert len(image_paths) == self.demos[idx]['demoX'].shape[0]
if noisy:
noisy_sampled_folder = noisy_val_img_folders[idx]
noisy_image_paths = natsorted(os.listdir(noisy_sampled_folder))
assert len(noisy_image_paths) == self.noisy_demos[idx]['demoX'].shape[0]
if not noisy:
sampled_image_idx = np.random.choice(range(len(image_paths)), size=self.update_batch_size+self.test_batch_size, replace=False) # True
sampled_images = [os.path.join(sampled_folder, image_paths[i]) for i in sampled_image_idx]
else:
noisy_sampled_image_idx = np.random.choice(range(len(noisy_image_paths)), size=self.update_batch_size, replace=False) # True
sampled_image_idx = np.random.choice(range(len(image_paths)), size=self.test_batch_size, replace=False) # True
sampled_images = [os.path.join(noisy_sampled_folder, noisy_image_paths[i]) for i in noisy_sampled_image_idx]
sampled_images.extend([os.path.join(sampled_folder, image_paths[i]) for i in sampled_image_idx])
self.all_val_filenames.extend(sampled_images)
self.val_batch_idx[itr][idx] = sampled_image_idx
if noisy:
self.noisy_val_batch_idx[itr][idx] = noisy_sampled_image_idx
def make_batch_tensor(self, network_config, restore_iter=0, train=True):
TEST_INTERVAL = 500
batch_image_size = (self.update_batch_size + self.test_batch_size) * 1
if train:
all_filenames = self.all_training_filenames
if restore_iter > 0:
all_filenames = all_filenames[batch_image_size*(restore_iter+1):]
else:
all_filenames = self.all_val_filenames
if restore_iter > 0:
all_filenames = all_filenames[batch_image_size*(int(restore_iter/TEST_INTERVAL)+1):]
im_height = network_config['image_height']
im_width = network_config['image_width']
num_channels = network_config['image_channels']
# make queue for tensorflow to read from
filename_queue = tf.train.string_input_producer(tf.convert_to_tensor(all_filenames), shuffle=False)
print 'Generating image processing ops'
image_reader = tf.WholeFileReader()
_, image_file = image_reader.read(filename_queue)
image = tf.image.decode_gif(image_file)
# should be T x C x W x H
image.set_shape((self.T, im_height, im_width, num_channels))
image = tf.cast(image, tf.float32)
image /= 255.0
if FLAGS.hsv:
eps_min, eps_max = 0.5, 1.5
assert eps_max >= eps_min >= 0
# convert to HSV only fine if input images in [0, 1]
img_hsv = tf.image.rgb_to_hsv(image)
img_h = img_hsv[..., 0]
img_s = img_hsv[..., 1]
img_v = img_hsv[..., 2]
eps = tf.random_uniform([self.T, 1, 1], eps_min, eps_max)
img_v = tf.clip_by_value(eps * img_v, 0., 1.)
img_hsv = tf.stack([img_h, img_s, img_v], 3)
image_rgb = tf.image.hsv_to_rgb(img_hsv)
image = image_rgb
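            # Net effect (illustrative): eps is drawn per-frame from
            # [eps_min, eps_max] = [0.5, 1.5] and scales the HSV value
            # channel, clipped to [0, 1]: a simple brightness-jitter
            # augmentation.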
image = tf.transpose(image, perm=[0, 3, 2, 1]) # transpose to mujoco setting for images
image = tf.reshape(image, [self.T, -1])
num_preprocess_threads = 1 # TODO - enable this to be set to >1
min_queue_examples = 64 #128 #256
print 'Batching images'
images = tf.train.batch(
[image],
batch_size = batch_image_size,
num_threads=num_preprocess_threads,
capacity=min_queue_examples + 3 * batch_image_size,
)
all_images = []
for i in xrange(1):
image = images[i*(self.update_batch_size+self.test_batch_size):(i+1)*(self.update_batch_size+self.test_batch_size)]
image = tf.reshape(image, [(self.update_batch_size+self.test_batch_size)*self.T, -1])
all_images.append(image)
return tf.squeeze(tf.stack(all_images), axis=0)
def generate_data_batch(self, itr, train=True):
if train:
demos = {key: self.demos[key].copy() for key in self.train_idx}
idxes = self.training_batch_idx[itr]
if FLAGS.use_noisy_demos:
noisy_demos = {key: self.noisy_demos[key].copy() for key in self.train_idx}
noisy_idxes = self.noisy_training_batch_idx[itr]
else:
demos = {key: self.demos[key].copy() for key in self.val_idx}
idxes = self.val_batch_idx[itr]
if FLAGS.use_noisy_demos:
noisy_demos = {key: self.noisy_demos[key].copy() for key in self.val_idx}
noisy_idxes = self.noisy_val_batch_idx[itr]
batch_size = self.meta_batch_size
update_batch_size = self.update_batch_size
test_batch_size = self.test_batch_size
if not FLAGS.use_noisy_demos:
U = [demos[k]['demoU'][v].reshape((test_batch_size+update_batch_size)*self.T, -1) for k, v in idxes.items()]
U = np.array(U)
X = [demos[k]['demoX'][v].reshape((test_batch_size+update_batch_size)*self.T, -1) for k, v in idxes.items()]
X = np.array(X)
else:
noisy_U = [noisy_demos[k]['demoU'][v].reshape(update_batch_size*self.T, -1) for k, v in noisy_idxes.items()]
noisy_X = [noisy_demos[k]['demoX'][v].reshape(update_batch_size*self.T, -1) for k, v in noisy_idxes.items()]
U = [demos[k]['demoU'][v].reshape(test_batch_size*self.T, -1) for k, v in idxes.items()]
U = np.concatenate((np.array(noisy_U), np.array(U)), axis=1)
X = [demos[k]['demoX'][v].reshape(test_batch_size*self.T, -1) for k, v in idxes.items()]
X = np.concatenate((np.array(noisy_X), np.array(X)), axis=1)
assert U.shape[2] == self._dU
assert X.shape[2] == len(self.state_idx)
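        # Resulting shapes (illustrative): X is (meta_batch_size,
        # (update_batch_size + test_batch_size) * T, state_dim) and U is the
        # same with the action dimension, ready to be split into inner- and
        # outer-loop data downstream.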
return X, U
| 56.684211 | 161 | 0.592917 |
c2c88037441d01664804e288e92604a36ede263b | 566 | py | Python | microservices/w2/config_w2.py | nyancol/OpenPenguin | ac229f5b2ea65f32aec0d8dcdd4ac855b4e37d3e | [
"Apache-2.0"
] | 5 | 2017-11-28T13:15:20.000Z | 2017-12-09T23:14:04.000Z | microservices/w2/config_w2.py | nyancol/OpenPenguin | ac229f5b2ea65f32aec0d8dcdd4ac855b4e37d3e | [
"Apache-2.0"
] | 9 | 2017-12-12T17:15:37.000Z | 2018-01-30T14:43:36.000Z | microservices/w2/config.py | hp-cloud-lab1/cloud_native_app | a243d6dd75b99450874f661e42d62d14cca95683 | [
"Apache-2.0"
] | 15 | 2017-09-15T09:27:43.000Z | 2018-12-18T11:05:53.000Z | # coding=utf-8
import configparser
class Configuration(object):
def __init__(self, configuration_file):
self.config = configparser.ConfigParser(allow_no_value=True)
self.config.read(configuration_file)
def get_w2_rabbithost(self):
return self.config.get("w2", "rabbithost")
def get_w2_rabbitlogin(self):
return self.config.get("w2", "rabbitlogin")
def get_w2_rabbitpassword(self):
return self.config.get("w2", "rabbitpassword")
def get_w2_debug(self):
return self.config.get("w2", "debug")
| 24.608696 | 68 | 0.687279 |
f13735da3b63929bfd8cf2d3c4ad3486f1ef4f98 | 2,395 | py | Python | tests/test_rola.py | alanarteagav/musicWave | dd1cfdd2a799686f17e432a392aca46921db9ff0 | [
"MIT"
] | null | null | null | tests/test_rola.py | alanarteagav/musicWave | dd1cfdd2a799686f17e432a392aca46921db9ff0 | [
"MIT"
] | null | null | null | tests/test_rola.py | alanarteagav/musicWave | dd1cfdd2a799686f17e432a392aca46921db9ff0 | [
"MIT"
] | null | null | null | from music_wave.rola import Rola
import unittest
class TestRola(unittest.TestCase):
def setUp(self):
self.rola_test = Rola(id = 0, performer_id = 0, album_id = 0,
path = 'null', title = 'null', track = 0,
year = 0, genre = 'null')
def test_set_get_id(self):
self.assertEqual(self.rola_test.get_id(), 0, 'get id fail')
self.rola_test.set_id(113)
self.assertEqual(self.rola_test.get_id(), 113, 'set id fail')
def test_set_get_performer(self):
self.assertEqual(self.rola_test.get_performer_id(), 0, 'get performer fail')
self.rola_test.set_performer_id(113)
self.assertEqual(self.rola_test.get_performer_id(), 113, 'set performer fail')
def test_set_get_album(self):
self.assertEqual(self.rola_test.get_album_id(), 0, 'get album fail')
self.rola_test.set_album_id(113)
self.assertEqual(self.rola_test.get_album_id(), 113, 'set album fail')
def test_set_get_path(self):
self.assertEqual(self.rola_test.get_path(), "null", 'get path fail')
self.rola_test.set_path("shaffer/fletcher/Caravan.mp3")
self.assertEqual(self.rola_test.get_path(), "shaffer/fletcher/Caravan.mp3",
'set path fail')
def test_set_get_title(self):
self.assertEqual(self.rola_test.get_title(), "null", 'get title fail')
self.rola_test.set_title("Whiplash")
self.assertEqual(self.rola_test.get_title(), "Whiplash", 'set title fail')
def test_set_get_track(self):
self.assertEqual(self.rola_test.get_track(), 0, 'get track fail')
self.rola_test.set_track(66)
self.assertEqual(self.rola_test.get_track(), 66, 'set track fail')
def test_set_get_year(self):
self.assertEqual(self.rola_test.get_year(), 0, 'get year fail')
self.rola_test.set_year(1997)
self.assertEqual(self.rola_test.get_year(), 1997, 'set year fail')
def test_set_get_genre(self):
self.assertEqual(self.rola_test.get_genre(), "null", 'get genre fail')
self.rola_test.set_genre("jazz")
self.assertEqual(self.rola_test.get_genre(), "jazz", 'set genre fail')
if __name__ == '__main__':
    suite = unittest.TestLoader().loadTestsFromTestCase(TestRola)
    unittest.TextTestRunner(verbosity=2).run(suite)
| 40.59322 | 86 | 0.658873 |
161ef85842d6e1f389ac2be9b1a689d3a3bdffd3 | 1,950 | py | Python | public/scripts/python/test/Suite2/test29.py | jimb245/scriptremote | 74853f6ac8a287c3a97068833f62d5c94707b092 | [
"MIT"
] | null | null | null | public/scripts/python/test/Suite2/test29.py | jimb245/scriptremote | 74853f6ac8a287c3a97068833f62d5c94707b092 | [
"MIT"
] | null | null | null | public/scripts/python/test/Suite2/test29.py | jimb245/scriptremote | 74853f6ac8a287c3a97068833f62d5c94707b092 | [
"MIT"
] | null | null | null | #
# Existing owned project, attempt to add job using wrong passphrase
#
import os
import time
import unittest
import srutil
import srio
import credentials
class Test(unittest.TestCase):
def runTest(self):
user = credentials.SRUSER
token = credentials.SRTOKEN
projName = 'TEST(suite2)-Project29'
projShare = projName + '~' + credentials.SREMAIL
locName = 'location'
jobName1 = 'Job1'
jobName2 = 'Job2'
jobName3 = 'Job3'
passphrase1 = '12345'
passphrase2 = 'abc'
result1 = srio.SR_start(user, token, projName, jobName1, passphrase1)
if (result1[0] != srio.SR_OK):
self.fail()
proj1 = srio.sr_project_encoded
result2 = srio.SR_send(locName, data_array=[{'name':'A','value':'Hello World'}], reply=False)
if (result2[0] != srio.SR_OK):
self.fail()
srio.SR_end()
result3 = srio.SR_start(user, token, projName, jobName2, passphrase2 )
if (result3[0] != srio.SR_OK):
self.fail()
result4 = srio.SR_send(locName, data_array=[{'name':'A','value':'Hello World'}], reply=False)
if (result4[0] != srio.SR_OK):
self.fail()
result5 = srutil.SR_get_jobs()
if (result5[0] != srio.SR_OK):
self.fail()
data = result5[1]
jobs = data[u'jobs']
if len(jobs) != 1:
self.fail()
srio.SR_end()
result6 = srio.SR_start(user, token, projName, jobName3, passphrase1)
if (result6[0] != srio.SR_OK):
self.fail()
result7 = srutil.SR_get_jobs()
if (result7[0] != srio.SR_OK):
self.fail()
data = result7[1]
jobs = data[u'jobs']
if len(jobs) != 2:
self.fail()
srio.sr_userid = user
srio.sr_token = token
srio.sr_project_encoded=proj1
srutil.SR_delete_project()
| 25.657895 | 101 | 0.564615 |
4a3971bf2fe63c7bf1d00f4e3956f4e3d41cbcdf | 18,778 | py | Python | google/ads/googleads/v9/services/services/age_range_view_service/client.py | JakobSteixner/google-ads-python | df2b802cc7e78295a4ece21cc7ef3787cd35dab0 | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v9/services/services/age_range_view_service/client.py | JakobSteixner/google-ads-python | df2b802cc7e78295a4ece21cc7ef3787cd35dab0 | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v9/services/services/age_range_view_service/client.py | JakobSteixner/google-ads-python | df2b802cc7e78295a4ece21cc7ef3787cd35dab0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.ads.googleads.v9.resources.types import age_range_view
from google.ads.googleads.v9.services.types import age_range_view_service
from .transports.base import AgeRangeViewServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import AgeRangeViewServiceGrpcTransport
class AgeRangeViewServiceClientMeta(type):
"""Metaclass for the AgeRangeViewService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[AgeRangeViewServiceTransport]]
_transport_registry["grpc"] = AgeRangeViewServiceGrpcTransport
def get_transport_class(
        cls, label: Optional[str] = None,
) -> Type[AgeRangeViewServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class AgeRangeViewServiceClient(metaclass=AgeRangeViewServiceClientMeta):
"""Service to manage age range views."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AgeRangeViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AgeRangeViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> AgeRangeViewServiceTransport:
"""Return the transport used by the client instance.
Returns:
AgeRangeViewServiceTransport: The transport used by the client instance.
"""
return self._transport
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
@staticmethod
def age_range_view_path(
customer_id: str, ad_group_id: str, criterion_id: str,
) -> str:
"""Return a fully-qualified age_range_view string."""
return "customers/{customer_id}/ageRangeViews/{ad_group_id}~{criterion_id}".format(
customer_id=customer_id,
ad_group_id=ad_group_id,
criterion_id=criterion_id,
)
@staticmethod
def parse_age_range_view_path(path: str) -> Dict[str, str]:
"""Parse a age_range_view path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/ageRangeViews/(?P<ad_group_id>.+?)~(?P<criterion_id>.+?)$",
path,
)
return m.groupdict() if m else {}
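    # Illustrative round trip: age_range_view_path('123', '456', '789')
    # returns 'customers/123/ageRangeViews/456~789', and
    # parse_age_range_view_path() on that string recovers all three
    # segments.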
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, AgeRangeViewServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the age range view service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.AgeRangeViewServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in (
"true",
"false",
):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
use_client_cert = (
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, AgeRangeViewServiceTransport):
            # transport is an AgeRangeViewServiceTransport instance.
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = AgeRangeViewServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_age_range_view(
self,
request: Union[
age_range_view_service.GetAgeRangeViewRequest, dict
] = None,
*,
resource_name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> age_range_view.AgeRangeView:
r"""Returns the requested age range view in full detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (Union[google.ads.googleads.v9.services.types.GetAgeRangeViewRequest, dict]):
The request object. Request message for
[AgeRangeViewService.GetAgeRangeView][google.ads.googleads.v9.services.AgeRangeViewService.GetAgeRangeView].
resource_name (:class:`str`):
Required. The resource name of the
age range view to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v9.resources.types.AgeRangeView:
An age range view.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
        # in an age_range_view_service.GetAgeRangeViewRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, age_range_view_service.GetAgeRangeViewRequest
):
request = age_range_view_service.GetAgeRangeViewRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_age_range_view
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("resource_name", request.resource_name),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
__all__ = ("AgeRangeViewServiceClient",)
| 40.733189 | 124 | 0.635478 |
1131ea744214a80679a167dbdd1083f43ae12fd0 | 769 | py | Python | theory/16th_sprint/B.broke_me.py | abi83/YaPractice | 1c3a5670ee2f872d4f872623a392755318b893b5 | [
"MIT"
] | 3 | 2020-11-18T05:16:30.000Z | 2021-03-08T06:36:01.000Z | theory/16th_sprint/B.broke_me.py | abi83/YaPractice | 1c3a5670ee2f872d4f872623a392755318b893b5 | [
"MIT"
] | null | null | null | theory/16th_sprint/B.broke_me.py | abi83/YaPractice | 1c3a5670ee2f872d4f872623a392755318b893b5 | [
"MIT"
] | 1 | 2021-01-20T12:41:48.000Z | 2021-01-20T12:41:48.000Z | import string
import random
def polynomial_hash(text, base=1000, modulus=123_987_123):
    """Polynomial rolling hash of ``text`` over ``base``, reduced mod ``modulus``."""
    my_hash = 0
    text_len = len(text)
    for n, s in enumerate(text):
        my_hash += ord(s) * (base ** (text_len - n - 1))
    return my_hash % modulus
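# Quick sanity check of the formula (base 1000; the modulus never kicks in
# for such a short input): polynomial_hash("ab") == ord('a') * 1000 + ord('b')
# == 97098.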
def string_generator(size=8, chars=string.ascii_lowercase):
return ''.join(random.choice(chars) for _ in range(size))
hashes = {}
mem = 0
while True:
a = string_generator(random.randint(4, 20))
try:
if hashes[polynomial_hash(a)] != a:
print(hashes[polynomial_hash(a)], a)
break
except KeyError:
hashes[polynomial_hash(a)] = a
# showing used memory
if hashes.__sizeof__() // 1024 > mem:
mem = hashes.__sizeof__() // 1024
print(mem)
| 21.361111 | 61 | 0.628088 |
e2b9b638ddc798f14a0f110f5296206fe33052f6 | 875 | py | Python | traffic_light_dl/workspace/raw_training_data/validate_records.py | hssavage/CarND-Capstone | 2e1ddb8f06e2d1218b93841ee6bdc88e89a1bc4c | [
"MIT"
] | null | null | null | traffic_light_dl/workspace/raw_training_data/validate_records.py | hssavage/CarND-Capstone | 2e1ddb8f06e2d1218b93841ee6bdc88e89a1bc4c | [
"MIT"
] | null | null | null | traffic_light_dl/workspace/raw_training_data/validate_records.py | hssavage/CarND-Capstone | 2e1ddb8f06e2d1218b93841ee6bdc88e89a1bc4c | [
"MIT"
] | 2 | 2018-03-14T17:33:09.000Z | 2018-03-15T17:09:20.000Z |
import tensorflow as tf
def validate_dataset(filenames, reader_opts=None):
"""
Attempt to iterate over every record in the supplied iterable of TFRecord filenames
:param filenames: iterable of filenames to read
:param reader_opts: (optional) tf.python_io.TFRecordOptions to use when constructing the record iterator
"""
    for fname in filenames:
        print('validating ', fname)
        record_iterator = tf.python_io.tf_record_iterator(path=fname, options=reader_opts)
        i = 0  # per-file record index, so the error message reports a position within fname
        try:
            for _ in record_iterator:
                i += 1
        except Exception as e:
            print('error in {} at record {}'.format(fname, i))
            print(e)
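
# To exercise the validator on a throwaway file (the feature name 'id' is
# illustrative; any tf.train.Example works):
#
#   with tf.python_io.TFRecordWriter('/tmp/sample.record') as w:
#       example = tf.train.Example()
#       example.features.feature['id'].int64_list.value.append(1)
#       w.write(example.SerializeToString())
#   validate_dataset(['/tmp/sample.record'])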
if __name__ == '__main__':
filenames = ['/workspace/data/bosch_traffic_light_train.record','/workspace/data/bosch_traffic_light_val.record']
validate_dataset(filenames) | 35 | 117 | 0.677714 |
2df41b2677769790c00105d555785da977cc4744 | 2,593 | py | Python | echome/identity/management/commands/createaccount.py | jasoncolburne/echome | a5ab87666ae859d1ca8e4902d5c441c0ce36547a | [
"MIT"
] | 2 | 2022-01-31T19:32:51.000Z | 2022-01-31T22:42:13.000Z | echome/identity/management/commands/createaccount.py | jasoncolburne/echome | a5ab87666ae859d1ca8e4902d5c441c0ce36547a | [
"MIT"
] | 7 | 2021-04-04T01:15:53.000Z | 2022-02-07T03:34:48.000Z | echome/identity/management/commands/createaccount.py | jasoncolburne/echome | a5ab87666ae859d1ca8e4902d5c441c0ce36547a | [
"MIT"
] | 1 | 2022-02-01T11:34:50.000Z | 2022-02-01T11:34:50.000Z | from django.core.management.base import BaseCommand, CommandError
from django.core import exceptions
from django.utils.text import capfirst
from identity.models import Account
class Command(BaseCommand):
help = 'Create an echome account'
# def add_arguments(self, parser):
# parser.add_argument('poll_ids', nargs='+', type=int)
def handle(self, *args, **options):
newacct = Account()
newacct.generate_id()
accountname = Account._meta.get_field('name')
verbose_field_name = accountname.verbose_name
name = None
while name is None:
message = self._get_input_message(accountname)
name = self.get_input_data(accountname, message)
if name:
error_msg = self._validate_acctname(name, verbose_field_name)
if error_msg:
self.stderr.write(error_msg)
name = None
continue
try:
newacct.name = name
self.stdout.write(f"Creating account '{name}' with account id: {newacct.account_id}")
newacct.save()
self.stdout.write(self.style.SUCCESS('Successfully created account'))
except Exception as e:
            self.stdout.write(str(e))
self.stderr.write('Error: There was an error when attempting to create the account.')
def get_input_data(self, field, message, default=None):
"""
Override this method if you want to customize data inputs or
validation exceptions.
"""
raw_value = input(message)
if default and raw_value == '':
raw_value = default
try:
val = field.clean(raw_value, None)
except exceptions.ValidationError as e:
self.stderr.write("Error: %s" % '; '.join(e.messages))
val = None
return val
def _get_input_message(self, field, default=None):
return '%s%s%s: ' % (
capfirst(field.verbose_name),
" (leave blank to use '%s')" % default if default else '',
' (%s.%s)' % (
field.remote_field.model._meta.object_name,
field.m2m_target_field_name() if field.many_to_many else field.remote_field.field_name,
) if field.remote_field else '',
)
def _validate_acctname(self, acctname, verbose_field_name):
"""Validate username. If invalid, return a string error message."""
if not acctname:
return '%s cannot be blank.' % capfirst(verbose_field_name)
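
# Invoked like any Django management command from the project root; the
# command then prompts for the account name interactively:
#
#   python manage.py createaccount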
| 36.013889 | 103 | 0.600077 |
a33eff045a2161cd59b6d7630833dbb2fa41b93b | 24,953 | py | Python | lib/spack/spack/directory_layout.py | FJ-NaokiMatsumura/spack | 7cfe626e21795f0a4bfe61f36ca1b48ffd2fc961 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | lib/spack/spack/directory_layout.py | FJ-NaokiMatsumura/spack | 7cfe626e21795f0a4bfe61f36ca1b48ffd2fc961 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8 | 2022-02-28T11:30:18.000Z | 2022-03-23T19:34:56.000Z | lib/spack/spack/directory_layout.py | FJ-NaokiMatsumura/spack | 7cfe626e21795f0a4bfe61f36ca1b48ffd2fc961 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import errno
import glob
import os
import posixpath
import re
import shutil
import tempfile
from contextlib import contextmanager
import ruamel.yaml as yaml
import six
import llnl.util.filesystem as fs
import llnl.util.tty as tty
import spack.config
import spack.hash_types as ht
import spack.spec
import spack.util.spack_json as sjson
from spack.error import SpackError
# Note: Posixpath is used here as opposed to
# os.path.join due to spack.spec.Spec.format
# requiring forward slash path separators at this stage
default_projections = {'all': posixpath.join(
'{architecture}', '{compiler.name}-{compiler.version}',
'{name}-{version}-{hash}')}
def _check_concrete(spec):
"""If the spec is not concrete, raise a ValueError"""
if not spec.concrete:
raise ValueError('Specs passed to a DirectoryLayout must be concrete!')
class DirectoryLayout(object):
"""A directory layout is used to associate unique paths with specs.
Different installations are going to want different layouts for their
install, and they can use this to customize the nesting structure of
spack installs. The default layout is:
* <install root>/
* <platform-os-target>/
* <compiler>-<compiler version>/
* <name>-<version>-<hash>
The hash here is a SHA-1 hash for the full DAG plus the build
spec.
The installation directory projections can be modified with the
projections argument.
"""
def __init__(self, root, **kwargs):
self.root = root
self.check_upstream = True
projections = kwargs.get('projections') or default_projections
self.projections = dict((key, projection.lower())
for key, projection in projections.items())
# apply hash length as appropriate
self.hash_length = kwargs.get('hash_length', None)
if self.hash_length is not None:
for when_spec, projection in self.projections.items():
if '{hash}' not in projection:
if '{hash' in projection:
raise InvalidDirectoryLayoutParametersError(
"Conflicting options for installation layout hash"
" length")
else:
raise InvalidDirectoryLayoutParametersError(
"Cannot specify hash length when the hash is not"
" part of all install_tree projections")
self.projections[when_spec] = projection.replace(
"{hash}", "{hash:%d}" % self.hash_length)
# If any of these paths change, downstream databases may not be able to
# locate files in older upstream databases
self.metadata_dir = '.spack'
self.deprecated_dir = 'deprecated'
self.spec_file_name = 'spec.json'
# Use for checking yaml and deprecated types
self._spec_file_name_yaml = 'spec.yaml'
self.extension_file_name = 'extensions.yaml'
self.packages_dir = 'repos' # archive of package.py files
self.manifest_file_name = 'install_manifest.json'
@property
def hidden_file_regexes(self):
return (re.escape(self.metadata_dir),)
def relative_path_for_spec(self, spec):
_check_concrete(spec)
projection = spack.projections.get_projection(self.projections, spec)
path = spec.format(projection)
return path
def write_spec(self, spec, path):
"""Write a spec out to a file."""
_check_concrete(spec)
with open(path, 'w') as f:
            # The hash in the projection is the DAG hash, but we write out the
            # full provenance by full hash so it's available if we want it later
# extension = os.path.splitext(path)[-1].lower()
# if 'json' in extension:
spec.to_json(f, hash=ht.full_hash)
# elif 'yaml' in extension:
# spec.to_yaml(f, hash=ht.full_hash)
def write_host_environment(self, spec):
"""The host environment is a json file with os, kernel, and spack
versioning. We use it in the case that an analysis later needs to
easily access this information.
"""
from spack.util.environment import get_host_environment_metadata
env_file = self.env_metadata_path(spec)
environ = get_host_environment_metadata()
with open(env_file, 'w') as fd:
sjson.dump(environ, fd)
def read_spec(self, path):
"""Read the contents of a file and parse them as a spec"""
try:
with open(path) as f:
extension = os.path.splitext(path)[-1].lower()
if extension == '.json':
spec = spack.spec.Spec.from_json(f)
elif extension == '.yaml':
# Too late for conversion; spec_file_path() already called.
spec = spack.spec.Spec.from_yaml(f)
else:
raise SpecReadError('Did not recognize spec file extension:'
' {0}'.format(extension))
except Exception as e:
if spack.config.get('config:debug'):
raise
raise SpecReadError(
'Unable to read file: %s' % path, 'Cause: ' + str(e))
# Specs read from actual installations are always concrete
spec._mark_concrete()
return spec
def spec_file_path(self, spec):
"""Gets full path to spec file"""
_check_concrete(spec)
# Attempts to convert to JSON if possible.
# Otherwise just returns the YAML.
yaml_path = os.path.join(
self.metadata_path(spec), self._spec_file_name_yaml)
json_path = os.path.join(self.metadata_path(spec), self.spec_file_name)
if os.path.exists(yaml_path) and fs.can_write_to_dir(yaml_path):
self.write_spec(spec, json_path)
try:
os.remove(yaml_path)
except OSError as err:
tty.debug('Could not remove deprecated {0}'.format(yaml_path))
tty.debug(err)
elif os.path.exists(yaml_path):
return yaml_path
return json_path
def deprecated_file_path(self, deprecated_spec, deprecator_spec=None):
"""Gets full path to spec file for deprecated spec
If the deprecator_spec is provided, use that. Otherwise, assume
deprecated_spec is already deprecated and its prefix links to the
prefix of its deprecator."""
_check_concrete(deprecated_spec)
if deprecator_spec:
_check_concrete(deprecator_spec)
# If deprecator spec is None, assume deprecated_spec already deprecated
# and use its link to find the file.
base_dir = self.path_for_spec(
deprecator_spec
) if deprecator_spec else os.readlink(deprecated_spec.prefix)
yaml_path = os.path.join(base_dir, self.metadata_dir,
self.deprecated_dir, deprecated_spec.dag_hash()
+ '_' + self._spec_file_name_yaml)
json_path = os.path.join(base_dir, self.metadata_dir,
self.deprecated_dir, deprecated_spec.dag_hash()
+ '_' + self.spec_file_name)
if (os.path.exists(yaml_path) and fs.can_write_to_dir(yaml_path)):
self.write_spec(deprecated_spec, json_path)
try:
os.remove(yaml_path)
except (IOError, OSError) as err:
tty.debug('Could not remove deprecated {0}'.format(yaml_path))
tty.debug(err)
elif os.path.exists(yaml_path):
return yaml_path
return json_path
@contextmanager
def disable_upstream_check(self):
self.check_upstream = False
yield
self.check_upstream = True
def metadata_path(self, spec):
return os.path.join(spec.prefix, self.metadata_dir)
def env_metadata_path(self, spec):
return os.path.join(self.metadata_path(spec), "install_environment.json")
def build_packages_path(self, spec):
return os.path.join(self.metadata_path(spec), self.packages_dir)
def create_install_directory(self, spec):
_check_concrete(spec)
# Create install directory with properly configured permissions
# Cannot import at top of file
from spack.package_prefs import get_package_dir_permissions, get_package_group
# Each package folder can have its own specific permissions, while
# intermediate folders (arch/compiler) are set with access permissions
# equivalent to the root permissions of the layout.
group = get_package_group(spec)
perms = get_package_dir_permissions(spec)
fs.mkdirp(spec.prefix, mode=perms, group=group, default_perms='parents')
fs.mkdirp(self.metadata_path(spec), mode=perms, group=group) # in prefix
self.write_spec(spec, self.spec_file_path(spec))
def ensure_installed(self, spec):
"""
Throws DirectoryLayoutError if:
1. spec prefix does not exist
2. spec prefix does not contain a spec file
3. the spec file does not correspond to the spec
"""
_check_concrete(spec)
path = self.path_for_spec(spec)
spec_file_path = self.spec_file_path(spec)
if not os.path.isdir(path):
raise InconsistentInstallDirectoryError(
"Install prefix {0} does not exist.".format(path))
if not os.path.isfile(spec_file_path):
raise InconsistentInstallDirectoryError(
'Install prefix exists but contains no spec.json:',
" " + path)
installed_spec = self.read_spec(spec_file_path)
if installed_spec == spec:
return
# DAG hashes currently do not include build dependencies.
#
# TODO: remove this when we do better concretization and don't
# ignore build-only deps in hashes.
elif (installed_spec.copy(deps=('link', 'run')) ==
spec.copy(deps=('link', 'run'))):
# The directory layout prefix is based on the dag hash, so among
# specs with differing full-hash but matching dag-hash, only one
# may be installed. This means for example that for two instances
# that differ only in CMake version used to build, only one will
# be installed.
return
if spec.dag_hash() == installed_spec.dag_hash():
raise SpecHashCollisionError(spec, installed_spec)
else:
raise InconsistentInstallDirectoryError(
'Spec file in %s does not match hash!' % spec_file_path)
def all_specs(self):
if not os.path.isdir(self.root):
return []
specs = []
for _, path_scheme in self.projections.items():
path_elems = ["*"] * len(path_scheme.split(posixpath.sep))
# NOTE: Does not validate filename extension; should happen later
path_elems += [self.metadata_dir, 'spec.json']
pattern = os.path.join(self.root, *path_elems)
spec_files = glob.glob(pattern)
if not spec_files: # we're probably looking at legacy yaml...
path_elems += [self.metadata_dir, 'spec.yaml']
pattern = os.path.join(self.root, *path_elems)
spec_files = glob.glob(pattern)
specs.extend([self.read_spec(s) for s in spec_files])
return specs
def all_deprecated_specs(self):
if not os.path.isdir(self.root):
return []
deprecated_specs = set()
for _, path_scheme in self.projections.items():
path_elems = ["*"] * len(path_scheme.split(posixpath.sep))
# NOTE: Does not validate filename extension; should happen later
path_elems += [self.metadata_dir, self.deprecated_dir,
'*_spec.*'] # + self.spec_file_name]
pattern = os.path.join(self.root, *path_elems)
spec_files = glob.glob(pattern)
get_depr_spec_file = lambda x: os.path.join(
os.path.dirname(os.path.dirname(x)), self.spec_file_name)
deprecated_specs |= set((self.read_spec(s),
self.read_spec(get_depr_spec_file(s)))
for s in spec_files)
return deprecated_specs
def specs_by_hash(self):
by_hash = {}
for spec in self.all_specs():
by_hash[spec.dag_hash()] = spec
return by_hash
def path_for_spec(self, spec):
"""Return absolute path from the root to a directory for the spec."""
_check_concrete(spec)
if spec.external:
return spec.external_path
if self.check_upstream:
upstream, record = spack.store.db.query_by_spec_hash(
spec.dag_hash())
if upstream:
raise SpackError(
"Internal error: attempted to call path_for_spec on"
" upstream-installed package.")
path = self.relative_path_for_spec(spec)
assert(not path.startswith(self.root))
return os.path.join(self.root, path)
def remove_install_directory(self, spec, deprecated=False):
"""Removes a prefix and any empty parent directories from the root.
        Raises RemoveFailedError if something goes wrong.
"""
path = self.path_for_spec(spec)
assert(path.startswith(self.root))
if deprecated:
if os.path.exists(path):
try:
metapath = self.deprecated_file_path(spec)
os.unlink(path)
os.remove(metapath)
except OSError as e:
raise six.raise_from(RemoveFailedError(spec, path, e), e)
elif os.path.exists(path):
try:
shutil.rmtree(path)
except OSError as e:
raise six.raise_from(RemoveFailedError(spec, path, e), e)
path = os.path.dirname(path)
while path != self.root:
if os.path.isdir(path):
try:
os.rmdir(path)
except OSError as e:
if e.errno == errno.ENOENT:
# already deleted, continue with parent
pass
elif e.errno == errno.ENOTEMPTY:
# directory wasn't empty, done
return
else:
raise e
path = os.path.dirname(path)
class ExtensionsLayout(object):
"""A directory layout is used to associate unique paths with specs for
package extensions.
Keeps track of which extensions are activated for what package.
Depending on the use case, this can mean globally activated extensions
directly in the installation folder - or extensions activated in
filesystem views.
"""
def __init__(self, view, **kwargs):
self.view = view
def add_extension(self, spec, ext_spec):
"""Add to the list of currently installed extensions."""
raise NotImplementedError()
def check_activated(self, spec, ext_spec):
"""Ensure that ext_spec can be removed from spec.
If not, raise NoSuchExtensionError.
"""
raise NotImplementedError()
def check_extension_conflict(self, spec, ext_spec):
"""Ensure that ext_spec can be activated in spec.
If not, raise ExtensionAlreadyInstalledError or
ExtensionConflictError.
"""
raise NotImplementedError()
def extension_map(self, spec):
"""Get a dict of currently installed extension packages for a spec.
Dict maps { name : extension_spec }
Modifying dict does not affect internals of this layout.
"""
raise NotImplementedError()
def extendee_target_directory(self, extendee):
"""Specify to which full path extendee should link all files
from extensions."""
raise NotImplementedError
def remove_extension(self, spec, ext_spec):
"""Remove from the list of currently installed extensions."""
raise NotImplementedError()
class YamlViewExtensionsLayout(ExtensionsLayout):
"""Maintain extensions within a view.
"""
def __init__(self, view, layout):
"""layout is the corresponding YamlDirectoryLayout object for which
we implement extensions.
"""
super(YamlViewExtensionsLayout, self).__init__(view)
self.layout = layout
self.extension_file_name = 'extensions.yaml'
# Cache of already written/read extension maps.
self._extension_maps = {}
def add_extension(self, spec, ext_spec):
_check_concrete(spec)
_check_concrete(ext_spec)
# Check whether it's already installed or if it's a conflict.
exts = self._extension_map(spec)
self.check_extension_conflict(spec, ext_spec)
# do the actual adding.
exts[ext_spec.name] = ext_spec
self._write_extensions(spec, exts)
def check_extension_conflict(self, spec, ext_spec):
exts = self._extension_map(spec)
if ext_spec.name in exts:
installed_spec = exts[ext_spec.name].copy(deps=('link', 'run'))
if ext_spec.copy(deps=('link', 'run')) == installed_spec:
raise ExtensionAlreadyInstalledError(spec, ext_spec)
else:
raise ExtensionConflictError(spec, ext_spec, installed_spec)
def check_activated(self, spec, ext_spec):
exts = self._extension_map(spec)
if (ext_spec.name not in exts) or (ext_spec != exts[ext_spec.name]):
raise NoSuchExtensionError(spec, ext_spec)
def extension_file_path(self, spec):
"""Gets full path to an installed package's extension file, which
keeps track of all the extensions for that package which have been
added to this view.
"""
_check_concrete(spec)
normalize_path = lambda p: (
os.path.abspath(p).rstrip(os.path.sep))
view_prefix = self.view.get_projection_for_spec(spec)
if normalize_path(spec.prefix) == normalize_path(view_prefix):
# For backwards compatibility, when the view is the extended
# package's installation directory, do not include the spec name
# as a subdirectory.
components = [view_prefix, self.layout.metadata_dir,
self.extension_file_name]
else:
components = [view_prefix, self.layout.metadata_dir, spec.name,
self.extension_file_name]
return os.path.join(*components)
def extension_map(self, spec):
"""Defensive copying version of _extension_map() for external API."""
_check_concrete(spec)
return self._extension_map(spec).copy()
def remove_extension(self, spec, ext_spec):
_check_concrete(spec)
_check_concrete(ext_spec)
# Make sure it's installed before removing.
exts = self._extension_map(spec)
self.check_activated(spec, ext_spec)
# do the actual removing.
del exts[ext_spec.name]
self._write_extensions(spec, exts)
def _extension_map(self, spec):
"""Get a dict<name -> spec> for all extensions currently
installed for this package."""
_check_concrete(spec)
if spec not in self._extension_maps:
path = self.extension_file_path(spec)
if not os.path.exists(path):
self._extension_maps[spec] = {}
else:
by_hash = self.layout.specs_by_hash()
exts = {}
with open(path) as ext_file:
yaml_file = yaml.load(ext_file)
for entry in yaml_file['extensions']:
name = next(iter(entry))
dag_hash = entry[name]['hash']
prefix = entry[name]['path']
if dag_hash not in by_hash:
raise InvalidExtensionSpecError(
"Spec %s not found in %s" % (dag_hash, prefix))
ext_spec = by_hash[dag_hash]
if prefix != ext_spec.prefix:
raise InvalidExtensionSpecError(
"Prefix %s does not match spec hash %s: %s"
% (prefix, dag_hash, ext_spec))
exts[ext_spec.name] = ext_spec
self._extension_maps[spec] = exts
return self._extension_maps[spec]
def _write_extensions(self, spec, extensions):
path = self.extension_file_path(spec)
if not extensions:
# Remove the empty extensions file
os.remove(path)
return
# Create a temp file in the same directory as the actual file.
dirname, basename = os.path.split(path)
fs.mkdirp(dirname)
tmp = tempfile.NamedTemporaryFile(
prefix=basename, dir=dirname, delete=False)
# write tmp file
with tmp:
yaml.dump({
'extensions': [
{ext.name: {
'hash': ext.dag_hash(),
'path': str(ext.prefix)
}} for ext in sorted(extensions.values())]
}, tmp, default_flow_style=False, encoding='utf-8')
# Atomic update by moving tmpfile on top of old one.
fs.rename(tmp.name, path)
class DirectoryLayoutError(SpackError):
"""Superclass for directory layout errors."""
def __init__(self, message, long_msg=None):
super(DirectoryLayoutError, self).__init__(message, long_msg)
class SpecHashCollisionError(DirectoryLayoutError):
"""Raised when there is a hash collision in an install layout."""
def __init__(self, installed_spec, new_spec):
super(SpecHashCollisionError, self).__init__(
'Specs %s and %s have the same SHA-1 prefix!'
% (installed_spec, new_spec))
class RemoveFailedError(DirectoryLayoutError):
"""Raised when a DirectoryLayout cannot remove an install prefix."""
def __init__(self, installed_spec, prefix, error):
super(RemoveFailedError, self).__init__(
'Could not remove prefix %s for %s : %s'
% (prefix, installed_spec.short_spec, error))
self.cause = error
class InconsistentInstallDirectoryError(DirectoryLayoutError):
"""Raised when a package seems to be installed to the wrong place."""
def __init__(self, message, long_msg=None):
super(InconsistentInstallDirectoryError, self).__init__(
message, long_msg)
class SpecReadError(DirectoryLayoutError):
"""Raised when directory layout can't read a spec."""
class InvalidDirectoryLayoutParametersError(DirectoryLayoutError):
"""Raised when a invalid directory layout parameters are supplied"""
def __init__(self, message, long_msg=None):
super(InvalidDirectoryLayoutParametersError, self).__init__(
message, long_msg)
class InvalidExtensionSpecError(DirectoryLayoutError):
"""Raised when an extension file has a bad spec in it."""
class ExtensionAlreadyInstalledError(DirectoryLayoutError):
"""Raised when an extension is added to a package that already has it."""
def __init__(self, spec, ext_spec):
super(ExtensionAlreadyInstalledError, self).__init__(
"%s is already installed in %s"
% (ext_spec.short_spec, spec.short_spec))
class ExtensionConflictError(DirectoryLayoutError):
"""Raised when an extension is added to a package that already has it."""
def __init__(self, spec, ext_spec, conflict):
super(ExtensionConflictError, self).__init__(
"%s cannot be installed in %s because it conflicts with %s"
% (ext_spec.short_spec, spec.short_spec, conflict.short_spec))
class NoSuchExtensionError(DirectoryLayoutError):
"""Raised when an extension isn't there on deactivate."""
def __init__(self, spec, ext_spec):
super(NoSuchExtensionError, self).__init__(
"%s cannot be removed from %s because it's not activated."
% (ext_spec.short_spec, spec.short_spec))
| 38.389231 | 86 | 0.612311 |
ecea476c087c84a4dbfcc648f1c3b73e24fa2397 | 1,203 | py | Python | basetest/BaseTest/mail/MailUtils.py | jAchillus/pythontools | 518d72a46dada6c62db94cd3d49e848f11129442 | [
"Apache-2.0"
] | null | null | null | basetest/BaseTest/mail/MailUtils.py | jAchillus/pythontools | 518d72a46dada6c62db94cd3d49e848f11129442 | [
"Apache-2.0"
] | null | null | null | basetest/BaseTest/mail/MailUtils.py | jAchillus/pythontools | 518d72a46dada6c62db94cd3d49e848f11129442 | [
"Apache-2.0"
] | null | null | null | #D:\DevelopTools\Softs\64\Python\Python35
#coding=UTF-8
#-*-coding: UTF-8 -*-
import smtplib
import Mail
class MailUtils:
    def sendMail(self, mail):
        server = None
        try:
            server = smtplib.SMTP()
            server.connect(mail.getHost())
            server.login(mail.getUser(), mail.getPWD())
            print(mail.mail_info)
            server.sendmail(mail.getUser(), mail.getToList(),
                            mail.mail_info.as_string())
            print('success')
            return True
        except Exception as e:
            # Re-raise so callers can handle the failure; a bare return
            # inside `finally` would swallow this exception.
            print('fail')
            raise e
        finally:
            # Close the connection only if it was actually opened.
            if server is not None:
                server.close()
if __name__ == '__main__':
mail = Mail.Mail()
mail.setMessage('HELLOID', '', '')
mail.setSubject('test')
lists = ['D:\\DevelopTools\\Projects\\python\\coreBase\\BaseTest\\BaseTest.py', \
'D:\\DevelopTools\\Projects\\python\\coreBase\\BaseTest\\opjectTest.py']
listnames = ['2BaseTest.py', \
'3pjectTest.py']
mail.setAttachmentList(lists, '', '', listnames)
mail.setPic('test PIC', 'D:\\DevelopTools\\ORG\\WEBPictrues\\1.jpg', '', '')
mailsend = MailUtils()
mailsend.sendMail(mail)
pass
| 32.513514 | 85 | 0.570241 |
18ec7956395d4c67583f46e471a6ef55c2172e9e | 924 | py | Python | python/cocos2d/actors.py | liushooter/OneDayOneCommit | 87dc037fcb21c9cd91723c282d1b618bef3e0414 | [
"MIT"
] | null | null | null | python/cocos2d/actors.py | liushooter/OneDayOneCommit | 87dc037fcb21c9cd91723c282d1b618bef3e0414 | [
"MIT"
] | null | null | null | python/cocos2d/actors.py | liushooter/OneDayOneCommit | 87dc037fcb21c9cd91723c282d1b618bef3e0414 | [
"MIT"
] | null | null | null | import cocos
from cocos.text import Label
from cocos import scene
from cocos.layer import Layer
from cocos.director import director
from cocos.sprite import Sprite
class Actors(Layer):
def __init__(self):
super(Actors, self).__init__()
# Here is where the code starts to get different
# Instead of text, I create a sprite object
# Also unlike last time, I added the sprite to the object instead of making it local
# This is useful if you want to access it in other functions, like I will show in the next tutorial
self.actor = Sprite('assets/img/grossini.png')
        # Next I position it in the middle of the window, similar to the text
self.actor.position = 320, 240
# And lastly I add it to the layer. Standard stuff
self.add(self.actor)
# Now I initialize the director and run the scene just like before
director.init()
director.run(scene.Scene(Actors()))
| 31.862069 | 107 | 0.699134 |
09e09592c65735e8b68d87e5094328f5c99f0421 | 675 | py | Python | venv/bin/Patch Dock/patch_dock_get_results.py | lpreuett/ser499_bioinformatics | 93fbed08a49851bb6cc484594fe2180b8a6bce1f | [
"MIT"
] | null | null | null | venv/bin/Patch Dock/patch_dock_get_results.py | lpreuett/ser499_bioinformatics | 93fbed08a49851bb6cc484594fe2180b8a6bce1f | [
"MIT"
] | null | null | null | venv/bin/Patch Dock/patch_dock_get_results.py | lpreuett/ser499_bioinformatics | 93fbed08a49851bb6cc484594fe2180b8a6bce1f | [
"MIT"
] | null | null | null | import scrapy
import sys
class BioSpider(scrapy.Spider):
def __init__(self, *a, **kw):
# get start URL
url = kw.pop('link', [])
if url:
#print('link: {}'.format(url))
self.start_urls = [url]
else:
# exit if no start url
print('Usage: -a link')
sys.exit(1)
self.logger.info(self.start_urls)
super(BioSpider, self).__init__(*a, **kw)
name = "bio_spider"
#start_urls = [ self.link ]
def parse(self, response):
score = int(response.xpath('//table[4]/tr[2]/td[2]/text()').extract()[0])
print('Patch Dock Score: {}'.format(score))
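
# Typical invocation (the results URL is whatever Patch Dock emailed back;
# the spider reads it from the -a flag matching kw.pop('link', ...) above):
#
#   scrapy runspider patch_dock_get_results.py -a link=<patchdock-results-url>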
| 21.774194 | 81 | 0.525926 |
446a5c77997b17b8ef0961eff19f05dfa76fb849 | 4,518 | py | Python | pydoop/avrolib.py | timgates42/pydoop | 438c92ed34e2d4f12db7cc1ea3a7ed094206c3a5 | [
"Apache-2.0"
] | 203 | 2015-01-02T05:52:49.000Z | 2022-03-25T00:05:50.000Z | pydoop/avrolib.py | timgates42/pydoop | 438c92ed34e2d4f12db7cc1ea3a7ed094206c3a5 | [
"Apache-2.0"
] | 209 | 2015-01-16T14:14:20.000Z | 2022-01-20T16:19:24.000Z | pydoop/avrolib.py | timgates42/pydoop | 438c92ed34e2d4f12db7cc1ea3a7ed094206c3a5 | [
"Apache-2.0"
] | 63 | 2015-01-29T08:44:25.000Z | 2021-12-24T03:25:15.000Z | # BEGIN_COPYRIGHT
#
# Copyright 2009-2021 CRS4.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
"""
Avro tools.
"""
# DEV NOTE: since Avro is not a requirement, do *not* import this
# module unconditionally anywhere in the main code (importing it in
# the Avro examples is OK, ofc).
import sys
import avro.schema
from avro.datafile import DataFileReader, DataFileWriter
from avro.io import DatumReader, DatumWriter, BinaryDecoder, BinaryEncoder
from pydoop.mapreduce.api import RecordWriter, RecordReader
import pydoop.hdfs as hdfs
from pydoop.utils.py3compat import StringIO
parse = avro.schema.Parse if sys.version_info[0] == 3 else avro.schema.parse
class Deserializer(object):
def __init__(self, schema_str):
schema = parse(schema_str)
self.reader = DatumReader(schema)
def deserialize(self, rec_bytes):
return self.reader.read(BinaryDecoder(StringIO(rec_bytes)))
class Serializer(object):
def __init__(self, schema_str):
schema = parse(schema_str)
self.writer = DatumWriter(schema)
def serialize(self, record):
f = StringIO()
encoder = BinaryEncoder(f)
self.writer.write(record, encoder)
return f.getvalue()
try:
from pyavroc import AvroDeserializer
except ImportError:
AvroDeserializer = Deserializer
try:
from pyavroc import AvroSerializer
except ImportError:
AvroSerializer = Serializer
class SeekableDataFileReader(DataFileReader):
FORWARD_WINDOW_SIZE = 8192
def align_after(self, offset):
"""
Search for a sync point after offset and align just after that.
"""
f = self.reader
if offset <= 0: # FIXME what is a negative offset??
f.seek(0)
self._block_count = 0
            self._read_header()  # FIXME we can't estimate how big it is...
return
sm = self.sync_marker
sml = len(sm)
pos = offset
while pos < self.file_length - sml:
f.seek(pos)
data = f.read(self.FORWARD_WINDOW_SIZE)
sync_offset = data.find(sm)
if sync_offset > -1:
f.seek(pos + sync_offset)
self._block_count = 0
return
pos += len(data)
# FIXME this is just an example with no error checking
class AvroReader(RecordReader):
"""
Avro data file reader.
Reads all data blocks that begin within the given input split.
"""
def __init__(self, ctx):
super(AvroReader, self).__init__(ctx)
isplit = ctx.input_split
self.region_start = isplit.offset
self.region_end = isplit.offset + isplit.length
self.reader = SeekableDataFileReader(hdfs.open(isplit.filename),
DatumReader())
self.reader.align_after(isplit.offset)
def next(self):
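        # A block that starts inside this split is consumed to its end, even
        # past region_end; we only stop once we are beyond the split boundary
        # *and* sitting exactly on a block boundary (_block_count == 0).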
pos = self.reader.reader.tell()
if pos > self.region_end and self.reader._block_count == 0:
raise StopIteration
record = next(self.reader)
return pos, record
def get_progress(self):
"""
Give a rough estimate of the progress done.
"""
pos = self.reader.reader.tell()
return min((pos - self.region_start) /
float(self.region_end - self.region_start),
1.0)
# FIXME this is just an example with no error checking
class AvroWriter(RecordWriter):
schema = None
def __init__(self, context):
super(AvroWriter, self).__init__(context)
job_conf = context.job_conf
part = int(job_conf['mapreduce.task.partition'])
outdir = job_conf["mapreduce.task.output.dir"]
outfn = "%s/part-r-%05d.avro" % (outdir, part)
wh = hdfs.open(outfn, "w")
self.writer = DataFileWriter(wh, DatumWriter(), self.schema)
def close(self):
self.writer.close()
# FIXME do we really need to explicitly close the filesystem?
self.writer.writer.fs.close()
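
# A minimal specialization sketch (the "User" schema and the emit() payload
# are hypothetical; only the `schema` class attribute is required here):
#
#   class UserWriter(AvroWriter):
#       schema = parse(
#           '{"type": "record", "name": "User",'
#           ' "fields": [{"name": "name", "type": "string"}]}'
#       )
#
#       def emit(self, key, value):
#           self.writer.append({"name": value})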
| 29.723684 | 77 | 0.650509 |
9684868480242485f4241b41ea4ee0370af4684b | 28,806 | py | Python | tests/ignite/engine/test_engine.py | jkhenning/ignite | c3f80910fb39ee101f07afe21172754c01026927 | [
"BSD-3-Clause"
] | 1 | 2021-08-10T05:32:29.000Z | 2021-08-10T05:32:29.000Z | tests/ignite/engine/test_engine.py | rishabhvarshney14/ignite | 2485fd42c6ef4d3e97fd606a52f8c6e5d940357e | [
"BSD-3-Clause"
] | null | null | null | tests/ignite/engine/test_engine.py | rishabhvarshney14/ignite | 2485fd42c6ef4d3e97fd606a52f8c6e5d940357e | [
"BSD-3-Clause"
] | null | null | null | import os
import time
from unittest.mock import MagicMock, Mock, call
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.engine import Engine, Events, State
from ignite.engine.deterministic import keep_random_state
from ignite.metrics import Average
from tests.ignite.engine import BatchChecker, EpochCounter, IterationCounter, get_iterable_dataset
def test_terminate():
engine = Engine(lambda e, b: 1)
assert not engine.should_terminate
engine.terminate()
assert engine.should_terminate
def test_invalid_process_raises_with_invalid_signature():
with pytest.raises(ValueError, match=r"Engine must be given a processing function in order to run"):
Engine(None)
with pytest.raises(ValueError, match=r"Error adding .+ takes parameters .+ but will be called with"):
Engine(lambda: None)
with pytest.raises(ValueError, match=r"Error adding .+ takes parameters .+ but will be called with"):
Engine(lambda batch: None)
with pytest.raises(ValueError, match=r"Error adding .+ takes parameters .+ but will be called with"):
Engine(lambda engine, batch, extra_arg: None)
def test_invalid_input_data():
engine = Engine(lambda e, b: None)
def data():
pass
with pytest.raises(TypeError, match=r"Argument data should be iterable"):
engine.run(data)
def test_current_epoch_counter_increases_every_epoch():
engine = Engine(MagicMock(return_value=1))
max_epochs = 5
counter = EpochCounter()
engine.add_event_handler(Events.EPOCH_STARTED, counter)
state = engine.run([1, 2], max_epochs=max_epochs)
assert state.epoch == max_epochs
counter.current_epoch_count = 1
state = engine.run([1, 2], max_epochs=max_epochs)
assert state.epoch == max_epochs
def test_current_iteration_counter_increases_every_iteration():
batches = [1, 2, 3]
engine = Engine(MagicMock(return_value=1))
max_epochs = 5
counter = IterationCounter()
engine.add_event_handler(Events.ITERATION_STARTED, counter)
state = engine.run(batches, max_epochs=max_epochs)
assert state.iteration == max_epochs * len(batches)
counter.current_iteration_count = 1
state = engine.run(batches, max_epochs=max_epochs)
assert state.iteration == max_epochs * len(batches)
def test_stopping_criterion_is_max_epochs():
engine = Engine(MagicMock(return_value=1))
max_epochs = 5
state = engine.run([1], max_epochs=max_epochs)
assert state.epoch == max_epochs
def test_terminate_at_end_of_epoch_stops_run():
max_epochs = 5
last_epoch_to_run = 3
engine = Engine(MagicMock(return_value=1))
def end_of_epoch_handler(engine):
if engine.state.epoch == last_epoch_to_run:
engine.terminate()
engine.add_event_handler(Events.EPOCH_COMPLETED, end_of_epoch_handler)
assert not engine.should_terminate
state = engine.run([1], max_epochs=max_epochs)
assert state.epoch == last_epoch_to_run
assert engine.should_terminate
def test_terminate_at_start_of_epoch_stops_run_after_completing_iteration():
max_epochs = 5
epoch_to_terminate_on = 3
batches_per_epoch = [1, 2, 3]
engine = Engine(MagicMock(return_value=1))
def start_of_epoch_handler(engine):
if engine.state.epoch == epoch_to_terminate_on:
engine.terminate()
engine.add_event_handler(Events.EPOCH_STARTED, start_of_epoch_handler)
assert not engine.should_terminate
state = engine.run(batches_per_epoch, max_epochs=max_epochs)
# epoch is not completed so counter is not incremented
assert state.epoch == epoch_to_terminate_on
assert engine.should_terminate
# completes first iteration
assert state.iteration == ((epoch_to_terminate_on - 1) * len(batches_per_epoch)) + 1
def test_terminate_stops_run_mid_epoch():
num_iterations_per_epoch = 10
iteration_to_stop = num_iterations_per_epoch + 3
engine = Engine(MagicMock(return_value=1))
def start_of_iteration_handler(engine):
if engine.state.iteration == iteration_to_stop:
engine.terminate()
engine.add_event_handler(Events.ITERATION_STARTED, start_of_iteration_handler)
state = engine.run(data=[None] * num_iterations_per_epoch, max_epochs=3)
# completes the iteration but doesn't increment counter (this happens just before a new iteration starts)
assert state.iteration == iteration_to_stop
assert state.epoch == np.ceil(iteration_to_stop / num_iterations_per_epoch) # it starts from 0
def test_terminate_epoch_stops_mid_epoch():
num_iterations_per_epoch = 10
iteration_to_stop = num_iterations_per_epoch + 4
engine = Engine(MagicMock(return_value=1))
def start_of_iteration_handler(engine):
if engine.state.iteration == iteration_to_stop:
engine.terminate_epoch()
max_epochs = 3
engine.add_event_handler(Events.ITERATION_STARTED, start_of_iteration_handler)
state = engine.run(data=[None] * num_iterations_per_epoch, max_epochs=max_epochs)
# completes the iteration but doesn't increment counter (this happens just before a new iteration starts)
true_value = num_iterations_per_epoch * (max_epochs - 1) + iteration_to_stop % num_iterations_per_epoch
assert state.iteration == true_value
def _create_mock_data_loader(epochs, batches_per_epoch):
batches = [MagicMock()] * batches_per_epoch
data_loader_manager = MagicMock()
batch_iterators = [iter(batches) for _ in range(epochs)]
data_loader_manager.__iter__.side_effect = batch_iterators
data_loader_manager.__len__.return_value = batches_per_epoch
return data_loader_manager
def test_iteration_events_are_fired():
max_epochs = 5
num_batches = 3
data = _create_mock_data_loader(max_epochs, num_batches)
engine = Engine(MagicMock(return_value=1))
mock_manager = Mock()
iteration_started = Mock()
engine.add_event_handler(Events.ITERATION_STARTED, iteration_started)
iteration_complete = Mock()
engine.add_event_handler(Events.ITERATION_COMPLETED, iteration_complete)
mock_manager.attach_mock(iteration_started, "iteration_started")
mock_manager.attach_mock(iteration_complete, "iteration_complete")
engine.run(data, max_epochs=max_epochs)
assert iteration_started.call_count == num_batches * max_epochs
assert iteration_complete.call_count == num_batches * max_epochs
expected_calls = []
for i in range(max_epochs * num_batches):
expected_calls.append(call.iteration_started(engine))
expected_calls.append(call.iteration_complete(engine))
assert mock_manager.mock_calls == expected_calls
def test_last_event_name():
engine = Engine(MagicMock(return_value=1))
assert engine.last_event_name is None
@engine.on(Events.STARTED)
def _(_engine):
assert _engine.last_event_name == Events.STARTED
@engine.on(Events.EPOCH_STARTED)
def _(_engine):
assert _engine.last_event_name == Events.EPOCH_STARTED
@engine.on(Events.ITERATION_STARTED)
def _(_engine):
assert _engine.last_event_name == Events.ITERATION_STARTED
@engine.on(Events.ITERATION_COMPLETED)
def _(_engine):
assert _engine.last_event_name == Events.ITERATION_COMPLETED
@engine.on(Events.EPOCH_COMPLETED)
def _(_engine):
assert _engine.last_event_name == Events.EPOCH_COMPLETED
engine.run([0, 1])
assert engine.last_event_name == Events.COMPLETED
def test_reset_should_terminate():
def update_fn(engine, batch):
pass
engine = Engine(update_fn)
@engine.on(Events.ITERATION_COMPLETED)
def terminate_on_iteration_10(engine):
if engine.state.iteration == 10:
engine.terminate()
engine.run([0] * 20)
assert engine.state.iteration == 10
engine.run([0] * 20)
assert engine.state.iteration == 10
def test_batch_values():
def _test(data):
# This test check the content passed to update function
counter = [0]
num_iters = len(data)
def update_fn(_, batch):
assert batch == data[counter[0] % num_iters]
counter[0] += 1
engine = Engine(update_fn)
engine.run(data, max_epochs=10)
data = torch.randint(0, 1000, size=(256,))
_test(data)
def test_state_repr():
data = [0, 1, 2, 3, 4, 5]
max_epochs = 1
metrics = {"accuracy": Mock()}
state = State(dataloader=data, max_epochs=max_epochs, metrics=metrics)
s = repr(state)
assert "iteration" in s
assert "epoch" in s
assert "max_epochs: 1" in s
assert "dataloader" in s
assert "metrics" in s
assert "output" in s
assert "batch" in s
def test_alter_batch():
small_shape = (1, 2, 2)
large_shape = (1, 3, 3)
small_loader = torch.randint(0, 256, size=(30,) + small_shape)
large_loader = torch.randint(0, 256, size=(20,) + large_shape)
switch_iteration = 50
def should_take_large_img(i):
return i >= switch_iteration
def update_fn(engine, batch):
i = engine.state.iteration
if i < switch_iteration:
assert batch.shape == small_shape
assert (small_loader[(i - 1) % len(small_loader), ...] == batch).all()
else:
assert batch.shape == large_shape
assert (large_loader[(i - switch_iteration) % len(large_loader), ...] == batch).all()
trainer = Engine(update_fn)
def cycle(seq):
while True:
for i in seq:
yield i
small_loader_iter = cycle(small_loader)
large_loader_iter = cycle(large_loader)
@trainer.on(Events.ITERATION_STARTED)
def choose_batch(engine):
i = engine.state.iteration
if should_take_large_img(i):
batch = next(large_loader_iter)
else:
batch = next(small_loader_iter)
engine.state.batch = batch
num_epochs = 5
num_iters = 25
data = range(num_iters)
trainer.run(data, num_epochs)
def test__is_done():
state = State(iteration=10, epoch=1, max_epochs=100, epoch_length=100)
assert not Engine._is_done(state)
state = State(iteration=1000, max_epochs=10, epoch_length=100)
assert Engine._is_done(state)
def test__setup_engine():
engine = Engine(lambda e, b: 1)
engine.state = State(iteration=10, epoch=1, max_epochs=100, epoch_length=100)
data = list(range(100))
engine.state.dataloader = data
engine._setup_engine()
assert len(engine._init_iter) == 1 and engine._init_iter[0] == 10
# assert engine._dataloader_len == len(data)
def test_run_asserts():
engine = Engine(lambda e, b: 1)
with pytest.raises(ValueError, match=r"Input data has zero size. Please provide non-empty data"):
engine.run([])
def test_state_get_event_attrib_value():
state = State()
state.iteration = 10
state.epoch = 9
e = Events.ITERATION_STARTED
assert state.get_event_attrib_value(e) == state.iteration
e = Events.ITERATION_COMPLETED
assert state.get_event_attrib_value(e) == state.iteration
e = Events.EPOCH_STARTED
assert state.get_event_attrib_value(e) == state.epoch
e = Events.EPOCH_COMPLETED
assert state.get_event_attrib_value(e) == state.epoch
e = Events.STARTED
assert state.get_event_attrib_value(e) == state.epoch
e = Events.COMPLETED
assert state.get_event_attrib_value(e) == state.epoch
e = Events.ITERATION_STARTED(every=10)
assert state.get_event_attrib_value(e) == state.iteration
e = Events.ITERATION_COMPLETED(every=10)
assert state.get_event_attrib_value(e) == state.iteration
e = Events.EPOCH_STARTED(once=5)
assert state.get_event_attrib_value(e) == state.epoch
e = Events.EPOCH_COMPLETED(once=5)
assert state.get_event_attrib_value(e) == state.epoch
def test_time_stored_in_state():
def _test(data, max_epochs, epoch_length):
sleep_time = 0.01
extra_sleep_time = 0.1
engine = Engine(lambda e, b: time.sleep(sleep_time))
@engine.on(Events.EPOCH_COMPLETED)
def check_epoch_time():
assert engine.state.times[Events.EPOCH_COMPLETED.name] >= sleep_time * epoch_length
time.sleep(extra_sleep_time)
@engine.on(Events.COMPLETED)
def check_completed_time():
assert (
engine.state.times[Events.COMPLETED.name] >= (sleep_time * epoch_length + extra_sleep_time) * max_epochs
)
time.sleep(extra_sleep_time)
engine.run(data, max_epochs=max_epochs, epoch_length=epoch_length)
assert engine.state.times[Events.EPOCH_COMPLETED.name] >= sleep_time * epoch_length + extra_sleep_time
assert (
engine.state.times[Events.COMPLETED.name]
>= (sleep_time * epoch_length + extra_sleep_time) * max_epochs + extra_sleep_time
)
_test(list(range(100)), max_epochs=2, epoch_length=100)
_test(list(range(200)), max_epochs=2, epoch_length=100)
_test(list(range(200)), max_epochs=5, epoch_length=100)
def _test_check_triggered_events(data, max_epochs, epoch_length, exp_iter_stops=None):
engine = Engine(lambda e, b: 1)
events = [
Events.STARTED,
Events.EPOCH_STARTED,
Events.ITERATION_STARTED,
Events.ITERATION_COMPLETED,
Events.EPOCH_COMPLETED,
Events.COMPLETED,
Events.GET_BATCH_STARTED,
Events.GET_BATCH_COMPLETED,
Events.DATALOADER_STOP_ITERATION,
]
handlers = {e: MagicMock() for e in events}
for e, handler in handlers.items():
engine.add_event_handler(e, handler)
engine.run(data, max_epochs=max_epochs, epoch_length=epoch_length)
expected_num_calls = {
Events.STARTED: 1,
Events.COMPLETED: 1,
Events.EPOCH_STARTED: max_epochs,
Events.EPOCH_COMPLETED: max_epochs,
Events.ITERATION_STARTED: max_epochs * epoch_length,
Events.ITERATION_COMPLETED: max_epochs * epoch_length,
Events.GET_BATCH_STARTED: max_epochs * epoch_length,
Events.GET_BATCH_COMPLETED: max_epochs * epoch_length,
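        # A finite data source exhausts once per epoch boundary, and no batch
        # is fetched after the final epoch, hence max_epochs - 1 by default.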
Events.DATALOADER_STOP_ITERATION: (max_epochs - 1) if exp_iter_stops is None else exp_iter_stops,
}
for n, handler in handlers.items():
assert handler.call_count == expected_num_calls[n], f"{n}: {handler.call_count} vs {expected_num_calls[n]}"
def _test_run_check_triggered_events():
# tests issue https://github.com/pytorch/ignite/issues/818
_test_check_triggered_events(list(range(10)), max_epochs=4, epoch_length=10)
_test_check_triggered_events(list(range(100)), max_epochs=5, epoch_length=100)
_test_check_triggered_events(list(range(100)), max_epochs=5, epoch_length=50, exp_iter_stops=50 * 5 // 100)
_test_check_triggered_events(list(range(100)), max_epochs=5, epoch_length=150, exp_iter_stops=150 * 5 // 100)
def test_run_check_triggered_events_list():
_test_run_check_triggered_events()
def _test_run_check_triggered_events_on_iterator():
def infinite_data_iterator():
while True:
for i in range(100):
yield i
_test_check_triggered_events(infinite_data_iterator(), max_epochs=5, epoch_length=100, exp_iter_stops=0)
_test_check_triggered_events(infinite_data_iterator(), max_epochs=5, epoch_length=50, exp_iter_stops=0)
_test_check_triggered_events(infinite_data_iterator(), max_epochs=5, epoch_length=150, exp_iter_stops=0)
def limited_data_iterator():
for i in range(100):
yield i
_test_check_triggered_events(limited_data_iterator(), max_epochs=1, epoch_length=100, exp_iter_stops=0)
_test_check_triggered_events(limited_data_iterator(), max_epochs=10, epoch_length=10, exp_iter_stops=0)
# These tests will fail
with pytest.raises(AssertionError):
with pytest.warns(UserWarning, match=r"Data iterator can not provide data anymore"):
_test_check_triggered_events(limited_data_iterator(), max_epochs=3, epoch_length=100)
with pytest.raises(AssertionError):
with pytest.warns(UserWarning, match=r"Data iterator can not provide data anymore"):
_test_check_triggered_events(limited_data_iterator(), max_epochs=3, epoch_length=75)
with pytest.raises(AssertionError):
with pytest.warns(UserWarning, match=r"Data iterator can not provide data anymore"):
_test_check_triggered_events(limited_data_iterator(), max_epochs=1, epoch_length=101)
def test_run_check_triggered_events_on_iterator():
_test_run_check_triggered_events_on_iterator()
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_gpu(distributed_context_single_node_nccl):
_test_run_check_triggered_events_on_iterator()
_test_run_check_triggered_events()
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_cpu(distributed_context_single_node_gloo):
_test_run_check_triggered_events_on_iterator()
_test_run_check_triggered_events()
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_cpu(distributed_context_multi_node_gloo):
_test_run_check_triggered_events_on_iterator()
_test_run_check_triggered_events()
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gpu(distributed_context_multi_node_nccl):
_test_run_check_triggered_events_on_iterator()
_test_run_check_triggered_events()
def test_engine_random_state():
def random_data_generator():
while True:
yield torch.randint(0, 100, size=(5,))
def sum_data(_, batch):
result = torch.sum(batch)
return result
def get_engine():
engine = Engine(sum_data)
average = Average()
average.attach(engine, "average")
return engine
torch.manual_seed(34)
engine = get_engine()
state1 = engine.run(random_data_generator(), max_epochs=2, epoch_length=2)
torch.manual_seed(34)
engine = get_engine()
state2 = engine.run(random_data_generator(), max_epochs=2, epoch_length=2)
torch.manual_seed(42)
engine = get_engine()
state3 = engine.run(random_data_generator(), max_epochs=2, epoch_length=2)
assert state1.metrics["average"] == pytest.approx(state2.metrics["average"])
assert state1.metrics["average"] != pytest.approx(state3.metrics["average"])
assert state2.metrics["average"] != pytest.approx(state3.metrics["average"])
def test_altered_random_state():
# tests issue https://github.com/pytorch/ignite/issues/795
size = 1
def random_train_data_generator(size):
while True:
yield torch.randint(0, 100, size=(size,))
def random_val_data_generator(size):
while True:
yield torch.randint(0, 100, size=(size,)) + 100
train_only_batches = []
def train_fn(_, batch):
train_only_batches.append(batch[0].item())
torch.manual_seed(1)
epoch_length = 6
trainer = Engine(train_fn)
trainer.run(
random_train_data_generator(size), max_epochs=4, epoch_length=epoch_length,
)
def val_fn(_1, _2):
pass
evaluator = Engine(val_fn)
train_batches = []
def train_fn2(_, batch):
train_batches.append(batch[0].item())
trainer = Engine(train_fn2)
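    # keep_random_state snapshots and restores the global RNG state around
    # the handler, so running evaluation mid-training must not shift the
    # training data stream (the regression exercised by this test).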
@trainer.on(Events.EPOCH_COMPLETED)
@keep_random_state
def run_evaluation(_):
evaluator.run(random_val_data_generator(size), epoch_length=4)
torch.manual_seed(1)
trainer.run(
random_train_data_generator(size), max_epochs=4, epoch_length=epoch_length,
)
for i in range(epoch_length):
assert train_batches[epoch_length + i] != train_batches[2 * epoch_length + i]
assert train_batches[i] == train_only_batches[i]
def test_engine_with_dataloader_no_auto_batching():
# tests https://github.com/pytorch/ignite/issues/941
from torch.utils.data import DataLoader, BatchSampler, RandomSampler
data = torch.rand(64, 4, 10)
data_loader = DataLoader(
data, batch_size=None, sampler=BatchSampler(RandomSampler(data), batch_size=8, drop_last=True)
)
counter = [0]
def foo(e, b):
counter[0] += 1
engine = Engine(foo)
engine.run(data_loader, epoch_length=10, max_epochs=5)
assert counter[0] == 50
def test_run_once_finite_iterator_no_epoch_length():
# FR: https://github.com/pytorch/ignite/issues/871
unknown_size = 11
def finite_unk_size_data_iter():
for i in range(unknown_size):
yield i
bc = BatchChecker(data=list(range(unknown_size)))
engine = Engine(lambda e, b: bc.check(b))
completed_handler = MagicMock()
engine.add_event_handler(Events.COMPLETED, completed_handler)
data_iter = finite_unk_size_data_iter()
engine.run(data_iter)
assert engine.state.epoch == 1
assert engine.state.iteration == unknown_size
assert completed_handler.call_count == 1
def test_run_finite_iterator_no_epoch_length():
# FR: https://github.com/pytorch/ignite/issues/871
unknown_size = 11
def finite_unk_size_data_iter():
for i in range(unknown_size):
yield i
bc = BatchChecker(data=list(range(unknown_size)))
engine = Engine(lambda e, b: bc.check(b))
@engine.on(Events.DATALOADER_STOP_ITERATION)
def restart_iter():
engine.state.dataloader = finite_unk_size_data_iter()
data_iter = finite_unk_size_data_iter()
engine.run(data_iter, max_epochs=5)
assert engine.state.epoch == 5
assert engine.state.iteration == unknown_size * 5
def test_run_finite_iterator_no_epoch_length_2():
# FR: https://github.com/pytorch/ignite/issues/871
known_size = 11
def finite_size_data_iter(size):
for i in range(size):
yield i
bc = BatchChecker(data=list(range(known_size)))
engine = Engine(lambda e, b: bc.check(b))
@engine.on(Events.ITERATION_COMPLETED(every=known_size))
def restart_iter():
engine.state.dataloader = finite_size_data_iter(known_size)
data_iter = finite_size_data_iter(known_size)
engine.run(data_iter, max_epochs=5)
assert engine.state.epoch == 5
assert engine.state.iteration == known_size * 5
def test_faq_inf_iterator_with_epoch_length():
# Code snippet from FAQ
import torch
torch.manual_seed(12)
def infinite_iterator(batch_size):
while True:
batch = torch.rand(batch_size, 3, 32, 32)
yield batch
def train_step(trainer, batch):
# ...
s = trainer.state
print(f"{s.epoch}/{s.max_epochs} : {s.iteration} - {batch.norm():.3f}")
trainer = Engine(train_step)
# We need to specify epoch_length to define the epoch
trainer.run(infinite_iterator(4), epoch_length=5, max_epochs=3)
assert trainer.state.epoch == 3
assert trainer.state.iteration == 3 * 5
def test_faq_inf_iterator_no_epoch_length():
# Code snippet from FAQ
import torch
torch.manual_seed(12)
def infinite_iterator(batch_size):
while True:
batch = torch.rand(batch_size, 3, 32, 32)
yield batch
def train_step(trainer, batch):
# ...
s = trainer.state
print(f"{s.epoch}/{s.max_epochs} : {s.iteration} - {batch.norm():.3f}")
trainer = Engine(train_step)
@trainer.on(Events.ITERATION_COMPLETED(once=15))
def stop_training():
trainer.terminate()
trainer.run(infinite_iterator(4))
assert trainer.state.epoch == 1
assert trainer.state.iteration == 15
def test_faq_fin_iterator_unknw_size():
# Code snippet from FAQ
import torch
torch.manual_seed(12)
def finite_unk_size_data_iter():
for i in range(11):
yield i
def train_step(trainer, batch):
# ...
s = trainer.state
print(f"{s.epoch}/{s.max_epochs} : {s.iteration} - {batch:.3f}")
trainer = Engine(train_step)
@trainer.on(Events.DATALOADER_STOP_ITERATION)
def restart_iter():
trainer.state.dataloader = finite_unk_size_data_iter()
data_iter = finite_unk_size_data_iter()
trainer.run(data_iter, max_epochs=5)
assert trainer.state.epoch == 5
assert trainer.state.iteration == 5 * 11
# # # # #
import torch
torch.manual_seed(12)
def finite_unk_size_data_iter():
for i in range(11):
yield i
def val_step(evaluator, batch):
# ...
s = evaluator.state
print(f"{s.epoch}/{s.max_epochs} : {s.iteration} - {batch:.3f}")
evaluator = Engine(val_step)
data_iter = finite_unk_size_data_iter()
evaluator.run(data_iter)
assert evaluator.state.epoch == 1
assert evaluator.state.iteration == 1 * 11
def test_faq_fin_iterator():
# Code snippet from FAQ
import torch
torch.manual_seed(12)
size = 11
def finite_size_data_iter(size):
for i in range(size):
yield i
def train_step(trainer, batch):
# ...
s = trainer.state
print(f"{s.epoch}/{s.max_epochs} : {s.iteration} - {batch:.3f}")
trainer = Engine(train_step)
@trainer.on(Events.ITERATION_COMPLETED(every=size))
def restart_iter():
trainer.state.dataloader = finite_size_data_iter(size)
data_iter = finite_size_data_iter(size)
trainer.run(data_iter, max_epochs=5)
assert trainer.state.epoch == 5
assert trainer.state.iteration == 5 * size
# # # # #
import torch
torch.manual_seed(12)
size = 11
def finite_size_data_iter(size):
for i in range(size):
yield i
def val_step(evaluator, batch):
# ...
s = evaluator.state
print(f"{s.epoch}/{s.max_epochs} : {s.iteration} - {batch:.3f}")
evaluator = Engine(val_step)
data_iter = finite_size_data_iter(size)
evaluator.run(data_iter)
assert evaluator.state.epoch == 1
assert evaluator.state.iteration == size
def test_set_data():
# tests FR https://github.com/pytorch/ignite/issues/833
from torch.utils.data import DataLoader
num_iters1 = 10
num_iters2 = 20
batch_size = 4
torch.manual_seed(1)
data1 = DataLoader(torch.rand(num_iters1 * batch_size, 11), batch_size=batch_size)
data2 = DataLoader(torch.rand(num_iters2 * batch_size, 22), batch_size=batch_size)
switch_iteration = 35
def train_fn(e, batch):
if e.state.iteration <= switch_iteration:
assert batch.shape[1] == 11, f"{e.state.iteration}: {batch.shape}"
else:
assert batch.shape[1] == 22, f"{e.state.iteration}: {batch.shape}"
trainer = Engine(train_fn)
@trainer.on(Events.ITERATION_COMPLETED(once=switch_iteration))
def switch_dataloader():
trainer.set_data(data2)
trainer.run(data1, max_epochs=10)
def test_run_with_max_iters():
max_iters = 8
engine = Engine(lambda e, b: 1)
engine.run([0] * 20, max_iters=max_iters)
assert engine.state.iteration == max_iters
assert engine.state.max_iters == max_iters
def test_run_with_max_iters_greater_than_epoch_length():
max_iters = 73
engine = Engine(lambda e, b: 1)
engine.run([0] * 20, max_iters=max_iters)
assert engine.state.iteration == max_iters
def test_run_with_invalid_max_iters_and_max_epoch():
max_iters = 12
max_epochs = 2
engine = Engine(lambda e, b: 1)
with pytest.raises(
ValueError,
match=r"Arguments max_iters and max_epochs are mutually exclusive."
"Please provide only max_epochs or max_iters.",
):
engine.run([0] * 20, max_iters=max_iters, max_epochs=max_epochs)
def test_epoch_events_fired():
max_iters = 32
engine = Engine(lambda e, b: 1)
@engine.on(Events.EPOCH_COMPLETED)
def fired_event(engine):
assert engine.state.iteration % engine.state.epoch_length == 0
engine.run([0] * 10, max_iters=max_iters)
def test_is_done_with_max_iters():
state = State(iteration=100, epoch=1, max_epochs=3, epoch_length=100, max_iters=250)
assert not Engine._is_done(state)
state = State(iteration=250, epoch=1, max_epochs=3, epoch_length=100, max_iters=250)
assert Engine._is_done(state)
| 30.710021 | 120 | 0.701278 |
1e62c6075e1a538d302950bb74ea101fc2dbb005 | 747 | py | Python | dizoo/gfootball/envs/reward/gfootball_reward_runner.py | LuciusMos/DI-engine | b040b1c36afce038effec9eb483f625131573824 | [
"Apache-2.0"
] | 464 | 2021-07-08T07:26:33.000Z | 2022-03-31T12:35:16.000Z | dizoo/gfootball/envs/reward/gfootball_reward_runner.py | LuciusMos/DI-engine | b040b1c36afce038effec9eb483f625131573824 | [
"Apache-2.0"
] | 177 | 2021-07-09T08:22:55.000Z | 2022-03-31T07:35:22.000Z | dizoo/gfootball/envs/reward/gfootball_reward_runner.py | LuciusMos/DI-engine | b040b1c36afce038effec9eb483f625131573824 | [
"Apache-2.0"
] | 92 | 2021-07-08T12:16:37.000Z | 2022-03-31T09:24:41.000Z | import copy
import torch
from ding.envs.common import EnvElementRunner
from ding.envs.env.base_env import BaseEnv
from .gfootball_reward import GfootballReward
class GfootballRewardRunner(EnvElementRunner):
def _init(self, cfg, *args, **kwargs) -> None:
# set self._core and other state variable
self._core = GfootballReward(cfg)
self._cum_reward = 0.0
    def get(self, engine: BaseEnv) -> torch.Tensor:
ret = copy.deepcopy(engine._reward_of_action)
self._cum_reward += ret
return self._core._to_agent_processor(ret)
def reset(self) -> None:
self._cum_reward = 0.0
@property
    def cum_reward(self) -> torch.Tensor:
return torch.FloatTensor([self._cum_reward])
| 26.678571 | 53 | 0.694779 |
9b7eef3c03ad5549e0b4196b28ac01c98fd1ddc0 | 7,433 | py | Python | tests/test_queuing_context.py | hsim13372/pennylane | 1fae4c3412d60e1a792836551d7071f0ffd0fae0 | [
"Apache-2.0"
] | null | null | null | tests/test_queuing_context.py | hsim13372/pennylane | 1fae4c3412d60e1a792836551d7071f0ffd0fae0 | [
"Apache-2.0"
] | 1 | 2020-04-15T07:30:31.000Z | 2020-04-15T07:30:31.000Z | tests/test_queuing_context.py | hsim13372/pennylane | 1fae4c3412d60e1a792836551d7071f0ffd0fae0 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for the :mod:`pennylane` :class:`QueuingContext` class.
"""
import pytest
import pennylane as qml
from pennylane import QueuingContext
@pytest.fixture(scope="function")
def mock_queuing_context(monkeypatch):
"""A mock instance of the abstract QueuingContext class."""
with monkeypatch.context() as m:
m.setattr(QueuingContext, "__abstractmethods__", frozenset())
m.setattr(
QueuingContext, "_append_operator", lambda self, operator: self.queue.append(operator)
)
m.setattr(
QueuingContext, "_remove_operator", lambda self, operator: self.queue.remove(operator)
)
context = QueuingContext()
context.queue = []
yield context
@pytest.fixture(scope="function")
def three_mock_queuing_contexts(monkeypatch):
"""A list of three mock instances of the abstract QueuingContext class."""
with monkeypatch.context() as m:
m.setattr(QueuingContext, "__abstractmethods__", frozenset())
m.setattr(
QueuingContext, "_append_operator", lambda self, operator: self.queue.append(operator)
)
m.setattr(
QueuingContext, "_remove_operator", lambda self, operator: self.queue.remove(operator)
)
contexts = [QueuingContext() for _ in range(3)]
for context in contexts:
context.queue = []
yield contexts
class TestQueuingContext:
"""Test the logic associated with the QueuingContext class."""
def test_context_activation(self, mock_queuing_context):
"""Test that the QueuingContext is properly activated and deactivated."""
# Assert that the list of active contexts is empty
assert not QueuingContext._active_contexts
with mock_queuing_context:
assert len(QueuingContext._active_contexts) == 1
assert mock_queuing_context in QueuingContext._active_contexts
assert not QueuingContext._active_contexts
def test_multiple_context_activation(self, three_mock_queuing_contexts):
"""Test that multiple QueuingContexts are properly activated and deactivated."""
# Assert that the list of active contexts is empty
assert not QueuingContext._active_contexts
with three_mock_queuing_contexts[0]:
with three_mock_queuing_contexts[1]:
with three_mock_queuing_contexts[2]:
assert len(QueuingContext._active_contexts) == 3
assert three_mock_queuing_contexts[0] in QueuingContext._active_contexts
assert three_mock_queuing_contexts[1] in QueuingContext._active_contexts
assert three_mock_queuing_contexts[2] in QueuingContext._active_contexts
assert not QueuingContext._active_contexts
def test_append_operator_no_context(self):
"""Test that append_operator does not fail when no context is present."""
QueuingContext.append_operator(qml.PauliZ(0))
def test_remove_operator_no_context(self):
"""Test that remove_operator does not fail when no context is present."""
QueuingContext.remove_operator(qml.PauliZ(0))
def test_append_operator(self, mock_queuing_context):
"""Test that append_operator appends the operator to the queue."""
op = qml.PauliZ(0)
assert not mock_queuing_context.queue
with mock_queuing_context:
QueuingContext.append_operator(op)
assert len(mock_queuing_context.queue) == 1
assert op in mock_queuing_context.queue
def test_remove_operator(self, mock_queuing_context):
"""Test that remove_operator removes the operator from the queue."""
op = qml.PauliZ(0)
assert not mock_queuing_context.queue
with mock_queuing_context:
QueuingContext.append_operator(op)
assert len(mock_queuing_context.queue) == 1
assert op in mock_queuing_context.queue
QueuingContext.remove_operator(op)
assert not mock_queuing_context.queue
def test_remove_operator_not_in_list(self, mock_queuing_context):
"""Test that remove_operator does not fail when the operator to be removed is not in the queue."""
op1 = qml.PauliZ(0)
op2 = qml.PauliZ(1)
assert not mock_queuing_context.queue
with mock_queuing_context:
QueuingContext.append_operator(op1)
assert len(mock_queuing_context.queue) == 1
assert op1 in mock_queuing_context.queue
QueuingContext.remove_operator(op2)
assert len(mock_queuing_context.queue) == 1
assert op1 in mock_queuing_context.queue
def test_append_operator_multiple_queues(self, three_mock_queuing_contexts):
"""Test that append_operator appends the operator to multiple queues."""
op = qml.PauliZ(0)
assert not three_mock_queuing_contexts[0].queue
assert not three_mock_queuing_contexts[1].queue
assert not three_mock_queuing_contexts[2].queue
with three_mock_queuing_contexts[0]:
with three_mock_queuing_contexts[1]:
with three_mock_queuing_contexts[2]:
QueuingContext.append_operator(op)
assert len(three_mock_queuing_contexts[0].queue) == 1
assert op in three_mock_queuing_contexts[0].queue
assert len(three_mock_queuing_contexts[1].queue) == 1
assert op in three_mock_queuing_contexts[1].queue
assert len(three_mock_queuing_contexts[1].queue) == 1
assert op in three_mock_queuing_contexts[1].queue
def test_remove_operator_multiple_queues(self, three_mock_queuing_contexts):
"""Test that remove_operator removes the operator from the queue."""
op = qml.PauliZ(0)
assert not three_mock_queuing_contexts[0].queue
assert not three_mock_queuing_contexts[1].queue
assert not three_mock_queuing_contexts[2].queue
with three_mock_queuing_contexts[0]:
with three_mock_queuing_contexts[1]:
with three_mock_queuing_contexts[2]:
QueuingContext.append_operator(op)
assert len(three_mock_queuing_contexts[0].queue) == 1
assert op in three_mock_queuing_contexts[0].queue
assert len(three_mock_queuing_contexts[1].queue) == 1
assert op in three_mock_queuing_contexts[1].queue
assert len(three_mock_queuing_contexts[2].queue) == 1
assert op in three_mock_queuing_contexts[2].queue
QueuingContext.remove_operator(op)
assert not three_mock_queuing_contexts[0].queue
assert not three_mock_queuing_contexts[1].queue
assert not three_mock_queuing_contexts[2].queue
| 38.117949 | 106 | 0.692318 |
a169718fab92094995401d63290d2f07d1346b14 | 4,261 | py | Python | Python/cameo/trackers.py | abondar24/OpenCVBase | 9b23e3b31304e77ad1135d90efb41e3dc069194a | [
"Apache-2.0"
] | null | null | null | Python/cameo/trackers.py | abondar24/OpenCVBase | 9b23e3b31304e77ad1135d90efb41e3dc069194a | [
"Apache-2.0"
] | null | null | null | Python/cameo/trackers.py | abondar24/OpenCVBase | 9b23e3b31304e77ad1135d90efb41e3dc069194a | [
"Apache-2.0"
] | null | null | null | import cv2
import rects
import utils
class Face(object):
"""Data on facial features: face, eyes, nose, mouth"""
def __init__(self):
self.face_rect = None
self.left_eye_rect = None
self.right_eye_rect = None
self.nose_rect = None
self.mouth_rect = None
class FaceTracker(object):
"""A tracker for facial features: face,eyes,nose,mouth"""
def __init__(self, scale_factor=1.2, min_neighbors=2, flags=cv2.cv.CV_HAAR_SCALE_IMAGE):
self.scale_factor = scale_factor
self.min_neighbors = min_neighbors
self.flags = flags
self._faces = []
self._face_classifier = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')
self._eye_classifier = cv2.CascadeClassifier('haarcascade_eye.xml')
self._nose_classifier = cv2.CascadeClassifier('haarcascade_mcs_nose.xml')
self._mouth_classifier = cv2.CascadeClassifier('haarcascade_mcs_mouth.xml')
@property
def faces(self):
"""Tracked facial features"""
return self._faces
def update(self, image):
"""Update the tracked facial features"""
self._faces = []
if utils.is_gray(image):
image = cv2.equalizeHist(image)
else:
image = cv2.cvtColor(image, cv2.cv.CV_BGR2GRAY)
cv2.equalizeHist(image, image)
min_size = utils.width_height_divided_by(image, 8)
face_rects = self._face_classifier.detectMultiScale(image,
self.scale_factor,
self.min_neighbors,
self.flags,
min_size)
if face_rects is not None:
for face_rect in face_rects:
face = Face()
face.face_rect = face_rect
x, y, w, h = face_rect
# seek a left eye
search_rect = (x+w*4/7, y, w*2/7, h/2)
face.left_eye_rect = self._detect_one_object(self._eye_classifier, image, search_rect, 64)
# seek a right eye
search_rect = (x+w/7, y, w*2/7, h/2)
face.right_eye_rect = self._detect_one_object(self._eye_classifier, image, search_rect, 64)
# seek a nose
search_rect = (x+w/4, y+h/4, w/2, h/2)
face.nose_rect = self._detect_one_object(self._nose_classifier, image, search_rect, 32)
# seek a mouth
search_rect = (x+w/6, y+h*2/3, w*2/3, h/3)
face.mouth_rect = self._detect_one_object(self._mouth_classifier, image, search_rect, 16)
self._faces.append(face)
def _detect_one_object(self, classifier, image, rect, image_size_to_min_size_ratio):
x, y, w, h = rect
min_size = utils.width_height_divided_by(image, image_size_to_min_size_ratio)
sub_image = image[y:y+h, x:x+w]
sub_rects = classifier.detectMultiScale(sub_image, self.scale_factor, self.min_neighbors, self.flags, min_size)
if len(sub_rects) == 0:
return None
sub_x, sub_y, sub_w, sub_h = sub_rects[0]
return x + sub_x, y + sub_y, sub_w, sub_h
def draw_debug_rects(self, image):
"""Draw rectangles around the tracked facial features"""
if utils.is_gray(image):
face_color = 255
left_eye_color = 255
right_eye_color = 255
nose_color = 255
mouth_color = 255
else:
face_color = (255, 255, 255) # white
left_eye_color = (0, 0, 255) # red
right_eye_color = (0, 255, 255) # yellow
nose_color = (0, 255, 0) # green
mouth_color = (255, 0, 0) # blue
for face in self.faces:
rects.outline_rect(image, face.face_rect, face_color)
rects.outline_rect(image, face.left_eye_rect, left_eye_color)
rects.outline_rect(image, face.right_eye_rect, right_eye_color)
rects.outline_rect(image, face.nose_rect, nose_color)
rects.outline_rect(image, face.mouth_rect, mouth_color)
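# --- Illustrative usage (not part of the original file; file names are
# hypothetical, and the cv2.cv constants above assume an OpenCV 2.x
# environment) ---
# tracker = FaceTracker()
# frame = cv2.imread('group_photo.jpg')
# tracker.update(frame)            # run the cascade classifiers
# tracker.draw_debug_rects(frame)  # outline face/eyes/nose/mouth in place
# cv2.imwrite('group_photo_annotated.jpg', frame)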
| 35.214876 | 119 | 0.578503 |
d5bf5e165714d922b2d6763e1202b20e6be3b7ca | 808 | py | Python | 28-KMP/solution.py | alfmunny/leetcode | e35d2164c7e6e66410309fe1667ceab5a7689bef | [
"MIT"
] | null | null | null | 28-KMP/solution.py | alfmunny/leetcode | e35d2164c7e6e66410309fe1667ceab5a7689bef | [
"MIT"
] | null | null | null | 28-KMP/solution.py | alfmunny/leetcode | e35d2164c7e6e66410309fe1667ceab5a7689bef | [
"MIT"
] | null | null | null | class Solution:
def __init__(self, pat):
self.pat = pat
self.dp = []
self.KMP(self.pat)
def KMP(self, pat):
M = len(pat)
self.dp = [[0] * 256 for _ in range(M)]
self.dp[0][ord(pat[0])] = 1
X = 0
for j in range(1, M):
for c in range(256):
if ord(pat[j]) == c:
self.dp[j][c] = j + 1
else:
self.dp[j][c] = self.dp[X][c]
X = self.dp[X][ord(pat[j])]
def search(self, txt):
M = len(self.pat)
N = len(txt)
s = 0
for i in range(N):
s = self.dp[s][ord(txt[i])]
if s == M:
return i - M + 1
return -1
sol = Solution("ababc")
print(sol.search("ababdabababc"))
| 21.837838 | 49 | 0.404703 |
6d7f49f8cda19c1f9ce2beffdaabfcf34f5ce6da | 5,200 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_01_01/operations/_express_route_service_providers_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | 8 | 2021-01-13T23:44:08.000Z | 2021-03-17T10:13:36.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_01_01/operations/_express_route_service_providers_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | null | null | null | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_01_01/operations/_express_route_service_providers_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteServiceProvidersOperations(object):
"""ExpressRouteServiceProvidersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_01_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["models.ExpressRouteServiceProviderListResult"]
"""Gets all the available express route service providers.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRouteServiceProviderListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_01_01.models.ExpressRouteServiceProviderListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRouteServiceProviderListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-01-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRouteServiceProviderListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteServiceProviders'} # type: ignore
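# --- Illustrative usage (not part of the generated file; client construction
# is an assumption, e.g. a NetworkManagementClient from azure.mgmt.network) ---
# for provider in network_client.express_route_service_providers.list():
#     print(provider.name)
# The returned ItemPaged lazily follows `next_link` pages as you iterate.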
| 45.614035 | 135 | 0.664423 |
9263e5a3eabc4deb4ea72b57ac8b3b0070df5b2b | 1,214 | py | Python | ejercicio1.py/Animales.py | acasasaez/examen_Poo | 65a421574f0656b34aecfcb40ec34b770a2c79b3 | [
"Apache-2.0"
] | null | null | null | ejercicio1.py/Animales.py | acasasaez/examen_Poo | 65a421574f0656b34aecfcb40ec34b770a2c79b3 | [
"Apache-2.0"
] | null | null | null | ejercicio1.py/Animales.py | acasasaez/examen_Poo | 65a421574f0656b34aecfcb40ec34b770a2c79b3 | [
"Apache-2.0"
] | null | null | null | class Animal(): #Clase Animal
def __init__(self,name): #Cuenta con el atributo name
self.name=name #self.name toma el valor de name
class Mamifero(Animal): #Mamífero hereda de la clase animal (métodos y atributos y no son vriados)
def __init__(self,name):
Animal.__init__(self,name)
class Oviparo(Animal): #Mamífero hereda de la clase animal (métodos y atributos y no son vriados)
def __init__(self,name):
Animal.__init__(self,name)
class Gato(Mamifero): #La clase gato hereda de la clase mamífero, por lo tanto no solo heredará métodos y atributos de la clase mamífero (que ya heredaba métodos y atributos de la clase animal)
def __init__(self,name):
Mamifero.__init__(self,name)
class Ornitorrinco(Mamifero,Oviparo): #La clase ornitorrinco hereda métodos y atributos de la clase mamífero y de la clase ovíparo, por lo tanto hereda también los métodos y atributos de clase animal
def __init__(self,name):
Mamifero.__init__(self,name)
Oviparo.__init__(self,name)
class Pato(Oviparo):# La clase pato hereda de la clase ovíparo y como consecuencia también de la clase animal
def __init__(self,name):
Oviparo.__init__(self,name)
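# --- Illustrative check (not part of the original exercise) ---
# Ornitorrinco sits on a diamond: both of its bases derive from Animal, so an
# instance is simultaneously a Mamifero and an Oviparo, and Python linearizes
# the MRO as Ornitorrinco -> Mamifero -> Oviparo -> Animal -> object.
perry = Ornitorrinco("Perry")
assert isinstance(perry, Mamifero) and isinstance(perry, Oviparo)
assert perry.name == "Perry"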
| 48.56 | 199 | 0.737232 |
d53239e3550c15a1a0ede84e33b6222d8d46f620 | 12,851 | py | Python | symphony/bdk/gen/agent_model/room_created_message.py | symphony-elias/symphony-bdk-python | 0d1cd94a9982e3687ea52c49acdb5f942ecd9bec | [
"Apache-2.0"
] | 17 | 2018-09-06T09:58:18.000Z | 2021-07-13T12:54:20.000Z | symphony/bdk/gen/agent_model/room_created_message.py | symphony-elias/symphony-bdk-python | 0d1cd94a9982e3687ea52c49acdb5f942ecd9bec | [
"Apache-2.0"
] | 59 | 2018-11-21T15:17:57.000Z | 2021-08-03T10:00:43.000Z | symphony/bdk/gen/agent_model/room_created_message.py | symphony-elias/symphony-bdk-python | 0d1cd94a9982e3687ea52c49acdb5f942ecd9bec | [
"Apache-2.0"
] | 37 | 2018-09-01T03:07:48.000Z | 2021-07-06T10:21:50.000Z | """
Agent API
    This document refers to Symphony API calls to send and receive messages and content. They need the on-premise Agent installed to perform decryption/encryption of content. - sessionToken and keyManagerToken can be obtained by calling the authenticationAPI on the symphony back end and the key manager respectively. Refer to the methods described in authenticatorAPI.yaml. - Actions are defined to be atomic, i.e. they will succeed in their entirety or fail and have changed nothing. - If it returns a 40X status then it will have sent no message to any stream even if a request to some subset of the requested streams would have succeeded. - If this contract cannot be met for any reason then this is an error and the response code will be 50X. - MessageML is a markup language for messages. See reference here: https://rest-api.symphony.com/docs/messagemlv2 - **Real Time Events**: The following events are returned when reading from a real time messages and events stream (\"datafeed\"). These events will be returned for datafeeds created with the v5 endpoints. To know more about the endpoints, refer to Create Messages/Events Stream and Read Messages/Events Stream. Unless otherwise specified, all events were added in 1.46.  # noqa: E501
The version of the OpenAPI document: 20.13.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from typing import List
from symphony.bdk.gen.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from symphony.bdk.gen.agent_model.room_created_message_all_of import RoomCreatedMessageAllOf
from symphony.bdk.gen.agent_model.room_tag import RoomTag
from symphony.bdk.gen.agent_model.v2_base_message import V2BaseMessage
globals()['RoomCreatedMessageAllOf'] = RoomCreatedMessageAllOf
globals()['RoomTag'] = RoomTag
globals()['V2BaseMessage'] = V2BaseMessage
class RoomCreatedMessage(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
        This must be a method because an agent_model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
        This must be a method because an agent_model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'timestamp': (str,), # noqa: E501
'v2message_type': (str,), # noqa: E501
'stream_id': (str,), # noqa: E501
'creation_date': (int, none_type), # noqa: E501
'name': (str, none_type), # noqa: E501
'keywords': ([RoomTag], none_type), # noqa: E501
'description': (str, none_type), # noqa: E501
'created_by_user_id': (int, none_type), # noqa: E501
'read_only': (bool, none_type), # noqa: E501
'discoverable': (bool, none_type), # noqa: E501
'public': (bool, none_type), # noqa: E501
'members_can_invite': (bool, none_type), # noqa: E501
'copy_protected': (bool, none_type), # noqa: E501
'id': (str, none_type), # noqa: E501
}
@cached_property
def discriminator():
val = {
}
if not val:
return None
return {'v2message_type': val}
attribute_map = {
'timestamp': 'timestamp', # noqa: E501
'v2message_type': 'v2messageType', # noqa: E501
'stream_id': 'streamId', # noqa: E501
'creation_date': 'creationDate', # noqa: E501
'name': 'name', # noqa: E501
'keywords': 'keywords', # noqa: E501
'description': 'description', # noqa: E501
'created_by_user_id': 'createdByUserId', # noqa: E501
'read_only': 'readOnly', # noqa: E501
'discoverable': 'discoverable', # noqa: E501
'public': 'public', # noqa: E501
'members_can_invite': 'membersCanInvite', # noqa: E501
'copy_protected': 'copyProtected', # noqa: E501
'id': 'id', # noqa: E501
}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, timestamp, v2message_type, stream_id, *args, **kwargs): # noqa: E501
"""RoomCreatedMessage - a agent_model defined in OpenAPI
Args:
timestamp (str):
v2message_type (str):
stream_id (str):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the agent_model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
creation_date (int): [optional] # noqa: E501
name (str): [optional] # noqa: E501
keywords ([RoomTag]): [optional] # noqa: E501
description (str): [optional] # noqa: E501
created_by_user_id (int): The Symphony userId of the user who created the room.. [optional] # noqa: E501
read_only (bool): [optional] # noqa: E501
discoverable (bool): [optional] # noqa: E501
public (bool): [optional] # noqa: E501
members_can_invite (bool): [optional] # noqa: E501
copy_protected (bool): [optional] # noqa: E501
id (str): The messageId is assigned by the ingestor service when a message is sent.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
required_args = {
'timestamp': timestamp,
'v2message_type': v2message_type,
'stream_id': stream_id,
}
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(
constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
self.timestamp: str = timestamp
self.v2message_type: str = v2message_type
self.stream_id: str = stream_id
self.creation_date: int = None
self.name: str = None
self.keywords: List[RoomTag] = None
self.description: str = None
self.created_by_user_id: int = None
self.read_only: bool = None
self.discoverable: bool = None
self.public: bool = None
self.members_can_invite: bool = None
self.copy_protected: bool = None
self.id: str = None
for var_name, var_value in kwargs.items():
if var_name in unused_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
not self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
return {
'anyOf': [
],
'allOf': [
RoomCreatedMessageAllOf,
V2BaseMessage,
],
'oneOf': [
],
}
| 45.733096 | 1,241 | 0.610303 |
cb9ec92b950cbd439222f44750c21f2807729f6b | 38,347 | py | Python | sqa/weaksp/sempar/executors/wikitable_executor.py | WDZRMPCBIT/SCoRE | c426e58c253f5d97fc4ad0e0fea9606f70cff872 | [
"MIT"
] | null | null | null | sqa/weaksp/sempar/executors/wikitable_executor.py | WDZRMPCBIT/SCoRE | c426e58c253f5d97fc4ad0e0fea9606f70cff872 | [
"MIT"
] | null | null | null | sqa/weaksp/sempar/executors/wikitable_executor.py | WDZRMPCBIT/SCoRE | c426e58c253f5d97fc4ad0e0fea9606f70cff872 | [
"MIT"
] | null | null | null | from typing import List, Dict, Tuple, Union, Any
from collections import defaultdict
import logging
from allennlp.semparse import util as semparse_util
from allennlp.semparse.worlds.world import ExecutionError
from allennlp.semparse.contexts.table_question_knowledge_graph import MONTH_NUMBERS
from allennlp.semparse.contexts import TableQuestionContext
from allennlp.tools import wikitables_evaluator as evaluator
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
NestedList = List[Union[str, List]] # pylint: disable=invalid-name
class Date:
def __init__(self, year: int, month: int, day: int) -> None:
self.year = year
self.month = month
self.day = day
def __eq__(self, other) -> bool:
# Note that the logic below renders equality to be non-transitive. That is,
# Date(2018, -1, -1) == Date(2018, 2, 3) and Date(2018, -1, -1) == Date(2018, 4, 5)
# but Date(2018, 2, 3) != Date(2018, 4, 5).
if not isinstance(other, Date):
return False
year_is_same = self.year == -1 or other.year == -1 or self.year == other.year
month_is_same = self.month == -1 or other.month == -1 or self.month == other.month
day_is_same = self.day == -1 or other.day == -1 or self.day == other.day
return year_is_same and month_is_same and day_is_same
def __gt__(self, other) -> bool:
# pylint: disable=too-many-return-statements
# The logic below is tricky, and is based on some assumptions we make about date comparison.
# Year, month or day being -1 means that we do not know its value. In those cases, the
# we consider the comparison to be undefined, and return False if all the fields that are
# more significant than the field being compared are equal. However, when year is -1 for both
# dates being compared, it is safe to assume that the year is not specified because it is
# the same. So we make an exception just in that case. That is, we deem the comparison
# undefined only when one of the year values is -1, but not both.
if not isinstance(other, Date):
return False # comparison undefined
# We're doing an exclusive or below.
if (self.year == -1) != (other.year == -1):
return False # comparison undefined
# If both years are -1, we proceed.
if self.year != other.year:
return self.year > other.year
# The years are equal and not -1, or both are -1.
if self.month == -1 or other.month == -1:
return False
if self.month != other.month:
return self.month > other.month
# The months and years are equal and not -1
if self.day == -1 or other.day == -1:
return False
return self.day > other.day
def __ge__(self, other) -> bool:
if not isinstance(other, Date):
return False
return self > other or self == other
def __str__(self):
return f"{self.year}-{self.month}-{self.day}"
class WikiTablesVariableFreeExecutor:
# pylint: disable=too-many-public-methods
"""
    Implements the functions in the variable-free language we use, which is inspired by the one in
"Memory Augmented Policy Optimization for Program Synthesis with Generalization" by Liang et al.
Parameters
----------
table_data : ``List[Dict[str, str]]``
All the rows in the table on which the executor will be used. The class expects each row to
be represented as a dict from column names to corresponding cell values.
"""
def __init__(self, table_data: List[Dict[str, str]]) -> None:
self.table_data = table_data
def __eq__(self, other):
if not isinstance(other, WikiTablesVariableFreeExecutor):
return False
return self.table_data == other.table_data
def execute(self, logical_form: str) -> Any:
if not logical_form.startswith("("):
logical_form = f"({logical_form})"
logical_form = logical_form.replace(",", " ")
expression_as_list = semparse_util.lisp_to_nested_expression(logical_form)
        # The expression list has an additional level of nesting at the top. For
        # example, if the logical form is "(select all_rows fb:row.row.league)",
        # the expression list will be [['select', 'all_rows', 'fb:row.row.league']],
        # so we remove the top-most level of nesting.
result = self._handle_expression(expression_as_list[0])
return result
def evaluate_logical_form(self, logical_form: str, target_list: List[str]) -> bool:
"""
Takes a logical form, and the list of target values as strings from the original lisp
string, and returns True iff the logical form executes to the target list.
"""
normalized_target_list = [TableQuestionContext.normalize_string(value) for value in
target_list]
target_value_list = evaluator.to_value_list(normalized_target_list)
try:
denotation = self.execute(logical_form)
except ExecutionError:
logger.warning(f'Failed to execute: {logical_form}')
return False
if isinstance(denotation, list):
denotation_list = [str(denotation_item) for denotation_item in denotation]
else:
denotation_list = [str(denotation)]
denotation_value_list = evaluator.to_value_list(denotation_list)
return evaluator.check_denotation(target_value_list, denotation_value_list)
## Helper functions
def _handle_expression(self, expression_list):
if isinstance(expression_list, list) and len(expression_list) == 1:
expression = expression_list[0]
else:
expression = expression_list
if isinstance(expression, list):
# This is a function application.
function_name = expression[0]
else:
# This is a constant (like "all_rows" or "2005")
return self._handle_constant(expression)
try:
function = getattr(self, function_name)
return function(*expression[1:])
except AttributeError:
raise ExecutionError(f"Function not found: {function_name}")
def _handle_constant(self, constant: str) -> Union[List[Dict[str, str]], str, float]:
if constant == "all_rows":
return self.table_data
try:
return float(constant)
except ValueError:
# The constant is not a number. Returning as-is if it is a string.
if constant.startswith("string:"):
return constant.replace("string:", "")
raise ExecutionError(f"Cannot handle constant: {constant}")
@staticmethod
def _get_number_row_pairs_to_filter(row_list: List[Dict[str, str]],
column_name: str) -> List[Tuple[float, Dict[str, str]]]:
"""
Helper method that takes a row list and a column name, and returns a list of tuples, each
containing as the first element a number taken from that column, and the corresponding row
as the second element. The output can be used to compare rows based on the numbers.
"""
if not row_list:
return []
try:
# Various symbols like commas, dollar signs would have been converted to _. Removing
# them for float conversion.
cell_row_pairs = [(float(row[column_name].replace('_', '')), row) for row in row_list]
except ValueError:
# This means that at least one of the cells is not numerical.
return []
return cell_row_pairs
def _get_date_row_pairs_to_filter(self,
row_list: List[Dict[str, str]],
column_name: str) -> List[Tuple[Date, Dict[str, str]]]:
"""
Helper method that takes a row list and a column name, and returns a list of tuples, each
containing as the first element a date taken from that column, and the corresponding row as
the second element. The output can be used to compare rows based on the dates.
"""
if not row_list:
return []
cell_row_pairs = [(self._make_date(row[column_name]), row) for row in row_list]
return cell_row_pairs
@staticmethod
def _make_date(cell_string: str) -> Date:
string_parts = cell_string.split("_")
year = -1
month = -1
day = -1
for part in string_parts:
if part.isdigit():
if len(part) == 4:
year = int(part)
else:
day = int(part)
elif part in MONTH_NUMBERS:
month = MONTH_NUMBERS[part]
return Date(year, month, day)
@staticmethod
def _value_looks_like_date(cell_value: str) -> bool:
# TODO (pradeep): This will be unnecessary when we have column types identified.
# We try to figure out if the values being compared are simple numbers or dates. We use
# simple rules here: that the string contains less than 4 parts, and one of the parts is a
# month name. Note that this will not consider strings with just years as dates. That's fine
# because we can compare them as numbers.
values_are_dates = False
cell_value_parts = cell_value.split('_')
# Check if the number of parts in the string are 3 or fewer. If not, it's probably neither a
# date nor a number.
if len(cell_value_parts) <= 3:
for part in cell_value_parts:
if part in MONTH_NUMBERS:
values_are_dates = True
return values_are_dates
def _get_row_index(self, row: Dict[str, str]) -> int:
"""
Takes a row and returns its index in the full list of rows. If the row does not occur in the
table (which should never happen because this function will only be called with a row that
is the result of applying one or more functions on the table rows), the method returns -1.
"""
row_index = -1
for index, table_row in enumerate(self.table_data):
if table_row == row:
row_index = index
break
return row_index
## Functions in the language
def select(self, row_expression_list: NestedList, column_name: str) -> List[str]:
"""
Select function takes a list of rows and a column name and returns a list of cell values as
strings.
"""
row_list = self._handle_expression(row_expression_list)
return [row[column_name] for row in row_list]
def argmax(self, row_expression_list: NestedList, column_name: str) -> List[Dict[str, str]]:
"""
Takes a list of rows and a column name and returns a list containing a single row (dict from
columns to cells) that has the maximum numerical value in the given column. We return a list
        instead of a single dict to be consistent with the return type of `select` and `all_rows`.
"""
row_list = self._handle_expression(row_expression_list)
if not row_list:
return []
# We just check whether the first cell value is a date or number and assume that the rest
# are the same kind of values.
first_cell_value = row_list[0][column_name]
if self._value_looks_like_date(first_cell_value):
value_row_pairs = self._get_date_row_pairs_to_filter(row_list, column_name)
else:
value_row_pairs = self._get_number_row_pairs_to_filter(row_list, column_name) # type: ignore
if not value_row_pairs:
return []
# Returns a list containing the row with the max cell value.
return [sorted(value_row_pairs, key=lambda x: x[0], reverse=True)[0][1]]
def argmin(self, row_expression_list: NestedList, column_name: str) -> List[Dict[str, str]]:
"""
Takes a list of rows and a column and returns a list containing a single row (dict from
columns to cells) that has the minimum numerical value in the given column. We return a list
        instead of a single dict to be consistent with the return type of `select` and `all_rows`.
"""
row_list = self._handle_expression(row_expression_list)
if not row_list:
return []
# We just check whether the first cell value is a date or number and assume that the rest
# are the same kind of values.
first_cell_value = row_list[0][column_name]
if self._value_looks_like_date(first_cell_value):
value_row_pairs = self._get_date_row_pairs_to_filter(row_list, column_name)
else:
value_row_pairs = self._get_number_row_pairs_to_filter(row_list, column_name) # type: ignore
if not value_row_pairs:
return []
        # Returns a list containing the row with the min cell value.
return [sorted(value_row_pairs, key=lambda x: x[0])[0][1]]
def filter_number_greater(self,
row_expression_list: NestedList,
column_name: str,
value_expression: NestedList) -> List[Dict[str, str]]:
"""
Takes a list of rows as an expression, a column, and a numerical value expression and
returns all the rows where the value in that column is greater than the given value.
"""
row_list = self._handle_expression(row_expression_list)
if not row_list:
return []
cell_row_pairs = self._get_number_row_pairs_to_filter(row_list, column_name)
filter_value = self._handle_expression(value_expression)
if not isinstance(filter_value, float):
raise ExecutionError(f"Invalid filter value: {value_expression}")
return_list = []
for cell_value, row in cell_row_pairs:
if cell_value > filter_value:
return_list.append(row)
return return_list
def filter_number_greater_equals(self,
row_expression_list: NestedList,
column_name: str,
value_expression: NestedList) -> List[Dict[str, str]]:
"""
Takes a list of rows as an expression, a column, and a numerical value expression and
returns all the rows where the value in that column is greater than or equal to the given
value.
"""
row_list = self._handle_expression(row_expression_list)
if not row_list:
return []
cell_row_pairs = self._get_number_row_pairs_to_filter(row_list, column_name)
filter_value = self._handle_expression(value_expression)
if not isinstance(filter_value, float):
raise ExecutionError(f"Invalid filter value: {value_expression}")
return_list = []
for cell_value, row in cell_row_pairs:
if cell_value >= filter_value:
return_list.append(row)
return return_list
def filter_number_lesser(self,
row_expression_list: NestedList,
column_name: str,
value_expression: NestedList) -> List[Dict[str, str]]:
"""
Takes a list of rows as an expression, a column, and a numerical value expression and
returns all the rows where the value in that column is less than the given value.
"""
row_list = self._handle_expression(row_expression_list)
if not row_list:
return []
cell_row_pairs = self._get_number_row_pairs_to_filter(row_list, column_name)
filter_value = self._handle_expression(value_expression)
if not isinstance(filter_value, float):
raise ExecutionError(f"Invalid filter value: {value_expression}")
return_list = []
for cell_value, row in cell_row_pairs:
if cell_value < filter_value:
return_list.append(row)
return return_list
def filter_number_lesser_equals(self,
row_expression_list: NestedList,
column_name: str,
value_expression: NestedList) -> List[Dict[str, str]]:
"""
Takes a list of rows, a column, and a numerical value and returns all the rows where the
value in that column is lesser than or equal to the given value.
"""
row_list = self._handle_expression(row_expression_list)
if not row_list:
return []
cell_row_pairs = self._get_number_row_pairs_to_filter(row_list, column_name)
filter_value = self._handle_expression(value_expression)
if not isinstance(filter_value, float):
raise ExecutionError(f"Invalid filter value: {value_expression}")
return_list = []
for cell_value, row in cell_row_pairs:
if cell_value <= filter_value:
return_list.append(row)
return return_list
def filter_number_equals(self,
row_expression_list: NestedList,
column_name: str,
value_expression: NestedList) -> List[Dict[str, str]]:
"""
Takes a list of rows, a column, and a numerical value and returns all the rows where the
value in that column equals the given value.
"""
row_list = self._handle_expression(row_expression_list)
if not row_list:
return []
cell_row_pairs = self._get_number_row_pairs_to_filter(row_list, column_name)
filter_value = self._handle_expression(value_expression)
if not isinstance(filter_value, float):
raise ExecutionError(f"Invalid filter value: {value_expression}")
return_list = []
for cell_value, row in cell_row_pairs:
if cell_value == filter_value:
return_list.append(row)
return return_list
def filter_number_not_equals(self,
row_expression_list: NestedList,
column_name: str,
value_expression: NestedList) -> List[Dict[str, str]]:
"""
Takes a list of rows, a column, and a numerical value and returns all the rows where the
value in that column is not equal to the given value.
"""
row_list = self._handle_expression(row_expression_list)
if not row_list:
return []
cell_row_pairs = self._get_number_row_pairs_to_filter(row_list, column_name)
filter_value = self._handle_expression(value_expression)
if not isinstance(filter_value, float):
raise ExecutionError(f"Invalid filter value: {value_expression}")
return_list = []
for cell_value, row in cell_row_pairs:
if cell_value != filter_value:
return_list.append(row)
return return_list
# Note that the following six methods are identical to the ones above, except that the filter
# values are obtained from `_get_date_row_pairs_to_filter`.
def filter_date_greater(self,
row_expression_list: NestedList,
column_name: str,
value_expression: NestedList) -> List[Dict[str, str]]:
"""
Takes a list of rows as an expression, a column, and a numerical value expression and
returns all the rows where the value in that column is greater than the given value.
"""
row_list = self._handle_expression(row_expression_list)
if not row_list:
return []
cell_row_pairs = self._get_date_row_pairs_to_filter(row_list, column_name)
filter_value = self._handle_expression(value_expression)
if not isinstance(filter_value, Date):
raise ExecutionError(f"Invalid filter value: {value_expression}")
return_list = []
for cell_value, row in cell_row_pairs:
if cell_value > filter_value:
return_list.append(row)
return return_list
def filter_date_greater_equals(self,
row_expression_list: NestedList,
column_name: str,
value_expression: NestedList) -> List[Dict[str, str]]:
"""
Takes a list of rows as an expression, a column, and a numerical value expression and
returns all the rows where the value in that column is greater than or equal to the given
value.
"""
row_list = self._handle_expression(row_expression_list)
if not row_list:
return []
cell_row_pairs = self._get_date_row_pairs_to_filter(row_list, column_name)
filter_value = self._handle_expression(value_expression)
if not isinstance(filter_value, Date):
raise ExecutionError(f"Invalid filter value: {value_expression}")
return_list = []
for cell_value, row in cell_row_pairs:
if cell_value >= filter_value:
return_list.append(row)
return return_list
def filter_date_lesser(self,
row_expression_list: NestedList,
column_name: str,
value_expression: NestedList) -> List[Dict[str, str]]:
"""
Takes a list of rows as an expression, a column, and a numerical value expression and
returns all the rows where the value in that column is less than the given value.
"""
row_list = self._handle_expression(row_expression_list)
if not row_list:
return []
cell_row_pairs = self._get_date_row_pairs_to_filter(row_list, column_name)
filter_value = self._handle_expression(value_expression)
if not isinstance(filter_value, Date):
raise ExecutionError(f"Invalid filter value: {value_expression}")
return_list = []
for cell_value, row in cell_row_pairs:
if cell_value < filter_value:
return_list.append(row)
return return_list
def filter_date_lesser_equals(self,
row_expression_list: NestedList,
column_name: str,
value_expression: NestedList) -> List[Dict[str, str]]:
"""
Takes a list of rows, a column, and a numerical value and returns all the rows where the
value in that column is lesser than or equal to the given value.
"""
row_list = self._handle_expression(row_expression_list)
if not row_list:
return []
cell_row_pairs = self._get_date_row_pairs_to_filter(row_list, column_name)
filter_value = self._handle_expression(value_expression)
if not isinstance(filter_value, Date):
raise ExecutionError(f"Invalid filter value: {value_expression}")
return_list = []
for cell_value, row in cell_row_pairs:
if cell_value <= filter_value:
return_list.append(row)
return return_list
def filter_date_equals(self,
row_expression_list: NestedList,
column_name: str,
value_expression: NestedList) -> List[Dict[str, str]]:
"""
Takes a list of rows, a column, and a numerical value and returns all the rows where the
value in that column equals the given value.
"""
row_list = self._handle_expression(row_expression_list)
if not row_list:
return []
cell_row_pairs = self._get_date_row_pairs_to_filter(row_list, column_name)
filter_value = self._handle_expression(value_expression)
if not isinstance(filter_value, Date):
raise ExecutionError(f"Invalid filter value: {value_expression}")
return_list = []
for cell_value, row in cell_row_pairs:
if cell_value == filter_value:
return_list.append(row)
return return_list
def filter_date_not_equals(self,
row_expression_list: NestedList,
column_name: str,
value_expression: NestedList) -> List[Dict[str, str]]:
"""
        Takes a list of rows, a column, and a date value and returns all the rows where the
value in that column is not equal to the given value.
"""
row_list = self._handle_expression(row_expression_list)
if not row_list:
return []
cell_row_pairs = self._get_date_row_pairs_to_filter(row_list, column_name)
filter_value = self._handle_expression(value_expression)
if not isinstance(filter_value, Date):
raise ExecutionError(f"Invalid filter value: {value_expression}")
return_list = []
for cell_value, row in cell_row_pairs:
if cell_value != filter_value:
return_list.append(row)
return return_list
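    # A hedged usage sketch for the date filters above (``executor``, the
    # column name, and the value expression below are hypothetical, not part
    # of this class's API):
    #
    #     rows = executor.filter_date_greater(
    #         ["all_rows"], "date_column", ["date", "2010", "1", "1"])
    #
    # The value expression is itself a ``NestedList``; ``_handle_expression``
    # must evaluate it to a ``Date``, otherwise an ``ExecutionError`` is raised.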
def filter_in(self,
row_expression_list: NestedList,
column_name: str,
value_expression: NestedList) -> List[Dict[str, str]]:
"""
Takes a list of rows, a column, and a string value and returns all the rows where the value
in that column contains the given string.
"""
row_list = self._handle_expression(row_expression_list)
if not row_list:
return []
expression_evaluation = self._handle_expression(value_expression)
if isinstance(expression_evaluation, list):
filter_value = expression_evaluation[0]
elif isinstance(expression_evaluation, str):
filter_value = expression_evaluation
else:
raise ExecutionError(f"Unexpected filter value for filter_in: {value_expression}")
if not isinstance(filter_value, str):
raise ExecutionError(f"Unexpected filter value for filter_in: {value_expression}")
# Assuming filter value has underscores for spaces. The cell values also have underscores
# for spaces, so we do not need to replace them here.
result_list = []
for row in row_list:
if filter_value in row[column_name]:
result_list.append(row)
return result_list
def filter_not_in(self,
row_expression_list: NestedList,
column_name: str,
value_expression: NestedList) -> List[Dict[str, str]]:
"""
Takes a list of rows, a column, and a string value and returns all the rows where the value
in that column does not contain the given string.
"""
row_list = self._handle_expression(row_expression_list)
if not row_list:
return []
expression_evaluation = self._handle_expression(value_expression)
if isinstance(expression_evaluation, list):
filter_value = expression_evaluation[0]
elif isinstance(expression_evaluation, str):
filter_value = expression_evaluation
else:
raise ExecutionError(f"Unexpected filter value for filter_in: {value_expression}")
if not isinstance(filter_value, str):
raise ExecutionError(f"Unexpected filter value for filter_in: {value_expression}")
# Assuming filter value has underscores for spaces. The cell values also have underscores
# for spaces, so we do not need to replace them here.
result_list = []
for row in row_list:
if filter_value not in row[column_name]:
result_list.append(row)
return result_list
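    # Sketch of the string filters above (hypothetical names; assumes, per the
    # comments in both methods, that cell strings use underscores for spaces):
    #
    #     kept = executor.filter_in(["all_rows"], "league_column", ["usl_a_league"])
    #     rest = executor.filter_not_in(["all_rows"], "league_column", ["usl_a_league"])
    #
    # ``filter_in`` keeps rows whose cell contains the substring;
    # ``filter_not_in`` keeps the complement.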
def first(self, row_expression_list: NestedList) -> List[Dict[str, str]]:
"""
Takes an expression that evaluates to a list of rows, and returns the first one in that
list.
"""
row_list: List[Dict[str, str]] = self._handle_expression(row_expression_list)
if not row_list:
logger.warning("Trying to get first row from an empty list: %s", row_expression_list)
return []
return [row_list[0]]
def last(self, row_expression_list: NestedList) -> List[Dict[str, str]]:
"""
Takes an expression that evaluates to a list of rows, and returns the last one in that
list.
"""
row_list: List[Dict[str, str]] = self._handle_expression(row_expression_list)
if not row_list:
logger.warning("Trying to get last row from an empty list: %s", row_expression_list)
return []
return [row_list[-1]]
def previous(self, row_expression_list: NestedList) -> List[Dict[str, str]]:
"""
Takes an expression that evaluates to a single row, and returns the row (as a list to be
consistent with the rest of the API), that occurs before the input row in the original set
of rows. If the input row happens to be the top row, we will return an empty list.
"""
row_list: List[Dict[str, str]] = self._handle_expression(row_expression_list)
if not row_list:
logger.warning("Trying to get the previous row from an empty list: %s",
row_expression_list)
return []
if len(row_list) > 1:
logger.warning("Trying to get the previous row from a non-singleton list: %s",
row_expression_list)
input_row_index = self._get_row_index(row_list[0]) # Take the first row.
if input_row_index > 0:
return [self.table_data[input_row_index - 1]]
return []
def next(self, row_expression_list: NestedList) -> List[Dict[str, str]]:
"""
Takes an expression that evaluates to a single row, and returns the row (as a list to be
consistent with the rest of the API), that occurs after the input row in the original set
of rows. If the input row happens to be the last row, we will return an empty list.
"""
row_list: List[Dict[str, str]] = self._handle_expression(row_expression_list)
if not row_list:
logger.warning("Trying to get the next row from an empty list: %s", row_expression_list)
return []
if len(row_list) > 1:
logger.warning("Trying to get the next row from a non-singleton list: %s", row_expression_list)
input_row_index = self._get_row_index(row_list[-1]) # Take the last row.
if input_row_index < len(self.table_data) - 1 and input_row_index != -1:
return [self.table_data[input_row_index + 1]]
return []
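    # Row-navigation sketch (hypothetical): ``first``/``last`` reduce a row
    # list to a singleton, while ``previous``/``next`` step through the
    # *original* table order via ``_get_row_index``:
    #
    #     selected = executor.first(["all_rows"])   # [first row] or []
    #     after = executor.next(selected)           # following row, or [] at the end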
def count(self, row_expression_list: NestedList) -> float:
"""
        Takes an expression that evaluates to a list of rows and returns their count (as a float
to be consistent with the other functions like max that also return numbers).
"""
row_list: List[Dict[str, str]] = self._handle_expression(row_expression_list)
return float(len(row_list))
def max(self,
row_expression_list: NestedList,
column_name: str) -> float:
"""
Takes an expression list that evaluates to a list of rows and a column name, and returns the max
of the values under that column in those rows.
"""
row_list: List[Dict[str, str]] = self._handle_expression(row_expression_list)
cell_row_pairs = self._get_number_row_pairs_to_filter(row_list, column_name)
if not cell_row_pairs:
return 0.0
return max([value for value, _ in cell_row_pairs])
def min(self,
row_expression_list: NestedList,
column_name: str) -> float:
"""
Takes an expression list that evaluates to a list of rows and a column, and returns the min
of the values under that column in those rows.
"""
row_list: List[Dict[str, str]] = self._handle_expression(row_expression_list)
cell_row_pairs = self._get_number_row_pairs_to_filter(row_list, column_name)
if not cell_row_pairs:
return 0.0
return min([value for value, _ in cell_row_pairs])
def sum(self,
row_expression_list: NestedList,
            column_name: str) -> float:
"""
Takes an expression list that evaluates to a list of rows and a column, and returns the sum
of the values under that column in those rows.
"""
row_list: List[Dict[str, str]] = self._handle_expression(row_expression_list)
cell_row_pairs = self._get_number_row_pairs_to_filter(row_list, column_name)
if not cell_row_pairs:
return 0.0
return sum([value for value, _ in cell_row_pairs])
def average(self,
row_expression_list: NestedList,
column_name: str) -> float:
"""
Takes an expression list that evaluates to a list of rows and a column, and returns the mean
of the values under that column in those rows.
"""
row_list: List[Dict[str, str]] = self._handle_expression(row_expression_list)
cell_row_pairs = self._get_number_row_pairs_to_filter(row_list, column_name)
if not cell_row_pairs:
return 0.0
return sum([value for value, _ in cell_row_pairs]) / len(cell_row_pairs)
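    # Aggregation sketch (hypothetical column name): ``max``, ``min``, ``sum``
    # and ``average`` all read (number, row) pairs via
    # ``_get_number_row_pairs_to_filter`` and return 0.0 when the column
    # yields no numeric values:
    #
    #     mean_score = executor.average(["all_rows"], "score_column")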
def mode(self,
row_expression_list: NestedList,
column_name: str) -> List[str]:
"""
Takes an expression that evaluates to a list of rows, and a column and returns the most
frequent values (one or more) under that column in those rows.
"""
row_list: List[Dict[str, str]] = self._handle_expression(row_expression_list)
if not row_list:
return []
value_frequencies: Dict[str, int] = defaultdict(int)
max_frequency = 0
most_frequent_list: List[str] = []
for row in row_list:
cell_value = row[column_name]
value_frequencies[cell_value] += 1
frequency = value_frequencies[cell_value]
if frequency > max_frequency:
max_frequency = frequency
most_frequent_list = [cell_value]
elif frequency == max_frequency:
most_frequent_list.append(cell_value)
return most_frequent_list
def same_as(self,
row_expression_list: NestedList,
column_name: str) -> List[Dict[str, str]]:
"""
Takes an expression that evaluates to a row, and a column and returns a list of rows from
the full set of rows that contain the same value under the given column as the given row.
"""
row_list: List[Dict[str, str]] = self._handle_expression(row_expression_list)
if not row_list:
return []
if len(row_list) > 1:
logger.warning("same_as function got multiple rows. Taking the first one: "
f"{row_expression_list}")
cell_value = row_list[0][column_name]
return_list = []
for row in self.table_data:
if row[column_name] == cell_value:
return_list.append(row)
return return_list
def diff(self,
first_row_expression_list: NestedList,
second_row_expression_list: NestedList,
column_name: str) -> float:
"""
        Takes two expressions that evaluate to rows, and a column name, and returns the
difference between the values under that column in those two rows.
"""
first_row_list = self._handle_expression(first_row_expression_list)
second_row_list = self._handle_expression(second_row_expression_list)
if not first_row_list or not second_row_list:
return 0.0
if len(first_row_list) > 1:
logger.warning("diff got multiple rows for first argument. Taking the first one: "
f"{first_row_expression_list}")
if len(second_row_list) > 1:
logger.warning("diff got multiple rows for second argument. Taking the first one: "
f"{second_row_expression_list}")
first_row = first_row_list[0]
second_row = second_row_list[0]
try:
first_value = float(first_row[column_name])
second_value = float(second_row[column_name])
return first_value - second_value
except ValueError:
raise ExecutionError(f"Invalid column for diff: {column_name}")
@staticmethod
def date(year_string: str, month_string: str, day_string: str) -> Date:
"""
Takes three numbers as strings, and returns a ``Date`` object whose year, month, and day are
the three numbers in that order.
"""
try:
year = int(str(year_string))
month = int(str(month_string))
day = int(str(day_string))
return Date(year, month, day)
except ValueError:
raise ExecutionError(f"Invalid date! Got {year_string}, {month_string}, {day_string}")
| 48.23522 | 108 | 0.612721 |
617579dd380a4caf539e1789a77a386aefb8127a | 2,197 | py | Python | error_fit.py | chamillj/SDN-data-with-LSTM | 9dba368a5251872b1d2fef589520f137013112f8 | [
"MIT"
] | null | null | null | error_fit.py | chamillj/SDN-data-with-LSTM | 9dba368a5251872b1d2fef589520f137013112f8 | [
"MIT"
] | null | null | null | error_fit.py | chamillj/SDN-data-with-LSTM | 9dba368a5251872b1d2fef589520f137013112f8 | [
"MIT"
] | 2 | 2019-04-30T11:28:44.000Z | 2022-02-17T08:43:08.000Z | from keras.models import load_model
from data_import import analyse_data, prepare_data
import numpy as np
from lstm import build_lstm
import h5py
import matplotlib.pyplot as plt
from scipy.stats import norm, multivariate_normal
from sklearn.preprocessing import MinMaxScaler
import pickle
def generate_error(model, X, Y, scaler):
Y_hat = np.empty((len(X), Y.shape[1]))
for x in range(len(X)):
Y_hat[x] = model.predict(np.array([X[x,:,:]]), batch_size=1)
Y_unscaled = scaler.inverse_transform(Y)
Y_hat_unscaled = scaler.inverse_transform(Y_hat)
return Y_unscaled - Y_hat_unscaled
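# Anomaly-scoring sketch built on generate_error (illustrative only): fit a
# multivariate Gaussian to the prediction errors on normal data, then score
# new windows by their density under that fit; low density suggests anomalous
# traffic. The __main__ block below follows this recipe:
#
#     errors = generate_error(model, x_val, y_val, scaler)
#     mvn = multivariate_normal(np.mean(errors, axis=0),
#                               np.cov(errors, rowvar=False),
#                               allow_singular=True)
#     scores = mvn.pdf(generate_error(model, x_test, y_test, scaler))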
if __name__ == '__main__':
    # Load data
    # Validation set (normal traffic)
with h5py.File('Data/Infocom/Normal/data.h5', 'r') as hf:
x_val = hf['x_val'][:]
y_val = hf['y_val'][:]
with open('Data/Infocom/Normal/scaler.pkl', 'rb') as pkfile:
scaler = pickle.load(pkfile)
    # Test set (attack traffic)
with h5py.File('Data/Infocom/Attack/data.h5', 'r') as hf:
x_test = hf['x_test'][:]
y_test = hf['y_test'][:]
# with open('Data/Attack/scaler.pkl', 'rb') as pkfile:
# scaler_test = pickle.load(pkfile)
model = load_model('Data/Infocom/my_model.h5')
p_model = build_lstm((x_val.shape[1], x_val.shape[2]), 1, [50, 50])
model.save_weights('Data/Infocom/weights.h5')
p_model.load_weights('Data/Infocom/weights.h5')
errors = generate_error(p_model, x_val, y_val, scaler)
# plt.figure(1)
# plt.hist(errors[:,0], 50, normed=1, facecolor='green', alpha=0.5)
# plt.figure(2)
# plt.hist(errors[:, 1], 50, normed=1, facecolor='blue', alpha=0.5)
# plt.show()
mean = np.mean(errors, axis=0)
var = np.var(errors, axis=0)
cov = np.cov(errors, rowvar=False)
    mvn = multivariate_normal(mean=mean, cov=cov, allow_singular=True)
    # nd = [norm(mean[i], var[i]) for i in range(len(mean))]  # array of Gaussian distributions, one per feature
errors_p = generate_error(p_model, x_test, y_test, scaler)
plt.figure(1)
plt.plot(np.log10(mvn.pdf(errors)), 'r-')
plt.figure(2)
plt.plot(np.log10(mvn.pdf(errors_p)))
plt.show()
print("Done")
# scipy.stats.gaussian_kde(errors()) | 29.293333 | 108 | 0.651798 |
f67fba486e0bfa01da3a2e662a00fff396056b20 | 3,661 | py | Python | neurodsp/tests/sim/test_aperiodic.py | voytekresearch/neurodsp | a44845fb3638a5cc72b11eef340fb22e917c22e8 | [
"MIT"
] | 40 | 2017-06-21T08:56:04.000Z | 2019-01-24T03:36:10.000Z | neurodsp/tests/sim/test_aperiodic.py | voytekresearch/neurodsp | a44845fb3638a5cc72b11eef340fb22e917c22e8 | [
"MIT"
] | 106 | 2017-06-21T01:01:48.000Z | 2019-01-24T03:09:16.000Z | neurodsp/tests/sim/test_aperiodic.py | voytekresearch/neurodsp | a44845fb3638a5cc72b11eef340fb22e917c22e8 | [
"MIT"
] | 16 | 2017-06-20T18:58:16.000Z | 2018-11-23T17:20:27.000Z | """Tests for neurodsp.sim.aperiodic."""
import pytest
import numpy as np
from scipy.stats import skew, kurtosis
from scipy.optimize import curve_fit
from neurodsp.tests.settings import N_SECONDS, FS, EXP1, EXP2, KNEE, EPS
from neurodsp.tests.tutils import check_sim_output, check_exponent
from neurodsp.sim.aperiodic import *
from neurodsp.sim.aperiodic import _create_powerlaw
from neurodsp.spectral import compute_spectrum
###################################################################################################
###################################################################################################
def test_sim_poisson_pop():
sig = sim_poisson_pop(N_SECONDS, FS)
check_sim_output(sig)
def test_sim_synaptic_current():
sig = sim_synaptic_current(N_SECONDS, FS)
check_sim_output(sig)
def test_sim_knee():
# Build the signal and run a smoke test
sig = sim_knee(N_SECONDS, FS, EXP1, EXP2, KNEE)
check_sim_output(sig, N_SECONDS, FS)
# Check against the power spectrum when you take the Fourier transform
sig_len = int(FS*N_SECONDS)
freqs = np.linspace(0, FS/2, num=sig_len//2, endpoint=True)
# Ignore the DC component to avoid division by zero in the Lorentzian
freqs = freqs[1:]
    true_psd = 1 / (freqs ** -EXP1 * (freqs ** (-EXP2 - EXP1) + KNEE))
# Only look at the frequencies (ignoring DC component) up to the nyquist rate
sig_hat = np.fft.fft(sig)[1:sig_len//2]
numerical_psd = np.abs(sig_hat)**2
scale = numerical_psd / true_psd
    assert np.allclose(true_psd * scale, numerical_psd, atol=EPS)
# Accuracy test for a single exponent
sig = sim_knee(N_SECONDS, FS, 0, EXP2, KNEE)
freqs, powers = compute_spectrum(sig, FS, f_range=(1, 200))
def _estimate_single_knee(xs, offset, knee, exponent):
return np.zeros_like(xs) + offset - np.log10(xs**exponent + knee)
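    # The model above is the log10 of a Lorentzian with a single exponent,
    # power(f) = 10**offset / (f**exponent + knee); fitting it to the
    # simulated spectrum should recover KNEE and EXP2.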
ap_params, _ = curve_fit(_estimate_single_knee, freqs, np.log10(powers))
_, KNEE_hat, EXP2_hat = ap_params[:]
np.testing.assert_approx_equal(-EXP2_hat, EXP2, significant=1)
np.testing.assert_approx_equal(KNEE_hat, KNEE, significant=1)
def test_sim_random_walk():
sig = sim_random_walk(N_SECONDS, FS)
check_sim_output(sig)
def test_sim_powerlaw():
sig = sim_powerlaw(N_SECONDS, FS)
check_sim_output(sig)
# Test with a filter applied
sig = sim_powerlaw(N_SECONDS, FS, f_range=(2, None))
check_sim_output(sig)
@pytest.mark.parametrize('exponent', [-.5, 0, .5])
def test_sim_frac_gaussian_noise(exponent):
# Simulate & check time series
sig = sim_frac_gaussian_noise(N_SECONDS, FS, exponent=exponent)
check_sim_output(sig)
# Linear fit the log-log power spectrum & check error based on expected 1/f exponent
freqs = np.linspace(1, FS//2, num=FS//2)
powers = np.abs(np.fft.fft(sig)[1:FS//2 + 1]) ** 2
[_, exponent_hat], _ = curve_fit(check_exponent, np.log10(freqs), np.log10(powers))
assert abs(exponent_hat - exponent) < 0.2
@pytest.mark.parametrize('exponent', [-1.5, -2, -2.5])
def test_sim_frac_brownian_motion(exponent):
# Simulate & check time series
sig = sim_frac_brownian_motion(N_SECONDS, FS, exponent=exponent)
check_sim_output(sig)
# Linear fit the log-log power spectrum & check error based on expected 1/f exponent
freqs = np.linspace(1, FS//2, num=FS//2)
powers = np.abs(np.fft.fft(sig)[1:FS//2 + 1]) ** 2
[_, exponent_hat], _ = curve_fit(check_exponent, np.log10(freqs), np.log10(powers))
assert abs(exponent_hat - exponent) < 0.4
def test_create_powerlaw():
sig = _create_powerlaw(int(N_SECONDS*FS), FS, -2)
check_sim_output(sig)
| 33.898148 | 99 | 0.671128 |
c4e279d09562a4bc3000c0977727c41b60d71bf0 | 13,805 | py | Python | tests/templates/test_embeddings/test_qaoa_emb.py | doomhammerhell/pennylane | f147f22d8d99ba5891edd45a6a1f7dd679c8a23c | [
"Apache-2.0"
] | 712 | 2020-07-29T03:46:52.000Z | 2022-03-27T11:21:51.000Z | tests/templates/test_embeddings/test_qaoa_emb.py | doomhammerhell/pennylane | f147f22d8d99ba5891edd45a6a1f7dd679c8a23c | [
"Apache-2.0"
] | 1,627 | 2020-07-28T13:07:58.000Z | 2022-03-31T21:47:29.000Z | tests/templates/test_embeddings/test_qaoa_emb.py | doomhammerhell/pennylane | f147f22d8d99ba5891edd45a6a1f7dd679c8a23c | [
"Apache-2.0"
] | 249 | 2020-07-29T03:26:18.000Z | 2022-03-31T19:59:48.000Z | # Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for the QAOAEmbedding template.
"""
import pytest
import numpy as np
import pennylane as qml
from pennylane import numpy as pnp
class TestDecomposition:
"""Tests that the template defines the correct decomposition."""
QUEUES = [
(1, (1, 1), ["RX", "RY", "RX"]),
(2, (1, 3), ["RX", "RX", "MultiRZ", "RY", "RY", "RX", "RX"]),
(
2,
(2, 3),
["RX", "RX", "MultiRZ", "RY", "RY", "RX", "RX", "MultiRZ", "RY", "RY", "RX", "RX"],
),
(
3,
(1, 6),
["RX", "RX", "RX", "MultiRZ", "MultiRZ", "MultiRZ", "RY", "RY", "RY", "RX", "RX", "RX"],
),
]
@pytest.mark.parametrize("n_wires, weight_shape, expected_names", QUEUES)
def test_expansion(self, n_wires, weight_shape, expected_names):
"""Checks the queue for the default settings."""
features = list(range(n_wires))
weights = np.zeros(shape=weight_shape)
op = qml.templates.QAOAEmbedding(features, weights, wires=range(n_wires))
tape = op.expand()
for i, gate in enumerate(tape.operations):
assert gate.name == expected_names[i]
@pytest.mark.parametrize("local_field", ["X", "Y", "Z"])
def test_local_field(self, local_field):
"""Checks that custom local field is used."""
get_name = {"X": "RX", "Y": "RY", "Z": "RZ"}
features = list(range(2))
weights = np.zeros(shape=(1, 3))
op = qml.templates.QAOAEmbedding(features, weights, wires=range(2), local_field=local_field)
tape = op.expand()
gate_names = [gate.name for gate in tape.operations]
assert gate_names[3] == get_name[local_field]
assert gate_names[4] == get_name[local_field]
def test_exception_wrongrot(self):
"""Verifies exception raised if the
rotation strategy is unknown."""
n_wires = 1
weights = np.zeros(shape=(1, 1))
dev = qml.device("default.qubit", wires=n_wires)
@qml.qnode(dev)
def circuit(x=None):
qml.templates.QAOAEmbedding(
features=x, weights=weights, wires=range(n_wires), local_field="A"
)
return [qml.expval(qml.PauliZ(i)) for i in range(n_wires)]
with pytest.raises(ValueError, match="did not recognize"):
circuit(x=[1])
def test_state_zero_weights(self, qubit_device, n_subsystems, tol):
"""Checks the state is correct if the weights are zero."""
features = [np.pi, np.pi / 2, np.pi / 4, 0]
if n_subsystems == 1:
shp = (1, 1)
elif n_subsystems == 2:
shp = (1, 3)
else:
shp = (1, 2 * n_subsystems)
weights = np.zeros(shape=shp)
@qml.qnode(qubit_device)
def circuit(x=None):
qml.templates.QAOAEmbedding(features=x, weights=weights, wires=range(n_subsystems))
return [qml.expval(qml.PauliZ(i)) for i in range(n_subsystems)]
res = circuit(x=features[:n_subsystems])
target = [1, -1, 0, 1, 1]
assert np.allclose(res, target[:n_subsystems], atol=tol, rtol=0)
@pytest.mark.parametrize(
"weights, target",
[([[np.pi, 0, 0]], [1, 1]), ([[np.pi / 2, 0, 0]], [0, 0]), ([[0, 0, 0]], [-1, -1])],
)
def test_output_zz(self, weights, target, tol):
"""Checks the output if the features and entangler weights are nonzero,
which makes the circuit only depend on the ZZ gate."""
dev = qml.device("default.qubit", wires=2)
@qml.qnode(dev)
def circuit(x=None):
qml.templates.QAOAEmbedding(features=x, weights=weights, wires=range(2))
return [qml.expval(qml.PauliZ(i)) for i in range(2)]
res = circuit(x=[np.pi / 2, np.pi / 2])
assert np.allclose(res, target, atol=tol, rtol=0)
@pytest.mark.parametrize(
"n_wires, features, weights, target",
[
(2, [0], [[0, 0, np.pi / 2]], [1, 0]),
(3, [0, 0], [[0, 0, 0, 0, 0, np.pi / 2]], [1, 1, 0]),
],
)
def test_state_more_qubits_than_features(self, n_wires, features, weights, target, tol):
"""Checks the state is correct if there are more qubits than features."""
dev = qml.device("default.qubit", wires=n_wires)
@qml.qnode(dev)
def circuit(x=None):
qml.templates.QAOAEmbedding(
features=x, weights=weights, wires=range(n_wires), local_field="Z"
)
return [qml.expval(qml.PauliZ(i)) for i in range(n_wires)]
res = circuit(x=features)
assert np.allclose(res, target, atol=tol, rtol=0)
def test_custom_wire_labels(self, tol):
"""Test that template can deal with non-numeric, nonconsecutive wire labels."""
weights = np.random.random(size=(1, 6))
features = np.random.random(size=(3,))
dev = qml.device("default.qubit", wires=3)
dev2 = qml.device("default.qubit", wires=["z", "a", "k"])
@qml.qnode(dev)
def circuit():
qml.templates.QAOAEmbedding(features, weights, wires=range(3))
return qml.expval(qml.Identity(0))
@qml.qnode(dev2)
def circuit2():
qml.templates.QAOAEmbedding(features, weights, wires=["z", "a", "k"])
return qml.expval(qml.Identity("z"))
circuit()
circuit2()
assert np.allclose(dev.state, dev2.state, atol=tol, rtol=0)
class TestInputs:
"""Test inputs and pre-processing."""
def test_exception_fewer_qubits_than_features(
self,
):
"""Verifies that exception raised if there are fewer
wires than features."""
features = [0, 0, 0, 0]
n_wires = 1
weights = np.zeros(shape=(1, 2 * n_wires))
dev = qml.device("default.qubit", wires=n_wires)
@qml.qnode(dev)
def circuit(x=None):
qml.templates.QAOAEmbedding(features=x, weights=weights, wires=range(n_wires))
return [qml.expval(qml.PauliZ(i)) for i in range(n_wires)]
with pytest.raises(ValueError, match="Features must be of "):
circuit(x=features)
def test_exception_wrong_feature_shape(self):
"""Verifies that exception is raised if the shape of features is incorrect."""
n_wires = 1
weights = np.zeros(shape=(1, 1))
features = np.zeros(shape=(2, 1))
dev = qml.device("default.qubit", wires=n_wires)
@qml.qnode(dev)
def circuit():
qml.templates.QAOAEmbedding(features, weights, wires=range(n_wires))
return [qml.expval(qml.PauliZ(i)) for i in range(n_wires)]
with pytest.raises(ValueError, match="Features must be a one-dimensional"):
circuit()
@pytest.mark.parametrize(
"weights, n_wires",
[(np.zeros(shape=(1, 2)), 1), (np.zeros(shape=(1, 4)), 2), (np.zeros(shape=(1, 3)), 3)],
)
def test_exception_wrong_weight_shape(self, weights, n_wires):
"""Verifies that exception is raised if the shape of weights is incorrect."""
features = np.zeros(shape=(n_wires,))
dev = qml.device("default.qubit", wires=n_wires)
@qml.qnode(dev)
def circuit():
qml.templates.QAOAEmbedding(features, weights, wires=range(n_wires))
return qml.expval(qml.PauliZ(0))
with pytest.raises(ValueError, match="Weights tensor must be of shape"):
circuit()
@pytest.mark.parametrize(
"n_layers, n_wires, expected_shape",
[
(2, 3, (2, 6)),
(2, 1, (2, 1)),
(2, 2, (2, 3)),
],
)
def test_shape(self, n_layers, n_wires, expected_shape):
"""Test that the shape method returns the correct shape of the weights tensor"""
shape = qml.templates.QAOAEmbedding.shape(n_layers, n_wires)
assert shape == expected_shape
def test_id(self):
"""Tests that the id attribute can be set."""
template = qml.templates.QAOAEmbedding(
np.array([0]), weights=np.array([[0]]), wires=[0], id="a"
)
assert template.id == "a"
def circuit_template(features, weights):
qml.templates.QAOAEmbedding(features, weights, range(2))
return qml.expval(qml.PauliZ(0))
def circuit_decomposed(features, weights):
qml.RX(features[0], wires=0)
qml.RX(features[1], wires=1)
qml.MultiRZ(weights[0, 0], wires=[0, 1])
qml.RY(weights[0, 1], wires=0)
qml.RY(weights[0, 2], wires=1)
qml.RX(features[0], wires=0)
qml.RX(features[1], wires=1)
return qml.expval(qml.PauliZ(0))
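# Note: ``circuit_decomposed`` writes out one QAOAEmbedding layer on two wires
# by hand; it matches the (2, (1, 3)) gate queue in ``TestDecomposition.QUEUES``
# and serves as the reference circuit for the interface tests below.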
class TestInterfaces:
"""Tests that the template is compatible with all interfaces, including the computation
of gradients."""
def test_list_and_tuples(self, tol):
"""Tests common iterables as inputs."""
features = [0.1, -1.3]
weights = [[0.1, -1.1, 0.2]]
dev = qml.device("default.qubit", wires=2)
circuit = qml.QNode(circuit_template, dev)
circuit2 = qml.QNode(circuit_decomposed, dev)
res = circuit(features, weights)
res2 = circuit2(features, weights)
assert qml.math.allclose(res, res2, atol=tol, rtol=0)
res = circuit(tuple(features), tuple(weights))
res2 = circuit2(tuple(features), tuple(weights))
assert qml.math.allclose(res, res2, atol=tol, rtol=0)
def test_autograd(self, tol):
"""Tests the autograd interface."""
features = np.random.random(size=(2,))
features = pnp.array(features, requires_grad=True)
weights = np.random.random(size=(1, 3))
weights = pnp.array(weights, requires_grad=True)
dev = qml.device("default.qubit", wires=2)
circuit = qml.QNode(circuit_template, dev)
circuit2 = qml.QNode(circuit_decomposed, dev)
res = circuit(features, weights)
res2 = circuit2(features, weights)
assert qml.math.allclose(res, res2, atol=tol, rtol=0)
grad_fn = qml.grad(circuit)
grads = grad_fn(features, weights)
grad_fn2 = qml.grad(circuit2)
grads2 = grad_fn2(features, weights)
assert np.allclose(grads[0], grads2[0], atol=tol, rtol=0)
assert np.allclose(grads[1], grads2[1], atol=tol, rtol=0)
def test_jax(self, tol):
"""Tests the jax interface."""
jax = pytest.importorskip("jax")
import jax.numpy as jnp
features = jnp.array(np.random.random(size=(2,)))
weights = jnp.array(np.random.random(size=(1, 3)))
dev = qml.device("default.qubit", wires=2)
circuit = qml.QNode(circuit_template, dev, interface="jax")
circuit2 = qml.QNode(circuit_decomposed, dev, interface="jax")
res = circuit(features, weights)
res2 = circuit2(features, weights)
assert qml.math.allclose(res, res2, atol=tol, rtol=0)
grad_fn = jax.grad(circuit)
grads = grad_fn(features, weights)
grad_fn2 = jax.grad(circuit2)
grads2 = grad_fn2(features, weights)
assert np.allclose(grads[0], grads2[0], atol=tol, rtol=0)
assert np.allclose(grads[1], grads2[1], atol=tol, rtol=0)
def test_tf(self, tol):
"""Tests the tf interface."""
tf = pytest.importorskip("tensorflow")
features = tf.Variable(np.random.random(size=(2,)))
weights = tf.Variable(np.random.random(size=(1, 3)))
dev = qml.device("default.qubit", wires=2)
circuit = qml.QNode(circuit_template, dev, interface="tf")
circuit2 = qml.QNode(circuit_decomposed, dev, interface="tf")
res = circuit(features, weights)
res2 = circuit2(features, weights)
assert qml.math.allclose(res, res2, atol=tol, rtol=0)
with tf.GradientTape() as tape:
res = circuit(features, weights)
grads = tape.gradient(res, [features, weights])
with tf.GradientTape() as tape2:
res2 = circuit2(features, weights)
grads2 = tape2.gradient(res2, [features, weights])
assert np.allclose(grads[0], grads2[0], atol=tol, rtol=0)
assert np.allclose(grads[1], grads2[1], atol=tol, rtol=0)
def test_torch(self, tol):
"""Tests the torch interface."""
torch = pytest.importorskip("torch")
features = torch.tensor(np.random.random(size=(2,)), requires_grad=True)
weights = torch.tensor(np.random.random(size=(1, 3)), requires_grad=True)
dev = qml.device("default.qubit", wires=2)
circuit = qml.QNode(circuit_template, dev, interface="torch")
circuit2 = qml.QNode(circuit_decomposed, dev, interface="torch")
res = circuit(features, weights)
res2 = circuit2(features, weights)
assert qml.math.allclose(res, res2, atol=tol, rtol=0)
res = circuit(features, weights)
res.backward()
grads = [features.grad, weights.grad]
res2 = circuit2(features, weights)
res2.backward()
grads2 = [features.grad, weights.grad]
assert np.allclose(grads[0], grads2[0], atol=tol, rtol=0)
assert np.allclose(grads[1], grads2[1], atol=tol, rtol=0)
| 34.598997 | 100 | 0.601087 |
c154e2d220dde63638adee2f375c5e018fccff46 | 1,224 | py | Python | pyfastq_reader/__init__.py | ahcm/pyfastq_reader | d65ff88f17b7515587909407fca620ad50a36fe5 | [
"MIT"
] | 1 | 2019-01-28T09:31:41.000Z | 2019-01-28T09:31:41.000Z | pyfastq_reader/__init__.py | ahcm/pyfastq_reader | d65ff88f17b7515587909407fca620ad50a36fe5 | [
"MIT"
] | null | null | null | pyfastq_reader/__init__.py | ahcm/pyfastq_reader | d65ff88f17b7515587909407fca620ad50a36fe5 | [
"MIT"
] | 1 | 2020-04-11T04:58:27.000Z | 2020-04-11T04:58:27.000Z | #!/usr/bin/env python3
# MIT License see LICENSE
# -- Andy Hauser <Andreas.Hauser@LMU.de>
from __future__ import print_function
import sys
def fasta_reader(filename):
return fasta_reader_fh(open(filename, 'r'))
def fasta_reader_fh(infile):
name = infile.readline().rstrip()
while True:
seq = ""
for s in infile:
if len(s) > 0 and s[0] == '>':
yield name,seq
name = s.rstrip()
break
else:
seq += s.rstrip()
else:
yield name, seq
return
def fastq_reader(filename):
return fastq_reader_fh(open(filename, 'r'))
def fastq_reader_fh(infile):
name = infile.readline().rstrip()
while True:
seq = ""
for s in infile:
if s[0] == '+':
        # '+' separator line; its content is ignored
break
else:
seq += s.rstrip()
qual = ""
for q in infile:
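      # Heuristic record boundary: a line starting with '@' once at least one
      # quality line has been read. This assumes a quality string never starts
      # a new line with '@', which the FASTQ format does not guarantee.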
if len(qual) > 0 and q[0] == '@':
yield name, seq, qual
name = q.rstrip()
break
else:
qual += q.rstrip()
else:
yield name, seq, qual
return
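# Usage sketch (hypothetical file names): both readers are generators, so
# records are streamed rather than loaded all at once.
#
#   for name, seq in fasta_reader("contigs.fasta"):
#     print(name, len(seq))
#   for name, seq, qual in fastq_reader("reads.fastq"):
#     assert len(seq) == len(qual)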
def main():
for filename in sys.argv[1:]:
count = 0
for head, seq, qual in fastq_reader(filename):
count += 1
print(count)
if __name__ == '__main__':
main()
| 19.741935 | 50 | 0.564542 |
0f29cd8b96bf5e1b9eebe2569ef7cd27216418f4 | 904 | py | Python | tests/asyncio/test_worker.py | grignards/sqlalchemy_aio | cbcc9e4503cd0183fa6d12db9ea812544867e1c3 | [
"MIT"
] | 321 | 2016-10-04T12:58:42.000Z | 2022-01-19T13:47:53.000Z | tests/asyncio/test_worker.py | grignards/sqlalchemy_aio | cbcc9e4503cd0183fa6d12db9ea812544867e1c3 | [
"MIT"
] | 42 | 2017-07-10T17:41:51.000Z | 2022-03-16T08:59:56.000Z | tests/asyncio/test_worker.py | grignards/sqlalchemy_aio | cbcc9e4503cd0183fa6d12db9ea812544867e1c3 | [
"MIT"
] | 25 | 2016-10-10T08:45:31.000Z | 2021-08-04T05:44:37.000Z | import asyncio
import pytest
from sqlalchemy_aio import AlreadyQuit
from sqlalchemy_aio.asyncio import AsyncioThreadWorker
@pytest.mark.asyncio
async def test_already_quit():
worker = AsyncioThreadWorker()
await worker.quit()
with pytest.raises(AlreadyQuit):
await worker.run(lambda: None)
with pytest.raises(AlreadyQuit):
await worker.quit()
@pytest.mark.asyncio
async def test_interrupted_run():
worker = AsyncioThreadWorker()
loop = asyncio.get_event_loop()
event = asyncio.Event()
async def set_event():
event.set()
def returns_number(number):
asyncio.run_coroutine_threadsafe(set_event(), loop)
return number
task = asyncio.ensure_future(worker.run(returns_number, [2]))
await event.wait()
task.cancel()
value = await worker.run(returns_number, [3])
assert 3 == value
await worker.quit()
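# Minimal usage sketch inside a coroutine (assumption: only the run()/quit()
# surface exercised above is relied on here):
#
#     worker = AsyncioThreadWorker()
#     result = await worker.run(sum, [[1, 2, 3]])  # executes in the worker thread
#     await worker.quit()                          # further run() raises AlreadyQuit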
| 22.6 | 65 | 0.701327 |
13a037a5dcb50998cf07b29f3cb8f576ac9507f5 | 1,088 | py | Python | setup.py | oshaughnessy/cdk-sam-lambda-rest | f499793a59618d0fb6df826ceb0dcebb4ac99db9 | [
"MIT"
] | 1 | 2021-08-13T15:52:49.000Z | 2021-08-13T15:52:49.000Z | setup.py | oshaughnessy/cdk-sam-lambda-rest | f499793a59618d0fb6df826ceb0dcebb4ac99db9 | [
"MIT"
] | null | null | null | setup.py | oshaughnessy/cdk-sam-lambda-rest | f499793a59618d0fb6df826ceb0dcebb4ac99db9 | [
"MIT"
] | null | null | null | import setuptools
with open("README.md") as fp:
long_description = fp.read()
setuptools.setup(
name="cdk_sam_lambda_rest",
version="0.0.1",
description="An empty CDK Python app",
long_description=long_description,
long_description_content_type="text/markdown",
author="author",
package_dir={"": "cdklib"},
packages=setuptools.find_packages(where="cdklib"),
install_requires=[
"aws-cdk.core>=1.111",
"aws-cdk.aws-apigatewayv2",
"aws-cdk.aws-apigatewayv2-integrations",
"aws-cdk.aws-lambda"
],
python_requires=">=3.6",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Programming Language :: JavaScript",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Software Development :: Code Generators",
"Topic :: Utilities",
"Typing :: Typed",
],
)
| 23.148936 | 59 | 0.604779 |
2afc89eedac655f72de7cdc4badf3469a55e3218 | 23 | py | Python | gfapy/line/group/path/__init__.py | ujjwalsh/gfapy | 891ef3df695f20c67809e5a54549c876d90690b4 | [
"ISC"
] | 44 | 2017-03-18T08:08:04.000Z | 2021-11-10T16:11:15.000Z | gfapy/line/group/path/__init__.py | ujjwalsh/gfapy | 891ef3df695f20c67809e5a54549c876d90690b4 | [
"ISC"
] | 22 | 2017-04-04T21:20:31.000Z | 2022-03-09T19:05:30.000Z | gfapy/line/group/path/__init__.py | ujjwalsh/gfapy | 891ef3df695f20c67809e5a54549c876d90690b4 | [
"ISC"
] | 5 | 2017-07-07T02:56:56.000Z | 2020-09-30T20:10:49.000Z | from .path import Path
| 11.5 | 22 | 0.782609 |
726c25aeb160808d2931dc704201428b040afac6 | 17,463 | py | Python | python/ray/tune/tests/test_cluster.py | mikewlange/ray | 48fdebf9569bef6f1259c100c719efad696b78e3 | [
"Apache-2.0"
] | 1 | 2019-12-09T03:10:57.000Z | 2019-12-09T03:10:57.000Z | python/ray/tune/tests/test_cluster.py | anhuaxiang/ray | 1a9948eef940e702aad07dac4d89db0113a7e3d4 | [
"Apache-2.0"
] | null | null | null | python/ray/tune/tests/test_cluster.py | anhuaxiang/ray | 1a9948eef940e702aad07dac4d89db0113a7e3d4 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import json
import time
import os
import pytest
import shutil
import sys
import ray
from ray import tune
from ray.rllib import _register_all
from ray.cluster_utils import Cluster
from ray.test_utils import run_string_as_driver_nonblocking
from ray.tune.error import TuneError
from ray.tune.ray_trial_executor import RayTrialExecutor
from ray.tune.experiment import Experiment
from ray.tune.trial import Trial
from ray.tune.resources import Resources
from ray.tune.trial_runner import TrialRunner
from ray.tune.suggest import BasicVariantGenerator
if sys.version_info >= (3, 3):
from unittest.mock import MagicMock
else:
from mock import MagicMock
def _start_new_cluster():
cluster = Cluster(
initialize_head=True,
connect=True,
head_node_args={
"num_cpus": 1,
"_internal_config": json.dumps({
"num_heartbeats_timeout": 10
})
})
# Pytest doesn't play nicely with imports
_register_all()
return cluster
@pytest.fixture
def start_connected_cluster():
# Start the Ray processes.
cluster = _start_new_cluster()
yield cluster
# The code after the yield will run as teardown code.
ray.shutdown()
cluster.shutdown()
@pytest.fixture
def start_connected_emptyhead_cluster():
"""Starts head with no resources."""
cluster = Cluster(
initialize_head=True,
connect=True,
head_node_args={
"num_cpus": 0,
"_internal_config": json.dumps({
"num_heartbeats_timeout": 10
})
})
# Pytest doesn't play nicely with imports
_register_all()
yield cluster
# The code after the yield will run as teardown code.
ray.shutdown()
cluster.shutdown()
def test_counting_resources(start_connected_cluster):
"""Tests that Tune accounting is consistent with actual cluster."""
cluster = start_connected_cluster
nodes = []
assert ray.cluster_resources()["CPU"] == 1
runner = TrialRunner(BasicVariantGenerator())
kwargs = {"stopping_criterion": {"training_iteration": 10}}
trials = [Trial("__fake", **kwargs), Trial("__fake", **kwargs)]
for t in trials:
runner.add_trial(t)
runner.step() # run 1
nodes += [cluster.add_node(num_cpus=1)]
cluster.wait_for_nodes()
assert ray.cluster_resources()["CPU"] == 2
cluster.remove_node(nodes.pop())
cluster.wait_for_nodes()
assert ray.cluster_resources()["CPU"] == 1
runner.step() # run 2
assert sum(t.status == Trial.RUNNING for t in runner.get_trials()) == 1
for i in range(5):
nodes += [cluster.add_node(num_cpus=1)]
cluster.wait_for_nodes()
assert ray.cluster_resources()["CPU"] == 6
runner.step() # 1 result
assert sum(t.status == Trial.RUNNING for t in runner.get_trials()) == 2
def test_trial_processed_after_node_failure(start_connected_emptyhead_cluster):
"""Tests that Tune processes a trial as failed if its node died."""
cluster = start_connected_emptyhead_cluster
node = cluster.add_node(num_cpus=1)
cluster.wait_for_nodes()
runner = TrialRunner(BasicVariantGenerator())
mock_process_failure = MagicMock(side_effect=runner._process_trial_failure)
runner._process_trial_failure = mock_process_failure
runner.add_trial(Trial("__fake"))
runner.step()
runner.step()
assert not mock_process_failure.called
cluster.remove_node(node)
runner.step()
assert mock_process_failure.called
def test_remove_node_before_result(start_connected_emptyhead_cluster):
"""Tune continues when node is removed before trial returns."""
cluster = start_connected_emptyhead_cluster
node = cluster.add_node(num_cpus=1)
cluster.wait_for_nodes()
runner = TrialRunner(BasicVariantGenerator())
kwargs = {
"stopping_criterion": {
"training_iteration": 3
},
"checkpoint_freq": 2,
"max_failures": 2
}
trial = Trial("__fake", **kwargs)
runner.add_trial(trial)
runner.step() # run 1
assert trial.status == Trial.RUNNING
cluster.remove_node(node)
cluster.add_node(num_cpus=1)
cluster.wait_for_nodes()
assert ray.cluster_resources()["CPU"] == 1
for i in range(3):
runner.step()
assert trial.status == Trial.TERMINATED
with pytest.raises(TuneError):
runner.step()
def test_queue_trials(start_connected_emptyhead_cluster):
"""Tests explicit oversubscription for autoscaling.
Tune oversubscribes a trial when `queue_trials=True`, but
does not block other trials from running.
"""
cluster = start_connected_emptyhead_cluster
runner = TrialRunner()
def create_trial(cpu, gpu=0):
kwargs = {
"resources": Resources(cpu=cpu, gpu=gpu),
"stopping_criterion": {
"training_iteration": 3
}
}
return Trial("__fake", **kwargs)
runner.add_trial(create_trial(cpu=1))
with pytest.raises(TuneError):
runner.step() # run 1
del runner
executor = RayTrialExecutor(queue_trials=True)
runner = TrialRunner(trial_executor=executor)
cluster.add_node(num_cpus=2)
cluster.wait_for_nodes()
cpu_only = create_trial(cpu=1)
runner.add_trial(cpu_only)
runner.step() # add cpu_only trial
gpu_trial = create_trial(cpu=1, gpu=1)
runner.add_trial(gpu_trial)
runner.step() # queue gpu_trial
# This tests that the cpu_only trial should bypass the queued trial.
for i in range(3):
runner.step()
assert cpu_only.status == Trial.TERMINATED
assert gpu_trial.status == Trial.RUNNING
# Scale up
cluster.add_node(num_cpus=1, num_gpus=1)
cluster.wait_for_nodes()
for i in range(3):
runner.step()
assert gpu_trial.status == Trial.TERMINATED
def test_trial_migration(start_connected_emptyhead_cluster):
"""Removing a node while cluster has space should migrate trial.
The trial state should also be consistent with the checkpoint.
"""
cluster = start_connected_emptyhead_cluster
node = cluster.add_node(num_cpus=1)
cluster.wait_for_nodes()
runner = TrialRunner(BasicVariantGenerator())
kwargs = {
"stopping_criterion": {
"training_iteration": 3
},
"checkpoint_freq": 2,
"max_failures": 2
}
# Test recovery of trial that hasn't been checkpointed
t = Trial("__fake", **kwargs)
runner.add_trial(t)
runner.step() # start
runner.step() # 1 result
assert t.last_result
node2 = cluster.add_node(num_cpus=1)
cluster.remove_node(node)
cluster.wait_for_nodes()
runner.step() # Recovery step
# TODO(rliaw): This assertion is not critical but will not pass
# because checkpoint handling is messy and should be refactored
# rather than hotfixed.
# assert t.last_result is None, "Trial result not restored correctly."
for i in range(3):
runner.step()
assert t.status == Trial.TERMINATED
# Test recovery of trial that has been checkpointed
t2 = Trial("__fake", **kwargs)
runner.add_trial(t2)
runner.step() # start
runner.step() # 1 result
runner.step() # 2 result and checkpoint
assert t2.has_checkpoint()
node3 = cluster.add_node(num_cpus=1)
cluster.remove_node(node2)
cluster.wait_for_nodes()
runner.step() # Recovery step
assert t2.last_result["training_iteration"] == 2
for i in range(1):
runner.step()
assert t2.status == Trial.TERMINATED
# Test recovery of trial that won't be checkpointed
t3 = Trial("__fake", **{"stopping_criterion": {"training_iteration": 3}})
runner.add_trial(t3)
runner.step() # start
runner.step() # 1 result
cluster.add_node(num_cpus=1)
cluster.remove_node(node3)
cluster.wait_for_nodes()
runner.step() # Error handling step
assert t3.status == Trial.ERROR
with pytest.raises(TuneError):
runner.step()
def test_trial_requeue(start_connected_emptyhead_cluster):
"""Removing a node in full cluster causes Trial to be requeued."""
cluster = start_connected_emptyhead_cluster
node = cluster.add_node(num_cpus=1)
cluster.wait_for_nodes()
runner = TrialRunner(BasicVariantGenerator())
kwargs = {
"stopping_criterion": {
"training_iteration": 5
},
"checkpoint_freq": 1,
"max_failures": 1
}
trials = [Trial("__fake", **kwargs), Trial("__fake", **kwargs)]
for t in trials:
runner.add_trial(t)
runner.step() # start
runner.step() # 1 result
cluster.remove_node(node)
cluster.wait_for_nodes()
runner.step()
assert all(t.status == Trial.PENDING for t in trials)
with pytest.raises(TuneError):
runner.step()
def test_migration_checkpoint_removal(start_connected_emptyhead_cluster):
"""Test checks that trial restarts if checkpoint is lost w/ node fail."""
cluster = start_connected_emptyhead_cluster
node = cluster.add_node(num_cpus=1)
cluster.wait_for_nodes()
runner = TrialRunner(BasicVariantGenerator())
kwargs = {
"stopping_criterion": {
"training_iteration": 3
},
"checkpoint_freq": 2,
"max_failures": 2
}
# Test recovery of trial that has been checkpointed
t1 = Trial("__fake", **kwargs)
runner.add_trial(t1)
runner.step() # start
runner.step() # 1 result
runner.step() # 2 result and checkpoint
assert t1.has_checkpoint()
cluster.add_node(num_cpus=1)
cluster.remove_node(node)
cluster.wait_for_nodes()
shutil.rmtree(os.path.dirname(t1.checkpoint.value))
runner.step() # Recovery step
for i in range(3):
runner.step()
assert t1.status == Trial.TERMINATED
def test_cluster_down_simple(start_connected_cluster, tmpdir):
"""Tests that TrialRunner save/restore works on cluster shutdown."""
cluster = start_connected_cluster
cluster.add_node(num_cpus=1)
cluster.wait_for_nodes()
dirpath = str(tmpdir)
runner = TrialRunner(local_checkpoint_dir=dirpath, checkpoint_period=0)
kwargs = {
"stopping_criterion": {
"training_iteration": 2
},
"checkpoint_freq": 1,
"max_failures": 1
}
trials = [Trial("__fake", **kwargs), Trial("__fake", **kwargs)]
for t in trials:
runner.add_trial(t)
runner.step() # start
runner.step() # start2
runner.step() # step
assert all(t.status == Trial.RUNNING for t in runner.get_trials())
runner.checkpoint()
ray.shutdown()
cluster.shutdown()
cluster = _start_new_cluster()
runner = TrialRunner(resume="LOCAL", local_checkpoint_dir=dirpath)
runner.step() # start
runner.step() # start2
for i in range(3):
runner.step()
with pytest.raises(TuneError):
runner.step()
assert all(t.status == Trial.TERMINATED for t in runner.get_trials())
ray.shutdown()
cluster.shutdown()
def test_cluster_down_full(start_connected_cluster, tmpdir):
"""Tests that run_experiment restoring works on cluster shutdown."""
cluster = start_connected_cluster
dirpath = str(tmpdir)
exp1_args = dict(
run="__fake",
stop=dict(training_iteration=3),
local_dir=dirpath,
checkpoint_freq=1)
exp2_args = dict(run="__fake", stop=dict(training_iteration=3))
exp3_args = dict(
run="__fake",
stop=dict(training_iteration=3),
config=dict(mock_error=True))
exp4_args = dict(
run="__fake",
stop=dict(training_iteration=3),
config=dict(mock_error=True),
checkpoint_freq=1)
all_experiments = {
"exp1": exp1_args,
"exp2": exp2_args,
"exp3": exp3_args,
"exp4": exp4_args
}
tune.run_experiments(all_experiments, raise_on_failed_trial=False)
ray.shutdown()
cluster.shutdown()
cluster = _start_new_cluster()
trials = tune.run_experiments(
all_experiments, resume=True, raise_on_failed_trial=False)
assert len(trials) == 4
assert all(t.status in [Trial.TERMINATED, Trial.ERROR] for t in trials)
ray.shutdown()
cluster.shutdown()
@pytest.mark.skip(reason="Not very consistent.")
def test_cluster_rllib_restore(start_connected_cluster, tmpdir):
cluster = start_connected_cluster
dirpath = str(tmpdir)
script = """
import time
import ray
from ray import tune
ray.init(address="{address}")
tune.run(
"PG",
name="experiment",
config=dict(env="CartPole-v1"),
stop=dict(training_iteration=10),
local_dir="{checkpoint_dir}",
checkpoint_freq=1,
max_failures=1,
raise_on_failed_trial=False)
""".format(
address=cluster.address, checkpoint_dir=dirpath)
run_string_as_driver_nonblocking(script)
# Wait until the right checkpoint is saved.
# The trainable returns every 0.5 seconds, so this should not miss
# the checkpoint.
local_checkpoint_dir = os.path.join(dirpath, "experiment")
for i in range(100):
if TrialRunner.checkpoint_exists(local_checkpoint_dir):
# Inspect the internal trialrunner
runner = TrialRunner(
resume="LOCAL", local_checkpoint_dir=local_checkpoint_dir)
trials = runner.get_trials()
last_res = trials[0].last_result
if last_res and last_res.get("training_iteration"):
break
time.sleep(0.3)
if not TrialRunner.checkpoint_exists(local_checkpoint_dir):
raise RuntimeError("Checkpoint file didn't appear.")
ray.shutdown()
cluster.shutdown()
cluster = _start_new_cluster()
cluster.wait_for_nodes()
# Restore properly from checkpoint
trials2 = tune.run_experiments(
{
"experiment": {
"run": "PG",
"checkpoint_freq": 1,
"local_dir": dirpath
}
},
resume=True)
assert all(t.status == Trial.TERMINATED for t in trials2)
ray.shutdown()
cluster.shutdown()
def test_cluster_interrupt(start_connected_cluster, tmpdir):
"""Tests run_experiment on cluster shutdown with actual interrupt.
This is an end-to-end test.
"""
cluster = start_connected_cluster
dirpath = str(tmpdir)
# Needs to be in scope for pytest
class _Mock(tune.Trainable):
"""Finishes on the 4th iteration."""
def _setup(self, config):
self.state = {"hi": 0}
def _train(self):
self.state["hi"] += 1
time.sleep(0.5)
return {"done": self.state["hi"] >= 4}
def _save(self, path):
return self.state
def _restore(self, state):
self.state = state
# Removes indent from class.
reformatted = "\n".join(line[4:] if len(line) else line
for line in inspect.getsource(_Mock).split("\n"))
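    # Equivalent in spirit to textwrap.dedent; the slicing assumes the class
    # body is uniformly indented by four spaces.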
script = """
import time
import ray
from ray import tune
ray.init(address="{address}")
{fail_class_code}
tune.run(
{fail_class},
name="experiment",
stop=dict(training_iteration=5),
local_dir="{checkpoint_dir}",
checkpoint_freq=1,
global_checkpoint_period=0,
max_failures=1,
raise_on_failed_trial=False)
""".format(
address=cluster.address,
checkpoint_dir=dirpath,
fail_class_code=reformatted,
fail_class=_Mock.__name__)
run_string_as_driver_nonblocking(script)
# Wait until the right checkpoint is saved.
# The trainable returns every 0.5 seconds, so this should not miss
# the checkpoint.
local_checkpoint_dir = os.path.join(dirpath, "experiment")
for i in range(50):
if TrialRunner.checkpoint_exists(local_checkpoint_dir):
# Inspect the internal trialrunner
runner = TrialRunner(
resume="LOCAL", local_checkpoint_dir=local_checkpoint_dir)
trials = runner.get_trials()
last_res = trials[0].last_result
if last_res and last_res.get("training_iteration") == 3:
break
time.sleep(0.2)
if not TrialRunner.checkpoint_exists(local_checkpoint_dir):
raise RuntimeError("Checkpoint file didn't appear.")
ray.shutdown()
cluster.shutdown()
cluster = _start_new_cluster()
Experiment.register_if_needed(_Mock)
# Inspect the internal trialrunner
runner = TrialRunner(
resume="LOCAL", local_checkpoint_dir=local_checkpoint_dir)
trials = runner.get_trials()
assert trials[0].last_result["training_iteration"] == 3
assert trials[0].status == Trial.PENDING
# Restore properly from checkpoint
trials2 = tune.run_experiments(
{
"experiment": {
"run": _Mock,
"local_dir": dirpath,
"checkpoint_freq": 1
}
},
resume=True,
raise_on_failed_trial=False)
assert all(t.status == Trial.TERMINATED for t in trials2)
assert {t.trial_id for t in trials2} == {t.trial_id for t in trials}
ray.shutdown()
cluster.shutdown()
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
| 29.008306 | 79 | 0.660826 |
2fba2a36d7b0c4093f5cabc19f0bcee33b09742e | 59,693 | py | Python | test/test_support.py | stefanor/pyexpat-cffi | b8d3353081d62b5cb7b97643d30eb39496b059b3 | [
"PSF-2.0"
] | null | null | null | test/test_support.py | stefanor/pyexpat-cffi | b8d3353081d62b5cb7b97643d30eb39496b059b3 | [
"PSF-2.0"
] | null | null | null | test/test_support.py | stefanor/pyexpat-cffi | b8d3353081d62b5cb7b97643d30eb39496b059b3 | [
"PSF-2.0"
] | null | null | null | """Supporting definitions for the Python regression tests."""
if __name__ != 'test.test_support':
raise ImportError('test_support must be imported from the test package')
import contextlib
import errno
import functools
import gc
import socket
import sys
import os
import platform
import shutil
import warnings
import unittest
import importlib
import UserDict
import re
import time
import struct
import sysconfig
try:
import thread
except ImportError:
thread = None
__all__ = ["Error", "TestFailed", "ResourceDenied", "import_module",
"verbose", "use_resources", "max_memuse", "record_original_stdout",
"get_original_stdout", "unload", "unlink", "rmtree", "forget",
"is_resource_enabled", "requires", "requires_mac_ver",
"find_unused_port", "bind_port",
"fcmp", "have_unicode", "is_jython", "TESTFN", "HOST", "FUZZ",
"SAVEDCWD", "temp_cwd", "findfile", "sortdict", "check_syntax_error",
"open_urlresource", "check_warnings", "check_py3k_warnings",
"CleanImport", "EnvironmentVarGuard", "captured_output",
"captured_stdout", "TransientResource", "transient_internet",
"run_with_locale", "set_memlimit", "bigmemtest", "bigaddrspacetest",
"BasicTestRunner", "run_unittest", "run_doctest", "threading_setup",
"threading_cleanup", "reap_threads", "start_threads", "cpython_only",
"check_impl_detail", "get_attribute", "py3k_bytes",
"import_fresh_module", "threading_cleanup", "reap_children",
"strip_python_stderr", "IPV6_ENABLED", "run_with_tz"]
class Error(Exception):
"""Base class for regression test exceptions."""
class TestFailed(Error):
"""Test failed."""
class ResourceDenied(unittest.SkipTest):
"""Test skipped because it requested a disallowed resource.
This is raised when a test calls requires() for a resource that
has not been enabled. It is used to distinguish between expected
and unexpected skips.
"""
@contextlib.contextmanager
def _ignore_deprecated_imports(ignore=True):
"""Context manager to suppress package and module deprecation
warnings when importing them.
If ignore is False, this context manager has no effect."""
if ignore:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", ".+ (module|package)",
DeprecationWarning)
yield
else:
yield
def import_module(name, deprecated=False):
"""Import and return the module to be tested, raising SkipTest if
it is not available.
If deprecated is True, any module or package deprecation messages
will be suppressed."""
with _ignore_deprecated_imports(deprecated):
try:
return importlib.import_module(name)
except ImportError, msg:
raise unittest.SkipTest(str(msg))
def _save_and_remove_module(name, orig_modules):
"""Helper function to save and remove a module from sys.modules
Raise ImportError if the module can't be imported."""
# try to import the module and raise an error if it can't be imported
if name not in sys.modules:
__import__(name)
del sys.modules[name]
for modname in list(sys.modules):
if modname == name or modname.startswith(name + '.'):
orig_modules[modname] = sys.modules[modname]
del sys.modules[modname]
def _save_and_block_module(name, orig_modules):
"""Helper function to save and block a module in sys.modules
Return True if the module was in sys.modules, False otherwise."""
saved = True
try:
orig_modules[name] = sys.modules[name]
except KeyError:
saved = False
sys.modules[name] = None
return saved
def import_fresh_module(name, fresh=(), blocked=(), deprecated=False):
"""Imports and returns a module, deliberately bypassing the sys.modules cache
and importing a fresh copy of the module. Once the import is complete,
the sys.modules cache is restored to its original state.
Modules named in fresh are also imported anew if needed by the import.
If one of these modules can't be imported, None is returned.
Importing of modules named in blocked is prevented while the fresh import
takes place.
If deprecated is True, any module or package deprecation messages
will be suppressed."""
# NOTE: test_heapq, test_json, and test_warnings include extra sanity
# checks to make sure that this utility function is working as expected
with _ignore_deprecated_imports(deprecated):
# Keep track of modules saved for later restoration as well
# as those which just need a blocking entry removed
orig_modules = {}
names_to_remove = []
_save_and_remove_module(name, orig_modules)
try:
for fresh_name in fresh:
_save_and_remove_module(fresh_name, orig_modules)
for blocked_name in blocked:
if not _save_and_block_module(blocked_name, orig_modules):
names_to_remove.append(blocked_name)
fresh_module = importlib.import_module(name)
except ImportError:
fresh_module = None
finally:
for orig_name, module in orig_modules.items():
sys.modules[orig_name] = module
for name_to_remove in names_to_remove:
del sys.modules[name_to_remove]
return fresh_module
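# A typical use (sketch, mirroring CPython's own tests): import the pure
# Python and accelerated variants of a module side by side, e.g.
#
#     py_json = import_fresh_module('json', blocked=['_json'])
#     c_json = import_fresh_module('json', fresh=['_json'])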
def get_attribute(obj, name):
"""Get an attribute, raising SkipTest if AttributeError is raised."""
try:
attribute = getattr(obj, name)
except AttributeError:
raise unittest.SkipTest("module %s has no attribute %s" % (
obj.__name__, name))
else:
return attribute
verbose = 1 # Flag set to 0 by regrtest.py
use_resources = None # Flag set to [] by regrtest.py
max_memuse = 0 # Disable bigmem tests (they will still be run with
# small sizes, to make sure they work.)
real_max_memuse = 0
# _original_stdout is meant to hold stdout at the time regrtest began.
# This may be "the real" stdout, or IDLE's emulation of stdout, or whatever.
# The point is to have some flavor of stdout the user can actually see.
_original_stdout = None
def record_original_stdout(stdout):
global _original_stdout
_original_stdout = stdout
def get_original_stdout():
return _original_stdout or sys.stdout
def unload(name):
try:
del sys.modules[name]
except KeyError:
pass
if sys.platform.startswith("win"):
def _waitfor(func, pathname, waitall=False):
# Perform the operation
func(pathname)
# Now setup the wait loop
if waitall:
dirname = pathname
else:
dirname, name = os.path.split(pathname)
dirname = dirname or '.'
# Check for `pathname` to be removed from the filesystem.
# The exponential backoff of the timeout amounts to a total
# of ~1 second after which the deletion is probably an error
# anyway.
# Testing on an i7@4.3GHz shows that usually only 1 iteration is
# required when contention occurs.
timeout = 0.001
while timeout < 1.0:
# Note we are only testing for the existence of the file(s) in
# the contents of the directory regardless of any security or
# access rights. If we have made it this far, we have sufficient
# permissions to do that much using Python's equivalent of the
# Windows API FindFirstFile.
# Other Windows APIs can fail or give incorrect results when
# dealing with files that are pending deletion.
L = os.listdir(dirname)
if not (L if waitall else name in L):
return
# Increase the timeout and try again
time.sleep(timeout)
timeout *= 2
warnings.warn('tests may fail, delete still pending for ' + pathname,
RuntimeWarning, stacklevel=4)
def _unlink(filename):
_waitfor(os.unlink, filename)
def _rmdir(dirname):
_waitfor(os.rmdir, dirname)
def _rmtree(path):
def _rmtree_inner(path):
for name in os.listdir(path):
fullname = os.path.join(path, name)
if os.path.isdir(fullname):
_waitfor(_rmtree_inner, fullname, waitall=True)
os.rmdir(fullname)
else:
os.unlink(fullname)
_waitfor(_rmtree_inner, path, waitall=True)
_waitfor(os.rmdir, path)
else:
_unlink = os.unlink
_rmdir = os.rmdir
_rmtree = shutil.rmtree
def unlink(filename):
try:
_unlink(filename)
except OSError:
pass
def rmdir(dirname):
try:
_rmdir(dirname)
except OSError as error:
# The directory need not exist.
if error.errno != errno.ENOENT:
raise
def rmtree(path):
try:
_rmtree(path)
    except OSError as e:
# Unix returns ENOENT, Windows returns ESRCH.
if e.errno not in (errno.ENOENT, errno.ESRCH):
raise
def forget(modname):
'''"Forget" a module was ever imported by removing it from sys.modules and
deleting any .pyc and .pyo files.'''
unload(modname)
for dirname in sys.path:
unlink(os.path.join(dirname, modname + os.extsep + 'pyc'))
# Deleting the .pyo file cannot be within the 'try' for the .pyc since
# the chance exists that there is no .pyc (and thus the 'try' statement
# is exited) but there is a .pyo file.
unlink(os.path.join(dirname, modname + os.extsep + 'pyo'))
# Check whether a gui is actually available
def _is_gui_available():
if hasattr(_is_gui_available, 'result'):
return _is_gui_available.result
reason = None
if sys.platform.startswith('win'):
# if Python is running as a service (such as the buildbot service),
# gui interaction may be disallowed
import ctypes
import ctypes.wintypes
UOI_FLAGS = 1
WSF_VISIBLE = 0x0001
class USEROBJECTFLAGS(ctypes.Structure):
_fields_ = [("fInherit", ctypes.wintypes.BOOL),
("fReserved", ctypes.wintypes.BOOL),
("dwFlags", ctypes.wintypes.DWORD)]
dll = ctypes.windll.user32
h = dll.GetProcessWindowStation()
if not h:
raise ctypes.WinError()
uof = USEROBJECTFLAGS()
needed = ctypes.wintypes.DWORD()
res = dll.GetUserObjectInformationW(h,
UOI_FLAGS,
ctypes.byref(uof),
ctypes.sizeof(uof),
ctypes.byref(needed))
if not res:
raise ctypes.WinError()
if not bool(uof.dwFlags & WSF_VISIBLE):
reason = "gui not available (WSF_VISIBLE flag not set)"
elif sys.platform == 'darwin':
# The Aqua Tk implementations on OS X can abort the process if
# being called in an environment where a window server connection
# cannot be made, for instance when invoked by a buildbot or ssh
# process not running under the same user id as the current console
# user. To avoid that, raise an exception if the window manager
# connection is not available.
from ctypes import cdll, c_int, pointer, Structure
from ctypes.util import find_library
app_services = cdll.LoadLibrary(find_library("ApplicationServices"))
if app_services.CGMainDisplayID() == 0:
reason = "gui tests cannot run without OS X window manager"
else:
class ProcessSerialNumber(Structure):
_fields_ = [("highLongOfPSN", c_int),
("lowLongOfPSN", c_int)]
psn = ProcessSerialNumber()
psn_p = pointer(psn)
if ( (app_services.GetCurrentProcess(psn_p) < 0) or
(app_services.SetFrontProcess(psn_p) < 0) ):
reason = "cannot run without OS X gui process"
# check on every platform whether tkinter can actually do anything
if not reason:
try:
from Tkinter import Tk
root = Tk()
root.withdraw()
root.update()
root.destroy()
except Exception as e:
err_string = str(e)
if len(err_string) > 50:
err_string = err_string[:50] + ' [...]'
reason = 'Tk unavailable due to {}: {}'.format(type(e).__name__,
err_string)
_is_gui_available.reason = reason
_is_gui_available.result = not reason
return _is_gui_available.result
def is_resource_enabled(resource):
"""Test whether a resource is enabled.
Known resources are set by regrtest.py. If not running under regrtest.py,
all resources are assumed enabled unless use_resources has been set.
"""
return use_resources is None or resource in use_resources
def requires(resource, msg=None):
"""Raise ResourceDenied if the specified resource is not available."""
if not is_resource_enabled(resource):
if msg is None:
msg = "Use of the `%s' resource not enabled" % resource
raise ResourceDenied(msg)
if resource == 'gui' and not _is_gui_available():
raise ResourceDenied(_is_gui_available.reason)
def requires_mac_ver(*min_version):
"""Decorator raising SkipTest if the OS is Mac OS X and the OS X
    version is less than min_version.
    For example, @requires_mac_ver(10, 5) raises SkipTest if the OS X version
    is less than 10.5.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
if sys.platform == 'darwin':
version_txt = platform.mac_ver()[0]
try:
version = tuple(map(int, version_txt.split('.')))
except ValueError:
pass
else:
if version < min_version:
min_version_txt = '.'.join(map(str, min_version))
raise unittest.SkipTest(
"Mac OS X %s or higher required, not %s"
% (min_version_txt, version_txt))
return func(*args, **kw)
wrapper.min_version = min_version
return wrapper
return decorator
# Don't use "localhost", since resolving it uses the DNS under recent
# Windows versions (see issue #18792).
HOST = "127.0.0.1"
HOSTv6 = "::1"
def find_unused_port(family=socket.AF_INET, socktype=socket.SOCK_STREAM):
"""Returns an unused port that should be suitable for binding. This is
achieved by creating a temporary socket with the same family and type as
the 'sock' parameter (default is AF_INET, SOCK_STREAM), and binding it to
the specified host address (defaults to 0.0.0.0) with the port set to 0,
eliciting an unused ephemeral port from the OS. The temporary socket is
then closed and deleted, and the ephemeral port is returned.
Either this method or bind_port() should be used for any tests where a
server socket needs to be bound to a particular port for the duration of
the test. Which one to use depends on whether the calling code is creating
    a Python socket, or if an unused port needs to be provided in a constructor
or passed to an external program (i.e. the -accept argument to openssl's
s_server mode). Always prefer bind_port() over find_unused_port() where
possible. Hard coded ports should *NEVER* be used. As soon as a server
socket is bound to a hard coded port, the ability to run multiple instances
of the test simultaneously on the same host is compromised, which makes the
test a ticking time bomb in a buildbot environment. On Unix buildbots, this
may simply manifest as a failed test, which can be recovered from without
intervention in most cases, but on Windows, the entire python process can
completely and utterly wedge, requiring someone to log in to the buildbot
and manually kill the affected process.
(This is easy to reproduce on Windows, unfortunately, and can be traced to
the SO_REUSEADDR socket option having different semantics on Windows versus
Unix/Linux. On Unix, you can't have two AF_INET SOCK_STREAM sockets bind,
listen and then accept connections on identical host/ports. An EADDRINUSE
socket.error will be raised at some point (depending on the platform and
the order bind and listen were called on each socket).
However, on Windows, if SO_REUSEADDR is set on the sockets, no EADDRINUSE
will ever be raised when attempting to bind two identical host/ports. When
accept() is called on each socket, the second caller's process will steal
the port from the first caller, leaving them both in an awkwardly wedged
state where they'll no longer respond to any signals or graceful kills, and
must be forcibly killed via OpenProcess()/TerminateProcess().
The solution on Windows is to use the SO_EXCLUSIVEADDRUSE socket option
instead of SO_REUSEADDR, which effectively affords the same semantics as
    SO_REUSEADDR on Unix. Given that Unix developers far outnumber Windows
    developers in the Open Source world, this is a common mistake. A quick
look over OpenSSL's 0.9.8g source shows that they use SO_REUSEADDR when
openssl.exe is called with the 's_server' option, for example. See
http://bugs.python.org/issue2550 for more info. The following site also
has a very thorough description about the implications of both REUSEADDR
and EXCLUSIVEADDRUSE on Windows:
http://msdn2.microsoft.com/en-us/library/ms740621(VS.85).aspx)
XXX: although this approach is a vast improvement on previous attempts to
elicit unused ports, it rests heavily on the assumption that the ephemeral
port returned to us by the OS won't immediately be dished back out to some
other process when we close and delete our temporary socket but before our
calling code has a chance to bind the returned port. We can deal with this
issue if/when we come across it."""
tempsock = socket.socket(family, socktype)
port = bind_port(tempsock)
tempsock.close()
del tempsock
return port
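# Usage sketch (assumes the returned port is handed to an external program
# before anything else can grab it -- see the caveat in the docstring above):
#
#   port = find_unused_port()
#   # e.g. pass '-accept %d' % port to an openssl s_server subprocess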
def bind_port(sock, host=HOST):
"""Bind the socket to a free port and return the port number. Relies on
ephemeral ports in order to ensure we are using an unbound port. This is
important as many tests may be running simultaneously, especially in a
buildbot environment. This method raises an exception if the sock.family
is AF_INET and sock.type is SOCK_STREAM, *and* the socket has SO_REUSEADDR
or SO_REUSEPORT set on it. Tests should *never* set these socket options
for TCP/IP sockets. The only case for setting these options is testing
multicasting via multiple UDP sockets.
Additionally, if the SO_EXCLUSIVEADDRUSE socket option is available (i.e.
on Windows), it will be set on the socket. This will prevent anyone else
from bind()'ing to our host/port for the duration of the test.
"""
if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM:
if hasattr(socket, 'SO_REUSEADDR'):
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 1:
raise TestFailed("tests should never set the SO_REUSEADDR " \
"socket option on TCP/IP sockets!")
if hasattr(socket, 'SO_REUSEPORT'):
try:
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 1:
raise TestFailed("tests should never set the SO_REUSEPORT " \
"socket option on TCP/IP sockets!")
except EnvironmentError:
# Python's socket module was compiled using modern headers
# thus defining SO_REUSEPORT but this process is running
# under an older kernel that does not support SO_REUSEPORT.
pass
if hasattr(socket, 'SO_EXCLUSIVEADDRUSE'):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
sock.bind((host, 0))
port = sock.getsockname()[1]
return port
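# Usage sketch (preferred over find_unused_port() when the test itself owns
# the socket):
#
#   sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   port = bind_port(sock)   # sock is now bound to (HOST, port)
#   sock.listen(1)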
def _is_ipv6_enabled():
"""Check whether IPv6 is enabled on this host."""
if socket.has_ipv6:
sock = None
try:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
sock.bind((HOSTv6, 0))
return True
except socket.error:
pass
finally:
if sock:
sock.close()
return False
IPV6_ENABLED = _is_ipv6_enabled()
def system_must_validate_cert(f):
"""Skip the test on TLS certificate validation failures."""
@functools.wraps(f)
def dec(*args, **kwargs):
try:
f(*args, **kwargs)
except IOError as e:
if "CERTIFICATE_VERIFY_FAILED" in str(e):
raise unittest.SkipTest("system does not contain "
"necessary certificates")
raise
return dec
FUZZ = 1e-6
def fcmp(x, y): # fuzzy comparison function
if isinstance(x, float) or isinstance(y, float):
try:
fuzz = (abs(x) + abs(y)) * FUZZ
if abs(x-y) <= fuzz:
return 0
except:
pass
elif type(x) == type(y) and isinstance(x, (tuple, list)):
for i in range(min(len(x), len(y))):
outcome = fcmp(x[i], y[i])
if outcome != 0:
return outcome
return (len(x) > len(y)) - (len(x) < len(y))
return (x > y) - (x < y)
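# Usage sketch (values are illustrative):
#
#   fcmp(0.1 + 0.2, 0.3)    # -> 0, difference is within the FUZZ tolerance
#   fcmp((1, 2), (1, 3))    # -> -1, sequences are compared element-wise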
# A constant likely larger than the underlying OS pipe buffer size, to
# make writes blocking.
# Windows limit seems to be around 512 B, and many Unix kernels have a
# 64 KiB pipe buffer size or 16 * PAGE_SIZE: take a few megabytes to be sure.
# (see issue #17835 for a discussion of this number).
PIPE_MAX_SIZE = 4 * 1024 * 1024 + 1
# A constant likely larger than the underlying OS socket buffer size, to make
# writes blocking.
# The socket buffer sizes can usually be tuned system-wide (e.g. through sysctl
# on Linux), or on a per-socket basis (SO_SNDBUF/SO_RCVBUF). See issue #18643
# for a discussion of this number).
SOCK_MAX_SIZE = 16 * 1024 * 1024 + 1
is_jython = sys.platform.startswith('java')
try:
unicode
have_unicode = True
except NameError:
have_unicode = False
requires_unicode = unittest.skipUnless(have_unicode, 'no unicode support')
def u(s):
return unicode(s, 'unicode-escape')
# FS_NONASCII: non-ASCII Unicode character encodable by
# sys.getfilesystemencoding(), or None if there is no such character.
FS_NONASCII = None
if have_unicode:
for character in (
# First try printable and common characters to have a readable filename.
        # For each character, the listed encodings are just examples of encodings able
# to encode the character (the list is not exhaustive).
# U+00E6 (Latin Small Letter Ae): cp1252, iso-8859-1
unichr(0x00E6),
# U+0130 (Latin Capital Letter I With Dot Above): cp1254, iso8859_3
unichr(0x0130),
# U+0141 (Latin Capital Letter L With Stroke): cp1250, cp1257
unichr(0x0141),
# U+03C6 (Greek Small Letter Phi): cp1253
unichr(0x03C6),
# U+041A (Cyrillic Capital Letter Ka): cp1251
unichr(0x041A),
# U+05D0 (Hebrew Letter Alef): Encodable to cp424
unichr(0x05D0),
# U+060C (Arabic Comma): cp864, cp1006, iso8859_6, mac_arabic
unichr(0x060C),
# U+062A (Arabic Letter Teh): cp720
unichr(0x062A),
# U+0E01 (Thai Character Ko Kai): cp874
unichr(0x0E01),
# Then try more "special" characters. "special" because they may be
# interpreted or displayed differently depending on the exact locale
# encoding and the font.
# U+00A0 (No-Break Space)
unichr(0x00A0),
# U+20AC (Euro Sign)
unichr(0x20AC),
):
try:
character.encode(sys.getfilesystemencoding())\
.decode(sys.getfilesystemencoding())
except UnicodeError:
pass
else:
FS_NONASCII = character
break
# Filename used for testing
if os.name == 'java':
# Jython disallows @ in module names
TESTFN = '$test'
elif os.name == 'riscos':
TESTFN = 'testfile'
else:
TESTFN = '@test'
# Unicode name only used if TESTFN_ENCODING exists for the platform.
if have_unicode:
# Assuming sys.getfilesystemencoding()!=sys.getdefaultencoding()
# TESTFN_UNICODE is a filename that can be encoded using the
# file system encoding, but *not* with the default (ascii) encoding
if isinstance('', unicode):
# python -U
# XXX perhaps unicode() should accept Unicode strings?
TESTFN_UNICODE = "@test-\xe0\xf2"
else:
# 2 latin characters.
TESTFN_UNICODE = unicode("@test-\xe0\xf2", "latin-1")
TESTFN_ENCODING = sys.getfilesystemencoding()
# TESTFN_UNENCODABLE is a filename that should *not* be
# able to be encoded by *either* the default or filesystem encoding.
# This test really only makes sense on Windows NT platforms
# which have special Unicode support in posixmodule.
if (not hasattr(sys, "getwindowsversion") or
sys.getwindowsversion()[3] < 2): # 0=win32s or 1=9x/ME
TESTFN_UNENCODABLE = None
else:
# Japanese characters (I think - from bug 846133)
TESTFN_UNENCODABLE = eval('u"@test-\u5171\u6709\u3055\u308c\u308b"')
try:
# XXX - Note - should be using TESTFN_ENCODING here - but for
# Windows, "mbcs" currently always operates as if in
            # errors='ignore' mode - hence we get '?' characters rather than
# the exception. 'Latin1' operates as we expect - ie, fails.
# See [ 850997 ] mbcs encoding ignores errors
TESTFN_UNENCODABLE.encode("Latin1")
except UnicodeEncodeError:
pass
else:
print \
'WARNING: The filename %r CAN be encoded by the filesystem. ' \
'Unicode filename tests may not be effective' \
% TESTFN_UNENCODABLE
# Disambiguate TESTFN for parallel testing, while letting it remain a valid
# module name.
TESTFN = "{}_{}_tmp".format(TESTFN, os.getpid())
# Save the initial cwd
SAVEDCWD = os.getcwd()
@contextlib.contextmanager
def change_cwd(path, quiet=False):
"""Return a context manager that changes the current working directory.
Arguments:
path: the directory to use as the temporary current working directory.
quiet: if False (the default), the context manager raises an exception
on error. Otherwise, it issues only a warning and keeps the current
working directory the same.
"""
saved_dir = os.getcwd()
try:
os.chdir(path)
except OSError:
if not quiet:
raise
warnings.warn('tests may fail, unable to change CWD to: ' + path,
RuntimeWarning, stacklevel=3)
try:
yield os.getcwd()
finally:
os.chdir(saved_dir)
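# Usage sketch ('/tmp' is only an example path):
#
#   with change_cwd('/tmp') as cwd:
#       ...                 # os.getcwd() == cwd inside the block
#   # the original working directory is restored on exit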
@contextlib.contextmanager
def temp_cwd(name='tempcwd', quiet=False):
"""
    Context manager that creates a temporary directory and sets it as the CWD.
The new CWD is created in the current directory and it's named *name*.
If *quiet* is False (default) and it's not possible to create or change
the CWD, an error is raised. If it's True, only a warning is raised
and the original CWD is used.
"""
if (have_unicode and isinstance(name, unicode) and
not os.path.supports_unicode_filenames):
try:
name = name.encode(sys.getfilesystemencoding() or 'ascii')
except UnicodeEncodeError:
if not quiet:
raise unittest.SkipTest('unable to encode the cwd name with '
'the filesystem encoding.')
saved_dir = os.getcwd()
is_temporary = False
try:
os.mkdir(name)
os.chdir(name)
is_temporary = True
except OSError:
if not quiet:
raise
warnings.warn('tests may fail, unable to change the CWD to ' + name,
RuntimeWarning, stacklevel=3)
try:
yield os.getcwd()
finally:
os.chdir(saved_dir)
if is_temporary:
rmtree(name)
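# Usage sketch (the directory name is an example):
#
#   with temp_cwd('scratch'):
#       ...                 # CWD is a freshly created ./scratch directory
#   # 'scratch' is removed and the original CWD restored on exit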
def findfile(file, here=__file__, subdir=None):
"""Try to find a file on sys.path and the working directory. If it is not
found the argument passed to the function is returned (this does not
    necessarily signal failure; the argument could still be a legitimate path)."""
if os.path.isabs(file):
return file
if subdir is not None:
file = os.path.join(subdir, file)
path = sys.path
path = [os.path.dirname(here)] + path
for dn in path:
fn = os.path.join(dn, file)
if os.path.exists(fn): return fn
return file
def sortdict(dict):
"Like repr(dict), but in sorted order."
items = dict.items()
items.sort()
reprpairs = ["%r: %r" % pair for pair in items]
withcommas = ", ".join(reprpairs)
return "{%s}" % withcommas
def make_bad_fd():
"""
Create an invalid file descriptor by opening and closing a file and return
its fd.
"""
file = open(TESTFN, "wb")
try:
return file.fileno()
finally:
file.close()
unlink(TESTFN)
def check_syntax_error(testcase, statement):
testcase.assertRaises(SyntaxError, compile, statement,
'<test string>', 'exec')
def open_urlresource(url, check=None):
import urlparse, urllib2
    filename = urlparse.urlparse(url)[2].split('/')[-1] # '/': it's a URL!
fn = os.path.join(os.path.dirname(__file__), "data", filename)
def check_valid_file(fn):
f = open(fn)
if check is None:
return f
elif check(f):
f.seek(0)
return f
f.close()
if os.path.exists(fn):
f = check_valid_file(fn)
if f is not None:
return f
unlink(fn)
# Verify the requirement before downloading the file
requires('urlfetch')
print >> get_original_stdout(), '\tfetching %s ...' % url
f = urllib2.urlopen(url, timeout=15)
try:
with open(fn, "wb") as out:
s = f.read()
while s:
out.write(s)
s = f.read()
finally:
f.close()
f = check_valid_file(fn)
if f is not None:
return f
raise TestFailed('invalid resource "%s"' % fn)
class WarningsRecorder(object):
"""Convenience wrapper for the warnings list returned on
entry to the warnings.catch_warnings() context manager.
"""
def __init__(self, warnings_list):
self._warnings = warnings_list
self._last = 0
def __getattr__(self, attr):
if len(self._warnings) > self._last:
return getattr(self._warnings[-1], attr)
elif attr in warnings.WarningMessage._WARNING_DETAILS:
return None
raise AttributeError("%r has no attribute %r" % (self, attr))
@property
def warnings(self):
return self._warnings[self._last:]
def reset(self):
self._last = len(self._warnings)
def _filterwarnings(filters, quiet=False):
"""Catch the warnings, then check if all the expected
warnings have been raised and re-raise unexpected warnings.
If 'quiet' is True, only re-raise the unexpected warnings.
"""
# Clear the warning registry of the calling module
# in order to re-raise the warnings.
frame = sys._getframe(2)
registry = frame.f_globals.get('__warningregistry__')
if registry:
registry.clear()
with warnings.catch_warnings(record=True) as w:
# Set filter "always" to record all warnings. Because
        # test_warnings swaps the module, we need to look it up in
# the sys.modules dictionary.
sys.modules['warnings'].simplefilter("always")
yield WarningsRecorder(w)
# Filter the recorded warnings
reraise = [warning.message for warning in w]
missing = []
for msg, cat in filters:
seen = False
for exc in reraise[:]:
message = str(exc)
# Filter out the matching messages
if (re.match(msg, message, re.I) and
issubclass(exc.__class__, cat)):
seen = True
reraise.remove(exc)
if not seen and not quiet:
# This filter caught nothing
missing.append((msg, cat.__name__))
if reraise:
raise AssertionError("unhandled warning %r" % reraise[0])
if missing:
raise AssertionError("filter (%r, %s) did not catch any warning" %
missing[0])
@contextlib.contextmanager
def check_warnings(*filters, **kwargs):
"""Context manager to silence warnings.
Accept 2-tuples as positional arguments:
("message regexp", WarningCategory)
Optional argument:
- if 'quiet' is True, it does not fail if a filter catches nothing
(default True without argument,
default False if some filters are defined)
Without argument, it defaults to:
check_warnings(("", Warning), quiet=True)
"""
quiet = kwargs.get('quiet')
if not filters:
filters = (("", Warning),)
# Preserve backward compatibility
if quiet is None:
quiet = True
return _filterwarnings(filters, quiet)
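# Usage sketch (the warning text is an example):
#
#   with check_warnings(("deprecated", DeprecationWarning)):
#       warnings.warn("deprecated API", DeprecationWarning)
#   # fails if no matching DeprecationWarning was raised, because quiet
#   # effectively defaults to False when an explicit filter is given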
@contextlib.contextmanager
def check_py3k_warnings(*filters, **kwargs):
"""Context manager to silence py3k warnings.
Accept 2-tuples as positional arguments:
("message regexp", WarningCategory)
Optional argument:
- if 'quiet' is True, it does not fail if a filter catches nothing
(default False)
Without argument, it defaults to:
check_py3k_warnings(("", DeprecationWarning), quiet=False)
"""
if sys.py3kwarning:
if not filters:
filters = (("", DeprecationWarning),)
else:
# It should not raise any py3k warning
filters = ()
return _filterwarnings(filters, kwargs.get('quiet'))
class CleanImport(object):
"""Context manager to force import to return a new module reference.
This is useful for testing module-level behaviours, such as
the emission of a DeprecationWarning on import.
Use like this:
with CleanImport("foo"):
importlib.import_module("foo") # new reference
"""
def __init__(self, *module_names):
self.original_modules = sys.modules.copy()
for module_name in module_names:
if module_name in sys.modules:
module = sys.modules[module_name]
# It is possible that module_name is just an alias for
# another module (e.g. stub for modules renamed in 3.x).
                # In that case, we also need to delete the real module to clear
# the import cache.
if module.__name__ != module_name:
del sys.modules[module.__name__]
del sys.modules[module_name]
def __enter__(self):
return self
def __exit__(self, *ignore_exc):
sys.modules.update(self.original_modules)
class EnvironmentVarGuard(UserDict.DictMixin):
"""Class to help protect the environment variable properly. Can be used as
a context manager."""
def __init__(self):
self._environ = os.environ
self._changed = {}
def __getitem__(self, envvar):
return self._environ[envvar]
def __setitem__(self, envvar, value):
# Remember the initial value on the first access
if envvar not in self._changed:
self._changed[envvar] = self._environ.get(envvar)
self._environ[envvar] = value
def __delitem__(self, envvar):
# Remember the initial value on the first access
if envvar not in self._changed:
self._changed[envvar] = self._environ.get(envvar)
if envvar in self._environ:
del self._environ[envvar]
def keys(self):
return self._environ.keys()
def set(self, envvar, value):
self[envvar] = value
def unset(self, envvar):
del self[envvar]
def __enter__(self):
return self
def __exit__(self, *ignore_exc):
for (k, v) in self._changed.items():
if v is None:
if k in self._environ:
del self._environ[k]
else:
self._environ[k] = v
os.environ = self._environ
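# Usage sketch (variable names are examples):
#
#   with EnvironmentVarGuard() as env:
#       env['LANG'] = 'C'   # set for the duration of the block
#       env.unset('HOME')   # removed for the duration of the block
#   # both variables are restored to their original state on exit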
class DirsOnSysPath(object):
"""Context manager to temporarily add directories to sys.path.
This makes a copy of sys.path, appends any directories given
as positional arguments, then reverts sys.path to the copied
settings when the context ends.
Note that *all* sys.path modifications in the body of the
context manager, including replacement of the object,
will be reverted at the end of the block.
"""
def __init__(self, *paths):
self.original_value = sys.path[:]
self.original_object = sys.path
sys.path.extend(paths)
def __enter__(self):
return self
def __exit__(self, *ignore_exc):
sys.path = self.original_object
sys.path[:] = self.original_value
class TransientResource(object):
"""Raise ResourceDenied if an exception is raised while the context manager
is in effect that matches the specified exception and attributes."""
def __init__(self, exc, **kwargs):
self.exc = exc
self.attrs = kwargs
def __enter__(self):
return self
def __exit__(self, type_=None, value=None, traceback=None):
"""If type_ is a subclass of self.exc and value has attributes matching
self.attrs, raise ResourceDenied. Otherwise let the exception
propagate (if any)."""
if type_ is not None and issubclass(self.exc, type_):
for attr, attr_value in self.attrs.iteritems():
if not hasattr(value, attr):
break
if getattr(value, attr) != attr_value:
break
else:
raise ResourceDenied("an optional resource is not available")
@contextlib.contextmanager
def transient_internet(resource_name, timeout=30.0, errnos=()):
"""Return a context manager that raises ResourceDenied when various issues
with the Internet connection manifest themselves as exceptions."""
default_errnos = [
('ECONNREFUSED', 111),
('ECONNRESET', 104),
('EHOSTUNREACH', 113),
('ENETUNREACH', 101),
('ETIMEDOUT', 110),
]
default_gai_errnos = [
('EAI_AGAIN', -3),
('EAI_FAIL', -4),
('EAI_NONAME', -2),
('EAI_NODATA', -5),
        # Windows defines EAI_NODATA as 11001, but its getaddrinfo()
# implementation actually returns WSANO_DATA i.e. 11004.
('WSANO_DATA', 11004),
]
denied = ResourceDenied("Resource '%s' is not available" % resource_name)
captured_errnos = errnos
gai_errnos = []
if not captured_errnos:
captured_errnos = [getattr(errno, name, num)
for (name, num) in default_errnos]
gai_errnos = [getattr(socket, name, num)
for (name, num) in default_gai_errnos]
def filter_error(err):
n = getattr(err, 'errno', None)
if (isinstance(err, socket.timeout) or
(isinstance(err, socket.gaierror) and n in gai_errnos) or
n in captured_errnos):
if not verbose:
sys.stderr.write(denied.args[0] + "\n")
raise denied
old_timeout = socket.getdefaulttimeout()
try:
if timeout is not None:
socket.setdefaulttimeout(timeout)
yield
except IOError as err:
# urllib can wrap original socket errors multiple times (!), we must
# unwrap to get at the original error.
while True:
a = err.args
if len(a) >= 1 and isinstance(a[0], IOError):
err = a[0]
# The error can also be wrapped as args[1]:
# except socket.error as msg:
# raise IOError('socket error', msg).with_traceback(sys.exc_info()[2])
elif len(a) >= 2 and isinstance(a[1], IOError):
err = a[1]
else:
break
filter_error(err)
raise
# XXX should we catch generic exceptions and look for their
# __cause__ or __context__?
finally:
socket.setdefaulttimeout(old_timeout)
@contextlib.contextmanager
def captured_output(stream_name):
"""Return a context manager used by captured_stdout and captured_stdin
that temporarily replaces the sys stream *stream_name* with a StringIO."""
import StringIO
orig_stdout = getattr(sys, stream_name)
setattr(sys, stream_name, StringIO.StringIO())
try:
yield getattr(sys, stream_name)
finally:
setattr(sys, stream_name, orig_stdout)
def captured_stdout():
"""Capture the output of sys.stdout:
with captured_stdout() as s:
print "hello"
self.assertEqual(s.getvalue(), "hello")
"""
return captured_output("stdout")
def captured_stderr():
return captured_output("stderr")
def captured_stdin():
return captured_output("stdin")
def gc_collect():
"""Force as many objects as possible to be collected.
In non-CPython implementations of Python, this is needed because timely
deallocation is not guaranteed by the garbage collector. (Even in CPython
this can be the case in case of reference cycles.) This means that __del__
methods may be called later than expected and weakrefs may remain alive for
longer than expected. This function tries its best to force all garbage
objects to disappear.
"""
gc.collect()
if is_jython:
time.sleep(0.1)
gc.collect()
gc.collect()
_header = '2P'
if hasattr(sys, "gettotalrefcount"):
_header = '2P' + _header
_vheader = _header + 'P'
def calcobjsize(fmt):
return struct.calcsize(_header + fmt + '0P')
def calcvobjsize(fmt):
return struct.calcsize(_vheader + fmt + '0P')
_TPFLAGS_HAVE_GC = 1<<14
_TPFLAGS_HEAPTYPE = 1<<9
def check_sizeof(test, o, size):
import _testcapi
result = sys.getsizeof(o)
# add GC header size
if ((type(o) == type) and (o.__flags__ & _TPFLAGS_HEAPTYPE) or\
((type(o) != type) and (type(o).__flags__ & _TPFLAGS_HAVE_GC))):
size += _testcapi.SIZEOF_PYGC_HEAD
msg = 'wrong size for %s: got %d, expected %d' \
% (type(o), result, size)
test.assertEqual(result, size, msg)
#=======================================================================
# Decorator for running a function in a different locale, correctly resetting
# it afterwards.
def run_with_locale(catstr, *locales):
def decorator(func):
def inner(*args, **kwds):
try:
import locale
category = getattr(locale, catstr)
orig_locale = locale.setlocale(category)
except AttributeError:
# if the test author gives us an invalid category string
raise
except:
# cannot retrieve original locale, so do nothing
locale = orig_locale = None
else:
for loc in locales:
try:
locale.setlocale(category, loc)
break
except:
pass
# now run the function, resetting the locale on exceptions
try:
return func(*args, **kwds)
finally:
if locale and orig_locale:
locale.setlocale(category, orig_locale)
inner.func_name = func.func_name
inner.__doc__ = func.__doc__
return inner
return decorator
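# Usage sketch (the locale names are examples; locales that cannot be set
# are skipped, falling through to the next candidate):
#
#   @run_with_locale('LC_NUMERIC', 'fr_FR.UTF-8', 'fr_FR', '')
#   def test_float_formatting(self):
#       ...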
#=======================================================================
# Decorator for running a function in a specific timezone, correctly
# resetting it afterwards.
def run_with_tz(tz):
def decorator(func):
def inner(*args, **kwds):
try:
tzset = time.tzset
except AttributeError:
raise unittest.SkipTest("tzset required")
if 'TZ' in os.environ:
orig_tz = os.environ['TZ']
else:
orig_tz = None
os.environ['TZ'] = tz
tzset()
# now run the function, resetting the tz on exceptions
try:
return func(*args, **kwds)
finally:
if orig_tz is None:
del os.environ['TZ']
else:
os.environ['TZ'] = orig_tz
time.tzset()
inner.__name__ = func.__name__
inner.__doc__ = func.__doc__
return inner
return decorator
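# Usage sketch (the timezone string is an example; skips if time.tzset is
# unavailable on the platform):
#
#   @run_with_tz('UTC')
#   def test_gmtime_matches_localtime(self):
#       ...                 # runs with TZ=UTC, then TZ is restored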
#=======================================================================
# Big-memory-test support. Separate from 'resources' because memory use should be configurable.
# Some handy shorthands. Note that these are used for byte-limits as well
# as size-limits, in the various bigmem tests
_1M = 1024*1024
_1G = 1024 * _1M
_2G = 2 * _1G
_4G = 4 * _1G
MAX_Py_ssize_t = sys.maxsize
def set_memlimit(limit):
global max_memuse
global real_max_memuse
sizes = {
'k': 1024,
'm': _1M,
'g': _1G,
't': 1024*_1G,
}
m = re.match(r'(\d+(\.\d+)?) (K|M|G|T)b?$', limit,
re.IGNORECASE | re.VERBOSE)
if m is None:
raise ValueError('Invalid memory limit %r' % (limit,))
memlimit = int(float(m.group(1)) * sizes[m.group(3).lower()])
real_max_memuse = memlimit
if memlimit > MAX_Py_ssize_t:
memlimit = MAX_Py_ssize_t
if memlimit < _2G - 1:
raise ValueError('Memory limit %r too low to be useful' % (limit,))
max_memuse = memlimit
def bigmemtest(minsize, memuse, overhead=5*_1M):
"""Decorator for bigmem tests.
'minsize' is the minimum useful size for the test (in arbitrary,
test-interpreted units.) 'memuse' is the number of 'bytes per size' for
the test, or a good estimate of it. 'overhead' specifies fixed overhead,
    independent of the test size, and defaults to 5 MiB.
The decorator tries to guess a good value for 'size' and passes it to
the decorated test function. If minsize * memuse is more than the
allowed memory use (as defined by max_memuse), the test is skipped.
Otherwise, minsize is adjusted upward to use up to max_memuse.
"""
def decorator(f):
def wrapper(self):
if not max_memuse:
# If max_memuse is 0 (the default),
# we still want to run the tests with size set to a few kb,
# to make sure they work. We still want to avoid using
# too much memory, though, but we do that noisily.
maxsize = 5147
self.assertFalse(maxsize * memuse + overhead > 20 * _1M)
else:
maxsize = int((max_memuse - overhead) / memuse)
if maxsize < minsize:
# Really ought to print 'test skipped' or something
if verbose:
sys.stderr.write("Skipping %s because of memory "
"constraint\n" % (f.__name__,))
return
# Try to keep some breathing room in memory use
maxsize = max(maxsize - 50 * _1M, minsize)
return f(self, maxsize)
wrapper.minsize = minsize
wrapper.memuse = memuse
wrapper.overhead = overhead
return wrapper
return decorator
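# Usage sketch (numbers are illustrative): a test that needs roughly 2 bytes
# of memory per unit of 'size' and at least 2 GiB worth of units:
#
#   @bigmemtest(minsize=_2G, memuse=2)
#   def test_large_concat(self, size):
#       ...                 # 'size' is chosen to fit within max_memuse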
def precisionbigmemtest(size, memuse, overhead=5*_1M, dry_run=True):
def decorator(f):
def wrapper(self):
if not real_max_memuse:
maxsize = 5147
else:
maxsize = size
if ((real_max_memuse or not dry_run)
and real_max_memuse < maxsize * memuse):
if verbose:
sys.stderr.write("Skipping %s because of memory "
"constraint\n" % (f.__name__,))
return
return f(self, maxsize)
wrapper.size = size
wrapper.memuse = memuse
wrapper.overhead = overhead
return wrapper
return decorator
def bigaddrspacetest(f):
"""Decorator for tests that fill the address space."""
def wrapper(self):
if max_memuse < MAX_Py_ssize_t:
if verbose:
sys.stderr.write("Skipping %s because of memory "
"constraint\n" % (f.__name__,))
else:
return f(self)
return wrapper
#=======================================================================
# unittest integration.
class BasicTestRunner:
def run(self, test):
result = unittest.TestResult()
test(result)
return result
def _id(obj):
return obj
def requires_resource(resource):
if resource == 'gui' and not _is_gui_available():
return unittest.skip(_is_gui_available.reason)
if is_resource_enabled(resource):
return _id
else:
return unittest.skip("resource {0!r} is not enabled".format(resource))
def cpython_only(test):
"""
Decorator for tests only applicable on CPython.
"""
return impl_detail(cpython=True)(test)
def impl_detail(msg=None, **guards):
if check_impl_detail(**guards):
return _id
if msg is None:
guardnames, default = _parse_guards(guards)
if default:
msg = "implementation detail not available on {0}"
else:
msg = "implementation detail specific to {0}"
guardnames = sorted(guardnames.keys())
msg = msg.format(' or '.join(guardnames))
return unittest.skip(msg)
def _parse_guards(guards):
# Returns a tuple ({platform_name: run_me}, default_value)
if not guards:
return ({'cpython': True}, False)
is_true = guards.values()[0]
assert guards.values() == [is_true] * len(guards) # all True or all False
return (guards, not is_true)
# Use the following check to guard CPython's implementation-specific tests --
# or to run them only on the implementation(s) guarded by the arguments.
def check_impl_detail(**guards):
"""This function returns True or False depending on the host platform.
Examples:
if check_impl_detail(): # only on CPython (default)
if check_impl_detail(jython=True): # only on Jython
if check_impl_detail(cpython=False): # everywhere except on CPython
"""
guards, default = _parse_guards(guards)
return guards.get(platform.python_implementation().lower(), default)
def _run_suite(suite):
"""Run tests from a unittest.TestSuite-derived class."""
if verbose:
runner = unittest.TextTestRunner(sys.stdout, verbosity=2)
else:
runner = BasicTestRunner()
result = runner.run(suite)
if not result.wasSuccessful():
if len(result.errors) == 1 and not result.failures:
err = result.errors[0][1]
elif len(result.failures) == 1 and not result.errors:
err = result.failures[0][1]
else:
err = "multiple errors occurred"
if not verbose:
err += "; run in verbose mode for details"
raise TestFailed(err)
def run_unittest(*classes):
"""Run tests from unittest.TestCase-derived classes."""
valid_types = (unittest.TestSuite, unittest.TestCase)
suite = unittest.TestSuite()
for cls in classes:
if isinstance(cls, str):
if cls in sys.modules:
suite.addTest(unittest.findTestCases(sys.modules[cls]))
else:
raise ValueError("str arguments must be keys in sys.modules")
elif isinstance(cls, valid_types):
suite.addTest(cls)
else:
suite.addTest(unittest.makeSuite(cls))
_run_suite(suite)
#=======================================================================
# Check for the presence of docstrings.
HAVE_DOCSTRINGS = (check_impl_detail(cpython=False) or
sys.platform == 'win32' or
sysconfig.get_config_var('WITH_DOC_STRINGS'))
requires_docstrings = unittest.skipUnless(HAVE_DOCSTRINGS,
"test requires docstrings")
#=======================================================================
# doctest driver.
def run_doctest(module, verbosity=None):
"""Run doctest on the given module. Return (#failures, #tests).
If optional argument verbosity is not specified (or is None), pass
test_support's belief about verbosity on to doctest. Else doctest's
usual behavior is used (it searches sys.argv for -v).
"""
import doctest
if verbosity is None:
verbosity = verbose
else:
verbosity = None
# Direct doctest output (normally just errors) to real stdout; doctest
# output shouldn't be compared by regrtest.
save_stdout = sys.stdout
sys.stdout = get_original_stdout()
try:
f, t = doctest.testmod(module, verbose=verbosity)
if f:
raise TestFailed("%d of %d doctests failed" % (f, t))
finally:
sys.stdout = save_stdout
if verbose:
print 'doctest (%s) ... %d tests with zero failures' % (module.__name__, t)
return f, t
#=======================================================================
# Threading support to prevent reporting refleaks when running regrtest.py -R
# NOTE: we use thread._count() rather than threading.enumerate() (or the
# moral equivalent thereof) because a threading.Thread object is still alive
# until its __bootstrap() method has returned, even after it has been
# unregistered from the threading module.
# thread._count(), on the other hand, only gets decremented *after* the
# __bootstrap() method has returned, which gives us reliable reference counts
# at the end of a test run.
def threading_setup():
if thread:
return thread._count(),
else:
return 1,
def threading_cleanup(nb_threads):
if not thread:
return
_MAX_COUNT = 10
for count in range(_MAX_COUNT):
n = thread._count()
if n == nb_threads:
break
time.sleep(0.1)
# XXX print a warning in case of failure?
def reap_threads(func):
"""Use this function when threads are being used. This will
ensure that the threads are cleaned up even when the test fails.
If threading is unavailable this function does nothing.
"""
if not thread:
return func
@functools.wraps(func)
def decorator(*args):
key = threading_setup()
try:
return func(*args)
finally:
threading_cleanup(*key)
return decorator
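# Usage sketch:
#
#   @reap_threads
#   def test_spawns_worker_threads(self):
#       ...                 # thread counts are rechecked after the test
#                           # body returns, even if it raises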
def reap_children():
"""Use this function at the end of test_main() whenever sub-processes
are started. This will help ensure that no extra children (zombies)
stick around to hog resources and create problems when looking
for refleaks.
"""
# Reap all our dead child processes so we don't leave zombies around.
# These hog resources and might be causing some of the buildbots to die.
if hasattr(os, 'waitpid'):
any_process = -1
while True:
try:
# This will raise an exception on Windows. That's ok.
pid, status = os.waitpid(any_process, os.WNOHANG)
if pid == 0:
break
except:
break
@contextlib.contextmanager
def start_threads(threads, unlock=None):
threads = list(threads)
started = []
try:
try:
for t in threads:
t.start()
started.append(t)
except:
if verbose:
print("Can't start %d threads, only %d threads started" %
(len(threads), len(started)))
raise
yield
finally:
if unlock:
unlock()
endtime = starttime = time.time()
for timeout in range(1, 16):
endtime += 60
for t in started:
t.join(max(endtime - time.time(), 0.01))
started = [t for t in started if t.isAlive()]
if not started:
break
if verbose:
print('Unable to join %d threads during a period of '
'%d minutes' % (len(started), timeout))
started = [t for t in started if t.isAlive()]
if started:
raise AssertionError('Unable to join %d threads' % len(started))
@contextlib.contextmanager
def swap_attr(obj, attr, new_val):
"""Temporary swap out an attribute with a new object.
Usage:
with swap_attr(obj, "attr", 5):
...
This will set obj.attr to 5 for the duration of the with: block,
restoring the old value at the end of the block. If `attr` doesn't
exist on `obj`, it will be created and then deleted at the end of the
block.
"""
if hasattr(obj, attr):
real_val = getattr(obj, attr)
setattr(obj, attr, new_val)
try:
yield
finally:
setattr(obj, attr, real_val)
else:
setattr(obj, attr, new_val)
try:
yield
finally:
delattr(obj, attr)
def py3k_bytes(b):
"""Emulate the py3k bytes() constructor.
    NOTE: This is only a best-effort function.
"""
try:
# memoryview?
return b.tobytes()
except AttributeError:
try:
# iterable of ints?
return b"".join(chr(x) for x in b)
except TypeError:
return bytes(b)
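# Usage sketch:
#
#   py3k_bytes([65, 66])            # -> b'AB' (iterable of ints)
#   py3k_bytes(memoryview(b'xy'))   # -> b'xy' (via tobytes())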
def args_from_interpreter_flags():
"""Return a list of command-line arguments reproducing the current
settings in sys.flags."""
import subprocess
return subprocess._args_from_interpreter_flags()
def strip_python_stderr(stderr):
"""Strip the stderr of a Python process from potential debug output
emitted by the interpreter.
This will typically be run on the result of the communicate() method
of a subprocess.Popen object.
"""
stderr = re.sub(br"\[\d+ refs\]\r?\n?$", b"", stderr).strip()
return stderr
def check_free_after_iterating(test, iter, cls, args=()):
class A(cls):
def __del__(self):
done[0] = True
try:
next(it)
except StopIteration:
pass
done = [False]
it = iter(A(*args))
# Issue 26494: Shouldn't crash
test.assertRaises(StopIteration, next, it)
# The sequence should be deallocated just after the end of iterating
gc_collect()
test.assertTrue(done[0])
| 35.51041 | 95 | 0.616957 |
27c1e4e53b6c905f94952661db3d688f6885bdfb | 4,909 | py | Python | applications/timeflow/pages/epic_areas.py | dyvenia/timeflow | 7852872f591fbdb88be19f7b41ebb226e805fbd9 | [
"MIT"
] | 3 | 2022-02-24T10:51:22.000Z | 2022-03-27T08:54:35.000Z | applications/timeflow/pages/epic_areas.py | dyvenia/timeflow | 7852872f591fbdb88be19f7b41ebb226e805fbd9 | [
"MIT"
] | 62 | 2022-02-15T09:52:52.000Z | 2022-03-31T12:50:13.000Z | applications/timeflow/pages/epic_areas.py | dyvenia/timeflow | 7852872f591fbdb88be19f7b41ebb226e805fbd9 | [
"MIT"
] | 4 | 2022-03-02T15:24:03.000Z | 2022-03-30T10:59:38.000Z | from idom import html, use_state, component, event
from uiflow.components.controls import (
activation_button,
deactivation_button,
submit_button,
Button,
)
from uiflow.components.input import Input, Selector2
from uiflow.components.layout import Row, Column, Container
from uiflow.components.table import SimpleTable
from ..data.epics import epics_names
from ..data.epic_areas import (
epic_area_activation,
epic_area_deactivation,
get_active_epic_area_rows,
post_epic_area,
epic_areas_names,
)
from .utils import switch_state
@component
def page():
epic_id, set_epic_id = use_state("")
name, set_name = use_state("")
is_event, set_is_event = use_state(True)
_, set_deact_name = use_state("")
_, set_activ_name = use_state("")
return html.div(
{"class": "w-full"},
Row(
Container(
create_epic_area_form(
epic_id,
set_epic_id,
name,
set_name,
is_event,
set_is_event,
)
),
bg="bg-filter-block-bg",
),
Container(
Column(
Row(list_epic_areas(is_event)),
),
Row(deactivate_epic_area(is_event, set_is_event)),
Row(activate_epic_area(is_event, set_is_event)),
),
)
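# Usage sketch (assumption: running this page standalone with idom's
# development runner; in the application it is normally mounted by the
# app's router instead):
#
#   # from idom import run
#   # run(page)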
@component
def create_epic_area_form(epic_id, set_epic_id, name, set_name, is_event, set_is_event):
"""
    Create a form that allows an admin to add a new epic area.
post endpoint: /api/epic_areas
schema: {
"epic_id": "int",
"name": "string,
"is_active": True
"created_at": "2022-02-17T15:31:39.103Z",
"updated_at": "2022-02-17T15:31:39.103Z"
}
"""
@event(prevent_default=True)
async def handle_submit(event):
"""Call a post request for the given epic area when given event is triggered."""
post_epic_area(epic_id, name)
# Change the states
switch_state(is_event, set_is_event)
# Create dropdown of active epics which can then be selected
selector_epic_id = Selector2(
set_value=set_epic_id,
data=epics_names(is_active=True),
width="48%",
md_width="48%",
)
# Create input field for the name of the epic area
inp_name = Input(set_value=set_name, label="name", width="[48%]")
# Create submit button
btn = submit_button(handle_submit, epic_id, name)
return html.div(
{"class": "bg-filter-block-bg py-4 text-sm"},
Column(
Row(selector_epic_id, inp_name, justify="justify-between"),
Row(btn),
),
)
@component
def list_epic_areas(is_event):
"""
Return rows consisting of each epic area along with its epic.
Obtain a json response from a get request to the active epic areas endpoint.
Store in rows the names of the epic and epic area, along with the id.
Return an HTML div that contains the rows in a table.
"""
rows = get_active_epic_area_rows()
return html.div({"class": "flex w-full"}, SimpleTable(rows=rows))
@component
def deactivate_epic_area(is_event, set_is_event):
"""Deactivate an epic area without deleting it."""
name_to_deact, set_name_to_deact = use_state("")
def handle_deactivation(event):
"""Set the given epic area's active column to False."""
epic_area_deactivation(name_to_deact)
switch_state(is_event, set_is_event)
# Create input field for id of epic area to be deactivated
selector_deact_name = Selector2(
set_name_to_deact,
data=epic_areas_names(is_active=True, label="epic area to be deactivated"),
width="96%",
md_width="96%",
)
# Create the deactivation button
is_disabled = True
if name_to_deact != "":
is_disabled = False
btn = Button(is_disabled, handle_submit=handle_deactivation, label="Deactivate")
return Column(Row(selector_deact_name), Row(btn))
@component
def activate_epic_area(is_event, set_is_event):
"""Activate an epic area."""
name_to_activ, set_name_to_activ = use_state("")
def handle_activation(event):
"""Set the given epic area's active column to True."""
epic_area_activation(name_to_activ)
switch_state(is_event, set_is_event)
# Create input field for name of epic area to be activated
selector_act_name = Selector2(
set_name_to_activ,
data=epic_areas_names(is_active=False, label="epic area to be activated"),
width="96%",
md_width="96%",
)
# Create the activation button
is_disabled = True
if name_to_activ != "":
is_disabled = False
btn = Button(is_disabled, handle_submit=handle_activation, label="Activate")
return Column(Row(selector_act_name), Row(btn))
| 29.572289 | 88 | 0.642086 |