Dataset schema (one row per source file):

- hexsha: string, length 40
- size: int64, 1 to 1.03M
- ext: string, 10 classes
- lang: string, 1 class
- max_stars_repo_path: string, length 3 to 239
- max_stars_repo_name: string, length 5 to 130
- max_stars_repo_head_hexsha: string, length 40 to 78
- max_stars_repo_licenses: list, length 1 to 10
- max_stars_count: int64, 1 to 191k, nullable
- max_stars_repo_stars_event_min_datetime: string, length 24, nullable
- max_stars_repo_stars_event_max_datetime: string, length 24, nullable
- max_issues_repo_path: string, length 3 to 239
- max_issues_repo_name: string, length 5 to 130
- max_issues_repo_head_hexsha: string, length 40 to 78
- max_issues_repo_licenses: list, length 1 to 10
- max_issues_count: int64, 1 to 67k, nullable
- max_issues_repo_issues_event_min_datetime: string, length 24, nullable
- max_issues_repo_issues_event_max_datetime: string, length 24, nullable
- max_forks_repo_path: string, length 3 to 239
- max_forks_repo_name: string, length 5 to 130
- max_forks_repo_head_hexsha: string, length 40 to 78
- max_forks_repo_licenses: list, length 1 to 10
- max_forks_count: int64, 1 to 105k, nullable
- max_forks_repo_forks_event_min_datetime: string, length 24, nullable
- max_forks_repo_forks_event_max_datetime: string, length 24, nullable
- content: string, length 1 to 1.03M
- avg_line_length: float64, 1 to 958k
- max_line_length: int64, 1 to 1.03M
- alphanum_fraction: float64, 0 to 1

In the rows below, the stars/issues/forks variants of repo_path, repo_name, repo_head_hexsha, and repo_licenses carry identical values, so each row's metadata block lists them once.
| hexsha: 794b756382e3cdaf08185845317edc4752c798c0 | size: 2,894 | ext: py | lang: Python |
| repo: zjzh/vega | path: vega/algorithms/nas/modnas/contrib/callback/metrics_stats.py | head: aa6e7b8c69024262fc483ee06113b4d1bd5156d8 | licenses: ["Apache-2.0"] |
| stars: null | issues: null | forks: null |
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metrics statistics reporter."""
import itertools
from collections import OrderedDict
from matplotlib import pyplot as plt
from typing import Dict, List, Tuple, Optional, Any
from modnas.registry.callback import register
from modnas.callback.base import CallbackBase
from modnas.estim.base import EstimBase
from modnas.optim.base import OptimBase
from vega.common import FileOps
plt.switch_backend('Agg')
@register
class MetricsStatsReporter(CallbackBase):
"""Metrics statistics reporter class."""
    def __init__(self, axis_list: Optional[List[Tuple[str, str]]] = None) -> None:
super().__init__({
'after:EstimBase.step_done': self.on_step_done,
'after:EstimBase.run': self.save_stats,
})
self.results = []
self.axis_list = axis_list
def on_step_done(
self, ret: Dict[str, bool], estim: EstimBase, params: Optional[OrderedDict],
value: Dict[str, float], arch_desc: Optional[Any] = None
) -> None:
"""Record Estimator evaluation result on each step."""
self.results.append((params, value))
def save_stats(self, ret: Dict[str, Any], estim: EstimBase, optim: OptimBase) -> Dict[str, Any]:
"""Save statistics on search end."""
results = self.results
if not results:
return
axis_list = self.axis_list
if axis_list is None:
metrics = list(results[0][1].keys())
axis_list = list(itertools.combinations(metrics, r=2))
self.logger.info('metrics stats: {} axis: {}'.format(len(results), axis_list))
for i, axis in enumerate(axis_list):
plt.figure(i)
axis_str = '-'.join(axis)
plt.title('metrics: {}'.format(axis_str))
values = [[res[1][ax] for res in results] for ax in axis]
plt.scatter(values[0], values[1])
plt.xlabel(axis[0])
plt.ylabel(axis[1])
plt.savefig(estim.expman.join('plot', 'metrics_{}.png'.format(axis_str)))
result_path = estim.expman.join('output', 'metrics_results.pkl')
FileOps.dump_pickle(results, result_path)
self.logger.info('metrics results saved to {}'.format(result_path))
self.results = []
return ret
| avg_line_length: 39.108108 | max_line_length: 100 | alphanum_fraction: 0.662405 |
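
The callback above records one (params, value) pair per Estimator step and, when the search run finishes, scatter-plots every pairwise combination of the reported metrics. A minimal stand-alone sketch of that plotting step, using made-up results and metric names and only itertools/matplotlib (no ModularNAS dependencies):

```python
import itertools
from matplotlib import pyplot as plt

plt.switch_backend('Agg')

# Hypothetical per-step results in the same shape on_step_done records: (params, {metric: value}).
results = [
    (None, {'acc': 0.71, 'lat': 12.0}),
    (None, {'acc': 0.74, 'lat': 15.5}),
    (None, {'acc': 0.69, 'lat': 9.8}),
]

metrics = list(results[0][1].keys())
for i, axis in enumerate(itertools.combinations(metrics, r=2)):
    plt.figure(i)
    xs = [res[1][axis[0]] for res in results]
    ys = [res[1][axis[1]] for res in results]
    plt.scatter(xs, ys)
    plt.xlabel(axis[0])
    plt.ylabel(axis[1])
    plt.savefig('metrics_{}.png'.format('-'.join(axis)))
```

Passing axis_list to the callback simply replaces the itertools.combinations default with an explicit list of metric pairs to plot.
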
| hexsha: 794b772c435a8d005001d784d0c1ed3078a9f98c | size: 1,218 | ext: py | lang: Python |
| repo: emperorDuke/nested_formdata | path: drf_nested_forms/parsers.py | head: f6850b5a3836dbc4af9a7d1104981f2198cc1a30 | licenses: ["MIT"] |
| stars: 12 (2020-03-30T05:41:37.000Z to 2022-02-08T17:52:14.000Z) | issues: 6 (2020-03-28T21:39:49.000Z to 2021-11-24T21:37:27.000Z) | forks: 2 (2021-05-14T17:36:42.000Z to 2021-06-06T19:33:04.000Z) |
from rest_framework.parsers import MultiPartParser, JSONParser
from .utils import NestedForm
from .settings import api_settings
class NestedMultiPartParser(MultiPartParser):
"""
    Parser for multipart form data that is nested and may also include files
"""
options = api_settings.OPTIONS
def parse(self, stream, media_type=None, parser_context=None):
parsed = super().parse(stream, media_type, parser_context)
# files and data have to be merged into one
if parsed.files:
self._full_data = parsed.data.copy()
self._full_data.update(parsed.files)
else:
self._full_data = parsed.data
form = NestedForm(self._full_data, **self.options)
if form.is_nested():
return form.data
return parsed
class NestedJSONParser(JSONParser):
"""
Parser for JSON data that is nested
"""
options = api_settings.OPTIONS
def parse(self, stream, media_type=None, parser_context=None):
parsed = super().parse(stream, media_type, parser_context)
form = NestedForm(parsed, **self.options)
if form.is_nested():
return form.data
return parsed
| avg_line_length: 25.914894 | max_line_length: 66 | alphanum_fraction: 0.659278 |
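
These parsers only differ from the stock DRF ones in un-flattening nested keys (e.g. "user[addresses][0][city]") after parsing. A usage sketch, assuming the package is importable as drf_nested_forms and using the standard Django REST Framework APIView API (the view and payload are hypothetical):

```python
from rest_framework import response, views

from drf_nested_forms.parsers import NestedJSONParser, NestedMultiPartParser


class ProfileView(views.APIView):
    # The nested parsers run the normal DRF parsing first, then rebuild nested dicts/lists.
    parser_classes = [NestedMultiPartParser, NestedJSONParser]

    def post(self, request):
        # request.data arrives already un-flattened when the payload used nested keys.
        return response.Response(request.data)
```
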
| hexsha: 794b77434f287fd6880ca3d2ac2739f0321059d6 | size: 5,180 | ext: py | lang: Python |
| repo: jina-ai/finetuner | path: tests/unit/tuner/pytorch/test_loss.py | head: 6b8701c6ca372310364e6791c1c2761700dfc150 | licenses: ["Apache-2.0"] |
| stars: 270 (2021-10-20T10:16:02.000Z to 2022-03-31T07:58:54.000Z) | issues: 211 (2021-10-20T12:08:52.000Z to 2022-03-28T12:53:59.000Z) | forks: 19 (2021-10-20T10:21:43.000Z to 2022-02-09T20:26:43.000Z) |
import numpy as np
import pytest
import torch
import torch.nn.functional as F
from finetuner.tuner.pytorch.losses import (
NTXentLoss,
SiameseLoss,
TripletLoss,
get_distance,
)
N_BATCH = 10
N_DIM = 128
ALL_LOSSES = [SiameseLoss, TripletLoss]
@pytest.mark.parametrize('margin', [0.0, 0.5, 1.0])
@pytest.mark.parametrize('distance', ['cosine', 'euclidean'])
@pytest.mark.parametrize('loss_cls', ALL_LOSSES)
def test_loss_output(loss_cls, distance, margin):
"""Test that we get a single positive number as output"""
loss = loss_cls(distance=distance, margin=margin)
labels = torch.ones((N_BATCH,))
labels[: N_BATCH // 2] = 0
embeddings = torch.rand((N_BATCH, N_DIM))
output = loss(embeddings, labels)
assert output.ndim == 0
assert output >= 0
@pytest.mark.parametrize('distance', ['cosine', 'euclidean'])
@pytest.mark.parametrize('loss_cls', ALL_LOSSES)
def test_loss_zero_same(loss_cls, distance):
"""Sanity check that with perfectly separated embeddings, loss is zero"""
# Might need to specialize this later
loss = loss_cls(distance=distance, margin=0.0)
labels = torch.ones((N_BATCH,))
labels[: N_BATCH // 2] = 0
embeddings = torch.ones((N_BATCH, N_DIM))
embeddings[: N_BATCH // 2] *= -1
output = loss(embeddings, labels)
np.testing.assert_almost_equal(output.item(), 0, decimal=5)
@pytest.mark.parametrize(
'loss_cls,indices,exp_result',
[
(SiameseLoss, [[0, 2], [1, 3], [0, 1]], 0.64142),
(TripletLoss, [[0, 2], [1, 3], [2, 1]], 0.9293),
],
)
def test_compute(loss_cls, indices, exp_result):
"""Check that the compute function returns numerically correct results"""
indices = [torch.tensor(x) for x in indices]
embeddings = torch.tensor([[0.1, 0.1], [0.2, 0.2], [0.4, 0.4], [0.7, 0.7]])
result = loss_cls(distance='euclidean').compute(embeddings, indices)
np.testing.assert_almost_equal(result.item(), exp_result, decimal=5)
@pytest.mark.parametrize(
'loss_cls',
[SiameseLoss, TripletLoss],
)
def test_compute_loss_given_insufficient_data(loss_cls):
indices = [torch.tensor([]) for _ in range(3)]
embeddings = torch.tensor([[0.0, 0.1, 0.2, 0.4]])
with pytest.raises(ValueError):
loss_cls(distance='euclidean').compute(embeddings, indices)
@pytest.mark.gpu
@pytest.mark.parametrize(
'loss_cls',
[SiameseLoss, TripletLoss],
)
def test_compute_loss_given_insufficient_data_gpu(loss_cls):
indices = [torch.tensor([]).to('cuda') for _ in range(3)]
embeddings = torch.tensor([[0.0, 0.1, 0.2, 0.4]]).to('cuda')
with pytest.raises(ValueError):
loss_cls(distance='euclidean').compute(embeddings, indices)
@pytest.mark.parametrize('labels', [[0, 1], [0, 0, 1], [0, 0, 0, 1, 1]])
def test_wrong_labels_ntxent_loss(labels):
"""Test cases where are not two views of each instance"""
labels = torch.tensor(labels)
embeddings = torch.randn((len(labels), 2))
loss_fn = NTXentLoss()
with pytest.raises(ValueError, match="There need to be two views"):
loss_fn(embeddings, labels)
@pytest.mark.parametrize('temp', [0.3, 0.5, 1.0])
@pytest.mark.parametrize('labels', [[0, 0, 1, 1], [0, 1, 0, 1], [0, 1, 2, 0, 1, 2]])
def test_correct_ntxent_loss(labels, temp):
"""Test that returned loss matches cross-entropy calculated semi-manually"""
labels_tensor = torch.tensor(labels)
embeddings = torch.randn((len(labels), 2))
loss_fn = NTXentLoss(temperature=temp)
# Compute losses manually
sim = (1 - get_distance(embeddings, 'cosine')) / temp
losses = []
for i in range(len(labels)):
exclude_self = [j for j in range(len(labels)) if j != i]
other_pos_ind = [labels[j] for j in exclude_self].index(labels[i])
losses.append(-F.log_softmax(sim[i, exclude_self], dim=0)[other_pos_ind])
np.testing.assert_approx_equal(
loss_fn(embeddings, labels_tensor).numpy(), np.mean(losses), 4
)
def test_requires_grad_ntxent_loss():
"""Test that requires_grad is perserved on returned loss"""
embeddings = torch.rand((4, 2), requires_grad=True)
labels = torch.tensor([0, 0, 1, 1])
loss = NTXentLoss()(embeddings, labels)
assert loss.requires_grad
@pytest.mark.gpu
@pytest.mark.parametrize('temp', [0.3, 0.5, 1.0])
@pytest.mark.parametrize('labels', [[0, 0, 1, 1], [0, 1, 0, 1], [0, 1, 2, 0, 1, 2]])
def test_correct_ntxent_loss_gpu(labels, temp):
"""Test that returned loss matches cross-entropy calculated semi-manually"""
labels_tensor = torch.tensor(labels, device='cuda')
embeddings = torch.randn((len(labels), 2), device='cuda')
loss_fn = NTXentLoss(temperature=temp)
# Compute losses manually
sim = (1 - get_distance(embeddings, 'cosine')) / temp
losses = []
for i in range(len(labels)):
exclude_self = [j for j in range(len(labels)) if j != i]
other_pos_ind = [labels[j] for j in exclude_self].index(labels[i])
losses.append(-F.log_softmax(sim[i, exclude_self], dim=0)[other_pos_ind].item())
np.testing.assert_approx_equal(
loss_fn(embeddings, labels_tensor).cpu().numpy(), np.mean(losses), 4
)
| avg_line_length: 33.419355 | max_line_length: 88 | alphanum_fraction: 0.668147 |
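
The semi-manual check in test_correct_ntxent_loss builds its similarity matrix from a pairwise cosine-distance matrix, get_distance(embeddings, 'cosine'). A small stand-alone sketch of such a matrix in plain PyTorch, assuming cosine distance is defined as 1 minus cosine similarity:

```python
import torch
import torch.nn.functional as F


def cosine_distance_matrix(embeddings: torch.Tensor) -> torch.Tensor:
    """Pairwise cosine distance (1 - cosine similarity) between rows of `embeddings`."""
    normed = F.normalize(embeddings, p=2, dim=1)
    return 1.0 - normed @ normed.t()


emb = torch.randn(4, 128)
dist = cosine_distance_matrix(emb)
assert dist.shape == (4, 4)
# Each embedding is at distance ~0 from itself, so the diagonal is (numerically) zero.
assert torch.allclose(torch.diagonal(dist), torch.zeros(4), atol=1e-6)
```
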
| hexsha: 794b77ed2dfdaf6bfe4dc4d1c97480fcf1af3248 | size: 10,866 | ext: py | lang: Python |
| repo: kevin1kevin1k/bokeh | path: bokeh/tests/test_transform.py | head: 9f34b5b710e2748ec803c12918ec1706098a3477 | licenses: ["BSD-3-Clause"] |
| stars: 6 (2020-02-21T15:50:34.000Z to 2021-11-09T19:45:50.000Z) | issues: 1 (2021-05-08T06:24:26.000Z to 2021-05-08T06:24:26.000Z) | forks: 2 (2019-08-28T14:57:54.000Z to 2019-11-26T16:18:30.000Z) |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
from bokeh.models import (
CategoricalColorMapper, CategoricalMarkerMapper, CategoricalPatternMapper, CumSum, Dodge,
FactorRange, Jitter, LinearColorMapper, LogColorMapper, Stack
)
from bokeh._testing.util.api import verify_all
# Module under test
import bokeh.transform as bt
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
ALL = (
'cumsum',
'dodge',
'factor_cmap',
'factor_hatch',
'factor_mark',
'jitter',
'linear_cmap',
'log_cmap',
'stack',
'transform',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
Test___all__ = verify_all(bt, ALL)
class Test_cumsum(object):
def test_basic(object):
s = bt.cumsum("foo")
assert isinstance(s, dict)
assert list(s.keys()) == ["expr"]
assert isinstance(s['expr'], CumSum)
assert s['expr'].field == 'foo'
assert s['expr'].include_zero == False
def test_include_zero(object):
s = bt.cumsum("foo", include_zero=True)
assert isinstance(s, dict)
assert list(s.keys()) == ["expr"]
assert isinstance(s['expr'], CumSum)
assert s['expr'].field == 'foo'
assert s['expr'].include_zero == True
class Test_dodge(object):
def test_basic(self):
t = bt.dodge("foo", 0.5)
assert isinstance(t, dict)
assert set(t) == {"field", "transform"}
assert t['field'] == "foo"
assert isinstance(t['transform'], Dodge)
assert t['transform'].value == 0.5
assert t['transform'].range is None
def test_with_range(self):
r = FactorRange("a")
t = bt.dodge("foo", 0.5, range=r)
assert isinstance(t, dict)
assert set(t) == {"field", "transform"}
assert t['field'] == "foo"
assert isinstance(t['transform'], Dodge)
assert t['transform'].value == 0.5
assert t['transform'].range is r
assert t['transform'].range.factors == ["a"]
class Test_factor_cmap(object):
def test_basic(self):
t = bt.factor_cmap("foo", ["red", "green"], ["foo", "bar"], start=1, end=2, nan_color="pink")
assert isinstance(t, dict)
assert set(t) == {"field", "transform"}
assert t['field'] == "foo"
assert isinstance(t['transform'], CategoricalColorMapper)
assert t['transform'].palette == ["red", "green"]
assert t['transform'].factors == ["foo", "bar"]
assert t['transform'].start == 1
assert t['transform'].end == 2
assert t['transform'].nan_color == "pink"
def test_defaults(self):
t = bt.factor_cmap("foo", ["red", "green"], ["foo", "bar"])
assert isinstance(t, dict)
assert set(t) == {"field", "transform"}
assert t['field'] == "foo"
assert isinstance(t['transform'], CategoricalColorMapper)
assert t['transform'].palette == ["red", "green"]
assert t['transform'].factors == ["foo", "bar"]
assert t['transform'].start == 0
assert t['transform'].end is None
assert t['transform'].nan_color == "gray"
class Test_factor_hatch(object):
def test_basic(self):
t = bt.factor_hatch("foo", ["+", "-"], ["foo", "bar"], start=1, end=2)
assert isinstance(t, dict)
assert set(t) == {"field", "transform"}
assert t['field'] == "foo"
assert isinstance(t['transform'], CategoricalPatternMapper)
assert t['transform'].patterns == ["+", "-"]
assert t['transform'].factors == ["foo", "bar"]
assert t['transform'].start == 1
assert t['transform'].end == 2
def test_defaults(self):
t = bt.factor_hatch("foo", ["+", "-"], ["foo", "bar"])
assert isinstance(t, dict)
assert set(t) == {"field", "transform"}
assert t['field'] == "foo"
assert isinstance(t['transform'], CategoricalPatternMapper)
assert t['transform'].patterns == ["+", "-"]
assert t['transform'].factors == ["foo", "bar"]
assert t['transform'].start == 0
assert t['transform'].end is None
class Test_factor_mark(object):
def test_basic(self):
t = bt.factor_mark("foo", ["hex", "square"], ["foo", "bar"], start=1, end=2)
assert isinstance(t, dict)
assert set(t) == {"field", "transform"}
assert t['field'] == "foo"
assert isinstance(t['transform'], CategoricalMarkerMapper)
assert t['transform'].markers == ["hex", "square"]
assert t['transform'].factors == ["foo", "bar"]
assert t['transform'].start == 1
assert t['transform'].end == 2
def test_defaults(self):
t = bt.factor_mark("foo", ["hex", "square"], ["foo", "bar"])
assert isinstance(t, dict)
assert set(t) == {"field", "transform"}
assert t['field'] == "foo"
assert isinstance(t['transform'], CategoricalMarkerMapper)
assert t['transform'].markers == ["hex", "square"]
assert t['transform'].factors == ["foo", "bar"]
assert t['transform'].start == 0
assert t['transform'].end is None
class Test_jitter(object):
def test_basic(self):
t = bt.jitter("foo", width=0.5, mean=0.1, distribution="normal")
assert isinstance(t, dict)
assert set(t) == {"field", "transform"}
assert t['field'] == "foo"
assert isinstance(t['transform'], Jitter)
assert t['transform'].width == 0.5
assert t['transform'].mean == 0.1
assert t['transform'].distribution == "normal"
assert t['transform'].range is None
def test_defaults(self):
t = bt.jitter("foo", width=0.5)
assert isinstance(t, dict)
assert set(t) == {"field", "transform"}
assert t['field'] == "foo"
assert isinstance(t['transform'], Jitter)
assert t['transform'].width == 0.5
assert t['transform'].mean == 0
assert t['transform'].distribution == "uniform"
assert t['transform'].range is None
def test_with_range(self):
r = FactorRange("a")
t = bt.jitter("foo", width=0.5, mean=0.1, range=r)
assert isinstance(t, dict)
assert set(t) == {"field", "transform"}
assert t['field'] == "foo"
assert isinstance(t['transform'], Jitter)
assert t['transform'].width == 0.5
assert t['transform'].mean == 0.1
assert t['transform'].distribution == "uniform"
assert t['transform'].range is r
assert t['transform'].range.factors == ["a"]
class Test_linear_cmap(object):
def test_basic(self):
t = bt.linear_cmap("foo", ["red", "green"], 0, 10, low_color="orange", high_color="blue", nan_color="pink")
assert isinstance(t, dict)
assert set(t) == {"field", "transform"}
assert t['field'] == "foo"
assert isinstance(t['transform'], LinearColorMapper)
assert t['transform'].palette == ["red", "green"]
assert t['transform'].low == 0
assert t['transform'].high == 10
assert t['transform'].low_color == "orange"
assert t['transform'].high_color == "blue"
assert t['transform'].nan_color == "pink"
def test_defaults(self):
t = bt.linear_cmap("foo", ["red", "green"], 0, 10)
assert isinstance(t, dict)
assert set(t) == {"field", "transform"}
assert t['field'] == "foo"
assert isinstance(t['transform'], LinearColorMapper)
assert t['transform'].palette == ["red", "green"]
assert t['transform'].low == 0
assert t['transform'].high == 10
assert t['transform'].low_color is None
assert t['transform'].high_color is None
assert t['transform'].nan_color == "gray"
class Test_log_cmap(object):
def test_basic(self):
t = bt.log_cmap("foo", ["red", "green"], 0, 10, low_color="orange", high_color="blue", nan_color="pink")
assert isinstance(t, dict)
assert set(t) == {"field", "transform"}
assert t['field'] == "foo"
assert isinstance(t['transform'], LogColorMapper)
assert t['transform'].palette == ["red", "green"]
assert t['transform'].low == 0
assert t['transform'].high == 10
assert t['transform'].low_color == "orange"
assert t['transform'].high_color == "blue"
assert t['transform'].nan_color == "pink"
def test_defaults(self):
t = bt.log_cmap("foo", ["red", "green"], 0, 10)
assert isinstance(t, dict)
assert set(t) == {"field", "transform"}
assert t['field'] == "foo"
assert isinstance(t['transform'], LogColorMapper)
assert t['transform'].palette == ["red", "green"]
assert t['transform'].low == 0
assert t['transform'].high == 10
assert t['transform'].low_color is None
assert t['transform'].high_color is None
assert t['transform'].nan_color == "gray"
class Test_stack(object):
def test_basic(object):
s = bt.stack("foo", "junk")
assert isinstance(s, dict)
assert list(s.keys()) == ["expr"]
assert isinstance(s['expr'], Stack)
assert s['expr'].fields == ('foo', 'junk')
class Test_transform(object):
def test_basic(object):
t = bt.transform("foo", "junk")
assert t == dict(field="foo", transform="junk")
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| avg_line_length: 37.340206 | max_line_length: 115 | alphanum_fraction: 0.513252 |
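
The tests above only assert on the {'field', 'transform'} dict that the bokeh.transform helpers return; in plotting code that dict is passed straight to a glyph property. A short usage sketch with the public Bokeh API (the data values are made up):

```python
from bokeh.models import ColumnDataSource
from bokeh.plotting import figure
from bokeh.transform import factor_cmap

fruits = ["apples", "pears", "plums"]
source = ColumnDataSource(data=dict(fruit=fruits, count=[5, 3, 4]))

p = figure(x_range=fruits)
# factor_cmap returns {'field': 'fruit', 'transform': CategoricalColorMapper(...)},
# which vbar accepts directly as its fill_color spec.
p.vbar(x="fruit", top="count", width=0.8, source=source,
       fill_color=factor_cmap("fruit", palette=["red", "green", "blue"], factors=fruits))
```
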
| hexsha: 794b77ff945c9833cfbbd6cb860171a4c3b3f1fb | size: 560 | ext: py | lang: Python |
| repo: benchittle/misc | path: Python/Chittle_Ben_FSE 2019-2020/0.3.3/ui.py | head: a5332f3696d36609154713bdc5e6367b73fdb8be | licenses: ["MIT"] |
| stars: null | issues: null | forks: null |
"""
Date: April 9, 2020
Version: 0.3.3
>Added this module:
"""
import pygame as pg
import config
class Hotbar:
def __init__(self, game):
self.game = game
self.image = pg.Surface(config.HOTBAR_SIZE)
self.rect = self.image.get_rect(midleft=(0, config.SCREEN_HEIGHT / 2))
self.image.fill(config.WHITE)
self.image.fill(config.BLACK, (0, 4, self.rect.w - 4, self.rect.h - 8))
def update(self):
pass
def main():
print("RUN THE OTHER ONE DAMMIT BEN")
if __name__ == "__main__":
main()
| avg_line_length: 14.358974 | max_line_length: 79 | alphanum_fraction: 0.608929 |
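
Hotbar draws a white surface with a black inset panel and anchors it to the middle of the screen's left edge, using constants from a config module that is not part of this file. A stand-alone sketch of the same drawing logic with hypothetical stand-ins for those constants:

```python
import pygame as pg

# Hypothetical stand-ins for the config module referenced above.
SCREEN_WIDTH, SCREEN_HEIGHT = 640, 480
HOTBAR_SIZE = (48, 200)
WHITE, BLACK = (255, 255, 255), (0, 0, 0)

pg.init()
screen = pg.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))

# Same drawing as Hotbar.__init__: a white border with a black inset panel,
# anchored to the middle of the left screen edge.
image = pg.Surface(HOTBAR_SIZE)
rect = image.get_rect(midleft=(0, SCREEN_HEIGHT / 2))
image.fill(WHITE)
image.fill(BLACK, (0, 4, rect.w - 4, rect.h - 8))

screen.blit(image, rect)
pg.display.flip()
pg.quit()
```
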
| hexsha: 794b791cba4f53e6c50dabbb3ba818fa62a00905 | size: 8,037 | ext: py | lang: Python |
| repo: yater/nix-gui | path: nixui/graphics/generic_widgets.py | head: e7d1137ff85ef6bfe33ccc7cd8eee944475d4217 | licenses: ["MIT"] |
| stars: null | issues: null | forks: null |
from PyQt5 import QtWidgets, QtCore, QtGui
from nixui.graphics import richtext, icon
class ReplacableWidget(QtWidgets.QStackedWidget):
def __init__(self, starting_widget=None):
super().__init__()
starting_widget = starting_widget or QtWidgets.QLabel()
self.current_widget = starting_widget
self.addWidget(self.current_widget)
def replace_widget(self, widget):
old_widget = self.current_widget
self.addWidget(widget)
self.setCurrentWidget(widget)
self.removeWidget(old_widget)
self.current_widget = widget
class ExclusiveButtonGroup(QtWidgets.QFrame):
selection_changed = QtCore.pyqtSignal(str)
def __init__(self, choices=[], vertical=True):
super().__init__()
if vertical:
layout = QtWidgets.QVBoxLayout(self)
else:
layout = QtWidgets.QHBoxLayout(self)
self.btn_group = QtWidgets.QButtonGroup()
self.btn_group.setExclusive(True)
for i, (choice, handler, color) in enumerate(choices):
btn = QtWidgets.QPushButton(choice)
p = btn.palette()
p.setColor(btn.backgroundRole(), color)
btn.setPalette(p)
btn.setCheckable(True)
btn.clicked.connect(handler)
layout.addWidget(btn)
self.btn_group.addButton(btn, id=i)
layout.setSpacing(0)
self.setLayout(layout)
def select(self, idx):
self.btn_group.buttons()[idx].click()
def checked_index(self):
return self.btn_group.checkedId()
def SeparatorLine():
line = QtWidgets.QFrame()
line.setFrameShape(QtWidgets.QFrame.HLine)
line.setFrameShadow(QtWidgets.QFrame.Sunken)
return line
# modified version of https://github.com/abrytanczyk/JPWP---zadania
class StringListEditorWidget(QtWidgets.QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
layout = QtWidgets.QHBoxLayout(self)
buttons_layout = QtWidgets.QVBoxLayout(self)
self.list_widget = QtWidgets.QListWidget(self)
self.list_widget.itemSelectionChanged.connect(self.item_selection_changed)
self.add_btn = QtWidgets.QPushButton("", self)
self.add_btn.setIcon(icon.get_icon('plus.png'))
self.add_btn.clicked.connect(self.add_clicked)
self.edit_btn = QtWidgets.QPushButton("", self)
self.edit_btn.setIcon(icon.get_icon('edit.png'))
self.edit_btn.clicked.connect(self.edit_clicked)
self.remove_btn = QtWidgets.QPushButton("", self)
self.remove_btn.setIcon(icon.get_icon('trash.png'))
self.remove_btn.clicked.connect(self.remove_clicked)
buttons_layout.addWidget(self.add_btn)
buttons_layout.addWidget(self.edit_btn)
buttons_layout.addWidget(self.remove_btn)
buttons_layout.addStretch()
layout.addWidget(self.list_widget)
layout.addLayout(buttons_layout)
self.setLayout(layout)
def add_item(self, item):
self.list_widget.addItem(QtWidgets.QListWidgetItem(item, self.list_widget))
def update_buttons(self):
any_items = self.list_widget.count() > 0
self.edit_btn.setEnabled(any_items)
self.remove_btn.setEnabled(any_items)
self.update_list()
def update_list(self):
new_arguments = []
for i in range(self.list_widget.count()):
new_arguments.append(self.list_widget.item(i).text())
self.string_list.clear()
self.string_list.extend(new_arguments)
def item_selection_changed(self, *args):
self.update_buttons()
def add_clicked(self):
text, okPressed = QtWidgets.QInputDialog.getText(self, "Add Item", "Item Value:", QtWidgets.QLineEdit.Normal, "")
if okPressed and text != '' and not str.isspace(text):
self.list_widget.addItem(QtWidgets.QListWidgetItem(text, self.list_widget))
self.list_widget.setCurrentRow(self.list_widget.count() - 1)
self.list_widget.scrollToItem(self.list_widget.currentItem())
self.update_buttons()
def edit_clicked(self):
current = self.list_widget.currentItem()
original = current.text()
if str.isspace(original) or original == '':
self.add_clicked()
else:
text, okPressed = QtWidgets.QInputDialog.getText(self, "Edit Item", "Item Value:", QtWidgets.QLineEdit.Normal, original)
if okPressed and text != '' and not str.isspace(text):
current.setText(text)
self.update_buttons()
def remove_clicked(self):
current = self.list_widget.currentItem()
original = current.text()
if original == '' or \
str.isspace(original) or \
QtWidgets.QMessageBox.question(self, "Remove", f"Remove Item: `{original}`",
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,
QtWidgets.QMessageBox.Yes) == QtWidgets.QMessageBox.Yes:
self.list_widget.takeItem(self.list_widget.currentRow())
self.update_buttons()
class ScrollListStackSelector(QtWidgets.QWidget):
ItemCls = QtWidgets.QListWidgetItem
ListCls = QtWidgets.QListWidget
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.current_item = None
# setup stack
self.current_widget = QtWidgets.QLabel()
self.stack = QtWidgets.QStackedWidget()
self.stack.addWidget(self.current_widget)
self.item_list = self.ListCls()
self.insert_items()
self.item_list.currentItemChanged.connect(self.change_selected_item)
self.item_list.setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.MinimumExpanding)
self.item_list.setItemDelegate(richtext.OptionListItemDelegate())
self.item_list.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.item_list.setMinimumWidth(self.item_list.sizeHintForColumn(0))
self.nav_layout = QtWidgets.QVBoxLayout()
if self.get_title():
self.nav_layout.addWidget(
QtWidgets.QLabel(self.get_title())
)
self.nav_layout.addWidget(self.item_list)
self.hbox = QtWidgets.QHBoxLayout()
self.hbox.setSpacing(0)
self.hbox.setContentsMargins(0, 0, 0, 0)
self.hbox.addLayout(self.nav_layout)
self.hbox.addWidget(self.stack)
self.set_layout()
def get_title(self):
return None
def set_layout(self):
self.setLayout(self.hbox)
class OptionListItem(QtWidgets.QListWidgetItem):
# TODO: remove this widget and break dependency with diff_widget.py by considering diff widget as part of navlist
def __init__(self, option, icon_path=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.option = option
self.set_text()
if icon_path:
self.setIcon(QtGui.QIcon(icon_path))
def set_text(self):
self.setText(richtext.get_option_html(self.option))
class ToolTip(QtWidgets.QLabel):
def __init__(self, text, *args, **kwargs):
super().__init__()
self.setPixmap(
icon.get_pixmap('info_circle.png').scaled(30, 30)
)
self.setToolTip(text)
class ClickableLabel(QtWidgets.QLabel):
clicked = QtCore.pyqtSignal()
def mousePressEvent(self, ev):
self.clicked.emit()
def enterEvent(self, ev):
f = self.font()
f.setUnderline(True)
self.setFont(f)
def leaveEvent(self, ev):
f = self.font()
f.setUnderline(False)
self.setFont(f)
class CenteredContainer(QtWidgets.QWidget):
def __init__(self, child_widget):
super().__init__()
layout = QtWidgets.QHBoxLayout()
layout.setAlignment(QtCore.Qt.AlignVCenter)
layout.addWidget(child_widget)
self.setLayout(layout)
| avg_line_length: 32.148 | max_line_length: 132 | alphanum_fraction: 0.6541 |
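
ReplacableWidget is a thin wrapper over QStackedWidget that keeps exactly one visible child and swaps it in place. A minimal sketch of the same swap pattern with plain PyQt5 (no nixui imports; widget contents are made up):

```python
import sys
from PyQt5 import QtWidgets

app = QtWidgets.QApplication(sys.argv)

stack = QtWidgets.QStackedWidget()
current = QtWidgets.QLabel("placeholder")
stack.addWidget(current)


def replace_widget(new_widget):
    """Swap the visible child: add the new one, show it, then drop the old one."""
    global current
    old = current
    stack.addWidget(new_widget)
    stack.setCurrentWidget(new_widget)
    stack.removeWidget(old)
    current = new_widget


replace_widget(QtWidgets.QLabel("replacement"))
stack.show()
# app.exec_()  # uncomment to run the event loop
```
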
| hexsha: 794b79724346b2f622f7afbde9c9e2daf696b037 | size: 860 | ext: py | lang: Python |
| repo: YinAoXiong/ZCls | path: zcls/config/configs/dataloader.py | head: 8aeea3640f8456937db35d043e37cf2c03ac9017 | licenses: ["Apache-2.0"] |
| stars: null | issues: null | forks: null |
# -*- coding: utf-8 -*-
"""
@date: 2020/11/25 6:50 PM
@file: dataloader.py
@author: zj
@description:
"""
from yacs.config import CfgNode as CN
def add_config(_C):
# ---------------------------------------------------------------------------- #
# DataLoader
# ---------------------------------------------------------------------------- #
_C.DATALOADER = CN()
# batch size per GPU
_C.DATALOADER.TRAIN_BATCH_SIZE = 16
_C.DATALOADER.TEST_BATCH_SIZE = 16
    # refer to [num_workers in torch DataLoader](https://zhuanlan.zhihu.com/p/69250939)
_C.DATALOADER.NUM_WORKERS = 2
# overlapped prefetching cpu->gpu memory copy
_C.DATALOADER.PREFETCHER = False
# shuffle dataset per epoch
_C.DATALOADER.SHUFFLE = True
# random sample or sequential sample in train/test stage
_C.DATALOADER.RANDOM_SAMPLE = True
| avg_line_length: 26.060606 | max_line_length: 86 | alphanum_fraction: 0.55814 |
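
add_config only attaches a DATALOADER node to whatever CfgNode it is handed. A short sketch of how it would typically be wired up and overridden, assuming the module is importable as zcls.config.configs.dataloader and yacs is installed (the override list is made up):

```python
from yacs.config import CfgNode as CN

from zcls.config.configs.dataloader import add_config

# Build a root config and let the module above attach its DATALOADER section.
_C = CN()
add_config(_C)

print(_C.DATALOADER.TRAIN_BATCH_SIZE)  # 16 (the default set above)

# Typical yacs-style override, e.g. assembled from command-line arguments.
_C.merge_from_list(['DATALOADER.TRAIN_BATCH_SIZE', 64, 'DATALOADER.NUM_WORKERS', 4])
print(_C.DATALOADER.TRAIN_BATCH_SIZE)  # 64
```
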
| hexsha: 794b79a7b1daedaab2c4d458753347ed1bfbbc6e | size: 40,105 | ext: py | lang: Python |
| repo: jene4ekjene4ek/my_mlflow | path: mlflow/store/tracking/sqlalchemy_store.py | head: 1ac1b5d6657789168253101ae3a8477cff54dd9e | licenses: ["Apache-2.0"] |
| stars: 1 (2021-01-10T14:00:57.000Z to 2021-01-10T14:00:57.000Z) | issues: 5 (2019-02-19T18:17:03.000Z to 2019-07-12T06:48:03.000Z) | forks: 1 (2020-12-29T18:01:42.000Z to 2020-12-29T18:01:42.000Z) |
import json
import logging
import uuid
import math
import sqlalchemy
import sqlalchemy.sql.expression as sql
from mlflow.entities.lifecycle_stage import LifecycleStage
from mlflow.models import Model
from mlflow.store.tracking import SEARCH_MAX_RESULTS_THRESHOLD
from mlflow.store.db.db_types import MYSQL, MSSQL
import mlflow.store.db.utils
from mlflow.store.tracking.dbmodels.models import SqlExperiment, SqlRun, \
SqlMetric, SqlParam, SqlTag, SqlExperimentTag, SqlLatestMetric
from mlflow.store.db.base_sql_model import Base
from mlflow.entities import RunStatus, SourceType, Experiment
from mlflow.store.tracking.abstract_store import AbstractStore
from mlflow.entities import ViewType
from mlflow.exceptions import MlflowException
from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE, RESOURCE_ALREADY_EXISTS, \
INVALID_STATE, RESOURCE_DOES_NOT_EXIST, INTERNAL_ERROR
from mlflow.utils.uri import is_local_uri, extract_db_type_from_uri
from mlflow.utils.file_utils import mkdir, local_file_uri_to_path
from mlflow.utils.search_utils import SearchUtils
from mlflow.utils.string_utils import is_string_type
from mlflow.utils.uri import append_to_uri_path
from mlflow.utils.validation import _validate_batch_log_limits, _validate_batch_log_data, \
_validate_run_id, _validate_metric, _validate_experiment_tag, _validate_tag
from mlflow.utils.mlflow_tags import MLFLOW_LOGGED_MODELS
_logger = logging.getLogger(__name__)
# For each database table, fetch its columns and define an appropriate attribute for each column
# on the table's associated object representation (Mapper). This is necessary to ensure that
# columns defined via backreference are available as Mapper instance attributes (e.g.,
# ``SqlExperiment.tags`` and ``SqlRun.params``). For more information, see
# https://docs.sqlalchemy.org/en/latest/orm/mapping_api.html#sqlalchemy.orm.configure_mappers
# and https://docs.sqlalchemy.org/en/latest/orm/mapping_api.html#sqlalchemy.orm.mapper.Mapper
sqlalchemy.orm.configure_mappers()
class SqlAlchemyStore(AbstractStore):
"""
SQLAlchemy compliant backend store for tracking meta data for MLflow entities. MLflow
supports the database dialects ``mysql``, ``mssql``, ``sqlite``, and ``postgresql``.
As specified in the
`SQLAlchemy docs <https://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls>`_ ,
the database URI is expected in the format
``<dialect>+<driver>://<username>:<password>@<host>:<port>/<database>``. If you do not
specify a driver, SQLAlchemy uses a dialect's default driver.
This store interacts with SQL store using SQLAlchemy abstractions defined for MLflow entities.
:py:class:`mlflow.store.dbmodels.models.SqlExperiment`,
:py:class:`mlflow.store.dbmodels.models.SqlRun`,
:py:class:`mlflow.store.dbmodels.models.SqlTag`,
:py:class:`mlflow.store.dbmodels.models.SqlMetric`, and
:py:class:`mlflow.store.dbmodels.models.SqlParam`.
Run artifacts are stored in a separate location using artifact stores conforming to
:py:class:`mlflow.store.artifact_repo.ArtifactRepository`. Default artifact locations for
user experiments are stored in the database along with metadata. Each run artifact location
is recorded in :py:class:`mlflow.store.dbmodels.models.SqlRun` and stored in the backend DB.
"""
ARTIFACTS_FOLDER_NAME = "artifacts"
DEFAULT_EXPERIMENT_ID = "0"
def __init__(self, db_uri, default_artifact_root):
"""
Create a database backed store.
:param db_uri: The SQLAlchemy database URI string to connect to the database. See
the `SQLAlchemy docs
<https://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls>`_
for format specifications. Mlflow supports the dialects ``mysql``,
``mssql``, ``sqlite``, and ``postgresql``.
:param default_artifact_root: Path/URI to location suitable for large data (such as a blob
store object, DBFS path, or shared NFS file system).
"""
super(SqlAlchemyStore, self).__init__()
self.db_uri = db_uri
self.db_type = extract_db_type_from_uri(db_uri)
self.artifact_root_uri = default_artifact_root
self.engine = mlflow.store.db.utils.create_sqlalchemy_engine(db_uri)
# On a completely fresh MLflow installation against an empty database (verify database
# emptiness by checking that 'experiments' etc aren't in the list of table names), run all
# DB migrations
expected_tables = [
SqlExperiment.__tablename__,
SqlRun.__tablename__,
SqlMetric.__tablename__,
SqlParam.__tablename__,
SqlTag.__tablename__,
SqlExperimentTag.__tablename__,
SqlLatestMetric.__tablename__,
]
inspected_tables = set(sqlalchemy.inspect(self.engine).get_table_names())
if any([table not in inspected_tables for table in expected_tables]):
mlflow.store.db.utils._initialize_tables(self.engine)
Base.metadata.bind = self.engine
SessionMaker = sqlalchemy.orm.sessionmaker(bind=self.engine)
self.ManagedSessionMaker = mlflow.store.db.utils._get_managed_session_maker(SessionMaker,
self.db_type)
mlflow.store.db.utils._verify_schema(self.engine)
if is_local_uri(default_artifact_root):
mkdir(local_file_uri_to_path(default_artifact_root))
if len(self.list_experiments()) == 0:
with self.ManagedSessionMaker() as session:
self._create_default_experiment(session)
def _set_zero_value_insertion_for_autoincrement_column(self, session):
if self.db_type == MYSQL:
# config letting MySQL override default
# to allow 0 value for experiment ID (auto increment column)
session.execute("SET @@SESSION.sql_mode='NO_AUTO_VALUE_ON_ZERO';")
if self.db_type == MSSQL:
# config letting MSSQL override default
# to allow any manual value inserted into IDENTITY column
session.execute("SET IDENTITY_INSERT experiments ON;")
# DB helper methods to allow zero values for columns with auto increments
def _unset_zero_value_insertion_for_autoincrement_column(self, session):
if self.db_type == MYSQL:
session.execute("SET @@SESSION.sql_mode='';")
if self.db_type == MSSQL:
session.execute("SET IDENTITY_INSERT experiments OFF;")
def _create_default_experiment(self, session):
"""
MLflow UI and client code expects a default experiment with ID 0.
        This method uses a SQL INSERT statement to create the default experiment as a hack, since
        the experiment table's 'experiment_id' column is a PK and is also set to auto increment.
        MySQL and other implementations do not allow the value '0' for such cases.
ToDo: Identify a less hacky mechanism to create default experiment 0
"""
table = SqlExperiment.__tablename__
default_experiment = {
SqlExperiment.experiment_id.name: int(SqlAlchemyStore.DEFAULT_EXPERIMENT_ID),
SqlExperiment.name.name: Experiment.DEFAULT_EXPERIMENT_NAME,
SqlExperiment.artifact_location.name: str(self._get_artifact_location(0)),
SqlExperiment.lifecycle_stage.name: LifecycleStage.ACTIVE
}
def decorate(s):
if is_string_type(s):
return "'{}'".format(s)
else:
return "{}".format(s)
# Get a list of keys to ensure we have a deterministic ordering
columns = list(default_experiment.keys())
values = ", ".join([decorate(default_experiment.get(c)) for c in columns])
try:
self._set_zero_value_insertion_for_autoincrement_column(session)
session.execute("INSERT INTO {} ({}) VALUES ({});".format(
table, ", ".join(columns), values))
finally:
self._unset_zero_value_insertion_for_autoincrement_column(session)
def _save_to_db(self, session, objs):
"""
Store in db
"""
if type(objs) is list:
session.add_all(objs)
else:
# single object
session.add(objs)
def _get_or_create(self, session, model, **kwargs):
instance = session.query(model).filter_by(**kwargs).first()
created = False
if instance:
return instance, created
else:
instance = model(**kwargs)
self._save_to_db(objs=instance, session=session)
created = True
return instance, created
def _get_artifact_location(self, experiment_id):
return append_to_uri_path(self.artifact_root_uri, str(experiment_id))
def create_experiment(self, name, artifact_location=None):
if name is None or name == '':
raise MlflowException('Invalid experiment name', INVALID_PARAMETER_VALUE)
with self.ManagedSessionMaker() as session:
try:
experiment = SqlExperiment(
name=name, lifecycle_stage=LifecycleStage.ACTIVE,
artifact_location=artifact_location
)
session.add(experiment)
if not artifact_location:
# this requires a double write. The first one to generate an autoincrement-ed ID
eid = session.query(SqlExperiment).filter_by(name=name).first().experiment_id
experiment.artifact_location = self._get_artifact_location(eid)
except sqlalchemy.exc.IntegrityError as e:
raise MlflowException('Experiment(name={}) already exists. '
'Error: {}'.format(name, str(e)), RESOURCE_ALREADY_EXISTS)
session.flush()
return str(experiment.experiment_id)
def _list_experiments(self, session, ids=None, names=None, view_type=ViewType.ACTIVE_ONLY,
eager=False):
"""
        :param eager: If ``True``, eagerly loads each experiment's tags. If ``False``, these tags
are not eagerly loaded and will be loaded if/when their corresponding
object properties are accessed from a resulting ``SqlExperiment`` object.
"""
stages = LifecycleStage.view_type_to_stages(view_type)
conditions = [SqlExperiment.lifecycle_stage.in_(stages)]
if ids and len(ids) > 0:
int_ids = [int(eid) for eid in ids]
conditions.append(SqlExperiment.experiment_id.in_(int_ids))
if names and len(names) > 0:
conditions.append(SqlExperiment.name.in_(names))
query_options = self._get_eager_experiment_query_options() if eager else []
return session \
.query(SqlExperiment) \
.options(*query_options) \
.filter(*conditions) \
.all()
def list_experiments(self, view_type=ViewType.ACTIVE_ONLY):
with self.ManagedSessionMaker() as session:
return [exp.to_mlflow_entity() for exp in
self._list_experiments(session=session, view_type=view_type, eager=True)]
def _get_experiment(self, session, experiment_id, view_type, eager=False):
"""
        :param eager: If ``True``, eagerly loads the experiment's tags. If ``False``, these tags
are not eagerly loaded and will be loaded if/when their corresponding
object properties are accessed from the resulting ``SqlExperiment`` object.
"""
experiment_id = experiment_id or SqlAlchemyStore.DEFAULT_EXPERIMENT_ID
stages = LifecycleStage.view_type_to_stages(view_type)
query_options = self._get_eager_experiment_query_options() if eager else []
experiment = session \
.query(SqlExperiment) \
.options(*query_options) \
.filter(
SqlExperiment.experiment_id == experiment_id,
SqlExperiment.lifecycle_stage.in_(stages)) \
.one_or_none()
if experiment is None:
raise MlflowException('No Experiment with id={} exists'.format(experiment_id),
RESOURCE_DOES_NOT_EXIST)
return experiment
@staticmethod
def _get_eager_experiment_query_options():
"""
:return: A list of SQLAlchemy query options that can be used to eagerly load the following
experiment attributes when fetching an experiment: ``tags``.
"""
return [
# Use a subquery load rather than a joined load in order to minimize the memory overhead
# of the eager loading procedure. For more information about relationship loading
# techniques, see https://docs.sqlalchemy.org/en/13/orm/
# loading_relationships.html#relationship-loading-techniques
sqlalchemy.orm.subqueryload(SqlExperiment.tags),
]
def get_experiment(self, experiment_id):
with self.ManagedSessionMaker() as session:
return self._get_experiment(
session, experiment_id, ViewType.ALL, eager=True).to_mlflow_entity()
def get_experiment_by_name(self, experiment_name):
"""
Specialized implementation for SQL backed store.
"""
with self.ManagedSessionMaker() as session:
stages = LifecycleStage.view_type_to_stages(ViewType.ALL)
experiment = session \
.query(SqlExperiment) \
.options(*self._get_eager_experiment_query_options()) \
.filter(
SqlExperiment.name == experiment_name,
SqlExperiment.lifecycle_stage.in_(stages)) \
.one_or_none()
return experiment.to_mlflow_entity() if experiment is not None else None
def delete_experiment(self, experiment_id):
with self.ManagedSessionMaker() as session:
experiment = self._get_experiment(session, experiment_id, ViewType.ACTIVE_ONLY)
experiment.lifecycle_stage = LifecycleStage.DELETED
self._save_to_db(objs=experiment, session=session)
def restore_experiment(self, experiment_id):
with self.ManagedSessionMaker() as session:
experiment = self._get_experiment(session, experiment_id, ViewType.DELETED_ONLY)
experiment.lifecycle_stage = LifecycleStage.ACTIVE
self._save_to_db(objs=experiment, session=session)
def rename_experiment(self, experiment_id, new_name):
with self.ManagedSessionMaker() as session:
experiment = self._get_experiment(session, experiment_id, ViewType.ALL)
if experiment.lifecycle_stage != LifecycleStage.ACTIVE:
raise MlflowException('Cannot rename a non-active experiment.', INVALID_STATE)
experiment.name = new_name
self._save_to_db(objs=experiment, session=session)
def create_run(self, experiment_id, user_id, start_time, tags):
with self.ManagedSessionMaker() as session:
experiment = self.get_experiment(experiment_id)
self._check_experiment_is_active(experiment)
run_id = uuid.uuid4().hex
artifact_location = append_to_uri_path(experiment.artifact_location, run_id,
SqlAlchemyStore.ARTIFACTS_FOLDER_NAME)
run = SqlRun(name="", artifact_uri=artifact_location, run_uuid=run_id,
experiment_id=experiment_id,
source_type=SourceType.to_string(SourceType.UNKNOWN),
source_name="", entry_point_name="",
user_id=user_id, status=RunStatus.to_string(RunStatus.RUNNING),
start_time=start_time, end_time=None,
source_version="", lifecycle_stage=LifecycleStage.ACTIVE)
tags_dict = {}
for tag in tags:
tags_dict[tag.key] = tag.value
run.tags = [SqlTag(key=key, value=value) for key, value in tags_dict.items()]
self._save_to_db(objs=run, session=session)
return run.to_mlflow_entity()
def _get_run(self, session, run_uuid, eager=False):
"""
:param eager: If ``True``, eagerly loads the run's summary metrics (``latest_metrics``),
params, and tags when fetching the run. If ``False``, these attributes
are not eagerly loaded and will be loaded when their corresponding
object properties are accessed from the resulting ``SqlRun`` object.
"""
query_options = self._get_eager_run_query_options() if eager else []
runs = session \
.query(SqlRun) \
.options(*query_options) \
.filter(SqlRun.run_uuid == run_uuid).all()
if len(runs) == 0:
raise MlflowException('Run with id={} not found'.format(run_uuid),
RESOURCE_DOES_NOT_EXIST)
if len(runs) > 1:
raise MlflowException('Expected only 1 run with id={}. Found {}.'.format(run_uuid,
len(runs)),
INVALID_STATE)
return runs[0]
@staticmethod
def _get_eager_run_query_options():
"""
:return: A list of SQLAlchemy query options that can be used to eagerly load the following
run attributes when fetching a run: ``latest_metrics``, ``params``, and ``tags``.
"""
return [
# Use a subquery load rather than a joined load in order to minimize the memory overhead
# of the eager loading procedure. For more information about relationship loading
# techniques, see https://docs.sqlalchemy.org/en/13/orm/
# loading_relationships.html#relationship-loading-techniques
sqlalchemy.orm.subqueryload(SqlRun.latest_metrics),
sqlalchemy.orm.subqueryload(SqlRun.params),
sqlalchemy.orm.subqueryload(SqlRun.tags),
]
def _check_run_is_active(self, run):
if run.lifecycle_stage != LifecycleStage.ACTIVE:
raise MlflowException("The run {} must be in the 'active' state. Current state is {}."
.format(run.run_uuid, run.lifecycle_stage),
INVALID_PARAMETER_VALUE)
def _check_experiment_is_active(self, experiment):
if experiment.lifecycle_stage != LifecycleStage.ACTIVE:
raise MlflowException("The experiment {} must be in the 'active' state. "
"Current state is {}."
.format(experiment.experiment_id, experiment.lifecycle_stage),
INVALID_PARAMETER_VALUE)
def _check_run_is_deleted(self, run):
if run.lifecycle_stage != LifecycleStage.DELETED:
raise MlflowException("The run {} must be in the 'deleted' state. Current state is {}."
.format(run.run_uuid, run.lifecycle_stage),
INVALID_PARAMETER_VALUE)
def update_run_info(self, run_id, run_status, end_time):
with self.ManagedSessionMaker() as session:
run = self._get_run(run_uuid=run_id, session=session)
self._check_run_is_active(run)
run.status = RunStatus.to_string(run_status)
run.end_time = end_time
self._save_to_db(objs=run, session=session)
run = run.to_mlflow_entity()
return run.info
def _try_get_run_tag(self, session, run_id, tagKey, eager=False):
query_options = self._get_eager_run_query_options() if eager else []
tags = session \
.query(SqlTag) \
.options(*query_options) \
.filter(SqlTag.run_uuid == run_id and SqlTag.key == tagKey).all()
return None if not tags else tags[0]
def get_run(self, run_id):
with self.ManagedSessionMaker() as session:
# Load the run with the specified id and eagerly load its summary metrics, params, and
# tags. These attributes are referenced during the invocation of
# ``run.to_mlflow_entity()``, so eager loading helps avoid additional database queries
# that are otherwise executed at attribute access time under a lazy loading model.
run = self._get_run(run_uuid=run_id, session=session, eager=True)
return run.to_mlflow_entity()
def restore_run(self, run_id):
with self.ManagedSessionMaker() as session:
run = self._get_run(run_uuid=run_id, session=session)
self._check_run_is_deleted(run)
run.lifecycle_stage = LifecycleStage.ACTIVE
self._save_to_db(objs=run, session=session)
def delete_run(self, run_id):
with self.ManagedSessionMaker() as session:
run = self._get_run(run_uuid=run_id, session=session)
self._check_run_is_active(run)
run.lifecycle_stage = LifecycleStage.DELETED
self._save_to_db(objs=run, session=session)
def _hard_delete_run(self, run_id):
"""
Permanently delete a run (metadata and metrics, tags, parameters).
This is used by the ``mlflow gc`` command line and is not intended to be used elsewhere.
"""
with self.ManagedSessionMaker() as session:
run = self._get_run(run_uuid=run_id, session=session)
session.delete(run)
def _get_deleted_runs(self):
with self.ManagedSessionMaker() as session:
run_ids = session\
.query(SqlRun.run_uuid) \
.filter(SqlRun.lifecycle_stage == LifecycleStage.DELETED) \
.all()
return [run_id[0] for run_id in run_ids]
def log_metric(self, run_id, metric):
_validate_metric(metric.key, metric.value, metric.timestamp, metric.step)
is_nan = math.isnan(metric.value)
if is_nan:
value = 0
elif math.isinf(metric.value):
            # NB: SQL cannot represent Infs, so we replace +/- Inf with the max/min 64b float value
value = 1.7976931348623157e308 if metric.value > 0 else -1.7976931348623157e308
else:
value = metric.value
with self.ManagedSessionMaker() as session:
run = self._get_run(run_uuid=run_id, session=session)
self._check_run_is_active(run)
# ToDo: Consider prior checks for null, type, metric name validations, ... etc.
logged_metric, just_created = self._get_or_create(
model=SqlMetric, run_uuid=run_id, key=metric.key, value=value,
timestamp=metric.timestamp, step=metric.step, session=session, is_nan=is_nan)
# Conditionally update the ``latest_metrics`` table if the logged metric was not
# already present in the ``metrics`` table. If the logged metric was already present,
# we assume that the ``latest_metrics`` table already accounts for its presence
if just_created:
self._update_latest_metric_if_necessary(logged_metric, session)
@staticmethod
def _update_latest_metric_if_necessary(logged_metric, session):
def _compare_metrics(metric_a, metric_b):
"""
:return: True if ``metric_a`` is strictly more recent than ``metric_b``, as determined
by ``step``, ``timestamp``, and ``value``. False otherwise.
"""
return (metric_a.step, metric_a.timestamp, metric_a.value) > \
(metric_b.step, metric_b.timestamp, metric_b.value)
# Fetch the latest metric value corresponding to the specified run_id and metric key and
# lock its associated row for the remainder of the transaction in order to ensure
# isolation
latest_metric = session \
.query(SqlLatestMetric) \
.filter(
SqlLatestMetric.run_uuid == logged_metric.run_uuid,
SqlLatestMetric.key == logged_metric.key) \
.with_for_update() \
.one_or_none()
if latest_metric is None or _compare_metrics(logged_metric, latest_metric):
session.merge(
SqlLatestMetric(
run_uuid=logged_metric.run_uuid, key=logged_metric.key,
value=logged_metric.value, timestamp=logged_metric.timestamp,
step=logged_metric.step, is_nan=logged_metric.is_nan))
def get_metric_history(self, run_id, metric_key):
with self.ManagedSessionMaker() as session:
metrics = session.query(SqlMetric).filter_by(run_uuid=run_id, key=metric_key).all()
return [metric.to_mlflow_entity() for metric in metrics]
def log_param(self, run_id, param):
with self.ManagedSessionMaker() as session:
run = self._get_run(run_uuid=run_id, session=session)
self._check_run_is_active(run)
# if we try to update the value of an existing param this will fail
# because it will try to create it with same run_uuid, param key
try:
# This will check for various integrity checks for params table.
# ToDo: Consider prior checks for null, type, param name validations, ... etc.
self._get_or_create(model=SqlParam, session=session, run_uuid=run_id,
key=param.key, value=param.value)
# Explicitly commit the session in order to catch potential integrity errors
# while maintaining the current managed session scope ("commit" checks that
# a transaction satisfies uniqueness constraints and throws integrity errors
# when they are violated; "get_or_create()" does not perform these checks). It is
# important that we maintain the same session scope because, in the case of
# an integrity error, we want to examine the uniqueness of parameter values using
# the same database state that the session uses during "commit". Creating a new
# session synchronizes the state with the database. As a result, if the conflicting
# parameter value were to be removed prior to the creation of a new session,
# we would be unable to determine the cause of failure for the first session's
# "commit" operation.
session.commit()
except sqlalchemy.exc.IntegrityError:
# Roll back the current session to make it usable for further transactions. In the
# event of an error during "commit", a rollback is required in order to continue
# using the session. In this case, we re-use the session because the SqlRun, `run`,
# is lazily evaluated during the invocation of `run.params`.
session.rollback()
existing_params = [p.value for p in run.params if p.key == param.key]
if len(existing_params) > 0:
old_value = existing_params[0]
raise MlflowException(
"Changing param values is not allowed. Param with key='{}' was already"
" logged with value='{}' for run ID='{}'. Attempted logging new value"
" '{}'.".format(
param.key, old_value, run_id, param.value), INVALID_PARAMETER_VALUE)
else:
raise
def set_experiment_tag(self, experiment_id, tag):
"""
Set a tag for the specified experiment
:param experiment_id: String ID of the experiment
:param tag: ExperimentRunTag instance to log
"""
_validate_experiment_tag(tag.key, tag.value)
with self.ManagedSessionMaker() as session:
experiment = self._get_experiment(session,
experiment_id,
ViewType.ALL).to_mlflow_entity()
self._check_experiment_is_active(experiment)
session.merge(SqlExperimentTag(experiment_id=experiment_id,
key=tag.key,
value=tag.value))
def set_tag(self, run_id, tag):
"""
Set a tag on a run.
:param run_id: String ID of the run
:param tag: RunTag instance to log
"""
with self.ManagedSessionMaker() as session:
_validate_tag(tag.key, tag.value)
run = self._get_run(run_uuid=run_id, session=session)
self._check_run_is_active(run)
session.merge(SqlTag(run_uuid=run_id, key=tag.key, value=tag.value))
def delete_tag(self, run_id, key):
"""
Delete a tag from a run. This is irreversible.
:param run_id: String ID of the run
:param key: Name of the tag
"""
with self.ManagedSessionMaker() as session:
run = self._get_run(run_uuid=run_id, session=session)
self._check_run_is_active(run)
filtered_tags = session.query(SqlTag).filter_by(run_uuid=run_id, key=key).all()
if len(filtered_tags) == 0:
raise MlflowException(
"No tag with name: {} in run with id {}".format(key, run_id),
error_code=RESOURCE_DOES_NOT_EXIST)
elif len(filtered_tags) > 1:
raise MlflowException(
"Bad data in database - tags for a specific run must have "
"a single unique value."
"See https://mlflow.org/docs/latest/tracking.html#adding-tags-to-runs",
error_code=INVALID_STATE)
session.delete(filtered_tags[0])
def _search_runs(self, experiment_ids, filter_string, run_view_type, max_results, order_by,
page_token):
def compute_next_token(current_size):
next_token = None
if max_results == current_size:
final_offset = offset + max_results
next_token = SearchUtils.create_page_token(final_offset)
return next_token
if max_results > SEARCH_MAX_RESULTS_THRESHOLD:
raise MlflowException("Invalid value for request parameter max_results. It must be at "
"most {}, but got value {}".format(SEARCH_MAX_RESULTS_THRESHOLD,
max_results),
INVALID_PARAMETER_VALUE)
stages = set(LifecycleStage.view_type_to_stages(run_view_type))
with self.ManagedSessionMaker() as session:
# Fetch the appropriate runs and eagerly load their summary metrics, params, and
# tags. These run attributes are referenced during the invocation of
# ``run.to_mlflow_entity()``, so eager loading helps avoid additional database queries
# that are otherwise executed at attribute access time under a lazy loading model.
parsed_filters = SearchUtils.parse_search_filter(filter_string)
parsed_orderby, sorting_joins = _get_orderby_clauses(order_by, session)
query = session.query(SqlRun)
for j in _get_sqlalchemy_filter_clauses(parsed_filters, session):
query = query.join(j)
# using an outer join is necessary here because we want to be able to sort
# on a column (tag, metric or param) without removing the lines that
# do not have a value for this column (which is what inner join would do)
for j in sorting_joins:
query = query.outerjoin(j)
offset = SearchUtils.parse_start_offset_from_page_token(page_token)
queried_runs = query.distinct() \
.options(*self._get_eager_run_query_options()) \
.filter(
SqlRun.experiment_id.in_(experiment_ids),
SqlRun.lifecycle_stage.in_(stages),
*_get_attributes_filtering_clauses(parsed_filters)) \
.order_by(*parsed_orderby) \
.offset(offset).limit(max_results).all()
runs = [run.to_mlflow_entity() for run in queried_runs]
next_page_token = compute_next_token(len(runs))
return runs, next_page_token
def log_batch(self, run_id, metrics, params, tags):
_validate_run_id(run_id)
_validate_batch_log_data(metrics, params, tags)
_validate_batch_log_limits(metrics, params, tags)
with self.ManagedSessionMaker() as session:
run = self._get_run(run_uuid=run_id, session=session)
self._check_run_is_active(run)
try:
for param in params:
self.log_param(run_id, param)
for metric in metrics:
self.log_metric(run_id, metric)
for tag in tags:
self.set_tag(run_id, tag)
except MlflowException as e:
raise e
except Exception as e:
raise MlflowException(e, INTERNAL_ERROR)
def record_logged_model(self, run_id, mlflow_model):
if not isinstance(mlflow_model, Model):
raise TypeError("Argument 'mlflow_model' should be mlflow.models.Model, got '{}'"
.format(type(mlflow_model)))
model_dict = mlflow_model.to_dict()
with self.ManagedSessionMaker() as session:
run = self._get_run(run_uuid=run_id, session=session)
self._check_run_is_active(run)
previous_tag = [t for t in run.tags if t.key == MLFLOW_LOGGED_MODELS]
if previous_tag:
value = json.dumps(json.loads(previous_tag[0].value) + [model_dict])
else:
value = json.dumps([model_dict])
_validate_tag(MLFLOW_LOGGED_MODELS, value)
session.merge(SqlTag(key=MLFLOW_LOGGED_MODELS, value=value, run_uuid=run_id))
def _get_attributes_filtering_clauses(parsed):
clauses = []
for sql_statement in parsed:
key_type = sql_statement.get('type')
key_name = sql_statement.get('key')
value = sql_statement.get('value')
comparator = sql_statement.get('comparator').upper()
if SearchUtils.is_attribute(key_type, comparator):
# key_name is guaranteed to be a valid searchable attribute of entities.RunInfo
# by the call to parse_search_filter
attribute = getattr(SqlRun, SqlRun.get_attribute_name(key_name))
if comparator in SearchUtils.CASE_INSENSITIVE_STRING_COMPARISON_OPERATORS:
op = SearchUtils.get_sql_filter_ops(attribute, comparator)
clauses.append(op(value))
elif comparator in SearchUtils.filter_ops:
op = SearchUtils.filter_ops.get(comparator)
clauses.append(op(attribute, value))
return clauses
def _to_sqlalchemy_filtering_statement(sql_statement, session):
key_type = sql_statement.get('type')
key_name = sql_statement.get('key')
value = sql_statement.get('value')
comparator = sql_statement.get('comparator').upper()
if SearchUtils.is_metric(key_type, comparator):
entity = SqlLatestMetric
value = float(value)
elif SearchUtils.is_param(key_type, comparator):
entity = SqlParam
elif SearchUtils.is_tag(key_type, comparator):
entity = SqlTag
elif SearchUtils.is_attribute(key_type, comparator):
return None
else:
raise MlflowException("Invalid search expression type '%s'" % key_type,
error_code=INVALID_PARAMETER_VALUE)
if comparator in SearchUtils.CASE_INSENSITIVE_STRING_COMPARISON_OPERATORS:
op = SearchUtils.get_sql_filter_ops(entity.value, comparator)
return (
session
.query(entity)
.filter(entity.key == key_name, op(value))
.subquery()
)
elif comparator in SearchUtils.filter_ops:
op = SearchUtils.filter_ops.get(comparator)
return (
session
.query(entity)
.filter(entity.key == key_name, op(entity.value, value))
.subquery()
)
else:
return None
def _get_sqlalchemy_filter_clauses(parsed, session):
"""creates SqlAlchemy subqueries
that will be inner-joined to SQLRun to act as multi-clause filters."""
filters = []
for sql_statement in parsed:
filter_query = _to_sqlalchemy_filtering_statement(sql_statement, session)
if filter_query is not None:
filters.append(filter_query)
return filters
def _get_orderby_clauses(order_by_list, session):
"""Sorts a set of runs based on their natural ordering and an overriding set of order_bys.
Runs are naturally ordered first by start time descending, then by run id for tie-breaking.
"""
clauses = []
ordering_joins = []
clause_id = 0
# contrary to filters, it is not easily feasible to separately handle sorting
# on attributes and on joined tables as we must keep all clauses in the same order
if order_by_list:
for order_by_clause in order_by_list:
clause_id += 1
(key_type, key, ascending) = SearchUtils.parse_order_by_for_search_runs(order_by_clause)
if SearchUtils.is_attribute(key_type, '='):
order_value = getattr(SqlRun, SqlRun.get_attribute_name(key))
else:
if SearchUtils.is_metric(key_type, '='): # any valid comparator
entity = SqlLatestMetric
elif SearchUtils.is_tag(key_type, '='):
entity = SqlTag
elif SearchUtils.is_param(key_type, '='):
entity = SqlParam
else:
raise MlflowException("Invalid identifier type '%s'" % key_type,
error_code=INVALID_PARAMETER_VALUE)
# build a subquery first because we will join it in the main request so that the
# metric we want to sort on is available when we apply the sorting clause
subquery = session \
.query(entity) \
.filter(entity.key == key) \
.subquery()
ordering_joins.append(subquery)
order_value = subquery.c.value
# sqlite does not support NULLS LAST expression, so we sort first by
# presence of the field (and is_nan for metrics), then by actual value
# As the subqueries are created independently and used later in the
# same main query, the CASE WHEN columns need to have unique names to
# avoid ambiguity
if SearchUtils.is_metric(key_type, '='):
clauses.append(sql.case([
(subquery.c.is_nan.is_(True), 1),
(order_value.is_(None), 1)
], else_=0).label('clause_%s' % clause_id))
else: # other entities do not have an 'is_nan' field
clauses.append(sql.case([(order_value.is_(None), 1)], else_=0)
.label('clause_%s' % clause_id))
if ascending:
clauses.append(order_value)
else:
clauses.append(order_value.desc())
clauses.append(SqlRun.start_time.desc())
clauses.append(SqlRun.run_uuid)
return clauses, ordering_joins
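# A minimal standalone sketch (not part of the store above) of the NULLS LAST
# emulation used in _get_orderby_clauses: sort first on "is the value NULL",
# then on the value itself. The table and column names are made up, and the
# list-style case()/select() calls assume SQLAlchemy 1.x, as used above.
from sqlalchemy import Column, Float, MetaData, Table, select, sql

_metadata = MetaData()
_metrics = Table("metrics", _metadata, Column("value", Float))

# CASE WHEN value IS NULL THEN 1 ELSE 0 END -- NULL rows sort after non-NULL rows
_null_last = sql.case([(_metrics.c.value.is_(None), 1)], else_=0).label("clause_1")
_query = select([_metrics.c.value]).order_by(_null_last, _metrics.c.value)
print(_query)  # prints the generated SQL, including the CASE WHEN ordering clause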
| 48.671117
| 100
| 0.631318
|
794b79c0d26ace7db0ca083f80f6ecdf3959e123
| 146
|
py
|
Python
|
setup.py
|
Yongjie-Zhu/cpp_wrapper
|
233f934844e9e65602c5b3073c54873fc368794c
|
[
"RSA-MD"
] | null | null | null |
setup.py
|
Yongjie-Zhu/cpp_wrapper
|
233f934844e9e65602c5b3073c54873fc368794c
|
[
"RSA-MD"
] | null | null | null |
setup.py
|
Yongjie-Zhu/cpp_wrapper
|
233f934844e9e65602c5b3073c54873fc368794c
|
[
"RSA-MD"
] | null | null | null |
from setuptools import setup
setup(name='cpp_wrapper',
version='0.1',
packages=['cpp_wrapper'],
scripts=['create_project.py'])
| 20.857143
| 36
| 0.657534
|
794b79dba88c9147bfa4fae06d0ada385cc504f7
| 6,614
|
py
|
Python
|
pySSG.tab/Project Planning.Panel/Create Views.pulldown/Elevations.pushbutton/script_back.py
|
kbruxvoort/SSG.extension
|
ea0e0472e1c3940a63cab8c013228cba6425595a
|
[
"MIT"
] | 2
|
2021-09-29T10:49:16.000Z
|
2022-02-18T04:05:18.000Z
|
pySSG.tab/Project Planning.Panel/Create Views.pulldown/Elevations.pushbutton/script_back.py
|
kbruxvoort/SSG.extension
|
ea0e0472e1c3940a63cab8c013228cba6425595a
|
[
"MIT"
] | 1
|
2022-02-28T14:01:43.000Z
|
2022-02-28T14:01:43.000Z
|
pySSG.tab/Project Planning.Panel/Create Views.pulldown/Elevations.pushbutton/script_back.py
|
kbruxvoort/SSG.extension
|
ea0e0472e1c3940a63cab8c013228cba6425595a
|
[
"MIT"
] | null | null | null |
"""
This script creates interior elevations for each room based on the room boundary.
"""
#pylint: disable=import-error,invalid-name,broad-except
import clr
import math
# Import RevitAPI
clr.AddReference("RevitAPI")
import Autodesk
from Autodesk.Revit.DB import *
from pyrevit import revit
from pyrevit import script
from pyrevit import forms
__title__ = " Interior Elevations"
__author__ = "{{author}}"
# forms.inform_wip()
def checkCurves(curve1, curve2):
v1 = curve1.Direction.Normalize()
v2 = curve2.Direction.Normalize()
if v1.IsAlmostEqualTo(v2):
newLine = Line.CreateBound(curve2.GetEndPoint(0), curve1.GetEndPoint(1))
return newLine
else:
return curve1
rooms, elems, names = [], [], []
col1 = FilteredElementCollector(revit.doc).OfCategory(BuiltInCategory.OST_Rooms).ToElements()
col2 = FilteredElementCollector(revit.doc).OfClass(ViewFamilyType).ToElements()
for v in col2:
if Element.Name.GetValue(v) == "SSG_Interior Elevation":
elems.append(v)
viewType = elems[0]
for room in col1:
if room.Area != 0:
rooms.append(room)
# names.append(DB.Element.Name.GetValue(room))
res = forms.SelectFromList.show(rooms,
multiselect=True,
name_attr= 'Number',
button_name='Select Rooms')
bOptions = SpatialElementBoundaryOptions()
heights = []
curveLists = []
for r in res:
heights.append(r.UnboundedHeight)
boundSegs = r.GetBoundarySegments(bOptions)
for bounds in boundSegs:
curveLists.append(bounds)
output = []
for cList in curveLists:
count = 0
newCurves = []
for c in cList:
curve = c.GetCurve()
if count == 0:
newCurves.append(curve)
else:
newCurve = checkCurves(curve, newCurves[len(newCurves)-1])
if newCurve == curve:
newCurves.append(curve)
else:
newCurves[len(newCurves)-1] = newCurve
count += 1
    if newCurves[0].Direction.Normalize().IsAlmostEqualTo(newCurves[len(newCurves)-1].Direction.Normalize()):
        # first and last segments are collinear: fuse them into a single curve running
        # from the last curve's start point to the first curve's end point
        newCurve = Line.CreateBound(newCurves[len(newCurves)-1].GetEndPoint(0), newCurves[0].GetEndPoint(1))
        newCurves.pop()
        newCurves[0] = newCurve
output.append(newCurves)
# print(len(newCurves))
# print("-----")
# print(output)
# print("-----")
curves = []
old_curves = []
for out in output:
# points = []
# angles = []
# curves = []
for n in out:
# print(str(n.GetEndPoint(0)) + " " + str(n.GetEndPoint(1)))
rev = n.CreateReversed()
# print(str(rev.GetEndPoint(0)) + " " + str(rev.GetEndPoint(1)))
rev_dir = rev.Direction
# print(rev_dir)
zAxis = XYZ(0,0,1)
cross = rev_dir.CrossProduct(zAxis)
# print(cross)
# print(rev)
# print(rev_dir)
# startPT = n.GetEndPoint(0)
# endPT = n.GetEndPoint(1)
# midPT = (startPT + endPT) / 2
# midPT = n.Evaluate(n.Length/2, False)
# cross = midPT.CrossProduct(rev_dir)
# cross = midPT.CrossProduct(n.Direction)
# newLine = n.CreateOffset(5, cross)
newLine = n.CreateTransformed(Transform.CreateTranslation(5*cross))
# newLineS = newLine.GetEndPoint(0)
# newLineE = newLine.GetEndPoint(1)
# newLineM = (newLineS + newLineE)/2
# combY = newLineM.Y-midPT.Y
# combX = newLineM.X-midPT.X
# ang = math.atan2(combY, combX)
# points.append(newLineM)
# angles.append(ang)
curves.append(newLine)
old_curves.append(n)
# print(newLine)
# print("---")
# print(str(newLine.GetEndPoint(0)) + " - " + str(newLine.GetEndPoint(1)))
# print(len(points))
# print(angles)
# print(curves)
views = []
with revit.Transaction("Create Elevations by Room"):
for curve, old_curve in zip(curves, old_curves):
startPT = curve.GetEndPoint(0)
endPT = curve.GetEndPoint(1)
midPT = (startPT + endPT)/2
old_startPT = old_curve.GetEndPoint(0)
old_endPT = old_curve.GetEndPoint(1)
old_midPT = (old_startPT + old_endPT)/2
combY = midPT.Y-old_midPT.Y
combX = midPT.X-old_midPT.X
ang = math.atan2(combY, combX)
rotPT = XYZ(midPT.X, midPT.Y, midPT.Z + 1)
ln = Line.CreateBound(midPT, rotPT)
eleMarker = ElevationMarker.CreateElevationMarker(revit.doc, viewType.Id, midPT, 100)
ele = eleMarker.CreateElevation(revit.doc, revit.doc.ActiveView.Id, 0)
ElementTransformUtils.RotateElement(revit.doc, eleMarker.Id, ln, ang)
wall_direct = old_curve.Direction
print(wall_direct)
view_direct = ele.ViewDirection
print(view_direct)
print(view_direct.AngleTo(wall_direct))
print("---")
# if math.degrees(ang) >= 170 and math.degrees(ang) <= 190:
# ElementTransformUtils.RotateElement(revit.doc, eleMarker.Id, ln, ang)
views.append(ele)
# print(math.degrees(ang))
'''
# for curveList, viewList, height in zip(curves, views, heights):
for height in heights:
for curve, view in zip(curves,views):
viewCropManager = view.GetCropRegionShapeManager()
cLoop = viewCropManager.GetCropShape()[0]
cLoopCurves = [x for x in cLoop]
PointA = cLoopCurves[3].GetEndPoint(1)
PointD = cLoopCurves[3].GetEndPoint(0)
curveStart = curve.GetEndPoint(1)
curveEnd = curve.GetEndPoint(0)
curveMP = (curveStart + curveEnd)/2
if curveMP.DistanceTo(curve.GetEndPoint(1)) < curveMP.DistanceTo(PointA):
PointA = curve.GetEndPoint(1)
if curveMP.DistanceTo(curve.GetEndPoint(0)) < curveMP.DistanceTo(PointD):
PointD = curve.GetEndPoint(0)
PointB = XYZ(PointA.X, PointA.Y, PointA.Z + height)
PointC = XYZ(PointD.X, PointD.Y, PointD.Z + height)
print(PointA)
print(PointB)
print(PointC)
print(PointD)
print("---")
# try:
LineA = Line.CreateBound(PointA,PointB)
LineB = Line.CreateBound(PointB,PointC)
LineC = Line.CreateBound(PointC,PointD)
LineD = Line.CreateBound(PointD,PointA)
# curveLoop = CurveLoop.Create([LineA, LineB, LineC, LineD])
# with revit.Transaction("Crop Elevations"):
# viewCropManager.SetCropShape(curveLoop)
# except:
# print("Problem creating lines")
'''
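# Standalone sketch in plain Python (no Revit API) of the idea behind checkCurves()
# above: consecutive boundary segments that point in the same direction are fused
# into a single segment. Segments here are just ((x1, y1), (x2, y2)) tuples.
def merge_collinear(segments):
    def direction(seg):
        (x1, y1), (x2, y2) = seg
        length = ((x2 - x1) ** 2 + (y2 - y1) ** 2) ** 0.5
        return ((x2 - x1) / length, (y2 - y1) / length)

    merged = [segments[0]]
    for seg in segments[1:]:
        if direction(seg) == direction(merged[-1]):
            # same direction: extend the previous segment to this one's end point
            merged[-1] = (merged[-1][0], seg[1])
        else:
            merged.append(seg)
    return merged

print(merge_collinear([((0, 0), (1, 0)), ((1, 0), (2, 0)), ((2, 0), (2, 1))]))
# -> [((0, 0), (2, 0)), ((2, 0), (2, 1))]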
| 30.200913
| 109
| 0.612186
|
794b7a01dbc1fb17c314ec94755e276166a4a75b
| 7,557
|
py
|
Python
|
generator/game/game_state.py
|
andCelli/efg_tools
|
0de50f41e971d6ebb461ae4733843096af87e014
|
[
"MIT"
] | null | null | null |
generator/game/game_state.py
|
andCelli/efg_tools
|
0de50f41e971d6ebb461ae4733843096af87e014
|
[
"MIT"
] | null | null | null |
generator/game/game_state.py
|
andCelli/efg_tools
|
0de50f41e971d6ebb461ae4733843096af87e014
|
[
"MIT"
] | null | null | null |
from typing import Dict, List
from game.game_structures import Card, PlayerId, Team, Bid
class TurnInfo:
def __init__(self, first, current, cards, tricks_won: Dict[int, int]):
self.first_player_id = first
self.current_player_id = current
self.cards = cards
# map player ids with number of tricks won
self.tricks_won = tricks_won
def __str__(self):
s = "First player of the turn: Player" + str(self.first_player_id+1) + ", current player: Player" + str(self.current_player_id + 1) + "\nCards played: "
if len(self.cards) == 0:
s += "none"
else:
for card in self.cards:
s += card.short_string() + ", "
return s
"""
GameState is the class that contains the core gameplay logic, such as turn order,
performable actions and actions history.
Players and their respective hands are passed as parameters because they were
already needed for the bidding phase.
"""
class GameState:
def __init__(self, n_players, teams: Dict[int, Team], hands: Dict[PlayerId, List[Card]], ranks: int, bid: Bid, bid_winner_id = 0):
self.ranks = ranks
# players is a map with player id as key
self.teams = teams
self.n_players = n_players
self.bid = bid
self.trump = bid.trump
self.hands = hands
# Sorting hands to avoid errors during exploration
for hand in hands.values():
hand.sort()
self.turn_info = []
tricks_won = {}
for p in range(self.n_players):
tricks_won[p] = 0
self.turn_info.append(TurnInfo(bid_winner_id, bid_winner_id, [], tricks_won))
# actions is a vector of (player_id, action_performed)
self.actions = []
self.declarer_id = bid_winner_id
self.dummy_id = -1
self.dummy_exists = False
for team in teams.values():
if self.declarer_id in team.members and len(team.members) == 2: # teams can only have 1 or 2 players
# declarer has a partner
self.dummy_id = team.get_other_member(self.declarer_id)
self.dummy_exists = True
def get_curr_turn_info(self) -> TurnInfo:
return self.turn_info[len(self.turn_info) - 1]
def get_current_player_id(self):
return self.get_curr_turn_info().current_player_id
"""
Returns True if the game is over, namely if there are no cards left in the
players' hands
"""
def is_game_over(self) -> bool:
game_over = True
for p in range(self.n_players):
game_over = game_over and (len(self.hands[p]) == 0)
return game_over
def push_action(self, card_played: Card):
# assert that the card is legal
assert card_played in self.available_actions()
turn = self.get_curr_turn_info()
self.actions.append( (turn.current_player_id, card_played) )
self.hands[turn.current_player_id].remove(card_played)
next_player = (turn.current_player_id+1)%self.n_players
# cards = updated cards-on-table vector
cards = turn.cards.copy()
cards.append(card_played)
if next_player == turn.first_player_id:
# turn ended
winner_id = self._find_winner_id(cards, turn.first_player_id)
new_tricks_won = turn.tricks_won.copy()
new_tricks_won[winner_id] += 1
# pushing a new turn info object on the stack
self.turn_info.append(TurnInfo(winner_id, winner_id, [], new_tricks_won))
else:
self.turn_info.append(TurnInfo(turn.first_player_id, next_player, cards, turn.tricks_won.copy()))
def pop_action(self):
if len(self.actions) == 0:
return 0
(popped_player_id, popped_card) = self.actions.pop()
prev_turn_info = self.turn_info.pop()
hand = self.hands[popped_player_id]
hand.append(popped_card)
hand.sort()
def _find_winner_id(self, cards: List[Card], first_player_id: int) -> int:
assert len(cards) == self.n_players, "the turn hasn't ended yet"
leader_suit = cards[0].suit
best_card_index = 0
for i in range(1, len(cards)):
if cards[i].compare_to(cards[best_card_index], leader_suit, self.trump) == 1:
best_card_index = i
return (first_player_id + best_card_index) % self.n_players
"""
Returns the list of cards that the current player can play.
If he's the leader, any card can be played. Same if he doesn't have any card that can
follow the leading suit.
If he has some cards that follow the leader, he must play one of those.
"""
def available_actions(self) -> List[Card]:
turn = self.get_curr_turn_info()
leader_suit = turn.cards[0].suit if len(turn.cards) != 0 else None
if leader_suit is None:
return self.hands[turn.current_player_id]
else:
# cards that have the same suit as the leader
following_cards = list(filter(lambda c: c.suit == leader_suit, self.hands[turn.current_player_id]))
if len(following_cards) == 0:
# no cards can follow, so play any card
return self.hands[turn.current_player_id]
else:
# must follow the leading suit
return following_cards
"""
    Generates a compact string that associates the current player with his hand and the past actions,
    in chronological order (i.e. the first card printed is the first card he played, and so on).
"""
def gen_infoset_name(self) -> str:
s = ""
if not self.dummy_exists:
pid = self.get_current_player_id()
s += "P" + str(pid+1) + "-"
if len(self.hands[pid]) == 0:
s += "/"
else:
for c in self.hands[pid]:
s += "/" + c.short_string()
s += "-"
if len(self.actions) == 0:
s += "/"
else:
for (id, action) in self.actions:
# print the action
s += "/P" + str(id+1) + ":" + action.short_string()
else:
pid = self.get_current_player_id()
dummy_hand = self.hands[self.dummy_id]
if pid == self.dummy_id:
pid = self.declarer_id
s += "P" + str(pid+1) + "-"
if len(self.hands[pid]) == 0:
s += "/"
else:
for c in self.hands[pid]:
s += "/" + c.short_string()
s += "-"
if len(dummy_hand) == 0:
s += "/"
else:
for c in dummy_hand:
s += "/" + c.short_string()
s += "-"
if len(self.actions) == 0:
s += "/"
else:
for (id, action) in self.actions:
# print the action
s += "/P" + str(self.fix_id(id) + 1) + ":" + action.short_string()
return s
"""
    Change the dummy id into the declarer one, if necessary
"""
def fix_id(self, pid: int) -> int:
if self.dummy_exists and pid == self.dummy_id:
return self.declarer_id
return pid
"""
Debugging function, prints the whole array of actions
"""
def print_actions(self):
for (a,b) in self.actions:
print("Player%d played %s" % (a+1, b), end=", ")
print("")
| 35.646226
| 160
| 0.573773
|
794b7a9716e99fd99271a25699cf5b1026284a00
| 1,105
|
py
|
Python
|
vendor/packages/translate-toolkit/translate/filters/test_prefilters.py
|
DESHRAJ/fjord
|
8899b6286b23347c9b024334e61c33fe133e836d
|
[
"BSD-3-Clause"
] | null | null | null |
vendor/packages/translate-toolkit/translate/filters/test_prefilters.py
|
DESHRAJ/fjord
|
8899b6286b23347c9b024334e61c33fe133e836d
|
[
"BSD-3-Clause"
] | 1
|
2021-12-13T20:55:07.000Z
|
2021-12-13T20:55:07.000Z
|
vendor/packages/translate-toolkit/translate/filters/test_prefilters.py
|
DESHRAJ/fjord
|
8899b6286b23347c9b024334e61c33fe133e836d
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""tests decoration handling functions that are used by checks"""
from translate.filters import prefilters
def test_removekdecomments():
assert prefilters.removekdecomments(u"Some sṱring") == u"Some sṱring"
assert prefilters.removekdecomments(u"_: Commenṱ\\n\nSome sṱring") == u"Some sṱring"
assert prefilters.removekdecomments(u"_: Commenṱ\\n\n") == u""
def test_filterwordswithpunctuation():
string = u"Nothing in here."
filtered = prefilters.filterwordswithpunctuation(string)
assert filtered == string
# test listed words (start / end with apostrophe)
string = u"'n Boom het 'n tak."
filtered = prefilters.filterwordswithpunctuation(string)
assert filtered == "n Boom het n tak."
# test words containing apostrophe
string = u"It's in it's own place."
filtered = prefilters.filterwordswithpunctuation(string)
assert filtered == "Its in its own place."
# test strings in unicode
string = u"Iṱ'š"
filtered = prefilters.filterwordswithpunctuation(string)
assert filtered == u"Iṱš"
| 35.645161
| 88
| 0.713122
|
794b7b151ab61c39dda3bdc4d4b7bc01a05d5826
| 607
|
py
|
Python
|
dbas/views/discussion/tests/test_attitude.py
|
tbsschroeder/dbas
|
9c86eccde65cd64bc5719573b3b8449d8f333e08
|
[
"MIT"
] | 23
|
2017-05-18T13:33:51.000Z
|
2021-12-26T18:04:09.000Z
|
dbas/views/discussion/tests/test_attitude.py
|
tbsschroeder/dbas
|
9c86eccde65cd64bc5719573b3b8449d8f333e08
|
[
"MIT"
] | 8
|
2019-12-26T17:19:44.000Z
|
2020-05-28T15:38:31.000Z
|
dbas/views/discussion/tests/test_attitude.py
|
tbsschroeder/dbas
|
9c86eccde65cd64bc5719573b3b8449d8f333e08
|
[
"MIT"
] | 7
|
2017-09-27T11:15:42.000Z
|
2021-12-26T18:12:38.000Z
|
import unittest
from pyramid import testing
from dbas.helper.test import verify_dictionary_of_view
from dbas.tests.utils import construct_dummy_request
from dbas.views.discussion.rendered import attitude
class DiscussionAttitudeViewTests(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
self.config.include('pyramid_chameleon')
def test_page(self):
request = construct_dummy_request(matchdict={
'slug': 'cat-or-dog',
'statement_id': 2,
})
response = attitude(request)
verify_dictionary_of_view(response)
| 27.590909
| 54
| 0.710049
|
794b7b1f4ba949bdf4a286a838c5345c0d12a81f
| 5,118
|
py
|
Python
|
src/tela/tela_organizador.py
|
Franco904/sistema-eventos
|
4461fb79d21bf17d3f740cd82bb2e2f2fff870ae
|
[
"MIT"
] | null | null | null |
src/tela/tela_organizador.py
|
Franco904/sistema-eventos
|
4461fb79d21bf17d3f740cd82bb2e2f2fff870ae
|
[
"MIT"
] | 2
|
2022-02-02T21:31:43.000Z
|
2022-03-05T22:54:10.000Z
|
src/tela/tela_organizador.py
|
Franco904/sistema-eventos
|
4461fb79d21bf17d3f740cd82bb2e2f2fff870ae
|
[
"MIT"
] | null | null | null |
import PySimpleGUI as sg
class TelaOrganizador:
def __init__(self):
self.__window = None
def tela_opcoes(self):
opcao = -1
while opcao == -1:
self.inicializar_opcoes()
button, values = self.__window.read()
if values['0'] or button is None:
opcao = 0
break
for i, key in enumerate(values, 1):
if values[key]:
opcao = i
self.__window.close()
self.__window.close()
return opcao
def inicializar_opcoes(self):
sg.ChangeLookAndFeel('DarkTeal4')
layout = [
[sg.Text('Organizadores', font=('Arial', 16), justification='center')],
[sg.Text('Escolha uma opção abaixo:')],
[sg.Radio('Adicionar organizador', 'RB', key='1')],
[sg.Radio('Excluir organizador', 'RB', key='2')],
[sg.Radio('Alterar organizador', 'RB', key='3')],
[sg.Radio('Mostrar organizador', 'RB', key='4')],
[sg.Radio('Listar organizadores', 'RB', key='5')],
[sg.Radio('Retornar', 'RB', key='0')],
[sg.Button('Confirmar')]
]
self.__window = sg.Window('Sistema de Eventos', layout)
def pegar_dados_organizador(self, editando: bool):
self.inicializar_pegar_dados(editando)
button, values = self.__window.read()
if button == 'Confirmar':
self.__window.close()
if editando:
values['cpf'] = -1
cpf = values['cpf']
nome = values['nome']
dia_nascimento = int(values['dia_nascimento'])
mes_nascimento = int(values['mes_nascimento'])
ano_nascimento = int(values['ano_nascimento'])
return {'cpf': cpf, 'nome': nome, 'dia_nascimento': dia_nascimento, 'mes_nascimento': mes_nascimento,
'ano_nascimento': ano_nascimento}
self.__window.close()
return None
def inicializar_pegar_dados(self, editando: bool):
sg.ChangeLookAndFeel('DarkTeal4')
if not editando:
column = [
[sg.Text('Cadastrar Organizador', font=('Arial', 14))],
[sg.Text('CPF:'), sg.InputText(size=(11, 1), key='cpf')]
]
else:
column = [[sg.Text('Alterar Organizador', font=('Arial', 14))]]
layout = [
[sg.Column(column, pad=0)],
[sg.Text('Nome:'), sg.InputText(size=(24, 1), key='nome')],
[sg.Text('Dia de nascimento:'), sg.InputText(size=(2, 1), key='dia_nascimento')],
[sg.Text('Mês de nascimento:'), sg.InputText(size=(2, 1), key='mes_nascimento')],
[sg.Text('Ano de nascimento:'), sg.InputText(size=(4, 4), key='ano_nascimento')],
[sg.Button('Confirmar'), sg.Cancel('Cancelar')]
]
self.__window = sg.Window('Sistema de Eventos', layout)
def mostrar_organizador(self, dados_organizador):
self.inicializar_mostrar_organizador(dados_organizador)
button, values = self.__window.read()
if button in [None, 'OK']:
self.__window.close()
def inicializar_mostrar_organizador(self, dados_organizador):
sg.ChangeLookAndFeel('DarkTeal4')
layout = [
[sg.Text('Dados do Organizador', font=('Arial', 14))],
[sg.Text('CPF:'), sg.Text(dados_organizador['cpf'])],
[sg.Text('Nome:'), sg.Text(dados_organizador['nome'])],
[sg.Text('Data de nascimento:'), sg.Text(dados_organizador['data_nascimento'].strftime('%d/%m/%Y'))],
[sg.Cancel('OK')]
]
self.__window = sg.Window('Sistema de Eventos', layout)
def selecionar_organizador(self, dados_organizadores: dict):
self.inicializar_selecionar_organizador(dados_organizadores)
button, values = self.__window.read()
if button == 'Confirmar':
self.__window.close()
if values['cpf'] == '':
self.mostrar_mensagem('Nenhuma opção selecionada para mostrar.')
return None
cpf_organizador = values['cpf'].split()[-1]
return cpf_organizador
self.__window.close()
return None
def inicializar_selecionar_organizador(self, dados_organizadores: dict):
sg.ChangeLookAndFeel('DarkTeal4')
organizadores_labels = []
for contador in range(len(dados_organizadores["cpfs"])):
organizadores_labels.append(
f'{dados_organizadores["nomes"][contador]} - CPF: {dados_organizadores["cpfs"][contador]}')
organizadores_labels.sort()
layout = [
[sg.Text('Selecionar Organizador', font=('Arial', 14))],
[sg.Text('Organizador:', size=(12, 1)),
sg.Combo(organizadores_labels, readonly=True, size=(40, 1), key='cpf')],
[sg.Button('Confirmar'), sg.Cancel('Cancelar')]
]
self.__window = sg.Window('Sistema de Eventos', layout)
@staticmethod
def mostrar_mensagem(msg):
sg.Popup(msg)
| 34.816327
| 113
| 0.565064
|
794b7b1ff5dd6afa72ea61d89cf8c5368e27c26f
| 2,819
|
py
|
Python
|
clevrer_dev/clevrer/run_net.py
|
gabrielsluz/SlowFast
|
bd06eac47fa236b070fd9a3b39518eea08d02947
|
[
"Apache-2.0"
] | null | null | null |
clevrer_dev/clevrer/run_net.py
|
gabrielsluz/SlowFast
|
bd06eac47fa236b070fd9a3b39518eea08d02947
|
[
"Apache-2.0"
] | null | null | null |
clevrer_dev/clevrer/run_net.py
|
gabrielsluz/SlowFast
|
bd06eac47fa236b070fd9a3b39518eea08d02947
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Wrapper to train and test the CLEVRER model.
Example:
----Clevrer dataset-----
python3 clevrer_dev/clevrer/run_net.py \
--cfg clevrer_dev/clevrer/clevrer.yaml \
DATA.PATH_TO_DATA_DIR /datasets/clevrer_dummy \
DATA.PATH_PREFIX /datasets/clevrer_dummy \
MONET.CHECKPOINT_LOAD /datasets/checkpoint_epoch_00020.pyth \
NUM_GPUS 0 \
LOG_PERIOD 1 \
TRAIN.BATCH_SIZE 1 \
TRAIN.EVAL_PERIOD 1 \
TRAIN.CHECKPOINT_PERIOD 1 \
SOLVER.MAX_EPOCH 1
python3 clevrer_dev/clevrer/run_net.py \
--cfg clevrer_dev/clevrer/clevrer.yaml \
DATA.PATH_TO_DATA_DIR /datasets/clevrer \
DATA.PATH_PREFIX /datasets/clevrer \
MONET.CHECKPOINT_LOAD ./monet_checkpoints/checkpoint_epoch_00140.pyth \
DATA.NUM_FRAMES 25 \
DATA.SAMPLING_RATE 5 \
CLEVRERMAIN.T_HID_DIM 1024 \
NUM_GPUS 1 \
LOG_PERIOD 25 \
TRAIN.BATCH_SIZE 4 \
TRAIN.EVAL_PERIOD 1 \
TRAIN.CHECKPOINT_PERIOD 1 \
SOLVER.MAX_EPOCH 24
----Slot Clevrer-----
python3 clevrer_dev/clevrer/run_net.py \
--cfg clevrer_dev/clevrer/slot_clevrer.yaml \
DATA.PATH_TO_DATA_DIR /datasets/slot_dataset \
DATA.PATH_PREFIX /datasets/slot_dataset \
MONET.CHECKPOINT_LOAD /datasets/checkpoint_epoch_00020.pyth \
NUM_GPUS 0 \
LOG_PERIOD 1 \
TRAIN.BATCH_SIZE 1 \
TRAIN.EVAL_PERIOD 1 \
TRAIN.CHECKPOINT_PERIOD 1 \
SOLVER.MAX_EPOCH 1
python3 clevrer_dev/clevrer/run_net.py \
--cfg clevrer_dev/clevrer/slot_clevrer.yaml \
DATA.PATH_TO_DATA_DIR /datasets/slot_dataset \
DATA.PATH_PREFIX /datasets/slot_dataset \
MONET.CHECKPOINT_LOAD ./monet_checkpoints/checkpoint_epoch_00180.pyth \
DATA.NUM_FRAMES 25 \
DATA.SAMPLING_RATE 5 \
NUM_GPUS 1 \
LOG_PERIOD 100 \
TRAIN.BATCH_SIZE 32 \
TRAIN.EVAL_PERIOD 5 \
TRAIN.CHECKPOINT_PERIOD 10 \
SOLVER.MAX_EPOCH 100
"""
from slowfast.utils.misc import launch_job
from slowfast.utils.parser import load_config, parse_args
#from demo_net import demo
# from test_net import test
from train_net import train
# from visualization import visualize
def main():
"""
Main function to spawn the train and test process.
"""
args = parse_args()
cfg = load_config(args)
# Perform training.
if cfg.TRAIN.ENABLE:
launch_job(cfg=cfg, init_method=args.init_method, func=train)
# Perform multi-clip testing.
# if cfg.TEST.ENABLE:
# launch_job(cfg=cfg, init_method=args.init_method, func=test)
# Perform model visualization.
# if cfg.TENSORBOARD.ENABLE and (
# cfg.TENSORBOARD.MODEL_VIS.ENABLE
# or cfg.TENSORBOARD.WRONG_PRED_VIS.ENABLE
# ):
# launch_job(cfg=cfg, init_method=args.init_method, func=visualize)
# Run demo.
# if cfg.DEMO.ENABLE:
# demo(cfg)
if __name__ == "__main__":
main()
| 28.19
| 75
| 0.732175
|
794b7d06ed14ac6629aec3420d87ccc30d375c54
| 145
|
py
|
Python
|
atcoder/abc176/c.py
|
sugitanishi/competitive-programming
|
51af65fdce514ece12f8afbf142b809d63eefb5d
|
[
"MIT"
] | null | null | null |
atcoder/abc176/c.py
|
sugitanishi/competitive-programming
|
51af65fdce514ece12f8afbf142b809d63eefb5d
|
[
"MIT"
] | null | null | null |
atcoder/abc176/c.py
|
sugitanishi/competitive-programming
|
51af65fdce514ece12f8afbf142b809d63eefb5d
|
[
"MIT"
] | null | null | null |
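# ABC176 C: scan left to right keeping the running maximum seen so far; whenever the
# current value falls below that maximum, add the deficit (mx - d[i]) to the answer,
# i.e. the total amount needed to raise every element up to the running maximum.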
ans=0
n=int(input())
d=list(map(int,input().split()))
mx=0
for i in range(len(d)):
if mx<=d[i]:
mx=d[i]
else:
ans+=mx-d[i]
print(ans)
| 14.5
| 32
| 0.558621
|
794b7d26a7995925acdc721dbda7217062e8a79c
| 574
|
py
|
Python
|
mapbackend/settings_local_example.py
|
copoutapp/copout-back
|
f02a0a9fc4299a204185af15880fdee0e1ce40d2
|
[
"MIT"
] | null | null | null |
mapbackend/settings_local_example.py
|
copoutapp/copout-back
|
f02a0a9fc4299a204185af15880fdee0e1ce40d2
|
[
"MIT"
] | 3
|
2021-04-08T21:34:53.000Z
|
2021-09-22T19:31:20.000Z
|
mapbackend/settings_local_example.py
|
copoutapp/copout-back
|
f02a0a9fc4299a204185af15880fdee0e1ce40d2
|
[
"MIT"
] | null | null | null |
"""
local settings example
"""
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<YOUR SECRET KEY HERE>'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': '<YOUR DB NAME HERE>',
'USER': '<YOUR USERNAME HERE>',
'PASSWORD': '<YOUR PASSWORD HERE>',
'HOST': '<YOUR DB HOST HERE>',
'PORT': '5432',
}
}
ALLOWED_HOSTS = [
'www.copout.app',
'localhost',
'127.0.0.1',
]
| 22.96
| 66
| 0.592334
|
794b7deb2a8b06b992ccd97beb0127e03d020c7d
| 5,173
|
py
|
Python
|
examples/trials/mnist-keras/mnist-keras.py
|
dutxubo/nni
|
c16f4e1c89b54b8b80661ef0072433d255ad2d24
|
[
"MIT"
] | 9,680
|
2019-05-07T01:42:30.000Z
|
2022-03-31T16:48:33.000Z
|
examples/trials/mnist-keras/mnist-keras.py
|
dutxubo/nni
|
c16f4e1c89b54b8b80661ef0072433d255ad2d24
|
[
"MIT"
] | 1,957
|
2019-05-06T21:44:21.000Z
|
2022-03-31T09:21:53.000Z
|
examples/trials/mnist-keras/mnist-keras.py
|
dutxubo/nni
|
c16f4e1c89b54b8b80661ef0072433d255ad2d24
|
[
"MIT"
] | 1,571
|
2019-05-07T06:42:55.000Z
|
2022-03-31T03:19:24.000Z
|
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import argparse
import logging
import os
import keras
import numpy as np
from keras import backend as K
from keras.callbacks import TensorBoard
from keras.datasets import mnist
from keras.layers import Conv2D, Dense, Flatten, MaxPooling2D
from keras.models import Sequential
import nni
LOG = logging.getLogger('mnist_keras')
K.set_image_data_format('channels_last')
TENSORBOARD_DIR = os.environ['NNI_OUTPUT_DIR']
H, W = 28, 28
NUM_CLASSES = 10
def create_mnist_model(hyper_params, input_shape=(H, W, 1), num_classes=NUM_CLASSES):
'''
Create simple convolutional model
'''
layers = [
Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape),
Conv2D(64, (3, 3), activation='relu'),
MaxPooling2D(pool_size=(2, 2)),
Flatten(),
Dense(100, activation='relu'),
Dense(num_classes, activation='softmax')
]
model = Sequential(layers)
if hyper_params['optimizer'] == 'Adam':
optimizer = keras.optimizers.Adam(lr=hyper_params['learning_rate'])
else:
optimizer = keras.optimizers.SGD(lr=hyper_params['learning_rate'], momentum=0.9)
model.compile(loss=keras.losses.categorical_crossentropy, optimizer=optimizer, metrics=['accuracy'])
return model
def load_mnist_data(args):
'''
Load MNIST dataset
'''
mnist_path = os.path.join(os.environ.get('NNI_OUTPUT_DIR'), 'mnist.npz')
(x_train, y_train), (x_test, y_test) = mnist.load_data(path=mnist_path)
os.remove(mnist_path)
x_train = (np.expand_dims(x_train, -1).astype(np.float) / 255.)[:args.num_train]
x_test = (np.expand_dims(x_test, -1).astype(np.float) / 255.)[:args.num_test]
y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)[:args.num_train]
y_test = keras.utils.to_categorical(y_test, NUM_CLASSES)[:args.num_test]
LOG.debug('x_train shape: %s', (x_train.shape,))
LOG.debug('x_test shape: %s', (x_test.shape,))
return x_train, y_train, x_test, y_test
class SendMetrics(keras.callbacks.Callback):
'''
Keras callback to send metrics to NNI framework
'''
def on_epoch_end(self, epoch, logs={}):
'''
Run on end of each epoch
'''
LOG.debug(logs)
# TensorFlow 2.0 API reference claims the key is `val_acc`, but in fact it's `val_accuracy`
if 'val_acc' in logs:
nni.report_intermediate_result(logs['val_acc'])
else:
nni.report_intermediate_result(logs['val_accuracy'])
def train(args, params):
'''
Train model
'''
x_train, y_train, x_test, y_test = load_mnist_data(args)
model = create_mnist_model(params)
model.fit(x_train, y_train, batch_size=args.batch_size, epochs=args.epochs, verbose=1,
validation_data=(x_test, y_test), callbacks=[SendMetrics(), TensorBoard(log_dir=TENSORBOARD_DIR)])
_, acc = model.evaluate(x_test, y_test, verbose=0)
LOG.debug('Final result is: %d', acc)
nni.report_final_result(acc)
def generate_default_params():
'''
Generate default hyper parameters
'''
return {
'optimizer': 'Adam',
'learning_rate': 0.001
}
if __name__ == '__main__':
PARSER = argparse.ArgumentParser()
PARSER.add_argument("--batch_size", type=int, default=200, help="batch size", required=False)
PARSER.add_argument("--epochs", type=int, default=10, help="Train epochs", required=False)
PARSER.add_argument("--num_train", type=int, default=60000, help="Number of train samples to be used, maximum 60000", required=False)
PARSER.add_argument("--num_test", type=int, default=10000, help="Number of test samples to be used, maximum 10000", required=False)
ARGS, UNKNOWN = PARSER.parse_known_args()
try:
# get parameters from tuner
RECEIVED_PARAMS = nni.get_next_parameter()
LOG.debug(RECEIVED_PARAMS)
PARAMS = generate_default_params()
PARAMS.update(RECEIVED_PARAMS)
# train
train(ARGS, PARAMS)
except Exception as e:
LOG.exception(e)
raise
| 37.485507
| 137
| 0.7023
|
794b7e1a1f53a39f9268dc6f1c7067cabd55733c
| 2,267
|
py
|
Python
|
code_block_timer/storage.py
|
cclauss/code_block_timer
|
2c94490013bf072ae0fab8a5a48bd07d9c80b14f
|
[
"Apache-2.0"
] | 1
|
2020-04-14T05:36:06.000Z
|
2020-04-14T05:36:06.000Z
|
code_block_timer/storage.py
|
cclauss/code_block_timer
|
2c94490013bf072ae0fab8a5a48bd07d9c80b14f
|
[
"Apache-2.0"
] | 4
|
2015-02-13T01:21:28.000Z
|
2015-02-15T18:45:36.000Z
|
code_block_timer/storage.py
|
cclauss/code_block_timer
|
2c94490013bf072ae0fab8a5a48bd07d9c80b14f
|
[
"Apache-2.0"
] | 2
|
2015-02-13T03:20:29.000Z
|
2020-01-02T12:33:32.000Z
|
# Copyright 2014 Julia Eskew
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sqlite3
MODULE_DIR = os.path.dirname(__file__)
class TimingDataStorage(object):
SCHEMA_NAME = 'schema.sql'
SCHEMA_PATH = '%s/%s' % (MODULE_DIR, SCHEMA_NAME)
DEFAULT_DB_NAME = 'block_times.db'
def __init__(self, **kwargs):
# Verify that the sqlite DB and schema exists.
db_name = self.DEFAULT_DB_NAME
if 'db_name' in kwargs:
db_name = kwargs['db_name']
if not os.path.exists(db_name):
self._createDB(db_name)
self.conn = sqlite3.connect(db_name)
def _createDB(self, db_name):
# Create the sqlite DB file.
with open(db_name, "w") as f:
conn = sqlite3.connect(db_name)
with open(self.SCHEMA_PATH, "r") as schema_file:
schema = schema_file.read()
cur = conn.cursor()
conn.executescript(schema)
conn.commit()
def run_id(self, desc=""):
"""
Creates a new run ID and returns it.
A single run ID can be used for multiple store()s for times in the same test.
"""
cur = self.conn.cursor()
cur.execute('insert into test_run (run_desc) values (?)', (desc,))
self.conn.commit()
cur.execute('select max(id) from test_run')
return cur.fetchone()[0]
def store(self, run_id, desc, elapsed):
"""
Store the description and elapsed time in the DB, under the passed-in run_id.
"""
cur = self.conn.cursor()
cur.execute(
'insert into block_times (run_id, block_desc, elapsed)'
'values (?, ?, ?)', (run_id, desc, elapsed)
)
self.conn.commit()
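# Usage sketch (not part of the original module): how run_id() and store() are
# meant to be combined, assuming the packaged schema.sql is present as above.
if __name__ == "__main__":
    storage = TimingDataStorage(db_name="example_block_times.db")
    run = storage.run_id("example run")       # one run id shared by a whole test run
    storage.store(run, "outer block", 1.25)   # elapsed seconds for a timed block
    storage.store(run, "inner block", 0.40)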
| 32.855072
| 85
| 0.625055
|
794b7e7f697f7cb51d02dfed4007f6de9d8c40ff
| 3,908
|
py
|
Python
|
.history/my_classes/ScopesClosuresAndDecorators/decorators_1_20210714124523.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
.history/my_classes/ScopesClosuresAndDecorators/decorators_1_20210714124523.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
.history/my_classes/ScopesClosuresAndDecorators/decorators_1_20210714124523.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
"""Decorators
Recall the simple closure example we did which allowed us to maintain a count of how many times a function was called:
def counter(fn):
count = 0
    def inner(*args, **kwargs): # using *args, **kwargs means we can call any function fn with any combination of positional and keyword arguments
nonlocal count
count += 1
print('Function {0} was called {1} times'.format(fn.__name__, count))
return fn(*args, **kwargs)
return inner
def add(a, b=0):
return a + b
add = counter(add)
result = add(1, 2) # Function add was called 1 times
# result = 3
print(result)
I essentially modified our add function by wrapping it inside another function that added some functionality to it
I can also say that we decorated our function add with the function counter
And I call counter a decorator function
In general a decorator function:
takes a function as an argument
returns a closure
the closure usually accepts any combination of parameters
runs some code in the inner function(closure)
the closure function calls the original function using the arguments passed to the closure
returns whatever is returned by that function call
Decorators and the @ symbol
In our previous example, we saw that the counter was a decorator and we could decorate our add function using: add = counter(add)
In general, if func is a decorator function, we decorate another function my_func using:
my_func = func(my_func)
This is so common that Python provides a convenient way of writing that:
@counter                                 @func
def add(a, b):                           def my_func(...):
    return a + b                             ...

is the same as writing                   is the same as writing

def add(a, b):                           def my_func(...):
    return a + b                             ...
add = counter(add)                       my_func = func(my_func)
Introspecting Decorated Functions
Let's use the same counter decorator:

def counter(fn):
count = 0
    def inner(*args, **kwargs): # using *args, **kwargs means we can call any function fn with any combination of positional and keyword arguments
nonlocal count
count += 1
print('Function {0} was called {1} times'.format(fn.__name__, count))
return fn(*args, **kwargs)
return inner
"""
# @counter # if not commented out, python shows it is not defined
from itertools import count
def mult(a, b, c=1):
    # returns the product of the three values
    return a * b * c  # without the @ syntax I could have written: mult = counter(mult)
mult.__name__ # mult is now inner # The dunder 'name' property
help(mult) # Help on function inner in module __main__:
# inner(*args, kwargs)
# we have lost our docstring, and even the original function signature
# even using the inspect module's signature does not yield better results
""" One approach to fixing this
We can try to fix this problem, at least for the docstring and function name as follows:
"""
def counter(fn):
    count = 0
    def inner(*args, **kwargs):
        nonlocal count
        count += 1
        print('Function {0} was called {1} times'.format(fn.__name__, count))
        return fn(*args, **kwargs)
    # copy the metadata we care about from fn onto the closure
    inner.__name__ = fn.__name__
    inner.__doc__ = fn.__doc__
    return inner
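# A standard-library alternative for the same fix: functools.wraps copies
# __name__, __doc__ (and more) from fn onto the closure. The name counter_wraps
# below is only an illustrative stand-in, not part of the original notes.
from functools import wraps

def counter_wraps(fn):
    count = 0

    @wraps(fn)  # copies fn's metadata (name, docstring, ...) onto inner
    def inner(*args, **kwargs):
        nonlocal count
        count += 1
        print('Function {0} was called {1} times'.format(fn.__name__, count))
        return fn(*args, **kwargs)
    return inner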
| 38.313725
| 205
| 0.554759
|
794b7f256f4fc36a8e9b03ebbc8dd3752c2ce6bf
| 36,001
|
py
|
Python
|
tests/api_connexion/endpoints/test_dag_run_endpoint.py
|
emilioego/airflow
|
3457c7847cd24413ff5b622e65c27d8370f94502
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 79
|
2021-10-15T07:32:27.000Z
|
2022-03-28T04:10:19.000Z
|
tests/api_connexion/endpoints/test_dag_run_endpoint.py
|
emilioego/airflow
|
3457c7847cd24413ff5b622e65c27d8370f94502
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 153
|
2021-10-15T05:23:46.000Z
|
2022-02-23T06:07:10.000Z
|
tests/api_connexion/endpoints/test_dag_run_endpoint.py
|
emilioego/airflow
|
3457c7847cd24413ff5b622e65c27d8370f94502
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 23
|
2021-10-15T02:36:37.000Z
|
2022-03-17T02:59:27.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from datetime import timedelta
from parameterized import parameterized
from airflow.api_connexion.exceptions import EXCEPTIONS_LINK_MAP
from airflow.models import DagModel, DagRun
from airflow.security import permissions
from airflow.utils import timezone
from airflow.utils.session import create_session, provide_session
from airflow.utils.types import DagRunType
from airflow.www import app
from tests.test_utils.api_connexion_utils import assert_401, create_user, delete_user
from tests.test_utils.config import conf_vars
from tests.test_utils.db import clear_db_dags, clear_db_runs
class TestDagRunEndpoint(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
with conf_vars({("api", "auth_backend"): "tests.test_utils.remote_user_api_auth_backend"}):
cls.app = app.create_app(testing=True) # type:ignore
create_user(
cls.app, # type: ignore
username="test",
role_name="Test",
permissions=[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG_RUN),
],
)
create_user(
cls.app, # type: ignore
username="test_granular_permissions",
role_name="TestGranularDag",
permissions=[(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN)],
)
cls.app.appbuilder.sm.sync_perm_for_dag( # type: ignore # pylint: disable=no-member
"TEST_DAG_ID",
access_control={'TestGranularDag': [permissions.ACTION_CAN_EDIT, permissions.ACTION_CAN_READ]},
)
create_user(cls.app, username="test_no_permissions", role_name="TestNoPermissions") # type: ignore
@classmethod
def tearDownClass(cls) -> None:
delete_user(cls.app, username="test") # type: ignore
delete_user(cls.app, username="test_granular_permissions") # type: ignore
delete_user(cls.app, username="test_no_permissions") # type: ignore
def setUp(self) -> None:
self.client = self.app.test_client() # type:ignore
self.default_time = "2020-06-11T18:00:00+00:00"
self.default_time_2 = "2020-06-12T18:00:00+00:00"
clear_db_runs()
clear_db_dags()
def tearDown(self) -> None:
clear_db_runs()
# clear_db_dags()
def _create_test_dag_run(self, state='running', extra_dag=False, commit=True):
dag_runs = []
dags = [DagModel(dag_id="TEST_DAG_ID")]
dagrun_model_1 = DagRun(
dag_id="TEST_DAG_ID",
run_id="TEST_DAG_RUN_ID_1",
run_type=DagRunType.MANUAL,
execution_date=timezone.parse(self.default_time),
start_date=timezone.parse(self.default_time),
external_trigger=True,
state=state,
)
dag_runs.append(dagrun_model_1)
dagrun_model_2 = DagRun(
dag_id="TEST_DAG_ID",
run_id="TEST_DAG_RUN_ID_2",
run_type=DagRunType.MANUAL,
execution_date=timezone.parse(self.default_time_2),
start_date=timezone.parse(self.default_time),
external_trigger=True,
)
dag_runs.append(dagrun_model_2)
if extra_dag:
for i in range(3, 5):
dags.append(DagModel(dag_id='TEST_DAG_ID_' + str(i)))
dag_runs.append(
DagRun(
dag_id='TEST_DAG_ID_' + str(i),
run_id='TEST_DAG_RUN_ID_' + str(i),
run_type=DagRunType.MANUAL,
execution_date=timezone.parse(self.default_time_2),
start_date=timezone.parse(self.default_time),
external_trigger=True,
)
)
if commit:
with create_session() as session:
session.add_all(dag_runs)
session.add_all(dags)
return dag_runs
class TestDeleteDagRun(TestDagRunEndpoint):
@provide_session
def test_should_respond_204(self, session):
session.add_all(self._create_test_dag_run())
session.commit()
response = self.client.delete(
"api/v1/dags/TEST_DAG_ID/dagRuns/TEST_DAG_RUN_ID_1", environ_overrides={'REMOTE_USER': "test"}
)
self.assertEqual(response.status_code, 204)
# Check if the Dag Run is deleted from the database
response = self.client.get(
"api/v1/dags/TEST_DAG_ID/dagRuns/TEST_DAG_RUN_ID_1", environ_overrides={'REMOTE_USER': "test"}
)
self.assertEqual(response.status_code, 404)
def test_should_respond_404(self):
response = self.client.delete(
"api/v1/dags/INVALID_DAG_RUN/dagRuns/INVALID_DAG_RUN", environ_overrides={'REMOTE_USER': "test"}
)
self.assertEqual(response.status_code, 404)
self.assertEqual(
response.json,
{
"detail": "DAGRun with DAG ID: 'INVALID_DAG_RUN' and DagRun ID: 'INVALID_DAG_RUN' not found",
"status": 404,
"title": "Not Found",
"type": EXCEPTIONS_LINK_MAP[404],
},
)
@provide_session
def test_should_raises_401_unauthenticated(self, session):
session.add_all(self._create_test_dag_run())
session.commit()
response = self.client.delete(
"api/v1/dags/TEST_DAG_ID/dagRuns/TEST_DAG_RUN_ID_1",
)
assert_401(response)
def test_should_raise_403_forbidden(self):
response = self.client.get(
"api/v1/dags/TEST_DAG_ID/dagRuns/TEST_DAG_RUN_ID",
environ_overrides={'REMOTE_USER': "test_no_permissions"},
)
assert response.status_code == 403
class TestGetDagRun(TestDagRunEndpoint):
@provide_session
def test_should_respond_200(self, session):
dagrun_model = DagRun(
dag_id="TEST_DAG_ID",
run_id="TEST_DAG_RUN_ID",
run_type=DagRunType.MANUAL,
execution_date=timezone.parse(self.default_time),
start_date=timezone.parse(self.default_time),
external_trigger=True,
)
session.add(dagrun_model)
session.commit()
result = session.query(DagRun).all()
assert len(result) == 1
response = self.client.get(
"api/v1/dags/TEST_DAG_ID/dagRuns/TEST_DAG_RUN_ID", environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 200
expected_response = {
'dag_id': 'TEST_DAG_ID',
'dag_run_id': 'TEST_DAG_RUN_ID',
'end_date': None,
'state': 'running',
'execution_date': self.default_time,
'external_trigger': True,
'start_date': self.default_time,
'conf': {},
}
assert response.json == expected_response
def test_should_respond_404(self):
response = self.client.get(
"api/v1/dags/invalid-id/dagRuns/invalid-id", environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 404
expected_resp = {
'detail': "DAGRun with DAG ID: 'invalid-id' and DagRun ID: 'invalid-id' not found",
'status': 404,
'title': 'DAGRun not found',
'type': EXCEPTIONS_LINK_MAP[404],
}
assert expected_resp == response.json
@provide_session
def test_should_raises_401_unauthenticated(self, session):
dagrun_model = DagRun(
dag_id="TEST_DAG_ID",
run_id="TEST_DAG_RUN_ID",
run_type=DagRunType.MANUAL,
execution_date=timezone.parse(self.default_time),
start_date=timezone.parse(self.default_time),
external_trigger=True,
)
session.add(dagrun_model)
session.commit()
response = self.client.get("api/v1/dags/TEST_DAG_ID/dagRuns/TEST_DAG_RUN_ID")
assert_401(response)
class TestGetDagRuns(TestDagRunEndpoint):
@provide_session
def test_should_respond_200(self, session):
self._create_test_dag_run()
result = session.query(DagRun).all()
assert len(result) == 2
response = self.client.get(
"api/v1/dags/TEST_DAG_ID/dagRuns", environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 200
assert response.json == {
"dag_runs": [
{
'dag_id': 'TEST_DAG_ID',
'dag_run_id': 'TEST_DAG_RUN_ID_1',
'end_date': None,
'state': 'running',
'execution_date': self.default_time,
'external_trigger': True,
'start_date': self.default_time,
'conf': {},
},
{
'dag_id': 'TEST_DAG_ID',
'dag_run_id': 'TEST_DAG_RUN_ID_2',
'end_date': None,
'state': 'running',
'execution_date': self.default_time_2,
'external_trigger': True,
'start_date': self.default_time,
'conf': {},
},
],
"total_entries": 2,
}
def test_should_return_all_with_tilde_as_dag_id_and_all_dag_permissions(self):
self._create_test_dag_run(extra_dag=True)
expected_dag_run_ids = ['TEST_DAG_ID', 'TEST_DAG_ID', "TEST_DAG_ID_3", "TEST_DAG_ID_4"]
response = self.client.get("api/v1/dags/~/dagRuns", environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 200
dag_run_ids = [dag_run["dag_id"] for dag_run in response.json["dag_runs"]]
assert dag_run_ids == expected_dag_run_ids
def test_should_return_accessible_with_tilde_as_dag_id_and_dag_level_permissions(self):
self._create_test_dag_run(extra_dag=True)
expected_dag_run_ids = ['TEST_DAG_ID', 'TEST_DAG_ID']
response = self.client.get(
"api/v1/dags/~/dagRuns", environ_overrides={'REMOTE_USER': "test_granular_permissions"}
)
assert response.status_code == 200
dag_run_ids = [dag_run["dag_id"] for dag_run in response.json["dag_runs"]]
assert dag_run_ids == expected_dag_run_ids
def test_should_raises_401_unauthenticated(self):
self._create_test_dag_run()
response = self.client.get("api/v1/dags/TEST_DAG_ID/dagRuns")
assert_401(response)
class TestGetDagRunsPagination(TestDagRunEndpoint):
@parameterized.expand(
[
("api/v1/dags/TEST_DAG_ID/dagRuns?limit=1", ["TEST_DAG_RUN_ID1"]),
(
"api/v1/dags/TEST_DAG_ID/dagRuns?limit=2",
["TEST_DAG_RUN_ID1", "TEST_DAG_RUN_ID2"],
),
(
"api/v1/dags/TEST_DAG_ID/dagRuns?offset=5",
[
"TEST_DAG_RUN_ID6",
"TEST_DAG_RUN_ID7",
"TEST_DAG_RUN_ID8",
"TEST_DAG_RUN_ID9",
"TEST_DAG_RUN_ID10",
],
),
(
"api/v1/dags/TEST_DAG_ID/dagRuns?offset=0",
[
"TEST_DAG_RUN_ID1",
"TEST_DAG_RUN_ID2",
"TEST_DAG_RUN_ID3",
"TEST_DAG_RUN_ID4",
"TEST_DAG_RUN_ID5",
"TEST_DAG_RUN_ID6",
"TEST_DAG_RUN_ID7",
"TEST_DAG_RUN_ID8",
"TEST_DAG_RUN_ID9",
"TEST_DAG_RUN_ID10",
],
),
("api/v1/dags/TEST_DAG_ID/dagRuns?limit=1&offset=5", ["TEST_DAG_RUN_ID6"]),
("api/v1/dags/TEST_DAG_ID/dagRuns?limit=1&offset=1", ["TEST_DAG_RUN_ID2"]),
(
"api/v1/dags/TEST_DAG_ID/dagRuns?limit=2&offset=2",
["TEST_DAG_RUN_ID3", "TEST_DAG_RUN_ID4"],
),
]
)
def test_handle_limit_and_offset(self, url, expected_dag_run_ids):
self._create_dag_runs(10)
response = self.client.get(url, environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 200
assert response.json["total_entries"] == 10
dag_run_ids = [dag_run["dag_run_id"] for dag_run in response.json["dag_runs"]]
assert dag_run_ids == expected_dag_run_ids
def test_should_respect_page_size_limit(self):
self._create_dag_runs(200)
response = self.client.get(
"api/v1/dags/TEST_DAG_ID/dagRuns", environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 200
assert response.json["total_entries"] == 200
assert len(response.json["dag_runs"]) == 100 # default is 100
@conf_vars({("api", "maximum_page_limit"): "150"})
def test_should_return_conf_max_if_req_max_above_conf(self):
self._create_dag_runs(200)
response = self.client.get(
"api/v1/dags/TEST_DAG_ID/dagRuns?limit=180", environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 200
self.assertEqual(len(response.json["dag_runs"]), 150)
def _create_dag_runs(self, count):
dag_runs = [
DagRun(
dag_id="TEST_DAG_ID",
run_id="TEST_DAG_RUN_ID" + str(i),
run_type=DagRunType.MANUAL,
execution_date=timezone.parse(self.default_time) + timedelta(minutes=i),
start_date=timezone.parse(self.default_time),
external_trigger=True,
)
for i in range(1, count + 1)
]
dag = DagModel(dag_id="TEST_DAG_ID")
with create_session() as session:
session.add_all(dag_runs)
session.add(dag)
class TestGetDagRunsPaginationFilters(TestDagRunEndpoint):
@parameterized.expand(
[
(
"api/v1/dags/TEST_DAG_ID/dagRuns?start_date_gte=2020-06-18T18:00:00+00:00",
["TEST_START_EXEC_DAY_18", "TEST_START_EXEC_DAY_19"],
),
(
"api/v1/dags/TEST_DAG_ID/dagRuns?start_date_lte=2020-06-11T18:00:00+00:00",
["TEST_START_EXEC_DAY_10", "TEST_START_EXEC_DAY_11"],
),
(
"api/v1/dags/TEST_DAG_ID/dagRuns?start_date_lte= 2020-06-15T18:00:00+00:00"
"&start_date_gte=2020-06-12T18:00:00Z",
[
"TEST_START_EXEC_DAY_12",
"TEST_START_EXEC_DAY_13",
"TEST_START_EXEC_DAY_14",
"TEST_START_EXEC_DAY_15",
],
),
(
"api/v1/dags/TEST_DAG_ID/dagRuns?execution_date_lte=2020-06-13T18:00:00+00:00",
[
"TEST_START_EXEC_DAY_10",
"TEST_START_EXEC_DAY_11",
"TEST_START_EXEC_DAY_12",
"TEST_START_EXEC_DAY_13",
],
),
(
"api/v1/dags/TEST_DAG_ID/dagRuns?execution_date_gte=2020-06-16T18:00:00+00:00",
[
"TEST_START_EXEC_DAY_16",
"TEST_START_EXEC_DAY_17",
"TEST_START_EXEC_DAY_18",
"TEST_START_EXEC_DAY_19",
],
),
]
)
@provide_session
def test_date_filters_gte_and_lte(self, url, expected_dag_run_ids, session):
dagrun_models = self._create_dag_runs()
session.add_all(dagrun_models)
session.commit()
response = self.client.get(url, environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 200
assert response.json["total_entries"] == len(expected_dag_run_ids)
dag_run_ids = [dag_run["dag_run_id"] for dag_run in response.json["dag_runs"]]
assert dag_run_ids == expected_dag_run_ids
def _create_dag_runs(self):
dates = [
"2020-06-10T18:00:00+00:00",
"2020-06-11T18:00:00+00:00",
"2020-06-12T18:00:00+00:00",
"2020-06-13T18:00:00+00:00",
"2020-06-14T18:00:00+00:00",
"2020-06-15T18:00:00Z",
"2020-06-16T18:00:00Z",
"2020-06-17T18:00:00Z",
"2020-06-18T18:00:00Z",
"2020-06-19T18:00:00Z",
]
return [
DagRun(
dag_id="TEST_DAG_ID",
run_id="TEST_START_EXEC_DAY_1" + str(i),
run_type=DagRunType.MANUAL,
execution_date=timezone.parse(dates[i]),
start_date=timezone.parse(dates[i]),
external_trigger=True,
state="success",
)
for i in range(len(dates))
]
class TestGetDagRunsEndDateFilters(TestDagRunEndpoint):
@parameterized.expand(
[
(
f"api/v1/dags/TEST_DAG_ID/dagRuns?end_date_gte="
f"{(timezone.utcnow() + timedelta(days=1)).isoformat()}",
[],
),
(
f"api/v1/dags/TEST_DAG_ID/dagRuns?end_date_lte="
f"{(timezone.utcnow() + timedelta(days=1)).isoformat()}",
["TEST_DAG_RUN_ID_1"],
),
]
)
def test_end_date_gte_lte(self, url, expected_dag_run_ids):
self._create_test_dag_run('success') # state==success, then end date is today
response = self.client.get(url, environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 200
assert response.json["total_entries"] == len(expected_dag_run_ids)
dag_run_ids = [dag_run["dag_run_id"] for dag_run in response.json["dag_runs"] if dag_run]
assert dag_run_ids == expected_dag_run_ids
class TestGetDagRunBatch(TestDagRunEndpoint):
def test_should_respond_200(self):
self._create_test_dag_run()
response = self.client.post(
"api/v1/dags/~/dagRuns/list",
json={"dag_ids": ["TEST_DAG_ID"]},
environ_overrides={'REMOTE_USER': "test"},
)
assert response.status_code == 200
assert response.json == {
"dag_runs": [
{
'dag_id': 'TEST_DAG_ID',
'dag_run_id': 'TEST_DAG_RUN_ID_1',
'end_date': None,
'state': 'running',
'execution_date': self.default_time,
'external_trigger': True,
'start_date': self.default_time,
'conf': {},
},
{
'dag_id': 'TEST_DAG_ID',
'dag_run_id': 'TEST_DAG_RUN_ID_2',
'end_date': None,
'state': 'running',
'execution_date': self.default_time_2,
'external_trigger': True,
'start_date': self.default_time,
'conf': {},
},
],
"total_entries": 2,
}
def test_should_return_accessible_with_tilde_as_dag_id_and_dag_level_permissions(self):
self._create_test_dag_run(extra_dag=True)
response = self.client.post(
"api/v1/dags/~/dagRuns/list",
json={"dag_ids": []},
environ_overrides={'REMOTE_USER': "test_granular_permissions"},
)
assert response.status_code == 200
assert response.json == {
"dag_runs": [
{
'dag_id': 'TEST_DAG_ID',
'dag_run_id': 'TEST_DAG_RUN_ID_1',
'end_date': None,
'state': 'running',
'execution_date': self.default_time,
'external_trigger': True,
'start_date': self.default_time,
'conf': {},
},
{
'dag_id': 'TEST_DAG_ID',
'dag_run_id': 'TEST_DAG_RUN_ID_2',
'end_date': None,
'state': 'running',
'execution_date': self.default_time_2,
'external_trigger': True,
'start_date': self.default_time,
'conf': {},
},
],
"total_entries": 2,
}
@parameterized.expand(
[
(
{"dag_ids": ["TEST_DAG_ID"], "page_offset": -1},
"-1 is less than the minimum of 0 - 'page_offset'",
),
({"dag_ids": ["TEST_DAG_ID"], "page_limit": 0}, "0 is less than the minimum of 1 - 'page_limit'"),
({"dag_ids": "TEST_DAG_ID"}, "'TEST_DAG_ID' is not of type 'array' - 'dag_ids'"),
({"start_date_gte": "2020-06-12T18"}, "{'start_date_gte': ['Not a valid datetime.']}"),
]
)
def test_payload_validation(self, payload, error):
self._create_test_dag_run()
response = self.client.post(
"api/v1/dags/~/dagRuns/list", json=payload, environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 400
assert error == response.json.get("detail")
def test_should_raises_401_unauthenticated(self):
self._create_test_dag_run()
response = self.client.post("api/v1/dags/~/dagRuns/list", json={"dag_ids": ["TEST_DAG_ID"]})
assert_401(response)
class TestGetDagRunBatchPagination(TestDagRunEndpoint):
@parameterized.expand(
[
({"page_limit": 1}, ["TEST_DAG_RUN_ID1"]),
({"page_limit": 2}, ["TEST_DAG_RUN_ID1", "TEST_DAG_RUN_ID2"]),
(
{"page_offset": 5},
[
"TEST_DAG_RUN_ID6",
"TEST_DAG_RUN_ID7",
"TEST_DAG_RUN_ID8",
"TEST_DAG_RUN_ID9",
"TEST_DAG_RUN_ID10",
],
),
(
{"page_offset": 0},
[
"TEST_DAG_RUN_ID1",
"TEST_DAG_RUN_ID2",
"TEST_DAG_RUN_ID3",
"TEST_DAG_RUN_ID4",
"TEST_DAG_RUN_ID5",
"TEST_DAG_RUN_ID6",
"TEST_DAG_RUN_ID7",
"TEST_DAG_RUN_ID8",
"TEST_DAG_RUN_ID9",
"TEST_DAG_RUN_ID10",
],
),
({"page_offset": 5, "page_limit": 1}, ["TEST_DAG_RUN_ID6"]),
({"page_offset": 1, "page_limit": 1}, ["TEST_DAG_RUN_ID2"]),
(
{"page_offset": 2, "page_limit": 2},
["TEST_DAG_RUN_ID3", "TEST_DAG_RUN_ID4"],
),
]
)
def test_handle_limit_and_offset(self, payload, expected_dag_run_ids):
self._create_dag_runs(10)
response = self.client.post(
"api/v1/dags/~/dagRuns/list", json=payload, environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 200
assert response.json["total_entries"] == 10
dag_run_ids = [dag_run["dag_run_id"] for dag_run in response.json["dag_runs"]]
assert dag_run_ids == expected_dag_run_ids
def test_should_respect_page_size_limit(self):
self._create_dag_runs(200)
response = self.client.post(
"api/v1/dags/~/dagRuns/list", json={}, environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 200
assert response.json["total_entries"] == 200
assert len(response.json["dag_runs"]) == 100 # default is 100
def _create_dag_runs(self, count):
dag_runs = [
DagRun(
dag_id="TEST_DAG_ID",
run_id="TEST_DAG_RUN_ID" + str(i),
run_type=DagRunType.MANUAL,
execution_date=timezone.parse(self.default_time) + timedelta(minutes=i),
start_date=timezone.parse(self.default_time),
external_trigger=True,
)
for i in range(1, count + 1)
]
dag = DagModel(dag_id="TEST_DAG_ID")
with create_session() as session:
session.add_all(dag_runs)
session.add(dag)
class TestGetDagRunBatchDateFilters(TestDagRunEndpoint):
@parameterized.expand(
[
(
{"start_date_gte": "2020-06-18T18:00:00+00:00"},
["TEST_START_EXEC_DAY_18", "TEST_START_EXEC_DAY_19"],
),
(
{"start_date_lte": "2020-06-11T18:00:00+00:00"},
["TEST_START_EXEC_DAY_10", "TEST_START_EXEC_DAY_11"],
),
(
{"start_date_lte": "2020-06-15T18:00:00+00:00", "start_date_gte": "2020-06-12T18:00:00Z"},
[
"TEST_START_EXEC_DAY_12",
"TEST_START_EXEC_DAY_13",
"TEST_START_EXEC_DAY_14",
"TEST_START_EXEC_DAY_15",
],
),
(
{"execution_date_lte": "2020-06-13T18:00:00+00:00"},
[
"TEST_START_EXEC_DAY_10",
"TEST_START_EXEC_DAY_11",
"TEST_START_EXEC_DAY_12",
"TEST_START_EXEC_DAY_13",
],
),
(
{"execution_date_gte": "2020-06-16T18:00:00+00:00"},
[
"TEST_START_EXEC_DAY_16",
"TEST_START_EXEC_DAY_17",
"TEST_START_EXEC_DAY_18",
"TEST_START_EXEC_DAY_19",
],
),
]
)
def test_date_filters_gte_and_lte(self, payload, expected_dag_run_ids):
self._create_dag_runs()
response = self.client.post(
"api/v1/dags/~/dagRuns/list", json=payload, environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 200
assert response.json["total_entries"] == len(expected_dag_run_ids)
dag_run_ids = [dag_run["dag_run_id"] for dag_run in response.json["dag_runs"]]
assert dag_run_ids == expected_dag_run_ids
def _create_dag_runs(self):
dates = [
'2020-06-10T18:00:00+00:00',
'2020-06-11T18:00:00+00:00',
'2020-06-12T18:00:00+00:00',
'2020-06-13T18:00:00+00:00',
'2020-06-14T18:00:00+00:00',
'2020-06-15T18:00:00Z',
'2020-06-16T18:00:00Z',
'2020-06-17T18:00:00Z',
'2020-06-18T18:00:00Z',
'2020-06-19T18:00:00Z',
]
dag = DagModel(dag_id="TEST_DAG_ID")
dag_runs = [
DagRun(
dag_id="TEST_DAG_ID",
run_id="TEST_START_EXEC_DAY_1" + str(i),
run_type=DagRunType.MANUAL,
execution_date=timezone.parse(dates[i]),
start_date=timezone.parse(dates[i]),
external_trigger=True,
state='success',
)
for i in range(len(dates))
]
with create_session() as session:
session.add_all(dag_runs)
session.add(dag)
return dag_runs
@parameterized.expand(
[
({"execution_date_gte": '2020-11-09T16:25:56.939143'}, 'Naive datetime is disallowed'),
(
{"start_date_gte": "2020-06-18T16:25:56.939143"},
'Naive datetime is disallowed',
),
(
{"start_date_lte": "2020-06-18T18:00:00.564434"},
'Naive datetime is disallowed',
),
(
{"start_date_lte": "2020-06-15T18:00:00.653434", "start_date_gte": "2020-06-12T18:00.343534"},
'Naive datetime is disallowed',
),
(
{"execution_date_lte": "2020-06-13T18:00:00.353454"},
'Naive datetime is disallowed',
),
({"execution_date_gte": "2020-06-16T18:00:00.676443"}, 'Naive datetime is disallowed'),
]
)
def test_naive_date_filters_raises_400(self, payload, expected_response):
self._create_dag_runs()
response = self.client.post(
"api/v1/dags/~/dagRuns/list", json=payload, environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 400
self.assertEqual(response.json['detail'], expected_response)
@parameterized.expand(
[
(
{"end_date_gte": f"{(timezone.utcnow() + timedelta(days=1)).isoformat()}"},
[],
),
(
{"end_date_lte": f"{(timezone.utcnow() + timedelta(days=1)).isoformat()}"},
["TEST_DAG_RUN_ID_1"],
),
]
)
def test_end_date_gte_lte(self, payload, expected_dag_run_ids):
self._create_test_dag_run('success') # state==success, then end date is today
response = self.client.post(
"api/v1/dags/~/dagRuns/list", json=payload, environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 200
assert response.json["total_entries"] == len(expected_dag_run_ids)
dag_run_ids = [dag_run["dag_run_id"] for dag_run in response.json["dag_runs"] if dag_run]
assert dag_run_ids == expected_dag_run_ids
class TestPostDagRun(TestDagRunEndpoint):
@parameterized.expand(
[
(
"All fields present",
{
"dag_run_id": "TEST_DAG_RUN",
"execution_date": "2020-06-11T18:00:00+00:00",
},
),
("dag_run_id missing", {"execution_date": "2020-06-11T18:00:00+00:00"}),
("dag_run_id and execution_date missing", {}),
]
)
@provide_session
def test_should_respond_200(self, name, request_json, session):
del name
dag_instance = DagModel(dag_id="TEST_DAG_ID")
session.add(dag_instance)
session.commit()
response = self.client.post(
"api/v1/dags/TEST_DAG_ID/dagRuns", json=request_json, environ_overrides={'REMOTE_USER': "test"}
)
self.assertEqual(response.status_code, 200)
self.assertEqual(
{
"conf": {},
"dag_id": "TEST_DAG_ID",
"dag_run_id": response.json["dag_run_id"],
"end_date": None,
"execution_date": response.json["execution_date"],
"external_trigger": True,
"start_date": response.json["start_date"],
"state": "running",
},
response.json,
)
@parameterized.expand(
[
({'execution_date': "2020-11-10T08:25:56.939143"}, 'Naive datetime is disallowed'),
({'execution_date': "2020-11-10T08:25:56P"}, "{'execution_date': ['Not a valid datetime.']}"),
]
)
@provide_session
def test_should_response_400_for_naive_datetime_and_bad_datetime(self, data, expected, session):
dag_instance = DagModel(dag_id="TEST_DAG_ID")
session.add(dag_instance)
session.commit()
response = self.client.post(
"api/v1/dags/TEST_DAG_ID/dagRuns", json=data, environ_overrides={'REMOTE_USER': "test"}
)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.json['detail'], expected)
def test_response_404(self):
response = self.client.post(
"api/v1/dags/TEST_DAG_ID/dagRuns",
json={"dag_run_id": "TEST_DAG_RUN", "execution_date": self.default_time},
environ_overrides={'REMOTE_USER': "test"},
)
self.assertEqual(response.status_code, 404)
self.assertEqual(
{
"detail": "DAG with dag_id: 'TEST_DAG_ID' not found",
"status": 404,
"title": "DAG not found",
"type": EXCEPTIONS_LINK_MAP[404],
},
response.json,
)
@parameterized.expand(
[
(
"start_date in request json",
"api/v1/dags/TEST_DAG_ID/dagRuns",
{
"start_date": "2020-06-11T18:00:00+00:00",
"execution_date": "2020-06-12T18:00:00+00:00",
},
{
"detail": "Property is read-only - 'start_date'",
"status": 400,
"title": "Bad Request",
"type": EXCEPTIONS_LINK_MAP[400],
},
),
(
"state in request json",
"api/v1/dags/TEST_DAG_ID/dagRuns",
{"state": "failed", "execution_date": "2020-06-12T18:00:00+00:00"},
{
"detail": "Property is read-only - 'state'",
"status": 400,
"title": "Bad Request",
"type": EXCEPTIONS_LINK_MAP[400],
},
),
]
)
@provide_session
def test_response_400(self, name, url, request_json, expected_response, session):
del name
dag_instance = DagModel(dag_id="TEST_DAG_ID")
session.add(dag_instance)
session.commit()
response = self.client.post(url, json=request_json, environ_overrides={'REMOTE_USER': "test"})
self.assertEqual(response.status_code, 400, response.data)
self.assertEqual(expected_response, response.json)
def test_response_409(self):
self._create_test_dag_run()
response = self.client.post(
"api/v1/dags/TEST_DAG_ID/dagRuns",
json={
"dag_run_id": "TEST_DAG_RUN_ID_1",
"execution_date": self.default_time,
},
environ_overrides={'REMOTE_USER': "test"},
)
self.assertEqual(response.status_code, 409, response.data)
self.assertEqual(
response.json,
{
"detail": "DAGRun with DAG ID: 'TEST_DAG_ID' and "
"DAGRun ID: 'TEST_DAG_RUN_ID_1' already exists",
"status": 409,
"title": "Conflict",
"type": EXCEPTIONS_LINK_MAP[409],
},
)
def test_should_raises_401_unauthenticated(self):
response = self.client.post(
"api/v1/dags/TEST_DAG_ID/dagRuns",
json={
"dag_run_id": "TEST_DAG_RUN_ID_1",
"execution_date": self.default_time,
},
)
assert_401(response)
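# Hedged illustration (not part of the test suite): the happy-path request these tests
# exercise, as an external client might issue it. The endpoint, payload keys and the
# resulting "running" state come from the assertions above; the base URL and the
# basic-auth credentials below are placeholders.
#
#   import requests
#
#   resp = requests.post(
#       "http://localhost:8080/api/v1/dags/TEST_DAG_ID/dagRuns",
#       json={"dag_run_id": "TEST_DAG_RUN", "execution_date": "2020-06-11T18:00:00+00:00"},
#       auth=("test", "test"),
#   )
#   assert resp.status_code == 200
#   assert resp.json()["state"] == "running"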
| 38.258236
| 110
| 0.552624
|
794b802b4facf7d82d7ab87fc50ec8c3d69ae220
| 31,456
|
py
|
Python
|
src/ea/sutadapters/CLI/client.py
|
ExtensiveAutomation/extensiveautomation-plugin-cli
|
dbe25549166e5ec7732887a0341143bac567f6c1
|
[
"MIT"
] | null | null | null |
src/ea/sutadapters/CLI/client.py
|
ExtensiveAutomation/extensiveautomation-plugin-cli
|
dbe25549166e5ec7732887a0341143bac567f6c1
|
[
"MIT"
] | null | null | null |
src/ea/sutadapters/CLI/client.py
|
ExtensiveAutomation/extensiveautomation-plugin-cli
|
dbe25549166e5ec7732887a0341143bac567f6c1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------
# MIT License
#
# Copyright (c) 2010-2021 Denis MACHARD
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -------------------------------------------------------------------
import sys
import threading
import select
import socket
import io
EXT_SSH_LIB_INSTALLED=True
try:
import paramiko
except ImportError:
EXT_SSH_LIB_INSTALLED=False
from ea.testexecutorlib import TestValidatorsLib as TestValidatorsLib
from ea.testexecutorlib import TestTemplatesLib as TestTemplatesLib
from ea.testexecutorlib import TestOperatorsLib as TestOperatorsLib
from ea.testexecutorlib import TestAdapterLib as TestAdapterLib
from ea.sutadapters.CLI import templates
# unicode = str with python3
if sys.version_info > (3,):
unicode = str
IPv4=4
__NAME__="""SSHv2"""
AGENT_INITIALIZED = "AGENT_INITIALIZED"
AGENT_TYPE_EXPECTED='ssh'
class TransportSsh(object):
def __init__(self):
"""
"""
self.authenticated = False
def is_authenticated(self):
"""
"""
		# do not force the flag here; it is set by the authentication callbacks
		return self.authenticated
def close(self):
"""
"""
pass
class Client(TestAdapterLib.Adapter):
def __init__ (self, parent, destIp, destPort=22,
bindIp = '0.0.0.0', bindPort=0, destHost='',
login='admin', password='admin',
privateKey=None, privateKeyPath=None, verbose=True,
socketTimeout=10.0, socketFamily=IPv4,
name=None, tcpKeepAlive=True, tcpKeepAliveInterval=30,
debug=False, logEventSent=True,
logEventReceived=True, parentName=None,
shared=False, sftpSupport=False,
terminalType='vt100',
terminalWidth=100, terminalHeight=200,
agent=None, agentSupport=False):
"""
		This class enables the use of SSH v2 as a client only.
		Authentication by login/password or by key is supported.
		Lower network layers (IP, Ethernet) are not controllable.
@param parent: parent testcase
@type parent: testcase
@param name: adapter name used with from origin/to destination (default=None)
@type name: string/none
@param login: ssh login (default=admin)
@type login: string
@param privateKey: string private key to use to authenticate, push your public key on the remote server
@type privateKey: string/none
@param privateKeyPath: path to the private key to use to authenticate, push your public key on the remote server
@type privateKeyPath: string/none
@param password: ssh password (default=admin)
@type password: string
@param bindIp: bind on ip (source ip)
@type bindIp: string
@param bindPort: bind on port (source port)
@type bindPort: integer
@param destIp: destination ip
@type destIp: string
@param destPort: destination port
@type destPort: integer
@param destHost: destination host (automatic dns resolution)
@type destHost: string
@param socketFamily: SutAdapters.IP.IPv4 (default) | SutAdapters.IP.IPv6
@type socketFamily: intconstant
		@param socketTimeout: timeout to connect in seconds (default=10s)
@type socketTimeout: float
		@param tcpKeepAlive: turn on tcp keep-alive (default=True)
@type tcpKeepAlive: boolean
@param tcpKeepAliveInterval: tcp keep-alive interval (default=30s)
@type tcpKeepAliveInterval: float
@param terminalType: terminal type to emulate (default=vt100)
@type terminalType: string
		@param terminalWidth: terminal width in characters (default=100)
@type terminalWidth: integer
		@param terminalHeight: terminal height in characters (default=200)
@type terminalHeight: integer
@param debug: True to activate debug mode (default=False)
@type debug: boolean
@param verbose: False to disable verbose mode (default=True)
@type verbose: boolean
@param shared: shared adapter (default=False)
@type shared: boolean
@param agent: agent to use, ssh type expected
@type agent: string/none
@param agentSupport: agent support (default=False)
@type agentSupport: boolean
"""
if not isinstance(bindPort, int):
raise TestAdapterLib.ValueException(TestAdapterLib.caller(), "bindPort argument is not a integer (%s)" % type(bindPort) )
if not isinstance(destPort, int):
raise TestAdapterLib.ValueException(TestAdapterLib.caller(), "destPort argument is not a integer (%s)" % type(destPort) )
if isinstance(agent, str):
agent = {"name": agent, "type": AGENT_TYPE_EXPECTED }
TestAdapterLib.Adapter.__init__(self, name = __NAME__, parent = parent,
realname=name, shared=shared, debug=debug,
showEvts=verbose, showSentEvts=verbose, showRecvEvts=verbose,
agentSupport=agentSupport, agent=agent,
caller=TestAdapterLib.caller(),
agentType=AGENT_TYPE_EXPECTED)
if parentName is not None:
TestAdapterLib.Adapter.setName(self, name="%s_%s" % (parentName,__NAME__) )
self.logEventSent = logEventSent
self.logEventReceived = logEventReceived
self.parent = parent
self.__mutex__ = threading.RLock()
if not EXT_SSH_LIB_INSTALLED:
raise Exception('External ssh lib not installed!')
# adding log
if not agentSupport: paramiko.util.log_to_file("%s/sshlog.internal" % self.getTestResultPath() )
self.socket = None
self.sshTranport = None
self.sshChannel = None
self.sourceIp = bindIp
self.sourcePort = bindPort
self.connected = False
self.channelOpened = False
# sftp support
self.sftpOpened = False
self.sftpSupport = sftpSupport
# ssh options
self.cfg = {}
# transport options
self.cfg['bind-ip'] = bindIp
self.cfg['bind-port'] = bindPort
self.cfg['dst-ip'] = destIp
self.cfg['dst-port'] = destPort
self.cfg['dst-host'] = destHost
# tcp options
self.cfg['tcp-keepalive'] = tcpKeepAlive
self.cfg['tcp-keepalive-interval'] = tcpKeepAliveInterval
# ssh
self.cfg['login'] = login
self.cfg['password'] = password
self.cfg['private-key'] = privateKey
self.cfg['private-key-path'] = privateKeyPath
# socket options
self.cfg['sock-timeout'] = socketTimeout
self.cfg['sock-family'] = int(socketFamily)
self.cfg['terminal-type'] = terminalType
self.cfg['terminal-width'] = terminalWidth
self.cfg['terminal-height'] = terminalHeight
self.cfg['agent-support'] = agentSupport
if agentSupport:
self.cfg['agent'] = agent
self.cfg['agent-name'] = agent['name']
self.cfg['agent-type'] = agent['type']
self.TIMER_ALIVE_AGT = TestAdapterLib.Timer(parent=self,
duration=20,
name="keepalive-agent",
callback=self.aliveAgent,
logEvent=False, enabled=True)
self.__checkConfig()
# initialize the agent with no data
if agentSupport:
self.prepareAgent(data={'shared': shared})
if self.agentIsReady(timeout=30) is None:
raise TestAdapterLib.ValueException(TestAdapterLib.caller(),
"agent response timeout" )
self.TIMER_ALIVE_AGT.start()
def __checkConfig(self):
"""
"""
self.debug("config: %s" % self.cfg)
if self.cfg['agent-support'] :
self.warning('using agent %s' % self.cfg['agent']['name'])
def __setSource(self):
"""
Set the source ip and port
"""
if self.socket is not None:
srcIp, srcPort = self.socket.getsockname() # ('127.0.0.1', 52318)
self.sourceIp = srcIp
self.sourcePort = srcPort
def encapsule(self, ip_event, ssh_event):
"""
encapsule template
"""
# prepare template
tpl = TestTemplatesLib.TemplateMessage()
if self.cfg['agent-support']:
layer_agent= TestTemplatesLib.TemplateLayer('AGENT')
layer_agent.addKey(name='name', data=self.cfg['agent']['name'] )
layer_agent.addKey(name='type', data=self.cfg['agent']['type'] )
tpl.addLayer(layer=layer_agent)
return tpl
def cleanSocket(self):
"""
Clean socket
"""
self.debug( 'clean the socket' )
self.unsetRunning()
try:
# clean the socket
if self.socket is not None:
self.socket.close()
self.connected = False
except Exception as e :
pass
def onReset(self):
"""
On reset
"""
if self.cfg['agent-support']:
self.resetAgent()
self.cleanSocket()
def channel(self):
"""
Return the channel
"""
return self.sshChannel
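	# Agent-mode note (editorial summary): when agentSupport is enabled, the SSH work is
	# delegated to a remote agent and the handlers below map its notifications back onto
	# the same callbacks used in direct mode ('connected' -> onConnection,
	# 'negotiation-ok' -> onNegotiationOk, 'authentication-ok' -> onAuthenticationOk,
	# 'channel-opened' -> onChannelOpened), while errors are routed through
	# receivedErrorFromAgent and raw terminal output through receivedDataFromAgent.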
def receivedNotifyFromAgent(self, data):
"""
Function to reimplement
"""
self.debug( data )
if 'cmd' in data and data['cmd'] == AGENT_INITIALIZED:
tpl = TestTemplatesLib.TemplateMessage()
layer = TestTemplatesLib.TemplateLayer('AGENT')
layer.addKey("ready", True)
layer.addKey(name='name', data=self.cfg['agent']['name'] )
layer.addKey(name='type', data=self.cfg['agent']['type'] )
tpl.addLayer(layer= layer)
self.logRecvEvent( shortEvt = "Agent Is Ready" , tplEvt = tpl )
else:
if 'ssh-event' in data:
if data['ssh-event'] == 'initialized':
self.sourceIp = data['src-ip']
self.sourcePort = data['src-port']
elif data['ssh-event'] == 'connected':
self.connected = True
self.onConnection()
elif data['ssh-event'] == 'connection-failed':
self.onConnectionFailed(errno=data['err-no'], errstr=data['err-str'])
elif data['ssh-event'] == 'connection-timeout':
self.onConnectionTimeout(e=data['more'])
elif data['ssh-event'] == 'connection-refused':
self.onConnectionRefused()
elif data['ssh-event'] == 'disconnected-by-peer':
self.onDisconnectionByPeer(e=data['more'])
elif data['ssh-event'] == 'closed':
self.onDisconnection()
elif data['ssh-event'] == 'negotiation-ok':
self.sshTranport = TransportSsh()
self.onNegotiationOk()
elif data['ssh-event'] == 'negotiation-failed':
#self.debug(data['err'])
self.onNegotiationFailed(err=data['err'])
elif data['ssh-event'] == 'authentication-ok':
self.sshTranport.authenticated = True
self.onAuthenticationOk()
elif data['ssh-event'] == 'authentication-failed':
#self.debug(data['err'])
self.onAuthenticationFailed(err=data['err'])
elif data['ssh-event'] == 'channel-opened':
self.channelOpened = True
self.onChannelOpened()
else:
self.error("agent mode - ssh event unknown on notify: %s" % data['ssh-event'] )
def receivedErrorFromAgent(self, data):
"""
Function to reimplement
"""
if data['ssh-event'] == 'on-run':
self.error( "error: %s" % data['more'] )
elif data['ssh-event'] == 'socket-error':
self.onSocketError(e=data['more'])
elif data['ssh-event'] == 'connect-error':
self.error( "connect error: %s" % data['more'] )
self.disconnect()
elif data['ssh-event'] == 'send-data-error':
self.error( "error on send data: %s" % data['more'] )
else:
self.error("agent mode - ssh event unknown on error: %s" % data['ssh-event'] )
def receivedDataFromAgent(self, data):
"""
Function to reimplement
"""
if len(data) == 0:
self.onIncomingData(noMoreData=True)
else:
self.onIncomingData(data=data)
def sendNotifyToAgent(self, data):
"""
"""
self.parent.sendNotifyToAgent(adapterId=self.getAdapterId(), agentName=self.cfg['agent-name'], agentData=data)
def initAgent(self, data):
"""
Init agent
"""
self.parent.sendInitToAgent(adapterId=self.getAdapterId(), agentName=self.cfg['agent-name'], agentData=data)
def prepareAgent(self, data):
"""
prepare agent
"""
self.parent.sendReadyToAgent(adapterId=self.getAdapterId(), agentName=self.cfg['agent-name'], agentData=data)
def resetAgent(self):
"""
Reset agent
"""
self.parent.sendResetToAgent(adapterId=self.getAdapterId(), agentName=self.cfg['agent-name'], agentData='')
def aliveAgent(self):
"""
Keep alive agent
"""
self.parent.sendAliveToAgent(adapterId=self.getAdapterId(), agentName=self.cfg['agent-name'], agentData='')
self.TIMER_ALIVE_AGT.restart()
def agentIsReady(self, timeout=1.0):
"""
Waits to receive agent ready event until the end of the timeout
@param timeout: time max to wait to receive event in second (default=1s)
@type timeout: float
@return: an event matching with the template or None otherwise
@rtype: templatemessage
"""
tpl = TestTemplatesLib.TemplateMessage()
layer = TestTemplatesLib.TemplateLayer('AGENT')
layer.addKey("ready", True)
layer.addKey(name='name', data=self.cfg['agent']['name'] )
layer.addKey(name='type', data=self.cfg['agent']['type'] )
tpl.addLayer(layer= layer)
evt = self.received( expected = tpl, timeout = timeout )
return evt
def connect(self):
"""
Start the TCP connection
"""
if self.connected:
self.debug( 'already connected' )
return
# Optional: resolve hostname
if self.cfg['dst-host'] != '':
self.cfg['dst-ip'] = socket.gethostbyname( self.cfg['dst-host'] )
if not len(self.cfg['dst-ip']):
return
# Start the tcp connection
self.debug( 'connection started' )
if self.cfg['agent-support']:
remote_cfg = {
'cmd': 'connect',
'bind-ip': self.cfg['bind-ip'], 'bind-port': self.cfg['bind-port'],
'sock-timeout': self.cfg['sock-timeout'], 'tcp-keepalive': self.cfg['tcp-keepalive'],
'tcp-keepalive-interval': self.cfg['tcp-keepalive-interval'] ,
'sock-family': self.cfg['sock-family'],
'dst-ip': self.cfg['dst-ip'], 'dst-port':self.cfg['dst-port'],
'shared': self.isShared()
}
self.sendNotifyToAgent(data=remote_cfg)
else:
try:
# set the socket version
if self.cfg['sock-family'] == 4:
sockType = TestAdapterLib.INIT_STREAM_SOCKET
elif self.cfg['sock-family'] == 6:
sockType = TestAdapterLib.INIT6_STREAM_SOCKET
else:
raise Exception('socket family unknown: %s' % str(self.cfg['sock-family']) )
# Create the socket
self.socket = TestAdapterLib.getSocket(sockType=sockType)
self.socket.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
if self.cfg['tcp-keepalive']:
# active tcp keep alive
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
# seconds before sending keepalive probes
self.socket.setsockopt(socket.SOL_TCP, socket.TCP_KEEPIDLE, self.cfg['tcp-keepalive-interval'] )
# interval in seconds between keepalive probes
self.socket.setsockopt(socket.SOL_TCP, socket.TCP_KEEPINTVL, self.cfg['tcp-keepalive-interval'])
# failed keepalive probes before declaring the other end dead
self.socket.setsockopt(socket.SOL_TCP, socket.TCP_KEEPCNT, 5)
self.socket.settimeout( self.cfg['sock-timeout'] )
self.debug( 'bind socket on %s:%s' % (self.cfg['bind-ip'], self.cfg['bind-port']) )
self.socket.bind( (self.cfg['bind-ip'], self.cfg['bind-port']) )
# Connect the socket
self.socket.connect( (self.cfg['dst-ip'], self.cfg['dst-port']) )
# Connection successful
self.__setSource()
self.connected = True
self.onConnection()
# start thread
self.setRunning()
except socket.timeout as e:
self.onConnectionTimeout(e)
except socket.error as e:
				(errno, errstr) = (e.errno, e.strerror)
if errno == 111:
self.onConnectionRefused()
else:
self.onConnectionFailed(errno=errno, errstr=errstr)
except Exception as e:
self.error( "connect error: %s" % str(e) )
self.disconnectTcp()
def disconnect(self):
"""
Close the TCP connection
"""
self.__mutex__.acquire()
if self.connected:
self.debug( 'disconnection started' )
if self.cfg['agent-support']:
self.unsetRunning()
remote_cfg = {'cmd': 'disconnect'}
self.sendNotifyToAgent(data=remote_cfg)
self.debug( 'reset sent to agent' )
else:
self.cleanSocket()
self.onDisconnection()
self.__mutex__.release()
def onConnection(self):
"""
On connection
"""
self.debug( "connected" )
		# start ssh negotiation
self.negotiation()
def onDisconnection(self):
"""
On disconnection
"""
self.channelOpened = False
self.connected = False
self.debug( "disconnected" )
self.unsetRunning()
self.sshChannel = None
def onDisconnectionByPeer(self, e):
"""
On disconnection by peer
"""
self.debug("disconnected by the server: %s" % str(e) )
self.cleanSocket()
def onConnectionRefused(self):
"""
On connection refused
"""
self.__setSource()
self.debug( "connection refused" )
self.cleanSocket()
def onConnectionTimeout(self, e):
"""
On connection timeout
"""
self.__setSource()
self.debug( "connection timeout: %s" % str(e) )
self.cleanSocket()
def onConnectionFailed(self, errno, errstr):
"""
On connection failed
"""
self.debug( "connection failed" )
self.cleanSocket()
def onSocketError(self, e):
"""
On socket error
"""
self.error( "socket error: %s" % str(e) )
self.cleanSocket()
def notifyAgent(self, cfg):
"""
"""
self.sendNotifyToAgent(data=cfg)
def negotiation(self):
"""
Start ssh negotiation
"""
if not self.connected:
self.debug( 'tcp not connected' )
return
if self.cfg['agent-support']:
remote_cfg = { 'cmd': 'negotiation'}
self.sendNotifyToAgent(data=remote_cfg)
else:
t = threading.Thread(target=self.__negotiation)
t.start()
def __negotiation(self):
"""
Sub function to start the negotiation
"""
self.sshTranport = paramiko.Transport(self.socket)
try:
self.sshTranport.start_client()
except Exception as e:
#self.error( e )
self.onNegotiationFailed(err="%s" % e)
# nego ok
else:
self.onNegotiationOk()
def onNegotiationOk(self):
"""
On negotiation ok
"""
# auth with password
self.authentication()
def onNegotiationFailed(self, err=""):
"""
On negotiation failed
"""
# close transport
if self.sshTranport is not None:
self.sshTranport.close()
self.sshTranport = None
self.connected = False
self.handleConnectionFailed(err=err)
def authentication(self):
"""
		Authenticate the SSH session with login/password or with a private key
"""
if self.sshTranport is None:
self.debug( 'negotiation todo before' )
return
if self.cfg['agent-support']:
remote_cfg = { 'cmd': 'authentication', 'login': self.cfg['login'], 'password': self.cfg['password'] }
if self.cfg['private-key'] is not None:
remote_cfg['private-key'] = self.cfg['private-key']
else:
remote_cfg['private-key'] = ''
self.sendNotifyToAgent(data=remote_cfg)
else:
try:
if self.cfg['private-key'] is not None or self.cfg['private-key-path'] is not None :
key = self.sshTranport.get_remote_server_key()
if self.cfg['private-key-path'] is not None:
f = open(self.cfg['private-key-path'], 'r')
self.cfg['private-key'] = f.read()
f.close()
# read first line of the private key to detect the type
key_head=self.cfg['private-key'].splitlines()[0]
if 'DSA' in key_head:
keytype=paramiko.DSSKey
elif 'RSA' in key_head:
keytype=paramiko.RSAKey
else:
raise Exception("Invalid key type: %s" % key_head)
# construct the key
keyfile = io.StringIO( unicode(self.cfg['private-key']) )
pkey=keytype.from_private_key(keyfile)
					# try to authenticate with the private key
self.sshTranport.auth_publickey(self.cfg['login'], pkey)
else:
self.sshTranport.auth_password(self.cfg['login'], self.cfg['password'])
except Exception as e:
#self.debug( e )
self.onAuthenticationFailed(err="%s" % e )
# authen ok
else:
self.onAuthenticationOk()
def onAuthenticationOk(self):
"""
On authentication ok
"""
# open session
self.openSession()
def onAuthenticationFailed(self, err=""):
"""
On authentication failed
"""
# close transport
if self.sshTranport is not None:
self.sshTranport.close()
self.sshTranport = None
self.connected = False
self.handleConnectionFailed(err=err)
def handleConnectionFailed(self, err):
"""
"""
pass
def openSession(self):
"""
		Open an SSH session
"""
if self.sshTranport is None:
return
if not self.sshTranport.is_authenticated():
self.debug( 'not authenticated' )
return
if self.cfg['agent-support']:
remote_cfg = { 'cmd': 'open-session', 'sftp-support': self.sftpSupport, 'terminal-type': self.cfg['terminal-type'],
'terminal-width': self.cfg['terminal-width'] , 'terminal-height': self.cfg['terminal-height']}
self.sendNotifyToAgent(data=remote_cfg)
else:
try:
self.sshChannel = self.sshTranport.open_session()
self.sshChannel.get_pty(term=self.cfg['terminal-type'],
width=self.cfg['terminal-width'] , height =self.cfg['terminal-height'] )
self.sshChannel.invoke_shell()
self.sshChannel.settimeout(0.0)
except Exception as e:
self.onChannelOpeningFailed(err="%s" % e)
else:
self.onChannelOpened()
def onChannelOpened(self):
"""
On channel opened
"""
self.channelOpened = True
# begin to run
self.setRunning()
def onChannelOpeningFailed(self, err=""):
"""
On channel opening failed
"""
# close transport
if self.sshTranport is not None:
self.sshTranport.close()
self.sshTranport = None
self.sshChannel = None
def onRun(self):
"""
"""
try:
if self.connected:
if self.channelOpened:
if self.cfg['agent-support']:
pass
else:
r, w, e = select.select([self.sshChannel], [], [self.sshChannel])
if self.sshChannel in r:
data = self.sshChannel.recv(2048)
# no data
if len(data) == 0:
self.onIncomingData(noMoreData=True)
raise EOFError("nothing to read: disconnecting")
# incoming data
self.onIncomingData(data=data)
except EOFError as e:
self.onDisconnectionByPeer(e)
except socket.error as e:
self.onSocketError(e)
except Exception as e:
self.error( "on run %s" % str(e) )
def onIncomingData(self, data=None, noMoreData=False):
"""
On incoming data
"""
try:
if noMoreData:
self.handleNoMoreData()
else:
# handle data
self.handleIncomingData( data )
except Exception as e:
self.error( "on incoming ssh data: %s" % e )
def handleIncomingData(self, data):
"""
Function to overwrite
Called on incoming data
@param data: tcp data received
@type data: string
@param lower: template tcp data received
@type lower: templatemessage
"""
pass
def handleNoMoreData(self):
"""
Function to reimplement
@param lower:
@type lower:
"""
pass
def getExpectedTemplate(self, event, versionIp=None, sourceIp=None, destinationIp=None, sourcePort=None, destinationPort=None):
"""
Return an expected template with ip and tcp layers
"""
# prepare template
tpl = TestTemplatesLib.TemplateMessage()
if self.cfg['agent-support']:
layer_agent= TestTemplatesLib.TemplateLayer('AGENT')
layer_agent.addKey(name='name', data=self.cfg['agent']['name'] )
layer_agent.addKey(name='type', data=self.cfg['agent']['type'] )
tpl.addLayer(layer=layer_agent)
return tpl
def sendData(self, tpl=None, dataRaw=None):
"""
Send ssh data
@param tpl: ssh template data (default=None)
@type tpl: templatelayer/none
@param dataRaw: ssh data (default=None)
@type dataRaw: string/none
@return: an event matching with the template or None otherwise
@rtype: templatemessage
"""
if self.sshTranport is None:
return
if not self.sshTranport.is_authenticated():
self.debug( 'not authenticated' )
return
if not self.connected:
self.debug( "not connected" )
return
self.debug( dataRaw )
if dataRaw is None:
return
if self.cfg['agent-support']:
remote_cfg = { 'cmd': 'send-data', 'data': dataRaw }
self.sendNotifyToAgent(data=remote_cfg)
else:
try:
self.sshChannel.send(dataRaw)
except Exception as e:
self.error("unable to send data through ssh: %s" % str(e) )
# return tpl_final
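# Hedged usage sketch (commented out; not part of the adapter): how a test case might
# drive this client in direct, non-agent mode. The method names (connect, sendData,
# handleIncomingData, disconnect) are defined above; the testcase object, target host
# and credentials are placeholders, and subclassing to override handleIncomingData is
# just one possible way to consume the terminal output.
#
#   class MyCli(Client):
#       def handleIncomingData(self, data):
#           self.warning(data)  # react to terminal output here
#
#   cli = MyCli(parent=testcase, destIp="192.0.2.10", login="admin", password="admin")
#   cli.connect()              # TCP connect, then negotiation, authentication, open session
#   cli.sendData(dataRaw="uname -a\n")
#   cli.disconnect()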
| 35.664399
| 156
| 0.531568
|
794b807dd77edf2ce3eacd126714947f70d70d62
| 185
|
py
|
Python
|
Types/Enums/Stereo_SfM_Type.py
|
SBCV/PythonUtility
|
0062e1e60dc151776b963d13bc4c1763eb90d333
|
[
"MIT"
] | 2
|
2019-02-20T14:56:13.000Z
|
2020-05-19T12:31:53.000Z
|
Types/Enums/Stereo_SfM_Type.py
|
SBCV/PythonUtility
|
0062e1e60dc151776b963d13bc4c1763eb90d333
|
[
"MIT"
] | null | null | null |
Types/Enums/Stereo_SfM_Type.py
|
SBCV/PythonUtility
|
0062e1e60dc151776b963d13bc4c1763eb90d333
|
[
"MIT"
] | 1
|
2021-01-07T08:32:07.000Z
|
2021-01-07T08:32:07.000Z
|
class StereoSfMType(object):
# Options to demonstrate usefulness of semantic and statistical outlier filtering
initial_stereo_sfm = 'INITIAL'
refined_stereo_sfm = 'REFINED'
| 37
| 85
| 0.778378
|
794b81fb5c370d080b31bc2fe188ed1bb120368c
| 3,735
|
py
|
Python
|
python/efcamdat/preprocess_efcamdat.py
|
skinnersBoxy/input-filter
|
6528b6dc094c59ac6d28a24016d0c42de495d313
|
[
"MIT"
] | null | null | null |
python/efcamdat/preprocess_efcamdat.py
|
skinnersBoxy/input-filter
|
6528b6dc094c59ac6d28a24016d0c42de495d313
|
[
"MIT"
] | null | null | null |
python/efcamdat/preprocess_efcamdat.py
|
skinnersBoxy/input-filter
|
6528b6dc094c59ac6d28a24016d0c42de495d313
|
[
"MIT"
] | null | null | null |
""" Processes English L2 parse tree data into (determiner, noun category, count) triplets. """
import argparse
import datetime
import gc
import xml.etree.ElementTree as ElementTree
import pandas as pd
def get_flags():
""" Parses the flags. """
parser = argparse.ArgumentParser(
description=
'Processes parse tree data into (determiner, noun category, count) triplets.'
)
parser.add_argument(
'--ncfile',
default='./data/CELEX/nouns.txt',
help='File containing noun countability info.')
parser.add_argument(
'--efcamdatfile',
default='./data/EFCAMDAT2_parse.xml',
help='Location of EFCAMDAT2 xml file.')
parser.add_argument(
'-o',
'--output',
default='./generated_data/det_cat_counts_efcamdat.csv',
help='Path to output file.')
return parser.parse_args()
def clean(string):
""" Cleans the string by making the case uniform and removing spaces. """
if string:
return string.strip().lower()
return ''
def extract_triplets(noun_classes, context):
""" Extracts triplets from a noun class dataframe and a xml tree parse context. """
nouns = set(noun_classes.noun)
l2_noun_det_pairs = dict()
n_sentence = 0
for event, elem in context:
# <sentence> is the start of annotated text
if event == 'end' and elem.tag == 'sentence':
n_sentence += 1
# Each of the children is a token with tagged attributes
for token in elem:
# If we are currently looking at a determiner
if token.attrib['pos'] in ['DT', 'CD', 'WDT']:
head = elem[int(token.attrib['head']) - 1]
# If the head element is actually a noun and we have record of it
if clean(head.text) in nouns:
if head.attrib['pos'] in ['NN',
'NNP']: # Singular or Mass
if noun_classes[noun_classes.noun == clean(
head.text)].iloc[0].countable:
pair = (clean(token.text), 'SINGULAR_OR_MASS')
else:
pair = (clean(token.text), 'MASS')
                        elif head.attrib['pos'] in ['NNS', 'NNPS']:  # Plural
                            pair = (clean(token.text), 'PLURAL')
                        else:
                            # skip heads with other POS tags so a stale pair from a
                            # previous iteration is never counted again
                            continue
                        l2_noun_det_pairs[pair] = l2_noun_det_pairs.get(
                            pair, 0) + 1
if n_sentence % 100000 == 0:
print(
f"[{datetime.datetime.now()}] Sentence: {n_sentence}, pairs: {len(l2_noun_det_pairs.keys())}"
)
if elem.tag != 'token':
elem.clear()
return l2_noun_det_pairs
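# Illustrative note (invented toy example, not from the corpus): for a sentence like
# "the dogs barked", the determiner "the" whose head "dogs" is tagged NNS yields the
# pair ('the', 'PLURAL'); repeated occurrences accumulate in the returned dict, e.g.
# {('the', 'PLURAL'): 2, ('a', 'SINGULAR_OR_MASS'): 1}, which main() then writes out
# as CSV rows such as "the,PLURAL,2".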
def main():
""" Reads the data files and extracts the (determiner, noun class, count) triplets. """
args = get_flags()
noun_classes_per_sense = pd.read_csv(
args.ncfile, sep='\\', names=['noun', 'countable', 'uncountable'])
noun_classes_per_sense['noun'] = [
clean(noun) for noun in noun_classes_per_sense.noun
]
noun_classes = noun_classes_per_sense.groupby(
'noun',
as_index=False).aggregate(lambda group: any([x == 'Y' for x in group]))
# get an iterable
context = ElementTree.iterparse(args.efcamdatfile)
l2_noun_det_pairs = extract_triplets(noun_classes, context)
out_file = open(args.output, 'w')
for (key, count) in l2_noun_det_pairs.items():
det, noun = key
out_file.write(det + "," + noun + "," + str(count) + "\n")
out_file.close()
if __name__ == "__main__":
main()
| 35.913462
| 113
| 0.562517
|
794b82a3daaf8291fd8fcaeaa50debbda617761f
| 7,976
|
py
|
Python
|
distutilazy/clean.py
|
farzadghanei/distutilazy
|
c3c7d062f7cb79abb7677cac57dd752127ff78e7
|
[
"MIT"
] | null | null | null |
distutilazy/clean.py
|
farzadghanei/distutilazy
|
c3c7d062f7cb79abb7677cac57dd752127ff78e7
|
[
"MIT"
] | 2
|
2016-06-16T14:12:48.000Z
|
2018-07-22T12:44:21.000Z
|
distutilazy/clean.py
|
farzadghanei/distutilazy
|
c3c7d062f7cb79abb7677cac57dd752127ff78e7
|
[
"MIT"
] | null | null | null |
"""
distutilazy.clean
-----------------
command classes to help clean temporary files
:license: MIT. For more details see LICENSE file or
https://opensource.org/licenses/MIT
"""
from __future__ import absolute_import
import os
from shutil import rmtree
from distutils import log
from distutils.core import Command
from distutils.command import clean
from . import util
__version__ = "0.4.0"
class BaseFileSystemCleanerCommand(Command):
    @staticmethod
    def default_extensions():
        return []
    @staticmethod
    def default_directories():
        return []
def initialize_options(self):
self.root = os.getcwd()
self.extensions = ','.join(self.default_extensions())
self.directories = ','.join(self.default_directories())
def finalize_options(self):
if not os.path.exists(self.root):
raise IOError("Failed to access root path '{}'".format(self.root))
self.extensions = [ext.strip() for ext in self.extensions.split(',')]
self.directories = [
dir_.strip() for dir_ in self.directories.split(',')]
def _find_files(self):
"""Find files recursively in the root path
using provided extensions.
:return: list of absolute file paths
"""
files = []
for ext in self.extensions:
ext_files = util.find_files(self.root, "*" + ext)
log.debug("found {} '*{}' files in '{}'".format(
len(ext_files), ext, self.root)
)
files.extend(ext_files)
return files
def _find_directories(self):
directories = []
for dir_name in self.directories:
dirs = util.find_directories(self.root, dir_name)
log.debug("found {} directories in '{}'".format(
len(dirs), self.root))
directories.extend(dirs)
return directories
def _clean_file(self, filename):
"""Clean a file if exists and not in dry run"""
if not os.path.exists(filename):
return
self.announce("removing '{}'".format(filename))
if not self.dry_run:
os.remove(filename)
def _clean_directory(self, name):
"""Clean a directory if exists and not in dry run"""
if not os.path.exists(name):
return
self.announce(
"removing directory '{}' and all its contents".format(name)
)
if not self.dry_run:
rmtree(name, True)
class CleanPyc(BaseFileSystemCleanerCommand):
description = """Clean root dir from complied python files"""
user_options = [("root=", "r", "path to root dir")]
@staticmethod
def default_extensions():
return [".pyc", ".pyo", ".pyd"]
@staticmethod
def default_directories():
return ["__pycache__"]
def find_compiled_files(self):
"""Find compiled Python files recursively in the root path
:return: list of absolute file paths
"""
files = self._find_files()
self.announce(
"found '{}' compiled python files in '{}'".format(
len(files), self.root
)
)
return files
def find_cache_directories(self):
directories = self._find_directories()
self.announce(
"found {} python cache directories in '{}'".format(
len(directories), self.root
)
)
return directories
def run(self):
directories = self.find_cache_directories()
if directories:
self.announce(
"cleaning python cache directories in '{}' ...".format(
self.root))
if not self.dry_run:
for dir_name in directories:
self._clean_directory(dir_name)
files = self.find_compiled_files()
if files:
self.announce(
"cleaning compiled python files in '{}' ...".format(self.root))
if not self.dry_run:
for filename in files:
self._clean_file(filename)
class CleanJythonClass(BaseFileSystemCleanerCommand):
description = """Clean root dir from complied files created by Jython"""
user_options = [("root=", "r", "path to root dir")]
@staticmethod
def default_extensions():
return ["$py.class"]
@staticmethod
def default_directories():
return []
def find_class_files(self):
"""Find compiled class files recursively in the root path
:return: list of absolute file paths
"""
files = self._find_files()
self.announce(
"found '{}' compiled class files in '{}'".format(
len(files), self.root
)
)
return files
def run(self):
files = self.find_class_files()
if files:
self.announce(
"cleaning compiled class files in '{}' ...".format(self.root))
if not self.dry_run:
for filename in files:
self._clean_file(filename)
class CleanAll(clean.clean, BaseFileSystemCleanerCommand):
description = "Clean root dir from temporary files (complied files, etc)"
user_options = [
("keep-build", None, "do not clean build directory"),
("keep-dist", None, "do not clean dist directory"),
("keep-egginfo", None, "do not clean egg info directory"),
("keep-extra", None, "do not clean extra files"),
]
boolean_options = ["keep-build", "keep-dist", "keep-egginfo", "keep-extra"]
@staticmethod
def default_extensions():
return CleanPyc.default_extensions() + \
CleanJythonClass.default_extensions()
@staticmethod
def default_directories():
return CleanPyc.default_directories() + \
CleanJythonClass.default_directories()
def initialize_options(self):
clean.clean.initialize_options(self)
BaseFileSystemCleanerCommand.initialize_options(self)
self.keep_build = None
self.keep_dist = None
self.keep_egginfo = None
self.keep_extra = None
def finalize_options(self):
clean.clean.finalize_options(self)
BaseFileSystemCleanerCommand.finalize_options(self)
self.all = True
def get_egginfo_dir(self):
return self.distribution.metadata.get_name() + ".egg-info"
def get_extra_paths(self):
"""Return list of extra files/directories to be removed"""
return []
def clean_egginfo(self):
"""Clean .egginfo directory"""
dir_name = os.path.join(self.root, self.get_egginfo_dir())
self._clean_directory(dir_name)
def clean_dist(self):
self._clean_directory(os.path.join(self.root, "dist"))
def clean_build(self):
self._clean_directory(os.path.join(self.root, "build"))
def clean_extra(self):
"""Clean extra files/directories specified by get_extra_paths()"""
extra_paths = self.get_extra_paths()
for path in extra_paths:
if not os.path.exists(path):
continue
if os.path.isdir(path):
self._clean_directory(path)
else:
self._clean_file(path)
def run(self):
clean.clean.run(self)
if not self.keep_build:
self.clean_build()
if not self.keep_egginfo:
self.clean_egginfo()
if not self.keep_dist:
self.clean_dist()
if not self.keep_extra:
self.clean_extra()
directories = self._find_directories()
if directories and not self.dry_run:
for dir_name in directories:
self._clean_directory(dir_name)
files = self._find_files()
if files and not self.dry_run:
for filename in files:
self._clean_file(filename)
clean_pyc = CleanPyc
clean_all = CleanAll
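# Hedged usage sketch (comments only): these command classes are intended to be wired
# into a distutils/setuptools setup() through cmdclass, after which they can be run as
# "python setup.py clean_pyc" or "python setup.py clean_all". The class aliases come
# from this module; the package name and version below are placeholders.
#
#   from setuptools import setup
#   from distutilazy.clean import clean_pyc, clean_all
#
#   setup(
#       name="example-package",
#       version="0.1.0",
#       cmdclass={"clean_pyc": clean_pyc, "clean_all": clean_all},
#   )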
| 30.442748
| 79
| 0.596163
|
794b8473783d9c923730503c75665a9f98b693cc
| 844
|
py
|
Python
|
true_crime/01-Web-Scraping-Python-and-BeautifulSoup/criminal_minds.py
|
jvmistica/hubpages
|
6486312b3b3df122dd67bc4c2f7582736f02342a
|
[
"MIT"
] | null | null | null |
true_crime/01-Web-Scraping-Python-and-BeautifulSoup/criminal_minds.py
|
jvmistica/hubpages
|
6486312b3b3df122dd67bc4c2f7582736f02342a
|
[
"MIT"
] | 1
|
2021-03-31T20:06:36.000Z
|
2021-03-31T20:06:36.000Z
|
true_crime/01-Web-Scraping-Python-and-BeautifulSoup/criminal_minds.py
|
jvmistica/hubpages
|
6486312b3b3df122dd67bc4c2f7582736f02342a
|
[
"MIT"
] | null | null | null |
import requests
from bs4 import BeautifulSoup
# Retrieve all stories
url = "https://criminalminds.fandom.com/wiki/Real_Criminals/Serial_Killers"
response = requests.get(url)
soup = BeautifulSoup(response.text, "lxml")
stories = soup.find_all("div", {"class": "lightbox-caption"})
for story in stories:
# Retrieve each story
url = "https://criminalminds.fandom.com" + story.find("a")["href"]
response = requests.get(url)
soup = BeautifulSoup(response.text, "lxml")
main_story = soup.find("div", {"id":"mw-content-text"})
quote = " ".join(main_story.find("table").text.split())
subject = story.find("a")["title"]
blocks = main_story.find_all("p")
full_story = ""
for block in blocks:
full_story = full_story + block.text + "\n"
print(quote + "\n" + subject + "\n\n" + full_story)
break
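# Note: the trailing "break" above stops after the first listed criminal, which keeps
# this example fast; removing it would scrape every entry on the page (one extra HTTP
# request per story), in which case pausing briefly between requests would be a
# reasonable courtesy to the site.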
| 32.461538
| 75
| 0.664692
|
794b851df222f0777e624d5dc90cab6aaaaaa812
| 3,291
|
py
|
Python
|
tests/unit/utils/test_pkg.py
|
nadvornik/salt-1
|
6837044f5a207cf39f3064428b0ed276226a5e39
|
[
"Apache-2.0"
] | 19
|
2016-01-29T14:37:52.000Z
|
2022-03-30T18:08:01.000Z
|
tests/unit/utils/test_pkg.py
|
nadvornik/salt-1
|
6837044f5a207cf39f3064428b0ed276226a5e39
|
[
"Apache-2.0"
] | 223
|
2016-03-02T16:39:41.000Z
|
2022-03-03T12:26:35.000Z
|
tests/unit/utils/test_pkg.py
|
nadvornik/salt-1
|
6837044f5a207cf39f3064428b0ed276226a5e39
|
[
"Apache-2.0"
] | 64
|
2016-02-04T19:45:26.000Z
|
2021-12-15T02:02:31.000Z
|
import salt.utils.pkg
from salt.utils.pkg import rpm
from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, Mock, patch
from tests.support.unit import TestCase, skipIf
try:
import pytest
except ImportError:
pytest = None
@skipIf(pytest is None, "PyTest is missing")
class PkgRPMTestCase(TestCase):
"""
Test case for pkg.rpm utils
"""
@patch("salt.utils.path.which", MagicMock(return_value=True))
def test_get_osarch_by_rpm(self):
"""
Get os_arch if RPM package is installed.
:return:
"""
subprocess_mock = MagicMock()
subprocess_mock.Popen = MagicMock()
subprocess_mock.Popen().communicate = MagicMock(return_value=["Z80"])
with patch("salt.utils.pkg.rpm.subprocess", subprocess_mock):
assert rpm.get_osarch() == "Z80"
assert subprocess_mock.Popen.call_count == 2 # One within the mock
assert subprocess_mock.Popen.call_args[1]["close_fds"]
assert subprocess_mock.Popen.call_args[1]["shell"]
assert len(subprocess_mock.Popen.call_args_list) == 2
assert subprocess_mock.Popen.call_args[0][0] == 'rpm --eval "%{_host_cpu}"'
@patch("salt.utils.path.which", MagicMock(return_value=False))
@patch("salt.utils.pkg.rpm.subprocess", MagicMock(return_value=False))
@patch(
"salt.utils.pkg.rpm.platform.uname",
MagicMock(
return_value=(
"Sinclair BASIC",
"motophone",
"1982 Sinclair Research Ltd",
"1.0",
"ZX81",
"Z80",
)
),
)
def test_get_osarch_by_platform(self):
"""
Get os_arch if RPM package is not installed (inird image, for example).
:return:
"""
assert rpm.get_osarch() == "Z80"
@patch("salt.utils.path.which", MagicMock(return_value=False))
@patch("salt.utils.pkg.rpm.subprocess", MagicMock(return_value=False))
@patch(
"salt.utils.pkg.rpm.platform.uname",
MagicMock(
return_value=(
"Sinclair BASIC",
"motophone",
"1982 Sinclair Research Ltd",
"1.0",
"ZX81",
"",
)
),
)
def test_get_osarch_by_platform_no_cpu_arch(self):
"""
Get os_arch if RPM package is not installed (inird image, for example) but cpu arch cannot be determined.
:return:
"""
assert rpm.get_osarch() == "ZX81"
@patch("salt.utils.path.which", MagicMock(return_value=False))
@patch("salt.utils.pkg.rpm.subprocess", MagicMock(return_value=False))
@patch(
"salt.utils.pkg.rpm.platform.uname",
MagicMock(
return_value=(
"Sinclair BASIC",
"motophone",
"1982 Sinclair Research Ltd",
"1.0",
"",
"",
)
),
)
def test_get_osarch_by_platform_no_cpu_arch_no_machine(self):
"""
Get os_arch if RPM package is not installed (inird image, for example)
where both cpu arch and machine cannot be determined.
:return:
"""
assert rpm.get_osarch() == "unknown"
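# Summary note (editorial): taken together, the cases above pin down the fallback order
# of rpm.get_osarch() -- use the output of `rpm --eval "%{_host_cpu}"` when the rpm
# binary is available, otherwise fall back to platform.uname()'s processor field, then
# its machine field, and finally the literal "unknown" when neither is set.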
| 32.584158
| 113
| 0.576724
|
794b8579baba721a2b89b7b791575220e57b3f44
| 990
|
py
|
Python
|
CosmosTrigger/__init__.py
|
yshin1209/peridyme2
|
ada7fec3820044bbd2bd4ce20aeb08b9ca4b1928
|
[
"MIT"
] | null | null | null |
CosmosTrigger/__init__.py
|
yshin1209/peridyme2
|
ada7fec3820044bbd2bd4ce20aeb08b9ca4b1928
|
[
"MIT"
] | null | null | null |
CosmosTrigger/__init__.py
|
yshin1209/peridyme2
|
ada7fec3820044bbd2bd4ce20aeb08b9ca4b1928
|
[
"MIT"
] | 2
|
2021-08-05T14:25:29.000Z
|
2021-08-05T14:37:39.000Z
|
# Cosmos DB Trigger
# 2021 Yong-Jun Shin
# Ctrl-Shift-P --> Terminal: Create New Integrated Terminal
# gremlinpython==3.5.0 --> requirements.txt
# gremlinpython supports Python 3.4 or higher
# pip install --target ".\.venv\Lib\site-packages" -r requirements.txt --upgrade
import logging
import datetime
import azure.functions as func
def main(documents: func.DocumentList, outputEvent: func.Out[func.EventGridOutputEvent]):
if documents:
logging.info('id: %s', documents[0]['id'])
logging.info('value: %s', documents[0]['value'][0]['_value'])
input_id = documents[0]['id']
value = documents[0]['value'][0]['_value']
outputEvent.set(
func.EventGridOutputEvent(
id="test-id",
data={'input_id': input_id, 'value': value},
subject="CosmosDB Trigger",
event_type="test-event-1",
event_time= datetime.datetime.utcnow(),
data_version="1.0"))
logging.info ("event sent")
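# Hedged example (assumed document shape, inferred from the field accesses above): a
# change-feed document this trigger can handle would look roughly like
#
#   {
#       "id": "sensor-42",
#       "value": [ { "_value": 98.6 } ]
#   }
#
# i.e. only documents[0]['id'] and documents[0]['value'][0]['_value'] are read; any
# other Cosmos DB system properties (_rid, _ts, ...) are ignored.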
| 31.935484
| 89
| 0.641414
|
794b87c4977e357f0da921102852614b755b3cff
| 4,229
|
py
|
Python
|
test_ga_scripts/compare_selections.py
|
jmehault/genetic_algorithm_feature_selection
|
43d250f35be69b32943963201165ea941fb173da
|
[
"MIT"
] | null | null | null |
test_ga_scripts/compare_selections.py
|
jmehault/genetic_algorithm_feature_selection
|
43d250f35be69b32943963201165ea941fb173da
|
[
"MIT"
] | null | null | null |
test_ga_scripts/compare_selections.py
|
jmehault/genetic_algorithm_feature_selection
|
43d250f35be69b32943963201165ea941fb173da
|
[
"MIT"
] | null | null | null |
import time
import numpy as np
import pandas as pd
import genetic_algorithm_feature_selection.variable_selection as vs
import genetic_algorithm_feature_selection.genetic_steps as gs
from sklearn.linear_model import LinearRegression
from sklearn.datasets import make_regression
from sklearn.feature_selection import RFECV
from mlxtend.feature_selection import SequentialFeatureSelector
# import matplotlib.pyplot as plt
resDict = {}
listNCols = [100, 500, 1000, 2500]
listRatio = [0.1, 0.25, 0.5, 0.75]
nCols = listNCols[0]
ratio = listRatio[0]
nGoods = int(nCols * ratio)
nEch = 1000
data, target, coefs = make_regression(n_samples=nEch,
n_features=nCols,
n_informative=nGoods,
noise=1,
effective_rank=10,
coef=True,
random_state=243)
colnames = np.array([f'X{n}' for n in range(nCols)])
data = pd.DataFrame(data, columns=colnames)
target = pd.Series(target, name='target')
coefs = pd.Series(coefs, index=colnames).sort_values(ascending=False)
print(coefs.head(5))
# duplicate columns to see how the selection behaves
# with the genetic algorithm
data_dup = data.loc[:, coefs[coefs!=0].index] + 2*np.random.randn(nEch, nGoods)
data_dup.columns = data_dup.columns + "_dup"
data = pd.concat((data, data_dup), axis=1, sort=False)
nCols = data.shape[1]
colnames = np.array(data.columns)
# compute the theoretical best performance
model = LinearRegression()
population = pd.DataFrame(coefs != 0).T
res_eval = gs.evaluation_p(population, population.columns,
data, target, model, 'bic')
score = res_eval.score.values
print(f"meilleure performance possible : {score}")
# search for the best combination of variables
model = LinearRegression()
print(f"Démarre sélection par génétique")
timeStart = time.time()
lst_param = {'lasso_init': False,
'taille_pop': 20,
'n_generations': 60,
'n_warming': 2, # 5
'pr_croisement': 0.8,
'pr_mutation_car': 0.1,
'n_cvfolds': 5,
'diversification': True,
'verbose': 1}
res_algo_genetique = vs.genetic_algorithm(data, target, model, **lst_param)
dureeGenetique = time.time() - timeStart
scoreGenetique = res_algo_genetique['best_score']
resDict[(nCols, nGoods, 'Gen')] = {'duree': dureeGenetique,
'score': scoreGenetique}
# selection via RFE + cross-validation
print("Starting selection with RFE")
timeStart = time.time()
rfeSel = RFECV(estimator=model, min_features_to_select=1, cv=5, n_jobs=-1)
resultsRFE = rfeSel.fit(data, target)
dureeRFE = time.time() - timeStart
population = resultsRFE.get_support().reshape(1, nCols)
population = pd.DataFrame(population, columns=colnames)
scoreRFE = gs.evaluation_p(population, colnames, data, target, model, 'bic')['score'][0]
resDict[(nCols, nGoods, 'RFE')] = {'duree': dureeRFE,
'score': scoreRFE}
# sequential feature selection
print("Starting selection with SFS")
timeStart = time.time()
seqSel = SequentialFeatureSelector(estimator=model, k_features="parsimonious",
floating=True, cv=5, n_jobs=-1)
resultsSFS = seqSel.fit(data, target)
dureeSFS = time.time()-timeStart
population = pd.Index(colnames).isin(resultsSFS.k_feature_names_).reshape(1, nCols)
population = pd.DataFrame(population, columns=colnames)
scoreSFS = gs.evaluation_p(population, colnames, data, target, model, 'bic')['score'][0]
resDict[(nCols, nGoods, 'SFS')] = {'duree': dureeSFS,
'score': scoreSFS}
# plot execution time, best performance for each method,
# and the number of variables kept,
# as a function of the total number of variables and the ratio of true to total variables (nbVarVraies/nbVarTot)
resDf = pd.DataFrame(resDict).T
resDf.index.names = ['nCols', 'nGood', 'selAlgo']
# plots (a commented sketch follows below):
## execution time for each (nCols, nGood)
## score for each (nCols, nGood)
## best selection algorithm by score for each (nCols, nGood)
## best selection algorithm by execution time for each (nCols, nGood)
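# Hedged sketch (commented out) of the plots described above, assuming matplotlib is
# available and resDf holds results for several (nCols, nGood) settings. The column
# names 'duree'/'score' and the index level 'selAlgo' come from resDf as built above;
# the figure layout and titles are illustrative.
#
#   import matplotlib.pyplot as plt
#
#   fig, (ax_time, ax_score) = plt.subplots(1, 2, figsize=(10, 4))
#   resDf['duree'].astype(float).unstack('selAlgo').plot.bar(ax=ax_time, title='execution time (s)')
#   resDf['score'].astype(float).unstack('selAlgo').plot.bar(ax=ax_score, title='best BIC score')
#   plt.tight_layout()
#   plt.show()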
| 37.758929
| 88
| 0.671317
|
794b884d5147320b0b4d590b311ec8e03395e4af
| 7,756
|
py
|
Python
|
kaizen_mapping/utils/gis.py
|
fuzailpalnak/kaizen
|
432fbb780cd3725ecab51ee3daf74b1373a13c0c
|
[
"MIT"
] | 12
|
2020-10-11T09:59:05.000Z
|
2022-01-16T18:34:55.000Z
|
kaizen_mapping/utils/gis.py
|
safarzadeh-reza/kaizen
|
432fbb780cd3725ecab51ee3daf74b1373a13c0c
|
[
"MIT"
] | null | null | null |
kaizen_mapping/utils/gis.py
|
safarzadeh-reza/kaizen
|
432fbb780cd3725ecab51ee3daf74b1373a13c0c
|
[
"MIT"
] | 2
|
2021-04-15T08:26:07.000Z
|
2022-01-16T20:59:43.000Z
|
from typing import Union, Tuple, List
import numpy as np
import geopandas
import visvalingamwyatt as vw
from gdal import ogr, osr
from affine import Affine
from geopandas import GeoDataFrame
from pandas import Series
from rasterio.transform import rowcol, xy
from shapely.geometry import mapping, box, Point, Polygon, LineString, MultiLineString
from shapely.ops import polygonize, unary_union, linemerge
def decompose_data_frame_row(row: Series):
if "geometry" not in list(row.keys()):
raise KeyError("Missing Keys, Must have keys ['geometry']")
feature_geometry = mapping(row["geometry"])
feature_property = dict()
for geometry_property in list(row.keys()):
if geometry_property not in ["geometry"]:
feature_property[geometry_property] = row[geometry_property]
return feature_geometry, feature_property
def geom_check(data_frame: GeoDataFrame, geom_type: str) -> bool:
assert geom_type in [
"LineString",
"Point",
"MultiLineString",
"MultiPolygon",
"Polygon",
], (
"Expected geomtype to be in ['LineString', 'Point', 'MultiLineString', 'MultiPolygon', 'Polygon'] to check"
"got %s",
(geom_type,),
)
return all(data_frame.geom_type.array == geom_type)
def pixel_position(x: float, y: float, transform: Affine) -> list:
"""
CONVERT SPATIAL COORDINATES TO PIXEL X AND Y
:param transform:
:param x:
:param y:
:return:
"""
return rowcol(transform, x, y)
def spatial_position(x: int, y: int, transform: Affine) -> list:
"""
CONVERT PIXEL COORDINATE TO SPATIAL COORDINATES
:param transform:
:param x:
:param y:
:return:
"""
return xy(transform, x, y)
def generate_affine(minx: float, maxy: float, resolution: float) -> Affine:
"""
Generate affine transform over the spatial coordinates
:param minx: min x of the extent
:param maxy: max y of the extent
:param resolution: what resolution to use to convert the spatial coordinates in pixel
:return:
"""
return Affine.translation(minx, maxy) * Affine.scale(resolution, -resolution)
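# Minimal usage sketch of the three affine helpers above; the origin coordinates
# and resolution are illustrative assumptions, not values from this project.
def _affine_helpers_example() -> Tuple[Tuple[int, int], Tuple[float, float]]:
    transform = generate_affine(minx=500000.0, maxy=4200000.0, resolution=0.5)
    # spatial -> pixel, then back to the (pixel-centre) spatial position
    row, col = pixel_position(500010.0, 4199990.0, transform)
    x, y = spatial_position(row, col, transform)
    return (row, col), (x, y)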
def total_bounds(data_frame: GeoDataFrame):
return data_frame.total_bounds
def get_maximum_bound(data_frame_1: GeoDataFrame, data_frame_2: GeoDataFrame):
return (
data_frame_1.total_bounds
if box(*data_frame_1.total_bounds).area > box(*data_frame_2.total_bounds).area
else data_frame_2.total_bounds
)
def compute_diagonal_distance_of_extent(data_frame: GeoDataFrame) -> float:
min_x, min_y, max_x, max_y = total_bounds(data_frame)
return Point((min_x, min_y)).distance(Point((max_x, max_y)))
def my_crs(crs: str):
return crs in ["epsg:26910", "epsg:32649"]
def supported_crs(data_frame: GeoDataFrame):
return data_frame.crs in ["epsg:26910", "epsg:32649"]
def read_data_frame(path: str):
return geopandas.read_file(path)
def crs_conversion(
crs_from: str, crs_to: str, coordinate: tuple
) -> Tuple[float, float]:
# https://gis.stackexchange.com/questions/78838/converting-projected-coordinates-to-lat-lon-using-python
assert len(coordinate) == 2, (
"Expected 'point' in format '(X, Y)'" "got %s",
(coordinate,),
)
crs_from = int(crs_from.split(":")[-1])
crs_to = int(crs_to.split(":")[-1])
point = ogr.Geometry(ogr.wkbPoint)
point.AddPoint(coordinate[0], coordinate[1])
in_spatial_ref = osr.SpatialReference()
in_spatial_ref.ImportFromEPSG(crs_from)
out_spatial_ref = osr.SpatialReference()
out_spatial_ref.ImportFromEPSG(crs_to)
coordinate_transform = osr.CoordinateTransformation(in_spatial_ref, out_spatial_ref)
point.Transform(coordinate_transform)
return point.GetX(), point.GetY()
def bounding_box_crs_conversion(
bounds: Union[np.ndarray, list, tuple], crs_to: str, crs_from="epsg:4326"
) -> list:
assert len(bounds) == 1, ("Expected a single bounding box" "got %s", (len(bounds)))
assert my_crs(crs_to), (
"CRS Provided not in supported list" "Expected %s got %s",
(
["epsg:26910", "epsg:32649"],
crs_to,
),
)
converted_boundary = list()
for point in bounds[0]:
converted_boundary.append(
crs_conversion(crs_from, crs_to, (point[0], point[1]))
)
return converted_boundary
def convert_and_get_extent(
bounds: Union[np.ndarray, list, tuple], crs_to: str, crs_from="epsg:4326"
) -> tuple:
assert len(bounds) == 1, ("Expected a single bounding box" "got %s", (len(bounds)))
assert my_crs(crs_to), (
"CRS Provided not in supported list" "Expected %s got %s",
(
["epsg:26910", "epsg:32649"],
crs_to,
),
)
return Polygon(bounding_box_crs_conversion(bounds, crs_to, crs_from)).bounds
def line_simplify(coordinates: list, area_threshold_im_meters: float):
# https://github.com/Permafacture/Py-Visvalingam-Whyatt/blob/master/polysimplify.py
# https://pypi.org/project/visvalingamwyatt/
# https://hull-repository.worktribe.com/preview/376364/000870493786962263.pdf
return vw.simplify(coordinates, threshold=area_threshold_im_meters)
def line_referencing(
line: Union[LineString, MultiLineString], point: Point
) -> Tuple[Union[int, float], Point]:
# https://stackoverflow.com/questions/24415806/coordinates-of-the-closest-points-of-two-geometries-in-shapely
assert type(line) in [LineString, MultiLineString], (
"Expected type of 'line' to be in ['LineString', 'MultiLineString']" "got %s",
(type(line),),
)
assert type(point) == Point, (
"Expected type of 'point' to be 'Point'" "got %s",
(type(point),),
)
fraction = line.project(point, normalized=True)
project_point = line.interpolate(fraction, normalized=True)
return fraction, project_point
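# Sketch of linear referencing on a toy geometry; the coordinates below are
# illustrative assumptions only.
def _line_referencing_example() -> Tuple[float, Point]:
    line = LineString([(0.0, 0.0), (10.0, 0.0)])
    # snaps (3, 4) onto the line at 30% of its length, i.e. Point(3, 0)
    fraction, snapped = line_referencing(line, Point(3.0, 4.0))
    return fraction, snapped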
def line_referencing_series_of_coordinates(
line: Union[LineString, MultiLineString], points: List[tuple]
) -> List[Point]:
assert type(line) in [LineString, MultiLineString], (
"Expected type of 'line' to be in ['LineString', 'MultiLineString']" "got %s",
(type(line),),
)
assert all(
type(point) is tuple for point in points
), "Expected type of 'point' to be 'tuple'"
referenced = list()
for point in points:
fraction, projected_point = line_referencing(line, Point(point))
referenced.append(projected_point)
return referenced
def line_referencing_series_of_point_object(
line: Union[LineString, MultiLineString], points: List[Point]
) -> List[Point]:
assert type(line) in [LineString, MultiLineString], (
"Expected type of 'line' to be in ['LineString', 'MultiLineString']" "got %s",
(type(line),),
)
assert all(
type(point) is Point for point in points
), "Expected type of 'point' to be 'Point'"
referenced = list()
for point in points:
fraction, projected_point = line_referencing(line, point)
referenced.append(projected_point)
return referenced
def split_polygon_with_line_string(line: LineString, polygon: Polygon):
assert type(line) == LineString, (
"Expected 'line' to be of type 'LineString'" "got %s",
(type(line),),
)
assert type(polygon) == Polygon, (
"Expected 'polygon' to be of type 'Polygon'" "got %s",
(type(polygon),),
)
return list(polygonize(unary_union(linemerge([polygon.boundary, line]))))
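# Sketch of splitting a toy square with a vertical cut line; the geometry is an
# illustrative assumption, not data from this project.
def _split_polygon_example() -> List[Polygon]:
    square = Polygon([(0, 0), (4, 0), (4, 4), (0, 4)])
    cut = LineString([(2, -1), (2, 5)])
    # returns the two halves of the square as separate polygons
    return split_polygon_with_line_string(cut, square)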
def split_poly_coordinates_with_line_coordinates(
line: List[Union[Tuple, List]], polygon: [List[Union[Tuple, List]]]
):
return split_polygon_with_line_string(LineString(line), Polygon(polygon))
| 31.024
| 115
| 0.67638
|
794b895aa064898c35b6c990341ff16b3478a8ba
| 8,693
|
py
|
Python
|
front-end/testsuite-python-lib/Python-3.1/Lib/ctypes/util.py
|
MalloyPower/parsing-python
|
b2bca5eed07ea2af7a2001cd4f63becdfb0570be
|
[
"MIT"
] | 1
|
2020-11-26T18:53:46.000Z
|
2020-11-26T18:53:46.000Z
|
Lib/ctypes/util.py
|
orestis/python
|
870a82aac7788ffa105e2a3e4480b3715c93bff6
|
[
"PSF-2.0"
] | null | null | null |
Lib/ctypes/util.py
|
orestis/python
|
870a82aac7788ffa105e2a3e4480b3715c93bff6
|
[
"PSF-2.0"
] | 2
|
2018-08-06T04:37:38.000Z
|
2022-02-27T18:07:12.000Z
|
import sys, os
# find_library(name) returns the pathname of a library, or None.
if os.name == "nt":
def _get_build_version():
"""Return the version of MSVC that was used to build Python.
For Python 2.3 and up, the version number is included in
sys.version. For earlier versions, assume the compiler is MSVC 6.
"""
# This function was copied from Lib/distutils/msvccompiler.py
prefix = "MSC v."
i = sys.version.find(prefix)
if i == -1:
return 6
i = i + len(prefix)
s, rest = sys.version[i:].split(" ", 1)
majorVersion = int(s[:-2]) - 6
minorVersion = int(s[2:3]) / 10.0
# I don't think paths are affected by minor version in version 6
if majorVersion == 6:
minorVersion = 0
if majorVersion >= 6:
return majorVersion + minorVersion
# else we don't know what version of the compiler this is
return None
def find_msvcrt():
"""Return the name of the VC runtime dll"""
version = _get_build_version()
if version is None:
# better be safe than sorry
return None
if version <= 6:
clibname = 'msvcrt'
else:
clibname = 'msvcr%d' % (version * 10)
# If python was built with in debug mode
import imp
if imp.get_suffixes()[0][0] == '_d.pyd':
clibname += 'd'
return clibname+'.dll'
def find_library(name):
if name in ('c', 'm'):
return find_msvcrt()
# See MSDN for the REAL search order.
for directory in os.environ['PATH'].split(os.pathsep):
fname = os.path.join(directory, name)
if os.path.isfile(fname):
return fname
if fname.lower().endswith(".dll"):
continue
fname = fname + ".dll"
if os.path.isfile(fname):
return fname
return None
if os.name == "ce":
# search path according to MSDN:
# - absolute path specified by filename
# - The .exe launch directory
# - the Windows directory
# - ROM dll files (where are they?)
# - OEM specified search path: HKLM\Loader\SystemPath
def find_library(name):
return name
if os.name == "posix" and sys.platform == "darwin":
from ctypes.macholib.dyld import dyld_find as _dyld_find
def find_library(name):
possible = ['lib%s.dylib' % name,
'%s.dylib' % name,
'%s.framework/%s' % (name, name)]
for name in possible:
try:
return _dyld_find(name)
except ValueError:
continue
return None
elif os.name == "posix":
# Andreas Degert's find functions, using gcc, /sbin/ldconfig, objdump
import re, tempfile, errno
def _findLib_gcc(name):
expr = r'[^\(\)\s]*lib%s\.[^\(\)\s]*' % re.escape(name)
fdout, ccout = tempfile.mkstemp()
os.close(fdout)
cmd = 'if type gcc >/dev/null 2>&1; then CC=gcc; elif type cc >/dev/null 2>&1; then CC=cc;else exit 10; fi;' \
'$CC -Wl,-t -o ' + ccout + ' 2>&1 -l' + name
try:
f = os.popen(cmd)
try:
trace = f.read()
finally:
rv = f.close()
finally:
try:
os.unlink(ccout)
except OSError as e:
if e.errno != errno.ENOENT:
raise
if rv == 10:
raise OSError('gcc or cc command not found')
res = re.search(expr, trace)
if not res:
return None
return res.group(0)
if sys.platform == "sunos5":
# use /usr/ccs/bin/dump on solaris
def _get_soname(f):
if not f:
return None
cmd = "/usr/ccs/bin/dump -Lpv 2>/dev/null " + f
f = os.popen(cmd)
try:
data = f.read()
finally:
f.close()
res = re.search(r'\[.*\]\sSONAME\s+([^\s]+)', data)
if not res:
return None
return res.group(1)
else:
def _get_soname(f):
# assuming GNU binutils / ELF
if not f:
return None
cmd = 'if ! type objdump >/dev/null 2>&1; then exit 10; fi;' \
"objdump -p -j .dynamic 2>/dev/null " + f
f = os.popen(cmd)
dump = f.read()
rv = f.close()
if rv == 10:
raise OSError('objdump command not found')
f = os.popen(cmd)
try:
data = f.read()
finally:
f.close()
res = re.search(r'\sSONAME\s+([^\s]+)', data)
if not res:
return None
return res.group(1)
if (sys.platform.startswith("freebsd")
or sys.platform.startswith("openbsd")
or sys.platform.startswith("dragonfly")):
def _num_version(libname):
# "libxyz.so.MAJOR.MINOR" => [ MAJOR, MINOR ]
parts = libname.split(".")
nums = []
try:
while parts:
nums.insert(0, int(parts.pop()))
except ValueError:
pass
return nums or [ sys.maxsize ]
def find_library(name):
ename = re.escape(name)
expr = r':-l%s\.\S+ => \S*/(lib%s\.\S+)' % (ename, ename)
f = os.popen('/sbin/ldconfig -r 2>/dev/null')
try:
data = f.read()
finally:
f.close()
res = re.findall(expr, data)
if not res:
return _get_soname(_findLib_gcc(name))
res.sort(key=_num_version)
return res[-1]
else:
def _findLib_ldconfig(name):
# XXX assuming GLIBC's ldconfig (with option -p)
expr = r'/[^\(\)\s]*lib%s\.[^\(\)\s]*' % re.escape(name)
f = os.popen('/sbin/ldconfig -p 2>/dev/null')
try:
data = f.read()
finally:
f.close()
res = re.search(expr, data)
if not res:
# Hm, this works only for libs needed by the python executable.
cmd = 'ldd %s 2>/dev/null' % sys.executable
f = os.popen(cmd)
try:
data = f.read()
finally:
f.close()
res = re.search(expr, data)
if not res:
return None
return res.group(0)
def _findSoname_ldconfig(name):
import struct
if struct.calcsize('l') == 4:
machine = os.uname()[4] + '-32'
else:
machine = os.uname()[4] + '-64'
mach_map = {
'x86_64-64': 'libc6,x86-64',
'ppc64-64': 'libc6,64bit',
'sparc64-64': 'libc6,64bit',
's390x-64': 'libc6,64bit',
'ia64-64': 'libc6,IA-64',
}
abi_type = mach_map.get(machine, 'libc6')
# XXX assuming GLIBC's ldconfig (with option -p)
expr = r'(\S+)\s+\((%s(?:, OS ABI:[^\)]*)?)\)[^/]*(/[^\(\)\s]*lib%s\.[^\(\)\s]*)' \
% (abi_type, re.escape(name))
res = re.search(expr,
os.popen('/sbin/ldconfig -p 2>/dev/null').read())
if not res:
return None
return res.group(1)
def find_library(name):
return _findSoname_ldconfig(name) or _get_soname(_findLib_gcc(name))
################################################################
# test code
def test():
from ctypes import cdll
if os.name == "nt":
print(cdll.msvcrt)
print(cdll.load("msvcrt"))
print(find_library("msvcrt"))
if os.name == "posix":
# find and load_version
print(find_library("m"))
print(find_library("c"))
print(find_library("bz2"))
# getattr
## print cdll.m
## print cdll.bz2
# load
if sys.platform == "darwin":
print(cdll.LoadLibrary("libm.dylib"))
print(cdll.LoadLibrary("libcrypto.dylib"))
print(cdll.LoadLibrary("libSystem.dylib"))
print(cdll.LoadLibrary("System.framework/System"))
else:
print(cdll.LoadLibrary("libm.so"))
print(cdll.LoadLibrary("libcrypt.so"))
print(find_library("crypt"))
if __name__ == "__main__":
test()
| 32.92803
| 118
| 0.478891
|
794b89d15c84a1cda965110bd791ba8658a9d68c
| 8,945
|
py
|
Python
|
benchmarks/vision/cifar_10_stc.py
|
jzheng17/rf-dn-paper
|
e13274e4c6a7b6cfc7f8146d3d1eb3f1bfafefe4
|
[
"MIT"
] | 6
|
2021-09-01T07:25:00.000Z
|
2021-10-06T00:53:52.000Z
|
benchmarks/vision/cifar_10_stc.py
|
jzheng17/rf-dn-paper
|
e13274e4c6a7b6cfc7f8146d3d1eb3f1bfafefe4
|
[
"MIT"
] | 20
|
2021-11-02T13:27:31.000Z
|
2022-02-15T21:16:26.000Z
|
benchmarks/vision/cifar_10_stc.py
|
jzheng17/rf-dn-paper
|
e13274e4c6a7b6cfc7f8146d3d1eb3f1bfafefe4
|
[
"MIT"
] | 3
|
2021-09-05T22:07:44.000Z
|
2021-09-14T00:32:36.000Z
|
"""
Coauthors: Haoyin Xu
Yu-Chung Peng
"""
from toolbox import *
import argparse
import random
import torchvision.models as models
import torchvision.datasets as datasets
import torchvision.transforms as transforms
def run_cnn32():
cnn32_kappa = []
cnn32_ece = []
cnn32_train_time = []
cnn32_test_time = []
for classes in classes_space:
# cohen_kappa vs num training samples (cnn32)
for i, samples in enumerate(samples_space):
# train data
cifar_trainset = datasets.CIFAR10(
root="./", train=True, download=True, transform=data_transforms
)
cifar_train_labels = np.array(cifar_trainset.targets)
# test data
cifar_testset = datasets.CIFAR10(
root="./", train=False, download=True, transform=data_transforms
)
cifar_test_labels = np.array(cifar_testset.targets)
cnn32 = SimpleCNN32Filter(len(classes))
time_limit = rf_times[i]
train_loader, test_loader = create_loaders_set(
cifar_train_labels,
cifar_test_labels,
classes,
cifar_trainset,
cifar_testset,
samples,
)
cohen_kappa, ece, train_time, test_time = run_dn_image_set(
cnn32,
train_loader,
test_loader,
time_limit=time_limit,
ratio=ratio,
)
cnn32_kappa.append(cohen_kappa)
cnn32_ece.append(ece)
cnn32_train_time.append(train_time)
cnn32_test_time.append(test_time)
print("cnn32 finished")
write_result(prefix + "cnn32_kappa" + suffix, cnn32_kappa)
write_result(prefix + "cnn32_ece" + suffix, cnn32_ece)
write_result(prefix + "cnn32_train_time" + suffix, cnn32_train_time)
write_result(prefix + "cnn32_test_time" + suffix, cnn32_test_time)
def run_cnn32_2l():
cnn32_2l_kappa = []
cnn32_2l_ece = []
cnn32_2l_train_time = []
cnn32_2l_test_time = []
for classes in classes_space:
# cohen_kappa vs num training samples (cnn32_2l)
for i, samples in enumerate(samples_space):
# train data
cifar_trainset = datasets.CIFAR10(
root="./", train=True, download=True, transform=data_transforms
)
cifar_train_labels = np.array(cifar_trainset.targets)
# test data
cifar_testset = datasets.CIFAR10(
root="./", train=False, download=True, transform=data_transforms
)
cifar_test_labels = np.array(cifar_testset.targets)
cnn32_2l = SimpleCNN32Filter2Layers(len(classes))
time_limit = rf_times[i]
train_loader, test_loader = create_loaders_set(
cifar_train_labels,
cifar_test_labels,
classes,
cifar_trainset,
cifar_testset,
samples,
)
cohen_kappa, ece, train_time, test_time = run_dn_image_set(
cnn32_2l,
train_loader,
test_loader,
time_limit=time_limit,
ratio=ratio,
)
cnn32_2l_kappa.append(cohen_kappa)
cnn32_2l_ece.append(ece)
cnn32_2l_train_time.append(train_time)
cnn32_2l_test_time.append(test_time)
print("cnn32_2l finished")
write_result(prefix + "cnn32_2l_kappa" + suffix, cnn32_2l_kappa)
write_result(prefix + "cnn32_2l_ece" + suffix, cnn32_2l_ece)
write_result(prefix + "cnn32_2l_train_time" + suffix, cnn32_2l_train_time)
write_result(prefix + "cnn32_2l_test_time" + suffix, cnn32_2l_test_time)
def run_cnn32_5l():
cnn32_5l_kappa = []
cnn32_5l_ece = []
cnn32_5l_train_time = []
cnn32_5l_test_time = []
for classes in classes_space:
# cohen_kappa vs num training samples (cnn32_5l)
for i, samples in enumerate(samples_space):
# train data
cifar_trainset = datasets.CIFAR10(
root="./", train=True, download=True, transform=data_transforms
)
cifar_train_labels = np.array(cifar_trainset.targets)
# test data
cifar_testset = datasets.CIFAR10(
root="./", train=False, download=True, transform=data_transforms
)
cifar_test_labels = np.array(cifar_testset.targets)
cnn32_5l = SimpleCNN32Filter5Layers(len(classes))
time_limit = rf_times[i]
train_loader, test_loader = create_loaders_set(
cifar_train_labels,
cifar_test_labels,
classes,
cifar_trainset,
cifar_testset,
samples,
)
cohen_kappa, ece, train_time, test_time = run_dn_image_set(
cnn32_5l,
train_loader,
test_loader,
time_limit=time_limit,
ratio=ratio,
)
cnn32_5l_kappa.append(cohen_kappa)
cnn32_5l_ece.append(ece)
cnn32_5l_train_time.append(train_time)
cnn32_5l_test_time.append(test_time)
print("cnn32_5l finished")
write_result(prefix + "cnn32_5l_kappa" + suffix, cnn32_5l_kappa)
write_result(prefix + "cnn32_5l_ece" + suffix, cnn32_5l_ece)
write_result(prefix + "cnn32_5l_train_time" + suffix, cnn32_5l_train_time)
write_result(prefix + "cnn32_5l_test_time" + suffix, cnn32_5l_test_time)
def run_resnet18():
resnet18_kappa = []
resnet18_ece = []
resnet18_train_time = []
resnet18_test_time = []
for classes in classes_space:
# cohen_kappa vs num training samples (resnet18)
for i, samples in enumerate(samples_space):
# train data
cifar_trainset = datasets.CIFAR10(
root="./", train=True, download=True, transform=data_transforms
)
cifar_train_labels = np.array(cifar_trainset.targets)
# test data
cifar_testset = datasets.CIFAR10(
root="./", train=False, download=True, transform=data_transforms
)
cifar_test_labels = np.array(cifar_testset.targets)
res = models.resnet18(pretrained=True)
num_ftrs = res.fc.in_features
res.fc = nn.Linear(num_ftrs, len(classes))
time_limit = rf_times[i]
train_loader, test_loader = create_loaders_set(
cifar_train_labels,
cifar_test_labels,
classes,
cifar_trainset,
cifar_testset,
samples,
)
cohen_kappa, ece, train_time, test_time = run_dn_image_set(
res,
train_loader,
test_loader,
time_limit=time_limit,
ratio=ratio,
)
resnet18_kappa.append(cohen_kappa)
resnet18_ece.append(ece)
resnet18_train_time.append(train_time)
resnet18_test_time.append(test_time)
print("resnet18 finished")
write_result(prefix + "resnet18_kappa" + suffix, resnet18_kappa)
write_result(prefix + "resnet18_ece" + suffix, resnet18_ece)
write_result(prefix + "resnet18_train_time" + suffix, resnet18_train_time)
write_result(prefix + "resnet18_test_time" + suffix, resnet18_test_time)
if __name__ == "__main__":
torch.multiprocessing.freeze_support()
    # Example usage: python cifar_10_stc.py -m 3 -s l
parser = argparse.ArgumentParser()
parser.add_argument("-m", help="class number")
parser.add_argument("-s", help="computation speed")
args = parser.parse_args()
n_classes = int(args.m)
prefix = args.m + "_class/"
samples_space = np.geomspace(10, 10000, num=8, dtype=int)
nums = list(range(10))
random.shuffle(nums)
classes_space = list(combinations_45(nums, n_classes))
if args.s == "h":
# High speed RF
rf_times = produce_mean(load_result(prefix + "naive_rf_train_time.txt"))
suffix = "_st.txt"
ratio = 1.0
elif args.s == "l":
# Low speed RF
rf_times = produce_mean(load_result(prefix + "naive_rf_train_time_lc.txt"))
suffix = "_sc.txt"
ratio = 0.11 / 0.9
else:
raise Exception("Wrong configurations for time calibration.")
data_transforms = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
)
run_cnn32()
run_cnn32_2l()
run_cnn32_5l()
data_transforms = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
)
run_resnet18()
| 34.141221
| 87
| 0.597317
|
794b8b603fb45226d50f1457096f2054126779d5
| 2,949
|
py
|
Python
|
utils/json2trn.py
|
szmmm/speechchain
|
909724c6f305588a52958f64f584ad21696b5173
|
[
"Apache-2.0"
] | 2
|
2019-10-29T21:41:50.000Z
|
2021-01-23T03:52:42.000Z
|
utils/json2trn.py
|
szmmm/speechchain
|
909724c6f305588a52958f64f584ad21696b5173
|
[
"Apache-2.0"
] | null | null | null |
utils/json2trn.py
|
szmmm/speechchain
|
909724c6f305588a52958f64f584ad21696b5173
|
[
"Apache-2.0"
] | 1
|
2021-12-04T01:30:25.000Z
|
2021-12-04T01:30:25.000Z
|
#!/usr/bin/env python
# encoding: utf-8
# Copyright 2017 Johns Hopkins University (Shinji Watanabe)
# 2018 Xuankai Chang (Shanghai Jiao Tong University)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import argparse
import codecs
import json
import logging
import sys
from espnet.utils.cli_utils import get_commandline_args
def get_parser():
parser = argparse.ArgumentParser(
description='convert a json to a transcription file with a token dictionary',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('json', type=str, help='json files')
parser.add_argument('dict', type=str, help='dict')
parser.add_argument('--num-spkrs', type=int, default=1, help='number of speakers')
parser.add_argument('--refs', type=str, nargs='+', help='ref for all speakers')
parser.add_argument('--hyps', type=str, nargs='+', help='hyp for all outputs')
return parser
def main(args):
args = get_parser().parse_args(args)
convert(args.json, args.dict, args.refs, args.hyps, args.num_spkrs)
def convert(jsonf, dic, refs, hyps, num_spkrs=1):
n_ref = len(refs)
n_hyp = len(hyps)
assert n_ref == n_hyp
assert n_ref == num_spkrs
# logging info
logfmt = '%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s'
logging.basicConfig(level=logging.INFO, format=logfmt)
logging.info(get_commandline_args())
logging.info("reading %s", jsonf)
with codecs.open(jsonf, 'r', encoding="utf-8") as f:
j = json.load(f)
logging.info("reading %s", dic)
with codecs.open(dic, 'r', encoding="utf-8") as f:
dictionary = f.readlines()
char_list = [entry.split(' ')[0] for entry in dictionary]
char_list.insert(0, '<blank>')
char_list.append('<eos>')
for ns in range(num_spkrs):
hyp_file = codecs.open(hyps[ns], 'w', encoding="utf-8")
ref_file = codecs.open(refs[ns], 'w', encoding="utf-8")
for x in j['utts']:
# hyps
if num_spkrs == 1:
seq = [char_list[int(i)] for i in j['utts'][x]['output'][0]['rec_tokenid'].split()]
else:
seq = [char_list[int(i)] for i in j['utts'][x]['output'][ns][0]['rec_tokenid'].split()]
hyp_file.write(" ".join(seq).replace('<eos>', '')),
hyp_file.write(" (" + j['utts'][x]['utt2spk'].replace('-', '_') + "-" + x + ")\n")
# ref
if num_spkrs == 1:
seq = [char_list[int(i)] for i in j['utts'][x]['output'][0]['tokenid'].split()]
else:
seq = [char_list[int(i)] for i in j['utts'][x]['output'][ns][0]['tokenid'].split()]
ref_file.write(" ".join(seq).replace('<eos>', '')),
ref_file.write(" (" + j['utts'][x]['utt2spk'].replace('-', '_') + "-" + x + ")\n")
hyp_file.close()
ref_file.close()
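# Programmatic usage sketch; the file paths below are hypothetical placeholders,
# not paths from an actual recipe.
def _convert_example():
    convert('decode/data.json', 'data/lang_1char/train_units.txt',
            refs=['decode/ref.trn'], hyps=['decode/hyp.trn'], num_spkrs=1)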
if __name__ == '__main__':
main(sys.argv[1:])
| 35.53012
| 103
| 0.597152
|
794b8d913445c77cf2a69da01c22e8e45e62c2fe
| 7,499
|
py
|
Python
|
slim/nets/dcgan.py
|
okojoalg/Creative-Adversarial-Networks
|
7f06f395b9f317f9235dc8c60c7b385cd6530471
|
[
"MIT"
] | 1
|
2021-05-04T16:56:32.000Z
|
2021-05-04T16:56:32.000Z
|
slim/nets/dcgan.py
|
okojoalg/Creative-Adversarial-Networks
|
7f06f395b9f317f9235dc8c60c7b385cd6530471
|
[
"MIT"
] | null | null | null |
slim/nets/dcgan.py
|
okojoalg/Creative-Adversarial-Networks
|
7f06f395b9f317f9235dc8c60c7b385cd6530471
|
[
"MIT"
] | null | null | null |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DCGAN generator and discriminator from https://arxiv.org/abs/1511.06434."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from math import log
import tensorflow as tf
import tf_slim as slim
def _validate_image_inputs(inputs):
inputs.get_shape().assert_has_rank(4)
inputs.get_shape()[1:3].assert_is_fully_defined()
if inputs.get_shape()[1] != inputs.get_shape()[2]:
raise ValueError('Input tensor does not have equal width and height: ',
inputs.get_shape()[1:3])
width = inputs.get_shape().as_list()[1]
if log(width, 2) != int(log(width, 2)):
raise ValueError('Input tensor `width` is not a power of 2: ', width)
# TODO(joelshor): Use fused batch norm by default. Investigate why some GAN
# setups need the gradient of gradient FusedBatchNormGrad.
def discriminator(inputs,
depth=64,
is_training=True,
reuse=None,
scope='Discriminator',
fused_batch_norm=False):
"""Discriminator network for DCGAN.
Construct discriminator network from inputs to the final endpoint.
Args:
inputs: A tensor of size [batch_size, height, width, channels]. Must be
floating point.
depth: Number of channels in first convolution layer.
is_training: Whether the network is for training or not.
reuse: Whether or not the network variables should be reused. `scope`
must be given to be reused.
scope: Optional variable_scope.
fused_batch_norm: If `True`, use a faster, fused implementation of
batch norm.
Returns:
logits: The pre-softmax activations, a tensor of size [batch_size, 1]
end_points: a dictionary from components of the network to their activation.
Raises:
ValueError: If the input image shape is not 4-dimensional, if the spatial
dimensions aren't defined at graph construction time, if the spatial
dimensions aren't square, or if the spatial dimensions aren't a power of
two.
"""
normalizer_fn = slim.batch_norm
normalizer_fn_args = {
'is_training': is_training,
'zero_debias_moving_mean': True,
'fused': fused_batch_norm,
}
_validate_image_inputs(inputs)
inp_shape = inputs.get_shape().as_list()[1]
end_points = {}
with tf.compat.v1.variable_scope(scope, values=[inputs], reuse=reuse) as scope:
with slim.arg_scope([normalizer_fn], **normalizer_fn_args):
with slim.arg_scope([slim.conv2d],
stride=2,
kernel_size=4,
activation_fn=tf.nn.leaky_relu):
net = inputs
        for i in range(int(log(inp_shape, 2))):
scope = 'conv%i' % (i + 1)
current_depth = depth * 2**i
normalizer_fn_ = None if i == 0 else normalizer_fn
net = slim.conv2d(
net, current_depth, normalizer_fn=normalizer_fn_, scope=scope)
end_points[scope] = net
logits = slim.conv2d(net, 1, kernel_size=1, stride=1, padding='VALID',
normalizer_fn=None, activation_fn=None)
logits = tf.reshape(logits, [-1, 1])
end_points['logits'] = logits
return logits, end_points
# TODO(joelshor): Use fused batch norm by default. Investigate why some GAN
# setups need the gradient of gradient FusedBatchNormGrad.
def generator(inputs,
depth=64,
final_size=32,
num_outputs=3,
is_training=True,
reuse=None,
scope='Generator',
fused_batch_norm=False):
"""Generator network for DCGAN.
Construct generator network from inputs to the final endpoint.
Args:
    inputs: A tensor of size [batch_size, N], where N can be any positive integer.
depth: Number of channels in last deconvolution layer.
final_size: The shape of the final output.
num_outputs: Number of output features. For images, this is the number of
channels.
is_training: whether is training or not.
    reuse: Whether or not the network variables should be reused. `scope`
      must be given to be reused.
scope: Optional variable_scope.
fused_batch_norm: If `True`, use a faster, fused implementation of
batch norm.
Returns:
logits: the pre-softmax activations, a tensor of size
[batch_size, 32, 32, channels]
end_points: a dictionary from components of the network to their activation.
Raises:
ValueError: If `inputs` is not 2-dimensional.
ValueError: If `final_size` isn't a power of 2 or is less than 8.
"""
normalizer_fn = slim.batch_norm
normalizer_fn_args = {
'is_training': is_training,
'zero_debias_moving_mean': True,
'fused': fused_batch_norm,
}
inputs.get_shape().assert_has_rank(2)
if log(final_size, 2) != int(log(final_size, 2)):
raise ValueError('`final_size` (%i) must be a power of 2.' % final_size)
if final_size < 8:
raise ValueError('`final_size` (%i) must be greater than 8.' % final_size)
end_points = {}
num_layers = int(log(final_size, 2)) - 1
with tf.compat.v1.variable_scope(scope, values=[inputs], reuse=reuse) as scope:
with slim.arg_scope([normalizer_fn], **normalizer_fn_args):
with slim.arg_scope([slim.conv2d_transpose],
normalizer_fn=normalizer_fn,
stride=2,
kernel_size=4):
net = tf.expand_dims(tf.expand_dims(inputs, 1), 1)
# First upscaling is different because it takes the input vector.
current_depth = depth * 2 ** (num_layers - 1)
scope = 'deconv1'
net = slim.conv2d_transpose(
net, current_depth, stride=1, padding='VALID', scope=scope)
end_points[scope] = net
        for i in range(2, num_layers):
scope = 'deconv%i' % (i)
current_depth = depth * 2 ** (num_layers - i)
net = slim.conv2d_transpose(net, current_depth, scope=scope)
end_points[scope] = net
# Last layer has different normalizer and activation.
scope = 'deconv%i' % (num_layers)
net = slim.conv2d_transpose(
net, depth, normalizer_fn=None, activation_fn=None, scope=scope)
end_points[scope] = net
# Convert to proper channels.
scope = 'logits'
logits = slim.conv2d(
net,
num_outputs,
normalizer_fn=None,
activation_fn=None,
kernel_size=1,
stride=1,
padding='VALID',
scope=scope)
end_points[scope] = logits
logits.get_shape().assert_has_rank(4)
logits.get_shape().assert_is_compatible_with(
[None, final_size, final_size, num_outputs])
return logits, end_points
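# Minimal usage sketch; this assumes TF1-style graph execution through
# tf.compat.v1 (eager mode disabled) and purely illustrative shapes, not the
# settings used in any particular experiment.
def _dcgan_example():
  noise = tf.compat.v1.placeholder(tf.float32, [None, 64])
  images, _ = generator(noise, final_size=32, num_outputs=3)
  logits, _ = discriminator(images)
  return images, logits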
| 37.308458
| 81
| 0.64702
|
794b8d9749897156ad3075fc954a59c761dae4b0
| 5,232
|
py
|
Python
|
venv/Lib/site-packages/kombu/transport/zookeeper.py
|
Verckolf/MyInterfaceTest
|
e05674bd673a6a43cfb33f7cb4318886ba92a05c
|
[
"MIT"
] | 39
|
2016-12-05T14:36:37.000Z
|
2021-07-29T18:22:34.000Z
|
venv/Lib/site-packages/kombu/transport/zookeeper.py
|
Verckolf/MyInterfaceTest
|
e05674bd673a6a43cfb33f7cb4318886ba92a05c
|
[
"MIT"
] | 68
|
2016-12-12T20:38:47.000Z
|
2020-07-26T18:28:49.000Z
|
venv/Lib/site-packages/kombu/transport/zookeeper.py
|
Verckolf/MyInterfaceTest
|
e05674bd673a6a43cfb33f7cb4318886ba92a05c
|
[
"MIT"
] | 120
|
2016-08-18T14:53:03.000Z
|
2020-06-16T13:27:20.000Z
|
"""
kombu.transport.zookeeper
=========================
Zookeeper transport.
:copyright: (c) 2010 - 2013 by Mahendra M.
:license: BSD, see LICENSE for more details.
**Synopsis**
Connects to a zookeeper node as <server>:<port>/<vhost>
The <vhost> becomes the base for all the other znodes. So we can use
it like a vhost.
This uses the built-in kazoo recipe for queues
**References**
- https://zookeeper.apache.org/doc/trunk/recipes.html#sc_recipes_Queues
- https://kazoo.readthedocs.io/en/latest/api/recipe/queue.html
**Limitations**
This queue does not offer reliable consumption. An entry is removed from
the queue prior to being processed. So if an error occurs, the consumer
has to re-queue the item or it will be lost.
"""
from __future__ import absolute_import
import os
import socket
from anyjson import loads, dumps
from kombu.five import Empty
from kombu.utils.encoding import bytes_to_str
from . import virtual
MAX_PRIORITY = 9
try:
import kazoo
from kazoo.client import KazooClient
from kazoo.recipe.queue import Queue
KZ_CONNECTION_ERRORS = (
kazoo.exceptions.SystemErrorException,
kazoo.exceptions.ConnectionLossException,
kazoo.exceptions.MarshallingErrorException,
kazoo.exceptions.UnimplementedException,
kazoo.exceptions.OperationTimeoutException,
kazoo.exceptions.NoAuthException,
kazoo.exceptions.InvalidACLException,
kazoo.exceptions.AuthFailedException,
kazoo.exceptions.SessionExpiredException,
)
KZ_CHANNEL_ERRORS = (
kazoo.exceptions.RuntimeInconsistencyException,
kazoo.exceptions.DataInconsistencyException,
kazoo.exceptions.BadArgumentsException,
kazoo.exceptions.MarshallingErrorException,
kazoo.exceptions.UnimplementedException,
kazoo.exceptions.OperationTimeoutException,
kazoo.exceptions.ApiErrorException,
kazoo.exceptions.NoNodeException,
kazoo.exceptions.NoAuthException,
kazoo.exceptions.NodeExistsException,
kazoo.exceptions.NoChildrenForEphemeralsException,
kazoo.exceptions.NotEmptyException,
kazoo.exceptions.SessionExpiredException,
kazoo.exceptions.InvalidCallbackException,
socket.error,
)
except ImportError:
kazoo = None # noqa
KZ_CONNECTION_ERRORS = KZ_CHANNEL_ERRORS = () # noqa
DEFAULT_PORT = 2181
__author__ = 'Mahendra M <mahendra.m@gmail.com>'
class Channel(virtual.Channel):
_client = None
_queues = {}
def _get_path(self, queue_name):
return os.path.join(self.vhost, queue_name)
def _get_queue(self, queue_name):
queue = self._queues.get(queue_name, None)
if queue is None:
queue = Queue(self.client, self._get_path(queue_name))
self._queues[queue_name] = queue
# Ensure that the queue is created
len(queue)
return queue
def _put(self, queue, message, **kwargs):
try:
priority = message['properties']['delivery_info']['priority']
except KeyError:
priority = 0
queue = self._get_queue(queue)
queue.put(dumps(message), priority=(MAX_PRIORITY - priority))
def _get(self, queue):
queue = self._get_queue(queue)
msg = queue.get()
if msg is None:
raise Empty()
return loads(bytes_to_str(msg))
def _purge(self, queue):
count = 0
queue = self._get_queue(queue)
while True:
msg = queue.get()
if msg is None:
break
count += 1
return count
def _delete(self, queue, *args, **kwargs):
if self._has_queue(queue):
self._purge(queue)
self.client.delete(self._get_path(queue))
def _size(self, queue):
queue = self._get_queue(queue)
return len(queue)
def _new_queue(self, queue, **kwargs):
if not self._has_queue(queue):
queue = self._get_queue(queue)
def _has_queue(self, queue):
return self.client.exists(self._get_path(queue)) is not None
def _open(self):
conninfo = self.connection.client
port = conninfo.port or DEFAULT_PORT
conn_str = '%s:%s' % (conninfo.hostname, port)
self.vhost = os.path.join('/', conninfo.virtual_host[0:-1])
conn = KazooClient(conn_str)
conn.start()
return conn
@property
def client(self):
if self._client is None:
self._client = self._open()
return self._client
class Transport(virtual.Transport):
Channel = Channel
polling_interval = 1
default_port = DEFAULT_PORT
connection_errors = (
virtual.Transport.connection_errors + KZ_CONNECTION_ERRORS
)
channel_errors = (
virtual.Transport.channel_errors + KZ_CHANNEL_ERRORS
)
driver_type = 'zookeeper'
driver_name = 'kazoo'
def __init__(self, *args, **kwargs):
if kazoo is None:
raise ImportError('The kazoo library is not installed')
super(Transport, self).__init__(*args, **kwargs)
def driver_version(self):
return kazoo.__version__
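# Minimal usage sketch; it assumes a ZooKeeper node reachable on
# localhost:2181 and that kazoo is installed, and uses kombu's generic
# SimpleQueue API rather than anything specific to this transport.
def _zookeeper_example():
    from kombu import Connection
    with Connection('zookeeper://localhost:2181/') as connection:
        queue = connection.SimpleQueue('demo')
        queue.put({'hello': 'world'})
        message = queue.get(timeout=1)
        message.ack()
        return message.payload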
| 27.68254
| 73
| 0.658257
|
794b8e5a3505a2fc84abcaaa821de76519c705f8
| 3,498
|
py
|
Python
|
python/marvin/io.py
|
NTForked/marvin
|
a5f242ec7a8ba7ab7af7cb1590eb4932a2b13cda
|
[
"MIT"
] | 487
|
2015-11-10T09:04:26.000Z
|
2022-03-11T08:53:03.000Z
|
python/marvin/io.py
|
NTForked/marvin
|
a5f242ec7a8ba7ab7af7cb1590eb4932a2b13cda
|
[
"MIT"
] | 42
|
2015-11-10T17:33:50.000Z
|
2018-07-05T17:15:38.000Z
|
python/marvin/io.py
|
NTForked/marvin
|
a5f242ec7a8ba7ab7af7cb1590eb4932a2b13cda
|
[
"MIT"
] | 178
|
2015-11-10T19:32:07.000Z
|
2022-03-17T08:38:22.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, division
import numpy as np
import struct
class Tensor(object):
def __init__(self):
self.name = None
self.value = None
CODE_TO_TYPE = {
0: np.float16,
1: np.float32,
2: np.float64,
3: np.uint8,
4: np.uint16,
5: np.uint32,
6: np.uint64,
7: np.int8,
8: np.int16,
9: np.int32,
10: np.int64,
11: np.dtype('a').type,
12: np.dtype('b').type
}
TYPE_TO_CODE = {
np.float16: 0,
np.float32: 1,
np.float64: 2,
np.uint8: 3,
np.uint16: 4,
np.uint32: 5,
np.uint64: 6,
np.int8: 7,
np.int16: 8,
np.int32: 9,
np.int64: 10,
np.dtype('a').type: 11,
np.dtype('b').type: 12
}
def code2type(code):
try:
return CODE_TO_TYPE[code]
except KeyError:
raise ValueError('Unknown type code {}'.format(code))
def type2code(t):
try:
return TYPE_TO_CODE[t]
except KeyError:
raise TypeError('Unknown tensor type {}'.format(t))
def read_tensor(filename):
tensors = []
with open(filename, 'rb') as fp:
type_code_str = fp.read(1)
while len(type_code_str) > 0:
tensor = Tensor()
type_code = np.fromstring(type_code_str, dtype=np.uint8)[0]
tensor_type = code2type(type_code)
fp.read(4) # type size
name_length = struct.unpack('i', fp.read(4))[0]
tensor.name = fp.read(name_length).decode('ascii')
num_dims = struct.unpack('i', fp.read(4))[0]
# maybe zero padding at the end of a feature file
if num_dims == 0:
break
dims = np.fromstring(fp.read(4 * num_dims), dtype=np.int32)
num_bytes = np.prod(dims) * 4
tensor.value = np.fromstring(
fp.read(num_bytes), dtype=tensor_type).reshape(dims)
tensors.append(tensor)
type_code_str = fp.read(1)
return tensors
def read_tensor_v0(filename):
tensors = []
with open(filename, 'rb') as fp:
name_length_str = fp.read(4)
while len(name_length_str) > 0:
tensor = Tensor()
tensor_type = np.float32
name_length = struct.unpack('i', name_length_str)[0]
tensor.name = fp.read(name_length).decode('ascii')
num_dims = struct.unpack('i', fp.read(4))[0]
# maybe zero padding at the end of a feature file
if num_dims == 0:
break
dims = np.fromstring(fp.read(4 * num_dims), dtype=np.int32)
num_bytes = np.prod(dims) * 4
tensor.value = np.fromstring(
fp.read(num_bytes), dtype=tensor_type).reshape(dims)
tensors.append(tensor)
name_length_str = fp.read(4)
return tensors
def write_tensor(filename, tensors):
with open(filename, 'wb') as fp:
for tensor in tensors:
fp.write(np.array(type2code(tensor.value.dtype.type),
dtype=np.uint8).tostring())
fp.write(np.array(tensor.value.dtype.itemsize,
dtype=np.uint32).tostring())
fp.write(struct.pack('i', len(tensor.name)))
fp.write(tensor.name)
fp.write(struct.pack('i', len(tensor.value.shape)))
fp.write(np.array(tensor.value.shape, dtype=np.int32).tostring())
fp.write(tensor.value.tostring())
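# Round-trip sketch; it assumes the Python 2 byte-string semantics this module
# is written for, and 'example.tensor' is a hypothetical output path.
def _round_trip_example():
    tensor = Tensor()
    tensor.name = 'feat'
    tensor.value = np.arange(12, dtype=np.float32).reshape(3, 4)
    write_tensor('example.tensor', [tensor])
    loaded = read_tensor('example.tensor')
    assert loaded[0].name == 'feat'
    assert np.array_equal(loaded[0].value, tensor.value)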
| 29.15
| 77
| 0.561464
|
794b8ec3c542fa446854d8c8c7a067532e22b23d
| 29,321
|
py
|
Python
|
src/sage/algebras/shuffle_algebra.py
|
bopopescu/classic_diff_geom
|
2b1d88becbc8cb30962e0995cc78e429e0f5589f
|
[
"BSL-1.0"
] | null | null | null |
src/sage/algebras/shuffle_algebra.py
|
bopopescu/classic_diff_geom
|
2b1d88becbc8cb30962e0995cc78e429e0f5589f
|
[
"BSL-1.0"
] | null | null | null |
src/sage/algebras/shuffle_algebra.py
|
bopopescu/classic_diff_geom
|
2b1d88becbc8cb30962e0995cc78e429e0f5589f
|
[
"BSL-1.0"
] | 1
|
2020-07-24T12:08:30.000Z
|
2020-07-24T12:08:30.000Z
|
# -*- coding: utf-8 -*-
r"""
Shuffle algebras
AUTHORS:
- Frédéric Chapoton (2013-03): Initial version
- Matthieu Deneufchatel (2013-07): Implemented dual PBW basis
"""
#*****************************************************************************
# Copyright (C) 2013 Frédéric Chapoton <chapoton-math-univ-lyon1-fr>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.categories.rings import Rings
from sage.categories.algebras_with_basis import AlgebrasWithBasis
from sage.categories.commutative_algebras import CommutativeAlgebras
from sage.categories.coalgebras_with_basis import CoalgebrasWithBasis
from sage.categories.tensor import TensorProductsCategory, tensor
from sage.combinat.free_module import CombinatorialFreeModule
from sage.combinat.words.alphabet import Alphabet
from sage.combinat.words.words import Words
from sage.combinat.words.word import Word
from sage.combinat.words.finite_word import FiniteWord_class
from sage.misc.cachefunc import cached_method
from sage.misc.lazy_attribute import lazy_attribute
from sage.misc.misc_c import prod
from sage.sets.family import Family
class ShuffleAlgebra(CombinatorialFreeModule):
r"""
The shuffle algebra on some generators over a base ring.
Shuffle algebras are commutative and associative algebras, with a
basis indexed by words. The product of two words `w_1 \cdot w_2` is given
by the sum over the shuffle product of `w_1` and `w_2`.
.. SEEALSO::
For more on shuffle products, see
:mod:`~sage.combinat.words.shuffle_product` and
:meth:`~sage.combinat.words.finite_word.FiniteWord_class.shuffle()`.
REFERENCES:
- :wikipedia:`Shuffle algebra`
INPUT:
- ``R`` -- ring
- ``names`` -- generator names (string or an alphabet)
EXAMPLES::
sage: F = ShuffleAlgebra(QQ, 'xyz'); F
Shuffle Algebra on 3 generators ['x', 'y', 'z'] over Rational Field
sage: mul(F.gens())
B[word: xyz] + B[word: xzy] + B[word: yxz] + B[word: yzx] + B[word: zxy] + B[word: zyx]
sage: mul([ F.gen(i) for i in range(2) ]) + mul([ F.gen(i+1) for i in range(2) ])
B[word: xy] + B[word: yx] + B[word: yz] + B[word: zy]
sage: S = ShuffleAlgebra(ZZ, 'abcabc'); S
Shuffle Algebra on 3 generators ['a', 'b', 'c'] over Integer Ring
sage: S.base_ring()
Integer Ring
sage: G = ShuffleAlgebra(S, 'mn'); G
Shuffle Algebra on 2 generators ['m', 'n'] over Shuffle Algebra on 3 generators ['a', 'b', 'c'] over Integer Ring
sage: G.base_ring()
Shuffle Algebra on 3 generators ['a', 'b', 'c'] over Integer Ring
Shuffle algebras commute with their base ring::
sage: K = ShuffleAlgebra(QQ,'ab')
sage: a,b = K.gens()
sage: K.is_commutative()
True
sage: L = ShuffleAlgebra(K,'cd')
sage: c,d = L.gens()
sage: L.is_commutative()
True
sage: s = a*b^2 * c^3; s
(12*B[word:abb]+12*B[word:bab]+12*B[word:bba])*B[word: ccc]
sage: parent(s)
Shuffle Algebra on 2 generators ['c', 'd'] over Shuffle Algebra on 2 generators ['a', 'b'] over Rational Field
sage: c^3 * a * b^2
(12*B[word:abb]+12*B[word:bab]+12*B[word:bba])*B[word: ccc]
Shuffle algebras are commutative::
sage: c^3 * b * a * b == c * a * c * b^2 * c
True
We can also manipulate elements in the basis and coerce elements from our
base field::
sage: F = ShuffleAlgebra(QQ, 'abc')
sage: B = F.basis()
sage: B[Word('bb')] * B[Word('ca')]
B[word: bbca] + B[word: bcab] + B[word: bcba] + B[word: cabb] + B[word: cbab] + B[word: cbba]
sage: 1 - B[Word('bb')] * B[Word('ca')] / 2
B[word: ] - 1/2*B[word: bbca] - 1/2*B[word: bcab] - 1/2*B[word: bcba] - 1/2*B[word: cabb] - 1/2*B[word: cbab] - 1/2*B[word: cbba]
"""
@staticmethod
def __classcall_private__(cls, R, names):
"""
Normalize input to ensure a unique representation.
EXAMPLES::
sage: F1 = ShuffleAlgebra(QQ, 'xyz')
sage: F2 = ShuffleAlgebra(QQ, ['x','y','z'])
sage: F3 = ShuffleAlgebra(QQ, Alphabet('xyz'))
sage: F1 is F2 and F1 is F3
True
"""
return super(ShuffleAlgebra, cls).__classcall__(cls, R, Alphabet(names))
def __init__(self, R, names):
r"""
Initialize ``self``.
EXAMPLES::
sage: F = ShuffleAlgebra(QQ, 'xyz'); F
Shuffle Algebra on 3 generators ['x', 'y', 'z'] over Rational Field
sage: TestSuite(F).run()
TESTS::
sage: ShuffleAlgebra(24, 'toto')
Traceback (most recent call last):
...
TypeError: argument R must be a ring
"""
if R not in Rings():
raise TypeError("argument R must be a ring")
self._alphabet = names
self.__ngens = self._alphabet.cardinality()
CombinatorialFreeModule.__init__(self, R, Words(names),
latex_prefix="",
category=(AlgebrasWithBasis(R), CommutativeAlgebras(R), CoalgebrasWithBasis(R)))
def variable_names(self):
r"""
Return the names of the variables.
EXAMPLES::
sage: R = ShuffleAlgebra(QQ,'xy')
sage: R.variable_names()
{'x', 'y'}
"""
return self._alphabet
def is_commutative(self):
r"""
Return ``True`` as the shuffle algebra is commutative.
EXAMPLES::
sage: R = ShuffleAlgebra(QQ,'x')
sage: R.is_commutative()
True
sage: R = ShuffleAlgebra(QQ,'xy')
sage: R.is_commutative()
True
"""
return True
def _repr_(self):
r"""
Text representation of this shuffle algebra.
EXAMPLES::
sage: F = ShuffleAlgebra(QQ,'xyz')
sage: F # indirect doctest
Shuffle Algebra on 3 generators ['x', 'y', 'z'] over Rational Field
sage: ShuffleAlgebra(ZZ,'a')
Shuffle Algebra on one generator ['a'] over Integer Ring
"""
if self.__ngens == 1:
gen = "one generator"
else:
gen = "%s generators" %self.__ngens
return "Shuffle Algebra on "+ gen +" %s over %s"%(
self._alphabet.list(), self.base_ring())
@cached_method
def one_basis(self):
r"""
        Return the empty word, which indexes the `1` of this algebra,
as per :meth:`AlgebrasWithBasis.ParentMethods.one_basis`.
EXAMPLES::
sage: A = ShuffleAlgebra(QQ,'a')
sage: A.one_basis()
word:
sage: A.one()
B[word: ]
"""
return self.basis().keys()([])
def product_on_basis(self, w1, w2):
r"""
Return the product of basis elements ``w1`` and ``w2``, as per
:meth:`AlgebrasWithBasis.ParentMethods.product_on_basis()`.
INPUT:
- ``w1``, ``w2`` -- Basis elements
EXAMPLES::
sage: A = ShuffleAlgebra(QQ,'abc')
sage: W = A.basis().keys()
sage: A.product_on_basis(W("acb"), W("cba"))
B[word: acbacb] + B[word: acbcab] + 2*B[word: acbcba] + 2*B[word: accbab] + 4*B[word: accbba] + B[word: cabacb] + B[word: cabcab] + B[word: cabcba] + B[word: cacbab] + 2*B[word: cacbba] + 2*B[word: cbaacb] + B[word: cbacab] + B[word: cbacba]
sage: (a,b,c) = A.algebra_generators()
sage: a * (1-b)^2 * c
2*B[word: abbc] - 2*B[word: abc] + 2*B[word: abcb] + B[word: ac] - 2*B[word: acb] + 2*B[word: acbb] + 2*B[word: babc] - 2*B[word: bac] + 2*B[word: bacb] + 2*B[word: bbac] + 2*B[word: bbca] - 2*B[word: bca] + 2*B[word: bcab] + 2*B[word: bcba] + B[word: ca] - 2*B[word: cab] + 2*B[word: cabb] - 2*B[word: cba] + 2*B[word: cbab] + 2*B[word: cbba]
"""
return sum(self.basis()[u] for u in w1.shuffle(w2))
def gen(self,i):
r"""
The ``i``-th generator of the algebra.
INPUT:
- ``i`` -- an integer
EXAMPLES::
sage: F = ShuffleAlgebra(ZZ,'xyz')
sage: F.gen(0)
B[word: x]
sage: F.gen(4)
Traceback (most recent call last):
...
IndexError: argument i (= 4) must be between 0 and 2
"""
n = self.__ngens
if i < 0 or not i < n:
raise IndexError("argument i (= %s) must be between 0 and %s"%(i, n-1))
return self.algebra_generators()[i]
def coproduct_on_basis(self, w):
"""
Return the coproduct of the element of the basis indexed by
the word ``w``.
INPUT:
- ``w`` -- a word
EXAMPLES::
sage: F = ShuffleAlgebra(QQ,'ab')
sage: F.coproduct_on_basis(Word('a'))
B[word: ] # B[word: a] + B[word: a] # B[word: ]
sage: F.coproduct_on_basis(Word('aba'))
B[word: ] # B[word: aba] + B[word: a] # B[word: ab] + B[word: a] # B[word: ba]
+ B[word: aa] # B[word: b] + B[word: ab] # B[word: a] + B[word: aba] # B[word: ]
+ B[word: b] # B[word: aa] + B[word: ba] # B[word: a]
sage: F.coproduct_on_basis(Word())
B[word: ] # B[word: ]
"""
if len(w) == 0:
return self.tensor_square().monomial((self.one_basis(), self.one_basis()))
if len(w) == 1:
return self.tensor_square().sum_of_terms([
((w, self.one_basis()), 1),
((self.one_basis(), w), 1) ], distinct=True)
B = self.basis()
result = self.coproduct_on_basis(Word([w[0]]))
for i in w[1:]:
result = self.tensor_square().sum_of_terms([
((Word(v1)*Word(u1), Word(v2)*Word(u2)), coeff1 * coeff2)
for ((u1,u2),coeff1) in self.coproduct_on_basis(Word([i]))
for ((v1,v2),coeff2) in result ])
return result
def coproduct(self, S):
"""
Return the coproduct of the series ``S``.
EXAMPLES::
sage: F = ShuffleAlgebra(QQ,'ab')
sage: S = F.an_element(); S
2*B[word: ] + 2*B[word: a] + 3*B[word: b]
sage: F.coproduct(S)
2*B[word: ] # B[word: ] + 2*B[word: ] # B[word: a] + 3*B[word: ] # B[word: b]
+ 2*B[word: a] # B[word: ] + 3*B[word: b] # B[word: ]
sage: F.coproduct(F.one())
B[word: ] # B[word: ]
"""
return sum([c * self.coproduct_on_basis(i)
for i,c in S.monomial_coefficients().items()])
def counit(self,S):
"""
Return the counit of ``S``.
EXAMPLES::
sage: F = ShuffleAlgebra(QQ,'ab')
sage: S = F.an_element(); S
2*B[word: ] + 2*B[word: a] + 3*B[word: b]
sage: F.counit(S)
2
"""
return S.monomial_coefficients().get(Word(), 0)
@cached_method
def algebra_generators(self):
r"""
Return the generators of this algebra.
EXAMPLES::
sage: A = ShuffleAlgebra(ZZ,'fgh'); A
Shuffle Algebra on 3 generators ['f', 'g', 'h'] over Integer Ring
sage: A.algebra_generators()
Family (B[word: f], B[word: g], B[word: h])
"""
Words = self.basis().keys()
return Family( [self.monomial(Words(a)) for a in self._alphabet] )
# FIXME: use this once the keys argument of FiniteFamily will be honoured
# for the specifying the order of the elements in the family
#return Family(self._alphabet, lambda a: self.term(self.basis().keys()(a)))
gens = algebra_generators
def _element_constructor_(self, x):
r"""
Convert ``x`` into ``self``.
EXAMPLES::
sage: R = ShuffleAlgebra(QQ,'xy')
sage: x, y = R.gens()
sage: R(3)
3*B[word: ]
sage: R(x)
B[word: x]
sage: R('xyy')
B[word: xyy]
sage: R(Word('xyx'))
B[word: xyx]
"""
if isinstance(x, (str, FiniteWord_class)):
W = self.basis().keys()
return self.monomial(W(x))
P = x.parent()
if isinstance(P, ShuffleAlgebra):
if P is self:
return x
if not (P is self.base_ring()):
return self.element_class(self, x.monomial_coefficients())
if isinstance(P, DualPBWBasis):
return self(P.expansion(x))
# ok, not a shuffle algebra element (or should not be viewed as one).
if isinstance(x, basestring):
from sage.misc.sage_eval import sage_eval
return sage_eval(x,locals=self.gens_dict())
R = self.base_ring()
# coercion via base ring
x = R(x)
if x == 0:
return self.element_class(self,{})
else:
return self.from_base_ring_from_one_basis(x)
def _coerce_impl(self, x):
r"""
Canonical coercion of ``x`` into ``self``.
Here is what canonically coerces to ``self``:
- this shuffle algebra,
- anything that coerces to the base ring of this shuffle algebra,
- any shuffle algebra on the same variables, whose base ring
coerces to the base ring of this shuffle algebra.
EXAMPLES::
sage: F = ShuffleAlgebra(GF(7), 'xyz'); F
Shuffle Algebra on 3 generators ['x', 'y', 'z'] over Finite Field of size 7
Elements of the shuffle algebra canonically coerce in::
sage: x, y, z = F.gens()
sage: F.coerce(x*y) # indirect doctest
B[word: xy] + B[word: yx]
Elements of the integers coerce in, since there is a coerce map
from `\ZZ` to GF(7)::
sage: F.coerce(1) # indirect doctest
B[word: ]
There is no coerce map from `\QQ` to `\GF{7}`::
sage: F.coerce(2/3) # indirect doctest
Traceback (most recent call last):
...
TypeError: no canonical coercion from Rational Field to Shuffle Algebra on 3 generators ['x', 'y', 'z'] over Finite Field of size 7
Elements of the base ring coerce in::
sage: F.coerce(GF(7)(5))
5*B[word: ]
The shuffle algebra over `\ZZ` on `x, y, z` coerces in, since
`\ZZ` coerces to `\GF{7}`::
sage: G = ShuffleAlgebra(ZZ,'xyz')
sage: Gx,Gy,Gz = G.gens()
sage: z = F.coerce(Gx**2 * Gy);z
2*B[word: xxy] + 2*B[word: xyx] + 2*B[word: yxx]
sage: z.parent() is F
True
However, `\GF{7}` does not coerce to `\ZZ`, so the shuffle
algebra over `\GF{7}` does not coerce to the one over `\ZZ`::
sage: G.coerce(x^3*y)
Traceback (most recent call last):
...
TypeError: no canonical coercion from Shuffle Algebra on 3 generators
['x', 'y', 'z'] over Finite Field of size 7 to Shuffle Algebra on 3
generators ['x', 'y', 'z'] over Integer Ring
"""
try:
R = x.parent()
# shuffle algebras in the same variables over any base
# that coerces in:
if isinstance(R,ShuffleAlgebra):
if R.variable_names() == self.variable_names():
if self.has_coerce_map_from(R.base_ring()):
return self(x)
else:
raise TypeError("no natural map between bases of shuffle algebras")
if isinstance(R, DualPBWBasis):
return self(R.expansion(x))
except AttributeError:
pass
# any ring that coerces to the base ring of this shuffle algebra.
return self._coerce_try(x, [self.base_ring()])
def _coerce_map_from_(self, R):
r"""
Return ``True`` if there is a coercion from ``R`` into ``self``
and ``False`` otherwise.
The things that coerce into ``self`` are
- Shuffle Algebras in the same variables over a base with a coercion
map into ``self.base_ring()``.
- Anything with a coercion into ``self.base_ring()``.
TESTS::
sage: F = ShuffleAlgebra(ZZ, 'xyz')
sage: G = ShuffleAlgebra(QQ, 'xyz')
sage: H = ShuffleAlgebra(ZZ, 'y')
sage: F._coerce_map_from_(G)
False
sage: G._coerce_map_from_(F)
True
sage: F._coerce_map_from_(H)
False
sage: F._coerce_map_from_(QQ)
False
sage: G._coerce_map_from_(QQ)
True
sage: F.has_coerce_map_from(PolynomialRing(ZZ, 3, 'x,y,z'))
False
sage: F._coerce_map_from_(F.dual_pbw_basis())
True
"""
# shuffle algebras in the same variable over any base that coerces in:
if isinstance(R, ShuffleAlgebra):
if R.variable_names() == self.variable_names():
if self.base_ring().has_coerce_map_from(R.base_ring()):
return True
else:
return False
if isinstance(R, DualPBWBasis):
return self.has_coerce_map_from(R._alg)
return self.base_ring().has_coerce_map_from(R)
def dual_pbw_basis(self):
"""
Return the dual PBW of ``self``.
EXAMPLES::
sage: A = ShuffleAlgebra(QQ, 'ab')
sage: A.dual_pbw_basis()
The dual Poincare-Birkhoff-Witt basis of Shuffle Algebra on 2 generators ['a', 'b'] over Rational Field
"""
return DualPBWBasis(self.base_ring(), self._alphabet)
def to_dual_pbw_element(self, w):
"""
Return the element `w` of ``self`` expressed in the dual PBW basis.
INPUT:
- ``w`` -- an element of the shuffle algebra
EXAMPLES::
sage: A = ShuffleAlgebra(QQ, 'ab')
sage: f = 2 * A(Word()) + A(Word('ab')); f
2*B[word: ] + B[word: ab]
sage: A.to_dual_pbw_element(f)
2*S[word: ] + S[word: ab]
sage: A.to_dual_pbw_element(A.one())
S[word: ]
sage: S = A.dual_pbw_basis()
sage: elt = S.expansion_on_basis(Word('abba')); elt
2*B[word: aabb] + B[word: abab] + B[word: abba]
sage: A.to_dual_pbw_element(elt)
S[word: abba]
sage: A.to_dual_pbw_element(2*A(Word('aabb')) + A(Word('abab')))
S[word: abab]
sage: S.expansion(S('abab'))
2*B[word: aabb] + B[word: abab]
"""
D = self.dual_pbw_basis()
l = {}
W = self.basis().keys()
while w != self.zero():
support = [W(i[0]) for i in list(w)]
min_elt = W(support[0])
if len(support) > 1:
for word in support[1:len(support)-1]:
if min_elt.lex_less(word):
min_elt = W(word)
coeff = list(w)[support.index(min_elt)][1]
l[min_elt] = l.get(min_elt, 0) + coeff
w = w - coeff * D.expansion_on_basis(W(min_elt))
return D.sum_of_terms([(m, c) for m,c in l.items() if c != 0])
class DualPBWBasis(CombinatorialFreeModule):
r"""
The basis dual to the Poincare-Birkhoff-Witt basis of the free algebra.
We recursively define the dual PBW basis as the basis of the
shuffle algebra given by
.. MATH::
S_w = \begin{cases}
w & |w| = 1, \\
x S_u & w = xu \text{ and } w \in \mathrm{Lyn}(X), \\
\displaystyle \frac{S_{\ell_{i_1}}^{\ast \alpha_1} \ast \cdots
\ast S_{\ell_{i_k}}^{\ast \alpha_k}}{\alpha_1! \cdots \alpha_k!} &
w = \ell_{i_1}^{\alpha_1} \cdots \ell_{i_k}^{\alpha_k} \text{ with }
\ell_1 > \cdots > \ell_k \in \mathrm{Lyn}(X).
\end{cases}
where `S \ast T` denotes the shuffle product of `S` and `T` and
`\mathrm{Lyn}(X)` is the set of Lyndon words in the alphabet `X`.
The definition may be found in Theorem 5.3 of [Reuten1993]_.
INPUT:
- ``R`` -- ring
- ``names`` -- names of the generators (string or an alphabet)
REFERENCES:
.. [Reuten1993] C. Reutenauer. *Free Lie Algebras*. Number 7 in
London Math. Soc. Monogr. (N.S.). Oxford University Press. (1993).
EXAMPLES::
sage: S = ShuffleAlgebra(QQ, 'ab').dual_pbw_basis()
sage: S
The dual Poincare-Birkhoff-Witt basis of Shuffle Algebra on 2 generators ['a', 'b'] over Rational Field
sage: S.one()
S[word: ]
sage: S.one_basis()
word:
sage: T = ShuffleAlgebra(QQ, 'abcd').dual_pbw_basis(); T
The dual Poincare-Birkhoff-Witt basis of Shuffle Algebra on 4 generators ['a', 'b', 'c', 'd'] over Rational Field
sage: T.algebra_generators()
(S[word: a], S[word: b], S[word: c], S[word: d])
TESTS:
We check conversion between the bases::
sage: A = ShuffleAlgebra(QQ, 'ab')
sage: S = A.dual_pbw_basis()
sage: W = Words('ab', 5)
sage: all(S(A(S(w))) == S(w) for w in W)
True
sage: all(A(S(A(w))) == A(w) for w in W)
True
"""
@staticmethod
def __classcall_private__(cls, R, names):
"""
Normalize input to ensure a unique representation.
EXAMPLES::
sage: from sage.algebras.shuffle_algebra import DualPBWBasis
sage: D1 = DualPBWBasis(QQ, 'ab')
sage: D2 = DualPBWBasis(QQ, Alphabet('ab'))
sage: D1 is D2
True
"""
return super(DualPBWBasis, cls).__classcall__(cls, R, Alphabet(names))
def __init__(self, R, names):
"""
Initialize ``self``.
EXAMPLES::
sage: D = ShuffleAlgebra(QQ, 'ab').dual_pbw_basis()
sage: TestSuite(D).run()
"""
self._alphabet = names
self._alg = ShuffleAlgebra(R, names)
CombinatorialFreeModule.__init__(self, R, Words(names), prefix='S',
category=(AlgebrasWithBasis(R), CommutativeAlgebras(R), CoalgebrasWithBasis(R)))
def _repr_(self):
"""
Return a string representation of ``self``.
EXAMPLES::
sage: ShuffleAlgebra(QQ, 'ab').dual_pbw_basis()
The dual Poincare-Birkhoff-Witt basis of Shuffle Algebra on 2 generators ['a', 'b'] over Rational Field
"""
return "The dual Poincare-Birkhoff-Witt basis of {}".format(self._alg)
def _element_constructor_(self, x):
"""
Construct an element of ``self`` from ``x``.
EXAMPLES::
sage: A = ShuffleAlgebra(QQ, 'ab')
sage: S = A.dual_pbw_basis()
sage: S('abaab')
S[word: abaab]
sage: S(Word('aba'))
S[word: aba]
sage: S(A('ab'))
S[word: ab]
"""
if isinstance(x, (str, FiniteWord_class)):
W = self.basis().keys()
x = W(x)
elif isinstance(x.parent(), ShuffleAlgebra):
return self._alg.to_dual_pbw_element(self._alg(x))
return super(DualPBWBasis, self)._element_constructor_(x)
def _coerce_map_from_(self, R):
"""
Return ``True`` if there is a coercion from ``R`` into ``self`` and
``False`` otherwise. The things that coerce into ``self`` are:
- Anything that coerces into the associated shuffle algebra of ``self``
TESTS::
sage: F = ShuffleAlgebra(ZZ, 'xyz').dual_pbw_basis()
sage: G = ShuffleAlgebra(QQ, 'xyz').dual_pbw_basis()
sage: H = ShuffleAlgebra(ZZ, 'y').dual_pbw_basis()
sage: F._coerce_map_from_(G)
False
sage: G._coerce_map_from_(F)
True
sage: F._coerce_map_from_(H)
False
sage: F._coerce_map_from_(QQ)
False
sage: G._coerce_map_from_(QQ)
True
sage: F.has_coerce_map_from(PolynomialRing(ZZ, 3, 'x,y,z'))
False
sage: F._coerce_map_from_(F._alg)
True
"""
return self._alg.has_coerce_map_from(R)
def one_basis(self):
"""
        Return the index of the basis element `1` of ``self``.
EXAMPLES::
sage: S = ShuffleAlgebra(QQ, 'ab').dual_pbw_basis()
sage: S.one_basis()
word:
"""
W = self.basis().keys()
return W([])
def algebra_generators(self):
"""
Return the algebra generators of ``self``.
EXAMPLES::
sage: S = ShuffleAlgebra(QQ, 'ab').dual_pbw_basis()
sage: S.algebra_generators()
(S[word: a], S[word: b])
"""
W = self.basis().keys()
return tuple(self.monomial(W(a)) for a in self._alphabet)
gens = algebra_generators
def gen(self, i):
"""
Return the ``i``-th generator of ``self``.
EXAMPLES::
sage: S = ShuffleAlgebra(QQ, 'ab').dual_pbw_basis()
sage: S.gen(0)
S[word: a]
sage: S.gen(1)
S[word: b]
"""
return self.algebra_generators()[i]
def shuffle_algebra(self):
"""
Return the associated shuffle algebra of ``self``.
EXAMPLES::
sage: S = ShuffleAlgebra(QQ, 'ab').dual_pbw_basis()
sage: S.shuffle_algebra()
Shuffle Algebra on 2 generators ['a', 'b'] over Rational Field
"""
return self._alg
def product(self, u, v):
"""
Return the product of two elements ``u`` and ``v``.
EXAMPLES::
sage: S = ShuffleAlgebra(QQ, 'ab').dual_pbw_basis()
sage: a,b = S.gens()
sage: S.product(a, b)
S[word: ba]
sage: S.product(b, a)
S[word: ba]
sage: S.product(b^2*a, a*b*a)
36*S[word: bbbaaa]
TESTS:
Check that multiplication agrees with the multiplication in the
shuffle algebra::
sage: A = ShuffleAlgebra(QQ, 'ab')
sage: S = A.dual_pbw_basis()
sage: a,b = S.gens()
sage: A(a*b)
B[word: ab] + B[word: ba]
sage: A(a*b*a)
2*B[word: aab] + 2*B[word: aba] + 2*B[word: baa]
sage: S(A(a)*A(b)*A(a)) == a*b*a
True
"""
return self(self.expansion(u) * self.expansion(v))
@lazy_attribute
def expansion(self):
"""
        Return the morphism that expands elements of ``self`` into words
        of the shuffle algebra.
EXAMPLES::
sage: S = ShuffleAlgebra(QQ, 'ab').dual_pbw_basis()
sage: f = S('ab') + S('aba')
sage: S.expansion(f)
2*B[word: aab] + B[word: ab] + B[word: aba]
"""
return self.module_morphism(self.expansion_on_basis, codomain=self._alg)
@cached_method
def expansion_on_basis(self, w):
r"""
Return the expansion of `S_w` in words of the shuffle algebra.
INPUT:
- ``w`` -- a word
EXAMPLES::
sage: S = ShuffleAlgebra(QQ, 'ab').dual_pbw_basis()
sage: S.expansion_on_basis(Word())
B[word: ]
sage: S.expansion_on_basis(Word()).parent()
Shuffle Algebra on 2 generators ['a', 'b'] over Rational Field
sage: S.expansion_on_basis(Word('abba'))
2*B[word: aabb] + B[word: abab] + B[word: abba]
sage: S.expansion_on_basis(Word())
B[word: ]
sage: S.expansion_on_basis(Word('abab'))
2*B[word: aabb] + B[word: abab]
"""
from sage.functions.other import factorial
if len(w) == 0:
return self._alg.one()
if len(w) == 1:
return self._alg.monomial(w)
if w.is_lyndon():
W = self.basis().keys()
letter = W(w[0])
expansion = self.expansion_on_basis(W(w[1:]))
return self._alg.sum_of_terms([(letter * i, c) for i,c in expansion])
lf = w.lyndon_factorization()
powers = {}
for i in lf:
powers[i] = powers.get(i, 0) + 1
denom = prod(factorial(p) for p in powers.values())
result = self._alg.prod(self.expansion_on_basis(i) for i in lf)
return self._alg(result / denom)
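        # Editor's illustration (hedged, consistent with the doctests above): the
        # word 'abab' is not Lyndon; its Lyndon factorization is (ab)(ab), so
        # ``powers`` is {ab: 2} and ``denom`` is 2!.  Since S_ab expands to B[ab],
        # and B[ab] shuffled with itself equals 4*B[aabb] + 2*B[abab], dividing by
        # 2 recovers 2*B[aabb] + B[abab], as in the ``Word('abab')`` doctest above.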
class Element(CombinatorialFreeModule.Element):
"""
An element in the dual PBW basis.
"""
def expand(self):
"""
Expand ``self`` in words of the shuffle algebra.
EXAMPLES::
sage: S = ShuffleAlgebra(QQ, 'ab').dual_pbw_basis()
sage: f = S('ab') + S('bab')
sage: f.expand()
B[word: ab] + 2*B[word: abb] + B[word: bab]
"""
return self.parent().expansion(self)
| 33.168552
| 355
| 0.531735
|
794b8f2c7501f87d3d62fb367b0c150c2b8b7969
| 2,075
|
py
|
Python
|
roll_dice.py
|
Karins-Coils/CastleDice
|
66397b1d50da89aaf1ba69ea33bc58e0eba43d73
|
[
"MIT"
] | 2
|
2015-09-17T16:36:27.000Z
|
2015-09-17T17:01:03.000Z
|
roll_dice.py
|
Karins-Coils/CastleDice
|
66397b1d50da89aaf1ba69ea33bc58e0eba43d73
|
[
"MIT"
] | 9
|
2019-12-04T21:49:50.000Z
|
2021-10-02T09:08:01.000Z
|
roll_dice.py
|
khawley/CastleDice
|
66397b1d50da89aaf1ba69ea33bc58e0eba43d73
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2.7
import random
# Resources
## Building Materials
Wood = 'wood'
Stone = 'stone'
Gold = 'gold'
Land = 'land'
Iron = 'iron'
## Animals
Horse = 'horse'
Pig = 'pig'
Cow = 'cow'
Chicken = 'chicken'
## Lone Barbarian
Barbarian = 'barbarian'
class Die:
    resource = ""  # tracks what kind of die is being rolled: Wood, Stone, Gold, etc.
sides = []
debug = False
def __init__(self, resource, debug = False):
self.resource = resource
self.debug = debug
self.create_die()
def __str__(self):
        sides = ", ".join(str(side) for side in self.sides)
        return self.resource + ": [ " + sides + " ]"
def create_die(self):
if self.resource == Wood:
self.sides = [(Wood, 1), (Wood, 1), (Wood, 2), (Wood, 3), (Cow, 1), (Barbarian, 1)]
elif self.resource == Stone:
self.sides = [(Stone, 1), (Stone, 1), (Stone, 2), (Stone, 2), (Chicken, 1), (Barbarian, 1)]
elif self.resource == Gold:
self.sides = [(Gold, 1), (Gold, 1), (Gold, 1), (Gold, 2), (Horse, 1), (Barbarian, 1)]
elif self.resource == Land:
self.sides = [(Land, 1), (Land, 1), (Land, 2), (Pig, 1), (Pig, 1), (Barbarian, 1)]
elif self.resource == Iron:
self.sides = [(Iron, 1), (Iron, 2), (Pig, 1), (Horse, 1), (Chicken, 1), (Barbarian, 1)]
        else:
            # Fallback for unrecognized resources: keep (resource, value) pairs so
            # roll_die() can unpack them the same way as the named dice above.
            self.sides = [(self.resource, i) for i in range(1, 7)]
self.resource = self.resource.capitalize()
def roll_die(self):
roll = random.randint(0,5)
if self.debug: print "rolled " + str(roll) + "\n"
resource, count = self.sides[roll]
return resource, count
def roll_multiple_die(dieSet):
for d in dieSet:
print d.resource +": "+ str(d.roll_die())
def roll_and_total_dice(dieSet):
r_rolled = {}
for d in dieSet:
r, count = d.roll_die()
print d.resource +" die: "+ str(count) +" "+ r
        r_rolled[r] = r_rolled.get(r, 0) + count
print r_rolled
dieSet = [
Die(Wood),
Die(Stone),
Die(Gold),
Die(Land),
Die(Iron),
Die(Wood),
Die(Stone),
Die(Wood)
]
roll_multiple_die(dieSet)
print
print
roll_and_total_dice(dieSet)
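# Editor's note (hedged): roll_die() uses random.randint, so re-running this
# script produces different totals each time; for a reproducible demonstration
# one could call random.seed(0) (or any fixed seed) before building dieSet above.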
| 22.554348
| 94
| 0.618313
|
794b906d5f535d643589f28dbfef3431a8f0adc6
| 1,309
|
py
|
Python
|
test/test_page_dto_uploaded_file_dto.py
|
unofficial-memsource/memsource-cli-client
|
a6639506b74e95476da87f4375953448b76ea90c
|
[
"Apache-2.0"
] | 16
|
2019-09-25T00:20:38.000Z
|
2021-05-04T05:56:10.000Z
|
test/test_page_dto_uploaded_file_dto.py
|
zerodayz/memsource-cli-client
|
c2574f1467539a49e6637c874e88d75c7ef789b3
|
[
"Apache-2.0"
] | 26
|
2019-09-30T14:00:03.000Z
|
2021-05-12T11:15:18.000Z
|
test/test_page_dto_uploaded_file_dto.py
|
zerodayz/memsource-cli-client
|
c2574f1467539a49e6637c874e88d75c7ef789b3
|
[
"Apache-2.0"
] | 1
|
2021-05-24T16:19:14.000Z
|
2021-05-24T16:19:14.000Z
|
# coding: utf-8
"""
Memsource REST API
Welcome to Memsource's API documentation. To view our legacy APIs please [visit our documentation](https://wiki.memsource.com/wiki/Memsource_API) and for more information about our new APIs, [visit our blog](https://www.memsource.com/blog/2017/10/24/introducing-rest-apis-qa-with-the-memsource-api-team/). If you have any questions, please contact [Memsource Support](<mailto:support@memsource.com>). # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import memsource_cli
from memsource_cli.models.page_dto_uploaded_file_dto import PageDtoUploadedFileDto # noqa: E501
from memsource_cli.rest import ApiException
class TestPageDtoUploadedFileDto(unittest.TestCase):
"""PageDtoUploadedFileDto unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testPageDtoUploadedFileDto(self):
"""Test PageDtoUploadedFileDto"""
# FIXME: construct object with mandatory attributes with example values
# model = memsource_cli.models.page_dto_uploaded_file_dto.PageDtoUploadedFileDto() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 31.926829
| 421
| 0.741024
|
794b910ddc914b9c51b0b2152bcbad42d6c7fcc8
| 948
|
py
|
Python
|
src/models/hierarchical_attention/train_han.py
|
kmanchel/BiasNet
|
caef2e4f7db0a64ae528b5428a7123b3061481d2
|
[
"MIT"
] | 1
|
2020-11-09T01:26:14.000Z
|
2020-11-09T01:26:14.000Z
|
src/models/hierarchical_attention/train_han.py
|
kmanchel/BiasNet
|
caef2e4f7db0a64ae528b5428a7123b3061481d2
|
[
"MIT"
] | null | null | null |
src/models/hierarchical_attention/train_han.py
|
kmanchel/BiasNet
|
caef2e4f7db0a64ae528b5428a7123b3061481d2
|
[
"MIT"
] | 1
|
2021-03-29T22:16:56.000Z
|
2021-03-29T22:16:56.000Z
|
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/../")
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/../../preprocess/")
import logging
from logging import getLogger, DEBUG, INFO, WARNING, ERROR
logging.basicConfig()
logging.root.setLevel(INFO)
import pdb
from utils import Params
from han import HAN_Model
pipeline_params = Params(
"/home/kmanchel/Documents/GitHub/BiasNet/results/hierarchical_attention/params.json"
)
train_mode = True
device = "/device:GPU:3"
model = HAN_Model(pipeline_params, train_mode)
train_params = {
"initiate_model": True,
"device": device,
"learning_rate": 0.001,
"decay": 0.0001,
"dropout": 0.25,
"num_epochs": 3,
"metric": "Accuracy",
"check_every": 50,
"val_batch_num": 1,
}
pdb.set_trace()
tf_model, history = model.train(train_params)
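# Editor's note (hedged): the absolute params.json path and the pdb.set_trace()
# call above are machine-specific debugging artifacts; they would normally be
# parameterised or removed before running this training script elsewhere.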
| 26.333333
| 88
| 0.727848
|
794b923604ab860b1209ac9ec64b986db3cf80df
| 4,233
|
py
|
Python
|
dogechia/consensus/block_record.py
|
hagbardcelene/doge-chia
|
72bdf0a7b20a579fe4645f0cb132955e181e1c44
|
[
"Apache-2.0"
] | 27
|
2021-07-06T16:33:50.000Z
|
2022-02-19T21:11:25.000Z
|
dogechia/consensus/block_record.py
|
hagbardcelene/doge-chia
|
72bdf0a7b20a579fe4645f0cb132955e181e1c44
|
[
"Apache-2.0"
] | 15
|
2021-07-07T02:32:59.000Z
|
2021-10-15T21:19:51.000Z
|
dogechia/consensus/block_record.py
|
hagbardcelene/doge-chia
|
72bdf0a7b20a579fe4645f0cb132955e181e1c44
|
[
"Apache-2.0"
] | 12
|
2021-07-08T15:36:20.000Z
|
2022-03-15T08:34:01.000Z
|
from dataclasses import dataclass
from typing import List, Optional
from dogechia.consensus.constants import ConsensusConstants
from dogechia.consensus.pot_iterations import calculate_ip_iters, calculate_sp_iters
from dogechia.types.blockchain_format.classgroup import ClassgroupElement
from dogechia.types.blockchain_format.coin import Coin
from dogechia.types.blockchain_format.sized_bytes import bytes32
from dogechia.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from dogechia.util.ints import uint8, uint32, uint64, uint128
from dogechia.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class BlockRecord(Streamable):
"""
This class is not included or hashed into the blockchain, but it is kept in memory as a more
efficient way to maintain data about the blockchain. This allows us to validate future blocks,
    difficulty adjustments, etc., without saving the whole header block in memory.
"""
header_hash: bytes32
prev_hash: bytes32 # Header hash of the previous block
height: uint32
weight: uint128 # Total cumulative difficulty of all ancestor blocks since genesis
total_iters: uint128 # Total number of VDF iterations since genesis, including this block
signage_point_index: uint8
challenge_vdf_output: ClassgroupElement # This is the intermediary VDF output at ip_iters in challenge chain
infused_challenge_vdf_output: Optional[
ClassgroupElement
] # This is the intermediary VDF output at ip_iters in infused cc, iff deficit <= 3
reward_infusion_new_challenge: bytes32 # The reward chain infusion output, input to next VDF
challenge_block_info_hash: bytes32 # Hash of challenge chain data, used to validate end of slots in the future
sub_slot_iters: uint64 # Current network sub_slot_iters parameter
pool_puzzle_hash: bytes32 # Need to keep track of these because Coins are created in a future block
farmer_puzzle_hash: bytes32
required_iters: uint64 # The number of iters required for this proof of space
deficit: uint8 # A deficit of 16 is an overflow block after an infusion. Deficit of 15 is a challenge block
overflow: bool
prev_transaction_block_height: uint32
# Transaction block (present iff is_transaction_block)
timestamp: Optional[uint64]
prev_transaction_block_hash: Optional[bytes32] # Header hash of the previous transaction block
fees: Optional[uint64]
reward_claims_incorporated: Optional[List[Coin]]
# Slot (present iff this is the first SB in sub slot)
finished_challenge_slot_hashes: Optional[List[bytes32]]
finished_infused_challenge_slot_hashes: Optional[List[bytes32]]
finished_reward_slot_hashes: Optional[List[bytes32]]
# Sub-epoch (present iff this is the first SB after sub-epoch)
sub_epoch_summary_included: Optional[SubEpochSummary]
@property
def is_transaction_block(self) -> bool:
return self.timestamp is not None
@property
def first_in_sub_slot(self) -> bool:
return self.finished_challenge_slot_hashes is not None
def is_challenge_block(self, constants: ConsensusConstants) -> bool:
return self.deficit == constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1
def sp_sub_slot_total_iters(self, constants: ConsensusConstants) -> uint128:
if self.overflow:
return uint128(self.total_iters - self.ip_iters(constants) - self.sub_slot_iters)
else:
return uint128(self.total_iters - self.ip_iters(constants))
def ip_sub_slot_total_iters(self, constants: ConsensusConstants) -> uint128:
return uint128(self.total_iters - self.ip_iters(constants))
def sp_iters(self, constants: ConsensusConstants) -> uint64:
return calculate_sp_iters(constants, self.sub_slot_iters, self.signage_point_index)
def ip_iters(self, constants: ConsensusConstants) -> uint64:
return calculate_ip_iters(
constants,
self.sub_slot_iters,
self.signage_point_index,
self.required_iters,
)
def sp_total_iters(self, constants: ConsensusConstants):
return self.sp_sub_slot_total_iters(constants) + self.sp_iters(constants)
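    # Editor's sketch (hedged reading of the methods above): for a non-overflow
    # block, sp_sub_slot_total_iters equals ip_sub_slot_total_iters, namely
    # total_iters - ip_iters, and sp_total_iters adds sp_iters on top of that;
    # an overflow block's signage point lies one sub-slot earlier, hence the
    # extra "- self.sub_slot_iters" term in sp_sub_slot_total_iters.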
| 47.033333
| 115
| 0.762343
|
794b9276cd797858634f25c0e8802569d1146f83
| 7,564
|
py
|
Python
|
test/test_dataset.py
|
LeoPaoli/sankeyview
|
99f21a7d90888a85bb5aac2f57e9c72bd9de4a8a
|
[
"MIT"
] | 1
|
2021-08-05T09:52:20.000Z
|
2021-08-05T09:52:20.000Z
|
test/test_dataset.py
|
LeoPaoli/sankeyview
|
99f21a7d90888a85bb5aac2f57e9c72bd9de4a8a
|
[
"MIT"
] | null | null | null |
test/test_dataset.py
|
LeoPaoli/sankeyview
|
99f21a7d90888a85bb5aac2f57e9c72bd9de4a8a
|
[
"MIT"
] | null | null | null |
import pytest
import pandas as pd
from sankeyview.dataset import Dataset, eval_selection
from sankeyview.sankey_definition import ProcessGroup, Bundle, Elsewhere
def _dataset():
dim_process = pd.DataFrame.from_records(
[
('a1', 'a'),
('a2', 'a'),
('b', 'b'),
('c', 'c'),
],
columns=['id', 'function']).set_index('id')
dim_material = pd.DataFrame.from_records([
('m1', 'type1'),
('m2', 'type2'),
], columns=['id', 'type']).set_index('id')
dim_time = pd.DataFrame.from_records([
('t1', 'August'),
('t2', 'March'),
], columns=['id', 'month']).set_index('id')
flows = pd.DataFrame.from_records(
[
('a1', 'b', 'm1', 't1', 3),
('a2', 'b', 'm2', 't1', 4),
('b', 'c', 'm1', 't1', 3),
('b', 'c', 'm2', 't1', 4),
],
columns=['source', 'target', 'material', 'time', 'value'])
return Dataset(flows, dim_process, dim_material, dim_time)
def test_dataset_joins_tables():
d = _dataset()
assert len(d._table.index) == 4
assert set(d._table.columns) == {'source', 'target', 'material', 'time', 'value',
'source.function', 'target.function',
'material.type', 'time.month'}
def test_dataset_checks_dim_tables_have_unique_index():
dim_time = pd.DataFrame.from_records([
('same_id', 'August'),
('same_id', 'March'),
], columns=['id', 'month']).set_index('id')
flows = pd.DataFrame.from_records(
[
('a1', 'b', 'same_id', 3),
],
columns=['source', 'target', 'time', 'value'])
with pytest.raises(ValueError):
Dataset(flows, dim_time=dim_time)
def test_selection_list():
"""ProcessGroup selection can be a list -> ids"""
d = _dataset()
assert list(eval_selection(d._flows, 'source', ['a1', 'a2'])) \
== [True, True, False, False]
assert list(eval_selection(d._flows, 'target', ['c'])) \
== [False, False, True, True]
def test_selection_string():
"""ProcessGroup selection can be a string -> pandas eval"""
d = _dataset()
assert list(eval_selection(d._table, 'source', 'function == "a"')) \
== [True, True, False, False]
q = 'function == "a" and id in ["a1"]'
assert list(eval_selection(d._table, 'source', q)) \
== [True, False, False, False]
def test_dataset_only_includes_unused_flows_in_elsewhere_bundles():
# Bundle 0 should include flow 0, bundle 1 should include flow 1
nodes = {
'a': ProcessGroup(selection=['a']),
'x': ProcessGroup(selection=['x']),
}
bundles = {
0: Bundle('a', 'x'),
1: Bundle(Elsewhere, 'x'),
}
# Dataset
flows = pd.DataFrame.from_records(
[
('a', 'x', 'm', 1),
('b', 'x', 'm', 1),
],
columns=('source', 'target', 'material', 'value'))
dataset = Dataset(flows)
bundle_flows, _ = dataset.apply_view(nodes, bundles)
def get_source_target(b):
return [(row['source'], row['target'])
for i, row in bundle_flows[b].iterrows()]
assert get_source_target(0) == [('a', 'x')]
assert get_source_target(1) == [('b', 'x')]
# Check it works with duplicated flow index values (old bug)
flows.index = [0, 0]
dataset = Dataset(flows)
bundle_flows, _ = dataset.apply_view(nodes, bundles)
assert get_source_target(0) == [('a', 'x')]
assert get_source_target(1) == [('b', 'x')]
def test_unused_flows():
"""Unused flows are between *used* nodes
"""
# view definition:
# Elsewhere --> [a] --> Elsewhere
# Elsewhere --> [b] --> Elsewhere
#
# dataset:
# other --> a --> other
# other --> b --> other
# a --> b --> c
#
# The a --> b flow in the dataset is "unused"
# The b --> c flow is not unused since c isn't visible
#
nodes = {
'other': ProcessGroup(selection=['other']),
'a': ProcessGroup(selection=['a']),
'b': ProcessGroup(selection=['b']),
}
bundles = {
0: Bundle(Elsewhere, 'a'),
1: Bundle(Elsewhere, 'b'),
2: Bundle('a', Elsewhere),
3: Bundle('b', Elsewhere),
}
# Dataset
flows = pd.DataFrame.from_records(
[
('other', 'a', 'm', 1),
('other', 'b', 'm', 1),
('a', 'other', 'm', 1),
('b', 'other', 'm', 1),
('a', 'b', 'm', 1),
('b', 'c', 'm', 1),
],
columns=('source', 'target', 'material', 'value'))
dim_process = pd.DataFrame(
{'id': ['a', 'b', 'c', 'other']}).set_index('id')
dataset = Dataset(flows, dim_process)
bundle_flows, unused = dataset.apply_view(nodes, bundles)
def get_source_target(b):
return [(row['source'], row['target'])
for i, row in bundle_flows[b].iterrows()]
assert get_source_target(0) == [('other', 'a')]
assert get_source_target(1) == [('other', 'b'), ('a', 'b')]
assert get_source_target(2) == [('a', 'other'), ('a', 'b')]
assert get_source_target(3) == [('b', 'other'), ('b', 'c')]
assert len(unused) == 1
assert unused.iloc[0].equals(flows.iloc[4])
def test_internal_flows():
nodes = {
'a': ProcessGroup(selection=['a']),
'bcd': ProcessGroup(selection=['b', 'c', 'd']),
'e': ProcessGroup(selection=['e']),
}
bundles = {
0: Bundle('a', 'bcd'),
1: Bundle('bcd', 'e'),
2: Bundle('bcd', 'bcd', flow_selection='source == "c"'),
}
ordering = [['a'], ['bcd'], ['e']]
# Dataset
flows = pd.DataFrame.from_records(
[
('a', 'b', 'm', 4),
('b', 'c', 'm', 3),
('b', 'd', 'm', 1),
('c', 'b', 'm', 2),
('c', 'e', 'm', 1),
],
columns=('source', 'target', 'material', 'value'))
dataset = Dataset(flows)
bundle_flows, unused = dataset.apply_view(nodes, bundles)
def get_source_target(b):
return [(row['source'], row['target'], row['value'])
for i, row in bundle_flows[b].iterrows()]
assert get_source_target(0) == [('a', 'b', 4)]
assert get_source_target(1) == [('c', 'e', 1)]
assert get_source_target(2) == [('c', 'b', 2)]
assert len(unused) == 0
def test_internal_flows_elsewhere():
"""Internal flows should not be included in to/from Elsewhere bundles.
"""
# view definition:
# Elsewhere --> [a,b] --> Elsewhere
#
# dataset:
# other --> a --> b --> other
#
nodes = {
'other': ProcessGroup(selection=['other']),
'ab': ProcessGroup(selection=['a', 'b']),
}
bundles = {
0: Bundle(Elsewhere, 'ab'),
1: Bundle('ab', Elsewhere),
}
# Dataset
flows = pd.DataFrame.from_records(
[
('other', 'a', 'm', 1),
('a', 'b', 'm', 1),
('b', 'other', 'm', 1),
],
columns=('source', 'target', 'material', 'value'))
dim_process = pd.DataFrame({'id': ['a', 'b', 'other']}).set_index('id')
dataset = Dataset(flows, dim_process)
bundle_flows, unused = dataset.apply_view(nodes, bundles)
def get_source_target(b):
return [(row['source'], row['target'])
for i, row in bundle_flows[b].iterrows()]
assert get_source_target(0) == [('other', 'a')]
assert get_source_target(1) == [('b', 'other')]
assert len(unused) == 0
| 29.092308
| 85
| 0.515071
|
794b928e2339f08fce1207ccb4920eabb1314a7e
| 2,693
|
py
|
Python
|
tests/mytesttools.py
|
iwschris/ezodf2
|
061c4aa3f26e9157ad46155d8ce92db7187b0574
|
[
"MIT"
] | 4
|
2015-03-15T22:32:35.000Z
|
2019-12-23T12:13:13.000Z
|
tests/mytesttools.py
|
iwschris/ezodf2
|
061c4aa3f26e9157ad46155d8ce92db7187b0574
|
[
"MIT"
] | 3
|
2017-08-17T09:36:42.000Z
|
2021-12-13T19:43:28.000Z
|
tests/mytesttools.py
|
iwschris/ezodf2
|
061c4aa3f26e9157ad46155d8ce92db7187b0574
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#coding:utf-8
# Purpose: testing tools
# Created: 30.12.2010
# Copyright (C) 2010, Manfred Moitzi
# License: MIT
from __future__ import unicode_literals, print_function, division
__author__ = "mozman <mozman@gmx.at>"
import os
import random
from lxml import etree
def in_XML(source, target):
for element in source.strip().split():
if element not in target:
return False
return True
def getdatafile(filename):
return os.path.join(os.path.dirname(__file__), "data", filename)
SPECFILE = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'specs', 'OpenDocument-v1.1.odt'))
SPECFILE_EXISTS = os.path.exists(SPECFILE)
def get_n_random_tags(count, tags):
return (random.choice(tags) for _ in range(count))
def create_node(tags):
nodes = (etree.Element(tag, num=str(num)) for num, tag in enumerate(tags))
root = etree.Element('root')
root.extend(nodes)
return root
class SimpleStructureChecker:
def __init__(self, prelude_tags, epilogue_tags):
self.prelude_tags = prelude_tags
self.epilogue_tags = epilogue_tags
def has_valid_structure(self, xmlnode):
def remove_prelude(nodes):
for tag in self.prelude_tags:
remove_from_head(tag, nodes)
def remove_from_head(tag, nodes):
while nodes[0].tag == tag:
nodes.pop(0)
def remove_epilogue(nodes):
for tag in reversed(self.epilogue_tags):
remove_from_tail(tag, nodes)
def remove_from_tail(tag, nodes):
while nodes[-1].tag == tag:
nodes.pop()
def has_tags(tags, nodes):
def has_tag(tag):
for node in nodes:
if node.tag == tag:
return True
return False
for tag in tags:
if has_tag(tag):
return True
return False
def is_in_creation_order(nodes):
sorted_nodes = sorted(nodes, key=lambda n: int(n.get('num')))
for node1, node2 in zip(nodes, sorted_nodes):
if node1.tag != node2.tag or \
node1.get('num') != node2.get('num'):
return False
return True
nodes = xmlnode.getchildren()
remove_prelude(nodes)
if has_tags(self.prelude_tags, nodes):
return False
remove_epilogue(nodes)
if has_tags(self.epilogue_tags, nodes):
return False
return is_in_creation_order(nodes)
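if __name__ == "__main__":
    # Editor's sketch (hedged): a minimal, self-contained demonstration of
    # SimpleStructureChecker using made-up tag names, not taken from the suite.
    checker = SimpleStructureChecker(prelude_tags=["prelude"], epilogue_tags=["epilogue"])
    good = create_node(["prelude", "body", "epilogue"])
    bad = create_node(["body", "prelude", "epilogue"])
    print(checker.has_valid_structure(good))  # expected: True
    print(checker.has_valid_structure(bad))   # expected: False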
| 30.258427
| 118
| 0.583364
|
794b92e0626174e5012f3c35d354037da1636d30
| 732
|
py
|
Python
|
addon-sdk-1.17/python-lib/cuddlefish/templates.py
|
hankduan/firefoxExtension
|
a5fd86ef024a5ed21e039eb2f4b50fb6d0cf3567
|
[
"MIT"
] | 102
|
2015-01-09T22:12:00.000Z
|
2021-04-21T01:18:51.000Z
|
addon-sdk-1.17/python-lib/cuddlefish/templates.py
|
hankduan/firefoxExtension
|
a5fd86ef024a5ed21e039eb2f4b50fb6d0cf3567
|
[
"MIT"
] | 17
|
2015-01-24T22:30:47.000Z
|
2020-11-19T01:13:32.000Z
|
addon-sdk-1.17/python-lib/cuddlefish/templates.py
|
hankduan/firefoxExtension
|
a5fd86ef024a5ed21e039eb2f4b50fb6d0cf3567
|
[
"MIT"
] | 33
|
2015-01-15T16:11:15.000Z
|
2021-06-11T12:15:29.000Z
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Template used by test-main.js
TEST_MAIN_JS = '''\
var main = require("./main");
exports["test main"] = function(assert) {
assert.pass("Unit test running!");
};
exports["test main async"] = function(assert, done) {
assert.pass("async Unit test running!");
done();
};
require("sdk/test").run(exports);
'''
# Template used by package.json
PACKAGE_JSON = '''\
{
"name": "%(name)s",
"title": "%(title)s",
"id": "%(id)s",
"description": "a basic add-on",
"author": "",
"license": "MPL 2.0",
"version": "0.1"
}
'''
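# Editor's illustration (hedged, not part of the original file): the %(...)s
# placeholders in PACKAGE_JSON are meant for old-style dict interpolation, e.g.
#
#     PACKAGE_JSON % {"name": "my-addon", "title": "My Addon", "id": "my-addon@example"}
#
# which fills in name, title and id while leaving the remaining fields as-is.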
| 22.181818
| 69
| 0.628415
|
794b92f6fa3a20681cce2b7f994700d56cde2064
| 284
|
py
|
Python
|
basic/if_or.py
|
shilpasayura/python
|
8f3b2432f972c9aeb4b04e2141ea4abf2437762c
|
[
"MIT"
] | 1
|
2021-10-07T15:15:01.000Z
|
2021-10-07T15:15:01.000Z
|
basic/if_or.py
|
shilpasayura/python
|
8f3b2432f972c9aeb4b04e2141ea4abf2437762c
|
[
"MIT"
] | null | null | null |
basic/if_or.py
|
shilpasayura/python
|
8f3b2432f972c9aeb4b04e2141ea4abf2437762c
|
[
"MIT"
] | null | null | null |
name1 = 'Fito'
name2 = 'Ben'
name3 = 'Ruby'
name4 = 'Nish'
name5 = 'Nito'
name = input("Enter your name: ")
if name == name1 or name == name2 or name == name3 or name == name4 or name == name5:
print("I know you!")
else:
print("Sorry, ", name, "I don't know who you are :(")
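# Editor's note (hedged): a more idiomatic way to express the same check is a
# membership test against a collection of known names, for example:
#
#     known_names = {name1, name2, name3, name4, name5}
#     if name in known_names:
#         print("I know you!")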
| 23.666667
| 85
| 0.598592
|
794b933cf28f5b9a094c2eac4cde57b3238ab70c
| 2,514
|
py
|
Python
|
qiskit/transpiler/passes/layout/apply_layout.py
|
Elliot-Coupe/qiskit-terra
|
8a604e156ba4c2fa099b1c24cd941f59b9408398
|
[
"Apache-2.0"
] | 1
|
2021-07-06T09:07:47.000Z
|
2021-07-06T09:07:47.000Z
|
qiskit/transpiler/passes/layout/apply_layout.py
|
Elliot-Coupe/qiskit-terra
|
8a604e156ba4c2fa099b1c24cd941f59b9408398
|
[
"Apache-2.0"
] | 1
|
2019-10-03T12:22:41.000Z
|
2019-10-03T12:22:41.000Z
|
qiskit/transpiler/passes/layout/apply_layout.py
|
Elliot-Coupe/qiskit-terra
|
8a604e156ba4c2fa099b1c24cd941f59b9408398
|
[
"Apache-2.0"
] | null | null | null |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Transform a circuit with virtual qubits into a circuit with physical qubits."""
from qiskit.circuit import QuantumRegister
from qiskit.dagcircuit import DAGCircuit
from qiskit.transpiler.basepasses import TransformationPass
from qiskit.transpiler.exceptions import TranspilerError
class ApplyLayout(TransformationPass):
"""Transform a circuit with virtual qubits into a circuit with physical qubits.
Transforms a DAGCircuit with virtual qubits into a DAGCircuit with physical qubits
by applying the Layout given in `property_set`.
    Requires a preceding pass that sets/selects the Layout, e.g. `SetLayout` or
    `TrivialLayout`, and assumes the Layout covers all physical qubits
    (including ancillas).
"""
def run(self, dag):
"""Run the ApplyLayout pass on `dag`.
Args:
dag (DAGCircuit): DAG to map.
Returns:
DAGCircuit: A mapped DAG (with physical qubits).
Raises:
TranspilerError: if no layout is found in `property_set` or no full physical qubits.
"""
layout = self.property_set["layout"]
if not layout:
raise TranspilerError(
"No 'layout' is found in property_set. Please run a Layout pass in advance."
)
if len(layout) != (1 + max(layout.get_physical_bits())):
raise TranspilerError("The 'layout' must be full (with ancilla).")
for qreg in dag.qregs.values():
self.property_set["layout"].add_register(qreg)
q = QuantumRegister(len(layout), "q")
new_dag = DAGCircuit()
new_dag.add_qreg(q)
new_dag.metadata = dag.metadata
new_dag.add_clbits(dag.clbits)
for creg in dag.cregs.values():
new_dag.add_creg(creg)
for node in dag.topological_op_nodes():
if node.type == "op":
qargs = [q[layout[qarg]] for qarg in node.qargs]
new_dag.apply_operation_back(node.op, qargs, node.cargs)
new_dag._global_phase = dag._global_phase
return new_dag
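# --- Editor's usage sketch (hedged) ------------------------------------------
# A minimal illustration, assuming a reasonably recent Qiskit API, of the usual
# pipeline around ApplyLayout: a layout is chosen (SetLayout), completed with
# ancillas (FullAncillaAllocation + EnlargeWithAncilla), and only then applied.
if __name__ == "__main__":
    from qiskit import QuantumCircuit
    from qiskit.transpiler import CouplingMap, Layout, PassManager
    from qiskit.transpiler.passes import (
        EnlargeWithAncilla,
        FullAncillaAllocation,
        SetLayout,
    )

    circuit = QuantumCircuit(2)
    circuit.h(0)
    circuit.cx(0, 1)

    coupling = CouplingMap([[0, 1], [1, 2]])  # three physical qubits in a line
    layout = Layout({circuit.qubits[0]: 2, circuit.qubits[1]: 1})

    pm = PassManager([
        SetLayout(layout),                # record the chosen virtual->physical map
        FullAncillaAllocation(coupling),  # extend the layout to all physical qubits
        EnlargeWithAncilla(),             # add the ancilla qubits to the DAG
        ApplyLayout(),                    # rewrite the circuit onto physical qubits
    ])
    print(pm.run(circuit))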
| 36.970588
| 96
| 0.67144
|
794b9341f6af5f1fb87252e51fe40f4e0fc788ca
| 1,457
|
py
|
Python
|
src/algo/handle_simple.py
|
wengzilla/staketaxcsv
|
c52210106ec8194973f6ff178a307c77699e094e
|
[
"MIT"
] | 140
|
2021-12-11T23:37:46.000Z
|
2022-03-29T23:04:36.000Z
|
src/algo/handle_simple.py
|
wengzilla/staketaxcsv
|
c52210106ec8194973f6ff178a307c77699e094e
|
[
"MIT"
] | 80
|
2021-12-17T15:13:47.000Z
|
2022-03-31T13:33:53.000Z
|
src/algo/handle_simple.py
|
wengzilla/staketaxcsv
|
c52210106ec8194973f6ff178a307c77699e094e
|
[
"MIT"
] | 52
|
2021-12-12T00:37:17.000Z
|
2022-03-29T23:25:09.000Z
|
from algo import constants as co
from algo.export_tx import export_reward_tx
from algo.util_algo import get_transfer_asset, get_transfer_receiver
from common.make_tx import make_unknown_tx, make_unknown_tx_with_transfer
def handle_unknown(exporter, txinfo):
row = make_unknown_tx(txinfo)
exporter.ingest_row(row)
def handle_unknown_transactions(transactions, wallet_address, exporter, txinfo):
for transaction in transactions:
txtype = transaction["tx-type"]
if txtype == co.TRANSACTION_KEY_PAYMENT or txtype == co.TRANSACTION_TYPE_ASSET_TRANSFER:
txsender = transaction["sender"]
txreceiver = get_transfer_receiver(transaction)
asset = get_transfer_asset(transaction)
if txsender == wallet_address:
row = make_unknown_tx_with_transfer(txinfo, asset.amount, asset.ticker, "", "")
exporter.ingest_row(row)
elif txreceiver == wallet_address:
row = make_unknown_tx_with_transfer(txinfo, "", "", asset.amount, asset.ticker)
exporter.ingest_row(row)
elif txtype == co.TRANSACTION_TYPE_APP_CALL:
inner_transactions = transaction.get("inner-txns", [])
handle_unknown_transactions(inner_transactions, wallet_address, exporter, txinfo)
def handle_participation_rewards(reward, exporter, txinfo):
export_reward_tx(exporter, txinfo, reward, comment="Participation Rewards")
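# Editor's note (hedged): handle_unknown_transactions recurses into the
# "inner-txns" list of application-call transactions, so transfers nested inside
# app calls are classified with the same sent/received logic as top-level
# payments and asset transfers.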
| 45.53125
| 96
| 0.717914
|
794b9410520b4758dbb0bc49b561db39fbc7973e
| 23,407
|
py
|
Python
|
notebooks/__code/ui_water_intake_profile.py
|
mabrahamdevops/python_notebooks
|
6d5e7383b60cc7fd476f6e85ab93e239c9c32330
|
[
"BSD-3-Clause"
] | null | null | null |
notebooks/__code/ui_water_intake_profile.py
|
mabrahamdevops/python_notebooks
|
6d5e7383b60cc7fd476f6e85ab93e239c9c32330
|
[
"BSD-3-Clause"
] | null | null | null |
notebooks/__code/ui_water_intake_profile.py
|
mabrahamdevops/python_notebooks
|
6d5e7383b60cc7fd476f6e85ab93e239c9c32330
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/Users/j35/git/python_notebooks/notebooks/ui/ui_water_intake_profile.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1420, 1035)
MainWindow.setMinimumSize(QtCore.QSize(0, 500))
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout_7 = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout_7.setObjectName("verticalLayout_7")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.splitter = QtWidgets.QSplitter(self.centralwidget)
self.splitter.setMidLineWidth(5)
self.splitter.setOrientation(QtCore.Qt.Vertical)
self.splitter.setOpaqueResize(True)
self.splitter.setHandleWidth(22)
self.splitter.setObjectName("splitter")
self.widget = QtWidgets.QWidget(self.splitter)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget.sizePolicy().hasHeightForWidth())
self.widget.setSizePolicy(sizePolicy)
self.widget.setMinimumSize(QtCore.QSize(0, 0))
self.widget.setObjectName("widget")
self.layoutWidget = QtWidgets.QWidget(self.splitter)
self.layoutWidget.setObjectName("layoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.verticalLayout_4 = QtWidgets.QVBoxLayout()
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.tableWidget = QtWidgets.QTableWidget(self.layoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.tableWidget.sizePolicy().hasHeightForWidth())
self.tableWidget.setSizePolicy(sizePolicy)
self.tableWidget.setMinimumSize(QtCore.QSize(0, 150))
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(3)
self.tableWidget.setRowCount(0)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(2, item)
self.verticalLayout_4.addWidget(self.tableWidget)
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem)
self.export_table_button = QtWidgets.QPushButton(self.layoutWidget)
self.export_table_button.setObjectName("export_table_button")
self.horizontalLayout_3.addWidget(self.export_table_button)
self.verticalLayout_4.addLayout(self.horizontalLayout_3)
self.verticalLayout.addLayout(self.verticalLayout_4)
self.horizontalLayout_2.addWidget(self.splitter)
self.verticalLayout_3 = QtWidgets.QVBoxLayout()
self.verticalLayout_3.setSizeConstraint(QtWidgets.QLayout.SetMinimumSize)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.groupBox_2 = QtWidgets.QGroupBox(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox_2.sizePolicy().hasHeightForWidth())
self.groupBox_2.setSizePolicy(sizePolicy)
self.groupBox_2.setMinimumSize(QtCore.QSize(150, 120))
self.groupBox_2.setMaximumSize(QtCore.QSize(150, 16777215))
self.groupBox_2.setObjectName("groupBox_2")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.groupBox_2)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.add_radioButton = QtWidgets.QRadioButton(self.groupBox_2)
self.add_radioButton.setChecked(True)
self.add_radioButton.setObjectName("add_radioButton")
self.verticalLayout_2.addWidget(self.add_radioButton)
self.mean_radioButton = QtWidgets.QRadioButton(self.groupBox_2)
self.mean_radioButton.setObjectName("mean_radioButton")
self.verticalLayout_2.addWidget(self.mean_radioButton)
self.median_radioButton = QtWidgets.QRadioButton(self.groupBox_2)
self.median_radioButton.setObjectName("median_radioButton")
self.verticalLayout_2.addWidget(self.median_radioButton)
self.verticalLayout_3.addWidget(self.groupBox_2)
self.groupBox_4 = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox_4.setMinimumSize(QtCore.QSize(150, 0))
self.groupBox_4.setMaximumSize(QtCore.QSize(150, 16777215))
self.groupBox_4.setObjectName("groupBox_4")
self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.groupBox_4)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.x_axis_integration_radioButton = QtWidgets.QRadioButton(self.groupBox_4)
self.x_axis_integration_radioButton.setEnabled(True)
self.x_axis_integration_radioButton.setChecked(True)
self.x_axis_integration_radioButton.setObjectName("x_axis_integration_radioButton")
self.verticalLayout_5.addWidget(self.x_axis_integration_radioButton)
self.y_axis_integration_radioButton = QtWidgets.QRadioButton(self.groupBox_4)
self.y_axis_integration_radioButton.setEnabled(True)
self.y_axis_integration_radioButton.setObjectName("y_axis_integration_radioButton")
self.verticalLayout_5.addWidget(self.y_axis_integration_radioButton)
self.verticalLayout_3.addWidget(self.groupBox_4)
self.groupBox_6 = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox_6.setMinimumSize(QtCore.QSize(150, 0))
self.groupBox_6.setMaximumSize(QtCore.QSize(150, 16777215))
self.groupBox_6.setObjectName("groupBox_6")
self.horizontalLayout_4 = QtWidgets.QHBoxLayout(self.groupBox_6)
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.label = QtWidgets.QLabel(self.groupBox_6)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(2)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
self.label.setMinimumSize(QtCore.QSize(50, 0))
self.label.setMaximumSize(QtCore.QSize(50, 16777215))
self.label.setObjectName("label")
self.horizontalLayout_4.addWidget(self.label)
self.rebin_spinBox = QtWidgets.QSpinBox(self.groupBox_6)
self.rebin_spinBox.setMinimum(1)
self.rebin_spinBox.setMaximum(10)
self.rebin_spinBox.setObjectName("rebin_spinBox")
self.horizontalLayout_4.addWidget(self.rebin_spinBox)
self.verticalLayout_3.addWidget(self.groupBox_6)
self.groupBox_5 = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox_5.setMinimumSize(QtCore.QSize(150, 0))
self.groupBox_5.setMaximumSize(QtCore.QSize(150, 16777215))
self.groupBox_5.setObjectName("groupBox_5")
self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.groupBox_5)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.sliding_average_checkBox = QtWidgets.QRadioButton(self.groupBox_5)
self.sliding_average_checkBox.setChecked(True)
self.sliding_average_checkBox.setObjectName("sliding_average_checkBox")
self.verticalLayout_6.addWidget(self.sliding_average_checkBox)
self.error_function_checkBox = QtWidgets.QRadioButton(self.groupBox_5)
self.error_function_checkBox.setObjectName("error_function_checkBox")
self.verticalLayout_6.addWidget(self.error_function_checkBox)
self.change_point_checkBox = QtWidgets.QRadioButton(self.groupBox_5)
self.change_point_checkBox.setObjectName("change_point_checkBox")
self.verticalLayout_6.addWidget(self.change_point_checkBox)
self.verticalLayout_3.addWidget(self.groupBox_5)
spacerItem1 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_3.addItem(spacerItem1)
self.horizontalLayout_2.addLayout(self.verticalLayout_3)
self.verticalLayout_7.addLayout(self.horizontalLayout_2)
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.groupBox_3 = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox_3.setObjectName("groupBox_3")
self.horizontalLayout_7 = QtWidgets.QHBoxLayout(self.groupBox_3)
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
self.sort_files_by_time_radioButton = QtWidgets.QRadioButton(self.groupBox_3)
self.sort_files_by_time_radioButton.setChecked(True)
self.sort_files_by_time_radioButton.setObjectName("sort_files_by_time_radioButton")
self.horizontalLayout_7.addWidget(self.sort_files_by_time_radioButton)
self.sort_files_by_name_radioButton = QtWidgets.QRadioButton(self.groupBox_3)
self.sort_files_by_name_radioButton.setObjectName("sort_files_by_name_radioButton")
self.horizontalLayout_7.addWidget(self.sort_files_by_name_radioButton)
self.time_between_runs_label = QtWidgets.QLabel(self.groupBox_3)
self.time_between_runs_label.setEnabled(False)
self.time_between_runs_label.setMaximumSize(QtCore.QSize(130, 16777215))
self.time_between_runs_label.setObjectName("time_between_runs_label")
self.horizontalLayout_7.addWidget(self.time_between_runs_label)
self.time_between_runs_spinBox = QtWidgets.QDoubleSpinBox(self.groupBox_3)
self.time_between_runs_spinBox.setEnabled(False)
self.time_between_runs_spinBox.setMaximumSize(QtCore.QSize(80, 16777215))
self.time_between_runs_spinBox.setDecimals(2)
self.time_between_runs_spinBox.setMinimum(0.01)
self.time_between_runs_spinBox.setMaximum(500.0)
self.time_between_runs_spinBox.setSingleStep(0.1)
self.time_between_runs_spinBox.setProperty("value", 1.0)
self.time_between_runs_spinBox.setObjectName("time_between_runs_spinBox")
self.horizontalLayout_7.addWidget(self.time_between_runs_spinBox)
self.time_between_runs_units_label = QtWidgets.QLabel(self.groupBox_3)
self.time_between_runs_units_label.setEnabled(False)
self.time_between_runs_units_label.setMinimumSize(QtCore.QSize(20, 0))
self.time_between_runs_units_label.setMaximumSize(QtCore.QSize(20, 16777215))
self.time_between_runs_units_label.setObjectName("time_between_runs_units_label")
self.horizontalLayout_7.addWidget(self.time_between_runs_units_label)
self.horizontalLayout_6.addWidget(self.groupBox_3)
self.groupBox = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox.setObjectName("groupBox")
self.horizontalLayout_5 = QtWidgets.QHBoxLayout(self.groupBox)
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.pixel_radioButton = QtWidgets.QRadioButton(self.groupBox)
self.pixel_radioButton.setChecked(True)
self.pixel_radioButton.setObjectName("pixel_radioButton")
self.horizontalLayout_5.addWidget(self.pixel_radioButton)
self.distance_radioButton = QtWidgets.QRadioButton(self.groupBox)
self.distance_radioButton.setObjectName("distance_radioButton")
self.horizontalLayout_5.addWidget(self.distance_radioButton)
self.water_intake_distance_label = QtWidgets.QLabel(self.groupBox)
self.water_intake_distance_label.setEnabled(False)
self.water_intake_distance_label.setObjectName("water_intake_distance_label")
self.horizontalLayout_5.addWidget(self.water_intake_distance_label)
self.pixel_size_spinBox = QtWidgets.QDoubleSpinBox(self.groupBox)
self.pixel_size_spinBox.setEnabled(False)
self.pixel_size_spinBox.setDecimals(3)
self.pixel_size_spinBox.setMinimum(0.001)
self.pixel_size_spinBox.setMaximum(10.0)
self.pixel_size_spinBox.setSingleStep(0.001)
self.pixel_size_spinBox.setProperty("value", 0.05)
self.pixel_size_spinBox.setObjectName("pixel_size_spinBox")
self.horizontalLayout_5.addWidget(self.pixel_size_spinBox)
self.pixel_size_units = QtWidgets.QLabel(self.groupBox)
self.pixel_size_units.setEnabled(False)
self.pixel_size_units.setObjectName("pixel_size_units")
self.horizontalLayout_5.addWidget(self.pixel_size_units)
self.horizontalLayout_6.addWidget(self.groupBox)
self.ignore_first_image_checkbox = QtWidgets.QCheckBox(self.centralwidget)
self.ignore_first_image_checkbox.setChecked(True)
self.ignore_first_image_checkbox.setObjectName("ignore_first_image_checkbox")
self.horizontalLayout_6.addWidget(self.ignore_first_image_checkbox)
spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_6.addItem(spacerItem2)
self.verticalLayout_7.addLayout(self.horizontalLayout_6)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.help_button = QtWidgets.QPushButton(self.centralwidget)
self.help_button.setMinimumSize(QtCore.QSize(100, 30))
self.help_button.setMaximumSize(QtCore.QSize(100, 30))
self.help_button.setObjectName("help_button")
self.horizontalLayout.addWidget(self.help_button)
spacerItem3 = QtWidgets.QSpacerItem(408, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem3)
self.ok_button = QtWidgets.QPushButton(self.centralwidget)
self.ok_button.setMinimumSize(QtCore.QSize(100, 30))
self.ok_button.setMaximumSize(QtCore.QSize(100, 30))
self.ok_button.setObjectName("ok_button")
self.horizontalLayout.addWidget(self.ok_button)
self.verticalLayout_7.addLayout(self.horizontalLayout)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1420, 22))
self.menubar.setObjectName("menubar")
self.menuFile_2 = QtWidgets.QMenu(self.menubar)
self.menuFile_2.setObjectName("menuFile_2")
self.menuImport_2 = QtWidgets.QMenu(self.menuFile_2)
self.menuImport_2.setObjectName("menuImport_2")
self.menuExport = QtWidgets.QMenu(self.menuFile_2)
self.menuExport.setObjectName("menuExport")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.actionExport_Profile = QtWidgets.QAction(MainWindow)
self.actionExport_Profile.setObjectName("actionExport_Profile")
self.actionWater_Intake = QtWidgets.QAction(MainWindow)
self.actionWater_Intake.setObjectName("actionWater_Intake")
self.actionImportedFilesMetadata = QtWidgets.QAction(MainWindow)
self.actionImportedFilesMetadata.setObjectName("actionImportedFilesMetadata")
self.actionBy_Time_Stamp = QtWidgets.QAction(MainWindow)
self.actionBy_Time_Stamp.setObjectName("actionBy_Time_Stamp")
self.actionBy_File_Name = QtWidgets.QAction(MainWindow)
self.actionBy_File_Name.setObjectName("actionBy_File_Name")
self.actionDsc_files = QtWidgets.QAction(MainWindow)
self.actionDsc_files.setObjectName("actionDsc_files")
self.actionDsc = QtWidgets.QAction(MainWindow)
self.actionDsc.setObjectName("actionDsc")
self.actionWater_Intake_2 = QtWidgets.QAction(MainWindow)
self.actionWater_Intake_2.setObjectName("actionWater_Intake_2")
self.actionProfiles = QtWidgets.QAction(MainWindow)
self.actionProfiles.setObjectName("actionProfiles")
self.menuImport_2.addAction(self.actionDsc)
self.menuExport.addAction(self.actionProfiles)
self.menuExport.addAction(self.actionWater_Intake_2)
self.menuFile_2.addAction(self.menuImport_2.menuAction())
self.menuFile_2.addAction(self.menuExport.menuAction())
self.menubar.addAction(self.menuFile_2.menuAction())
self.retranslateUi(MainWindow)
self.ok_button.clicked.connect(MainWindow.ok_button_clicked)
self.help_button.clicked.connect(MainWindow.help_button_clicked)
self.actionExport_Profile.triggered.connect(MainWindow.export_profile_clicked)
self.actionWater_Intake.triggered.connect(MainWindow.export_water_intake_clicked)
self.add_radioButton.clicked.connect(MainWindow.profile_algo_changed)
self.mean_radioButton.clicked.connect(MainWindow.profile_algo_changed)
self.median_radioButton.clicked.connect(MainWindow.profile_algo_changed)
self.sort_files_by_time_radioButton.clicked.connect(MainWindow.sorting_files_checkbox_clicked)
self.sort_files_by_name_radioButton.clicked.connect(MainWindow.sorting_files_checkbox_clicked)
self.time_between_runs_spinBox.editingFinished.connect(MainWindow.time_between_runs_spinBox_changed)
self.pixel_radioButton.clicked.connect(MainWindow._water_intake_yaxis_checkbox_changed)
self.distance_radioButton.clicked.connect(MainWindow._water_intake_yaxis_checkbox_changed)
self.pixel_size_spinBox.editingFinished.connect(MainWindow._pixel_size_spinBox_changed)
self.actionDsc_files.triggered.connect(MainWindow.import_dsc_clicked)
self.export_table_button.pressed.connect(MainWindow.export_table_button_clicked)
self.actionProfiles.triggered.connect(MainWindow.export_profile_clicked)
self.actionWater_Intake_2.triggered.connect(MainWindow.export_water_intake_clicked)
self.actionDsc.triggered.connect(MainWindow.import_dsc_clicked)
self.x_axis_integration_radioButton.clicked.connect(MainWindow.integration_direction_changed)
self.y_axis_integration_radioButton.clicked.connect(MainWindow.integration_direction_changed)
self.ignore_first_image_checkbox.clicked.connect(MainWindow.ignore_first_image_checkbox_clicked)
self.sliding_average_checkBox.clicked.connect(MainWindow.algorithm_changed)
self.error_function_checkBox.clicked.connect(MainWindow.algorithm_changed)
self.rebin_spinBox.valueChanged['int'].connect(MainWindow.rebin_slider_changed)
self.change_point_checkBox.clicked.connect(MainWindow.algorithm_changed)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
item = self.tableWidget.horizontalHeaderItem(0)
item.setText(_translate("MainWindow", "File Name"))
item = self.tableWidget.horizontalHeaderItem(1)
item.setText(_translate("MainWindow", "Time Stamp (unix format)"))
item = self.tableWidget.horizontalHeaderItem(2)
item.setText(_translate("MainWindow", "Time Stamp (user format)"))
self.export_table_button.setText(_translate("MainWindow", "Export Table ..."))
self.groupBox_2.setTitle(_translate("MainWindow", "Profile Algorithms"))
self.add_radioButton.setText(_translate("MainWindow", "Add"))
self.mean_radioButton.setText(_translate("MainWindow", "Mean"))
self.median_radioButton.setText(_translate("MainWindow", "Median"))
self.groupBox_4.setTitle(_translate("MainWindow", "Integration Direction"))
self.x_axis_integration_radioButton.setText(_translate("MainWindow", "along x-axis"))
self.y_axis_integration_radioButton.setText(_translate("MainWindow", "along y-axis"))
self.groupBox_6.setTitle(_translate("MainWindow", "Rebin"))
self.label.setText(_translate("MainWindow", "Pixels"))
self.groupBox_5.setTitle(_translate("MainWindow", "Algorithm"))
self.sliding_average_checkBox.setText(_translate("MainWindow", "Sliding Average"))
self.error_function_checkBox.setText(_translate("MainWindow", "Error Function"))
self.change_point_checkBox.setText(_translate("MainWindow", "Change Point"))
self.groupBox_3.setTitle(_translate("MainWindow", "Sorting Files"))
self.sort_files_by_time_radioButton.setText(_translate("MainWindow", "by Time Stamp"))
self.sort_files_by_name_radioButton.setText(_translate("MainWindow", "by Name"))
self.time_between_runs_label.setText(_translate("MainWindow", "-> Time Between Runs"))
self.time_between_runs_units_label.setText(_translate("MainWindow", "s"))
self.groupBox.setTitle(_translate("MainWindow", "Water Intake Y Axis"))
self.pixel_radioButton.setText(_translate("MainWindow", "Pixel"))
self.distance_radioButton.setText(_translate("MainWindow", "Distance"))
self.water_intake_distance_label.setText(_translate("MainWindow", "-> 1 pixel = "))
self.pixel_size_units.setText(_translate("MainWindow", "mm"))
self.ignore_first_image_checkbox.setText(_translate("MainWindow", "Ignore first image"))
self.help_button.setText(_translate("MainWindow", "HELP"))
self.ok_button.setText(_translate("MainWindow", "OK"))
self.menuFile_2.setTitle(_translate("MainWindow", "File"))
self.menuImport_2.setTitle(_translate("MainWindow", "Import"))
self.menuExport.setTitle(_translate("MainWindow", "Export"))
self.actionExport_Profile.setText(_translate("MainWindow", "Profiles ..."))
self.actionWater_Intake.setText(_translate("MainWindow", "Water Intake ..."))
self.actionImportedFilesMetadata.setText(_translate("MainWindow", "Imported Files and Metadata ..."))
self.actionBy_Time_Stamp.setText(_translate("MainWindow", "by Time Stamp"))
self.actionBy_File_Name.setText(_translate("MainWindow", "by File Name"))
self.actionDsc_files.setText(_translate("MainWindow", "dsc files ..."))
self.actionDsc.setText(_translate("MainWindow", "dsc ..."))
self.actionWater_Intake_2.setText(_translate("MainWindow", "Water Intake ..."))
self.actionProfiles.setText(_translate("MainWindow", "Profiles ..."))
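# --- Editor's usage sketch (hedged, standard PyQt5 pattern, not project code) --
# A generated Ui_MainWindow is normally attached to a QMainWindow subclass that
# implements every slot referenced in setupUi (ok_button_clicked, etc.); a bare
# QMainWindow would fail on those connect() calls.  Schematically:
#
#     class WaterIntakeProfileWindow(QtWidgets.QMainWindow):
#         def __init__(self):
#             super().__init__()
#             self.ui = Ui_MainWindow()
#             self.ui.setupUi(self)
#
#         def ok_button_clicked(self):
#             self.close()
#
#         # ... plus the remaining slots referenced in setupUi ...
#
#     app = QtWidgets.QApplication([])
#     window = WaterIntakeProfileWindow()
#     window.show()
#     app.exec_()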
| 65.75
| 126
| 0.758149
|
794b9422c4dc01b77440bc89e2eb00144cf7e62a
| 140,114
|
py
|
Python
|
lib/sqlalchemy/orm/relationships.py
|
bowlofeggs/sqlalchemy
|
4042792348481e8c00515f8df6af503ca4d0ee73
|
[
"MIT"
] | null | null | null |
lib/sqlalchemy/orm/relationships.py
|
bowlofeggs/sqlalchemy
|
4042792348481e8c00515f8df6af503ca4d0ee73
|
[
"MIT"
] | null | null | null |
lib/sqlalchemy/orm/relationships.py
|
bowlofeggs/sqlalchemy
|
4042792348481e8c00515f8df6af503ca4d0ee73
|
[
"MIT"
] | 1
|
2021-11-23T17:59:42.000Z
|
2021-11-23T17:59:42.000Z
|
# orm/relationships.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Heuristics related to join conditions as used in
:func:`_orm.relationship`.
Provides the :class:`.JoinCondition` object, which encapsulates
SQL annotation and aliasing behavior focused on the `primaryjoin`
and `secondaryjoin` aspects of :func:`_orm.relationship`.
"""
from __future__ import absolute_import
import collections
import re
import weakref
from . import attributes
from .base import state_str
from .interfaces import MANYTOMANY
from .interfaces import MANYTOONE
from .interfaces import ONETOMANY
from .interfaces import PropComparator
from .interfaces import StrategizedProperty
from .util import _orm_annotate
from .util import _orm_deannotate
from .util import CascadeOptions
from .. import exc as sa_exc
from .. import log
from .. import schema
from .. import sql
from .. import util
from ..inspection import inspect
from ..sql import coercions
from ..sql import expression
from ..sql import operators
from ..sql import roles
from ..sql import visitors
from ..sql.util import _deep_deannotate
from ..sql.util import _shallow_annotate
from ..sql.util import adapt_criterion_to_null
from ..sql.util import ClauseAdapter
from ..sql.util import join_condition
from ..sql.util import selectables_overlap
from ..sql.util import visit_binary_product
if util.TYPE_CHECKING:
from .util import AliasedInsp
from typing import Union
def remote(expr):
"""Annotate a portion of a primaryjoin expression
with a 'remote' annotation.
See the section :ref:`relationship_custom_foreign` for a
description of use.
.. seealso::
:ref:`relationship_custom_foreign`
:func:`.foreign`
"""
return _annotate_columns(
coercions.expect(roles.ColumnArgumentRole, expr), {"remote": True}
)
def foreign(expr):
"""Annotate a portion of a primaryjoin expression
with a 'foreign' annotation.
See the section :ref:`relationship_custom_foreign` for a
description of use.
.. seealso::
:ref:`relationship_custom_foreign`
:func:`.remote`
"""
return _annotate_columns(
coercions.expect(roles.ColumnArgumentRole, expr), {"foreign": True}
)
@log.class_logger
class RelationshipProperty(StrategizedProperty):
"""Describes an object property that holds a single item or list
of items that correspond to a related database table.
Public constructor is the :func:`_orm.relationship` function.
.. seealso::
:ref:`relationship_config_toplevel`
"""
strategy_wildcard_key = "relationship"
inherit_cache = True
_persistence_only = dict(
passive_deletes=False,
passive_updates=True,
enable_typechecks=True,
active_history=False,
cascade_backrefs=True,
)
_dependency_processor = None
def __init__(
self,
argument,
secondary=None,
primaryjoin=None,
secondaryjoin=None,
foreign_keys=None,
uselist=None,
order_by=False,
backref=None,
back_populates=None,
overlaps=None,
post_update=False,
cascade=False,
viewonly=False,
lazy="select",
collection_class=None,
passive_deletes=_persistence_only["passive_deletes"],
passive_updates=_persistence_only["passive_updates"],
remote_side=None,
enable_typechecks=_persistence_only["enable_typechecks"],
join_depth=None,
comparator_factory=None,
single_parent=False,
innerjoin=False,
distinct_target_key=None,
doc=None,
active_history=_persistence_only["active_history"],
cascade_backrefs=_persistence_only["cascade_backrefs"],
load_on_pending=False,
bake_queries=True,
_local_remote_pairs=None,
query_class=None,
info=None,
omit_join=None,
sync_backref=None,
):
"""Provide a relationship between two mapped classes.
This corresponds to a parent-child or associative table relationship.
The constructed class is an instance of
:class:`.RelationshipProperty`.
A typical :func:`_orm.relationship`, used in a classical mapping::
mapper(Parent, properties={
'children': relationship(Child)
})
Some arguments accepted by :func:`_orm.relationship`
optionally accept a
callable function, which when called produces the desired value.
The callable is invoked by the parent :class:`_orm.Mapper` at "mapper
initialization" time, which happens only when mappers are first used,
and is assumed to be after all mappings have been constructed. This
can be used to resolve order-of-declaration and other dependency
issues, such as if ``Child`` is declared below ``Parent`` in the same
file::
mapper(Parent, properties={
"children":relationship(lambda: Child,
order_by=lambda: Child.id)
})
When using the :ref:`declarative_toplevel` extension, the Declarative
initializer allows string arguments to be passed to
:func:`_orm.relationship`. These string arguments are converted into
callables that evaluate the string as Python code, using the
Declarative class-registry as a namespace. This allows the lookup of
related classes to be automatic via their string name, and removes the
need for related classes to be imported into the local module space
before the dependent classes have been declared. It is still required
that the modules in which these related classes appear are imported
anywhere in the application at some point before the related mappings
are actually used, else a lookup error will be raised when the
:func:`_orm.relationship`
attempts to resolve the string reference to the
related class. An example of a string-resolved class is as
follows::
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Parent(Base):
__tablename__ = 'parent'
id = Column(Integer, primary_key=True)
children = relationship("Child", order_by="Child.id")
.. seealso::
:ref:`relationship_config_toplevel` - Full introductory and
reference documentation for :func:`_orm.relationship`.
:ref:`orm_tutorial_relationship` - ORM tutorial introduction.
:param argument:
A mapped class, or actual :class:`_orm.Mapper` instance,
representing
the target of the relationship.
:paramref:`_orm.relationship.argument`
may also be passed as a callable
function which is evaluated at mapper initialization time, and may
be passed as a string name when using Declarative.
.. warning:: Prior to SQLAlchemy 1.3.16, this value is interpreted
using Python's ``eval()`` function.
**DO NOT PASS UNTRUSTED INPUT TO THIS STRING**.
See :ref:`declarative_relationship_eval` for details on
declarative evaluation of :func:`_orm.relationship` arguments.
.. versionchanged:: 1.3.16
The string evaluation of the main "argument" no longer accepts an
open ended Python expression, instead only accepting a string
class name or dotted package-qualified name.
.. seealso::
:ref:`declarative_configuring_relationships` - further detail
on relationship configuration when using Declarative.
:param secondary:
For a many-to-many relationship, specifies the intermediary
table, and is typically an instance of :class:`_schema.Table`.
In less common circumstances, the argument may also be specified
as an :class:`_expression.Alias` construct, or even a
:class:`_expression.Join` construct.
:paramref:`_orm.relationship.secondary` may
also be passed as a callable function which is evaluated at
mapper initialization time. When using Declarative, it may also
be a string argument noting the name of a :class:`_schema.Table`
that is
present in the :class:`_schema.MetaData`
collection associated with the
parent-mapped :class:`_schema.Table`.
.. warning:: When passed as a Python-evaluable string, the
argument is interpreted using Python's ``eval()`` function.
**DO NOT PASS UNTRUSTED INPUT TO THIS STRING**.
See :ref:`declarative_relationship_eval` for details on
declarative evaluation of :func:`_orm.relationship` arguments.
The :paramref:`_orm.relationship.secondary` keyword argument is
typically applied in the case where the intermediary
:class:`_schema.Table`
is not otherwise expressed in any direct class mapping. If the
"secondary" table is also explicitly mapped elsewhere (e.g. as in
:ref:`association_pattern`), one should consider applying the
:paramref:`_orm.relationship.viewonly` flag so that this
:func:`_orm.relationship`
is not used for persistence operations which
may conflict with those of the association object pattern.
.. seealso::
:ref:`relationships_many_to_many` - Reference example of "many
to many".
:ref:`orm_tutorial_many_to_many` - ORM tutorial introduction to
many-to-many relationships.
:ref:`self_referential_many_to_many` - Specifics on using
many-to-many in a self-referential case.
:ref:`declarative_many_to_many` - Additional options when using
Declarative.
:ref:`association_pattern` - an alternative to
:paramref:`_orm.relationship.secondary`
when composing association
table relationships, allowing additional attributes to be
specified on the association table.
:ref:`composite_secondary_join` - a lesser-used pattern which
in some cases can enable complex :func:`_orm.relationship` SQL
conditions to be used.
.. versionadded:: 0.9.2 :paramref:`_orm.relationship.secondary`
works
more effectively when referring to a :class:`_expression.Join`
instance.
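As a hedged illustration only (the ``association`` table, the
``Parent``/``Child`` classes, the declarative ``Base`` and the usual
``Table``/``Column``/``ForeignKey`` imports are assumed for this sketch
and are not defined by this module), a basic many-to-many
configuration looks like::
    # illustrative names only; any Table present in Base.metadata works
    association = Table(
        "association", Base.metadata,
        Column("parent_id", ForeignKey("parent.id"), primary_key=True),
        Column("child_id", ForeignKey("child.id"), primary_key=True),
    )
    class Parent(Base):
        __tablename__ = "parent"
        id = Column(Integer, primary_key=True)
        children = relationship("Child", secondary=association)
    class Child(Base):
        __tablename__ = "child"
        id = Column(Integer, primary_key=True)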
:param active_history=False:
When ``True``, indicates that the "previous" value for a
many-to-one reference should be loaded when replaced, if
not already loaded. Normally, history tracking logic for
simple many-to-ones only needs to be aware of the "new"
value in order to perform a flush. This flag is available
for applications that make use of
:func:`.attributes.get_history` which also need to know
the "previous" value of the attribute.
:param backref:
Indicates the string name of a property to be placed on the related
mapper's class that will handle this relationship in the other
direction. The other property will be created automatically
when the mappers are configured. Can also be passed as a
:func:`.backref` object to control the configuration of the
new relationship.
.. seealso::
:ref:`relationships_backref` - Introductory documentation and
examples.
:paramref:`_orm.relationship.back_populates` - alternative form
of backref specification.
:func:`.backref` - allows control over :func:`_orm.relationship`
configuration when using :paramref:`_orm.relationship.backref`.
:param back_populates:
Takes a string name and has the same meaning as
:paramref:`_orm.relationship.backref`, except the complementing
property is **not** created automatically, and instead must be
configured explicitly on the other mapper. The complementing
property should also indicate
:paramref:`_orm.relationship.back_populates` to this relationship to
ensure proper functioning.
.. seealso::
:ref:`relationships_backref` - Introductory documentation and
examples.
:paramref:`_orm.relationship.backref` - alternative form
of backref specification.
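For illustration (a minimal sketch; the ``Parent``/``Child`` classes
and the declarative ``Base`` are assumed, not part of this module),
both sides name each other explicitly::
    class Parent(Base):
        __tablename__ = "parent"
        id = Column(Integer, primary_key=True)
        children = relationship("Child", back_populates="parent")
    class Child(Base):
        __tablename__ = "child"
        id = Column(Integer, primary_key=True)
        parent_id = Column(ForeignKey("parent.id"))
        # the complementing side must be configured explicitly
        parent = relationship("Parent", back_populates="children")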
:param overlaps:
A string name or comma-delimited set of names of other relationships
on either this mapper, a descendant mapper, or a target mapper with
which this relationship may write to the same foreign keys upon
persistence. The only effect this has is to eliminate the
warning that this relationship will conflict with another upon
persistence. This is used for such relationships that are truly
capable of conflicting with each other on write, but the application
will ensure that no such conflicts occur.
.. versionadded:: 1.4
:param bake_queries=True:
Use the :class:`.BakedQuery` cache to cache the construction of SQL
used in lazy loads. True by default. Set to False if the
join condition of the relationship has unusual features that
might not respond well to statement caching.
.. versionchanged:: 1.2
"Baked" loading is the default implementation for the "select",
a.k.a. "lazy" loading strategy for relationships.
.. versionadded:: 1.0.0
.. seealso::
:ref:`baked_toplevel`
:param cascade:
A comma-separated list of cascade rules which determines how
Session operations should be "cascaded" from parent to child.
This defaults to ``False``, which means the default cascade
should be used - this default cascade is ``"save-update, merge"``.
The available cascades are ``save-update``, ``merge``,
``expunge``, ``delete``, ``delete-orphan``, and ``refresh-expire``.
An additional option, ``all`` indicates shorthand for
``"save-update, merge, refresh-expire,
expunge, delete"``, and is often used as in ``"all, delete-orphan"``
to indicate that related objects should follow along with the
parent object in all cases, and be deleted when de-associated.
.. seealso::
:ref:`unitofwork_cascades` - Full detail on each of the available
cascade options.
:ref:`tutorial_delete_cascade` - Tutorial example describing
a delete cascade.
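For example, a common configuration for a parent that fully owns its
child rows is shown in this sketch (class names are illustrative
only)::
    # children follow the parent into the session, and are deleted
    # when removed from the collection or when the parent is deleted
    children = relationship("Child", cascade="all, delete-orphan")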
:param cascade_backrefs=True:
A boolean value indicating if the ``save-update`` cascade should
operate along an assignment event intercepted by a backref.
When set to ``False``, the attribute managed by this relationship
will not cascade an incoming transient object into the session of a
persistent parent, if the event is received via backref.
.. deprecated:: 1.4 The
:paramref:`_orm.relationship.cascade_backrefs`
flag will default to False in all cases in SQLAlchemy 2.0.
.. seealso::
:ref:`backref_cascade` - Full discussion and examples on how
the :paramref:`_orm.relationship.cascade_backrefs` option is used.
:param collection_class:
A class or callable that returns a new list-holding object, which
will be used in place of a plain list for storing elements.
.. seealso::
:ref:`custom_collections` - Introductory documentation and
examples.
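For example, to use a ``set`` instead of a list (sketch only; the
``Child`` name is illustrative)::
    # any callable that returns a collection instance is acceptable
    children = relationship("Child", collection_class=set)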
:param comparator_factory:
A class which extends :class:`.RelationshipProperty.Comparator`
which provides custom SQL clause generation for comparison
operations.
.. seealso::
:class:`.PropComparator` - some detail on redefining comparators
at this level.
:ref:`custom_comparators` - Brief intro to this feature.
:param distinct_target_key=None:
Indicate if a "subquery" eager load should apply the DISTINCT
keyword to the innermost SELECT statement. When left as ``None``,
the DISTINCT keyword will be applied in those cases when the target
columns do not comprise the full primary key of the target table.
When set to ``True``, the DISTINCT keyword is applied to the
innermost SELECT unconditionally.
It may be desirable to set this flag to False when the DISTINCT is
reducing performance of the innermost subquery beyond that of what
duplicate innermost rows may be causing.
.. versionchanged:: 0.9.0 -
:paramref:`_orm.relationship.distinct_target_key` now defaults to
``None``, so that the feature enables itself automatically for
those cases where the innermost query targets a non-unique
key.
.. seealso::
:ref:`loading_toplevel` - includes an introduction to subquery
eager loading.
:param doc:
Docstring which will be applied to the resulting descriptor.
:param foreign_keys:
A list of columns which are to be used as "foreign key"
columns, or columns which refer to the value in a remote
column, within the context of this :func:`_orm.relationship`
object's :paramref:`_orm.relationship.primaryjoin` condition.
That is, if the :paramref:`_orm.relationship.primaryjoin`
condition of this :func:`_orm.relationship` is ``a.id ==
b.a_id``, and the values in ``b.a_id`` are required to be
present in ``a.id``, then the "foreign key" column of this
:func:`_orm.relationship` is ``b.a_id``.
In normal cases, the :paramref:`_orm.relationship.foreign_keys`
parameter is **not required.** :func:`_orm.relationship` will
automatically determine which columns in the
:paramref:`_orm.relationship.primaryjoin` condition are to be
considered "foreign key" columns based on those
:class:`_schema.Column` objects that specify
:class:`_schema.ForeignKey`,
or are otherwise listed as referencing columns in a
:class:`_schema.ForeignKeyConstraint` construct.
:paramref:`_orm.relationship.foreign_keys` is only needed when:
1. There is more than one way to construct a join from the local
table to the remote table, as there are multiple foreign key
references present. Setting ``foreign_keys`` will limit the
:func:`_orm.relationship`
to consider just those columns specified
here as "foreign".
2. The :class:`_schema.Table` being mapped does not actually have
:class:`_schema.ForeignKey` or
:class:`_schema.ForeignKeyConstraint`
constructs present, often because the table
was reflected from a database that does not support foreign key
reflection (MySQL MyISAM).
3. The :paramref:`_orm.relationship.primaryjoin`
argument is used to
construct a non-standard join condition, which makes use of
columns or expressions that do not normally refer to their
"parent" column, such as a join condition expressed by a
complex comparison using a SQL function.
The :func:`_orm.relationship` construct will raise informative
error messages that suggest the use of the
:paramref:`_orm.relationship.foreign_keys` parameter when
presented with an ambiguous condition. In typical cases,
if :func:`_orm.relationship` doesn't raise any exceptions, the
:paramref:`_orm.relationship.foreign_keys` parameter is usually
not needed.
:paramref:`_orm.relationship.foreign_keys` may also be passed as a
callable function which is evaluated at mapper initialization time,
and may be passed as a Python-evaluable string when using
Declarative.
.. warning:: When passed as a Python-evaluable string, the
argument is interpreted using Python's ``eval()`` function.
**DO NOT PASS UNTRUSTED INPUT TO THIS STRING**.
See :ref:`declarative_relationship_eval` for details on
declarative evaluation of :func:`_orm.relationship` arguments.
.. seealso::
:ref:`relationship_foreign_keys`
:ref:`relationship_custom_foreign`
:func:`.foreign` - allows direct annotation of the "foreign"
columns within a :paramref:`_orm.relationship.primaryjoin`
condition.
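As an illustrative sketch (the ``Customer``/``Address`` classes, their
columns and the declarative ``Base`` are assumed here), two foreign
keys to the same target table are disambiguated with this parameter::
    class Customer(Base):
        __tablename__ = "customer"
        id = Column(Integer, primary_key=True)
        billing_address_id = Column(ForeignKey("address.id"))
        shipping_address_id = Column(ForeignKey("address.id"))
        # each relationship is limited to one candidate foreign key
        billing_address = relationship(
            "Address", foreign_keys=[billing_address_id])
        shipping_address = relationship(
            "Address", foreign_keys=[shipping_address_id])
    class Address(Base):
        __tablename__ = "address"
        id = Column(Integer, primary_key=True)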
:param info: Optional data dictionary which will be populated into the
:attr:`.MapperProperty.info` attribute of this object.
:param innerjoin=False:
When ``True``, joined eager loads will use an inner join to join
against related tables instead of an outer join. The purpose
of this option is generally one of performance, as inner joins
generally perform better than outer joins.
This flag can be set to ``True`` when the relationship references an
object via many-to-one using local foreign keys that are not
nullable, or when the reference is one-to-one or a collection that
is guaranteed to have at least one entry.
The option supports the same "nested" and "unnested" options as
that of :paramref:`_orm.joinedload.innerjoin`. See that flag
for details on nested / unnested behaviors.
.. seealso::
:paramref:`_orm.joinedload.innerjoin` - the option as specified by
loader option, including detail on nesting behavior.
:ref:`what_kind_of_loading` - Discussion of some details of
various loader options.
:param join_depth:
When non-``None``, an integer value indicating how many levels
deep "eager" loaders should join on a self-referring or cyclical
relationship. The number counts how many times the same Mapper
shall be present in the loading condition along a particular join
branch. When left at its default of ``None``, eager loaders
will stop chaining when they encounter the same target mapper
which is already higher up in the chain. This option applies
both to joined- and subquery- eager loaders.
.. seealso::
:ref:`self_referential_eager_loading` - Introductory documentation
and examples.
:param lazy='select': specifies how the related items should be
loaded. The default value is ``select``. Values include:
* ``select`` - items should be loaded lazily when the property is
first accessed, using a separate SELECT statement, or identity map
fetch for simple many-to-one references.
* ``immediate`` - items should be loaded as the parents are loaded,
using a separate SELECT statement, or identity map fetch for
simple many-to-one references.
* ``joined`` - items should be loaded "eagerly" in the same query as
that of the parent, using a JOIN or LEFT OUTER JOIN. Whether
the join is "outer" or not is determined by the
:paramref:`_orm.relationship.innerjoin` parameter.
* ``subquery`` - items should be loaded "eagerly" as the parents are
loaded, using one additional SQL statement, which issues a JOIN to
a subquery of the original statement, for each collection
requested.
* ``selectin`` - items should be loaded "eagerly" as the parents
are loaded, using one or more additional SQL statements, which
issues a JOIN to the immediate parent object, specifying primary
key identifiers using an IN clause.
.. versionadded:: 1.2
* ``noload`` - no loading should occur at any time. This is to
support "write-only" attributes, or attributes which are
populated in some manner specific to the application.
* ``raise`` - lazy loading is disallowed; accessing
the attribute, if its value were not already loaded via eager
loading, will raise an :exc:`~sqlalchemy.exc.InvalidRequestError`.
This strategy can be used when objects are to be detached from
their attached :class:`.Session` after they are loaded.
.. versionadded:: 1.1
* ``raise_on_sql`` - lazy loading that emits SQL is disallowed;
accessing the attribute, if its value were not already loaded via
eager loading, will raise an
:exc:`~sqlalchemy.exc.InvalidRequestError`, **if the lazy load
needs to emit SQL**. If the lazy load can pull the related value
from the identity map or determine that it should be None, the
value is loaded. This strategy can be used when objects will
remain associated with the attached :class:`.Session`, however
additional SELECT statements should be blocked.
.. versionadded:: 1.1
* ``dynamic`` - the attribute will return a pre-configured
:class:`_query.Query` object for all read
operations, onto which further filtering operations can be
applied before iterating the results. See
the section :ref:`dynamic_relationship` for more details.
* True - a synonym for 'select'
* False - a synonym for 'joined'
* None - a synonym for 'noload'
.. seealso::
:doc:`/orm/loading_relationships` - Full documentation on
relationship loader configuration.
:ref:`dynamic_relationship` - detail on the ``dynamic`` option.
:ref:`collections_noload_raiseload` - notes on "noload" and "raise"
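For example, the loader strategy is passed directly to
:func:`_orm.relationship` (sketch only; the ``Child`` name is
illustrative)::
    # emit a second SELECT with an IN clause as parents are loaded
    children = relationship("Child", lazy="selectin")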
:param load_on_pending=False:
Indicates loading behavior for transient or pending parent objects.
When set to ``True``, causes the lazy-loader to
issue a query for a parent object that is not persistent, meaning it
has never been flushed. This may take effect for a pending object
when autoflush is disabled, or for a transient object that has been
"attached" to a :class:`.Session` but is not part of its pending
collection.
The :paramref:`_orm.relationship.load_on_pending`
flag does not improve
behavior when the ORM is used normally - object references should be
constructed at the object level, not at the foreign key level, so
that they are present in an ordinary way before a flush proceeds.
This flag is not intended for general use.
.. seealso::
:meth:`.Session.enable_relationship_loading` - this method
establishes "load on pending" behavior for the whole object, and
also allows loading on objects that remain transient or
detached.
:param order_by:
Indicates the ordering that should be applied when loading these
items. :paramref:`_orm.relationship.order_by`
is expected to refer to
one of the :class:`_schema.Column`
objects to which the target class is
mapped, or the attribute itself bound to the target class which
refers to the column.
:paramref:`_orm.relationship.order_by`
may also be passed as a callable
function which is evaluated at mapper initialization time, and may
be passed as a Python-evaluable string when using Declarative.
.. warning:: When passed as a Python-evaluable string, the
argument is interpreted using Python's ``eval()`` function.
**DO NOT PASS UNTRUSTED INPUT TO THIS STRING**.
See :ref:`declarative_relationship_eval` for details on
declarative evaluation of :func:`_orm.relationship` arguments.
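For example (sketch only; the ``Child`` class and its ``id`` column
are assumed)::
    # the string form is resolved via the Declarative class registry
    children = relationship("Child", order_by="Child.id")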
:param passive_deletes=False:
Indicates loading behavior during delete operations.
A value of True indicates that unloaded child items should not
be loaded during a delete operation on the parent. Normally,
when a parent item is deleted, all child items are loaded so
that they can either be marked as deleted, or have their
foreign key to the parent set to NULL. Marking this flag as
True usually implies an ON DELETE <CASCADE|SET NULL> rule is in
place which will handle updating/deleting child rows on the
database side.
Additionally, setting the flag to the string value 'all' will
disable the "nulling out" of the child foreign keys, when the parent
object is deleted and there is no delete or delete-orphan cascade
enabled. This is typically used when a triggering or error raise
scenario is in place on the database side. Note that the foreign
key attributes on in-session child objects will not be changed after
a flush occurs so this is a very special use-case setting.
Additionally, the "nulling out" will still occur if the child
object is de-associated with the parent.
.. seealso::
:ref:`passive_deletes` - Introductory documentation
and examples.
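For example, when the schema already declares ``ON DELETE CASCADE``
(sketch only; the class and column names are illustrative)::
    class Parent(Base):
        __tablename__ = "parent"
        id = Column(Integer, primary_key=True)
        # let the database remove the child rows on delete
        children = relationship(
            "Child", cascade="all, delete-orphan", passive_deletes=True)
    class Child(Base):
        __tablename__ = "child"
        id = Column(Integer, primary_key=True)
        parent_id = Column(ForeignKey("parent.id", ondelete="CASCADE"))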
:param passive_updates=True:
Indicates the persistence behavior to take when a referenced
primary key value changes in place, indicating that the referencing
foreign key columns will also need their value changed.
When True, it is assumed that ``ON UPDATE CASCADE`` is configured on
the foreign key in the database, and that the database will
handle propagation of an UPDATE from a source column to
dependent rows. When False, the SQLAlchemy
:func:`_orm.relationship`
construct will attempt to emit its own UPDATE statements to
modify related targets. However note that SQLAlchemy **cannot**
emit an UPDATE for more than one level of cascade. Also,
setting this flag to False is not compatible in the case where
the database is in fact enforcing referential integrity, unless
those constraints are explicitly "deferred", if the target backend
supports it.
It is highly advised that an application which is employing
mutable primary keys keeps ``passive_updates`` set to True,
and instead uses the referential integrity features of the database
itself in order to handle the change efficiently and fully.
.. seealso::
:ref:`passive_updates` - Introductory documentation and
examples.
:paramref:`.mapper.passive_updates` - a similar flag which
takes effect for joined-table inheritance mappings.
:param post_update:
This indicates that the relationship should be handled by a
second UPDATE statement after an INSERT or before a
DELETE. Currently, it also will issue an UPDATE after the
instance was UPDATEd as well, although this technically should
be improved. This flag is used to handle saving bi-directional
dependencies between two individual rows (i.e. each row
references the other), where it would otherwise be impossible to
INSERT or DELETE both rows fully since one row exists before the
other. Use this flag when a particular mapping arrangement will
incur two rows that are dependent on each other, such as a table
that has a one-to-many relationship to a set of child rows, and
also has a column that references a single child row within that
list (i.e. both tables contain a foreign key to each other). If
a flush operation returns an error that a "cyclical
dependency" was detected, this is a cue that you might want to
use :paramref:`_orm.relationship.post_update` to "break" the cycle.
.. seealso::
:ref:`post_update` - Introductory documentation and examples.
:param primaryjoin:
A SQL expression that will be used as the primary
join of the child object against the parent object, or in a
many-to-many relationship the join of the parent object to the
association table. By default, this value is computed based on the
foreign key relationships of the parent and child tables (or
association table).
:paramref:`_orm.relationship.primaryjoin` may also be passed as a
callable function which is evaluated at mapper initialization time,
and may be passed as a Python-evaluable string when using
Declarative.
.. warning:: When passed as a Python-evaluable string, the
argument is interpreted using Python's ``eval()`` function.
**DO NOT PASS UNTRUSTED INPUT TO THIS STRING**.
See :ref:`declarative_relationship_eval` for details on
declarative evaluation of :func:`_orm.relationship` arguments.
.. seealso::
:ref:`relationship_primaryjoin`
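For example, a relationship limited to a filtered subset of rows can
state its own join condition (sketch only; the ``User``/``Address``
classes and their columns are assumed)::
    class User(Base):
        __tablename__ = "user"
        id = Column(Integer, primary_key=True)
        # the string form is evaluated against the class registry
        boston_addresses = relationship(
            "Address",
            primaryjoin="and_(User.id == Address.user_id, "
            "Address.city == 'Boston')",
            viewonly=True,
        )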
:param remote_side:
Used for self-referential relationships, indicates the column or
list of columns that form the "remote side" of the relationship.
:paramref:`_orm.relationship.remote_side` may also be passed as a
callable function which is evaluated at mapper initialization time,
and may be passed as a Python-evaluable string when using
Declarative.
.. warning:: When passed as a Python-evaluable string, the
argument is interpreted using Python's ``eval()`` function.
**DO NOT PASS UNTRUSTED INPUT TO THIS STRING**.
See :ref:`declarative_relationship_eval` for details on
declarative evaluation of :func:`_orm.relationship` arguments.
.. seealso::
:ref:`self_referential` - in-depth explanation of how
:paramref:`_orm.relationship.remote_side`
is used to configure self-referential relationships.
:func:`.remote` - an annotation function that accomplishes the
same purpose as :paramref:`_orm.relationship.remote_side`,
typically
when a custom :paramref:`_orm.relationship.primaryjoin` condition
is used.
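For example, the classic adjacency list (sketch only; the ``Node``
class is illustrative)::
    class Node(Base):
        __tablename__ = "node"
        id = Column(Integer, primary_key=True)
        parent_id = Column(ForeignKey("node.id"))
        # "id" forms the remote side, making this the many-to-one
        # direction of the self-referential relationship
        parent = relationship("Node", remote_side=[id])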
:param query_class:
A :class:`_query.Query`
subclass that will be used as the base of the
"appender query" returned by a "dynamic" relationship, that
is, a relationship that specifies ``lazy="dynamic"`` or was
otherwise constructed using the :func:`_orm.dynamic_loader`
function.
.. seealso::
:ref:`dynamic_relationship` - Introduction to "dynamic"
relationship loaders.
:param secondaryjoin:
A SQL expression that will be used as the join of
an association table to the child object. By default, this value is
computed based on the foreign key relationships of the association
and child tables.
:paramref:`_orm.relationship.secondaryjoin` may also be passed as a
callable function which is evaluated at mapper initialization time,
and may be passed as a Python-evaluable string when using
Declarative.
.. warning:: When passed as a Python-evaluable string, the
argument is interpreted using Python's ``eval()`` function.
**DO NOT PASS UNTRUSTED INPUT TO THIS STRING**.
See :ref:`declarative_relationship_eval` for details on
declarative evaluation of :func:`_orm.relationship` arguments.
.. seealso::
:ref:`relationship_primaryjoin`
:param single_parent:
When True, installs a validator which will prevent objects
from being associated with more than one parent at a time.
This is used for many-to-one or many-to-many relationships that
should be treated either as one-to-one or one-to-many. Its usage
is optional, except for :func:`_orm.relationship` constructs which
are many-to-one or many-to-many and also
specify the ``delete-orphan`` cascade option. The
:func:`_orm.relationship` construct itself will raise an error
instructing when this option is required.
.. seealso::
:ref:`unitofwork_cascades` - includes detail on when the
:paramref:`_orm.relationship.single_parent`
flag may be appropriate.
:param uselist:
A boolean that indicates if this property should be loaded as a
list or a scalar. In most cases, this value is determined
automatically by :func:`_orm.relationship` at mapper configuration
time, based on the type and direction
of the relationship - one to many forms a list, many to one
forms a scalar, many to many is a list. If a scalar is desired
where normally a list would be present, such as a bi-directional
one-to-one relationship, set :paramref:`_orm.relationship.uselist`
to
False.
The :paramref:`_orm.relationship.uselist`
flag is also available on an
existing :func:`_orm.relationship`
construct as a read-only attribute,
which can be used to determine if this :func:`_orm.relationship`
deals
with collections or scalar attributes::
>>> User.addresses.property.uselist
True
.. seealso::
:ref:`relationships_one_to_one` - Introduction to the "one to
one" relationship pattern, which is typically when the
:paramref:`_orm.relationship.uselist` flag is needed.
:param viewonly=False:
When set to ``True``, the relationship is used only for loading
objects, and not for any persistence operation. A
:func:`_orm.relationship` which specifies
:paramref:`_orm.relationship.viewonly` can work
with a wider range of SQL operations within the
:paramref:`_orm.relationship.primaryjoin` condition, including
operations that feature the use of a variety of comparison operators
as well as SQL functions such as :func:`_expression.cast`. The
:paramref:`_orm.relationship.viewonly`
flag is also of general use when defining any kind of
:func:`_orm.relationship` that doesn't represent
the full set of related objects, to prevent modifications of the
collection from resulting in persistence operations.
When using the :paramref:`_orm.relationship.viewonly` flag in
conjunction with backrefs, the originating relationship for a
particular state change will not produce state changes within the
viewonly relationship. This is the behavior implied by
:paramref:`_orm.relationship.sync_backref` being set to False.
.. versionchanged:: 1.3.17 - the
:paramref:`_orm.relationship.sync_backref` flag is set to False
when using viewonly in conjunction with backrefs.
.. seealso::
:paramref:`_orm.relationship.sync_backref`
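For example, a read-only collection layered over an association table
that is persisted elsewhere (sketch only; the table and class names
are illustrative)::
    class Parent(Base):
        __tablename__ = "parent"
        id = Column(Integer, primary_key=True)
        # rows in "association" are written via another mapping;
        # this collection is for loading only
        children = relationship(
            "Child", secondary="association", viewonly=True)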
:param sync_backref:
A boolean that enables the events used to synchronize the in-Python
attributes when this relationship is target of either
:paramref:`_orm.relationship.backref` or
:paramref:`_orm.relationship.back_populates`.
Defaults to ``None``, which indicates that an automatic value should
be selected based on the value of the
:paramref:`_orm.relationship.viewonly` flag. When left at its
default, changes in state will be back-populated only if neither
side of the relationship is viewonly.
.. versionadded:: 1.3.17
.. versionchanged:: 1.4 - A relationship that specifies
:paramref:`_orm.relationship.viewonly` automatically implies
that :paramref:`_orm.relationship.sync_backref` is ``False``.
.. seealso::
:paramref:`_orm.relationship.viewonly`
:param omit_join:
Allows manual control over the "selectin" automatic join
optimization. Set to ``False`` to disable the "omit join" feature
added in SQLAlchemy 1.3; or leave as ``None`` to leave automatic
optimization in place.
.. note:: This flag may only be set to ``False``. It is not
necessary to set it to ``True`` as the "omit_join" optimization is
automatically detected; if it is not detected, then the
optimization is not supported.
.. versionchanged:: 1.3.11 setting ``omit_join`` to True will now
emit a warning as this was not the intended use of this flag.
.. versionadded:: 1.3
"""
super(RelationshipProperty, self).__init__()
self.uselist = uselist
self.argument = argument
self.secondary = secondary
self.primaryjoin = primaryjoin
self.secondaryjoin = secondaryjoin
self.post_update = post_update
self.direction = None
self.viewonly = viewonly
if viewonly:
self._warn_for_persistence_only_flags(
passive_deletes=passive_deletes,
passive_updates=passive_updates,
enable_typechecks=enable_typechecks,
active_history=active_history,
cascade_backrefs=cascade_backrefs,
)
if viewonly and sync_backref:
raise sa_exc.ArgumentError(
"sync_backref and viewonly cannot both be True"
)
self.sync_backref = sync_backref
self.lazy = lazy
self.single_parent = single_parent
self._user_defined_foreign_keys = foreign_keys
self.collection_class = collection_class
self.passive_deletes = passive_deletes
self.cascade_backrefs = cascade_backrefs
self.passive_updates = passive_updates
self.remote_side = remote_side
self.enable_typechecks = enable_typechecks
self.query_class = query_class
self.innerjoin = innerjoin
self.distinct_target_key = distinct_target_key
self.doc = doc
self.active_history = active_history
self.join_depth = join_depth
if omit_join:
util.warn(
"setting omit_join to True is not supported; selectin "
"loading of this relationship may not work correctly if this "
"flag is set explicitly. omit_join optimization is "
"automatically detected for conditions under which it is "
"supported."
)
self.omit_join = omit_join
self.local_remote_pairs = _local_remote_pairs
self.bake_queries = bake_queries
self.load_on_pending = load_on_pending
self.comparator_factory = (
comparator_factory or RelationshipProperty.Comparator
)
self.comparator = self.comparator_factory(self, None)
util.set_creation_order(self)
if info is not None:
self.info = info
self.strategy_key = (("lazy", self.lazy),)
self._reverse_property = set()
if overlaps:
self._overlaps = set(re.split(r"\s*,\s*", overlaps))
else:
self._overlaps = ()
if cascade is not False:
self.cascade = cascade
elif self.viewonly:
self.cascade = "none"
else:
self.cascade = "save-update, merge"
self.order_by = order_by
self.back_populates = back_populates
if self.back_populates:
if backref:
raise sa_exc.ArgumentError(
"backref and back_populates keyword arguments "
"are mutually exclusive"
)
self.backref = None
else:
self.backref = backref
def _warn_for_persistence_only_flags(self, **kw):
for k, v in kw.items():
if v != self._persistence_only[k]:
# we are warning here rather than warn deprecated as this is a
# configuration mistake, and Python shows regular warnings more
# aggressively than deprecation warnings by default. Unlike the
# case of setting viewonly with cascade, the settings being
# warned about here are not actively doing the wrong thing
# against viewonly=True, so it is not as urgent to have these
# raise an error.
util.warn(
"Setting %s on relationship() while also "
"setting viewonly=True does not make sense, as a "
"viewonly=True relationship does not perform persistence "
"operations. This configuration may raise an error "
"in a future release." % (k,)
)
def instrument_class(self, mapper):
attributes.register_descriptor(
mapper.class_,
self.key,
comparator=self.comparator_factory(self, mapper),
parententity=mapper,
doc=self.doc,
)
class Comparator(PropComparator):
"""Produce boolean, comparison, and other operators for
:class:`.RelationshipProperty` attributes.
See the documentation for :class:`.PropComparator` for a brief
overview of ORM level operator definition.
.. seealso::
:class:`.PropComparator`
:class:`.ColumnProperty.Comparator`
:class:`.ColumnOperators`
:ref:`types_operators`
:attr:`.TypeEngine.comparator_factory`
"""
_of_type = None
_extra_criteria = ()
def __init__(
self,
prop,
parentmapper,
adapt_to_entity=None,
of_type=None,
extra_criteria=(),
):
"""Construction of :class:`.RelationshipProperty.Comparator`
is internal to the ORM's attribute mechanics.
"""
self.prop = prop
self._parententity = parentmapper
self._adapt_to_entity = adapt_to_entity
if of_type:
self._of_type = of_type
self._extra_criteria = extra_criteria
def adapt_to_entity(self, adapt_to_entity):
return self.__class__(
self.property,
self._parententity,
adapt_to_entity=adapt_to_entity,
of_type=self._of_type,
)
@util.memoized_property
def entity(self):
"""The target entity referred to by this
:class:`.RelationshipProperty.Comparator`.
This is either a :class:`_orm.Mapper` or :class:`.AliasedInsp`
object.
This is the "target" or "remote" side of the
:func:`_orm.relationship`.
"""
return self.property.entity
@util.memoized_property
def mapper(self):
"""The target :class:`_orm.Mapper` referred to by this
:class:`.RelationshipProperty.Comparator`.
This is the "target" or "remote" side of the
:func:`_orm.relationship`.
"""
return self.property.mapper
@util.memoized_property
def _parententity(self):
return self.property.parent
def _source_selectable(self):
if self._adapt_to_entity:
return self._adapt_to_entity.selectable
else:
return self.property.parent._with_polymorphic_selectable
def __clause_element__(self):
adapt_from = self._source_selectable()
if self._of_type:
of_type_entity = inspect(self._of_type)
else:
of_type_entity = None
(
pj,
sj,
source,
dest,
secondary,
target_adapter,
) = self.property._create_joins(
source_selectable=adapt_from,
source_polymorphic=True,
of_type_entity=of_type_entity,
alias_secondary=True,
extra_criteria=self._extra_criteria,
)
if sj is not None:
return pj & sj
else:
return pj
def of_type(self, cls):
r"""Redefine this object in terms of a polymorphic subclass.
See :meth:`.PropComparator.of_type` for an example.
"""
return RelationshipProperty.Comparator(
self.property,
self._parententity,
adapt_to_entity=self._adapt_to_entity,
of_type=cls,
extra_criteria=self._extra_criteria,
)
def and_(self, *other):
"""Add AND criteria.
See :meth:`.PropComparator.and_` for an example.
.. versionadded:: 1.4
"""
return RelationshipProperty.Comparator(
self.property,
self._parententity,
adapt_to_entity=self._adapt_to_entity,
of_type=self._of_type,
extra_criteria=self._extra_criteria + other,
)
def in_(self, other):
"""Produce an IN clause - this is not implemented
for :func:`_orm.relationship`-based attributes at this time.
"""
raise NotImplementedError(
"in_() not yet supported for "
"relationships. For a simple "
"many-to-one, use in_() against "
"the set of foreign key values."
)
__hash__ = None
def __eq__(self, other):
"""Implement the ``==`` operator.
In a many-to-one context, such as::
MyClass.some_prop == <some object>
this will typically produce a
clause such as::
mytable.related_id == <some id>
Where ``<some id>`` is the primary key of the given
object.
The ``==`` operator provides partial functionality for non-
many-to-one comparisons:
* Comparisons against collections are not supported.
Use :meth:`~.RelationshipProperty.Comparator.contains`.
* Compared to a scalar one-to-many, will produce a
clause that compares the target columns in the parent to
the given target.
* Compared to a scalar many-to-many, an alias
of the association table will be rendered as
well, forming a natural join that is part of the
main body of the query. This will not work for
queries that go beyond simple AND conjunctions of
comparisons, such as those which use OR. Use
explicit joins, outerjoins, or
:meth:`~.RelationshipProperty.Comparator.has` for
more comprehensive non-many-to-one scalar
membership tests.
* Comparisons against ``None`` given in a one-to-many
or many-to-many context produce a NOT EXISTS clause.
"""
if isinstance(other, (util.NoneType, expression.Null)):
if self.property.direction in [ONETOMANY, MANYTOMANY]:
return ~self._criterion_exists()
else:
return _orm_annotate(
self.property._optimized_compare(
None, adapt_source=self.adapter
)
)
elif self.property.uselist:
raise sa_exc.InvalidRequestError(
"Can't compare a collection to an object or collection; "
"use contains() to test for membership."
)
else:
return _orm_annotate(
self.property._optimized_compare(
other, adapt_source=self.adapter
)
)
def _criterion_exists(self, criterion=None, **kwargs):
if getattr(self, "_of_type", None):
info = inspect(self._of_type)
target_mapper, to_selectable, is_aliased_class = (
info.mapper,
info.selectable,
info.is_aliased_class,
)
if self.property._is_self_referential and not is_aliased_class:
to_selectable = to_selectable._anonymous_fromclause()
single_crit = target_mapper._single_table_criterion
if single_crit is not None:
if criterion is not None:
criterion = single_crit & criterion
else:
criterion = single_crit
else:
is_aliased_class = False
to_selectable = None
if self.adapter:
source_selectable = self._source_selectable()
else:
source_selectable = None
(
pj,
sj,
source,
dest,
secondary,
target_adapter,
) = self.property._create_joins(
dest_selectable=to_selectable,
source_selectable=source_selectable,
)
for k in kwargs:
crit = getattr(self.property.mapper.class_, k) == kwargs[k]
if criterion is None:
criterion = crit
else:
criterion = criterion & crit
# annotate the *local* side of the join condition, in the case
# of pj + sj this is the full primaryjoin, in the case of just
# pj it's the local side of the primaryjoin.
if sj is not None:
j = _orm_annotate(pj) & sj
else:
j = _orm_annotate(pj, exclude=self.property.remote_side)
if (
criterion is not None
and target_adapter
and not is_aliased_class
):
# limit this adapter to annotated only?
criterion = target_adapter.traverse(criterion)
# only have the "joined left side" of what we
# return be subject to Query adaption. The right
# side of it is used for an exists() subquery and
# should not correlate or otherwise reach out
# to anything in the enclosing query.
if criterion is not None:
criterion = criterion._annotate(
{"no_replacement_traverse": True}
)
crit = j & sql.True_._ifnone(criterion)
if secondary is not None:
ex = (
sql.exists(1)
.where(crit)
.select_from(dest, secondary)
.correlate_except(dest, secondary)
)
else:
ex = (
sql.exists(1)
.where(crit)
.select_from(dest)
.correlate_except(dest)
)
return ex
def any(self, criterion=None, **kwargs):
"""Produce an expression that tests a collection against
particular criterion, using EXISTS.
An expression like::
session.query(MyClass).filter(
MyClass.somereference.any(SomeRelated.x==2)
)
Will produce a query like::
SELECT * FROM my_table WHERE
EXISTS (SELECT 1 FROM related WHERE related.my_id=my_table.id
AND related.x=2)
Because :meth:`~.RelationshipProperty.Comparator.any` uses
a correlated subquery, its performance is not nearly as
good when compared against large target tables as that of
using a join.
:meth:`~.RelationshipProperty.Comparator.any` is particularly
useful for testing for empty collections::
session.query(MyClass).filter(
~MyClass.somereference.any()
)
will produce::
SELECT * FROM my_table WHERE
NOT (EXISTS (SELECT 1 FROM related WHERE
related.my_id=my_table.id))
:meth:`~.RelationshipProperty.Comparator.any` is only
valid for collections, i.e. a :func:`_orm.relationship`
that has ``uselist=True``. For scalar references,
use :meth:`~.RelationshipProperty.Comparator.has`.
"""
if not self.property.uselist:
raise sa_exc.InvalidRequestError(
"'any()' not implemented for scalar "
"attributes. Use has()."
)
return self._criterion_exists(criterion, **kwargs)
def has(self, criterion=None, **kwargs):
"""Produce an expression that tests a scalar reference against
particular criterion, using EXISTS.
An expression like::
session.query(MyClass).filter(
MyClass.somereference.has(SomeRelated.x==2)
)
Will produce a query like::
SELECT * FROM my_table WHERE
EXISTS (SELECT 1 FROM related WHERE
related.id==my_table.related_id AND related.x=2)
Because :meth:`~.RelationshipProperty.Comparator.has` uses
a correlated subquery, its performance is not nearly as
good when compared against large target tables as that of
using a join.
:meth:`~.RelationshipProperty.Comparator.has` is only
valid for scalar references, i.e. a :func:`_orm.relationship`
that has ``uselist=False``. For collection references,
use :meth:`~.RelationshipProperty.Comparator.any`.
"""
if self.property.uselist:
raise sa_exc.InvalidRequestError(
"'has()' not implemented for collections. " "Use any()."
)
return self._criterion_exists(criterion, **kwargs)
def contains(self, other, **kwargs):
"""Return a simple expression that tests a collection for
containment of a particular item.
:meth:`~.RelationshipProperty.Comparator.contains` is
only valid for a collection, i.e. a
:func:`_orm.relationship` that implements
one-to-many or many-to-many with ``uselist=True``.
When used in a simple one-to-many context, an
expression like::
MyClass.contains(other)
Produces a clause like::
mytable.id == <some id>
Where ``<some id>`` is the value of the foreign key
attribute on ``other`` which refers to the primary
key of its parent object. From this it follows that
:meth:`~.RelationshipProperty.Comparator.contains` is
very useful when used with simple one-to-many
operations.
For many-to-many operations, the behavior of
:meth:`~.RelationshipProperty.Comparator.contains`
has more caveats. The association table will be
rendered in the statement, producing an "implicit"
join, that is, includes multiple tables in the FROM
clause which are equated in the WHERE clause::
query(MyClass).filter(MyClass.contains(other))
Produces a query like::
SELECT * FROM my_table, my_association_table AS
my_association_table_1 WHERE
my_table.id = my_association_table_1.parent_id
AND my_association_table_1.child_id = <some id>
Where ``<some id>`` would be the primary key of
``other``. From the above, it is clear that
:meth:`~.RelationshipProperty.Comparator.contains`
will **not** work with many-to-many collections when
used in queries that move beyond simple AND
conjunctions, such as multiple
:meth:`~.RelationshipProperty.Comparator.contains`
expressions joined by OR. In such cases subqueries or
explicit "outer joins" will need to be used instead.
See :meth:`~.RelationshipProperty.Comparator.any` for
a less-performant alternative using EXISTS, or refer
to :meth:`_query.Query.outerjoin`
as well as :ref:`ormtutorial_joins`
for more details on constructing outer joins.
"""
if not self.property.uselist:
raise sa_exc.InvalidRequestError(
"'contains' not implemented for scalar "
"attributes. Use =="
)
clause = self.property._optimized_compare(
other, adapt_source=self.adapter
)
if self.property.secondaryjoin is not None:
clause.negation_clause = self.__negated_contains_or_equals(
other
)
return clause
def __negated_contains_or_equals(self, other):
if self.property.direction == MANYTOONE:
state = attributes.instance_state(other)
def state_bindparam(local_col, state, remote_col):
dict_ = state.dict
return sql.bindparam(
local_col.key,
type_=local_col.type,
unique=True,
callable_=self.property._get_attr_w_warn_on_none(
self.property.mapper, state, dict_, remote_col
),
)
def adapt(col):
if self.adapter:
return self.adapter(col)
else:
return col
if self.property._use_get:
return sql.and_(
*[
sql.or_(
adapt(x)
!= state_bindparam(adapt(x), state, y),
adapt(x) == None,
)
for (x, y) in self.property.local_remote_pairs
]
)
criterion = sql.and_(
*[
x == y
for (x, y) in zip(
self.property.mapper.primary_key,
self.property.mapper.primary_key_from_instance(other),
)
]
)
return ~self._criterion_exists(criterion)
def __ne__(self, other):
"""Implement the ``!=`` operator.
In a many-to-one context, such as::
MyClass.some_prop != <some object>
This will typically produce a clause such as::
mytable.related_id != <some id>
Where ``<some id>`` is the primary key of the
given object.
The ``!=`` operator provides partial functionality for non-
many-to-one comparisons:
* Comparisons against collections are not supported.
Use
:meth:`~.RelationshipProperty.Comparator.contains`
in conjunction with :func:`_expression.not_`.
* Compared to a scalar one-to-many, will produce a
clause that compares the target columns in the parent to
the given target.
* Compared to a scalar many-to-many, an alias
of the association table will be rendered as
well, forming a natural join that is part of the
main body of the query. This will not work for
queries that go beyond simple AND conjunctions of
comparisons, such as those which use OR. Use
explicit joins, outerjoins, or
:meth:`~.RelationshipProperty.Comparator.has` in
conjunction with :func:`_expression.not_` for
more comprehensive non-many-to-one scalar
membership tests.
* Comparisons against ``None`` given in a one-to-many
or many-to-many context produce an EXISTS clause.
"""
if isinstance(other, (util.NoneType, expression.Null)):
if self.property.direction == MANYTOONE:
return _orm_annotate(
~self.property._optimized_compare(
None, adapt_source=self.adapter
)
)
else:
return self._criterion_exists()
elif self.property.uselist:
raise sa_exc.InvalidRequestError(
"Can't compare a collection"
" to an object or collection; use "
"contains() to test for membership."
)
else:
return _orm_annotate(self.__negated_contains_or_equals(other))
@util.memoized_property
@util.preload_module("sqlalchemy.orm.mapper")
def property(self):
mapperlib = util.preloaded.orm_mapper
if mapperlib.Mapper._new_mappers:
mapperlib.Mapper._configure_all()
return self.prop
def _with_parent(self, instance, alias_secondary=True, from_entity=None):
assert instance is not None
adapt_source = None
if from_entity is not None:
insp = inspect(from_entity)
if insp.is_aliased_class:
adapt_source = insp._adapter.adapt_clause
return self._optimized_compare(
instance,
value_is_parent=True,
adapt_source=adapt_source,
alias_secondary=alias_secondary,
)
def _optimized_compare(
self,
state,
value_is_parent=False,
adapt_source=None,
alias_secondary=True,
):
if state is not None:
try:
state = inspect(state)
except sa_exc.NoInspectionAvailable:
state = None
if state is None or not getattr(state, "is_instance", False):
raise sa_exc.ArgumentError(
"Mapped instance expected for relationship "
"comparison to object. Classes, queries and other "
"SQL elements are not accepted in this context; for "
"comparison with a subquery, "
"use %s.has(**criteria)." % self
)
reverse_direction = not value_is_parent
if state is None:
return self._lazy_none_clause(
reverse_direction, adapt_source=adapt_source
)
if not reverse_direction:
criterion, bind_to_col = (
self._lazy_strategy._lazywhere,
self._lazy_strategy._bind_to_col,
)
else:
criterion, bind_to_col = (
self._lazy_strategy._rev_lazywhere,
self._lazy_strategy._rev_bind_to_col,
)
if reverse_direction:
mapper = self.mapper
else:
mapper = self.parent
dict_ = attributes.instance_dict(state.obj())
def visit_bindparam(bindparam):
if bindparam._identifying_key in bind_to_col:
bindparam.callable = self._get_attr_w_warn_on_none(
mapper,
state,
dict_,
bind_to_col[bindparam._identifying_key],
)
if self.secondary is not None and alias_secondary:
criterion = ClauseAdapter(
self.secondary._anonymous_fromclause()
).traverse(criterion)
criterion = visitors.cloned_traverse(
criterion, {}, {"bindparam": visit_bindparam}
)
if adapt_source:
criterion = adapt_source(criterion)
return criterion
def _get_attr_w_warn_on_none(self, mapper, state, dict_, column):
"""Create the callable that is used in a many-to-one expression.
E.g.::
u1 = s.query(User).get(5)
expr = Address.user == u1
Above, the SQL should be "address.user_id = 5". The callable
returned by this method produces the value "5" based on the identity
of ``u1``.
"""
# in this callable, we're trying to thread the needle through
# a wide variety of scenarios, including:
#
# * the object hasn't been flushed yet and there's no value for
# the attribute as of yet
#
# * the object hasn't been flushed yet but it has a user-defined
# value
#
# * the object has a value but it's expired and not locally present
#
# * the object has a value but it's expired and not locally present,
# and the object is also detached
#
# * The object hadn't been flushed yet, there was no value, but
# later, the object has been expired and detached, and *now*
# they're trying to evaluate it
#
# * the object had a value, but it was changed to a new value, and
# then expired
#
# * the object had a value, but it was changed to a new value, and
# then expired, then the object was detached
#
# * the object has a user-set value, but it's None and we don't do
# the comparison correctly for that so warn
#
prop = mapper.get_property_by_column(column)
# by invoking this method, InstanceState will track the last known
# value for this key each time the attribute is to be expired.
# this feature was added explicitly for use in this method.
state._track_last_known_value(prop.key)
def _go():
last_known = to_return = state._last_known_values[prop.key]
existing_is_available = last_known is not attributes.NO_VALUE
# we support that the value may have changed. so here we
# try to get the most recent value including re-fetching.
# only if we can't get a value now due to detachment do we return
# the last known value
current_value = mapper._get_state_attr_by_column(
state,
dict_,
column,
passive=attributes.PASSIVE_OFF
if state.persistent
else attributes.PASSIVE_NO_FETCH ^ attributes.INIT_OK,
)
if current_value is attributes.NEVER_SET:
if not existing_is_available:
raise sa_exc.InvalidRequestError(
"Can't resolve value for column %s on object "
"%s; no value has been set for this column"
% (column, state_str(state))
)
elif current_value is attributes.PASSIVE_NO_RESULT:
if not existing_is_available:
raise sa_exc.InvalidRequestError(
"Can't resolve value for column %s on object "
"%s; the object is detached and the value was "
"expired" % (column, state_str(state))
)
else:
to_return = current_value
if to_return is None:
util.warn(
"Got None for value of column %s; this is unsupported "
"for a relationship comparison and will not "
"currently produce an IS comparison "
"(but may in a future release)" % column
)
return to_return
return _go
def _lazy_none_clause(self, reverse_direction=False, adapt_source=None):
if not reverse_direction:
criterion, bind_to_col = (
self._lazy_strategy._lazywhere,
self._lazy_strategy._bind_to_col,
)
else:
criterion, bind_to_col = (
self._lazy_strategy._rev_lazywhere,
self._lazy_strategy._rev_bind_to_col,
)
criterion = adapt_criterion_to_null(criterion, bind_to_col)
if adapt_source:
criterion = adapt_source(criterion)
return criterion
def __str__(self):
return str(self.parent.class_.__name__) + "." + self.key
def merge(
self,
session,
source_state,
source_dict,
dest_state,
dest_dict,
load,
_recursive,
_resolve_conflict_map,
):
if load:
for r in self._reverse_property:
if (source_state, r) in _recursive:
return
if "merge" not in self._cascade:
return
if self.key not in source_dict:
return
if self.uselist:
impl = source_state.get_impl(self.key)
instances_iterable = impl.get_collection(source_state, source_dict)
# if this is a CollectionAttributeImpl, then empty should
# be False, otherwise "self.key in source_dict" should not be
# True
assert not instances_iterable.empty if impl.collection else True
if load:
# for a full merge, pre-load the destination collection,
# so that individual _merge of each item pulls from identity
# map for those already present.
# also assumes CollectionAttributeImpl behavior of loading
# "old" list in any case
dest_state.get_impl(self.key).get(dest_state, dest_dict)
dest_list = []
for current in instances_iterable:
current_state = attributes.instance_state(current)
current_dict = attributes.instance_dict(current)
_recursive[(current_state, self)] = True
obj = session._merge(
current_state,
current_dict,
load=load,
_recursive=_recursive,
_resolve_conflict_map=_resolve_conflict_map,
)
if obj is not None:
dest_list.append(obj)
if not load:
coll = attributes.init_state_collection(
dest_state, dest_dict, self.key
)
for c in dest_list:
coll.append_without_event(c)
else:
dest_state.get_impl(self.key).set(
dest_state, dest_dict, dest_list, _adapt=False
)
else:
current = source_dict[self.key]
if current is not None:
current_state = attributes.instance_state(current)
current_dict = attributes.instance_dict(current)
_recursive[(current_state, self)] = True
obj = session._merge(
current_state,
current_dict,
load=load,
_recursive=_recursive,
_resolve_conflict_map=_resolve_conflict_map,
)
else:
obj = None
if not load:
dest_dict[self.key] = obj
else:
dest_state.get_impl(self.key).set(
dest_state, dest_dict, obj, None
)
def _value_as_iterable(
self, state, dict_, key, passive=attributes.PASSIVE_OFF
):
"""Return a list of tuples (state, obj) for the given
key.
returns an empty list if the value is None/empty/PASSIVE_NO_RESULT
"""
impl = state.manager[key].impl
x = impl.get(state, dict_, passive=passive)
if x is attributes.PASSIVE_NO_RESULT or x is None:
return []
elif hasattr(impl, "get_collection"):
return [
(attributes.instance_state(o), o)
for o in impl.get_collection(state, dict_, x, passive=passive)
]
else:
return [(attributes.instance_state(x), x)]
def cascade_iterator(
self, type_, state, dict_, visited_states, halt_on=None
):
# assert type_ in self._cascade
# only actively lazy load on the 'delete' cascade
if type_ != "delete" or self.passive_deletes:
passive = attributes.PASSIVE_NO_INITIALIZE
else:
passive = attributes.PASSIVE_OFF
if type_ == "save-update":
tuples = state.manager[self.key].impl.get_all_pending(state, dict_)
else:
tuples = self._value_as_iterable(
state, dict_, self.key, passive=passive
)
skip_pending = (
type_ == "refresh-expire" and "delete-orphan" not in self._cascade
)
for instance_state, c in tuples:
if instance_state in visited_states:
continue
if c is None:
# would like to emit a warning here, but
# would not be consistent with collection.append(None)
# current behavior of silently skipping.
# see [ticket:2229]
continue
instance_dict = attributes.instance_dict(c)
if halt_on and halt_on(instance_state):
continue
if skip_pending and not instance_state.key:
continue
instance_mapper = instance_state.manager.mapper
if not instance_mapper.isa(self.mapper.class_manager.mapper):
raise AssertionError(
"Attribute '%s' on class '%s' "
"doesn't handle objects "
"of type '%s'"
% (self.key, self.parent.class_, c.__class__)
)
visited_states.add(instance_state)
yield c, instance_mapper, instance_state, instance_dict
@property
def _effective_sync_backref(self):
if self.viewonly:
return False
else:
return self.sync_backref is not False
@staticmethod
def _check_sync_backref(rel_a, rel_b):
if rel_a.viewonly and rel_b.sync_backref:
raise sa_exc.InvalidRequestError(
"Relationship %s cannot specify sync_backref=True since %s "
"includes viewonly=True." % (rel_b, rel_a)
)
if (
rel_a.viewonly
and not rel_b.viewonly
and rel_b.sync_backref is not False
):
rel_b.sync_backref = False
def _add_reverse_property(self, key):
other = self.mapper.get_property(key, _configure_mappers=False)
# viewonly and sync_backref cases
# 1. self.viewonly==True and other.sync_backref==True -> error
# 2. self.viewonly==True and other.viewonly==False and
# other.sync_backref==None -> warn sync_backref=False, set to False
self._check_sync_backref(self, other)
# 3. other.viewonly==True and self.sync_backref==True -> error
# 4. other.viewonly==True and self.viewonly==False and
# self.sync_backref==None -> warn sync_backref=False, set to False
self._check_sync_backref(other, self)
self._reverse_property.add(other)
other._reverse_property.add(self)
if not other.mapper.common_parent(self.parent):
raise sa_exc.ArgumentError(
"reverse_property %r on "
"relationship %s references relationship %s, which "
"does not reference mapper %s"
% (key, self, other, self.parent)
)
if (
self.direction in (ONETOMANY, MANYTOONE)
and self.direction == other.direction
):
raise sa_exc.ArgumentError(
"%s and back-reference %s are "
"both of the same direction %r. Did you mean to "
"set remote_side on the many-to-one side ?"
% (other, self, self.direction)
)
@util.memoized_property
@util.preload_module("sqlalchemy.orm.mapper")
def entity(self): # type: () -> Union[AliasedInsp, mapperlib.Mapper]
"""Return the target mapped entity, which is an inspect() of the
class or aliased class that is referred towards.
"""
mapperlib = util.preloaded.orm_mapper
if callable(self.argument) and not isinstance(
self.argument, (type, mapperlib.Mapper)
):
argument = self.argument()
else:
argument = self.argument
if isinstance(argument, type):
return mapperlib.class_mapper(argument, configure=False)
try:
entity = inspect(argument)
except sa_exc.NoInspectionAvailable:
pass
else:
if hasattr(entity, "mapper"):
return entity
raise sa_exc.ArgumentError(
"relationship '%s' expects "
"a class or a mapper argument (received: %s)"
% (self.key, type(argument))
)
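    # A minimal sketch of the argument forms resolved above, assuming a
    # declarative Address class (names are hypothetical):
    #
    #     addresses = relationship(Address)           # mapped class
    #     addresses = relationship(lambda: Address)   # callable, resolved late
    #     addresses = relationship("Address")         # string; declarative
    #                                                 # wraps it in a resolver
    #
    # all of which arrive here as something inspect() can turn into a mapper.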
@util.memoized_property
def mapper(self):
"""Return the targeted :class:`_orm.Mapper` for this
:class:`.RelationshipProperty`.
This is a lazy-initializing static attribute.
"""
return self.entity.mapper
def do_init(self):
self._check_conflicts()
self._process_dependent_arguments()
self._setup_join_conditions()
self._check_cascade_settings(self._cascade)
self._post_init()
self._generate_backref()
self._join_condition._warn_for_conflicting_sync_targets()
super(RelationshipProperty, self).do_init()
self._lazy_strategy = self._get_strategy((("lazy", "select"),))
def _process_dependent_arguments(self):
"""Convert incoming configuration arguments to their
proper form.
Callables are resolved, ORM annotations removed.
"""
# accept callables for other attributes which may require
# deferred initialization. This technique is used
# by declarative "string configs" and some recipes.
for attr in (
"order_by",
"primaryjoin",
"secondaryjoin",
"secondary",
"_user_defined_foreign_keys",
"remote_side",
):
attr_value = getattr(self, attr)
if callable(attr_value):
setattr(self, attr, attr_value())
# remove "annotations" which are present if mapped class
# descriptors are used to create the join expression.
for attr in "primaryjoin", "secondaryjoin":
val = getattr(self, attr)
if val is not None:
setattr(
self,
attr,
_orm_deannotate(
coercions.expect(
roles.ColumnArgumentRole, val, argname=attr
)
),
)
# ensure expressions in self.order_by, foreign_keys,
# remote_side are all columns, not strings.
if self.order_by is not False and self.order_by is not None:
self.order_by = [
coercions.expect(
roles.ColumnArgumentRole, x, argname="order_by"
)
for x in util.to_list(self.order_by)
]
self._user_defined_foreign_keys = util.column_set(
coercions.expect(
roles.ColumnArgumentRole, x, argname="foreign_keys"
)
for x in util.to_column_set(self._user_defined_foreign_keys)
)
self.remote_side = util.column_set(
coercions.expect(
roles.ColumnArgumentRole, x, argname="remote_side"
)
for x in util.to_column_set(self.remote_side)
)
self.target = self.entity.persist_selectable
def _setup_join_conditions(self):
self._join_condition = jc = JoinCondition(
parent_persist_selectable=self.parent.persist_selectable,
child_persist_selectable=self.entity.persist_selectable,
parent_local_selectable=self.parent.local_table,
child_local_selectable=self.entity.local_table,
primaryjoin=self.primaryjoin,
secondary=self.secondary,
secondaryjoin=self.secondaryjoin,
parent_equivalents=self.parent._equivalent_columns,
child_equivalents=self.mapper._equivalent_columns,
consider_as_foreign_keys=self._user_defined_foreign_keys,
local_remote_pairs=self.local_remote_pairs,
remote_side=self.remote_side,
self_referential=self._is_self_referential,
prop=self,
support_sync=not self.viewonly,
can_be_synced_fn=self._columns_are_mapped,
)
self.primaryjoin = jc.primaryjoin
self.secondaryjoin = jc.secondaryjoin
self.direction = jc.direction
self.local_remote_pairs = jc.local_remote_pairs
self.remote_side = jc.remote_columns
self.local_columns = jc.local_columns
self.synchronize_pairs = jc.synchronize_pairs
self._calculated_foreign_keys = jc.foreign_key_columns
self.secondary_synchronize_pairs = jc.secondary_synchronize_pairs
@util.preload_module("sqlalchemy.orm.mapper")
def _check_conflicts(self):
"""Test that this relationship is legal, warn about
inheritance conflicts."""
mapperlib = util.preloaded.orm_mapper
if self.parent.non_primary and not mapperlib.class_mapper(
self.parent.class_, configure=False
).has_property(self.key):
raise sa_exc.ArgumentError(
"Attempting to assign a new "
"relationship '%s' to a non-primary mapper on "
"class '%s'. New relationships can only be added "
"to the primary mapper, i.e. the very first mapper "
"created for class '%s' "
% (
self.key,
self.parent.class_.__name__,
self.parent.class_.__name__,
)
)
@property
def cascade(self):
"""Return the current cascade setting for this
:class:`.RelationshipProperty`.
"""
return self._cascade
@cascade.setter
def cascade(self, cascade):
self._set_cascade(cascade)
def _set_cascade(self, cascade):
cascade = CascadeOptions(cascade)
if self.viewonly:
non_viewonly = set(cascade).difference(
CascadeOptions._viewonly_cascades
)
if non_viewonly:
raise sa_exc.ArgumentError(
'Cascade settings "%s" apply to persistence operations '
"and should not be combined with a viewonly=True "
"relationship." % (", ".join(sorted(non_viewonly)))
)
if "mapper" in self.__dict__:
self._check_cascade_settings(cascade)
self._cascade = cascade
if self._dependency_processor:
self._dependency_processor.cascade = cascade
def _check_cascade_settings(self, cascade):
if (
cascade.delete_orphan
and not self.single_parent
and (self.direction is MANYTOMANY or self.direction is MANYTOONE)
):
raise sa_exc.ArgumentError(
"For %(direction)s relationship %(rel)s, delete-orphan "
"cascade is normally "
'configured only on the "one" side of a one-to-many '
"relationship, "
'and not on the "many" side of a many-to-one or many-to-many '
"relationship. "
"To force this relationship to allow a particular "
'"%(relatedcls)s" object to be referred towards by only '
'a single "%(clsname)s" object at a time via the '
"%(rel)s relationship, which "
"would allow "
"delete-orphan cascade to take place in this direction, set "
"the single_parent=True flag."
% {
"rel": self,
"direction": "many-to-one"
if self.direction is MANYTOONE
else "many-to-many",
"clsname": self.parent.class_.__name__,
"relatedcls": self.mapper.class_.__name__,
},
code="bbf0",
)
if self.direction is MANYTOONE and self.passive_deletes:
util.warn(
"On %s, 'passive_deletes' is normally configured "
"on one-to-many, one-to-one, many-to-many "
"relationships only." % self
)
if self.passive_deletes == "all" and (
"delete" in cascade or "delete-orphan" in cascade
):
raise sa_exc.ArgumentError(
"On %s, can't set passive_deletes='all' in conjunction "
"with 'delete' or 'delete-orphan' cascade" % self
)
if cascade.delete_orphan:
self.mapper.primary_mapper()._delete_orphans.append(
(self.key, self.parent.class_)
)
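    # A minimal sketch of the rule enforced above, assuming hypothetical
    # Parent/Child classes with the foreign key on the child table:
    #
    #     # on the "many" side: rejected with the error built above
    #     parent = relationship(Parent, cascade="all, delete-orphan")
    #
    #     # accepted once single_parent=True asserts exclusive ownership
    #     parent = relationship(
    #         Parent, cascade="all, delete-orphan", single_parent=True
    #     )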
def _persists_for(self, mapper):
"""Return True if this property will persist values on behalf
of the given mapper.
"""
return (
self.key in mapper.relationships
and mapper.relationships[self.key] is self
)
def _columns_are_mapped(self, *cols):
"""Return True if all columns in the given collection are
mapped by the tables referenced by this :class:`.Relationship`.
"""
for c in cols:
if (
self.secondary is not None
and self.secondary.c.contains_column(c)
):
continue
if not self.parent.persist_selectable.c.contains_column(
c
) and not self.target.c.contains_column(c):
return False
return True
def _generate_backref(self):
"""Interpret the 'backref' instruction to create a
:func:`_orm.relationship` complementary to this one."""
if self.parent.non_primary:
return
if self.backref is not None and not self.back_populates:
if isinstance(self.backref, util.string_types):
backref_key, kwargs = self.backref, {}
else:
backref_key, kwargs = self.backref
mapper = self.mapper.primary_mapper()
if not mapper.concrete:
check = set(mapper.iterate_to_root()).union(
mapper.self_and_descendants
)
for m in check:
if m.has_property(backref_key) and not m.concrete:
raise sa_exc.ArgumentError(
"Error creating backref "
"'%s' on relationship '%s': property of that "
"name exists on mapper '%s'"
% (backref_key, self, m)
)
# determine primaryjoin/secondaryjoin for the
# backref. Use the one we had, so that
# a custom join doesn't have to be specified in
# both directions.
if self.secondary is not None:
# for many to many, just switch primaryjoin/
# secondaryjoin. use the annotated
# pj/sj on the _join_condition.
pj = kwargs.pop(
"primaryjoin",
self._join_condition.secondaryjoin_minus_local,
)
sj = kwargs.pop(
"secondaryjoin",
self._join_condition.primaryjoin_minus_local,
)
else:
pj = kwargs.pop(
"primaryjoin",
self._join_condition.primaryjoin_reverse_remote,
)
sj = kwargs.pop("secondaryjoin", None)
if sj:
raise sa_exc.InvalidRequestError(
"Can't assign 'secondaryjoin' on a backref "
"against a non-secondary relationship."
)
foreign_keys = kwargs.pop(
"foreign_keys", self._user_defined_foreign_keys
)
parent = self.parent.primary_mapper()
kwargs.setdefault("viewonly", self.viewonly)
kwargs.setdefault("post_update", self.post_update)
kwargs.setdefault("passive_updates", self.passive_updates)
kwargs.setdefault("sync_backref", self.sync_backref)
self.back_populates = backref_key
relationship = RelationshipProperty(
parent,
self.secondary,
pj,
sj,
foreign_keys=foreign_keys,
back_populates=self.key,
**kwargs
)
mapper._configure_property(backref_key, relationship)
if self.back_populates:
self._add_reverse_property(self.back_populates)
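    # A minimal sketch of the two spellings reconciled above, assuming
    # hypothetical User/Address classes:
    #
    #     # shorthand: the complementary property is generated here
    #     addresses = relationship("Address", backref="user")
    #
    #     # explicit: both sides declared, linked via back_populates
    #     # User.addresses = relationship("Address", back_populates="user")
    #     # Address.user   = relationship("User", back_populates="addresses")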
@util.preload_module("sqlalchemy.orm.dependency")
def _post_init(self):
dependency = util.preloaded.orm_dependency
if self.uselist is None:
self.uselist = self.direction is not MANYTOONE
if not self.viewonly:
self._dependency_processor = (
dependency.DependencyProcessor.from_relationship
)(self)
@util.memoized_property
def _use_get(self):
"""memoize the 'use_get' attribute of this RelationshipLoader's
lazyloader."""
strategy = self._lazy_strategy
return strategy.use_get
@util.memoized_property
def _is_self_referential(self):
return self.mapper.common_parent(self.parent)
def _create_joins(
self,
source_polymorphic=False,
source_selectable=None,
dest_selectable=None,
of_type_entity=None,
alias_secondary=False,
extra_criteria=(),
):
aliased = False
if alias_secondary and self.secondary is not None:
aliased = True
if source_selectable is None:
if source_polymorphic and self.parent.with_polymorphic:
source_selectable = self.parent._with_polymorphic_selectable
if of_type_entity:
dest_mapper = of_type_entity.mapper
if dest_selectable is None:
dest_selectable = of_type_entity.selectable
aliased = True
else:
dest_mapper = self.mapper
if dest_selectable is None:
dest_selectable = self.entity.selectable
if self.mapper.with_polymorphic:
aliased = True
if self._is_self_referential and source_selectable is None:
dest_selectable = dest_selectable._anonymous_fromclause()
aliased = True
elif (
dest_selectable is not self.mapper._with_polymorphic_selectable
or self.mapper.with_polymorphic
):
aliased = True
single_crit = dest_mapper._single_table_criterion
aliased = aliased or (
source_selectable is not None
and (
source_selectable
is not self.parent._with_polymorphic_selectable
or source_selectable._is_subquery
)
)
(
primaryjoin,
secondaryjoin,
secondary,
target_adapter,
dest_selectable,
) = self._join_condition.join_targets(
source_selectable,
dest_selectable,
aliased,
single_crit,
extra_criteria,
)
if source_selectable is None:
source_selectable = self.parent.local_table
if dest_selectable is None:
dest_selectable = self.entity.local_table
return (
primaryjoin,
secondaryjoin,
source_selectable,
dest_selectable,
secondary,
target_adapter,
)
def _annotate_columns(element, annotations):
def clone(elem):
if isinstance(elem, expression.ColumnClause):
elem = elem._annotate(annotations.copy())
elem._copy_internals(clone=clone)
return elem
if element is not None:
element = clone(element)
clone = None # remove gc cycles
return element
class JoinCondition(object):
def __init__(
self,
parent_persist_selectable,
child_persist_selectable,
parent_local_selectable,
child_local_selectable,
primaryjoin=None,
secondary=None,
secondaryjoin=None,
parent_equivalents=None,
child_equivalents=None,
consider_as_foreign_keys=None,
local_remote_pairs=None,
remote_side=None,
self_referential=False,
prop=None,
support_sync=True,
can_be_synced_fn=lambda *c: True,
):
self.parent_persist_selectable = parent_persist_selectable
self.parent_local_selectable = parent_local_selectable
self.child_persist_selectable = child_persist_selectable
self.child_local_selectable = child_local_selectable
self.parent_equivalents = parent_equivalents
self.child_equivalents = child_equivalents
self.primaryjoin = primaryjoin
self.secondaryjoin = secondaryjoin
self.secondary = secondary
self.consider_as_foreign_keys = consider_as_foreign_keys
self._local_remote_pairs = local_remote_pairs
self._remote_side = remote_side
self.prop = prop
self.self_referential = self_referential
self.support_sync = support_sync
self.can_be_synced_fn = can_be_synced_fn
self._determine_joins()
self._sanitize_joins()
self._annotate_fks()
self._annotate_remote()
self._annotate_local()
self._annotate_parentmapper()
self._setup_pairs()
self._check_foreign_cols(self.primaryjoin, True)
if self.secondaryjoin is not None:
self._check_foreign_cols(self.secondaryjoin, False)
self._determine_direction()
self._check_remote_side()
self._log_joins()
def _log_joins(self):
if self.prop is None:
return
log = self.prop.logger
log.info("%s setup primary join %s", self.prop, self.primaryjoin)
log.info("%s setup secondary join %s", self.prop, self.secondaryjoin)
log.info(
"%s synchronize pairs [%s]",
self.prop,
",".join(
"(%s => %s)" % (l, r) for (l, r) in self.synchronize_pairs
),
)
log.info(
"%s secondary synchronize pairs [%s]",
self.prop,
",".join(
"(%s => %s)" % (l, r)
for (l, r) in self.secondary_synchronize_pairs or []
),
)
log.info(
"%s local/remote pairs [%s]",
self.prop,
",".join(
"(%s / %s)" % (l, r) for (l, r) in self.local_remote_pairs
),
)
log.info(
"%s remote columns [%s]",
self.prop,
",".join("%s" % col for col in self.remote_columns),
)
log.info(
"%s local columns [%s]",
self.prop,
",".join("%s" % col for col in self.local_columns),
)
log.info("%s relationship direction %s", self.prop, self.direction)
def _sanitize_joins(self):
"""remove the parententity annotation from our join conditions which
can leak in here based on some declarative patterns and maybe others.
We'd want to remove "parentmapper" also, but apparently there's
an exotic use case in _join_fixture_inh_selfref_w_entity
that relies upon it being present, see :ticket:`3364`.
"""
self.primaryjoin = _deep_deannotate(
self.primaryjoin, values=("parententity", "orm_key")
)
if self.secondaryjoin is not None:
self.secondaryjoin = _deep_deannotate(
self.secondaryjoin, values=("parententity", "orm_key")
)
def _determine_joins(self):
"""Determine the 'primaryjoin' and 'secondaryjoin' attributes,
if not passed to the constructor already.
This is based on analysis of the foreign key relationships
between the parent and target mapped selectables.
"""
if self.secondaryjoin is not None and self.secondary is None:
raise sa_exc.ArgumentError(
"Property %s specified with secondary "
"join condition but "
"no secondary argument" % self.prop
)
# find a join between the given mapper's mapped table and
# the given table. will try the mapper's local table first
# for more specificity, then if not found will try the more
# general mapped table, which in the case of inheritance is
# a join.
try:
consider_as_foreign_keys = self.consider_as_foreign_keys or None
if self.secondary is not None:
if self.secondaryjoin is None:
self.secondaryjoin = join_condition(
self.child_persist_selectable,
self.secondary,
a_subset=self.child_local_selectable,
consider_as_foreign_keys=consider_as_foreign_keys,
)
if self.primaryjoin is None:
self.primaryjoin = join_condition(
self.parent_persist_selectable,
self.secondary,
a_subset=self.parent_local_selectable,
consider_as_foreign_keys=consider_as_foreign_keys,
)
else:
if self.primaryjoin is None:
self.primaryjoin = join_condition(
self.parent_persist_selectable,
self.child_persist_selectable,
a_subset=self.parent_local_selectable,
consider_as_foreign_keys=consider_as_foreign_keys,
)
except sa_exc.NoForeignKeysError as nfe:
if self.secondary is not None:
util.raise_(
sa_exc.NoForeignKeysError(
"Could not determine join "
"condition between parent/child tables on "
"relationship %s - there are no foreign keys "
"linking these tables via secondary table '%s'. "
"Ensure that referencing columns are associated "
"with a ForeignKey or ForeignKeyConstraint, or "
"specify 'primaryjoin' and 'secondaryjoin' "
"expressions." % (self.prop, self.secondary)
),
from_=nfe,
)
else:
util.raise_(
sa_exc.NoForeignKeysError(
"Could not determine join "
"condition between parent/child tables on "
"relationship %s - there are no foreign keys "
"linking these tables. "
"Ensure that referencing columns are associated "
"with a ForeignKey or ForeignKeyConstraint, or "
"specify a 'primaryjoin' expression." % self.prop
),
from_=nfe,
)
except sa_exc.AmbiguousForeignKeysError as afe:
if self.secondary is not None:
util.raise_(
sa_exc.AmbiguousForeignKeysError(
"Could not determine join "
"condition between parent/child tables on "
"relationship %s - there are multiple foreign key "
"paths linking the tables via secondary table '%s'. "
"Specify the 'foreign_keys' "
"argument, providing a list of those columns which "
"should be counted as containing a foreign key "
"reference from the secondary table to each of the "
"parent and child tables."
% (self.prop, self.secondary)
),
from_=afe,
)
else:
util.raise_(
sa_exc.AmbiguousForeignKeysError(
"Could not determine join "
"condition between parent/child tables on "
"relationship %s - there are multiple foreign key "
"paths linking the tables. Specify the "
"'foreign_keys' argument, providing a list of those "
"columns which should be counted as containing a "
"foreign key reference to the parent table."
% self.prop
),
from_=afe,
)
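    # A minimal sketch of the usual remedies when the errors above fire,
    # assuming a hypothetical User with two foreign keys to Address
    # (billing_address_id, shipping_address_id):
    #
    #     billing_address = relationship(
    #         "Address", foreign_keys="[User.billing_address_id]"
    #     )
    #     # or spell the join out explicitly:
    #     billing_address = relationship(
    #         "Address",
    #         primaryjoin="User.billing_address_id == Address.id",
    #     )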
@property
def primaryjoin_minus_local(self):
return _deep_deannotate(self.primaryjoin, values=("local", "remote"))
@property
def secondaryjoin_minus_local(self):
return _deep_deannotate(self.secondaryjoin, values=("local", "remote"))
@util.memoized_property
def primaryjoin_reverse_remote(self):
"""Return the primaryjoin condition suitable for the
"reverse" direction.
If the primaryjoin was delivered here with pre-existing
"remote" annotations, the local/remote annotations
are reversed. Otherwise, the local/remote annotations
are removed.
"""
if self._has_remote_annotations:
def replace(element):
if "remote" in element._annotations:
v = dict(element._annotations)
del v["remote"]
v["local"] = True
return element._with_annotations(v)
elif "local" in element._annotations:
v = dict(element._annotations)
del v["local"]
v["remote"] = True
return element._with_annotations(v)
return visitors.replacement_traverse(self.primaryjoin, {}, replace)
else:
if self._has_foreign_annotations:
# TODO: coverage
return _deep_deannotate(
self.primaryjoin, values=("local", "remote")
)
else:
return _deep_deannotate(self.primaryjoin)
def _has_annotation(self, clause, annotation):
for col in visitors.iterate(clause, {}):
if annotation in col._annotations:
return True
else:
return False
@util.memoized_property
def _has_foreign_annotations(self):
return self._has_annotation(self.primaryjoin, "foreign")
@util.memoized_property
def _has_remote_annotations(self):
return self._has_annotation(self.primaryjoin, "remote")
def _annotate_fks(self):
"""Annotate the primaryjoin and secondaryjoin
structures with 'foreign' annotations marking columns
considered as foreign.
"""
if self._has_foreign_annotations:
return
if self.consider_as_foreign_keys:
self._annotate_from_fk_list()
else:
self._annotate_present_fks()
def _annotate_from_fk_list(self):
def check_fk(col):
if col in self.consider_as_foreign_keys:
return col._annotate({"foreign": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, check_fk
)
if self.secondaryjoin is not None:
self.secondaryjoin = visitors.replacement_traverse(
self.secondaryjoin, {}, check_fk
)
def _annotate_present_fks(self):
if self.secondary is not None:
secondarycols = util.column_set(self.secondary.c)
else:
secondarycols = set()
def is_foreign(a, b):
if isinstance(a, schema.Column) and isinstance(b, schema.Column):
if a.references(b):
return a
elif b.references(a):
return b
if secondarycols:
if a in secondarycols and b not in secondarycols:
return a
elif b in secondarycols and a not in secondarycols:
return b
def visit_binary(binary):
if not isinstance(
binary.left, sql.ColumnElement
) or not isinstance(binary.right, sql.ColumnElement):
return
if (
"foreign" not in binary.left._annotations
and "foreign" not in binary.right._annotations
):
col = is_foreign(binary.left, binary.right)
if col is not None:
if col.compare(binary.left):
binary.left = binary.left._annotate({"foreign": True})
elif col.compare(binary.right):
binary.right = binary.right._annotate(
{"foreign": True}
)
self.primaryjoin = visitors.cloned_traverse(
self.primaryjoin, {}, {"binary": visit_binary}
)
if self.secondaryjoin is not None:
self.secondaryjoin = visitors.cloned_traverse(
self.secondaryjoin, {}, {"binary": visit_binary}
)
def _refers_to_parent_table(self):
"""Return True if the join condition contains column
comparisons where both columns are in both tables.
"""
pt = self.parent_persist_selectable
mt = self.child_persist_selectable
result = [False]
def visit_binary(binary):
c, f = binary.left, binary.right
if (
isinstance(c, expression.ColumnClause)
and isinstance(f, expression.ColumnClause)
and pt.is_derived_from(c.table)
and pt.is_derived_from(f.table)
and mt.is_derived_from(c.table)
and mt.is_derived_from(f.table)
):
result[0] = True
visitors.traverse(self.primaryjoin, {}, {"binary": visit_binary})
return result[0]
def _tables_overlap(self):
"""Return True if parent/child tables have some overlap."""
return selectables_overlap(
self.parent_persist_selectable, self.child_persist_selectable
)
def _annotate_remote(self):
"""Annotate the primaryjoin and secondaryjoin
structures with 'remote' annotations marking columns
considered as part of the 'remote' side.
"""
if self._has_remote_annotations:
return
if self.secondary is not None:
self._annotate_remote_secondary()
elif self._local_remote_pairs or self._remote_side:
self._annotate_remote_from_args()
elif self._refers_to_parent_table():
self._annotate_selfref(
lambda col: "foreign" in col._annotations, False
)
elif self._tables_overlap():
self._annotate_remote_with_overlap()
else:
self._annotate_remote_distinct_selectables()
def _annotate_remote_secondary(self):
"""annotate 'remote' in primaryjoin, secondaryjoin
when 'secondary' is present.
"""
def repl(element):
if self.secondary.c.contains_column(element):
return element._annotate({"remote": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, repl
)
self.secondaryjoin = visitors.replacement_traverse(
self.secondaryjoin, {}, repl
)
def _annotate_selfref(self, fn, remote_side_given):
"""annotate 'remote' in primaryjoin, secondaryjoin
when the relationship is detected as self-referential.
"""
def visit_binary(binary):
equated = binary.left.compare(binary.right)
if isinstance(binary.left, expression.ColumnClause) and isinstance(
binary.right, expression.ColumnClause
):
# assume one to many - FKs are "remote"
if fn(binary.left):
binary.left = binary.left._annotate({"remote": True})
if fn(binary.right) and not equated:
binary.right = binary.right._annotate({"remote": True})
elif not remote_side_given:
self._warn_non_column_elements()
self.primaryjoin = visitors.cloned_traverse(
self.primaryjoin, {}, {"binary": visit_binary}
)
def _annotate_remote_from_args(self):
"""annotate 'remote' in primaryjoin, secondaryjoin
when the 'remote_side' or '_local_remote_pairs'
arguments are used.
"""
if self._local_remote_pairs:
if self._remote_side:
raise sa_exc.ArgumentError(
"remote_side argument is redundant "
"against more detailed _local_remote_side "
"argument."
)
remote_side = [r for (l, r) in self._local_remote_pairs]
else:
remote_side = self._remote_side
if self._refers_to_parent_table():
self._annotate_selfref(lambda col: col in remote_side, True)
else:
def repl(element):
# use set() to avoid generating ``__eq__()`` expressions
# against each element
if element in set(remote_side):
return element._annotate({"remote": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, repl
)
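    # A minimal sketch of how these annotations are usually supplied,
    # assuming a hypothetical self-referential Node class with ``id`` and
    # ``parent_id`` columns:
    #
    #     parent = relationship("Node", remote_side=[id])  # the Node.id column
    #
    # marking Node.id as the "remote" side turns the otherwise ambiguous
    # self-join into a many-to-one back to the parent row.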
def _annotate_remote_with_overlap(self):
"""annotate 'remote' in primaryjoin, secondaryjoin
when the parent/child tables have some set of
        tables in common, though it is not a fully self-referential
relationship.
"""
def visit_binary(binary):
binary.left, binary.right = proc_left_right(
binary.left, binary.right
)
binary.right, binary.left = proc_left_right(
binary.right, binary.left
)
check_entities = (
self.prop is not None and self.prop.mapper is not self.prop.parent
)
def proc_left_right(left, right):
if isinstance(left, expression.ColumnClause) and isinstance(
right, expression.ColumnClause
):
if self.child_persist_selectable.c.contains_column(
right
) and self.parent_persist_selectable.c.contains_column(left):
right = right._annotate({"remote": True})
elif (
check_entities
and right._annotations.get("parentmapper") is self.prop.mapper
):
right = right._annotate({"remote": True})
elif (
check_entities
and left._annotations.get("parentmapper") is self.prop.mapper
):
left = left._annotate({"remote": True})
else:
self._warn_non_column_elements()
return left, right
self.primaryjoin = visitors.cloned_traverse(
self.primaryjoin, {}, {"binary": visit_binary}
)
def _annotate_remote_distinct_selectables(self):
"""annotate 'remote' in primaryjoin, secondaryjoin
when the parent/child tables are entirely
separate.
"""
def repl(element):
if self.child_persist_selectable.c.contains_column(element) and (
not self.parent_local_selectable.c.contains_column(element)
or self.child_local_selectable.c.contains_column(element)
):
return element._annotate({"remote": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, repl
)
def _warn_non_column_elements(self):
util.warn(
"Non-simple column elements in primary "
"join condition for property %s - consider using "
"remote() annotations to mark the remote side." % self.prop
)
def _annotate_local(self):
"""Annotate the primaryjoin and secondaryjoin
structures with 'local' annotations.
This annotates all column elements found
simultaneously in the parent table
and the join condition that don't have a
'remote' annotation set up from
_annotate_remote() or user-defined.
"""
if self._has_annotation(self.primaryjoin, "local"):
return
if self._local_remote_pairs:
local_side = util.column_set(
[l for (l, r) in self._local_remote_pairs]
)
else:
local_side = util.column_set(self.parent_persist_selectable.c)
def locals_(elem):
if "remote" not in elem._annotations and elem in local_side:
return elem._annotate({"local": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, locals_
)
def _annotate_parentmapper(self):
if self.prop is None:
return
def parentmappers_(elem):
if "remote" in elem._annotations:
return elem._annotate({"parentmapper": self.prop.mapper})
elif "local" in elem._annotations:
return elem._annotate({"parentmapper": self.prop.parent})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, parentmappers_
)
def _check_remote_side(self):
if not self.local_remote_pairs:
raise sa_exc.ArgumentError(
"Relationship %s could "
"not determine any unambiguous local/remote column "
"pairs based on join condition and remote_side "
"arguments. "
"Consider using the remote() annotation to "
"accurately mark those elements of the join "
"condition that are on the remote side of "
"the relationship." % (self.prop,)
)
def _check_foreign_cols(self, join_condition, primary):
"""Check the foreign key columns collected and emit error
messages."""
can_sync = False
foreign_cols = self._gather_columns_with_annotation(
join_condition, "foreign"
)
has_foreign = bool(foreign_cols)
if primary:
can_sync = bool(self.synchronize_pairs)
else:
can_sync = bool(self.secondary_synchronize_pairs)
if (
self.support_sync
and can_sync
or (not self.support_sync and has_foreign)
):
return
# from here below is just determining the best error message
# to report. Check for a join condition using any operator
# (not just ==), perhaps they need to turn on "viewonly=True".
if self.support_sync and has_foreign and not can_sync:
err = (
"Could not locate any simple equality expressions "
"involving locally mapped foreign key columns for "
"%s join condition "
"'%s' on relationship %s."
% (
primary and "primary" or "secondary",
join_condition,
self.prop,
)
)
err += (
" Ensure that referencing columns are associated "
"with a ForeignKey or ForeignKeyConstraint, or are "
"annotated in the join condition with the foreign() "
"annotation. To allow comparison operators other than "
"'==', the relationship can be marked as viewonly=True."
)
raise sa_exc.ArgumentError(err)
else:
err = (
"Could not locate any relevant foreign key columns "
"for %s join condition '%s' on relationship %s."
% (
primary and "primary" or "secondary",
join_condition,
self.prop,
)
)
err += (
" Ensure that referencing columns are associated "
"with a ForeignKey or ForeignKeyConstraint, or are "
"annotated in the join condition with the foreign() "
"annotation."
)
raise sa_exc.ArgumentError(err)
def _determine_direction(self):
"""Determine if this relationship is one to many, many to one,
many to many.
"""
if self.secondaryjoin is not None:
self.direction = MANYTOMANY
else:
parentcols = util.column_set(self.parent_persist_selectable.c)
targetcols = util.column_set(self.child_persist_selectable.c)
# fk collection which suggests ONETOMANY.
onetomany_fk = targetcols.intersection(self.foreign_key_columns)
# fk collection which suggests MANYTOONE.
manytoone_fk = parentcols.intersection(self.foreign_key_columns)
if onetomany_fk and manytoone_fk:
# fks on both sides. test for overlap of local/remote
# with foreign key.
# we will gather columns directly from their annotations
# without deannotating, so that we can distinguish on a column
# that refers to itself.
# 1. columns that are both remote and FK suggest
# onetomany.
onetomany_local = self._gather_columns_with_annotation(
self.primaryjoin, "remote", "foreign"
)
# 2. columns that are FK but are not remote (e.g. local)
# suggest manytoone.
manytoone_local = set(
[
c
for c in self._gather_columns_with_annotation(
self.primaryjoin, "foreign"
)
if "remote" not in c._annotations
]
)
# 3. if both collections are present, remove columns that
# refer to themselves. This is for the case of
# and_(Me.id == Me.remote_id, Me.version == Me.version)
if onetomany_local and manytoone_local:
self_equated = self.remote_columns.intersection(
self.local_columns
)
onetomany_local = onetomany_local.difference(self_equated)
manytoone_local = manytoone_local.difference(self_equated)
# at this point, if only one or the other collection is
# present, we know the direction, otherwise it's still
# ambiguous.
if onetomany_local and not manytoone_local:
self.direction = ONETOMANY
elif manytoone_local and not onetomany_local:
self.direction = MANYTOONE
else:
raise sa_exc.ArgumentError(
"Can't determine relationship"
" direction for relationship '%s' - foreign "
"key columns within the join condition are present "
"in both the parent and the child's mapped tables. "
"Ensure that only those columns referring "
"to a parent column are marked as foreign, "
"either via the foreign() annotation or "
"via the foreign_keys argument." % self.prop
)
elif onetomany_fk:
self.direction = ONETOMANY
elif manytoone_fk:
self.direction = MANYTOONE
else:
raise sa_exc.ArgumentError(
"Can't determine relationship "
"direction for relationship '%s' - foreign "
"key columns are present in neither the parent "
"nor the child's mapped tables" % self.prop
)
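    # A rough sketch of the outcome above for hypothetical tables:
    #
    #     FK address.user_id -> user.id    User.addresses => ONETOMANY
    #                                      Address.user   => MANYTOONE
    #     secondary= association table     both sides     => MANYTOMANY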
def _deannotate_pairs(self, collection):
"""provide deannotation for the various lists of
pairs, so that using them in hashes doesn't incur
high-overhead __eq__() comparisons against
original columns mapped.
"""
return [(x._deannotate(), y._deannotate()) for x, y in collection]
def _setup_pairs(self):
sync_pairs = []
lrp = util.OrderedSet([])
secondary_sync_pairs = []
def go(joincond, collection):
def visit_binary(binary, left, right):
if (
"remote" in right._annotations
and "remote" not in left._annotations
and self.can_be_synced_fn(left)
):
lrp.add((left, right))
elif (
"remote" in left._annotations
and "remote" not in right._annotations
and self.can_be_synced_fn(right)
):
lrp.add((right, left))
if binary.operator is operators.eq and self.can_be_synced_fn(
left, right
):
if "foreign" in right._annotations:
collection.append((left, right))
elif "foreign" in left._annotations:
collection.append((right, left))
visit_binary_product(visit_binary, joincond)
for joincond, collection in [
(self.primaryjoin, sync_pairs),
(self.secondaryjoin, secondary_sync_pairs),
]:
if joincond is None:
continue
go(joincond, collection)
self.local_remote_pairs = self._deannotate_pairs(lrp)
self.synchronize_pairs = self._deannotate_pairs(sync_pairs)
self.secondary_synchronize_pairs = self._deannotate_pairs(
secondary_sync_pairs
)
_track_overlapping_sync_targets = weakref.WeakKeyDictionary()
@util.preload_module("sqlalchemy.orm.mapper")
def _warn_for_conflicting_sync_targets(self):
mapperlib = util.preloaded.orm_mapper
if not self.support_sync:
return
# we would like to detect if we are synchronizing any column
# pairs in conflict with another relationship that wishes to sync
# an entirely different column to the same target. This is a
# very rare edge case so we will try to minimize the memory/overhead
# impact of this check
for from_, to_ in [
(from_, to_) for (from_, to_) in self.synchronize_pairs
] + [
(from_, to_) for (from_, to_) in self.secondary_synchronize_pairs
]:
# save ourselves a ton of memory and overhead by only
            # considering columns that are subject to overlapping
# FK constraints at the core level. This condition can arise
# if multiple relationships overlap foreign() directly, but
# we're going to assume it's typically a ForeignKeyConstraint-
# level configuration that benefits from this warning.
if to_ not in self._track_overlapping_sync_targets:
self._track_overlapping_sync_targets[
to_
] = weakref.WeakKeyDictionary({self.prop: from_})
else:
other_props = []
prop_to_from = self._track_overlapping_sync_targets[to_]
for pr, fr_ in prop_to_from.items():
if (
pr.mapper in mapperlib._mapper_registry
and pr not in self.prop._reverse_property
and pr.key not in self.prop._overlaps
and self.prop.key not in pr._overlaps
and not self.prop.parent.is_sibling(pr.parent)
and not self.prop.mapper.is_sibling(pr.mapper)
and (
self.prop.key != pr.key
or not self.prop.parent.common_parent(pr.parent)
)
):
other_props.append((pr, fr_))
if other_props:
util.warn(
"relationship '%s' will copy column %s to column %s, "
"which conflicts with relationship(s): %s. "
"If this is not the intention, consider if these "
"relationships should be linked with "
"back_populates, or if viewonly=True should be "
"applied to one or more if they are read-only. "
"For the less common case that foreign key "
"constraints are partially overlapping, the "
"orm.foreign() "
"annotation can be used to isolate the columns that "
"should be written towards. The 'overlaps' "
"parameter may be used to remove this warning."
% (
self.prop,
from_,
to_,
", ".join(
"'%s' (copies %s to %s)" % (pr, fr_, to_)
for (pr, fr_) in other_props
),
)
)
self._track_overlapping_sync_targets[to_][self.prop] = from_
@util.memoized_property
def remote_columns(self):
return self._gather_join_annotations("remote")
@util.memoized_property
def local_columns(self):
return self._gather_join_annotations("local")
@util.memoized_property
def foreign_key_columns(self):
return self._gather_join_annotations("foreign")
def _gather_join_annotations(self, annotation):
s = set(
self._gather_columns_with_annotation(self.primaryjoin, annotation)
)
if self.secondaryjoin is not None:
s.update(
self._gather_columns_with_annotation(
self.secondaryjoin, annotation
)
)
return {x._deannotate() for x in s}
def _gather_columns_with_annotation(self, clause, *annotation):
annotation = set(annotation)
return set(
[
col
for col in visitors.iterate(clause, {})
if annotation.issubset(col._annotations)
]
)
def join_targets(
self,
source_selectable,
dest_selectable,
aliased,
single_crit=None,
extra_criteria=(),
):
"""Given a source and destination selectable, create a
join between them.
This takes into account aliasing the join clause
to reference the appropriate corresponding columns
in the target objects, as well as the extra child
criterion, equivalent column sets, etc.
"""
# place a barrier on the destination such that
# replacement traversals won't ever dig into it.
# its internal structure remains fixed
# regardless of context.
dest_selectable = _shallow_annotate(
dest_selectable, {"no_replacement_traverse": True}
)
primaryjoin, secondaryjoin, secondary = (
self.primaryjoin,
self.secondaryjoin,
self.secondary,
)
# adjust the join condition for single table inheritance,
# in the case that the join is to a subclass
# this is analogous to the
# "_adjust_for_single_table_inheritance()" method in Query.
if single_crit is not None:
if secondaryjoin is not None:
secondaryjoin = secondaryjoin & single_crit
else:
primaryjoin = primaryjoin & single_crit
if extra_criteria:
if secondaryjoin is not None:
secondaryjoin = secondaryjoin & sql.and_(*extra_criteria)
else:
primaryjoin = primaryjoin & sql.and_(*extra_criteria)
if aliased:
if secondary is not None:
secondary = secondary._anonymous_fromclause(flat=True)
primary_aliasizer = ClauseAdapter(
secondary, exclude_fn=_ColInAnnotations("local")
)
secondary_aliasizer = ClauseAdapter(
dest_selectable, equivalents=self.child_equivalents
).chain(primary_aliasizer)
if source_selectable is not None:
primary_aliasizer = ClauseAdapter(
secondary, exclude_fn=_ColInAnnotations("local")
).chain(
ClauseAdapter(
source_selectable,
equivalents=self.parent_equivalents,
)
)
secondaryjoin = secondary_aliasizer.traverse(secondaryjoin)
else:
primary_aliasizer = ClauseAdapter(
dest_selectable,
exclude_fn=_ColInAnnotations("local"),
equivalents=self.child_equivalents,
)
if source_selectable is not None:
primary_aliasizer.chain(
ClauseAdapter(
source_selectable,
exclude_fn=_ColInAnnotations("remote"),
equivalents=self.parent_equivalents,
)
)
secondary_aliasizer = None
primaryjoin = primary_aliasizer.traverse(primaryjoin)
target_adapter = secondary_aliasizer or primary_aliasizer
target_adapter.exclude_fn = None
else:
target_adapter = None
return (
primaryjoin,
secondaryjoin,
secondary,
target_adapter,
dest_selectable,
)
def create_lazy_clause(self, reverse_direction=False):
binds = util.column_dict()
equated_columns = util.column_dict()
has_secondary = self.secondaryjoin is not None
if has_secondary:
lookup = collections.defaultdict(list)
for l, r in self.local_remote_pairs:
lookup[l].append((l, r))
equated_columns[r] = l
elif not reverse_direction:
for l, r in self.local_remote_pairs:
equated_columns[r] = l
else:
for l, r in self.local_remote_pairs:
equated_columns[l] = r
def col_to_bind(col):
if (
(not reverse_direction and "local" in col._annotations)
or reverse_direction
and (
(has_secondary and col in lookup)
or (not has_secondary and "remote" in col._annotations)
)
):
if col not in binds:
binds[col] = sql.bindparam(
None, None, type_=col.type, unique=True
)
return binds[col]
return None
lazywhere = self.primaryjoin
if self.secondaryjoin is None or not reverse_direction:
lazywhere = visitors.replacement_traverse(
lazywhere, {}, col_to_bind
)
if self.secondaryjoin is not None:
secondaryjoin = self.secondaryjoin
if reverse_direction:
secondaryjoin = visitors.replacement_traverse(
secondaryjoin, {}, col_to_bind
)
lazywhere = sql.and_(lazywhere, secondaryjoin)
bind_to_col = {binds[col].key: col for col in binds}
return lazywhere, bind_to_col, equated_columns
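    # A minimal sketch of the result, assuming a hypothetical one-to-many
    # User.addresses relationship: lazywhere comes out roughly as
    # ``addresses.user_id == :param_1`` (parameter names are generated),
    # and bind_to_col maps that parameter key back to users.id, so the lazy
    # loader can substitute the parent's primary key value at load time.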
class _ColInAnnotations(object):
"""Seralizable equivalent to:
lambda c: "name" in c._annotations
"""
def __init__(self, name):
self.name = name
def __call__(self, c):
return self.name in c._annotations
| 38.684152
| 79
| 0.579735
|
794b94e622838696d16e69f60268694f6b889c37
| 247
|
py
|
Python
|
actions/admin.py
|
Hoofeycheng/Bookmarks
|
f2721633cd39393f0c92993579071679bb975ab0
|
[
"MIT"
] | null | null | null |
actions/admin.py
|
Hoofeycheng/Bookmarks
|
f2721633cd39393f0c92993579071679bb975ab0
|
[
"MIT"
] | null | null | null |
actions/admin.py
|
Hoofeycheng/Bookmarks
|
f2721633cd39393f0c92993579071679bb975ab0
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from actions.models import Action
# Register your models here.
@admin.register(Action)
class ActionAdmin(admin.ModelAdmin):
list_display = ("user","verb","target","created")
search_fields = ("verb",)
| 17.642857
| 53
| 0.724696
|
794b9623be2e950e65752f82342df7cdb2df9222
| 3,374
|
py
|
Python
|
aleph/util.py
|
gazeti/aleph
|
f6714c4be038471cfdc6408bfe88dc9e2ed28452
|
[
"MIT"
] | 1
|
2017-07-28T12:54:09.000Z
|
2017-07-28T12:54:09.000Z
|
aleph/util.py
|
gazeti/aleph
|
f6714c4be038471cfdc6408bfe88dc9e2ed28452
|
[
"MIT"
] | 7
|
2017-08-16T12:49:23.000Z
|
2018-02-16T10:22:11.000Z
|
aleph/util.py
|
gazeti/aleph
|
f6714c4be038471cfdc6408bfe88dc9e2ed28452
|
[
"MIT"
] | 6
|
2017-07-26T12:29:53.000Z
|
2017-08-18T09:35:50.000Z
|
# coding: utf-8
import os
import six
import yaml
import shutil
from os import path
from hashlib import sha1
from celery import Task
from tempfile import mkdtemp
from aleph.text import string_value
TMP_PREFIX = six.text_type('aleph.tmp.')
PDF_MIME = 'application/pdf'
def checksum(filename):
"""Generate a hash for a given file name."""
hash = sha1()
with open(filename, 'rb') as fh:
while True:
block = fh.read(2 ** 10)
if not block:
break
hash.update(block)
return hash.hexdigest()
def make_tempdir(name=None):
name = string_value(name) or 'data'
dirpath = path.join(mkdtemp(prefix=TMP_PREFIX), name)
os.makedirs(dirpath)
return dirpath
def remove_tempdir(dirpath):
if dirpath is None:
return
parent = path.normpath(path.join(dirpath, '..'))
    name = path.basename(parent)
if path.exists(parent) and name is not None \
and name.startswith(TMP_PREFIX):
shutil.rmtree(parent)
elif path.isdir(dirpath):
shutil.rmtree(dirpath)
def make_tempfile(name=None, suffix=None):
name = string_value(name) or 'data'
suffix = string_value(suffix)
if suffix is not None:
name = '%s.%s' % (name, suffix.strip('.'))
return os.path.join(make_tempdir(), name)
def remove_tempfile(filepath):
if filepath is None:
return
remove_tempdir(path.dirname(filepath))
def load_config_file(file_path):
"""Load a YAML (or JSON) graph model configuration file."""
file_path = os.path.abspath(file_path)
with open(file_path, 'r') as fh:
        data = yaml.safe_load(fh) or {}
return resolve_includes(file_path, data)
def resolve_includes(file_path, data):
"""Handle include statements in the graph configuration file.
This allows the YAML graph configuration to be broken into
multiple smaller fragments that are easier to maintain."""
if isinstance(data, (list, tuple, set)):
data = [resolve_includes(file_path, i) for i in data]
elif isinstance(data, dict):
include_paths = data.pop('include', [])
if not isinstance(include_paths, (list, tuple, set)):
include_paths = [include_paths]
for include_path in include_paths:
dir_prefix = os.path.dirname(file_path)
include_path = os.path.join(dir_prefix, include_path)
data.update(load_config_file(include_path))
for key, value in data.items():
data[key] = resolve_includes(file_path, value)
return data
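# A minimal sketch of the include mechanism above, with hypothetical
# file names:
#
#   # model.yaml
#   include:
#     - entities.yaml
#     - links.yaml
#   schema_version: 2
#
# load_config_file('model.yaml') yields a single merged dict; every
# 'include' key is consumed recursively, relative to the including file.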
def is_list(obj):
return isinstance(obj, (list, tuple, set))
def unique_list(lst):
"""Make a list unique, retaining order of initial appearance."""
uniq = []
for item in lst:
if item not in uniq:
uniq.append(item)
return uniq
def ensure_list(obj):
"""Make the returned object a list, otherwise wrap as single item."""
if obj is None:
return []
if not is_list(obj):
return [obj]
return obj
def dict_list(data, *keys):
"""Get an entry as a list from a dict. Provide a fallback key."""
for key in keys:
if key in data:
return ensure_list(data[key])
return []
class SessionTask(Task):
def on_failure(self, exc, task_id, args, kwargs, einfo):
from aleph.core import db
db.session.remove()
| 26.992
| 73
| 0.647303
|
794b98856db2c09616f594b0ed31b8f17c1cdf35
| 6,370
|
py
|
Python
|
generated/ansible-collection/apimanagementdelegationsetting_info.py
|
audevbot/autorest.devops.debug
|
a507fb6e2dd7826212537f27d583f203aac1c28f
|
[
"MIT"
] | null | null | null |
generated/ansible-collection/apimanagementdelegationsetting_info.py
|
audevbot/autorest.devops.debug
|
a507fb6e2dd7826212537f27d583f203aac1c28f
|
[
"MIT"
] | null | null | null |
generated/ansible-collection/apimanagementdelegationsetting_info.py
|
audevbot/autorest.devops.debug
|
a507fb6e2dd7826212537f27d583f203aac1c28f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
#
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: apimanagementdelegationsetting_info
version_added: '2.9'
short_description: Get DelegationSetting info.
description:
- Get info of DelegationSetting.
options:
resource_group:
description:
- The name of the resource group.
required: true
type: str
name:
description:
- Resource name.
type: str
id:
description:
- Resource ID.
type: str
type:
description:
- Resource type for API Management resource.
type: str
url:
description:
- A delegation Url.
type: str
validation_key:
description:
- >-
A base64-encoded validation key to validate, that a request is coming
from Azure API Management.
type: str
subscriptions:
description:
- Subscriptions delegation settings.
type: dict
suboptions:
enabled:
description:
- Enable or disable delegation for subscriptions.
type: boolean
user_registration:
description:
- User registration delegation settings.
type: dict
suboptions:
enabled:
description:
- Enable or disable delegation for user registration.
type: boolean
extends_documentation_fragment:
- azure
author:
- Zim Kalinowski (@zikalino)
'''
EXAMPLES = '''
- name: ApiManagementPortalSettingsGetDelegation
azure.rm.apimanagementdelegationsetting.info:
resource_group: myResourceGroup
name: myService
'''
RETURN = '''
delegation_settings:
description: >-
A list of dict results where the key is the name of the DelegationSetting
and the values are the facts for that DelegationSetting.
returned: always
type: complex
contains:
delegationsetting_name:
description: The key is the name of the server that the values relate to.
type: complex
contains:
id:
description:
- Resource ID.
returned: always
type: str
sample: null
name:
description:
- Resource name.
returned: always
type: str
sample: null
type:
description:
- Resource type for API Management resource.
returned: always
type: str
sample: null
properties:
description:
- Delegation settings contract properties.
returned: always
type: dict
sample: null
'''
import time
import json
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from ansible.module_utils.azure_rm_common_rest import GenericRestClient
from copy import deepcopy
from msrestazure.azure_exceptions import CloudError
class AzureRMDelegationSettingsInfo(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str',
                required=True
),
name=dict(
type='str',
                required=True
)
)
self.resource_group = None
self.name = None
self.id = None
self.name = None
self.type = None
self.properties = None
self.results = dict(changed=False)
self.mgmt_client = None
self.state = None
self.url = None
self.status_code = [200]
self.query_parameters = {}
self.query_parameters['api-version'] = '2019-01-01'
self.header_parameters = {}
self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'
self.mgmt_client = None
super(AzureRMDelegationSettingsInfo, self).__init__(self.module_arg_spec, supports_tags=True)
def exec_module(self, **kwargs):
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
base_url=self._cloud_environment.endpoints.resource_manager)
if (self.resource_group is not None and
self.name is not None):
self.results['delegation_settings'] = self.format_item(self.get())
return self.results
def get(self):
response = None
results = {}
# prepare url
self.url = ('/subscriptions' +
'/{{ subscription_id }}' +
'/resourceGroups' +
'/{{ resource_group }}' +
'/providers' +
'/Microsoft.ApiManagement' +
'/service' +
'/{{ service_name }}' +
'/portalsettings' +
'/delegation')
self.url = self.url.replace('{{ subscription_id }}', self.subscription_id)
self.url = self.url.replace('{{ resource_group }}', self.resource_group)
self.url = self.url.replace('{{ service_name }}', self.name)
try:
response = self.mgmt_client.query(self.url,
'GET',
self.query_parameters,
self.header_parameters,
None,
self.status_code,
600,
30)
results['temp_item'] = json.loads(response.text)
# self.log('Response : {0}'.format(response))
except CloudError as e:
            self.log('Could not get info for DelegationSetting.')
return results
    def format_item(self, item):
return item
def main():
AzureRMDelegationSettingsInfo()
if __name__ == '__main__':
main()
| 28.693694
| 113
| 0.557614
|
794b98bda43dae12c172364de06264c693c118a7
| 1,730
|
py
|
Python
|
e2e_graphsage/data/datasets/edge_dataset.py
|
mingruimingrui/E2EGraphSage
|
90de7befd3a8ced514697c073b4c64e96b63bdb0
|
[
"MIT"
] | null | null | null |
e2e_graphsage/data/datasets/edge_dataset.py
|
mingruimingrui/E2EGraphSage
|
90de7befd3a8ced514697c073b4c64e96b63bdb0
|
[
"MIT"
] | null | null | null |
e2e_graphsage/data/datasets/edge_dataset.py
|
mingruimingrui/E2EGraphSage
|
90de7befd3a8ced514697c073b4c64e96b63bdb0
|
[
"MIT"
] | null | null | null |
"""
"""
import random
from collections.abc import Iterable  # "collections.Iterable" was removed in Python 3.10
import numpy as np
import torch.utils.data
class EdgeDataset(torch.utils.data.Dataset):
def __init__(self, adjacency_list, src_node_ids=None):
formatted_adjacency_list = []
for neighbors in adjacency_list:
neighbors = np.array(neighbors, dtype=np.int64)
neighbors = neighbors[neighbors != -1]
neighbors = np.ascontiguousarray(neighbors)
formatted_adjacency_list.append(neighbors)
formatted_adjacency_list = np.array(formatted_adjacency_list)
self.adjacency_list = formatted_adjacency_list
if src_node_ids is None:
self.src_node_ids = np.arange(len(adjacency_list)).astype(np.int64)
else:
self.src_node_ids = np.array(src_node_ids, dtype=np.int64)
assert self.src_node_ids.max() < len(adjacency_list)
def __len__(self):
return len(self.src_node_ids)
def __getitem__(self, idx):
if isinstance(idx, Iterable):
src_node_ids = self.src_node_ids[idx]
all_neighbors = self.adjacency_list[src_node_ids]
neigh_node_ids = [
random.choice(neighbors)
for neighbors in all_neighbors
]
src_node_id = torch.LongTensor(src_node_ids)
neigh_node_id = torch.LongTensor(neigh_node_ids)
else:
src_node_id = self.src_node_ids[idx]
neighbors = self.adjacency_list[src_node_id]
neigh_node_id = random.choice(neighbors)
            # wrap the scalars in a list: LongTensor(int) would allocate an
            # uninitialized tensor of that size instead of holding the value
            src_node_id = torch.LongTensor([src_node_id])[0]
            neigh_node_id = torch.LongTensor([neigh_node_id])[0]
return src_node_id, neigh_node_id
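if __name__ == "__main__":
    # A minimal usage sketch with a hypothetical toy graph (not part of any
    # real training pipeline): sample one (source, neighbour) edge per node.
    toy_adjacency = [
        [1, 2],  # node 0 links to nodes 1 and 2
        [0, 2],  # node 1 links to nodes 0 and 2
        [0, 1],  # node 2 links to nodes 0 and 1
    ]
    dataset = EdgeDataset(toy_adjacency)
    for i in range(len(dataset)):
        src, neigh = dataset[i]
        print(int(src), "->", int(neigh))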
| 32.641509
| 79
| 0.653179
|
794b996cb4b1bda4b398d4a9ead25e18d747cd35
| 10,274
|
py
|
Python
|
tests/previs/test_media.py
|
tws0002/anima
|
73c256d1f7716a2db7933d6d8519a51333c7e5b4
|
[
"BSD-2-Clause"
] | 7
|
2016-03-30T14:43:33.000Z
|
2020-11-12T17:56:40.000Z
|
tests/previs/test_media.py
|
tws0002/anima
|
73c256d1f7716a2db7933d6d8519a51333c7e5b4
|
[
"BSD-2-Clause"
] | null | null | null |
tests/previs/test_media.py
|
tws0002/anima
|
73c256d1f7716a2db7933d6d8519a51333c7e5b4
|
[
"BSD-2-Clause"
] | 3
|
2017-04-13T04:29:04.000Z
|
2019-05-08T00:28:44.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2015, Anima Istanbul
#
# This module is part of anima-tools and is released under the BSD 2
# License: http://www.opensource.org/licenses/BSD-2-Clause
import unittest
from anima.edit import Media, Video, Track, Clip, File
class MediaTestCase(unittest.TestCase):
"""tests the anima.previs.Media class
"""
def test_to_xml_method_is_working_properly(self):
"""testing if the to xml method is working properly
"""
m = Media()
v = Video()
v.width = 1024
v.height = 778
m.video = v
t = Track()
t.enabled = True
t.locked = False
v.tracks.append(t)
# clip 1
f = File()
f.duration = 34
f.name = 'shot2'
f.pathurl = 'file://localhost/home/eoyilmaz/maya/projects/default/data/shot2.mov'
c = Clip()
c.id = 'shot2'
c.start = 1
c.end = 35
c.name = 'shot2'
c.enabled = True
c.duration = 34
c.in_ = 0
c.out = 34
c.file = f
t.clips.append(c)
# clip 2
f = File()
f.duration = 30
f.name = 'shot'
f.pathurl = 'file://localhost/home/eoyilmaz/maya/projects/default/data/shot.mov'
c = Clip()
c.id = 'shot'
c.start = 35
c.end = 65
c.name = 'shot'
c.enabled = True
c.duration = 30
c.in_ = 0
c.out = 30
c.file = f
t.clips.append(c)
# clip 3
f = File()
f.duration = 45
f.name = 'shot1'
f.pathurl = 'file://localhost/home/eoyilmaz/maya/projects/default/data/shot1.mov'
c = Clip()
c.id = 'shot1'
c.start = 65
c.end = 110
c.name = 'shot1'
c.enabled = True
c.duration = 45
c.in_ = 0
c.out = 45
c.file = f
t.clips.append(c)
expected_xml = \
"""<media>
<video>
<format>
<samplecharacteristics>
<width>1024</width>
<height>778</height>
</samplecharacteristics>
</format>
<track>
<locked>FALSE</locked>
<enabled>TRUE</enabled>
<clipitem id="shot2">
<end>35</end>
<name>shot2</name>
<enabled>True</enabled>
<start>1</start>
<in>0</in>
<duration>34</duration>
<out>34</out>
<file id="shot2.mov">
<duration>34</duration>
<name>shot2</name>
<pathurl>file://localhost/home/eoyilmaz/maya/projects/default/data/shot2.mov</pathurl>
</file>
</clipitem>
<clipitem id="shot">
<end>65</end>
<name>shot</name>
<enabled>True</enabled>
<start>35</start>
<in>0</in>
<duration>30</duration>
<out>30</out>
<file id="shot.mov">
<duration>30</duration>
<name>shot</name>
<pathurl>file://localhost/home/eoyilmaz/maya/projects/default/data/shot.mov</pathurl>
</file>
</clipitem>
<clipitem id="shot1">
<end>110</end>
<name>shot1</name>
<enabled>True</enabled>
<start>65</start>
<in>0</in>
<duration>45</duration>
<out>45</out>
<file id="shot1.mov">
<duration>45</duration>
<name>shot1</name>
<pathurl>file://localhost/home/eoyilmaz/maya/projects/default/data/shot1.mov</pathurl>
</file>
</clipitem>
</track>
</video>
</media>"""
self.assertEqual(
expected_xml,
m.to_xml()
)
def test_from_xml_method_is_working_properly(self):
"""testing if the from_xml method will fill object attributes from the
given xml node
"""
from xml.etree import ElementTree
media_node = ElementTree.Element('media')
video_node = ElementTree.SubElement(media_node, 'video')
format_node = ElementTree.SubElement(video_node, 'format')
sc_node = ElementTree.SubElement(format_node, 'samplecharacteristics')
width_node = ElementTree.SubElement(sc_node, 'width')
        width_node.text = '1024'
        height_node = ElementTree.SubElement(sc_node, 'height')
        height_node.text = '778'
track_node = ElementTree.SubElement(video_node, 'track')
locked_node = ElementTree.SubElement(track_node, 'locked')
locked_node.text = 'FALSE'
enabled_node = ElementTree.SubElement(track_node, 'enabled')
enabled_node.text = 'TRUE'
# clip1
clip_node = ElementTree.SubElement(track_node, 'clipitem',
attrib={'id': 'shot2'})
end_node = ElementTree.SubElement(clip_node, 'end')
end_node.text = '35'
name_node = ElementTree.SubElement(clip_node, 'name')
name_node.text = 'shot2'
enabled_node = ElementTree.SubElement(clip_node, 'enabled')
enabled_node.text = 'True'
start_node = ElementTree.SubElement(clip_node, 'start')
start_node.text = '1'
in_node = ElementTree.SubElement(clip_node, 'in')
in_node.text = '0'
duration_node = ElementTree.SubElement(clip_node, 'duration')
duration_node.text = '34'
out_node = ElementTree.SubElement(clip_node, 'out')
out_node.text = '34'
file_node = ElementTree.SubElement(clip_node, 'file')
duration_node = ElementTree.SubElement(file_node, 'duration')
duration_node.text = '34'
name_node = ElementTree.SubElement(file_node, 'name')
name_node.text = 'shot2'
pathurl_node = ElementTree.SubElement(file_node, 'pathurl')
pathurl = 'file://localhost/home/eoyilmaz/maya/projects/default/data/shot2.mov'
pathurl_node.text = pathurl
# clip2
clip_node = ElementTree.SubElement(track_node, 'clipitem',
attrib={'id': 'shot'})
end_node = ElementTree.SubElement(clip_node, 'end')
end_node.text = '65'
name_node = ElementTree.SubElement(clip_node, 'name')
name_node.text = 'shot'
enabled_node = ElementTree.SubElement(clip_node, 'enabled')
enabled_node.text = 'True'
start_node = ElementTree.SubElement(clip_node, 'start')
start_node.text = '35'
in_node = ElementTree.SubElement(clip_node, 'in')
in_node.text = '0'
duration_node = ElementTree.SubElement(clip_node, 'duration')
duration_node.text = '30'
out_node = ElementTree.SubElement(clip_node, 'out')
out_node.text = '30'
file_node = ElementTree.SubElement(clip_node, 'file')
duration_node = ElementTree.SubElement(file_node, 'duration')
duration_node.text = '30'
name_node = ElementTree.SubElement(file_node, 'name')
name_node.text = 'shot'
pathurl_node = ElementTree.SubElement(file_node, 'pathurl')
pathurl = 'file://localhost/home/eoyilmaz/maya/projects/default/data/shot.mov'
pathurl_node.text = pathurl
# clip3
clip_node = ElementTree.SubElement(track_node, 'clipitem',
attrib={'id': 'shot1'})
end_node = ElementTree.SubElement(clip_node, 'end')
end_node.text = '110'
name_node = ElementTree.SubElement(clip_node, 'name')
name_node.text = 'shot1'
enabled_node = ElementTree.SubElement(clip_node, 'enabled')
enabled_node.text = 'True'
start_node = ElementTree.SubElement(clip_node, 'start')
start_node.text = '65'
in_node = ElementTree.SubElement(clip_node, 'in')
in_node.text = '0'
duration_node = ElementTree.SubElement(clip_node, 'duration')
duration_node.text = '45'
out_node = ElementTree.SubElement(clip_node, 'out')
out_node.text = '45'
file_node = ElementTree.SubElement(clip_node, 'file')
duration_node = ElementTree.SubElement(file_node, 'duration')
duration_node.text = '45'
name_node = ElementTree.SubElement(file_node, 'name')
name_node.text = 'shot1'
pathurl_node = ElementTree.SubElement(file_node, 'pathurl')
pathurl = 'file://localhost/home/eoyilmaz/maya/projects/default/data/shot1.mov'
pathurl_node.text = pathurl
m = Media()
m.from_xml(media_node)
v = m.video
self.assertEqual(1024, v.width)
self.assertEqual(778, v.height)
t = v.tracks[0]
self.assertEqual(False, t.locked)
self.assertEqual(True, t.enabled)
# clip1
c = t.clips[0]
self.assertEqual(35, c.end)
self.assertEqual('shot2', c.name)
self.assertEqual(True, c.enabled)
self.assertEqual(1, c.start)
self.assertEqual(0, c.in_)
self.assertEqual(34, c.duration)
self.assertEqual(34, c.out)
f = c.file
self.assertEqual(34, f.duration)
self.assertEqual('shot2', f.name)
self.assertEqual(
'file://localhost/home/eoyilmaz/maya/projects/default/data/shot2.mov',
f.pathurl
)
# clip2
c = t.clips[1]
self.assertEqual(65, c.end)
self.assertEqual('shot', c.name)
self.assertEqual(True, c.enabled)
self.assertEqual(35, c.start)
self.assertEqual(0, c.in_)
self.assertEqual(30, c.duration)
self.assertEqual(30, c.out)
f = c.file
self.assertEqual(30, f.duration)
self.assertEqual('shot', f.name)
self.assertEqual(
'file://localhost/home/eoyilmaz/maya/projects/default/data/shot.mov',
f.pathurl
)
# clip3
c = t.clips[2]
self.assertEqual(110, c.end)
self.assertEqual('shot1', c.name)
self.assertEqual(True, c.enabled)
self.assertEqual(65, c.start)
self.assertEqual(0, c.in_)
self.assertEqual(45, c.duration)
self.assertEqual(45, c.out)
f = c.file
self.assertEqual(45, f.duration)
self.assertEqual('shot1', f.name)
self.assertEqual(
'file://localhost/home/eoyilmaz/maya/projects/default/data/shot1.mov',
f.pathurl
)
| 32.206897
| 96
| 0.57845
|
794b9ac4575e6969b3179a91d698e0b4115fe7f4
| 758
|
py
|
Python
|
study/pyaudio_study/02.py
|
strawsyz/straw
|
db313c78c2e3c0355cd10c70ac25a15bb5632d41
|
[
"MIT"
] | 2
|
2020-04-06T09:09:19.000Z
|
2020-07-24T03:59:55.000Z
|
study/pyaudio_study/02.py
|
strawsyz/straw
|
db313c78c2e3c0355cd10c70ac25a15bb5632d41
|
[
"MIT"
] | null | null | null |
study/pyaudio_study/02.py
|
strawsyz/straw
|
db313c78c2e3c0355cd10c70ac25a15bb5632d41
|
[
"MIT"
] | null | null | null |
import wave
import pyaudio
## Does not raise any error, but the output file cannot be opened
## Essentially useless
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
RECORD_SECONDS = 5  # recording duration in seconds
WAVE_OUTPUT_FILENAME = "output.wav"
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
print(" recording...")
frames = []
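# With the settings above, the loop below performs int(RATE / CHUNK * RECORD_SECONDS)
# blocking reads, i.e. roughly 44100 / 1024 * 5 ~ 215 chunks of raw audio buffered in `frames`.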
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
data = stream.read(CHUNK)
frames.append(data)
print(" done!!!")
stream.stop_stream()
stream.close()
p.terminate()
wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
| 21.657143
| 54
| 0.667546
|
794b9ba3f165235a3d0f888ef5957753f2fce509
| 1,200
|
py
|
Python
|
Leetcode/Python Solutions/Stack/scoreofParentheses.py
|
Mostofa-Najmus-Sakib/Applied-Algorithm
|
bc656fd655617407856e0ce45b68585fa81c5035
|
[
"MIT"
] | 1
|
2020-01-06T02:21:56.000Z
|
2020-01-06T02:21:56.000Z
|
Leetcode/Python Solutions/Stack/scoreofParentheses.py
|
Mostofa-Najmus-Sakib/Applied-Algorithm
|
bc656fd655617407856e0ce45b68585fa81c5035
|
[
"MIT"
] | null | null | null |
Leetcode/Python Solutions/Stack/scoreofParentheses.py
|
Mostofa-Najmus-Sakib/Applied-Algorithm
|
bc656fd655617407856e0ce45b68585fa81c5035
|
[
"MIT"
] | 3
|
2021-02-22T17:41:01.000Z
|
2022-01-13T05:03:19.000Z
|
"""
LeetCode Problem: 856. Score of Parentheses
Link: https://leetcode.com/problems/score-of-parentheses/
Language: Python
Written by: Mostofa Adib Shakib
"""
# Optimal Solution
# Time Complexity: O(n)
# Space Complexity: O(1)
class Solution(object):
def scoreOfParentheses(self, S):
ans = bal = 0
for i, x in enumerate(S):
if x == '(':
bal += 1
else:
bal -= 1
if S[i-1] == '(':
ans += 1 << bal
return ans
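# Worked example for the counting approach: each "()" pair contributes
# 1 << (number of enclosing parentheses). For S = "(()(()))" the two inner
# "()" pairs are enclosed by 1 and 2 parentheses respectively, so
# ans = (1 << 1) + (1 << 2) = 2 + 4 = 6.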
# Stack based Solution
# Time Complexity: O(n)
# Space Complexity: O(n)
class Solution:
def scoreOfParentheses(self, S: str) -> int:
stack = []
length = len(S)
for i in range(length):
char = S[i]
if char == "(":
stack.append(char)
elif stack and stack[-1] == '(':
stack.pop()
stack.append(1)
else:
num = 0
while stack and stack[-1] != '(':
num += stack.pop()
stack.pop()
stack.append(2*num)
return sum(stack)
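# Worked example for the stack approach with the same input "(()(()))":
# the stack evolves as ['('] -> ['(', '('] -> ['(', 1] -> ['(', 1, '(']
# -> ['(', 1, '(', '('] -> ['(', 1, '(', 1] -> ['(', 1, 2] -> [6],
# so sum(stack) returns 6.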
| 23.529412
| 57
| 0.4525
|
794b9c3a3f7f4d609aff26075db91c07407bddb0
| 32,746
|
py
|
Python
|
tests/dataset/test_pandas_dataset.py
|
zachlindsey/great_expectations
|
953e152e35e065b443563ab87e94dd84573bdad1
|
[
"Apache-2.0"
] | 1
|
2021-04-11T20:54:23.000Z
|
2021-04-11T20:54:23.000Z
|
tests/dataset/test_pandas_dataset.py
|
zachlindsey/great_expectations
|
953e152e35e065b443563ab87e94dd84573bdad1
|
[
"Apache-2.0"
] | 53
|
2021-10-02T02:26:51.000Z
|
2021-12-28T20:49:25.000Z
|
tests/dataset/test_pandas_dataset.py
|
zachlindsey/great_expectations
|
953e152e35e065b443563ab87e94dd84573bdad1
|
[
"Apache-2.0"
] | 1
|
2022-03-03T16:47:32.000Z
|
2022-03-03T16:47:32.000Z
|
import datetime
import json
from unittest import mock
import pandas as pd
import pytest
import great_expectations as ge
from great_expectations import DataContext
from great_expectations.core import ExpectationSuite
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.profile import ColumnsExistProfiler
from great_expectations.self_check.util import (
expectationSuiteSchema,
expectationValidationResultSchema,
)
def test_expect_column_values_to_be_dateutil_parseable():
D = ge.dataset.PandasDataset(
{
"c1": ["03/06/09", "23 April 1973", "January 9, 2016"],
"c2": ["9/8/2012", "covfefe", 25],
"c3": ["Jared", "June 1, 2013", "July 18, 1976"],
"c4": ["1", "2", "49000004632"],
"already_datetime": [
datetime.datetime(2015, 1, 1),
datetime.datetime(2016, 1, 1),
datetime.datetime(2017, 1, 1),
],
}
)
D.set_default_expectation_argument("result_format", "COMPLETE")
T = [
{
"in": {"column": "c1"},
"out": {
"success": True,
"unexpected_list": [],
"unexpected_index_list": [],
},
},
{
"in": {"column": "c2", "catch_exceptions": True},
# 'out':{'success':False, 'unexpected_list':['covfefe', 25], 'unexpected_index_list': [1, 2]}},
"error": {
"traceback_substring": "TypeError: Values passed to expect_column_values_to_be_dateutil_parseable must be of type string"
},
},
{
"in": {"column": "c3"},
"out": {
"success": False,
"unexpected_list": ["Jared"],
"unexpected_index_list": [0],
},
},
{
"in": {"column": "c3", "mostly": 0.5},
"out": {
"success": True,
"unexpected_list": ["Jared"],
"unexpected_index_list": [0],
},
},
{
"in": {"column": "c4"},
"out": {
"success": False,
"unexpected_list": ["49000004632"],
"unexpected_index_list": [2],
},
},
{
"in": {"column": "already_datetime", "catch_exceptions": True},
"error": {
"traceback_substring": "TypeError: Values passed to expect_column_values_to_be_dateutil_parseable must be of type string"
},
},
]
for t in T:
out = D.expect_column_values_to_be_dateutil_parseable(**t["in"])
if "out" in t:
assert t["out"]["success"] == out.success
assert (
t["out"]["unexpected_index_list"] == out.result["unexpected_index_list"]
)
assert t["out"]["unexpected_list"] == out.result["unexpected_list"]
elif "error" in t:
assert out.exception_info["raised_exception"] is True
assert (
t["error"]["traceback_substring"]
in out.exception_info["exception_traceback"]
)
def test_expect_column_values_to_be_json_parseable():
d1 = json.dumps({"i": [1, 2, 3], "j": 35, "k": {"x": "five", "y": 5, "z": "101"}})
d2 = json.dumps({"i": 1, "j": 2, "k": [3, 4, 5]})
d3 = json.dumps({"i": "a", "j": "b", "k": "c"})
d4 = json.dumps(
{"i": [4, 5], "j": [6, 7], "k": [8, 9], "l": {4: "x", 5: "y", 6: "z"}}
)
D = ge.dataset.PandasDataset(
{
"json_col": [d1, d2, d3, d4],
"not_json": [4, 5, 6, 7],
"py_dict": [
{"a": 1, "out": 1},
{"b": 2, "out": 4},
{"c": 3, "out": 9},
{"d": 4, "out": 16},
],
"most": [d1, d2, d3, "d4"],
}
)
D.set_default_expectation_argument("result_format", "COMPLETE")
T = [
{
"in": {"column": "json_col"},
"out": {
"success": True,
"unexpected_index_list": [],
"unexpected_list": [],
},
},
{
"in": {"column": "not_json"},
"out": {
"success": False,
"unexpected_index_list": [0, 1, 2, 3],
"unexpected_list": [4, 5, 6, 7],
},
},
{
"in": {"column": "py_dict"},
"out": {
"success": False,
"unexpected_index_list": [0, 1, 2, 3],
"unexpected_list": [
{"a": 1, "out": 1},
{"b": 2, "out": 4},
{"c": 3, "out": 9},
{"d": 4, "out": 16},
],
},
},
{
"in": {"column": "most"},
"out": {
"success": False,
"unexpected_index_list": [3],
"unexpected_list": ["d4"],
},
},
{
"in": {"column": "most", "mostly": 0.75},
"out": {
"success": True,
"unexpected_index_list": [3],
"unexpected_list": ["d4"],
},
},
]
for t in T:
out = D.expect_column_values_to_be_json_parseable(**t["in"])
assert t["out"]["success"] == out.success
assert t["out"]["unexpected_index_list"] == out.result["unexpected_index_list"]
assert t["out"]["unexpected_list"] == out.result["unexpected_list"]
def test_expectation_decorator_summary_mode():
df = ge.dataset.PandasDataset(
{
"x": [1, 2, 3, 4, 5, 6, 7, 7, None, None],
}
)
df.set_default_expectation_argument("result_format", "COMPLETE")
df.set_default_expectation_argument("include_config", False)
# print '&'*80
# print json.dumps(df.expect_column_values_to_be_between('x', min_value=1, max_value=5, result_format="SUMMARY"), indent=2)
exp_output = expectationValidationResultSchema.load(
{
"success": False,
"result": {
"element_count": 10,
"missing_count": 2,
"missing_percent": 20.0,
"unexpected_count": 3,
"partial_unexpected_counts": [
{"value": 7.0, "count": 2},
{"value": 6.0, "count": 1},
],
"unexpected_percent": 37.5,
"unexpected_percent_nonmissing": 37.5,
"partial_unexpected_list": [6.0, 7.0, 7.0],
"partial_unexpected_index_list": [5, 6, 7],
},
}
)
assert (
df.expect_column_values_to_be_between(
"x", min_value=1, max_value=5, result_format="SUMMARY"
)
== exp_output
)
exp_output = expectationValidationResultSchema.load(
{
"success": True,
"result": {
"observed_value": 4.375,
"element_count": 10,
"missing_count": 2,
"missing_percent": 20.0,
},
}
)
assert (
df.expect_column_mean_to_be_between("x", 3, 7, result_format="SUMMARY")
== exp_output
)
def test_positional_arguments():
df = ge.dataset.PandasDataset(
{"x": [1, 3, 5, 7, 9], "y": [2, 4, 6, 8, 10], "z": [None, "a", "b", "c", "abc"]}
)
df.set_default_expectation_argument("result_format", "COMPLETE")
df.set_default_expectation_argument("include_config", False)
exp_output = expectationValidationResultSchema.load(
{
"success": True,
"result": {
"observed_value": 5.0,
"element_count": 5,
"missing_count": None,
"missing_percent": None,
},
}
)
assert df.expect_column_mean_to_be_between("x", 4, 6) == exp_output
out = df.expect_column_values_to_be_between("y", 1, 6)
t = {
"out": {
"success": False,
"unexpected_list": [8, 10],
"unexpected_index_list": [3, 4],
}
}
if "out" in t:
assert t["out"]["success"] == out.success
if "unexpected_index_list" in t["out"]:
assert (
t["out"]["unexpected_index_list"] == out.result["unexpected_index_list"]
)
if "unexpected_list" in t["out"]:
assert t["out"]["unexpected_list"] == out.result["unexpected_list"]
out = df.expect_column_values_to_be_between("y", 1, 8, strict_max=True)
t = {
"out": {
"success": False,
"unexpected_list": [8, 10],
"unexpected_index_list": [3, 4],
}
}
if "out" in t:
assert t["out"]["success"] == out.success
if "unexpected_index_list" in t["out"]:
assert (
t["out"]["unexpected_index_list"] == out.result["unexpected_index_list"]
)
if "unexpected_list" in t["out"]:
assert t["out"]["unexpected_list"] == out.result["unexpected_list"]
out = df.expect_column_values_to_be_between("y", 2, 100, strict_min=True)
t = {
"out": {"success": False, "unexpected_list": [2], "unexpected_index_list": [0]}
}
if "out" in t:
assert t["out"]["success"] == out.success
if "unexpected_index_list" in t["out"]:
assert (
t["out"]["unexpected_index_list"] == out.result["unexpected_index_list"]
)
if "unexpected_list" in t["out"]:
assert t["out"]["unexpected_list"] == out.result["unexpected_list"]
out = df.expect_column_values_to_be_between("y", 1, 6, mostly=0.5)
t = {
"out": {
"success": True,
"unexpected_list": [8, 10],
"unexpected_index_list": [3, 4],
}
}
if "out" in t:
assert t["out"]["success"] == out.success
if "unexpected_index_list" in t["out"]:
assert (
t["out"]["unexpected_index_list"] == out.result["unexpected_index_list"]
)
if "unexpected_list" in t["out"]:
assert t["out"]["unexpected_list"] == out.result["unexpected_list"]
out = df.expect_column_values_to_be_in_set("z", ["a", "b", "c"])
t = {
"out": {
"success": False,
"unexpected_list": ["abc"],
"unexpected_index_list": [4],
}
}
if "out" in t:
assert t["out"]["success"] == out.success
if "unexpected_index_list" in t["out"]:
assert (
t["out"]["unexpected_index_list"] == out.result["unexpected_index_list"]
)
if "unexpected_list" in t["out"]:
assert t["out"]["unexpected_list"] == out.result["unexpected_list"]
out = df.expect_column_values_to_be_in_set("z", ["a", "b", "c"], mostly=0.5)
t = {
"out": {
"success": True,
"unexpected_list": ["abc"],
"unexpected_index_list": [4],
}
}
if "out" in t:
assert t["out"]["success"] == out.success
if "unexpected_index_list" in t["out"]:
assert (
t["out"]["unexpected_index_list"] == out.result["unexpected_index_list"]
)
if "unexpected_list" in t["out"]:
assert t["out"]["unexpected_list"] == out.result["unexpected_list"]
def test_result_format_argument_in_decorators():
df = ge.dataset.PandasDataset(
{"x": [1, 3, 5, 7, 9], "y": [2, 4, 6, 8, 10], "z": [None, "a", "b", "c", "abc"]}
)
df.set_default_expectation_argument("result_format", "COMPLETE")
df.set_default_expectation_argument("include_config", False)
# Test explicit Nones in result_format
exp_output = expectationValidationResultSchema.load(
{
"success": True,
"result": {
"observed_value": 5.0,
"element_count": 5,
"missing_count": None,
"missing_percent": None,
},
}
)
assert (
df.expect_column_mean_to_be_between("x", 4, 6, result_format=None) == exp_output
)
exp_output = expectationValidationResultSchema.load(
{
"result": {
"element_count": 5,
"missing_count": 0,
"missing_percent": 0.0,
"partial_unexpected_counts": [
{"count": 1, "value": 10},
{"count": 1, "value": 8},
],
"partial_unexpected_index_list": [3, 4],
"partial_unexpected_list": [8, 10],
"unexpected_count": 2,
"unexpected_index_list": [3, 4],
"unexpected_list": [8, 10],
"unexpected_percent": 40.0,
"unexpected_percent_nonmissing": 40.0,
},
"success": False,
}
)
assert (
df.expect_column_values_to_be_between("y", 1, 6, result_format=None)
== exp_output
)
# Test unknown output format
with pytest.raises(ValueError):
df.expect_column_values_to_be_between("y", 1, 6, result_format="QUACK")
with pytest.raises(ValueError):
df.expect_column_mean_to_be_between("x", 4, 6, result_format="QUACK")
def test_from_pandas():
pd_df = pd.DataFrame(
{"x": [1, 3, 5, 7, 9], "y": [2, 4, 6, 8, 10], "z": [None, "a", "b", "c", "abc"]}
)
ge_df = ge.from_pandas(pd_df)
assert isinstance(ge_df, ge.data_asset.DataAsset)
assert list(ge_df.columns) == ["x", "y", "z"]
assert list(ge_df["x"]) == list(pd_df["x"])
assert list(ge_df["y"]) == list(pd_df["y"])
assert list(ge_df["z"]) == list(pd_df["z"])
# make an empty subclass to test dataset_class argument
class CustomPandasDataset(ge.dataset.PandasDataset):
pass
ge_df_custom = ge.from_pandas(pd_df, dataset_class=CustomPandasDataset)
assert not isinstance(ge_df, CustomPandasDataset)
assert isinstance(ge_df_custom, CustomPandasDataset)
assert list(ge_df_custom.columns) == ["x", "y", "z"]
assert list(ge_df_custom["x"]) == list(pd_df["x"])
assert list(ge_df_custom["y"]) == list(pd_df["y"])
assert list(ge_df_custom["z"]) == list(pd_df["z"])
def test_ge_pandas_concatenating_no_autoinspect():
df1 = ge.dataset.PandasDataset({"A": ["A0", "A1", "A2"], "B": ["B0", "B1", "B2"]})
df1.expect_column_to_exist("A")
df1.expect_column_to_exist("B")
df1.expect_column_values_to_match_regex("A", "^A[0-2]$")
df1.expect_column_values_to_match_regex("B", "^B[0-2]$")
df2 = ge.dataset.PandasDataset({"A": ["A3", "A4", "A5"], "B": ["B3", "B4", "B5"]})
df2.expect_column_to_exist("A")
df2.expect_column_to_exist("B")
df2.expect_column_values_to_match_regex("A", "^A[3-5]$")
df2.expect_column_values_to_match_regex("B", "^B[3-5]$")
df = pd.concat([df1, df2])
exp_c = []
# The concatenated data frame will:
#
# 1. Be a ge.dataset.PandaDataSet
    # 2. Have no expectations (since no default expectations are created), not even expectations that were
    #    common to the concatenated dataframes and would still make sense (since no autoinspection happens).
assert isinstance(df, ge.dataset.PandasDataset)
assert df.get_expectation_suite().expectations == exp_c
def test_ge_pandas_joining():
df1 = ge.dataset.PandasDataset(
{"A": ["A0", "A1", "A2"], "B": ["B0", "B1", "B2"]}, index=["K0", "K1", "K2"]
)
df1.expect_column_values_to_match_regex("A", "^A[0-2]$")
df1.expect_column_values_to_match_regex("B", "^B[0-2]$")
df2 = ge.dataset.PandasDataset(
{"C": ["C0", "C2", "C3"], "D": ["C0", "D2", "D3"]}, index=["K0", "K2", "K3"]
)
df2.expect_column_values_to_match_regex("C", "^C[0-2]$")
df2.expect_column_values_to_match_regex("D", "^D[0-2]$")
df = df1.join(df2)
exp_j = [
# No autoinspection is default 20180920
# {'expectation_type': 'expect_column_to_exist',
# 'kwargs': {'column': 'A'}},
# {'expectation_type': 'expect_column_to_exist',
# 'kwargs': {'column': 'B'}},
# {'expectation_type': 'expect_column_to_exist',
# 'kwargs': {'column': 'C'}},
# {'expectation_type': 'expect_column_to_exist',
# 'kwargs': {'column': 'D'}}
]
# The joined data frame will:
#
# 1. Be a ge.dataset.PandaDataSet
# 2. Have no expectations (no autoinspection)
assert isinstance(df, ge.dataset.PandasDataset)
assert df.get_expectation_suite().expectations == exp_j
def test_ge_pandas_merging():
df1 = ge.dataset.PandasDataset({"id": [1, 2, 3, 4], "name": ["a", "b", "c", "d"]})
df1.expect_column_values_to_match_regex("name", "^[A-Za-z ]+$")
df2 = ge.dataset.PandasDataset(
{"id": [1, 2, 3, 4], "salary": [57000, 52000, 59000, 65000]}
)
df2.expect_column_values_to_match_regex("salary", "^[0-9]{4,6}$")
df = df1.merge(df2, on="id")
exp_m = [
# No autoinspection as of 20180920
# {'expectation_type': 'expect_column_to_exist',
# 'kwargs': {'column': 'id'}},
# {'expectation_type': 'expect_column_to_exist',
# 'kwargs': {'column': 'name'}},
# {'expectation_type': 'expect_column_to_exist',
# 'kwargs': {'column': 'salary'}}
]
# The merged data frame will:
#
# 1. Be a ge.dataset.PandaDataSet
# 2. Have no expectations (no autoinspection is now default)
assert isinstance(df, ge.dataset.PandasDataset)
assert df.get_expectation_suite().expectations == exp_m
def test_ge_pandas_sampling(empty_data_context):
context: DataContext = empty_data_context
df = ge.dataset.PandasDataset(
{
"A": [1, 2, 3, 4],
"B": [5, 6, 7, 8],
"C": ["a", "b", "c", "d"],
"D": ["e", "f", "g", "h"],
}
)
# Put some simple expectations on the data frame
df.profile(profiler=ColumnsExistProfiler)
df.expect_column_values_to_be_in_set("A", [1, 2, 3, 4])
df.expect_column_values_to_be_in_set("B", [5, 6, 7, 8])
df.expect_column_values_to_be_in_set("C", ["a", "b", "c", "d"])
df.expect_column_values_to_be_in_set("D", ["e", "f", "g", "h"])
exp1 = df.get_expectation_suite().expectations
# The sampled data frame should:
#
# 1. Be a ge.dataset.PandaDataSet
# 2. Inherit ALL the expectations of the parent data frame
samp1 = df.sample(n=2)
assert isinstance(samp1, ge.dataset.PandasDataset)
assert samp1.get_expectation_suite().expectations == exp1
samp1 = df.sample(frac=0.25, replace=True)
assert isinstance(samp1, ge.dataset.PandasDataset)
assert samp1.get_expectation_suite().expectations == exp1
# Change expectation on column "D", sample, and check expectations.
# The failing expectation on column "D" is NOT automatically dropped
# in the sample.
df.expect_column_values_to_be_in_set("D", ["e", "f", "g", "x"])
samp1 = df.sample(n=2)
exp1_dict: dict = expectationSuiteSchema.load(
{
"expectation_suite_name": "test",
"expectations": [
{
"expectation_type": "expect_column_to_exist",
"kwargs": {"column": "A"},
},
{
"expectation_type": "expect_column_to_exist",
"kwargs": {"column": "B"},
},
{
"expectation_type": "expect_column_to_exist",
"kwargs": {"column": "C"},
},
{
"expectation_type": "expect_column_to_exist",
"kwargs": {"column": "D"},
},
{
"expectation_type": "expect_column_values_to_be_in_set",
"kwargs": {"column": "A", "value_set": [1, 2, 3, 4]},
},
{
"expectation_type": "expect_column_values_to_be_in_set",
"kwargs": {"column": "B", "value_set": [5, 6, 7, 8]},
},
{
"expectation_type": "expect_column_values_to_be_in_set",
"kwargs": {"column": "C", "value_set": ["a", "b", "c", "d"]},
},
{
"expectation_type": "expect_column_values_to_be_in_set",
"kwargs": {"column": "D", "value_set": ["e", "f", "g", "x"]},
},
],
}
)
exp1: ExpectationSuite = ExpectationSuite(**exp1_dict, data_context=context)
assert (
samp1.get_expectation_suite(discard_failed_expectations=False).expectations
== exp1.expectations
)
def test_ge_pandas_subsetting():
df = ge.dataset.PandasDataset(
{
"A": [1, 2, 3, 4],
"B": [5, 6, 7, 8],
"C": ["a", "b", "c", "d"],
"D": ["e", "f", "g", "h"],
}
)
# Put some simple expectations on the data frame
df.expect_column_values_to_be_in_set("A", [1, 2, 3, 4])
df.expect_column_values_to_be_in_set("B", [5, 6, 7, 8])
df.expect_column_values_to_be_in_set("C", ["a", "b", "c", "d"])
df.expect_column_values_to_be_in_set("D", ["e", "f", "g", "h"])
# The subsetted data frame should:
#
# 1. Be a ge.dataset.PandaDataSet
# 2. Inherit ALL the expectations of the parent data frame
exp1 = df.get_expectation_suite().expectations
sub1 = df[["A", "D"]]
assert isinstance(sub1, ge.dataset.PandasDataset)
assert sub1.get_expectation_suite().expectations == exp1
sub1 = df[["A"]]
assert isinstance(sub1, ge.dataset.PandasDataset)
assert sub1.get_expectation_suite().expectations == exp1
sub1 = df[:3]
assert isinstance(sub1, ge.dataset.PandasDataset)
assert sub1.get_expectation_suite().expectations == exp1
sub1 = df[1:2]
assert isinstance(sub1, ge.dataset.PandasDataset)
assert sub1.get_expectation_suite().expectations == exp1
sub1 = df[:-1]
assert isinstance(sub1, ge.dataset.PandasDataset)
assert sub1.get_expectation_suite().expectations == exp1
sub1 = df[-1:]
assert isinstance(sub1, ge.dataset.PandasDataset)
assert sub1.get_expectation_suite().expectations == exp1
sub1 = df.iloc[:3, 1:4]
assert isinstance(sub1, ge.dataset.PandasDataset)
assert sub1.get_expectation_suite().expectations == exp1
sub1 = df.loc[0:, "A":"B"]
assert isinstance(sub1, ge.dataset.PandasDataset)
assert sub1.get_expectation_suite().expectations == exp1
@pytest.mark.filterwarnings(
"ignore:DataAsset.remove_expectations*:DeprecationWarning:great_expectations.data_asset"
)
@pytest.mark.filterwarnings("ignore:Removed*:UserWarning:great_expectations.data_asset")
def test_ge_pandas_automatic_failure_removal():
df = ge.dataset.PandasDataset(
{
"A": [1, 2, 3, 4],
"B": [5, 6, 7, 8],
"C": ["a", "b", "c", "d"],
"D": ["e", "f", "g", "h"],
}
)
# Put some simple expectations on the data frame
df.profile(ge.profile.ColumnsExistProfiler)
df.expect_column_values_to_be_in_set("A", [1, 2, 3, 4])
df.expect_column_values_to_be_in_set("B", [5, 6, 7, 8])
df.expect_column_values_to_be_in_set("C", ["w", "x", "y", "z"])
df.expect_column_values_to_be_in_set("D", ["e", "f", "g", "h"])
# First check that failing expectations are NOT automatically
# dropped when sampling.
# For this data frame, the expectation on column "C" above fails.
exp1 = [
ExpectationConfiguration(
expectation_type="expect_column_to_exist", kwargs={"column": "A"}
),
ExpectationConfiguration(
expectation_type="expect_column_to_exist", kwargs={"column": "B"}
),
ExpectationConfiguration(
expectation_type="expect_column_to_exist", kwargs={"column": "C"}
),
ExpectationConfiguration(
expectation_type="expect_column_to_exist", kwargs={"column": "D"}
),
ExpectationConfiguration(
expectation_type="expect_column_values_to_be_in_set",
kwargs={"column": "A", "value_set": [1, 2, 3, 4]},
),
ExpectationConfiguration(
expectation_type="expect_column_values_to_be_in_set",
kwargs={"column": "B", "value_set": [5, 6, 7, 8]},
),
ExpectationConfiguration(
expectation_type="expect_column_values_to_be_in_set",
kwargs={"column": "C", "value_set": ["w", "x", "y", "z"]},
),
ExpectationConfiguration(
expectation_type="expect_column_values_to_be_in_set",
kwargs={"column": "D", "value_set": ["e", "f", "g", "h"]},
),
]
samp1 = df.sample(n=2)
assert (
samp1.get_expectation_suite(discard_failed_expectations=False).expectations
== exp1
)
# Now check subsetting to verify that failing expectations are NOT
# automatically dropped when subsetting.
sub1 = df[["A", "D"]]
assert (
        sub1.get_expectation_suite(discard_failed_expectations=False).expectations
== exp1
)
# Set property/attribute so that failing expectations are
# automatically removed when sampling or subsetting.
df.discard_subset_failing_expectations = True
###
# Note: Order matters in this test, and a validationoperator may change order
###
exp_samp = [
ExpectationConfiguration(
expectation_type="expect_column_to_exist", kwargs={"column": "A"}
),
ExpectationConfiguration(
expectation_type="expect_column_to_exist", kwargs={"column": "B"}
),
ExpectationConfiguration(
expectation_type="expect_column_to_exist", kwargs={"column": "C"}
),
ExpectationConfiguration(
expectation_type="expect_column_to_exist", kwargs={"column": "D"}
),
ExpectationConfiguration(
expectation_type="expect_column_values_to_be_in_set",
kwargs={"column": "A", "value_set": [1, 2, 3, 4]},
),
ExpectationConfiguration(
expectation_type="expect_column_values_to_be_in_set",
kwargs={"column": "B", "value_set": [5, 6, 7, 8]},
),
ExpectationConfiguration(
expectation_type="expect_column_values_to_be_in_set",
kwargs={"column": "D", "value_set": ["e", "f", "g", "h"]},
),
]
samp2 = df.sample(n=2)
assert (
samp2.get_expectation_suite(discard_failed_expectations=False).expectations
== exp_samp
)
# Now check subsetting. In additional to the failure on column "C",
# the expectations on column "B" now fail since column "B" doesn't
# exist in the subset.
sub2 = df[["A", "D"]]
exp_sub = [
ExpectationConfiguration(
expectation_type="expect_column_to_exist", kwargs={"column": "A"}
),
ExpectationConfiguration(
expectation_type="expect_column_values_to_be_in_set",
kwargs={"column": "A", "value_set": [1, 2, 3, 4]},
),
ExpectationConfiguration(
expectation_type="expect_column_to_exist", kwargs={"column": "D"}
),
ExpectationConfiguration(
expectation_type="expect_column_values_to_be_in_set",
kwargs={"column": "D", "value_set": ["e", "f", "g", "h"]},
),
]
assert (
        sub2.get_expectation_suite(discard_failed_expectations=False).expectations
        == exp_sub
)
def test_subclass_pandas_subset_retains_subclass():
"""A subclass of PandasDataset should still be that subclass after a Pandas subsetting operation"""
class CustomPandasDataset(ge.dataset.PandasDataset):
@ge.dataset.MetaPandasDataset.column_map_expectation
def expect_column_values_to_be_odd(self, column):
return column.map(lambda x: x % 2)
@ge.dataset.MetaPandasDataset.column_map_expectation
def expectation_that_crashes_on_sixes(self, column):
return column.map(lambda x: (x - 6) / 0 != "duck")
df = CustomPandasDataset(
{
"all_odd": [1, 3, 5, 5, 5, 7, 9, 9, 9, 11],
"mostly_odd": [1, 3, 5, 7, 9, 2, 4, 1, 3, 5],
"all_even": [2, 4, 4, 6, 6, 6, 8, 8, 8, 8],
"odd_missing": [1, 3, 5, None, None, None, None, 1, 3, None],
"mixed_missing": [1, 3, 5, None, None, 2, 4, 1, 3, None],
"all_missing": [None, None, None, None, None, None, None, None, None, None],
}
)
df2 = df.sample(frac=0.5)
assert type(df2) == type(df)
def test_validate_map_expectation_on_categorical_column():
"""Map expectations should work on categorical columns"""
D = ge.dataset.PandasDataset(
{
"cat_column_1": [
"cat_one",
"cat_two",
"cat_one",
"cat_two",
"cat_one",
"cat_two",
"cat_one",
"cat_two",
],
}
)
D["cat_column_1"] = D["cat_column_1"].astype("category")
D.set_default_expectation_argument("result_format", "COMPLETE")
out = D.expect_column_value_lengths_to_equal("cat_column_1", 7)
self.assertEqual(out["success"], True)
def test_pandas_deepcopy():
import copy
df = ge.dataset.PandasDataset({"a": [1, 2, 3]})
df2 = copy.deepcopy(df)
df["a"] = [2, 3, 4]
# Our copied dataframe should not be affected
assert df2.expect_column_to_exist("a").success == True
assert list(df["a"]) == [2, 3, 4]
assert list(df2["a"]) == [1, 2, 3]
def test_ge_value_count_of_object_dtype_column_with_mixed_types():
"""
Having mixed type values in a object dtype column (e.g., strings and floats)
used to raise a TypeError when sorting value_counts. This test verifies
that the issue is fixed.
"""
df = ge.dataset.PandasDataset(
{
"A": [1.5, 0.009, 0.5, "I am a string in an otherwise float column"],
}
)
value_counts = df.get_column_value_counts("A")
assert value_counts["I am a string in an otherwise float column"] == 1
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_adding_expectation_to_pandas_dataset_not_send_usage_message(mock_emit):
"""
What does this test and why?
When an Expectation is called using a PandasDataset, it validates the dataset using the implementation of
the Expectation. As part of the process, it also adds the Expectation to the active
    ExpectationSuite. This test ensures that this indirect way of adding an Expectation to the ExpectationSuite
    (i.e., not calling add_expectations() directly) does not emit a usage_stats event.
"""
df = ge.dataset.PandasDataset(
{
"A": [[1, 2], None, [4, 5], 6],
}
)
validation = df.expect_column_values_to_be_of_type("A", "list")
# add_expectation() will not send usage_statistics event when called from a Pandas Dataset
assert mock_emit.call_count == 0
assert mock_emit.call_args_list == []
def test_expect_values_to_be_of_type_list():
"""
Having lists in a Pandas column used to raise a ValueError when parsing to
see if any rows had missing values. This test verifies that the issue is fixed.
"""
df = ge.dataset.PandasDataset(
{
"A": [[1, 2], None, [4, 5], 6],
}
)
validation = df.expect_column_values_to_be_of_type("A", "list")
assert not validation.success
def test_expect_values_quantiles_to_be_between():
"""
Test that quantile bounds set to zero actually get interpreted as such. Zero
used to be interpreted as None (and thus +-inf) and we'd get false negatives.
"""
T = [
([1, 2, 3, 4, 5], [0.5], [[0, 0]], False),
([0, 0, 0, 0, 0], [0.5], [[0, 0]], True),
]
for data, quantiles, value_ranges, success in T:
df = ge.dataset.PandasDataset({"A": data})
validation = df.expect_column_quantile_values_to_be_between(
"A",
{
"quantiles": quantiles,
"value_ranges": value_ranges,
},
)
assert validation.success is success
| 34.324948
| 137
| 0.552709
|
794b9d7d0c7cd66873bcba290c972c6163ebecfc
| 3,515
|
py
|
Python
|
bindings/python/ensmallen/datasets/string/arcobacterspl.py
|
AnacletoLAB/ensmallen_graph
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 5
|
2021-02-17T00:44:45.000Z
|
2021-08-09T16:41:47.000Z
|
bindings/python/ensmallen/datasets/string/arcobacterspl.py
|
AnacletoLAB/ensmallen_graph
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 18
|
2021-01-07T16:47:39.000Z
|
2021-08-12T21:51:32.000Z
|
bindings/python/ensmallen/datasets/string/arcobacterspl.py
|
AnacletoLAB/ensmallen
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 3
|
2021-01-14T02:20:59.000Z
|
2021-08-04T19:09:52.000Z
|
"""
This file offers the methods to automatically retrieve the graph Arcobacter sp. L.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def ArcobacterSpL(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Arcobacter sp. L graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.0
- homology.v11.5
- physical.links.v11.0
- physical.links.v11.5
- links.v11.0
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of Arcobacter sp. L graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="ArcobacterSpL",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
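# Usage sketch (illustrative only; it assumes this function is re-exported from
# ``ensmallen.datasets.string`` like the other generated STRING modules, and that
# the STRING edge list can be downloaded or is already cached):
#
#     from ensmallen.datasets.string import ArcobacterSpL
#     graph = ArcobacterSpL(directed=False, version="links.v11.5")
#
# The call stores the retrieved files under ``graphs/string`` and returns an
# ensmallen ``Graph`` instance.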
| 32.546296
| 223
| 0.672831
|
794b9df3796df451dbf7748dc7e3cdc72ecff807
| 186,573
|
py
|
Python
|
Lib/test/test_argparse.py
|
MarshalL-son/cpython
|
9b027d4cea57e98c76f5176cc3188dc81603356c
|
[
"0BSD"
] | null | null | null |
Lib/test/test_argparse.py
|
MarshalL-son/cpython
|
9b027d4cea57e98c76f5176cc3188dc81603356c
|
[
"0BSD"
] | 2
|
2022-01-20T18:33:26.000Z
|
2022-03-01T04:06:41.000Z
|
Lib/test/test_argparse.py
|
duke4ever/cpython
|
ad5e8520f3e117f45481513014548a39879d30d2
|
[
"0BSD"
] | null | null | null |
# Author: Steven J. Bethard <steven.bethard@gmail.com>.
import inspect
import io
import operator
import os
import shutil
import stat
import sys
import textwrap
import tempfile
import unittest
import argparse
import warnings
from test.support import os_helper
from unittest import mock
class StdIOBuffer(io.TextIOWrapper):
'''Replacement for writable io.StringIO that behaves more like real file
Unlike StringIO, provides a buffer attribute that holds the underlying
binary data, allowing it to replace sys.stdout/sys.stderr in more
contexts.
'''
def __init__(self, initial_value='', newline='\n'):
initial_value = initial_value.encode('utf-8')
super().__init__(io.BufferedWriter(io.BytesIO(initial_value)),
'utf-8', newline=newline)
def getvalue(self):
self.flush()
return self.buffer.raw.getvalue().decode('utf-8')
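# Typical use in this file (see stderr_to_parser_error below): a StdIOBuffer
# temporarily replaces sys.stdout/sys.stderr, the parser writes into it, and the
# captured text is read back afterwards, e.g.:
#
#     sys.stdout = StdIOBuffer()
#     ...  # run the parser
#     captured = sys.stdout.getvalue()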
class TestCase(unittest.TestCase):
def setUp(self):
# The tests assume that line wrapping occurs at 80 columns, but this
# behaviour can be overridden by setting the COLUMNS environment
# variable. To ensure that this width is used, set COLUMNS to 80.
env = os_helper.EnvironmentVarGuard()
env['COLUMNS'] = '80'
self.addCleanup(env.__exit__)
class TempDirMixin(object):
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.old_dir = os.getcwd()
os.chdir(self.temp_dir)
def tearDown(self):
os.chdir(self.old_dir)
for root, dirs, files in os.walk(self.temp_dir, topdown=False):
for name in files:
os.chmod(os.path.join(self.temp_dir, name), stat.S_IWRITE)
shutil.rmtree(self.temp_dir, True)
def create_writable_file(self, filename):
file_path = os.path.join(self.temp_dir, filename)
with open(file_path, 'w', encoding="utf-8") as file:
file.write(filename)
return file_path
def create_readonly_file(self, filename):
os.chmod(self.create_writable_file(filename), stat.S_IREAD)
class Sig(object):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
class NS(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def __repr__(self):
sorted_items = sorted(self.__dict__.items())
kwarg_str = ', '.join(['%s=%r' % tup for tup in sorted_items])
return '%s(%s)' % (type(self).__name__, kwarg_str)
def __eq__(self, other):
return vars(self) == vars(other)
class ArgumentParserError(Exception):
def __init__(self, message, stdout=None, stderr=None, error_code=None):
Exception.__init__(self, message, stdout, stderr)
self.message = message
self.stdout = stdout
self.stderr = stderr
self.error_code = error_code
def stderr_to_parser_error(parse_args, *args, **kwargs):
# if this is being called recursively and stderr or stdout is already being
# redirected, simply call the function and let the enclosing function
# catch the exception
if isinstance(sys.stderr, StdIOBuffer) or isinstance(sys.stdout, StdIOBuffer):
return parse_args(*args, **kwargs)
# if this is not being called recursively, redirect stderr and
# use it as the ArgumentParserError message
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = StdIOBuffer()
sys.stderr = StdIOBuffer()
try:
try:
result = parse_args(*args, **kwargs)
for key in list(vars(result)):
attr = getattr(result, key)
if attr is sys.stdout:
setattr(result, key, old_stdout)
elif attr is sys.stdout.buffer:
setattr(result, key, getattr(old_stdout, 'buffer', BIN_STDOUT_SENTINEL))
elif attr is sys.stderr:
setattr(result, key, old_stderr)
elif attr is sys.stderr.buffer:
setattr(result, key, getattr(old_stderr, 'buffer', BIN_STDERR_SENTINEL))
return result
except SystemExit as e:
code = e.code
stdout = sys.stdout.getvalue()
stderr = sys.stderr.getvalue()
raise ArgumentParserError(
"SystemExit", stdout, stderr, code) from None
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
class ErrorRaisingArgumentParser(argparse.ArgumentParser):
def parse_args(self, *args, **kwargs):
parse_args = super(ErrorRaisingArgumentParser, self).parse_args
return stderr_to_parser_error(parse_args, *args, **kwargs)
def exit(self, *args, **kwargs):
exit = super(ErrorRaisingArgumentParser, self).exit
return stderr_to_parser_error(exit, *args, **kwargs)
def error(self, *args, **kwargs):
error = super(ErrorRaisingArgumentParser, self).error
return stderr_to_parser_error(error, *args, **kwargs)
class ParserTesterMetaclass(type):
"""Adds parser tests using the class attributes.
Classes of this type should specify the following attributes:
argument_signatures -- a list of Sig objects which specify
the signatures of Argument objects to be created
failures -- a list of args lists that should cause the parser
to fail
successes -- a list of (initial_args, options, remaining_args) tuples
where initial_args specifies the string args to be parsed,
options is a dict that should match the vars() of the options
parsed out of initial_args, and remaining_args should be any
remaining unparsed arguments
"""
def __init__(cls, name, bases, bodydict):
if name == 'ParserTestCase':
return
# default parser signature is empty
if not hasattr(cls, 'parser_signature'):
cls.parser_signature = Sig()
if not hasattr(cls, 'parser_class'):
cls.parser_class = ErrorRaisingArgumentParser
# ---------------------------------------
# functions for adding optional arguments
# ---------------------------------------
def no_groups(parser, argument_signatures):
"""Add all arguments directly to the parser"""
for sig in argument_signatures:
parser.add_argument(*sig.args, **sig.kwargs)
def one_group(parser, argument_signatures):
"""Add all arguments under a single group in the parser"""
group = parser.add_argument_group('foo')
for sig in argument_signatures:
group.add_argument(*sig.args, **sig.kwargs)
def many_groups(parser, argument_signatures):
"""Add each argument in its own group to the parser"""
for i, sig in enumerate(argument_signatures):
group = parser.add_argument_group('foo:%i' % i)
group.add_argument(*sig.args, **sig.kwargs)
# --------------------------
# functions for parsing args
# --------------------------
def listargs(parser, args):
"""Parse the args by passing in a list"""
return parser.parse_args(args)
def sysargs(parser, args):
"""Parse the args by defaulting to sys.argv"""
old_sys_argv = sys.argv
sys.argv = [old_sys_argv[0]] + args
try:
return parser.parse_args()
finally:
sys.argv = old_sys_argv
# class that holds the combination of one optional argument
# addition method and one arg parsing method
class AddTests(object):
def __init__(self, tester_cls, add_arguments, parse_args):
self._add_arguments = add_arguments
self._parse_args = parse_args
add_arguments_name = self._add_arguments.__name__
parse_args_name = self._parse_args.__name__
for test_func in [self.test_failures, self.test_successes]:
func_name = test_func.__name__
names = func_name, add_arguments_name, parse_args_name
test_name = '_'.join(names)
def wrapper(self, test_func=test_func):
test_func(self)
try:
wrapper.__name__ = test_name
except TypeError:
pass
setattr(tester_cls, test_name, wrapper)
def _get_parser(self, tester):
args = tester.parser_signature.args
kwargs = tester.parser_signature.kwargs
parser = tester.parser_class(*args, **kwargs)
self._add_arguments(parser, tester.argument_signatures)
return parser
def test_failures(self, tester):
parser = self._get_parser(tester)
for args_str in tester.failures:
args = args_str.split()
with tester.assertRaises(ArgumentParserError, msg=args):
parser.parse_args(args)
def test_successes(self, tester):
parser = self._get_parser(tester)
for args, expected_ns in tester.successes:
if isinstance(args, str):
args = args.split()
result_ns = self._parse_args(parser, args)
tester.assertEqual(expected_ns, result_ns)
# add tests for each combination of an optionals adding method
# and an arg parsing method
for add_arguments in [no_groups, one_group, many_groups]:
for parse_args in [listargs, sysargs]:
AddTests(cls, add_arguments, parse_args)
bases = TestCase,
ParserTestCase = ParserTesterMetaclass('ParserTestCase', bases, {})
# ===============
# Optionals tests
# ===============
class TestOptionalsSingleDash(ParserTestCase):
"""Test an Optional with a single-dash option string"""
argument_signatures = [Sig('-x')]
failures = ['-x', 'a', '--foo', '-x --foo', '-x -y']
successes = [
('', NS(x=None)),
('-x a', NS(x='a')),
('-xa', NS(x='a')),
('-x -1', NS(x='-1')),
('-x-1', NS(x='-1')),
]
class TestOptionalsSingleDashCombined(ParserTestCase):
"""Test an Optional with a single-dash option string"""
argument_signatures = [
Sig('-x', action='store_true'),
Sig('-yyy', action='store_const', const=42),
Sig('-z'),
]
failures = ['a', '--foo', '-xa', '-x --foo', '-x -z', '-z -x',
'-yx', '-yz a', '-yyyx', '-yyyza', '-xyza']
successes = [
('', NS(x=False, yyy=None, z=None)),
('-x', NS(x=True, yyy=None, z=None)),
('-za', NS(x=False, yyy=None, z='a')),
('-z a', NS(x=False, yyy=None, z='a')),
('-xza', NS(x=True, yyy=None, z='a')),
('-xz a', NS(x=True, yyy=None, z='a')),
('-x -za', NS(x=True, yyy=None, z='a')),
('-x -z a', NS(x=True, yyy=None, z='a')),
('-y', NS(x=False, yyy=42, z=None)),
('-yyy', NS(x=False, yyy=42, z=None)),
('-x -yyy -za', NS(x=True, yyy=42, z='a')),
('-x -yyy -z a', NS(x=True, yyy=42, z='a')),
]
class TestOptionalsSingleDashLong(ParserTestCase):
"""Test an Optional with a multi-character single-dash option string"""
argument_signatures = [Sig('-foo')]
failures = ['-foo', 'a', '--foo', '-foo --foo', '-foo -y', '-fooa']
successes = [
('', NS(foo=None)),
('-foo a', NS(foo='a')),
('-foo -1', NS(foo='-1')),
('-fo a', NS(foo='a')),
('-f a', NS(foo='a')),
]
class TestOptionalsSingleDashSubsetAmbiguous(ParserTestCase):
"""Test Optionals where option strings are subsets of each other"""
argument_signatures = [Sig('-f'), Sig('-foobar'), Sig('-foorab')]
failures = ['-f', '-foo', '-fo', '-foo b', '-foob', '-fooba', '-foora']
successes = [
('', NS(f=None, foobar=None, foorab=None)),
('-f a', NS(f='a', foobar=None, foorab=None)),
('-fa', NS(f='a', foobar=None, foorab=None)),
('-foa', NS(f='oa', foobar=None, foorab=None)),
('-fooa', NS(f='ooa', foobar=None, foorab=None)),
('-foobar a', NS(f=None, foobar='a', foorab=None)),
('-foorab a', NS(f=None, foobar=None, foorab='a')),
]
class TestOptionalsSingleDashAmbiguous(ParserTestCase):
"""Test Optionals that partially match but are not subsets"""
argument_signatures = [Sig('-foobar'), Sig('-foorab')]
failures = ['-f', '-f a', '-fa', '-foa', '-foo', '-fo', '-foo b']
successes = [
('', NS(foobar=None, foorab=None)),
('-foob a', NS(foobar='a', foorab=None)),
('-foor a', NS(foobar=None, foorab='a')),
('-fooba a', NS(foobar='a', foorab=None)),
('-foora a', NS(foobar=None, foorab='a')),
('-foobar a', NS(foobar='a', foorab=None)),
('-foorab a', NS(foobar=None, foorab='a')),
]
class TestOptionalsNumeric(ParserTestCase):
"""Test an Optional with a short opt string"""
argument_signatures = [Sig('-1', dest='one')]
failures = ['-1', 'a', '-1 --foo', '-1 -y', '-1 -1', '-1 -2']
successes = [
('', NS(one=None)),
('-1 a', NS(one='a')),
('-1a', NS(one='a')),
('-1-2', NS(one='-2')),
]
class TestOptionalsDoubleDash(ParserTestCase):
"""Test an Optional with a double-dash option string"""
argument_signatures = [Sig('--foo')]
failures = ['--foo', '-f', '-f a', 'a', '--foo -x', '--foo --bar']
successes = [
('', NS(foo=None)),
('--foo a', NS(foo='a')),
('--foo=a', NS(foo='a')),
('--foo -2.5', NS(foo='-2.5')),
('--foo=-2.5', NS(foo='-2.5')),
]
class TestOptionalsDoubleDashPartialMatch(ParserTestCase):
"""Tests partial matching with a double-dash option string"""
argument_signatures = [
Sig('--badger', action='store_true'),
Sig('--bat'),
]
failures = ['--bar', '--b', '--ba', '--b=2', '--ba=4', '--badge 5']
successes = [
('', NS(badger=False, bat=None)),
('--bat X', NS(badger=False, bat='X')),
('--bad', NS(badger=True, bat=None)),
('--badg', NS(badger=True, bat=None)),
('--badge', NS(badger=True, bat=None)),
('--badger', NS(badger=True, bat=None)),
]
class TestOptionalsDoubleDashPrefixMatch(ParserTestCase):
"""Tests when one double-dash option string is a prefix of another"""
argument_signatures = [
Sig('--badger', action='store_true'),
Sig('--ba'),
]
failures = ['--bar', '--b', '--ba', '--b=2', '--badge 5']
successes = [
('', NS(badger=False, ba=None)),
('--ba X', NS(badger=False, ba='X')),
('--ba=X', NS(badger=False, ba='X')),
('--bad', NS(badger=True, ba=None)),
('--badg', NS(badger=True, ba=None)),
('--badge', NS(badger=True, ba=None)),
('--badger', NS(badger=True, ba=None)),
]
class TestOptionalsSingleDoubleDash(ParserTestCase):
"""Test an Optional with single- and double-dash option strings"""
argument_signatures = [
Sig('-f', action='store_true'),
Sig('--bar'),
Sig('-baz', action='store_const', const=42),
]
failures = ['--bar', '-fbar', '-fbaz', '-bazf', '-b B', 'B']
successes = [
('', NS(f=False, bar=None, baz=None)),
('-f', NS(f=True, bar=None, baz=None)),
('--ba B', NS(f=False, bar='B', baz=None)),
('-f --bar B', NS(f=True, bar='B', baz=None)),
('-f -b', NS(f=True, bar=None, baz=42)),
('-ba -f', NS(f=True, bar=None, baz=42)),
]
class TestOptionalsAlternatePrefixChars(ParserTestCase):
"""Test an Optional with option strings with custom prefixes"""
parser_signature = Sig(prefix_chars='+:/', add_help=False)
argument_signatures = [
Sig('+f', action='store_true'),
Sig('::bar'),
Sig('/baz', action='store_const', const=42),
]
failures = ['--bar', '-fbar', '-b B', 'B', '-f', '--bar B', '-baz', '-h', '--help', '+h', '::help', '/help']
successes = [
('', NS(f=False, bar=None, baz=None)),
('+f', NS(f=True, bar=None, baz=None)),
('::ba B', NS(f=False, bar='B', baz=None)),
('+f ::bar B', NS(f=True, bar='B', baz=None)),
('+f /b', NS(f=True, bar=None, baz=42)),
('/ba +f', NS(f=True, bar=None, baz=42)),
]
class TestOptionalsAlternatePrefixCharsAddedHelp(ParserTestCase):
"""When ``-`` not in prefix_chars, default operators created for help
should use the prefix_chars in use rather than - or --
http://bugs.python.org/issue9444"""
parser_signature = Sig(prefix_chars='+:/', add_help=True)
argument_signatures = [
Sig('+f', action='store_true'),
Sig('::bar'),
Sig('/baz', action='store_const', const=42),
]
failures = ['--bar', '-fbar', '-b B', 'B', '-f', '--bar B', '-baz']
successes = [
('', NS(f=False, bar=None, baz=None)),
('+f', NS(f=True, bar=None, baz=None)),
('::ba B', NS(f=False, bar='B', baz=None)),
('+f ::bar B', NS(f=True, bar='B', baz=None)),
('+f /b', NS(f=True, bar=None, baz=42)),
('/ba +f', NS(f=True, bar=None, baz=42))
]
class TestOptionalsAlternatePrefixCharsMultipleShortArgs(ParserTestCase):
"""Verify that Optionals must be called with their defined prefixes"""
parser_signature = Sig(prefix_chars='+-', add_help=False)
argument_signatures = [
Sig('-x', action='store_true'),
Sig('+y', action='store_true'),
Sig('+z', action='store_true'),
]
failures = ['-w',
'-xyz',
'+x',
'-y',
'+xyz',
]
successes = [
('', NS(x=False, y=False, z=False)),
('-x', NS(x=True, y=False, z=False)),
('+y -x', NS(x=True, y=True, z=False)),
('+yz -x', NS(x=True, y=True, z=True)),
]
class TestOptionalsShortLong(ParserTestCase):
"""Test a combination of single- and double-dash option strings"""
argument_signatures = [
Sig('-v', '--verbose', '-n', '--noisy', action='store_true'),
]
failures = ['--x --verbose', '-N', 'a', '-v x']
successes = [
('', NS(verbose=False)),
('-v', NS(verbose=True)),
('--verbose', NS(verbose=True)),
('-n', NS(verbose=True)),
('--noisy', NS(verbose=True)),
]
class TestOptionalsDest(ParserTestCase):
"""Tests various means of setting destination"""
argument_signatures = [Sig('--foo-bar'), Sig('--baz', dest='zabbaz')]
failures = ['a']
successes = [
('--foo-bar f', NS(foo_bar='f', zabbaz=None)),
('--baz g', NS(foo_bar=None, zabbaz='g')),
('--foo-bar h --baz i', NS(foo_bar='h', zabbaz='i')),
('--baz j --foo-bar k', NS(foo_bar='k', zabbaz='j')),
]
class TestOptionalsDefault(ParserTestCase):
"""Tests specifying a default for an Optional"""
argument_signatures = [Sig('-x'), Sig('-y', default=42)]
failures = ['a']
successes = [
('', NS(x=None, y=42)),
('-xx', NS(x='x', y=42)),
('-yy', NS(x=None, y='y')),
]
class TestOptionalsNargsDefault(ParserTestCase):
"""Tests not specifying the number of args for an Optional"""
argument_signatures = [Sig('-x')]
failures = ['a', '-x']
successes = [
('', NS(x=None)),
('-x a', NS(x='a')),
]
class TestOptionalsNargs1(ParserTestCase):
"""Tests specifying 1 arg for an Optional"""
argument_signatures = [Sig('-x', nargs=1)]
failures = ['a', '-x']
successes = [
('', NS(x=None)),
('-x a', NS(x=['a'])),
]
class TestOptionalsNargs3(ParserTestCase):
"""Tests specifying 3 args for an Optional"""
argument_signatures = [Sig('-x', nargs=3)]
failures = ['a', '-x', '-x a', '-x a b', 'a -x', 'a -x b']
successes = [
('', NS(x=None)),
('-x a b c', NS(x=['a', 'b', 'c'])),
]
class TestOptionalsNargsOptional(ParserTestCase):
"""Tests specifying an Optional arg for an Optional"""
argument_signatures = [
Sig('-w', nargs='?'),
Sig('-x', nargs='?', const=42),
Sig('-y', nargs='?', default='spam'),
Sig('-z', nargs='?', type=int, const='42', default='84'),
]
failures = ['2']
successes = [
('', NS(w=None, x=None, y='spam', z=84)),
('-w', NS(w=None, x=None, y='spam', z=84)),
('-w 2', NS(w='2', x=None, y='spam', z=84)),
('-x', NS(w=None, x=42, y='spam', z=84)),
('-x 2', NS(w=None, x='2', y='spam', z=84)),
('-y', NS(w=None, x=None, y=None, z=84)),
('-y 2', NS(w=None, x=None, y='2', z=84)),
('-z', NS(w=None, x=None, y='spam', z=42)),
('-z 2', NS(w=None, x=None, y='spam', z=2)),
]
class TestOptionalsNargsZeroOrMore(ParserTestCase):
"""Tests specifying args for an Optional that accepts zero or more"""
argument_signatures = [
Sig('-x', nargs='*'),
Sig('-y', nargs='*', default='spam'),
]
failures = ['a']
successes = [
('', NS(x=None, y='spam')),
('-x', NS(x=[], y='spam')),
('-x a', NS(x=['a'], y='spam')),
('-x a b', NS(x=['a', 'b'], y='spam')),
('-y', NS(x=None, y=[])),
('-y a', NS(x=None, y=['a'])),
('-y a b', NS(x=None, y=['a', 'b'])),
]
class TestOptionalsNargsOneOrMore(ParserTestCase):
"""Tests specifying args for an Optional that accepts one or more"""
argument_signatures = [
Sig('-x', nargs='+'),
Sig('-y', nargs='+', default='spam'),
]
failures = ['a', '-x', '-y', 'a -x', 'a -y b']
successes = [
('', NS(x=None, y='spam')),
('-x a', NS(x=['a'], y='spam')),
('-x a b', NS(x=['a', 'b'], y='spam')),
('-y a', NS(x=None, y=['a'])),
('-y a b', NS(x=None, y=['a', 'b'])),
]
class TestOptionalsChoices(ParserTestCase):
"""Tests specifying the choices for an Optional"""
argument_signatures = [
Sig('-f', choices='abc'),
Sig('-g', type=int, choices=range(5))]
failures = ['a', '-f d', '-fad', '-ga', '-g 6']
successes = [
('', NS(f=None, g=None)),
('-f a', NS(f='a', g=None)),
('-f c', NS(f='c', g=None)),
('-g 0', NS(f=None, g=0)),
('-g 03', NS(f=None, g=3)),
('-fb -g4', NS(f='b', g=4)),
]
class TestOptionalsRequired(ParserTestCase):
"""Tests an optional action that is required"""
argument_signatures = [
Sig('-x', type=int, required=True),
]
failures = ['a', '']
successes = [
('-x 1', NS(x=1)),
('-x42', NS(x=42)),
]
class TestOptionalsActionStore(ParserTestCase):
"""Tests the store action for an Optional"""
argument_signatures = [Sig('-x', action='store')]
failures = ['a', 'a -x']
successes = [
('', NS(x=None)),
('-xfoo', NS(x='foo')),
]
class TestOptionalsActionStoreConst(ParserTestCase):
"""Tests the store_const action for an Optional"""
argument_signatures = [Sig('-y', action='store_const', const=object)]
failures = ['a']
successes = [
('', NS(y=None)),
('-y', NS(y=object)),
]
class TestOptionalsActionStoreFalse(ParserTestCase):
"""Tests the store_false action for an Optional"""
argument_signatures = [Sig('-z', action='store_false')]
failures = ['a', '-za', '-z a']
successes = [
('', NS(z=True)),
('-z', NS(z=False)),
]
class TestOptionalsActionStoreTrue(ParserTestCase):
"""Tests the store_true action for an Optional"""
argument_signatures = [Sig('--apple', action='store_true')]
failures = ['a', '--apple=b', '--apple b']
successes = [
('', NS(apple=False)),
('--apple', NS(apple=True)),
]
class TestBooleanOptionalAction(ParserTestCase):
"""Tests BooleanOptionalAction"""
argument_signatures = [Sig('--foo', action=argparse.BooleanOptionalAction)]
failures = ['--foo bar', '--foo=bar']
successes = [
('', NS(foo=None)),
('--foo', NS(foo=True)),
('--no-foo', NS(foo=False)),
('--foo --no-foo', NS(foo=False)), # useful for aliases
('--no-foo --foo', NS(foo=True)),
]
def test_const(self):
# See bpo-40862
parser = argparse.ArgumentParser()
with self.assertRaises(TypeError) as cm:
parser.add_argument('--foo', const=True, action=argparse.BooleanOptionalAction)
self.assertIn("got an unexpected keyword argument 'const'", str(cm.exception))
class TestBooleanOptionalActionRequired(ParserTestCase):
"""Tests BooleanOptionalAction required"""
argument_signatures = [
Sig('--foo', required=True, action=argparse.BooleanOptionalAction)
]
failures = ['']
successes = [
('--foo', NS(foo=True)),
('--no-foo', NS(foo=False)),
]
class TestOptionalsActionAppend(ParserTestCase):
"""Tests the append action for an Optional"""
argument_signatures = [Sig('--baz', action='append')]
failures = ['a', '--baz', 'a --baz', '--baz a b']
successes = [
('', NS(baz=None)),
('--baz a', NS(baz=['a'])),
('--baz a --baz b', NS(baz=['a', 'b'])),
]
class TestOptionalsActionAppendWithDefault(ParserTestCase):
"""Tests the append action for an Optional"""
argument_signatures = [Sig('--baz', action='append', default=['X'])]
failures = ['a', '--baz', 'a --baz', '--baz a b']
successes = [
('', NS(baz=['X'])),
('--baz a', NS(baz=['X', 'a'])),
('--baz a --baz b', NS(baz=['X', 'a', 'b'])),
]
class TestConstActionsMissingConstKwarg(ParserTestCase):
"""Tests that const gets default value of None when not provided"""
argument_signatures = [
Sig('-f', action='append_const'),
Sig('--foo', action='append_const'),
Sig('-b', action='store_const'),
Sig('--bar', action='store_const')
]
failures = ['-f v', '--foo=bar', '--foo bar']
successes = [
('', NS(f=None, foo=None, b=None, bar=None)),
('-f', NS(f=[None], foo=None, b=None, bar=None)),
('--foo', NS(f=None, foo=[None], b=None, bar=None)),
('-b', NS(f=None, foo=None, b=None, bar=None)),
('--bar', NS(f=None, foo=None, b=None, bar=None)),
]
class TestOptionalsActionAppendConst(ParserTestCase):
"""Tests the append_const action for an Optional"""
argument_signatures = [
Sig('-b', action='append_const', const=Exception),
Sig('-c', action='append', dest='b'),
]
failures = ['a', '-c', 'a -c', '-bx', '-b x']
successes = [
('', NS(b=None)),
('-b', NS(b=[Exception])),
('-b -cx -b -cyz', NS(b=[Exception, 'x', Exception, 'yz'])),
]
class TestOptionalsActionAppendConstWithDefault(ParserTestCase):
"""Tests the append_const action for an Optional"""
argument_signatures = [
Sig('-b', action='append_const', const=Exception, default=['X']),
Sig('-c', action='append', dest='b'),
]
failures = ['a', '-c', 'a -c', '-bx', '-b x']
successes = [
('', NS(b=['X'])),
('-b', NS(b=['X', Exception])),
('-b -cx -b -cyz', NS(b=['X', Exception, 'x', Exception, 'yz'])),
]
class TestOptionalsActionCount(ParserTestCase):
"""Tests the count action for an Optional"""
argument_signatures = [Sig('-x', action='count')]
failures = ['a', '-x a', '-x b', '-x a -x b']
successes = [
('', NS(x=None)),
('-x', NS(x=1)),
]
class TestOptionalsAllowLongAbbreviation(ParserTestCase):
"""Allow long options to be abbreviated unambiguously"""
argument_signatures = [
Sig('--foo'),
Sig('--foobaz'),
Sig('--fooble', action='store_true'),
]
failures = ['--foob 5', '--foob']
successes = [
('', NS(foo=None, foobaz=None, fooble=False)),
('--foo 7', NS(foo='7', foobaz=None, fooble=False)),
('--fooba a', NS(foo=None, foobaz='a', fooble=False)),
('--foobl --foo g', NS(foo='g', foobaz=None, fooble=True)),
]
class TestOptionalsDisallowLongAbbreviation(ParserTestCase):
"""Do not allow abbreviations of long options at all"""
parser_signature = Sig(allow_abbrev=False)
argument_signatures = [
Sig('--foo'),
Sig('--foodle', action='store_true'),
Sig('--foonly'),
]
failures = ['-foon 3', '--foon 3', '--food', '--food --foo 2']
successes = [
('', NS(foo=None, foodle=False, foonly=None)),
('--foo 3', NS(foo='3', foodle=False, foonly=None)),
('--foonly 7 --foodle --foo 2', NS(foo='2', foodle=True, foonly='7')),
]
class TestOptionalsDisallowLongAbbreviationPrefixChars(ParserTestCase):
"""Disallowing abbreviations works with alternative prefix characters"""
parser_signature = Sig(prefix_chars='+', allow_abbrev=False)
argument_signatures = [
Sig('++foo'),
Sig('++foodle', action='store_true'),
Sig('++foonly'),
]
failures = ['+foon 3', '++foon 3', '++food', '++food ++foo 2']
successes = [
('', NS(foo=None, foodle=False, foonly=None)),
('++foo 3', NS(foo='3', foodle=False, foonly=None)),
('++foonly 7 ++foodle ++foo 2', NS(foo='2', foodle=True, foonly='7')),
]
class TestDisallowLongAbbreviationAllowsShortGrouping(ParserTestCase):
"""Do not allow abbreviations of long options at all"""
parser_signature = Sig(allow_abbrev=False)
argument_signatures = [
Sig('-r'),
Sig('-c', action='count'),
]
failures = ['-r', '-c -r']
successes = [
('', NS(r=None, c=None)),
('-ra', NS(r='a', c=None)),
('-rcc', NS(r='cc', c=None)),
('-cc', NS(r=None, c=2)),
('-cc -ra', NS(r='a', c=2)),
('-ccrcc', NS(r='cc', c=2)),
]
class TestDisallowLongAbbreviationAllowsShortGroupingPrefix(ParserTestCase):
"""Short option grouping works with custom prefix and allow_abbrev=False"""
parser_signature = Sig(prefix_chars='+', allow_abbrev=False)
argument_signatures = [
Sig('+r'),
Sig('+c', action='count'),
]
failures = ['+r', '+c +r']
successes = [
('', NS(r=None, c=None)),
('+ra', NS(r='a', c=None)),
('+rcc', NS(r='cc', c=None)),
('+cc', NS(r=None, c=2)),
('+cc +ra', NS(r='a', c=2)),
('+ccrcc', NS(r='cc', c=2)),
]
# ================
# Positional tests
# ================
class TestPositionalsNargsNone(ParserTestCase):
"""Test a Positional that doesn't specify nargs"""
argument_signatures = [Sig('foo')]
failures = ['', '-x', 'a b']
successes = [
('a', NS(foo='a')),
]
class TestPositionalsNargs1(ParserTestCase):
"""Test a Positional that specifies an nargs of 1"""
argument_signatures = [Sig('foo', nargs=1)]
failures = ['', '-x', 'a b']
successes = [
('a', NS(foo=['a'])),
]
class TestPositionalsNargs2(ParserTestCase):
"""Test a Positional that specifies an nargs of 2"""
argument_signatures = [Sig('foo', nargs=2)]
failures = ['', 'a', '-x', 'a b c']
successes = [
('a b', NS(foo=['a', 'b'])),
]
class TestPositionalsNargsZeroOrMore(ParserTestCase):
"""Test a Positional that specifies unlimited nargs"""
argument_signatures = [Sig('foo', nargs='*')]
failures = ['-x']
successes = [
('', NS(foo=[])),
('a', NS(foo=['a'])),
('a b', NS(foo=['a', 'b'])),
]
class TestPositionalsNargsZeroOrMoreDefault(ParserTestCase):
"""Test a Positional that specifies unlimited nargs and a default"""
argument_signatures = [Sig('foo', nargs='*', default='bar')]
failures = ['-x']
successes = [
('', NS(foo='bar')),
('a', NS(foo=['a'])),
('a b', NS(foo=['a', 'b'])),
]
class TestPositionalsNargsOneOrMore(ParserTestCase):
"""Test a Positional that specifies one or more nargs"""
argument_signatures = [Sig('foo', nargs='+')]
failures = ['', '-x']
successes = [
('a', NS(foo=['a'])),
('a b', NS(foo=['a', 'b'])),
]
class TestPositionalsNargsOptional(ParserTestCase):
"""Tests an Optional Positional"""
argument_signatures = [Sig('foo', nargs='?')]
failures = ['-x', 'a b']
successes = [
('', NS(foo=None)),
('a', NS(foo='a')),
]
class TestPositionalsNargsOptionalDefault(ParserTestCase):
"""Tests an Optional Positional with a default value"""
argument_signatures = [Sig('foo', nargs='?', default=42)]
failures = ['-x', 'a b']
successes = [
('', NS(foo=42)),
('a', NS(foo='a')),
]
class TestPositionalsNargsOptionalConvertedDefault(ParserTestCase):
"""Tests an Optional Positional with a default value
that needs to be converted to the appropriate type.
"""
argument_signatures = [
Sig('foo', nargs='?', type=int, default='42'),
]
failures = ['-x', 'a b', '1 2']
successes = [
('', NS(foo=42)),
('1', NS(foo=1)),
]
class TestPositionalsNargsNoneNone(ParserTestCase):
"""Test two Positionals that don't specify nargs"""
argument_signatures = [Sig('foo'), Sig('bar')]
failures = ['', '-x', 'a', 'a b c']
successes = [
('a b', NS(foo='a', bar='b')),
]
class TestPositionalsNargsNone1(ParserTestCase):
"""Test a Positional with no nargs followed by one with 1"""
argument_signatures = [Sig('foo'), Sig('bar', nargs=1)]
failures = ['', '--foo', 'a', 'a b c']
successes = [
('a b', NS(foo='a', bar=['b'])),
]
class TestPositionalsNargs2None(ParserTestCase):
"""Test a Positional with 2 nargs followed by one with none"""
argument_signatures = [Sig('foo', nargs=2), Sig('bar')]
failures = ['', '--foo', 'a', 'a b', 'a b c d']
successes = [
('a b c', NS(foo=['a', 'b'], bar='c')),
]
class TestPositionalsNargsNoneZeroOrMore(ParserTestCase):
"""Test a Positional with no nargs followed by one with unlimited"""
argument_signatures = [Sig('foo'), Sig('bar', nargs='*')]
failures = ['', '--foo']
successes = [
('a', NS(foo='a', bar=[])),
('a b', NS(foo='a', bar=['b'])),
('a b c', NS(foo='a', bar=['b', 'c'])),
]
class TestPositionalsNargsNoneOneOrMore(ParserTestCase):
"""Test a Positional with no nargs followed by one with one or more"""
argument_signatures = [Sig('foo'), Sig('bar', nargs='+')]
failures = ['', '--foo', 'a']
successes = [
('a b', NS(foo='a', bar=['b'])),
('a b c', NS(foo='a', bar=['b', 'c'])),
]
class TestPositionalsNargsNoneOptional(ParserTestCase):
"""Test a Positional with no nargs followed by one with an Optional"""
argument_signatures = [Sig('foo'), Sig('bar', nargs='?')]
failures = ['', '--foo', 'a b c']
successes = [
('a', NS(foo='a', bar=None)),
('a b', NS(foo='a', bar='b')),
]
class TestPositionalsNargsZeroOrMoreNone(ParserTestCase):
"""Test a Positional with unlimited nargs followed by one with none"""
argument_signatures = [Sig('foo', nargs='*'), Sig('bar')]
failures = ['', '--foo']
successes = [
('a', NS(foo=[], bar='a')),
('a b', NS(foo=['a'], bar='b')),
('a b c', NS(foo=['a', 'b'], bar='c')),
]
class TestPositionalsNargsOneOrMoreNone(ParserTestCase):
"""Test a Positional with one or more nargs followed by one with none"""
argument_signatures = [Sig('foo', nargs='+'), Sig('bar')]
failures = ['', '--foo', 'a']
successes = [
('a b', NS(foo=['a'], bar='b')),
('a b c', NS(foo=['a', 'b'], bar='c')),
]
class TestPositionalsNargsOptionalNone(ParserTestCase):
"""Test a Positional with an Optional nargs followed by one with none"""
argument_signatures = [Sig('foo', nargs='?', default=42), Sig('bar')]
failures = ['', '--foo', 'a b c']
successes = [
('a', NS(foo=42, bar='a')),
('a b', NS(foo='a', bar='b')),
]
class TestPositionalsNargs2ZeroOrMore(ParserTestCase):
"""Test a Positional with 2 nargs followed by one with unlimited"""
argument_signatures = [Sig('foo', nargs=2), Sig('bar', nargs='*')]
failures = ['', '--foo', 'a']
successes = [
('a b', NS(foo=['a', 'b'], bar=[])),
('a b c', NS(foo=['a', 'b'], bar=['c'])),
]
class TestPositionalsNargs2OneOrMore(ParserTestCase):
"""Test a Positional with 2 nargs followed by one with one or more"""
argument_signatures = [Sig('foo', nargs=2), Sig('bar', nargs='+')]
failures = ['', '--foo', 'a', 'a b']
successes = [
('a b c', NS(foo=['a', 'b'], bar=['c'])),
]
class TestPositionalsNargs2Optional(ParserTestCase):
"""Test a Positional with 2 nargs followed by one optional"""
argument_signatures = [Sig('foo', nargs=2), Sig('bar', nargs='?')]
failures = ['', '--foo', 'a', 'a b c d']
successes = [
('a b', NS(foo=['a', 'b'], bar=None)),
('a b c', NS(foo=['a', 'b'], bar='c')),
]
class TestPositionalsNargsZeroOrMore1(ParserTestCase):
"""Test a Positional with unlimited nargs followed by one with 1"""
argument_signatures = [Sig('foo', nargs='*'), Sig('bar', nargs=1)]
failures = ['', '--foo', ]
successes = [
('a', NS(foo=[], bar=['a'])),
('a b', NS(foo=['a'], bar=['b'])),
('a b c', NS(foo=['a', 'b'], bar=['c'])),
]
class TestPositionalsNargsOneOrMore1(ParserTestCase):
"""Test a Positional with one or more nargs followed by one with 1"""
argument_signatures = [Sig('foo', nargs='+'), Sig('bar', nargs=1)]
failures = ['', '--foo', 'a']
successes = [
('a b', NS(foo=['a'], bar=['b'])),
('a b c', NS(foo=['a', 'b'], bar=['c'])),
]
class TestPositionalsNargsOptional1(ParserTestCase):
"""Test a Positional with an Optional nargs followed by one with 1"""
argument_signatures = [Sig('foo', nargs='?'), Sig('bar', nargs=1)]
failures = ['', '--foo', 'a b c']
successes = [
('a', NS(foo=None, bar=['a'])),
('a b', NS(foo='a', bar=['b'])),
]
class TestPositionalsNargsNoneZeroOrMore1(ParserTestCase):
"""Test three Positionals: no nargs, unlimited nargs and 1 nargs"""
argument_signatures = [
Sig('foo'),
Sig('bar', nargs='*'),
Sig('baz', nargs=1),
]
failures = ['', '--foo', 'a']
successes = [
('a b', NS(foo='a', bar=[], baz=['b'])),
('a b c', NS(foo='a', bar=['b'], baz=['c'])),
]
class TestPositionalsNargsNoneOneOrMore1(ParserTestCase):
"""Test three Positionals: no nargs, one or more nargs and 1 nargs"""
argument_signatures = [
Sig('foo'),
Sig('bar', nargs='+'),
Sig('baz', nargs=1),
]
failures = ['', '--foo', 'a', 'b']
successes = [
('a b c', NS(foo='a', bar=['b'], baz=['c'])),
('a b c d', NS(foo='a', bar=['b', 'c'], baz=['d'])),
]
class TestPositionalsNargsNoneOptional1(ParserTestCase):
"""Test three Positionals: no nargs, optional narg and 1 nargs"""
argument_signatures = [
Sig('foo'),
Sig('bar', nargs='?', default=0.625),
Sig('baz', nargs=1),
]
failures = ['', '--foo', 'a']
successes = [
('a b', NS(foo='a', bar=0.625, baz=['b'])),
('a b c', NS(foo='a', bar='b', baz=['c'])),
]
class TestPositionalsNargsOptionalOptional(ParserTestCase):
"""Test two optional nargs"""
argument_signatures = [
Sig('foo', nargs='?'),
Sig('bar', nargs='?', default=42),
]
failures = ['--foo', 'a b c']
successes = [
('', NS(foo=None, bar=42)),
('a', NS(foo='a', bar=42)),
('a b', NS(foo='a', bar='b')),
]
class TestPositionalsNargsOptionalZeroOrMore(ParserTestCase):
"""Test an Optional narg followed by unlimited nargs"""
argument_signatures = [Sig('foo', nargs='?'), Sig('bar', nargs='*')]
failures = ['--foo']
successes = [
('', NS(foo=None, bar=[])),
('a', NS(foo='a', bar=[])),
('a b', NS(foo='a', bar=['b'])),
('a b c', NS(foo='a', bar=['b', 'c'])),
]
class TestPositionalsNargsOptionalOneOrMore(ParserTestCase):
"""Test an Optional narg followed by one or more nargs"""
argument_signatures = [Sig('foo', nargs='?'), Sig('bar', nargs='+')]
failures = ['', '--foo']
successes = [
('a', NS(foo=None, bar=['a'])),
('a b', NS(foo='a', bar=['b'])),
('a b c', NS(foo='a', bar=['b', 'c'])),
]
class TestPositionalsChoicesString(ParserTestCase):
"""Test a set of single-character choices"""
argument_signatures = [Sig('spam', choices=set('abcdefg'))]
failures = ['', '--foo', 'h', '42', 'ef']
successes = [
('a', NS(spam='a')),
('g', NS(spam='g')),
]
class TestPositionalsChoicesInt(ParserTestCase):
"""Test a set of integer choices"""
argument_signatures = [Sig('spam', type=int, choices=range(20))]
failures = ['', '--foo', 'h', '42', 'ef']
successes = [
('4', NS(spam=4)),
('15', NS(spam=15)),
]
class TestPositionalsActionAppend(ParserTestCase):
"""Test the 'append' action"""
argument_signatures = [
Sig('spam', action='append'),
Sig('spam', action='append', nargs=2),
]
failures = ['', '--foo', 'a', 'a b', 'a b c d']
successes = [
('a b c', NS(spam=['a', ['b', 'c']])),
]
# ========================================
# Combined optionals and positionals tests
# ========================================
class TestOptionalsNumericAndPositionals(ParserTestCase):
"""Tests negative number args when numeric options are present"""
argument_signatures = [
Sig('x', nargs='?'),
Sig('-4', dest='y', action='store_true'),
]
failures = ['-2', '-315']
successes = [
('', NS(x=None, y=False)),
('a', NS(x='a', y=False)),
('-4', NS(x=None, y=True)),
('-4 a', NS(x='a', y=True)),
]
class TestOptionalsAlmostNumericAndPositionals(ParserTestCase):
"""Tests negative number args when almost numeric options are present"""
argument_signatures = [
Sig('x', nargs='?'),
Sig('-k4', dest='y', action='store_true'),
]
failures = ['-k3']
successes = [
('', NS(x=None, y=False)),
('-2', NS(x='-2', y=False)),
('a', NS(x='a', y=False)),
('-k4', NS(x=None, y=True)),
('-k4 a', NS(x='a', y=True)),
]
class TestEmptyAndSpaceContainingArguments(ParserTestCase):
argument_signatures = [
Sig('x', nargs='?'),
Sig('-y', '--yyy', dest='y'),
]
failures = ['-y']
successes = [
([''], NS(x='', y=None)),
(['a badger'], NS(x='a badger', y=None)),
(['-a badger'], NS(x='-a badger', y=None)),
(['-y', ''], NS(x=None, y='')),
(['-y', 'a badger'], NS(x=None, y='a badger')),
(['-y', '-a badger'], NS(x=None, y='-a badger')),
(['--yyy=a badger'], NS(x=None, y='a badger')),
(['--yyy=-a badger'], NS(x=None, y='-a badger')),
]
class TestPrefixCharacterOnlyArguments(ParserTestCase):
parser_signature = Sig(prefix_chars='-+')
argument_signatures = [
Sig('-', dest='x', nargs='?', const='badger'),
Sig('+', dest='y', type=int, default=42),
Sig('-+-', dest='z', action='store_true'),
]
failures = ['-y', '+ -']
successes = [
('', NS(x=None, y=42, z=False)),
('-', NS(x='badger', y=42, z=False)),
('- X', NS(x='X', y=42, z=False)),
('+ -3', NS(x=None, y=-3, z=False)),
('-+-', NS(x=None, y=42, z=True)),
('- ===', NS(x='===', y=42, z=False)),
]
class TestNargsZeroOrMore(ParserTestCase):
"""Tests specifying args for an Optional that accepts zero or more"""
argument_signatures = [Sig('-x', nargs='*'), Sig('y', nargs='*')]
failures = []
successes = [
('', NS(x=None, y=[])),
('-x', NS(x=[], y=[])),
('-x a', NS(x=['a'], y=[])),
('-x a -- b', NS(x=['a'], y=['b'])),
('a', NS(x=None, y=['a'])),
('a -x', NS(x=[], y=['a'])),
('a -x b', NS(x=['b'], y=['a'])),
]
class TestNargsRemainder(ParserTestCase):
"""Tests specifying a positional with nargs=REMAINDER"""
argument_signatures = [Sig('x'), Sig('y', nargs='...'), Sig('-z')]
failures = ['', '-z', '-z Z']
successes = [
('X', NS(x='X', y=[], z=None)),
('-z Z X', NS(x='X', y=[], z='Z')),
('X A B -z Z', NS(x='X', y=['A', 'B', '-z', 'Z'], z=None)),
('X Y --foo', NS(x='X', y=['Y', '--foo'], z=None)),
]
class TestOptionLike(ParserTestCase):
"""Tests options that may or may not be arguments"""
argument_signatures = [
Sig('-x', type=float),
Sig('-3', type=float, dest='y'),
Sig('z', nargs='*'),
]
failures = ['-x', '-y2.5', '-xa', '-x -a',
'-x -3', '-x -3.5', '-3 -3.5',
'-x -2.5', '-x -2.5 a', '-3 -.5',
'a x -1', '-x -1 a', '-3 -1 a']
successes = [
('', NS(x=None, y=None, z=[])),
('-x 2.5', NS(x=2.5, y=None, z=[])),
('-x 2.5 a', NS(x=2.5, y=None, z=['a'])),
('-3.5', NS(x=None, y=0.5, z=[])),
('-3-.5', NS(x=None, y=-0.5, z=[])),
('-3 .5', NS(x=None, y=0.5, z=[])),
('a -3.5', NS(x=None, y=0.5, z=['a'])),
('a', NS(x=None, y=None, z=['a'])),
('a -x 1', NS(x=1.0, y=None, z=['a'])),
('-x 1 a', NS(x=1.0, y=None, z=['a'])),
('-3 1 a', NS(x=None, y=1.0, z=['a'])),
]
class TestDefaultSuppress(ParserTestCase):
"""Test actions with suppressed defaults"""
argument_signatures = [
Sig('foo', nargs='?', default=argparse.SUPPRESS),
Sig('bar', nargs='*', default=argparse.SUPPRESS),
Sig('--baz', action='store_true', default=argparse.SUPPRESS),
]
failures = ['-x']
successes = [
('', NS()),
('a', NS(foo='a')),
('a b', NS(foo='a', bar=['b'])),
('--baz', NS(baz=True)),
('a --baz', NS(foo='a', baz=True)),
('--baz a b', NS(foo='a', bar=['b'], baz=True)),
]
class TestParserDefaultSuppress(ParserTestCase):
"""Test actions with a parser-level default of SUPPRESS"""
parser_signature = Sig(argument_default=argparse.SUPPRESS)
argument_signatures = [
Sig('foo', nargs='?'),
Sig('bar', nargs='*'),
Sig('--baz', action='store_true'),
]
failures = ['-x']
successes = [
('', NS()),
('a', NS(foo='a')),
('a b', NS(foo='a', bar=['b'])),
('--baz', NS(baz=True)),
('a --baz', NS(foo='a', baz=True)),
('--baz a b', NS(foo='a', bar=['b'], baz=True)),
]
class TestParserDefault42(ParserTestCase):
"""Test actions with a parser-level default of 42"""
parser_signature = Sig(argument_default=42)
argument_signatures = [
Sig('--version', action='version', version='1.0'),
Sig('foo', nargs='?'),
Sig('bar', nargs='*'),
Sig('--baz', action='store_true'),
]
failures = ['-x']
successes = [
('', NS(foo=42, bar=42, baz=42, version=42)),
('a', NS(foo='a', bar=42, baz=42, version=42)),
('a b', NS(foo='a', bar=['b'], baz=42, version=42)),
('--baz', NS(foo=42, bar=42, baz=True, version=42)),
('a --baz', NS(foo='a', bar=42, baz=True, version=42)),
('--baz a b', NS(foo='a', bar=['b'], baz=True, version=42)),
]
class TestArgumentsFromFile(TempDirMixin, ParserTestCase):
"""Test reading arguments from a file"""
def setUp(self):
super(TestArgumentsFromFile, self).setUp()
file_texts = [
('hello', 'hello world!\n'),
('recursive', '-a\n'
'A\n'
'@hello'),
('invalid', '@no-such-path\n'),
]
for path, text in file_texts:
with open(path, 'w', encoding="utf-8") as file:
file.write(text)
parser_signature = Sig(fromfile_prefix_chars='@')
argument_signatures = [
Sig('-a'),
Sig('x'),
Sig('y', nargs='+'),
]
failures = ['', '-b', 'X', '@invalid', '@missing']
successes = [
('X Y', NS(a=None, x='X', y=['Y'])),
('X -a A Y Z', NS(a='A', x='X', y=['Y', 'Z'])),
('@hello X', NS(a=None, x='hello world!', y=['X'])),
('X @hello', NS(a=None, x='X', y=['hello world!'])),
('-a B @recursive Y Z', NS(a='A', x='hello world!', y=['Y', 'Z'])),
('X @recursive Z -a B', NS(a='B', x='X', y=['hello world!', 'Z'])),
(["-a", "", "X", "Y"], NS(a='', x='X', y=['Y'])),
]
class TestArgumentsFromFileConverter(TempDirMixin, ParserTestCase):
"""Test reading arguments from a file"""
def setUp(self):
super(TestArgumentsFromFileConverter, self).setUp()
file_texts = [
('hello', 'hello world!\n'),
]
for path, text in file_texts:
with open(path, 'w', encoding="utf-8") as file:
file.write(text)
class FromFileConverterArgumentParser(ErrorRaisingArgumentParser):
def convert_arg_line_to_args(self, arg_line):
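            # Split each @-file line on whitespace instead of passing the
            # whole line through as a single argument (the default behaviour).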
for arg in arg_line.split():
if not arg.strip():
continue
yield arg
parser_class = FromFileConverterArgumentParser
parser_signature = Sig(fromfile_prefix_chars='@')
argument_signatures = [
Sig('y', nargs='+'),
]
failures = []
successes = [
('@hello X', NS(y=['hello', 'world!', 'X'])),
]
# =====================
# Type conversion tests
# =====================
class TestFileTypeRepr(TestCase):
def test_r(self):
type = argparse.FileType('r')
self.assertEqual("FileType('r')", repr(type))
def test_wb_1(self):
type = argparse.FileType('wb', 1)
self.assertEqual("FileType('wb', 1)", repr(type))
def test_r_latin(self):
type = argparse.FileType('r', encoding='latin_1')
self.assertEqual("FileType('r', encoding='latin_1')", repr(type))
def test_w_big5_ignore(self):
type = argparse.FileType('w', encoding='big5', errors='ignore')
self.assertEqual("FileType('w', encoding='big5', errors='ignore')",
repr(type))
def test_r_1_replace(self):
type = argparse.FileType('r', 1, errors='replace')
self.assertEqual("FileType('r', 1, errors='replace')", repr(type))
BIN_STDOUT_SENTINEL = object()
BIN_STDERR_SENTINEL = object()
class StdStreamComparer:
def __init__(self, attr):
# We try to use the actual stdXXX.buffer attribute as our
        # marker, but under some test environments,
# sys.stdout/err are replaced by io.StringIO which won't have .buffer,
# so we use a sentinel simply to show that the tests do the right thing
# for any buffer supporting object
self.getattr = operator.attrgetter(attr)
if attr == 'stdout.buffer':
self.backupattr = BIN_STDOUT_SENTINEL
elif attr == 'stderr.buffer':
self.backupattr = BIN_STDERR_SENTINEL
else:
self.backupattr = object() # Not equal to anything
def __eq__(self, other):
try:
return other == self.getattr(sys)
except AttributeError:
return other == self.backupattr
eq_stdin = StdStreamComparer('stdin')
eq_stdout = StdStreamComparer('stdout')
eq_stderr = StdStreamComparer('stderr')
eq_bstdin = StdStreamComparer('stdin.buffer')
eq_bstdout = StdStreamComparer('stdout.buffer')
eq_bstderr = StdStreamComparer('stderr.buffer')
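# Illustrative sketch (added for clarity, not part of the original helpers):
# the comparers above are meant to be used as *expected* values inside test
# namespaces, e.g. the FileType tests below use NS(x=eq_stdin, spam=eq_stdin)
# for the '-x - -' case.
def _demo_std_stream_comparer():
    # Each comparer compares equal to whatever the corresponding sys stream
    # currently is (the .buffer variants fall back to a sentinel when the
    # stream has been replaced by an object without a .buffer attribute).
    assert eq_stdout == sys.stdout
    assert eq_stderr == sys.stderr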
class RFile(object):
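    # Expected-value helper: compares equal to an open file object whose name
    # matches and whose contents equal that name.  Text read from each handle
    # is cached in 'seen' so the same handle can be compared more than once.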
seen = {}
def __init__(self, name):
self.name = name
def __eq__(self, other):
if other in self.seen:
text = self.seen[other]
else:
text = self.seen[other] = other.read()
other.close()
if not isinstance(text, str):
text = text.decode('ascii')
return self.name == other.name == text
class TestFileTypeR(TempDirMixin, ParserTestCase):
"""Test the FileType option/argument type for reading files"""
def setUp(self):
super(TestFileTypeR, self).setUp()
for file_name in ['foo', 'bar']:
with open(os.path.join(self.temp_dir, file_name),
'w', encoding="utf-8") as file:
file.write(file_name)
self.create_readonly_file('readonly')
argument_signatures = [
Sig('-x', type=argparse.FileType()),
Sig('spam', type=argparse.FileType('r')),
]
failures = ['-x', '', 'non-existent-file.txt']
successes = [
('foo', NS(x=None, spam=RFile('foo'))),
('-x foo bar', NS(x=RFile('foo'), spam=RFile('bar'))),
('bar -x foo', NS(x=RFile('foo'), spam=RFile('bar'))),
('-x - -', NS(x=eq_stdin, spam=eq_stdin)),
('readonly', NS(x=None, spam=RFile('readonly'))),
]
class TestFileTypeDefaults(TempDirMixin, ParserTestCase):
"""Test that a file is not created unless the default is needed"""
def setUp(self):
super(TestFileTypeDefaults, self).setUp()
file = open(os.path.join(self.temp_dir, 'good'), 'w', encoding="utf-8")
file.write('good')
file.close()
argument_signatures = [
Sig('-c', type=argparse.FileType('r'), default='no-file.txt'),
]
# should provoke no such file error
failures = ['']
    # should not provoke an error because the default is never opened when -c is given
successes = [('-c good', NS(c=RFile('good')))]
class TestFileTypeRB(TempDirMixin, ParserTestCase):
"""Test the FileType option/argument type for reading files"""
def setUp(self):
super(TestFileTypeRB, self).setUp()
for file_name in ['foo', 'bar']:
with open(os.path.join(self.temp_dir, file_name),
'w', encoding="utf-8") as file:
file.write(file_name)
argument_signatures = [
Sig('-x', type=argparse.FileType('rb')),
Sig('spam', type=argparse.FileType('rb')),
]
failures = ['-x', '']
successes = [
('foo', NS(x=None, spam=RFile('foo'))),
('-x foo bar', NS(x=RFile('foo'), spam=RFile('bar'))),
('bar -x foo', NS(x=RFile('foo'), spam=RFile('bar'))),
('-x - -', NS(x=eq_bstdin, spam=eq_bstdin)),
]
class WFile(object):
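    # Expected-value helper for writable files: on first comparison it writes
    # a probe string (bytes or text, depending on the file's mode) and closes
    # the handle; after that only the file names are compared.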
seen = set()
def __init__(self, name):
self.name = name
def __eq__(self, other):
if other not in self.seen:
text = 'Check that file is writable.'
if 'b' in other.mode:
text = text.encode('ascii')
other.write(text)
other.close()
self.seen.add(other)
return self.name == other.name
@unittest.skipIf(hasattr(os, 'geteuid') and os.geteuid() == 0,
"non-root user required")
class TestFileTypeW(TempDirMixin, ParserTestCase):
"""Test the FileType option/argument type for writing files"""
def setUp(self):
super().setUp()
self.create_readonly_file('readonly')
self.create_writable_file('writable')
argument_signatures = [
Sig('-x', type=argparse.FileType('w')),
Sig('spam', type=argparse.FileType('w')),
]
failures = ['-x', '', 'readonly']
successes = [
('foo', NS(x=None, spam=WFile('foo'))),
('writable', NS(x=None, spam=WFile('writable'))),
('-x foo bar', NS(x=WFile('foo'), spam=WFile('bar'))),
('bar -x foo', NS(x=WFile('foo'), spam=WFile('bar'))),
('-x - -', NS(x=eq_stdout, spam=eq_stdout)),
]
@unittest.skipIf(hasattr(os, 'geteuid') and os.geteuid() == 0,
"non-root user required")
class TestFileTypeX(TempDirMixin, ParserTestCase):
"""Test the FileType option/argument type for writing new files only"""
def setUp(self):
super().setUp()
self.create_readonly_file('readonly')
self.create_writable_file('writable')
argument_signatures = [
Sig('-x', type=argparse.FileType('x')),
Sig('spam', type=argparse.FileType('x')),
]
failures = ['-x', '', 'readonly', 'writable']
successes = [
('-x foo bar', NS(x=WFile('foo'), spam=WFile('bar'))),
('-x - -', NS(x=eq_stdout, spam=eq_stdout)),
]
@unittest.skipIf(hasattr(os, 'geteuid') and os.geteuid() == 0,
"non-root user required")
class TestFileTypeWB(TempDirMixin, ParserTestCase):
"""Test the FileType option/argument type for writing binary files"""
argument_signatures = [
Sig('-x', type=argparse.FileType('wb')),
Sig('spam', type=argparse.FileType('wb')),
]
failures = ['-x', '']
successes = [
('foo', NS(x=None, spam=WFile('foo'))),
('-x foo bar', NS(x=WFile('foo'), spam=WFile('bar'))),
('bar -x foo', NS(x=WFile('foo'), spam=WFile('bar'))),
('-x - -', NS(x=eq_bstdout, spam=eq_bstdout)),
]
@unittest.skipIf(hasattr(os, 'geteuid') and os.geteuid() == 0,
"non-root user required")
class TestFileTypeXB(TestFileTypeX):
"Test the FileType option/argument type for writing new binary files only"
argument_signatures = [
Sig('-x', type=argparse.FileType('xb')),
Sig('spam', type=argparse.FileType('xb')),
]
successes = [
('-x foo bar', NS(x=WFile('foo'), spam=WFile('bar'))),
('-x - -', NS(x=eq_bstdout, spam=eq_bstdout)),
]
class TestFileTypeOpenArgs(TestCase):
"""Test that open (the builtin) is correctly called"""
def test_open_args(self):
FT = argparse.FileType
cases = [
(FT('rb'), ('rb', -1, None, None)),
(FT('w', 1), ('w', 1, None, None)),
(FT('w', errors='replace'), ('w', -1, None, 'replace')),
(FT('wb', encoding='big5'), ('wb', -1, 'big5', None)),
(FT('w', 0, 'l1', 'strict'), ('w', 0, 'l1', 'strict')),
]
with mock.patch('builtins.open') as m:
for type, args in cases:
type('foo')
m.assert_called_with('foo', *args)
class TestFileTypeMissingInitialization(TestCase):
"""
Test that add_argument throws an error if FileType class
object was passed instead of instance of FileType
"""
def test(self):
parser = argparse.ArgumentParser()
with self.assertRaises(ValueError) as cm:
parser.add_argument('-x', type=argparse.FileType)
self.assertEqual(
'%r is a FileType class object, instance of it must be passed'
% (argparse.FileType,),
str(cm.exception)
)
class TestTypeCallable(ParserTestCase):
"""Test some callables as option/argument types"""
argument_signatures = [
Sig('--eggs', type=complex),
Sig('spam', type=float),
]
failures = ['a', '42j', '--eggs a', '--eggs 2i']
successes = [
('--eggs=42 42', NS(eggs=42, spam=42.0)),
('--eggs 2j -- -1.5', NS(eggs=2j, spam=-1.5)),
('1024.675', NS(eggs=None, spam=1024.675)),
]
class TestTypeUserDefined(ParserTestCase):
"""Test a user-defined option/argument type"""
class MyType(TestCase):
def __init__(self, value):
self.value = value
def __eq__(self, other):
return (type(self), self.value) == (type(other), other.value)
argument_signatures = [
Sig('-x', type=MyType),
Sig('spam', type=MyType),
]
failures = []
successes = [
('a -x b', NS(x=MyType('b'), spam=MyType('a'))),
('-xf g', NS(x=MyType('f'), spam=MyType('g'))),
]
class TestTypeClassicClass(ParserTestCase):
"""Test a classic class type"""
class C:
def __init__(self, value):
self.value = value
def __eq__(self, other):
return (type(self), self.value) == (type(other), other.value)
argument_signatures = [
Sig('-x', type=C),
Sig('spam', type=C),
]
failures = []
successes = [
('a -x b', NS(x=C('b'), spam=C('a'))),
('-xf g', NS(x=C('f'), spam=C('g'))),
]
class TestTypeRegistration(TestCase):
"""Test a user-defined type by registering it"""
def test(self):
def get_my_type(string):
return 'my_type{%s}' % string
parser = argparse.ArgumentParser()
parser.register('type', 'my_type', get_my_type)
parser.add_argument('-x', type='my_type')
parser.add_argument('y', type='my_type')
self.assertEqual(parser.parse_args('1'.split()),
NS(x=None, y='my_type{1}'))
self.assertEqual(parser.parse_args('-x 1 42'.split()),
NS(x='my_type{1}', y='my_type{42}'))
# ============
# Action tests
# ============
class TestActionUserDefined(ParserTestCase):
"""Test a user-defined option/argument action"""
class OptionalAction(argparse.Action):
def __call__(self, parser, namespace, value, option_string=None):
try:
# check destination and option string
assert self.dest == 'spam', 'dest: %s' % self.dest
assert option_string == '-s', 'flag: %s' % option_string
# when option is before argument, badger=2, and when
# option is after argument, badger=<whatever was set>
expected_ns = NS(spam=0.25)
if value in [0.125, 0.625]:
expected_ns.badger = 2
elif value in [2.0]:
expected_ns.badger = 84
else:
raise AssertionError('value: %s' % value)
assert expected_ns == namespace, ('expected %s, got %s' %
(expected_ns, namespace))
except AssertionError as e:
raise ArgumentParserError('opt_action failed: %s' % e)
setattr(namespace, 'spam', value)
class PositionalAction(argparse.Action):
def __call__(self, parser, namespace, value, option_string=None):
try:
assert option_string is None, ('option_string: %s' %
option_string)
# check destination
assert self.dest == 'badger', 'dest: %s' % self.dest
# when argument is before option, spam=0.25, and when
# option is after argument, spam=<whatever was set>
expected_ns = NS(badger=2)
if value in [42, 84]:
expected_ns.spam = 0.25
elif value in [1]:
expected_ns.spam = 0.625
elif value in [2]:
expected_ns.spam = 0.125
else:
raise AssertionError('value: %s' % value)
assert expected_ns == namespace, ('expected %s, got %s' %
(expected_ns, namespace))
except AssertionError as e:
raise ArgumentParserError('arg_action failed: %s' % e)
setattr(namespace, 'badger', value)
argument_signatures = [
Sig('-s', dest='spam', action=OptionalAction,
type=float, default=0.25),
Sig('badger', action=PositionalAction,
type=int, nargs='?', default=2),
]
failures = []
successes = [
('-s0.125', NS(spam=0.125, badger=2)),
('42', NS(spam=0.25, badger=42)),
('-s 0.625 1', NS(spam=0.625, badger=1)),
('84 -s2', NS(spam=2.0, badger=84)),
]
class TestActionRegistration(TestCase):
"""Test a user-defined action supplied by registering it"""
class MyAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, 'foo[%s]' % values)
def test(self):
parser = argparse.ArgumentParser()
parser.register('action', 'my_action', self.MyAction)
parser.add_argument('badger', action='my_action')
self.assertEqual(parser.parse_args(['1']), NS(badger='foo[1]'))
self.assertEqual(parser.parse_args(['42']), NS(badger='foo[42]'))
class TestActionExtend(ParserTestCase):
argument_signatures = [
Sig('--foo', action="extend", nargs="+", type=str),
]
failures = ()
successes = [
('--foo f1 --foo f2 f3 f4', NS(foo=['f1', 'f2', 'f3', 'f4'])),
]
# ================
# Subparsers tests
# ================
class TestAddSubparsers(TestCase):
"""Test the add_subparsers method"""
def assertArgumentParserError(self, *args, **kwargs):
self.assertRaises(ArgumentParserError, *args, **kwargs)
def _get_parser(self, subparser_help=False, prefix_chars=None,
aliases=False):
# create a parser with a subparsers argument
if prefix_chars:
parser = ErrorRaisingArgumentParser(
prog='PROG', description='main description', prefix_chars=prefix_chars)
parser.add_argument(
prefix_chars[0] * 2 + 'foo', action='store_true', help='foo help')
else:
parser = ErrorRaisingArgumentParser(
prog='PROG', description='main description')
parser.add_argument(
'--foo', action='store_true', help='foo help')
parser.add_argument(
'bar', type=float, help='bar help')
# check that only one subparsers argument can be added
subparsers_kwargs = {'required': False}
if aliases:
subparsers_kwargs['metavar'] = 'COMMAND'
subparsers_kwargs['title'] = 'commands'
else:
subparsers_kwargs['help'] = 'command help'
subparsers = parser.add_subparsers(**subparsers_kwargs)
self.assertArgumentParserError(parser.add_subparsers)
# add first sub-parser
parser1_kwargs = dict(description='1 description')
if subparser_help:
parser1_kwargs['help'] = '1 help'
if aliases:
parser1_kwargs['aliases'] = ['1alias1', '1alias2']
parser1 = subparsers.add_parser('1', **parser1_kwargs)
parser1.add_argument('-w', type=int, help='w help')
parser1.add_argument('x', choices='abc', help='x help')
# add second sub-parser
parser2_kwargs = dict(description='2 description')
if subparser_help:
parser2_kwargs['help'] = '2 help'
parser2 = subparsers.add_parser('2', **parser2_kwargs)
parser2.add_argument('-y', choices='123', help='y help')
parser2.add_argument('z', type=complex, nargs='*', help='z help')
# add third sub-parser
parser3_kwargs = dict(description='3 description')
if subparser_help:
parser3_kwargs['help'] = '3 help'
parser3 = subparsers.add_parser('3', **parser3_kwargs)
parser3.add_argument('t', type=int, help='t help')
parser3.add_argument('u', nargs='...', help='u help')
# return the main parser
return parser
def setUp(self):
super().setUp()
self.parser = self._get_parser()
self.command_help_parser = self._get_parser(subparser_help=True)
def test_parse_args_failures(self):
# check some failure cases:
for args_str in ['', 'a', 'a a', '0.5 a', '0.5 1',
'0.5 1 -y', '0.5 2 -w']:
args = args_str.split()
self.assertArgumentParserError(self.parser.parse_args, args)
def test_parse_args(self):
# check some non-failure cases:
self.assertEqual(
self.parser.parse_args('0.5 1 b -w 7'.split()),
NS(foo=False, bar=0.5, w=7, x='b'),
)
self.assertEqual(
self.parser.parse_args('0.25 --foo 2 -y 2 3j -- -1j'.split()),
NS(foo=True, bar=0.25, y='2', z=[3j, -1j]),
)
self.assertEqual(
self.parser.parse_args('--foo 0.125 1 c'.split()),
NS(foo=True, bar=0.125, w=None, x='c'),
)
self.assertEqual(
self.parser.parse_args('-1.5 3 11 -- a --foo 7 -- b'.split()),
NS(foo=False, bar=-1.5, t=11, u=['a', '--foo', '7', '--', 'b']),
)
def test_parse_known_args(self):
self.assertEqual(
self.parser.parse_known_args('0.5 1 b -w 7'.split()),
(NS(foo=False, bar=0.5, w=7, x='b'), []),
)
self.assertEqual(
self.parser.parse_known_args('0.5 -p 1 b -w 7'.split()),
(NS(foo=False, bar=0.5, w=7, x='b'), ['-p']),
)
self.assertEqual(
self.parser.parse_known_args('0.5 1 b -w 7 -p'.split()),
(NS(foo=False, bar=0.5, w=7, x='b'), ['-p']),
)
self.assertEqual(
self.parser.parse_known_args('0.5 1 b -q -rs -w 7'.split()),
(NS(foo=False, bar=0.5, w=7, x='b'), ['-q', '-rs']),
)
self.assertEqual(
self.parser.parse_known_args('0.5 -W 1 b -X Y -w 7 Z'.split()),
(NS(foo=False, bar=0.5, w=7, x='b'), ['-W', '-X', 'Y', 'Z']),
)
def test_dest(self):
parser = ErrorRaisingArgumentParser()
parser.add_argument('--foo', action='store_true')
subparsers = parser.add_subparsers(dest='bar')
parser1 = subparsers.add_parser('1')
parser1.add_argument('baz')
self.assertEqual(NS(foo=False, bar='1', baz='2'),
parser.parse_args('1 2'.split()))
def _test_required_subparsers(self, parser):
# Should parse the sub command
ret = parser.parse_args(['run'])
self.assertEqual(ret.command, 'run')
# Error when the command is missing
self.assertArgumentParserError(parser.parse_args, ())
def test_required_subparsers_via_attribute(self):
parser = ErrorRaisingArgumentParser()
subparsers = parser.add_subparsers(dest='command')
subparsers.required = True
subparsers.add_parser('run')
self._test_required_subparsers(parser)
def test_required_subparsers_via_kwarg(self):
parser = ErrorRaisingArgumentParser()
subparsers = parser.add_subparsers(dest='command', required=True)
subparsers.add_parser('run')
self._test_required_subparsers(parser)
def test_required_subparsers_default(self):
parser = ErrorRaisingArgumentParser()
subparsers = parser.add_subparsers(dest='command')
subparsers.add_parser('run')
# No error here
ret = parser.parse_args(())
self.assertIsNone(ret.command)
def test_required_subparsers_no_destination_error(self):
parser = ErrorRaisingArgumentParser()
subparsers = parser.add_subparsers(required=True)
subparsers.add_parser('foo')
subparsers.add_parser('bar')
with self.assertRaises(ArgumentParserError) as excinfo:
parser.parse_args(())
self.assertRegex(
excinfo.exception.stderr,
'error: the following arguments are required: {foo,bar}\n$'
)
def test_wrong_argument_subparsers_no_destination_error(self):
parser = ErrorRaisingArgumentParser()
subparsers = parser.add_subparsers(required=True)
subparsers.add_parser('foo')
subparsers.add_parser('bar')
with self.assertRaises(ArgumentParserError) as excinfo:
parser.parse_args(('baz',))
self.assertRegex(
excinfo.exception.stderr,
r"error: argument {foo,bar}: invalid choice: 'baz' \(choose from 'foo', 'bar'\)\n$"
)
def test_optional_subparsers(self):
parser = ErrorRaisingArgumentParser()
subparsers = parser.add_subparsers(dest='command', required=False)
subparsers.add_parser('run')
# No error here
ret = parser.parse_args(())
self.assertIsNone(ret.command)
def test_help(self):
self.assertEqual(self.parser.format_usage(),
'usage: PROG [-h] [--foo] bar {1,2,3} ...\n')
self.assertEqual(self.parser.format_help(), textwrap.dedent('''\
usage: PROG [-h] [--foo] bar {1,2,3} ...
main description
positional arguments:
bar bar help
{1,2,3} command help
options:
-h, --help show this help message and exit
--foo foo help
'''))
def test_help_extra_prefix_chars(self):
# Make sure - is still used for help if it is a non-first prefix char
parser = self._get_parser(prefix_chars='+:-')
self.assertEqual(parser.format_usage(),
'usage: PROG [-h] [++foo] bar {1,2,3} ...\n')
self.assertEqual(parser.format_help(), textwrap.dedent('''\
usage: PROG [-h] [++foo] bar {1,2,3} ...
main description
positional arguments:
bar bar help
{1,2,3} command help
options:
-h, --help show this help message and exit
++foo foo help
'''))
def test_help_non_breaking_spaces(self):
parser = ErrorRaisingArgumentParser(
prog='PROG', description='main description')
parser.add_argument(
"--non-breaking", action='store_false',
help='help message containing non-breaking spaces shall not '
'wrap\N{NO-BREAK SPACE}at non-breaking spaces')
self.assertEqual(parser.format_help(), textwrap.dedent('''\
usage: PROG [-h] [--non-breaking]
main description
options:
-h, --help show this help message and exit
--non-breaking help message containing non-breaking spaces shall not
wrap\N{NO-BREAK SPACE}at non-breaking spaces
'''))
def test_help_blank(self):
# Issue 24444
parser = ErrorRaisingArgumentParser(
prog='PROG', description='main description')
parser.add_argument(
'foo',
help=' ')
self.assertEqual(parser.format_help(), textwrap.dedent('''\
usage: PROG [-h] foo
main description
positional arguments:
foo \n
options:
-h, --help show this help message and exit
'''))
parser = ErrorRaisingArgumentParser(
prog='PROG', description='main description')
parser.add_argument(
'foo', choices=[],
help='%(choices)s')
self.assertEqual(parser.format_help(), textwrap.dedent('''\
usage: PROG [-h] {}
main description
positional arguments:
{} \n
options:
-h, --help show this help message and exit
'''))
def test_help_alternate_prefix_chars(self):
parser = self._get_parser(prefix_chars='+:/')
self.assertEqual(parser.format_usage(),
'usage: PROG [+h] [++foo] bar {1,2,3} ...\n')
self.assertEqual(parser.format_help(), textwrap.dedent('''\
usage: PROG [+h] [++foo] bar {1,2,3} ...
main description
positional arguments:
bar bar help
{1,2,3} command help
options:
+h, ++help show this help message and exit
++foo foo help
'''))
def test_parser_command_help(self):
self.assertEqual(self.command_help_parser.format_usage(),
'usage: PROG [-h] [--foo] bar {1,2,3} ...\n')
self.assertEqual(self.command_help_parser.format_help(),
textwrap.dedent('''\
usage: PROG [-h] [--foo] bar {1,2,3} ...
main description
positional arguments:
bar bar help
{1,2,3} command help
1 1 help
2 2 help
3 3 help
options:
-h, --help show this help message and exit
--foo foo help
'''))
def test_subparser_title_help(self):
parser = ErrorRaisingArgumentParser(prog='PROG',
description='main description')
parser.add_argument('--foo', action='store_true', help='foo help')
parser.add_argument('bar', help='bar help')
subparsers = parser.add_subparsers(title='subcommands',
description='command help',
help='additional text')
parser1 = subparsers.add_parser('1')
parser2 = subparsers.add_parser('2')
self.assertEqual(parser.format_usage(),
'usage: PROG [-h] [--foo] bar {1,2} ...\n')
self.assertEqual(parser.format_help(), textwrap.dedent('''\
usage: PROG [-h] [--foo] bar {1,2} ...
main description
positional arguments:
bar bar help
options:
-h, --help show this help message and exit
--foo foo help
subcommands:
command help
{1,2} additional text
'''))
def _test_subparser_help(self, args_str, expected_help):
with self.assertRaises(ArgumentParserError) as cm:
self.parser.parse_args(args_str.split())
self.assertEqual(expected_help, cm.exception.stdout)
def test_subparser1_help(self):
self._test_subparser_help('5.0 1 -h', textwrap.dedent('''\
usage: PROG bar 1 [-h] [-w W] {a,b,c}
1 description
positional arguments:
{a,b,c} x help
options:
-h, --help show this help message and exit
-w W w help
'''))
def test_subparser2_help(self):
self._test_subparser_help('5.0 2 -h', textwrap.dedent('''\
usage: PROG bar 2 [-h] [-y {1,2,3}] [z ...]
2 description
positional arguments:
z z help
options:
-h, --help show this help message and exit
-y {1,2,3} y help
'''))
def test_alias_invocation(self):
parser = self._get_parser(aliases=True)
self.assertEqual(
parser.parse_known_args('0.5 1alias1 b'.split()),
(NS(foo=False, bar=0.5, w=None, x='b'), []),
)
self.assertEqual(
parser.parse_known_args('0.5 1alias2 b'.split()),
(NS(foo=False, bar=0.5, w=None, x='b'), []),
)
def test_error_alias_invocation(self):
parser = self._get_parser(aliases=True)
self.assertArgumentParserError(parser.parse_args,
'0.5 1alias3 b'.split())
def test_alias_help(self):
parser = self._get_parser(aliases=True, subparser_help=True)
self.maxDiff = None
self.assertEqual(parser.format_help(), textwrap.dedent("""\
usage: PROG [-h] [--foo] bar COMMAND ...
main description
positional arguments:
bar bar help
options:
-h, --help show this help message and exit
--foo foo help
commands:
COMMAND
1 (1alias1, 1alias2)
1 help
2 2 help
3 3 help
"""))
# ============
# Groups tests
# ============
class TestPositionalsGroups(TestCase):
"""Tests that order of group positionals matches construction order"""
def test_nongroup_first(self):
parser = ErrorRaisingArgumentParser()
parser.add_argument('foo')
group = parser.add_argument_group('g')
group.add_argument('bar')
parser.add_argument('baz')
expected = NS(foo='1', bar='2', baz='3')
result = parser.parse_args('1 2 3'.split())
self.assertEqual(expected, result)
def test_group_first(self):
parser = ErrorRaisingArgumentParser()
group = parser.add_argument_group('xxx')
group.add_argument('foo')
parser.add_argument('bar')
parser.add_argument('baz')
expected = NS(foo='1', bar='2', baz='3')
result = parser.parse_args('1 2 3'.split())
self.assertEqual(expected, result)
def test_interleaved_groups(self):
parser = ErrorRaisingArgumentParser()
group = parser.add_argument_group('xxx')
parser.add_argument('foo')
group.add_argument('bar')
parser.add_argument('baz')
group = parser.add_argument_group('yyy')
group.add_argument('frell')
expected = NS(foo='1', bar='2', baz='3', frell='4')
result = parser.parse_args('1 2 3 4'.split())
self.assertEqual(expected, result)
# ===================
# Parent parser tests
# ===================
class TestParentParsers(TestCase):
"""Tests that parsers can be created with parent parsers"""
def assertArgumentParserError(self, *args, **kwargs):
self.assertRaises(ArgumentParserError, *args, **kwargs)
def setUp(self):
super().setUp()
self.wxyz_parent = ErrorRaisingArgumentParser(add_help=False)
self.wxyz_parent.add_argument('--w')
x_group = self.wxyz_parent.add_argument_group('x')
x_group.add_argument('-y')
self.wxyz_parent.add_argument('z')
self.abcd_parent = ErrorRaisingArgumentParser(add_help=False)
self.abcd_parent.add_argument('a')
self.abcd_parent.add_argument('-b')
c_group = self.abcd_parent.add_argument_group('c')
c_group.add_argument('--d')
self.w_parent = ErrorRaisingArgumentParser(add_help=False)
self.w_parent.add_argument('--w')
self.z_parent = ErrorRaisingArgumentParser(add_help=False)
self.z_parent.add_argument('z')
# parents with mutually exclusive groups
self.ab_mutex_parent = ErrorRaisingArgumentParser(add_help=False)
group = self.ab_mutex_parent.add_mutually_exclusive_group()
group.add_argument('-a', action='store_true')
group.add_argument('-b', action='store_true')
self.main_program = os.path.basename(sys.argv[0])
def test_single_parent(self):
parser = ErrorRaisingArgumentParser(parents=[self.wxyz_parent])
self.assertEqual(parser.parse_args('-y 1 2 --w 3'.split()),
NS(w='3', y='1', z='2'))
def test_single_parent_mutex(self):
self._test_mutex_ab(self.ab_mutex_parent.parse_args)
parser = ErrorRaisingArgumentParser(parents=[self.ab_mutex_parent])
self._test_mutex_ab(parser.parse_args)
    def test_single_grandparent_mutex(self):
parents = [self.ab_mutex_parent]
parser = ErrorRaisingArgumentParser(add_help=False, parents=parents)
parser = ErrorRaisingArgumentParser(parents=[parser])
self._test_mutex_ab(parser.parse_args)
def _test_mutex_ab(self, parse_args):
self.assertEqual(parse_args([]), NS(a=False, b=False))
self.assertEqual(parse_args(['-a']), NS(a=True, b=False))
self.assertEqual(parse_args(['-b']), NS(a=False, b=True))
self.assertArgumentParserError(parse_args, ['-a', '-b'])
self.assertArgumentParserError(parse_args, ['-b', '-a'])
self.assertArgumentParserError(parse_args, ['-c'])
self.assertArgumentParserError(parse_args, ['-a', '-c'])
self.assertArgumentParserError(parse_args, ['-b', '-c'])
def test_multiple_parents(self):
parents = [self.abcd_parent, self.wxyz_parent]
parser = ErrorRaisingArgumentParser(parents=parents)
self.assertEqual(parser.parse_args('--d 1 --w 2 3 4'.split()),
NS(a='3', b=None, d='1', w='2', y=None, z='4'))
def test_multiple_parents_mutex(self):
parents = [self.ab_mutex_parent, self.wxyz_parent]
parser = ErrorRaisingArgumentParser(parents=parents)
self.assertEqual(parser.parse_args('-a --w 2 3'.split()),
NS(a=True, b=False, w='2', y=None, z='3'))
self.assertArgumentParserError(
parser.parse_args, '-a --w 2 3 -b'.split())
self.assertArgumentParserError(
parser.parse_args, '-a -b --w 2 3'.split())
def test_conflicting_parents(self):
self.assertRaises(
argparse.ArgumentError,
argparse.ArgumentParser,
parents=[self.w_parent, self.wxyz_parent])
def test_conflicting_parents_mutex(self):
self.assertRaises(
argparse.ArgumentError,
argparse.ArgumentParser,
parents=[self.abcd_parent, self.ab_mutex_parent])
def test_same_argument_name_parents(self):
parents = [self.wxyz_parent, self.z_parent]
parser = ErrorRaisingArgumentParser(parents=parents)
self.assertEqual(parser.parse_args('1 2'.split()),
NS(w=None, y=None, z='2'))
def test_subparser_parents(self):
parser = ErrorRaisingArgumentParser()
subparsers = parser.add_subparsers()
abcde_parser = subparsers.add_parser('bar', parents=[self.abcd_parent])
abcde_parser.add_argument('e')
self.assertEqual(parser.parse_args('bar -b 1 --d 2 3 4'.split()),
NS(a='3', b='1', d='2', e='4'))
def test_subparser_parents_mutex(self):
parser = ErrorRaisingArgumentParser()
subparsers = parser.add_subparsers()
parents = [self.ab_mutex_parent]
abc_parser = subparsers.add_parser('foo', parents=parents)
c_group = abc_parser.add_argument_group('c_group')
c_group.add_argument('c')
parents = [self.wxyz_parent, self.ab_mutex_parent]
wxyzabe_parser = subparsers.add_parser('bar', parents=parents)
wxyzabe_parser.add_argument('e')
self.assertEqual(parser.parse_args('foo -a 4'.split()),
NS(a=True, b=False, c='4'))
self.assertEqual(parser.parse_args('bar -b --w 2 3 4'.split()),
NS(a=False, b=True, w='2', y=None, z='3', e='4'))
self.assertArgumentParserError(
parser.parse_args, 'foo -a -b 4'.split())
self.assertArgumentParserError(
parser.parse_args, 'bar -b -a 4'.split())
def test_parent_help(self):
parents = [self.abcd_parent, self.wxyz_parent]
parser = ErrorRaisingArgumentParser(parents=parents)
parser_help = parser.format_help()
progname = self.main_program
self.assertEqual(parser_help, textwrap.dedent('''\
usage: {}{}[-h] [-b B] [--d D] [--w W] [-y Y] a z
positional arguments:
a
z
options:
-h, --help show this help message and exit
-b B
--w W
c:
--d D
x:
-y Y
'''.format(progname, ' ' if progname else '' )))
def test_groups_parents(self):
parent = ErrorRaisingArgumentParser(add_help=False)
g = parent.add_argument_group(title='g', description='gd')
g.add_argument('-w')
g.add_argument('-x')
m = parent.add_mutually_exclusive_group()
m.add_argument('-y')
m.add_argument('-z')
parser = ErrorRaisingArgumentParser(parents=[parent])
self.assertRaises(ArgumentParserError, parser.parse_args,
['-y', 'Y', '-z', 'Z'])
parser_help = parser.format_help()
progname = self.main_program
self.assertEqual(parser_help, textwrap.dedent('''\
usage: {}{}[-h] [-w W] [-x X] [-y Y | -z Z]
options:
-h, --help show this help message and exit
-y Y
-z Z
g:
gd
-w W
-x X
'''.format(progname, ' ' if progname else '' )))
# ==============================
# Mutually exclusive group tests
# ==============================
class TestMutuallyExclusiveGroupErrors(TestCase):
def test_invalid_add_argument_group(self):
parser = ErrorRaisingArgumentParser()
raises = self.assertRaises
raises(TypeError, parser.add_mutually_exclusive_group, title='foo')
def test_invalid_add_argument(self):
parser = ErrorRaisingArgumentParser()
group = parser.add_mutually_exclusive_group()
add_argument = group.add_argument
raises = self.assertRaises
raises(ValueError, add_argument, '--foo', required=True)
raises(ValueError, add_argument, 'bar')
raises(ValueError, add_argument, 'bar', nargs='+')
raises(ValueError, add_argument, 'bar', nargs=1)
raises(ValueError, add_argument, 'bar', nargs=argparse.PARSER)
def test_help(self):
parser = ErrorRaisingArgumentParser(prog='PROG')
group1 = parser.add_mutually_exclusive_group()
group1.add_argument('--foo', action='store_true')
group1.add_argument('--bar', action='store_false')
group2 = parser.add_mutually_exclusive_group()
group2.add_argument('--soup', action='store_true')
group2.add_argument('--nuts', action='store_false')
expected = '''\
usage: PROG [-h] [--foo | --bar] [--soup | --nuts]
options:
-h, --help show this help message and exit
--foo
--bar
--soup
--nuts
'''
self.assertEqual(parser.format_help(), textwrap.dedent(expected))
def test_empty_group(self):
# See issue 26952
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group()
with self.assertRaises(ValueError):
parser.parse_args(['-h'])
class MEMixin(object):
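    # Shared machinery for the mutually exclusive group tests below: each
    # concrete subclass supplies get_parser(), failures, successes,
    # successes_when_not_required and the expected usage/help strings; the
    # mixin then exercises the group with both required=False and
    # required=True.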
def test_failures_when_not_required(self):
parse_args = self.get_parser(required=False).parse_args
error = ArgumentParserError
for args_string in self.failures:
self.assertRaises(error, parse_args, args_string.split())
def test_failures_when_required(self):
parse_args = self.get_parser(required=True).parse_args
error = ArgumentParserError
for args_string in self.failures + ['']:
self.assertRaises(error, parse_args, args_string.split())
def test_successes_when_not_required(self):
parse_args = self.get_parser(required=False).parse_args
successes = self.successes + self.successes_when_not_required
for args_string, expected_ns in successes:
actual_ns = parse_args(args_string.split())
self.assertEqual(actual_ns, expected_ns)
def test_successes_when_required(self):
parse_args = self.get_parser(required=True).parse_args
for args_string, expected_ns in self.successes:
actual_ns = parse_args(args_string.split())
self.assertEqual(actual_ns, expected_ns)
def test_usage_when_not_required(self):
format_usage = self.get_parser(required=False).format_usage
expected_usage = self.usage_when_not_required
self.assertEqual(format_usage(), textwrap.dedent(expected_usage))
def test_usage_when_required(self):
format_usage = self.get_parser(required=True).format_usage
expected_usage = self.usage_when_required
self.assertEqual(format_usage(), textwrap.dedent(expected_usage))
def test_help_when_not_required(self):
format_help = self.get_parser(required=False).format_help
help = self.usage_when_not_required + self.help
self.assertEqual(format_help(), textwrap.dedent(help))
def test_help_when_required(self):
format_help = self.get_parser(required=True).format_help
help = self.usage_when_required + self.help
self.assertEqual(format_help(), textwrap.dedent(help))
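# MEMixin drives the test methods above from class attributes supplied by each
# subclass: get_parser() builds the parser, 'failures' lists argument strings
# that must error, 'successes' (plus 'successes_when_not_required') map
# argument strings to expected namespaces, and 'usage_when_*'/'help' hold the
# expected formatted output.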
class TestMutuallyExclusiveSimple(MEMixin, TestCase):
def get_parser(self, required=None):
parser = ErrorRaisingArgumentParser(prog='PROG')
group = parser.add_mutually_exclusive_group(required=required)
group.add_argument('--bar', help='bar help')
group.add_argument('--baz', nargs='?', const='Z', help='baz help')
return parser
failures = ['--bar X --baz Y', '--bar X --baz']
successes = [
('--bar X', NS(bar='X', baz=None)),
('--bar X --bar Z', NS(bar='Z', baz=None)),
('--baz Y', NS(bar=None, baz='Y')),
('--baz', NS(bar=None, baz='Z')),
]
successes_when_not_required = [
('', NS(bar=None, baz=None)),
]
usage_when_not_required = '''\
usage: PROG [-h] [--bar BAR | --baz [BAZ]]
'''
usage_when_required = '''\
usage: PROG [-h] (--bar BAR | --baz [BAZ])
'''
help = '''\
options:
-h, --help show this help message and exit
--bar BAR bar help
--baz [BAZ] baz help
'''
class TestMutuallyExclusiveLong(MEMixin, TestCase):
def get_parser(self, required=None):
parser = ErrorRaisingArgumentParser(prog='PROG')
parser.add_argument('--abcde', help='abcde help')
parser.add_argument('--fghij', help='fghij help')
group = parser.add_mutually_exclusive_group(required=required)
group.add_argument('--klmno', help='klmno help')
group.add_argument('--pqrst', help='pqrst help')
return parser
failures = ['--klmno X --pqrst Y']
successes = [
('--klmno X', NS(abcde=None, fghij=None, klmno='X', pqrst=None)),
('--abcde Y --klmno X',
NS(abcde='Y', fghij=None, klmno='X', pqrst=None)),
('--pqrst X', NS(abcde=None, fghij=None, klmno=None, pqrst='X')),
('--pqrst X --fghij Y',
NS(abcde=None, fghij='Y', klmno=None, pqrst='X')),
]
successes_when_not_required = [
('', NS(abcde=None, fghij=None, klmno=None, pqrst=None)),
]
usage_when_not_required = '''\
usage: PROG [-h] [--abcde ABCDE] [--fghij FGHIJ]
[--klmno KLMNO | --pqrst PQRST]
'''
usage_when_required = '''\
usage: PROG [-h] [--abcde ABCDE] [--fghij FGHIJ]
(--klmno KLMNO | --pqrst PQRST)
'''
help = '''\
options:
-h, --help show this help message and exit
--abcde ABCDE abcde help
--fghij FGHIJ fghij help
--klmno KLMNO klmno help
--pqrst PQRST pqrst help
'''
class TestMutuallyExclusiveFirstSuppressed(MEMixin, TestCase):
def get_parser(self, required):
parser = ErrorRaisingArgumentParser(prog='PROG')
group = parser.add_mutually_exclusive_group(required=required)
group.add_argument('-x', help=argparse.SUPPRESS)
group.add_argument('-y', action='store_false', help='y help')
return parser
failures = ['-x X -y']
successes = [
('-x X', NS(x='X', y=True)),
('-x X -x Y', NS(x='Y', y=True)),
('-y', NS(x=None, y=False)),
]
successes_when_not_required = [
('', NS(x=None, y=True)),
]
usage_when_not_required = '''\
usage: PROG [-h] [-y]
'''
usage_when_required = '''\
usage: PROG [-h] -y
'''
help = '''\
options:
-h, --help show this help message and exit
-y y help
'''
class TestMutuallyExclusiveManySuppressed(MEMixin, TestCase):
def get_parser(self, required):
parser = ErrorRaisingArgumentParser(prog='PROG')
group = parser.add_mutually_exclusive_group(required=required)
add = group.add_argument
add('--spam', action='store_true', help=argparse.SUPPRESS)
add('--badger', action='store_false', help=argparse.SUPPRESS)
add('--bladder', help=argparse.SUPPRESS)
return parser
failures = [
'--spam --badger',
'--badger --bladder B',
'--bladder B --spam',
]
successes = [
('--spam', NS(spam=True, badger=True, bladder=None)),
('--badger', NS(spam=False, badger=False, bladder=None)),
('--bladder B', NS(spam=False, badger=True, bladder='B')),
('--spam --spam', NS(spam=True, badger=True, bladder=None)),
]
successes_when_not_required = [
('', NS(spam=False, badger=True, bladder=None)),
]
usage_when_required = usage_when_not_required = '''\
usage: PROG [-h]
'''
help = '''\
options:
-h, --help show this help message and exit
'''
class TestMutuallyExclusiveOptionalAndPositional(MEMixin, TestCase):
def get_parser(self, required):
parser = ErrorRaisingArgumentParser(prog='PROG')
group = parser.add_mutually_exclusive_group(required=required)
group.add_argument('--foo', action='store_true', help='FOO')
group.add_argument('--spam', help='SPAM')
group.add_argument('badger', nargs='*', default='X', help='BADGER')
return parser
failures = [
'--foo --spam S',
'--spam S X',
'X --foo',
'X Y Z --spam S',
'--foo X Y',
]
successes = [
('--foo', NS(foo=True, spam=None, badger='X')),
('--spam S', NS(foo=False, spam='S', badger='X')),
('X', NS(foo=False, spam=None, badger=['X'])),
('X Y Z', NS(foo=False, spam=None, badger=['X', 'Y', 'Z'])),
]
successes_when_not_required = [
('', NS(foo=False, spam=None, badger='X')),
]
usage_when_not_required = '''\
usage: PROG [-h] [--foo | --spam SPAM | badger ...]
'''
usage_when_required = '''\
usage: PROG [-h] (--foo | --spam SPAM | badger ...)
'''
help = '''\
positional arguments:
badger BADGER
options:
-h, --help show this help message and exit
--foo FOO
--spam SPAM SPAM
'''
class TestMutuallyExclusiveOptionalsMixed(MEMixin, TestCase):
def get_parser(self, required):
parser = ErrorRaisingArgumentParser(prog='PROG')
parser.add_argument('-x', action='store_true', help='x help')
group = parser.add_mutually_exclusive_group(required=required)
group.add_argument('-a', action='store_true', help='a help')
group.add_argument('-b', action='store_true', help='b help')
parser.add_argument('-y', action='store_true', help='y help')
group.add_argument('-c', action='store_true', help='c help')
return parser
failures = ['-a -b', '-b -c', '-a -c', '-a -b -c']
successes = [
('-a', NS(a=True, b=False, c=False, x=False, y=False)),
('-b', NS(a=False, b=True, c=False, x=False, y=False)),
('-c', NS(a=False, b=False, c=True, x=False, y=False)),
('-a -x', NS(a=True, b=False, c=False, x=True, y=False)),
('-y -b', NS(a=False, b=True, c=False, x=False, y=True)),
('-x -y -c', NS(a=False, b=False, c=True, x=True, y=True)),
]
successes_when_not_required = [
('', NS(a=False, b=False, c=False, x=False, y=False)),
('-x', NS(a=False, b=False, c=False, x=True, y=False)),
('-y', NS(a=False, b=False, c=False, x=False, y=True)),
]
usage_when_required = usage_when_not_required = '''\
usage: PROG [-h] [-x] [-a] [-b] [-y] [-c]
'''
help = '''\
options:
-h, --help show this help message and exit
-x x help
-a a help
-b b help
-y y help
-c c help
'''
class TestMutuallyExclusiveInGroup(MEMixin, TestCase):
def get_parser(self, required=None):
parser = ErrorRaisingArgumentParser(prog='PROG')
titled_group = parser.add_argument_group(
title='Titled group', description='Group description')
mutex_group = \
titled_group.add_mutually_exclusive_group(required=required)
mutex_group.add_argument('--bar', help='bar help')
mutex_group.add_argument('--baz', help='baz help')
return parser
failures = ['--bar X --baz Y', '--baz X --bar Y']
successes = [
('--bar X', NS(bar='X', baz=None)),
('--baz Y', NS(bar=None, baz='Y')),
]
successes_when_not_required = [
('', NS(bar=None, baz=None)),
]
usage_when_not_required = '''\
usage: PROG [-h] [--bar BAR | --baz BAZ]
'''
usage_when_required = '''\
usage: PROG [-h] (--bar BAR | --baz BAZ)
'''
help = '''\
options:
-h, --help show this help message and exit
Titled group:
Group description
--bar BAR bar help
--baz BAZ baz help
'''
class TestMutuallyExclusiveOptionalsAndPositionalsMixed(MEMixin, TestCase):
def get_parser(self, required):
parser = ErrorRaisingArgumentParser(prog='PROG')
parser.add_argument('x', help='x help')
parser.add_argument('-y', action='store_true', help='y help')
group = parser.add_mutually_exclusive_group(required=required)
group.add_argument('a', nargs='?', help='a help')
group.add_argument('-b', action='store_true', help='b help')
group.add_argument('-c', action='store_true', help='c help')
return parser
failures = ['X A -b', '-b -c', '-c X A']
successes = [
('X A', NS(a='A', b=False, c=False, x='X', y=False)),
('X -b', NS(a=None, b=True, c=False, x='X', y=False)),
('X -c', NS(a=None, b=False, c=True, x='X', y=False)),
('X A -y', NS(a='A', b=False, c=False, x='X', y=True)),
('X -y -b', NS(a=None, b=True, c=False, x='X', y=True)),
]
successes_when_not_required = [
('X', NS(a=None, b=False, c=False, x='X', y=False)),
('X -y', NS(a=None, b=False, c=False, x='X', y=True)),
]
usage_when_required = usage_when_not_required = '''\
usage: PROG [-h] [-y] [-b] [-c] x [a]
'''
help = '''\
positional arguments:
x x help
a a help
options:
-h, --help show this help message and exit
-y y help
-b b help
-c c help
'''
class TestMutuallyExclusiveNested(MEMixin, TestCase):
# Nesting mutually exclusive groups is an undocumented feature
# that came about by accident through inheritance and has been
# the source of many bugs. It is deprecated and this test should
# eventually be removed along with it.
def get_parser(self, required):
parser = ErrorRaisingArgumentParser(prog='PROG')
group = parser.add_mutually_exclusive_group(required=required)
group.add_argument('-a')
group.add_argument('-b')
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
group2 = group.add_mutually_exclusive_group(required=required)
group2.add_argument('-c')
group2.add_argument('-d')
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
group3 = group2.add_mutually_exclusive_group(required=required)
group3.add_argument('-e')
group3.add_argument('-f')
return parser
usage_when_not_required = '''\
usage: PROG [-h] [-a A | -b B | [-c C | -d D | [-e E | -f F]]]
'''
usage_when_required = '''\
usage: PROG [-h] (-a A | -b B | (-c C | -d D | (-e E | -f F)))
'''
help = '''\
options:
-h, --help show this help message and exit
-a A
-b B
-c C
-d D
-e E
-f F
'''
# We are only interested in testing the behavior of format_usage().
test_failures_when_not_required = None
test_failures_when_required = None
test_successes_when_not_required = None
test_successes_when_required = None
# =================================================
# Mutually exclusive group in parent parser tests
# =================================================
class MEPBase(object):
def get_parser(self, required=None):
parent = super(MEPBase, self).get_parser(required=required)
parser = ErrorRaisingArgumentParser(
prog=parent.prog, add_help=False, parents=[parent])
return parser
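# MEPBase reruns the mutually exclusive test cases above with the arguments
# defined on a parent parser and inherited through parents=[parent], checking
# that the groups survive the copy into the child parser.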
class TestMutuallyExclusiveGroupErrorsParent(
MEPBase, TestMutuallyExclusiveGroupErrors):
pass
class TestMutuallyExclusiveSimpleParent(
MEPBase, TestMutuallyExclusiveSimple):
pass
class TestMutuallyExclusiveLongParent(
MEPBase, TestMutuallyExclusiveLong):
pass
class TestMutuallyExclusiveFirstSuppressedParent(
MEPBase, TestMutuallyExclusiveFirstSuppressed):
pass
class TestMutuallyExclusiveManySuppressedParent(
MEPBase, TestMutuallyExclusiveManySuppressed):
pass
class TestMutuallyExclusiveOptionalAndPositionalParent(
MEPBase, TestMutuallyExclusiveOptionalAndPositional):
pass
class TestMutuallyExclusiveOptionalsMixedParent(
MEPBase, TestMutuallyExclusiveOptionalsMixed):
pass
class TestMutuallyExclusiveOptionalsAndPositionalsMixedParent(
MEPBase, TestMutuallyExclusiveOptionalsAndPositionalsMixed):
pass
# =================
# Set default tests
# =================
class TestSetDefaults(TestCase):
def test_set_defaults_no_args(self):
parser = ErrorRaisingArgumentParser()
parser.set_defaults(x='foo')
parser.set_defaults(y='bar', z=1)
self.assertEqual(NS(x='foo', y='bar', z=1),
parser.parse_args([]))
self.assertEqual(NS(x='foo', y='bar', z=1),
parser.parse_args([], NS()))
self.assertEqual(NS(x='baz', y='bar', z=1),
parser.parse_args([], NS(x='baz')))
self.assertEqual(NS(x='baz', y='bar', z=2),
parser.parse_args([], NS(x='baz', z=2)))
def test_set_defaults_with_args(self):
parser = ErrorRaisingArgumentParser()
parser.set_defaults(x='foo', y='bar')
parser.add_argument('-x', default='xfoox')
self.assertEqual(NS(x='xfoox', y='bar'),
parser.parse_args([]))
self.assertEqual(NS(x='xfoox', y='bar'),
parser.parse_args([], NS()))
self.assertEqual(NS(x='baz', y='bar'),
parser.parse_args([], NS(x='baz')))
self.assertEqual(NS(x='1', y='bar'),
parser.parse_args('-x 1'.split()))
self.assertEqual(NS(x='1', y='bar'),
parser.parse_args('-x 1'.split(), NS()))
self.assertEqual(NS(x='1', y='bar'),
parser.parse_args('-x 1'.split(), NS(x='baz')))
def test_set_defaults_subparsers(self):
parser = ErrorRaisingArgumentParser()
parser.set_defaults(x='foo')
subparsers = parser.add_subparsers()
parser_a = subparsers.add_parser('a')
parser_a.set_defaults(y='bar')
self.assertEqual(NS(x='foo', y='bar'),
parser.parse_args('a'.split()))
def test_set_defaults_parents(self):
parent = ErrorRaisingArgumentParser(add_help=False)
parent.set_defaults(x='foo')
parser = ErrorRaisingArgumentParser(parents=[parent])
self.assertEqual(NS(x='foo'), parser.parse_args([]))
def test_set_defaults_on_parent_and_subparser(self):
parser = argparse.ArgumentParser()
xparser = parser.add_subparsers().add_parser('X')
parser.set_defaults(foo=1)
xparser.set_defaults(foo=2)
self.assertEqual(NS(foo=2), parser.parse_args(['X']))
def test_set_defaults_same_as_add_argument(self):
parser = ErrorRaisingArgumentParser()
parser.set_defaults(w='W', x='X', y='Y', z='Z')
parser.add_argument('-w')
parser.add_argument('-x', default='XX')
parser.add_argument('y', nargs='?')
parser.add_argument('z', nargs='?', default='ZZ')
# defaults set previously
self.assertEqual(NS(w='W', x='XX', y='Y', z='ZZ'),
parser.parse_args([]))
# reset defaults
parser.set_defaults(w='WW', x='X', y='YY', z='Z')
self.assertEqual(NS(w='WW', x='X', y='YY', z='Z'),
parser.parse_args([]))
def test_set_defaults_same_as_add_argument_group(self):
parser = ErrorRaisingArgumentParser()
parser.set_defaults(w='W', x='X', y='Y', z='Z')
group = parser.add_argument_group('foo')
group.add_argument('-w')
group.add_argument('-x', default='XX')
group.add_argument('y', nargs='?')
group.add_argument('z', nargs='?', default='ZZ')
# defaults set previously
self.assertEqual(NS(w='W', x='XX', y='Y', z='ZZ'),
parser.parse_args([]))
# reset defaults
parser.set_defaults(w='WW', x='X', y='YY', z='Z')
self.assertEqual(NS(w='WW', x='X', y='YY', z='Z'),
parser.parse_args([]))
# =================
# Get default tests
# =================
class TestGetDefault(TestCase):
def test_get_default(self):
parser = ErrorRaisingArgumentParser()
self.assertIsNone(parser.get_default("foo"))
self.assertIsNone(parser.get_default("bar"))
parser.add_argument("--foo")
self.assertIsNone(parser.get_default("foo"))
self.assertIsNone(parser.get_default("bar"))
parser.add_argument("--bar", type=int, default=42)
self.assertIsNone(parser.get_default("foo"))
self.assertEqual(42, parser.get_default("bar"))
parser.set_defaults(foo="badger")
self.assertEqual("badger", parser.get_default("foo"))
self.assertEqual(42, parser.get_default("bar"))
# ==========================
# Namespace 'contains' tests
# ==========================
class TestNamespaceContainsSimple(TestCase):
def test_empty(self):
ns = argparse.Namespace()
self.assertNotIn('', ns)
self.assertNotIn('x', ns)
def test_non_empty(self):
ns = argparse.Namespace(x=1, y=2)
self.assertNotIn('', ns)
self.assertIn('x', ns)
self.assertIn('y', ns)
self.assertNotIn('xx', ns)
self.assertNotIn('z', ns)
# =====================
# Help formatting tests
# =====================
class TestHelpFormattingMetaclass(type):
def __init__(cls, name, bases, bodydict):
if name == 'HelpTestCase':
return
class AddTests(object):
def __init__(self, test_class, func_suffix, std_name):
self.func_suffix = func_suffix
self.std_name = std_name
for test_func in [self.test_format,
self.test_print,
self.test_print_file]:
test_name = '%s_%s' % (test_func.__name__, func_suffix)
def test_wrapper(self, test_func=test_func):
test_func(self)
try:
test_wrapper.__name__ = test_name
except TypeError:
pass
setattr(test_class, test_name, test_wrapper)
def _get_parser(self, tester):
parser = argparse.ArgumentParser(
*tester.parser_signature.args,
**tester.parser_signature.kwargs)
for argument_sig in getattr(tester, 'argument_signatures', []):
parser.add_argument(*argument_sig.args,
**argument_sig.kwargs)
group_sigs = getattr(tester, 'argument_group_signatures', [])
for group_sig, argument_sigs in group_sigs:
group = parser.add_argument_group(*group_sig.args,
**group_sig.kwargs)
for argument_sig in argument_sigs:
group.add_argument(*argument_sig.args,
**argument_sig.kwargs)
subparsers_sigs = getattr(tester, 'subparsers_signatures', [])
if subparsers_sigs:
subparsers = parser.add_subparsers()
for subparser_sig in subparsers_sigs:
subparsers.add_parser(*subparser_sig.args,
**subparser_sig.kwargs)
return parser
def _test(self, tester, parser_text):
expected_text = getattr(tester, self.func_suffix)
expected_text = textwrap.dedent(expected_text)
tester.assertEqual(expected_text, parser_text)
def test_format(self, tester):
parser = self._get_parser(tester)
format = getattr(parser, 'format_%s' % self.func_suffix)
self._test(tester, format())
def test_print(self, tester):
parser = self._get_parser(tester)
print_ = getattr(parser, 'print_%s' % self.func_suffix)
old_stream = getattr(sys, self.std_name)
setattr(sys, self.std_name, StdIOBuffer())
try:
print_()
parser_text = getattr(sys, self.std_name).getvalue()
finally:
setattr(sys, self.std_name, old_stream)
self._test(tester, parser_text)
def test_print_file(self, tester):
parser = self._get_parser(tester)
print_ = getattr(parser, 'print_%s' % self.func_suffix)
sfile = StdIOBuffer()
print_(sfile)
parser_text = sfile.getvalue()
self._test(tester, parser_text)
# add tests for {format,print}_{usage,help}
for func_suffix, std_name in [('usage', 'stdout'),
('help', 'stdout')]:
AddTests(cls, func_suffix, std_name)
bases = TestCase,
HelpTestCase = TestHelpFormattingMetaclass('HelpTestCase', bases, {})
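# The metaclass above turns each HelpTestCase subclass's class attributes
# (parser_signature, argument_signatures, argument_group_signatures, optional
# subparsers_signatures, plus the expected 'usage' and 'help' strings) into
# generated test_format_*, test_print_* and test_print_file_* methods for both
# usage and help output.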
class TestHelpBiggerOptionals(HelpTestCase):
"""Make sure that argument help aligns when options are longer"""
parser_signature = Sig(prog='PROG', description='DESCRIPTION',
epilog='EPILOG')
argument_signatures = [
Sig('-v', '--version', action='version', version='0.1'),
Sig('-x', action='store_true', help='X HELP'),
Sig('--y', help='Y HELP'),
Sig('foo', help='FOO HELP'),
Sig('bar', help='BAR HELP'),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [-v] [-x] [--y Y] foo bar
'''
help = usage + '''\
DESCRIPTION
positional arguments:
foo FOO HELP
bar BAR HELP
options:
-h, --help show this help message and exit
-v, --version show program's version number and exit
-x X HELP
--y Y Y HELP
EPILOG
'''
version = '''\
0.1
'''
class TestShortColumns(HelpTestCase):
'''Test an extremely small number of columns.
TestCase prevents "COLUMNS" from being too small in the tests themselves,
but we don't want any exceptions to be thrown in such cases; only an ugly
representation is acceptable.
'''
def setUp(self):
env = os_helper.EnvironmentVarGuard()
env.set("COLUMNS", '15')
self.addCleanup(env.__exit__)
parser_signature = TestHelpBiggerOptionals.parser_signature
argument_signatures = TestHelpBiggerOptionals.argument_signatures
argument_group_signatures = TestHelpBiggerOptionals.argument_group_signatures
usage = '''\
usage: PROG
[-h]
[-v]
[-x]
[--y Y]
foo
bar
'''
help = usage + '''\
DESCRIPTION
positional arguments:
foo
FOO HELP
bar
BAR HELP
options:
-h, --help
show this
help
message and
exit
-v, --version
show
program's
version
number and
exit
-x
X HELP
--y Y
Y HELP
EPILOG
'''
version = TestHelpBiggerOptionals.version
class TestHelpBiggerOptionalGroups(HelpTestCase):
"""Make sure that argument help aligns when options are longer"""
parser_signature = Sig(prog='PROG', description='DESCRIPTION',
epilog='EPILOG')
argument_signatures = [
Sig('-v', '--version', action='version', version='0.1'),
Sig('-x', action='store_true', help='X HELP'),
Sig('--y', help='Y HELP'),
Sig('foo', help='FOO HELP'),
Sig('bar', help='BAR HELP'),
]
argument_group_signatures = [
(Sig('GROUP TITLE', description='GROUP DESCRIPTION'), [
Sig('baz', help='BAZ HELP'),
Sig('-z', nargs='+', help='Z HELP')]),
]
usage = '''\
usage: PROG [-h] [-v] [-x] [--y Y] [-z Z [Z ...]] foo bar baz
'''
help = usage + '''\
DESCRIPTION
positional arguments:
foo FOO HELP
bar BAR HELP
options:
-h, --help show this help message and exit
-v, --version show program's version number and exit
-x X HELP
--y Y Y HELP
GROUP TITLE:
GROUP DESCRIPTION
baz BAZ HELP
-z Z [Z ...] Z HELP
EPILOG
'''
version = '''\
0.1
'''
class TestHelpBiggerPositionals(HelpTestCase):
"""Make sure that help aligns when arguments are longer"""
parser_signature = Sig(usage='USAGE', description='DESCRIPTION')
argument_signatures = [
Sig('-x', action='store_true', help='X HELP'),
Sig('--y', help='Y HELP'),
Sig('ekiekiekifekang', help='EKI HELP'),
Sig('bar', help='BAR HELP'),
]
argument_group_signatures = []
usage = '''\
usage: USAGE
'''
help = usage + '''\
DESCRIPTION
positional arguments:
ekiekiekifekang EKI HELP
bar BAR HELP
options:
-h, --help show this help message and exit
-x X HELP
--y Y Y HELP
'''
version = ''
class TestHelpReformatting(HelpTestCase):
"""Make sure that text after short names starts on the first line"""
parser_signature = Sig(
prog='PROG',
description=' oddly formatted\n'
'description\n'
'\n'
'that is so long that it should go onto multiple '
'lines when wrapped')
argument_signatures = [
Sig('-x', metavar='XX', help='oddly\n'
' formatted -x help'),
Sig('y', metavar='yyy', help='normal y help'),
]
argument_group_signatures = [
(Sig('title', description='\n'
' oddly formatted group\n'
'\n'
'description'),
[Sig('-a', action='store_true',
help=' oddly \n'
'formatted -a help \n'
' again, so long that it should be wrapped over '
'multiple lines')]),
]
usage = '''\
usage: PROG [-h] [-x XX] [-a] yyy
'''
help = usage + '''\
oddly formatted description that is so long that it should go onto \
multiple
lines when wrapped
positional arguments:
yyy normal y help
options:
-h, --help show this help message and exit
-x XX oddly formatted -x help
title:
oddly formatted group description
-a oddly formatted -a help again, so long that it should \
be wrapped
over multiple lines
'''
version = ''
class TestHelpWrappingShortNames(HelpTestCase):
"""Make sure that text after short names starts on the first line"""
parser_signature = Sig(prog='PROG', description='D\nD' * 30)
argument_signatures = [
Sig('-x', metavar='XX', help='XHH HX' * 20),
Sig('y', metavar='yyy', help='YH YH' * 20),
]
argument_group_signatures = [
(Sig('ALPHAS'), [
Sig('-a', action='store_true', help='AHHH HHA' * 10)]),
]
usage = '''\
usage: PROG [-h] [-x XX] [-a] yyy
'''
help = usage + '''\
D DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD \
DD DD DD
DD DD DD DD D
positional arguments:
yyy YH YHYH YHYH YHYH YHYH YHYH YHYH YHYH YHYH YHYH YHYH \
YHYH YHYH
YHYH YHYH YHYH YHYH YHYH YHYH YHYH YH
options:
-h, --help show this help message and exit
-x XX XHH HXXHH HXXHH HXXHH HXXHH HXXHH HXXHH HXXHH HXXHH \
HXXHH HXXHH
HXXHH HXXHH HXXHH HXXHH HXXHH HXXHH HXXHH HXXHH HXXHH HX
ALPHAS:
-a AHHH HHAAHHH HHAAHHH HHAAHHH HHAAHHH HHAAHHH HHAAHHH \
HHAAHHH
HHAAHHH HHAAHHH HHA
'''
version = ''
class TestHelpWrappingLongNames(HelpTestCase):
"""Make sure that text after long names starts on the next line"""
parser_signature = Sig(usage='USAGE', description='D D' * 30)
argument_signatures = [
Sig('-v', '--version', action='version', version='V V' * 30),
Sig('-x', metavar='X' * 25, help='XH XH' * 20),
Sig('y', metavar='y' * 25, help='YH YH' * 20),
]
argument_group_signatures = [
(Sig('ALPHAS'), [
Sig('-a', metavar='A' * 25, help='AH AH' * 20),
Sig('z', metavar='z' * 25, help='ZH ZH' * 20)]),
]
usage = '''\
usage: USAGE
'''
help = usage + '''\
D DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD \
DD DD DD
DD DD DD DD D
positional arguments:
yyyyyyyyyyyyyyyyyyyyyyyyy
YH YHYH YHYH YHYH YHYH YHYH YHYH YHYH YHYH \
YHYH YHYH
YHYH YHYH YHYH YHYH YHYH YHYH YHYH YHYH YHYH YH
options:
-h, --help show this help message and exit
-v, --version show program's version number and exit
-x XXXXXXXXXXXXXXXXXXXXXXXXX
XH XHXH XHXH XHXH XHXH XHXH XHXH XHXH XHXH \
XHXH XHXH
XHXH XHXH XHXH XHXH XHXH XHXH XHXH XHXH XHXH XH
ALPHAS:
-a AAAAAAAAAAAAAAAAAAAAAAAAA
AH AHAH AHAH AHAH AHAH AHAH AHAH AHAH AHAH \
AHAH AHAH
AHAH AHAH AHAH AHAH AHAH AHAH AHAH AHAH AHAH AH
zzzzzzzzzzzzzzzzzzzzzzzzz
ZH ZHZH ZHZH ZHZH ZHZH ZHZH ZHZH ZHZH ZHZH \
ZHZH ZHZH
ZHZH ZHZH ZHZH ZHZH ZHZH ZHZH ZHZH ZHZH ZHZH ZH
'''
version = '''\
V VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV \
VV VV VV
VV VV VV VV V
'''
class TestHelpUsage(HelpTestCase):
"""Test basic usage messages"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('-w', nargs='+', help='w'),
Sig('-x', nargs='*', help='x'),
Sig('a', help='a'),
Sig('b', help='b', nargs=2),
Sig('c', help='c', nargs='?'),
Sig('--foo', help='Whether to foo', action=argparse.BooleanOptionalAction),
Sig('--bar', help='Whether to bar', default=True,
action=argparse.BooleanOptionalAction),
Sig('-f', '--foobar', '--barfoo', action=argparse.BooleanOptionalAction),
Sig('--bazz', action=argparse.BooleanOptionalAction,
default=argparse.SUPPRESS, help='Bazz!'),
]
argument_group_signatures = [
(Sig('group'), [
Sig('-y', nargs='?', help='y'),
Sig('-z', nargs=3, help='z'),
Sig('d', help='d', nargs='*'),
Sig('e', help='e', nargs='+'),
])
]
usage = '''\
usage: PROG [-h] [-w W [W ...]] [-x [X ...]] [--foo | --no-foo]
[--bar | --no-bar]
[-f | --foobar | --no-foobar | --barfoo | --no-barfoo]
[--bazz | --no-bazz] [-y [Y]] [-z Z Z Z]
a b b [c] [d ...] e [e ...]
'''
help = usage + '''\
positional arguments:
a a
b b
c c
options:
-h, --help show this help message and exit
-w W [W ...] w
-x [X ...] x
--foo, --no-foo Whether to foo
--bar, --no-bar Whether to bar (default: True)
-f, --foobar, --no-foobar, --barfoo, --no-barfoo
--bazz, --no-bazz Bazz!
group:
-y [Y] y
-z Z Z Z z
d d
e e
'''
version = ''
class TestHelpOnlyUserGroups(HelpTestCase):
"""Test basic usage messages"""
parser_signature = Sig(prog='PROG', add_help=False)
argument_signatures = []
argument_group_signatures = [
(Sig('xxxx'), [
Sig('-x', help='x'),
Sig('a', help='a'),
]),
(Sig('yyyy'), [
Sig('b', help='b'),
Sig('-y', help='y'),
]),
]
usage = '''\
usage: PROG [-x X] [-y Y] a b
'''
help = usage + '''\
xxxx:
-x X x
a a
yyyy:
b b
-y Y y
'''
version = ''
class TestHelpUsageLongProg(HelpTestCase):
"""Test usage messages where the prog is long"""
parser_signature = Sig(prog='P' * 60)
argument_signatures = [
Sig('-w', metavar='W'),
Sig('-x', metavar='X'),
Sig('a'),
Sig('b'),
]
argument_group_signatures = []
usage = '''\
usage: PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP
[-h] [-w W] [-x X] a b
'''
help = usage + '''\
positional arguments:
a
b
options:
-h, --help show this help message and exit
-w W
-x X
'''
version = ''
class TestHelpUsageLongProgOptionsWrap(HelpTestCase):
"""Test usage messages where the prog is long and the optionals wrap"""
parser_signature = Sig(prog='P' * 60)
argument_signatures = [
Sig('-w', metavar='W' * 25),
Sig('-x', metavar='X' * 25),
Sig('-y', metavar='Y' * 25),
Sig('-z', metavar='Z' * 25),
Sig('a'),
Sig('b'),
]
argument_group_signatures = []
usage = '''\
usage: PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP
[-h] [-w WWWWWWWWWWWWWWWWWWWWWWWWW] \
[-x XXXXXXXXXXXXXXXXXXXXXXXXX]
[-y YYYYYYYYYYYYYYYYYYYYYYYYY] [-z ZZZZZZZZZZZZZZZZZZZZZZZZZ]
a b
'''
help = usage + '''\
positional arguments:
a
b
options:
-h, --help show this help message and exit
-w WWWWWWWWWWWWWWWWWWWWWWWWW
-x XXXXXXXXXXXXXXXXXXXXXXXXX
-y YYYYYYYYYYYYYYYYYYYYYYYYY
-z ZZZZZZZZZZZZZZZZZZZZZZZZZ
'''
version = ''
class TestHelpUsageLongProgPositionalsWrap(HelpTestCase):
"""Test usage messages where the prog is long and the positionals wrap"""
parser_signature = Sig(prog='P' * 60, add_help=False)
argument_signatures = [
Sig('a' * 25),
Sig('b' * 25),
Sig('c' * 25),
]
argument_group_signatures = []
usage = '''\
usage: PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP
aaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbb
ccccccccccccccccccccccccc
'''
help = usage + '''\
positional arguments:
aaaaaaaaaaaaaaaaaaaaaaaaa
bbbbbbbbbbbbbbbbbbbbbbbbb
ccccccccccccccccccccccccc
'''
version = ''
class TestHelpUsageOptionalsWrap(HelpTestCase):
"""Test usage messages where the optionals wrap"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('-w', metavar='W' * 25),
Sig('-x', metavar='X' * 25),
Sig('-y', metavar='Y' * 25),
Sig('-z', metavar='Z' * 25),
Sig('a'),
Sig('b'),
Sig('c'),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [-w WWWWWWWWWWWWWWWWWWWWWWWWW] \
[-x XXXXXXXXXXXXXXXXXXXXXXXXX]
[-y YYYYYYYYYYYYYYYYYYYYYYYYY] \
[-z ZZZZZZZZZZZZZZZZZZZZZZZZZ]
a b c
'''
help = usage + '''\
positional arguments:
a
b
c
options:
-h, --help show this help message and exit
-w WWWWWWWWWWWWWWWWWWWWWWWWW
-x XXXXXXXXXXXXXXXXXXXXXXXXX
-y YYYYYYYYYYYYYYYYYYYYYYYYY
-z ZZZZZZZZZZZZZZZZZZZZZZZZZ
'''
version = ''
class TestHelpUsagePositionalsWrap(HelpTestCase):
"""Test usage messages where the positionals wrap"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('-x'),
Sig('-y'),
Sig('-z'),
Sig('a' * 25),
Sig('b' * 25),
Sig('c' * 25),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [-x X] [-y Y] [-z Z]
aaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbb
ccccccccccccccccccccccccc
'''
help = usage + '''\
positional arguments:
aaaaaaaaaaaaaaaaaaaaaaaaa
bbbbbbbbbbbbbbbbbbbbbbbbb
ccccccccccccccccccccccccc
options:
-h, --help show this help message and exit
-x X
-y Y
-z Z
'''
version = ''
class TestHelpUsageOptionalsPositionalsWrap(HelpTestCase):
"""Test usage messages where the optionals and positionals wrap"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('-x', metavar='X' * 25),
Sig('-y', metavar='Y' * 25),
Sig('-z', metavar='Z' * 25),
Sig('a' * 25),
Sig('b' * 25),
Sig('c' * 25),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [-x XXXXXXXXXXXXXXXXXXXXXXXXX] \
[-y YYYYYYYYYYYYYYYYYYYYYYYYY]
[-z ZZZZZZZZZZZZZZZZZZZZZZZZZ]
aaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbb
ccccccccccccccccccccccccc
'''
help = usage + '''\
positional arguments:
aaaaaaaaaaaaaaaaaaaaaaaaa
bbbbbbbbbbbbbbbbbbbbbbbbb
ccccccccccccccccccccccccc
options:
-h, --help show this help message and exit
-x XXXXXXXXXXXXXXXXXXXXXXXXX
-y YYYYYYYYYYYYYYYYYYYYYYYYY
-z ZZZZZZZZZZZZZZZZZZZZZZZZZ
'''
version = ''
class TestHelpUsageOptionalsOnlyWrap(HelpTestCase):
"""Test usage messages where there are only optionals and they wrap"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('-x', metavar='X' * 25),
Sig('-y', metavar='Y' * 25),
Sig('-z', metavar='Z' * 25),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [-x XXXXXXXXXXXXXXXXXXXXXXXXX] \
[-y YYYYYYYYYYYYYYYYYYYYYYYYY]
[-z ZZZZZZZZZZZZZZZZZZZZZZZZZ]
'''
help = usage + '''\
options:
-h, --help show this help message and exit
-x XXXXXXXXXXXXXXXXXXXXXXXXX
-y YYYYYYYYYYYYYYYYYYYYYYYYY
-z ZZZZZZZZZZZZZZZZZZZZZZZZZ
'''
version = ''
class TestHelpUsagePositionalsOnlyWrap(HelpTestCase):
"""Test usage messages where there are only positionals and they wrap"""
parser_signature = Sig(prog='PROG', add_help=False)
argument_signatures = [
Sig('a' * 25),
Sig('b' * 25),
Sig('c' * 25),
]
argument_group_signatures = []
usage = '''\
usage: PROG aaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbb
ccccccccccccccccccccccccc
'''
help = usage + '''\
positional arguments:
aaaaaaaaaaaaaaaaaaaaaaaaa
bbbbbbbbbbbbbbbbbbbbbbbbb
ccccccccccccccccccccccccc
'''
version = ''
class TestHelpVariableExpansion(HelpTestCase):
"""Test that variables are expanded properly in help messages"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('-x', type=int,
help='x %(prog)s %(default)s %(type)s %%'),
Sig('-y', action='store_const', default=42, const='XXX',
help='y %(prog)s %(default)s %(const)s'),
Sig('--foo', choices='abc',
help='foo %(prog)s %(default)s %(choices)s'),
Sig('--bar', default='baz', choices=[1, 2], metavar='BBB',
help='bar %(prog)s %(default)s %(dest)s'),
Sig('spam', help='spam %(prog)s %(default)s'),
Sig('badger', default=0.5, help='badger %(prog)s %(default)s'),
]
argument_group_signatures = [
(Sig('group'), [
Sig('-a', help='a %(prog)s %(default)s'),
Sig('-b', default=-1, help='b %(prog)s %(default)s'),
])
]
usage = ('''\
usage: PROG [-h] [-x X] [-y] [--foo {a,b,c}] [--bar BBB] [-a A] [-b B]
spam badger
''')
help = usage + '''\
positional arguments:
spam spam PROG None
badger badger PROG 0.5
options:
-h, --help show this help message and exit
-x X x PROG None int %
-y y PROG 42 XXX
--foo {a,b,c} foo PROG None a, b, c
--bar BBB bar PROG baz bar
group:
-a A a PROG None
-b B b PROG -1
'''
version = ''
class TestHelpVariableExpansionUsageSupplied(HelpTestCase):
"""Test that variables are expanded properly when usage= is present"""
parser_signature = Sig(prog='PROG', usage='%(prog)s FOO')
argument_signatures = []
argument_group_signatures = []
usage = ('''\
usage: PROG FOO
''')
help = usage + '''\
options:
-h, --help show this help message and exit
'''
version = ''
class TestHelpVariableExpansionNoArguments(HelpTestCase):
"""Test that variables are expanded properly with no arguments"""
parser_signature = Sig(prog='PROG', add_help=False)
argument_signatures = []
argument_group_signatures = []
usage = ('''\
usage: PROG
''')
help = usage
version = ''
class TestHelpSuppressUsage(HelpTestCase):
"""Test that items can be suppressed in usage messages"""
parser_signature = Sig(prog='PROG', usage=argparse.SUPPRESS)
argument_signatures = [
Sig('--foo', help='foo help'),
Sig('spam', help='spam help'),
]
argument_group_signatures = []
help = '''\
positional arguments:
spam spam help
options:
-h, --help show this help message and exit
--foo FOO foo help
'''
usage = ''
version = ''
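# The next few cases rely on argparse.SUPPRESS: usage=SUPPRESS hides the usage
# line entirely, while help=SUPPRESS on an individual argument drops that
# argument from both the usage string and the options listing.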
class TestHelpSuppressOptional(HelpTestCase):
"""Test that optional arguments can be suppressed in help messages"""
parser_signature = Sig(prog='PROG', add_help=False)
argument_signatures = [
Sig('--foo', help=argparse.SUPPRESS),
Sig('spam', help='spam help'),
]
argument_group_signatures = []
usage = '''\
usage: PROG spam
'''
help = usage + '''\
positional arguments:
spam spam help
'''
version = ''
class TestHelpSuppressOptionalGroup(HelpTestCase):
"""Test that optional groups can be suppressed in help messages"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('--foo', help='foo help'),
Sig('spam', help='spam help'),
]
argument_group_signatures = [
(Sig('group'), [Sig('--bar', help=argparse.SUPPRESS)]),
]
usage = '''\
usage: PROG [-h] [--foo FOO] spam
'''
help = usage + '''\
positional arguments:
spam spam help
options:
-h, --help show this help message and exit
--foo FOO foo help
'''
version = ''
class TestHelpSuppressPositional(HelpTestCase):
"""Test that positional arguments can be suppressed in help messages"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('--foo', help='foo help'),
Sig('spam', help=argparse.SUPPRESS),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [--foo FOO]
'''
help = usage + '''\
options:
-h, --help show this help message and exit
--foo FOO foo help
'''
version = ''
class TestHelpRequiredOptional(HelpTestCase):
"""Test that required options don't look optional"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('--foo', required=True, help='foo help'),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] --foo FOO
'''
help = usage + '''\
options:
-h, --help show this help message and exit
--foo FOO foo help
'''
version = ''
class TestHelpAlternatePrefixChars(HelpTestCase):
"""Test that options display with different prefix characters"""
parser_signature = Sig(prog='PROG', prefix_chars='^;', add_help=False)
argument_signatures = [
Sig('^^foo', action='store_true', help='foo help'),
Sig(';b', ';;bar', help='bar help'),
]
argument_group_signatures = []
usage = '''\
usage: PROG [^^foo] [;b BAR]
'''
help = usage + '''\
options:
^^foo foo help
;b BAR, ;;bar BAR bar help
'''
version = ''
class TestHelpNoHelpOptional(HelpTestCase):
"""Test that the --help argument can be suppressed help messages"""
parser_signature = Sig(prog='PROG', add_help=False)
argument_signatures = [
Sig('--foo', help='foo help'),
Sig('spam', help='spam help'),
]
argument_group_signatures = []
usage = '''\
usage: PROG [--foo FOO] spam
'''
help = usage + '''\
positional arguments:
spam spam help
options:
--foo FOO foo help
'''
version = ''
class TestHelpNone(HelpTestCase):
"""Test that no errors occur if no help is specified"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('--foo'),
Sig('spam'),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [--foo FOO] spam
'''
help = usage + '''\
positional arguments:
spam
options:
-h, --help show this help message and exit
--foo FOO
'''
version = ''
class TestHelpTupleMetavar(HelpTestCase):
"""Test specifying metavar as a tuple"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('-w', help='w', nargs='+', metavar=('W1', 'W2')),
Sig('-x', help='x', nargs='*', metavar=('X1', 'X2')),
Sig('-y', help='y', nargs=3, metavar=('Y1', 'Y2', 'Y3')),
Sig('-z', help='z', nargs='?', metavar=('Z1', )),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [-w W1 [W2 ...]] [-x [X1 [X2 ...]]] [-y Y1 Y2 Y3] \
[-z [Z1]]
'''
help = usage + '''\
options:
-h, --help show this help message and exit
-w W1 [W2 ...] w
-x [X1 [X2 ...]] x
-y Y1 Y2 Y3 y
-z [Z1] z
'''
version = ''
class TestHelpRawText(HelpTestCase):
"""Test the RawTextHelpFormatter"""
parser_signature = Sig(
prog='PROG', formatter_class=argparse.RawTextHelpFormatter,
description='Keep the formatting\n'
' exactly as it is written\n'
'\n'
'here\n')
argument_signatures = [
Sig('--foo', help=' foo help should also\n'
'appear as given here'),
Sig('spam', help='spam help'),
]
argument_group_signatures = [
(Sig('title', description=' This text\n'
' should be indented\n'
' exactly like it is here\n'),
[Sig('--bar', help='bar help')]),
]
usage = '''\
usage: PROG [-h] [--foo FOO] [--bar BAR] spam
'''
help = usage + '''\
Keep the formatting
exactly as it is written
here
positional arguments:
spam spam help
options:
-h, --help show this help message and exit
--foo FOO foo help should also
appear as given here
title:
This text
should be indented
exactly like it is here
--bar BAR bar help
'''
version = ''
class TestHelpRawDescription(HelpTestCase):
"""Test the RawTextHelpFormatter"""
parser_signature = Sig(
prog='PROG', formatter_class=argparse.RawDescriptionHelpFormatter,
description='Keep the formatting\n'
' exactly as it is written\n'
'\n'
'here\n')
argument_signatures = [
Sig('--foo', help=' foo help should not\n'
' retain this odd formatting'),
Sig('spam', help='spam help'),
]
argument_group_signatures = [
(Sig('title', description=' This text\n'
' should be indented\n'
' exactly like it is here\n'),
[Sig('--bar', help='bar help')]),
]
usage = '''\
usage: PROG [-h] [--foo FOO] [--bar BAR] spam
'''
help = usage + '''\
Keep the formatting
exactly as it is written
here
positional arguments:
spam spam help
options:
-h, --help show this help message and exit
--foo FOO foo help should not retain this odd formatting
title:
This text
should be indented
exactly like it is here
--bar BAR bar help
'''
version = ''
class TestHelpArgumentDefaults(HelpTestCase):
"""Test the ArgumentDefaultsHelpFormatter"""
parser_signature = Sig(
prog='PROG', formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='description')
argument_signatures = [
Sig('--foo', help='foo help - oh and by the way, %(default)s'),
Sig('--bar', action='store_true', help='bar help'),
Sig('--taz', action=argparse.BooleanOptionalAction,
help='Whether to taz it', default=True),
Sig('--quux', help="Set the quux", default=42),
Sig('spam', help='spam help'),
Sig('badger', nargs='?', default='wooden', help='badger help'),
]
argument_group_signatures = [
(Sig('title', description='description'),
[Sig('--baz', type=int, default=42, help='baz help')]),
]
usage = '''\
usage: PROG [-h] [--foo FOO] [--bar] [--taz | --no-taz] [--quux QUUX]
[--baz BAZ]
spam [badger]
'''
help = usage + '''\
description
positional arguments:
spam spam help
badger badger help (default: wooden)
options:
-h, --help show this help message and exit
--foo FOO foo help - oh and by the way, None
--bar bar help (default: False)
--taz, --no-taz Whether to taz it (default: True)
--quux QUUX Set the quux (default: 42)
title:
description
--baz BAZ baz help (default: 42)
'''
version = ''
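# Roughly: ArgumentDefaultsHelpFormatter appends '(default: ...)' to an
# argument's help text unless the help already references %(default)s (as
# --foo does above) or the default is argparse.SUPPRESS.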
class TestHelpVersionAction(HelpTestCase):
"""Test the default help for the version action"""
parser_signature = Sig(prog='PROG', description='description')
argument_signatures = [Sig('-V', '--version', action='version', version='3.6')]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [-V]
'''
help = usage + '''\
description
options:
-h, --help show this help message and exit
-V, --version show program's version number and exit
'''
version = ''
class TestHelpVersionActionSuppress(HelpTestCase):
"""Test that the --version argument can be suppressed in help messages"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('-v', '--version', action='version', version='1.0',
help=argparse.SUPPRESS),
Sig('--foo', help='foo help'),
Sig('spam', help='spam help'),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [--foo FOO] spam
'''
help = usage + '''\
positional arguments:
spam spam help
options:
-h, --help show this help message and exit
--foo FOO foo help
'''
class TestHelpSubparsersOrdering(HelpTestCase):
"""Test ordering of subcommands in help matches the code"""
parser_signature = Sig(prog='PROG',
description='display some subcommands')
argument_signatures = [Sig('-v', '--version', action='version', version='0.1')]
subparsers_signatures = [Sig(name=name)
for name in ('a', 'b', 'c', 'd', 'e')]
usage = '''\
usage: PROG [-h] [-v] {a,b,c,d,e} ...
'''
help = usage + '''\
display some subcommands
positional arguments:
{a,b,c,d,e}
options:
-h, --help show this help message and exit
-v, --version show program's version number and exit
'''
version = '''\
0.1
'''
class TestHelpSubparsersWithHelpOrdering(HelpTestCase):
"""Test ordering of subcommands in help matches the code"""
parser_signature = Sig(prog='PROG',
description='display some subcommands')
argument_signatures = [Sig('-v', '--version', action='version', version='0.1')]
subcommand_data = (('a', 'a subcommand help'),
('b', 'b subcommand help'),
('c', 'c subcommand help'),
('d', 'd subcommand help'),
('e', 'e subcommand help'),
)
subparsers_signatures = [Sig(name=name, help=help)
for name, help in subcommand_data]
usage = '''\
usage: PROG [-h] [-v] {a,b,c,d,e} ...
'''
help = usage + '''\
display some subcommands
positional arguments:
{a,b,c,d,e}
a a subcommand help
b b subcommand help
c c subcommand help
d d subcommand help
e e subcommand help
options:
-h, --help show this help message and exit
-v, --version show program's version number and exit
'''
version = '''\
0.1
'''
class TestHelpMetavarTypeFormatter(HelpTestCase):
def custom_type(string):
return string
parser_signature = Sig(prog='PROG', description='description',
formatter_class=argparse.MetavarTypeHelpFormatter)
argument_signatures = [Sig('a', type=int),
Sig('-b', type=custom_type),
Sig('-c', type=float, metavar='SOME FLOAT')]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [-b custom_type] [-c SOME FLOAT] int
'''
help = usage + '''\
description
positional arguments:
int
options:
-h, --help show this help message and exit
-b custom_type
-c SOME FLOAT
'''
version = ''
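# MetavarTypeHelpFormatter derives the default metavar from the type
# callable's __name__ ('int', 'custom_type') instead of from dest; an explicit
# metavar such as 'SOME FLOAT' still takes precedence.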
# =====================================
# Optional/Positional constructor tests
# =====================================
class TestInvalidArgumentConstructors(TestCase):
"""Test a bunch of invalid Argument constructors"""
def assertTypeError(self, *args, **kwargs):
parser = argparse.ArgumentParser()
self.assertRaises(TypeError, parser.add_argument,
*args, **kwargs)
def assertValueError(self, *args, **kwargs):
parser = argparse.ArgumentParser()
self.assertRaises(ValueError, parser.add_argument,
*args, **kwargs)
def test_invalid_keyword_arguments(self):
self.assertTypeError('-x', bar=None)
self.assertTypeError('-y', callback='foo')
self.assertTypeError('-y', callback_args=())
self.assertTypeError('-y', callback_kwargs={})
def test_missing_destination(self):
self.assertTypeError()
for action in ['append', 'store']:
self.assertTypeError(action=action)
def test_invalid_option_strings(self):
self.assertValueError('--')
self.assertValueError('---')
def test_invalid_type(self):
self.assertValueError('--foo', type='int')
self.assertValueError('--foo', type=(int, float))
def test_invalid_action(self):
self.assertValueError('-x', action='foo')
self.assertValueError('foo', action='baz')
self.assertValueError('--foo', action=('store', 'append'))
parser = argparse.ArgumentParser()
with self.assertRaises(ValueError) as cm:
parser.add_argument("--foo", action="store-true")
self.assertIn('unknown action', str(cm.exception))
def test_multiple_dest(self):
parser = argparse.ArgumentParser()
parser.add_argument(dest='foo')
with self.assertRaises(ValueError) as cm:
parser.add_argument('bar', dest='baz')
self.assertIn('dest supplied twice for positional argument',
str(cm.exception))
def test_no_argument_actions(self):
for action in ['store_const', 'store_true', 'store_false',
'append_const', 'count']:
for attrs in [dict(type=int), dict(nargs='+'),
dict(choices='ab')]:
self.assertTypeError('-x', action=action, **attrs)
def test_no_argument_no_const_actions(self):
# options with zero arguments
for action in ['store_true', 'store_false', 'count']:
# const is always disallowed
self.assertTypeError('-x', const='foo', action=action)
# nargs is always disallowed
self.assertTypeError('-x', nargs='*', action=action)
def test_more_than_one_argument_actions(self):
for action in ['store', 'append']:
# nargs=0 is disallowed
self.assertValueError('-x', nargs=0, action=action)
self.assertValueError('spam', nargs=0, action=action)
# const is disallowed with non-optional arguments
for nargs in [1, '*', '+']:
self.assertValueError('-x', const='foo',
nargs=nargs, action=action)
self.assertValueError('spam', const='foo',
nargs=nargs, action=action)
def test_required_const_actions(self):
for action in ['store_const', 'append_const']:
# nargs is always disallowed
self.assertTypeError('-x', nargs='+', action=action)
def test_parsers_action_missing_params(self):
self.assertTypeError('command', action='parsers')
self.assertTypeError('command', action='parsers', prog='PROG')
self.assertTypeError('command', action='parsers',
parser_class=argparse.ArgumentParser)
def test_required_positional(self):
self.assertTypeError('foo', required=True)
def test_user_defined_action(self):
class Success(Exception):
pass
class Action(object):
def __init__(self,
option_strings,
dest,
const,
default,
required=False):
if dest == 'spam':
if const is Success:
if default is Success:
raise Success()
def __call__(self, *args, **kwargs):
pass
parser = argparse.ArgumentParser()
self.assertRaises(Success, parser.add_argument, '--spam',
action=Action, default=Success, const=Success)
self.assertRaises(Success, parser.add_argument, 'spam',
action=Action, default=Success, const=Success)
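# add_argument() instantiates the action class with the keyword arguments it
# was given, so raising Success from the custom Action's __init__ proves that
# const= and default= reach the constructor for optionals and positionals
# alike.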
# ================================
# Actions returned by add_argument
# ================================
class TestActionsReturned(TestCase):
def test_dest(self):
parser = argparse.ArgumentParser()
action = parser.add_argument('--foo')
self.assertEqual(action.dest, 'foo')
action = parser.add_argument('-b', '--bar')
self.assertEqual(action.dest, 'bar')
action = parser.add_argument('-x', '-y')
self.assertEqual(action.dest, 'x')
def test_misc(self):
parser = argparse.ArgumentParser()
action = parser.add_argument('--foo', nargs='?', const=42,
default=84, type=int, choices=[1, 2],
help='FOO', metavar='BAR', dest='baz')
self.assertEqual(action.nargs, '?')
self.assertEqual(action.const, 42)
self.assertEqual(action.default, 84)
self.assertEqual(action.type, int)
self.assertEqual(action.choices, [1, 2])
self.assertEqual(action.help, 'FOO')
self.assertEqual(action.metavar, 'BAR')
self.assertEqual(action.dest, 'baz')
# ================================
# Argument conflict handling tests
# ================================
class TestConflictHandling(TestCase):
def test_bad_type(self):
self.assertRaises(ValueError, argparse.ArgumentParser,
conflict_handler='foo')
def test_conflict_error(self):
parser = argparse.ArgumentParser()
parser.add_argument('-x')
self.assertRaises(argparse.ArgumentError,
parser.add_argument, '-x')
parser.add_argument('--spam')
self.assertRaises(argparse.ArgumentError,
parser.add_argument, '--spam')
def test_resolve_error(self):
get_parser = argparse.ArgumentParser
parser = get_parser(prog='PROG', conflict_handler='resolve')
parser.add_argument('-x', help='OLD X')
parser.add_argument('-x', help='NEW X')
self.assertEqual(parser.format_help(), textwrap.dedent('''\
usage: PROG [-h] [-x X]
options:
-h, --help show this help message and exit
-x X NEW X
'''))
parser.add_argument('--spam', metavar='OLD_SPAM')
parser.add_argument('--spam', metavar='NEW_SPAM')
self.assertEqual(parser.format_help(), textwrap.dedent('''\
usage: PROG [-h] [-x X] [--spam NEW_SPAM]
options:
-h, --help show this help message and exit
-x X NEW X
--spam NEW_SPAM
'''))
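# With conflict_handler='resolve', re-adding an existing option string
# replaces the earlier definition (as the help output above shows) instead of
# raising ArgumentError as the default 'error' handler does.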
def test_subparser_conflict(self):
parser = argparse.ArgumentParser()
sp = parser.add_subparsers()
sp.add_parser('fullname', aliases=['alias'])
self.assertRaises(argparse.ArgumentError,
sp.add_parser, 'fullname')
self.assertRaises(argparse.ArgumentError,
sp.add_parser, 'alias')
self.assertRaises(argparse.ArgumentError,
sp.add_parser, 'other', aliases=['fullname'])
self.assertRaises(argparse.ArgumentError,
sp.add_parser, 'other', aliases=['alias'])
# =============================
# Help and Version option tests
# =============================
class TestOptionalsHelpVersionActions(TestCase):
"""Test the help and version actions"""
def assertPrintHelpExit(self, parser, args_str):
with self.assertRaises(ArgumentParserError) as cm:
parser.parse_args(args_str.split())
self.assertEqual(parser.format_help(), cm.exception.stdout)
def assertArgumentParserError(self, parser, *args):
self.assertRaises(ArgumentParserError, parser.parse_args, args)
def test_version(self):
parser = ErrorRaisingArgumentParser()
parser.add_argument('-v', '--version', action='version', version='1.0')
self.assertPrintHelpExit(parser, '-h')
self.assertPrintHelpExit(parser, '--help')
self.assertRaises(AttributeError, getattr, parser, 'format_version')
def test_version_format(self):
parser = ErrorRaisingArgumentParser(prog='PPP')
parser.add_argument('-v', '--version', action='version', version='%(prog)s 3.5')
with self.assertRaises(ArgumentParserError) as cm:
parser.parse_args(['-v'])
self.assertEqual('PPP 3.5\n', cm.exception.stdout)
def test_version_no_help(self):
parser = ErrorRaisingArgumentParser(add_help=False)
parser.add_argument('-v', '--version', action='version', version='1.0')
self.assertArgumentParserError(parser, '-h')
self.assertArgumentParserError(parser, '--help')
self.assertRaises(AttributeError, getattr, parser, 'format_version')
def test_version_action(self):
parser = ErrorRaisingArgumentParser(prog='XXX')
parser.add_argument('-V', action='version', version='%(prog)s 3.7')
with self.assertRaises(ArgumentParserError) as cm:
parser.parse_args(['-V'])
self.assertEqual('XXX 3.7\n', cm.exception.stdout)
def test_no_help(self):
parser = ErrorRaisingArgumentParser(add_help=False)
self.assertArgumentParserError(parser, '-h')
self.assertArgumentParserError(parser, '--help')
self.assertArgumentParserError(parser, '-v')
self.assertArgumentParserError(parser, '--version')
def test_alternate_help_version(self):
parser = ErrorRaisingArgumentParser()
parser.add_argument('-x', action='help')
parser.add_argument('-y', action='version')
self.assertPrintHelpExit(parser, '-x')
self.assertArgumentParserError(parser, '-v')
self.assertArgumentParserError(parser, '--version')
self.assertRaises(AttributeError, getattr, parser, 'format_version')
def test_help_version_extra_arguments(self):
parser = ErrorRaisingArgumentParser()
parser.add_argument('--version', action='version', version='1.0')
parser.add_argument('-x', action='store_true')
parser.add_argument('y')
# try all combinations of valid prefixes and suffixes
valid_prefixes = ['', '-x', 'foo', '-x bar', 'baz -x']
valid_suffixes = valid_prefixes + ['--bad-option', 'foo bar baz']
for prefix in valid_prefixes:
for suffix in valid_suffixes:
format = '%s %%s %s' % (prefix, suffix)
self.assertPrintHelpExit(parser, format % '-h')
self.assertPrintHelpExit(parser, format % '--help')
self.assertRaises(AttributeError, getattr, parser, 'format_version')
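# The help action prints the help text and exits as soon as -h/--help is
# parsed, so otherwise invalid arguments later on the command line (e.g.
# '--bad-option' above) never get a chance to trigger an error.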
# ======================
# str() and repr() tests
# ======================
class TestStrings(TestCase):
"""Test str() and repr() on Optionals and Positionals"""
def assertStringEqual(self, obj, result_string):
for func in [str, repr]:
self.assertEqual(func(obj), result_string)
def test_optional(self):
option = argparse.Action(
option_strings=['--foo', '-a', '-b'],
dest='b',
type='int',
nargs='+',
default=42,
choices=[1, 2, 3],
required=False,
help='HELP',
metavar='METAVAR')
string = (
"Action(option_strings=['--foo', '-a', '-b'], dest='b', "
"nargs='+', const=None, default=42, type='int', "
"choices=[1, 2, 3], required=False, help='HELP', metavar='METAVAR')")
self.assertStringEqual(option, string)
def test_argument(self):
argument = argparse.Action(
option_strings=[],
dest='x',
type=float,
nargs='?',
default=2.5,
choices=[0.5, 1.5, 2.5],
required=True,
help='H HH H',
metavar='MV MV MV')
string = (
"Action(option_strings=[], dest='x', nargs='?', "
"const=None, default=2.5, type=%r, choices=[0.5, 1.5, 2.5], "
"required=True, help='H HH H', metavar='MV MV MV')" % float)
self.assertStringEqual(argument, string)
def test_namespace(self):
ns = argparse.Namespace(foo=42, bar='spam')
string = "Namespace(foo=42, bar='spam')"
self.assertStringEqual(ns, string)
def test_namespace_starkwargs_notidentifier(self):
ns = argparse.Namespace(**{'"': 'quote'})
string = """Namespace(**{'"': 'quote'})"""
self.assertStringEqual(ns, string)
def test_namespace_kwargs_and_starkwargs_notidentifier(self):
ns = argparse.Namespace(a=1, **{'"': 'quote'})
string = """Namespace(a=1, **{'"': 'quote'})"""
self.assertStringEqual(ns, string)
def test_namespace_starkwargs_identifier(self):
ns = argparse.Namespace(**{'valid': True})
string = "Namespace(valid=True)"
self.assertStringEqual(ns, string)
def test_parser(self):
parser = argparse.ArgumentParser(prog='PROG')
string = (
"ArgumentParser(prog='PROG', usage=None, description=None, "
"formatter_class=%r, conflict_handler='error', "
"add_help=True)" % argparse.HelpFormatter)
self.assertStringEqual(parser, string)
# ===============
# Namespace tests
# ===============
class TestNamespace(TestCase):
def test_constructor(self):
ns = argparse.Namespace()
self.assertRaises(AttributeError, getattr, ns, 'x')
ns = argparse.Namespace(a=42, b='spam')
self.assertEqual(ns.a, 42)
self.assertEqual(ns.b, 'spam')
def test_equality(self):
ns1 = argparse.Namespace(a=1, b=2)
ns2 = argparse.Namespace(b=2, a=1)
ns3 = argparse.Namespace(a=1)
ns4 = argparse.Namespace(b=2)
self.assertEqual(ns1, ns2)
self.assertNotEqual(ns1, ns3)
self.assertNotEqual(ns1, ns4)
self.assertNotEqual(ns2, ns3)
self.assertNotEqual(ns2, ns4)
self.assertTrue(ns1 != ns3)
self.assertTrue(ns1 != ns4)
self.assertTrue(ns2 != ns3)
self.assertTrue(ns2 != ns4)
def test_equality_returns_notimplemented(self):
# See issue 21481
ns = argparse.Namespace(a=1, b=2)
self.assertIs(ns.__eq__(None), NotImplemented)
self.assertIs(ns.__ne__(None), NotImplemented)
# ===================
# File encoding tests
# ===================
class TestEncoding(TestCase):
def _test_module_encoding(self, path):
path, _ = os.path.splitext(path)
path += ".py"
with open(path, 'r', encoding='utf-8') as f:
f.read()
def test_argparse_module_encoding(self):
self._test_module_encoding(argparse.__file__)
def test_test_argparse_module_encoding(self):
self._test_module_encoding(__file__)
# ===================
# ArgumentError tests
# ===================
class TestArgumentError(TestCase):
def test_argument_error(self):
msg = "my error here"
error = argparse.ArgumentError(None, msg)
self.assertEqual(str(error), msg)
# =======================
# ArgumentTypeError tests
# =======================
class TestArgumentTypeError(TestCase):
def test_argument_type_error(self):
def spam(string):
raise argparse.ArgumentTypeError('spam!')
parser = ErrorRaisingArgumentParser(prog='PROG', add_help=False)
parser.add_argument('x', type=spam)
with self.assertRaises(ArgumentParserError) as cm:
parser.parse_args(['XXX'])
self.assertEqual('usage: PROG x\nPROG: error: argument x: spam!\n',
cm.exception.stderr)
# =========================
# MessageContentError tests
# =========================
class TestMessageContentError(TestCase):
def test_missing_argument_name_in_message(self):
parser = ErrorRaisingArgumentParser(prog='PROG', usage='')
parser.add_argument('req_pos', type=str)
parser.add_argument('-req_opt', type=int, required=True)
parser.add_argument('need_one', type=str, nargs='+')
with self.assertRaises(ArgumentParserError) as cm:
parser.parse_args([])
msg = str(cm.exception)
self.assertRegex(msg, 'req_pos')
self.assertRegex(msg, 'req_opt')
self.assertRegex(msg, 'need_one')
with self.assertRaises(ArgumentParserError) as cm:
parser.parse_args(['myXargument'])
msg = str(cm.exception)
self.assertNotIn('req_pos', msg)
self.assertRegex(msg, 'req_opt')
self.assertRegex(msg, 'need_one')
with self.assertRaises(ArgumentParserError) as cm:
parser.parse_args(['myXargument', '-req_opt=1'])
msg = str(cm.exception)
self.assertNotIn('req_pos', msg)
self.assertNotIn('req_opt', msg)
self.assertRegex(msg, 'need_one')
def test_optional_optional_not_in_message(self):
parser = ErrorRaisingArgumentParser(prog='PROG', usage='')
parser.add_argument('req_pos', type=str)
parser.add_argument('--req_opt', type=int, required=True)
parser.add_argument('--opt_opt', type=bool, nargs='?',
default=True)
with self.assertRaises(ArgumentParserError) as cm:
parser.parse_args([])
msg = str(cm.exception)
self.assertRegex(msg, 'req_pos')
self.assertRegex(msg, 'req_opt')
self.assertNotIn('opt_opt', msg)
with self.assertRaises(ArgumentParserError) as cm:
parser.parse_args(['--req_opt=1'])
msg = str(cm.exception)
self.assertRegex(msg, 'req_pos')
self.assertNotIn('req_opt', msg)
self.assertNotIn('opt_opt', msg)
def test_optional_positional_not_in_message(self):
parser = ErrorRaisingArgumentParser(prog='PROG', usage='')
parser.add_argument('req_pos')
parser.add_argument('optional_positional', nargs='?', default='eggs')
with self.assertRaises(ArgumentParserError) as cm:
parser.parse_args([])
msg = str(cm.exception)
self.assertRegex(msg, 'req_pos')
self.assertNotIn('optional_positional', msg)
# ================================================
# Check that the type function is called only once
# ================================================
class TestTypeFunctionCallOnlyOnce(TestCase):
def test_type_function_call_only_once(self):
def spam(string_to_convert):
self.assertEqual(string_to_convert, 'spam!')
return 'foo_converted'
parser = argparse.ArgumentParser()
parser.add_argument('--foo', type=spam, default='bar')
args = parser.parse_args('--foo spam!'.split())
self.assertEqual(NS(foo='foo_converted'), args)
# ==================================================================
# Check semantics regarding the default argument and type conversion
# ==================================================================
class TestTypeFunctionCalledOnDefault(TestCase):
def test_type_function_call_with_non_string_default(self):
def spam(int_to_convert):
self.assertEqual(int_to_convert, 0)
return 'foo_converted'
parser = argparse.ArgumentParser()
parser.add_argument('--foo', type=spam, default=0)
args = parser.parse_args([])
# foo should *not* be converted because its default is not a string.
self.assertEqual(NS(foo=0), args)
def test_type_function_call_with_string_default(self):
def spam(int_to_convert):
return 'foo_converted'
parser = argparse.ArgumentParser()
parser.add_argument('--foo', type=spam, default='0')
args = parser.parse_args([])
# foo is converted because its default is a string.
self.assertEqual(NS(foo='foo_converted'), args)
def test_no_double_type_conversion_of_default(self):
def extend(str_to_convert):
return str_to_convert + '*'
parser = argparse.ArgumentParser()
parser.add_argument('--test', type=extend, default='*')
args = parser.parse_args([])
# The test argument will be two stars, one coming from the default
# value and one coming from the type conversion being called exactly
# once.
self.assertEqual(NS(test='**'), args)
def test_issue_15906(self):
# Issue #15906: When action='append', type=str, and default=[] are
# provided, the dest value was the string representation "[]" when it
# should have been an empty list.
parser = argparse.ArgumentParser()
parser.add_argument('--test', dest='test', type=str,
default=[], action='append')
args = parser.parse_args([])
self.assertEqual(args.test, [])
# ======================
# parse_known_args tests
# ======================
class TestParseKnownArgs(TestCase):
def test_arguments_tuple(self):
parser = argparse.ArgumentParser()
parser.parse_args(())
def test_arguments_list(self):
parser = argparse.ArgumentParser()
parser.parse_args([])
def test_arguments_tuple_positional(self):
parser = argparse.ArgumentParser()
parser.add_argument('x')
parser.parse_args(('x',))
def test_arguments_list_positional(self):
parser = argparse.ArgumentParser()
parser.add_argument('x')
parser.parse_args(['x'])
def test_optionals(self):
parser = argparse.ArgumentParser()
parser.add_argument('--foo')
args, extras = parser.parse_known_args('--foo F --bar --baz'.split())
self.assertEqual(NS(foo='F'), args)
self.assertEqual(['--bar', '--baz'], extras)
def test_mixed(self):
parser = argparse.ArgumentParser()
parser.add_argument('-v', nargs='?', const=1, type=int)
parser.add_argument('--spam', action='store_false')
parser.add_argument('badger')
argv = ["B", "C", "--foo", "-v", "3", "4"]
args, extras = parser.parse_known_args(argv)
self.assertEqual(NS(v=3, spam=True, badger="B"), args)
self.assertEqual(["C", "--foo", "4"], extras)
# ===========================
# parse_intermixed_args tests
# ===========================
class TestIntermixedArgs(TestCase):
def test_basic(self):
# test parsing intermixed optionals and positionals
parser = argparse.ArgumentParser(prog='PROG')
parser.add_argument('--foo', dest='foo')
bar = parser.add_argument('--bar', dest='bar', required=True)
parser.add_argument('cmd')
parser.add_argument('rest', nargs='*', type=int)
argv = 'cmd --foo x 1 --bar y 2 3'.split()
args = parser.parse_intermixed_args(argv)
# rest gets [1,2,3] despite the foo and bar strings
self.assertEqual(NS(bar='y', cmd='cmd', foo='x', rest=[1, 2, 3]), args)
args, extras = parser.parse_known_args(argv)
# cannot parse the '1,2,3'
self.assertEqual(NS(bar='y', cmd='cmd', foo='x', rest=[]), args)
self.assertEqual(["1", "2", "3"], extras)
argv = 'cmd --foo x 1 --error 2 --bar y 3'.split()
args, extras = parser.parse_known_intermixed_args(argv)
# unknown optionals go into extras
self.assertEqual(NS(bar='y', cmd='cmd', foo='x', rest=[1]), args)
self.assertEqual(['--error', '2', '3'], extras)
# restores attributes that were temporarily changed
self.assertIsNone(parser.usage)
self.assertEqual(bar.required, True)
def test_remainder(self):
# Intermixed and remainder are incompatible
parser = ErrorRaisingArgumentParser(prog='PROG')
parser.add_argument('-z')
parser.add_argument('x')
parser.add_argument('y', nargs='...')
argv = 'X A B -z Z'.split()
# intermixed fails with '...' (also 'A...')
# self.assertRaises(TypeError, parser.parse_intermixed_args, argv)
with self.assertRaises(TypeError) as cm:
parser.parse_intermixed_args(argv)
self.assertRegex(str(cm.exception), r'\.\.\.')
def test_exclusive(self):
# mutually exclusive group; intermixed works fine
parser = ErrorRaisingArgumentParser(prog='PROG')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--foo', action='store_true', help='FOO')
group.add_argument('--spam', help='SPAM')
parser.add_argument('badger', nargs='*', default='X', help='BADGER')
args = parser.parse_intermixed_args('1 --foo 2'.split())
self.assertEqual(NS(badger=['1', '2'], foo=True, spam=None), args)
self.assertRaises(ArgumentParserError, parser.parse_intermixed_args, '1 2'.split())
self.assertEqual(group.required, True)
def test_exclusive_incompatible(self):
# mutually exclusive group including positional - fail
parser = ErrorRaisingArgumentParser(prog='PROG')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--foo', action='store_true', help='FOO')
group.add_argument('--spam', help='SPAM')
group.add_argument('badger', nargs='*', default='X', help='BADGER')
self.assertRaises(TypeError, parser.parse_intermixed_args, [])
self.assertEqual(group.required, True)
class TestIntermixedMessageContentError(TestCase):
# case where Intermixed gives different error message
# error is raised by 1st parsing step
def test_missing_argument_name_in_message(self):
parser = ErrorRaisingArgumentParser(prog='PROG', usage='')
parser.add_argument('req_pos', type=str)
parser.add_argument('-req_opt', type=int, required=True)
with self.assertRaises(ArgumentParserError) as cm:
parser.parse_args([])
msg = str(cm.exception)
self.assertRegex(msg, 'req_pos')
self.assertRegex(msg, 'req_opt')
with self.assertRaises(ArgumentParserError) as cm:
parser.parse_intermixed_args([])
msg = str(cm.exception)
self.assertNotRegex(msg, 'req_pos')
self.assertRegex(msg, 'req_opt')
# ==========================
# add_argument metavar tests
# ==========================
class TestAddArgumentMetavar(TestCase):
EXPECTED_MESSAGE = "length of metavar tuple does not match nargs"
def do_test_no_exception(self, nargs, metavar):
parser = argparse.ArgumentParser()
parser.add_argument("--foo", nargs=nargs, metavar=metavar)
def do_test_exception(self, nargs, metavar):
parser = argparse.ArgumentParser()
with self.assertRaises(ValueError) as cm:
parser.add_argument("--foo", nargs=nargs, metavar=metavar)
self.assertEqual(cm.exception.args[0], self.EXPECTED_MESSAGE)
# Unit tests for different values of metavar when nargs=None
def test_nargs_None_metavar_string(self):
self.do_test_no_exception(nargs=None, metavar="1")
def test_nargs_None_metavar_length0(self):
self.do_test_exception(nargs=None, metavar=tuple())
def test_nargs_None_metavar_length1(self):
self.do_test_no_exception(nargs=None, metavar=("1",))
def test_nargs_None_metavar_length2(self):
self.do_test_exception(nargs=None, metavar=("1", "2"))
def test_nargs_None_metavar_length3(self):
self.do_test_exception(nargs=None, metavar=("1", "2", "3"))
# Unit tests for different values of metavar when nargs=?
def test_nargs_optional_metavar_string(self):
self.do_test_no_exception(nargs="?", metavar="1")
def test_nargs_optional_metavar_length0(self):
self.do_test_exception(nargs="?", metavar=tuple())
def test_nargs_optional_metavar_length1(self):
self.do_test_no_exception(nargs="?", metavar=("1",))
def test_nargs_optional_metavar_length2(self):
self.do_test_exception(nargs="?", metavar=("1", "2"))
def test_nargs_optional_metavar_length3(self):
self.do_test_exception(nargs="?", metavar=("1", "2", "3"))
# Unit tests for different values of metavar when nargs=*
def test_nargs_zeroormore_metavar_string(self):
self.do_test_no_exception(nargs="*", metavar="1")
def test_nargs_zeroormore_metavar_length0(self):
self.do_test_exception(nargs="*", metavar=tuple())
def test_nargs_zeroormore_metavar_length1(self):
self.do_test_no_exception(nargs="*", metavar=("1",))
def test_nargs_zeroormore_metavar_length2(self):
self.do_test_no_exception(nargs="*", metavar=("1", "2"))
def test_nargs_zeroormore_metavar_length3(self):
self.do_test_exception(nargs="*", metavar=("1", "2", "3"))
# Unit tests for different values of metavar when nargs=+
def test_nargs_oneormore_metavar_string(self):
self.do_test_no_exception(nargs="+", metavar="1")
def test_nargs_oneormore_metavar_length0(self):
self.do_test_exception(nargs="+", metavar=tuple())
def test_nargs_oneormore_metavar_length1(self):
self.do_test_exception(nargs="+", metavar=("1",))
def test_nargs_oneormore_metavar_length2(self):
self.do_test_no_exception(nargs="+", metavar=("1", "2"))
def test_nargs_oneormore_metavar_length3(self):
self.do_test_exception(nargs="+", metavar=("1", "2", "3"))
# Unit tests for different values of metavar when nargs=...
def test_nargs_remainder_metavar_string(self):
self.do_test_no_exception(nargs="...", metavar="1")
def test_nargs_remainder_metavar_length0(self):
self.do_test_no_exception(nargs="...", metavar=tuple())
def test_nargs_remainder_metavar_length1(self):
self.do_test_no_exception(nargs="...", metavar=("1",))
def test_nargs_remainder_metavar_length2(self):
self.do_test_no_exception(nargs="...", metavar=("1", "2"))
def test_nargs_remainder_metavar_length3(self):
self.do_test_no_exception(nargs="...", metavar=("1", "2", "3"))
# Unit tests for different values of metavar when nargs=A...
def test_nargs_parser_metavar_string(self):
self.do_test_no_exception(nargs="A...", metavar="1")
def test_nargs_parser_metavar_length0(self):
self.do_test_exception(nargs="A...", metavar=tuple())
def test_nargs_parser_metavar_length1(self):
self.do_test_no_exception(nargs="A...", metavar=("1",))
def test_nargs_parser_metavar_length2(self):
self.do_test_exception(nargs="A...", metavar=("1", "2"))
def test_nargs_parser_metavar_length3(self):
self.do_test_exception(nargs="A...", metavar=("1", "2", "3"))
# Unit tests for different values of metavar when nargs=1
def test_nargs_1_metavar_string(self):
self.do_test_no_exception(nargs=1, metavar="1")
def test_nargs_1_metavar_length0(self):
self.do_test_exception(nargs=1, metavar=tuple())
def test_nargs_1_metavar_length1(self):
self.do_test_no_exception(nargs=1, metavar=("1",))
def test_nargs_1_metavar_length2(self):
self.do_test_exception(nargs=1, metavar=("1", "2"))
def test_nargs_1_metavar_length3(self):
self.do_test_exception(nargs=1, metavar=("1", "2", "3"))
# Unit tests for different values of metavar when nargs=2
def test_nargs_2_metavar_string(self):
self.do_test_no_exception(nargs=2, metavar="1")
def test_nargs_2_metavar_length0(self):
self.do_test_exception(nargs=2, metavar=tuple())
def test_nargs_2_metavar_length1(self):
self.do_test_exception(nargs=2, metavar=("1",))
def test_nargs_2_metavar_length2(self):
self.do_test_no_exception(nargs=2, metavar=("1", "2"))
def test_nargs_2_metavar_length3(self):
self.do_test_exception(nargs=2, metavar=("1", "2", "3"))
# Unit tests for different values of metavar when nargs=3
def test_nargs_3_metavar_string(self):
self.do_test_no_exception(nargs=3, metavar="1")
def test_nargs_3_metavar_length0(self):
self.do_test_exception(nargs=3, metavar=tuple())
def test_nargs_3_metavar_length1(self):
self.do_test_exception(nargs=3, metavar=("1",))
def test_nargs_3_metavar_length2(self):
self.do_test_exception(nargs=3, metavar=("1", "2"))
def test_nargs_3_metavar_length3(self):
self.do_test_no_exception(nargs=3, metavar=("1", "2", "3"))
class TestInvalidNargs(TestCase):
EXPECTED_INVALID_MESSAGE = "invalid nargs value"
EXPECTED_RANGE_MESSAGE = ("nargs for store actions must be != 0; if you "
"have nothing to store, actions such as store "
"true or store const may be more appropriate")
def do_test_range_exception(self, nargs):
parser = argparse.ArgumentParser()
with self.assertRaises(ValueError) as cm:
parser.add_argument("--foo", nargs=nargs)
self.assertEqual(cm.exception.args[0], self.EXPECTED_RANGE_MESSAGE)
def do_test_invalid_exception(self, nargs):
parser = argparse.ArgumentParser()
with self.assertRaises(ValueError) as cm:
parser.add_argument("--foo", nargs=nargs)
self.assertEqual(cm.exception.args[0], self.EXPECTED_INVALID_MESSAGE)
# Unit tests for different values of nargs
def test_nargs_alphabetic(self):
self.do_test_invalid_exception(nargs='a')
self.do_test_invalid_exception(nargs="abcd")
def test_nargs_zero(self):
self.do_test_range_exception(nargs=0)
# ============================
# from argparse import * tests
# ============================
class TestImportStar(TestCase):
def test(self):
for name in argparse.__all__:
self.assertTrue(hasattr(argparse, name))
def test_all_exports_everything_but_modules(self):
items = [
name
for name, value in vars(argparse).items()
if not (name.startswith("_") or name == 'ngettext')
if not inspect.ismodule(value)
]
self.assertEqual(sorted(items), sorted(argparse.__all__))
class TestWrappingMetavar(TestCase):
def setUp(self):
super().setUp()
self.parser = ErrorRaisingArgumentParser(
'this_is_spammy_prog_with_a_long_name_sorry_about_the_name'
)
# this metavar was triggering library assertion errors due to usage
# message formatting incorrectly splitting on the ] chars within
metavar = '<http[s]://example:1234>'
self.parser.add_argument('--proxy', metavar=metavar)
def test_help_with_metavar(self):
help_text = self.parser.format_help()
self.assertEqual(help_text, textwrap.dedent('''\
usage: this_is_spammy_prog_with_a_long_name_sorry_about_the_name
[-h] [--proxy <http[s]://example:1234>]
options:
-h, --help show this help message and exit
--proxy <http[s]://example:1234>
'''))
class TestExitOnError(TestCase):
def setUp(self):
self.parser = argparse.ArgumentParser(exit_on_error=False)
self.parser.add_argument('--integers', metavar='N', type=int)
def test_exit_on_error_with_good_args(self):
ns = self.parser.parse_args('--integers 4'.split())
self.assertEqual(ns, argparse.Namespace(integers=4))
def test_exit_on_error_with_bad_args(self):
with self.assertRaises(argparse.ArgumentError):
self.parser.parse_args('--integers a'.split())
def tearDownModule():
# Remove global references to avoid looking like we have refleaks.
RFile.seen = {}
WFile.seen = set()
if __name__ == '__main__':
unittest.main()
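# --- Illustrative sketch (editor's addition, not part of test_argparse.py) ---
# The TestIntermixedArgs cases above hinge on how parse_known_args and
# parse_intermixed_args split positionals around optionals. A minimal,
# self-contained demonstration, assuming only the stdlib argparse module
# (Python 3.7+ for parse_intermixed_args); the parser name and argv are
# made up for illustration:
import argparse

demo_parser = argparse.ArgumentParser(prog='DEMO')
demo_parser.add_argument('--foo')
demo_parser.add_argument('cmd')
demo_parser.add_argument('rest', nargs='*', type=int)
demo_argv = 'cmd --foo x 1 2 3'.split()
# parse_known_args matches 'cmd' and an empty 'rest' in the first positional
# chunk, so the trailing integers end up in extras.
ns, extras = demo_parser.parse_known_args(demo_argv)
assert ns.rest == [] and extras == ['1', '2', '3']
# parse_intermixed_args parses optionals first and defers positionals,
# so 'rest' collects all of the trailing integers.
ns = demo_parser.parse_intermixed_args(demo_argv)
assert ns.rest == [1, 2, 3]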
| 33.460007 | 112 | 0.555975 |
794b9ef97ee26dd7471371a63026dc373f4d1aa0 | 10,188 | py | Python | src/qsiprep_analyses/registrations/registrations.py | GalBenZvi/qsiprep_analyses | 51512cffca218210f6b85e5eadd593b382963bbd | ["Apache-2.0"] | null | null | null |
src/qsiprep_analyses/registrations/registrations.py | GalBenZvi/qsiprep_analyses | 51512cffca218210f6b85e5eadd593b382963bbd | ["Apache-2.0"] | null | null | null |
src/qsiprep_analyses/registrations/registrations.py | GalBenZvi/qsiprep_analyses | 51512cffca218210f6b85e5eadd593b382963bbd | ["Apache-2.0"] | null | null | null |
"""
Definition of the :class:`NativeRegistration` class.
"""
from pathlib import Path
from typing import Tuple, Union
import nibabel as nib
from brain_parts.parcellation.parcellations import (
Parcellation as parcellation_manager,
)
from nilearn.image.resampling import resample_to_img
from nipype.interfaces.base import TraitError
from tqdm import tqdm
from qsiprep_analyses.manager import QsiprepManager
from qsiprep_analyses.registrations.utils import (
DEFAULT_PARCELLATION_NAMING,
PROBSEG_THRESHOLD,
QUERIES,
TRANSFORMS,
)
class NativeRegistration(QsiprepManager):
QUERIES = QUERIES
#: Naming
DEFAULT_PARCELLATION_NAMING = DEFAULT_PARCELLATION_NAMING
#: Types of transformations
TRANSFORMS = TRANSFORMS
#: Default probability segmentations' threshold
PROBSEG_THRESHOLD = PROBSEG_THRESHOLD
def __init__(
self,
base_dir: Path,
participant_labels: Union[str, list] = None,
) -> None:
super().__init__(base_dir, participant_labels)
self.parcellation_manager = parcellation_manager()
def initiate_subject(
self, participant_label: str
) -> Tuple[dict, Path, Path]:
"""
Query the participant's initially required files
Parameters
----------
participant_label : str
Specific participant's label to be queried
Returns
-------
Tuple[dict,Path,Path]
A tuple of required files for parcellation registration.
"""
return [
grabber(participant_label, queries=self.QUERIES)
for grabber in [
self.get_transforms,
self.get_reference,
self.get_probseg,
]
]
def build_output_dictionary(
self,
parcellation_scheme: str,
reference: Path,
reference_type: str,
) -> dict:
"""
Based on a *reference* image,
reconstruct output names for native parcellation naming.
Parameters
----------
reference : Path
The reference image.
reference_type : str
The reference image type (either "anat" or "dwi")
Returns
-------
dict
A dictionary with keys "whole_brain" and "gm_cropped" and their
corresponding output paths
"""
basic_query = dict(
atlas=parcellation_scheme,
resolution=reference_type,
**self.DEFAULT_PARCELLATION_NAMING.copy(),
)
outputs = dict()
for key, label in zip(["whole_brain", "gm_cropped"], ["", "GM"]):
query = basic_query.copy()
query["label"] = label
outputs[key] = self.data_grabber.build_path(reference, query)
return outputs
def register_to_anatomical(
self,
parcellation_scheme: str,
participant_label: str,
probseg_threshold: float = None,
force: bool = False,
) -> dict:
"""
Register a *parcellation scheme* from standard to native anatomical space. # noqa
Parameters
----------
parcellation_scheme : str
A string representing existing key within *self.parcellation_manager.parcellations*.
participant_label : str
Specific participant's label
probseg_threshold : float, optional
Threshold for probability segmentation masking, by default None
force : bool, optional
Whether to re-write existing files, by default False
Returns
-------
dict
A dictionary with keys of "whole_brain" and "gm_cropped" native-spaced parcellation schemes.
"""
transforms, reference, gm_probseg = self.initiate_subject(
participant_label
)
whole_brain, gm_cropped = [
self.build_output_dictionary(
parcellation_scheme, reference, "anat"
).get(key)
for key in ["whole_brain", "gm_cropped"]
]
self.parcellation_manager.register_parcellation_scheme(
parcellation_scheme,
participant_label,
reference,
transforms.get("mni2native"),
whole_brain,
force=force,
)
self.parcellation_manager.crop_to_probseg(
parcellation_scheme,
participant_label,
whole_brain,
gm_probseg,
gm_cropped,
masking_threshold=probseg_threshold or self.PROBSEG_THRESHOLD,
force=force,
)
return whole_brain, gm_cropped
def register_dwi(
self,
parcellation_scheme: str,
participant_label: str,
session: str,
anatomical_whole_brain: Path,
anatomical_gm_cropped: Path,
force: bool = False,
):
"""
Resample parcellation scheme from anatomical to DWI space.
Parameters
----------
parcellation_scheme : str
A string representing existing key within *self.parcellation_manager.parcellations*. # noqa
participant_label : str
Specific participant's label
anatomical_whole_brain : Path
Participant's whole-brain parcellation scheme in anatomical space
anatomical_gm_cropped : Path
Participant's GM-cropped parcellation scheme in anatomical space
force : bool, optional
Whether to re-write existing files, by default False
"""
reference = self.get_reference(
participant_label,
"dwi",
{"session": session},
queries=self.QUERIES,
)
if not reference:
raise FileNotFoundError(
f"Could not find reference file for subject {participant_label}!" # noqa
)
whole_brain, gm_cropped = [
self.build_output_dictionary(
parcellation_scheme, reference, "dwi"
).get(key)
for key in ["whole_brain", "gm_cropped"]
]
for source, target in zip(
[anatomical_whole_brain, anatomical_gm_cropped],
[whole_brain, gm_cropped],
):
if not target.exists() or force:
img = resample_to_img(
str(source), str(reference), interpolation="nearest"
)
nib.save(img, target)
return whole_brain, gm_cropped
def run_single_subject(
self,
parcellation_scheme: str,
participant_label: str,
session: Union[str, list] = None,
probseg_threshold: float = None,
force: bool = False,
) -> dict:
"""
Parameters
----------
parcellation_scheme : str
A string representing existing key within *self.parcellation_manager.parcellations*. # noqa
participant_label : str
Specific participant's label
session : Union[str, list], optional
Specific sessions available for *participant_label*, by default None # noqa
probseg_threshold : float, optional
Threshold for probability segmentation masking, by default None
force : bool, optional
Whether to re-write existing files, by default False
Returns
-------
dict
A dictionary with keys of "anat" and the available (or requested) sessions,
and the corresponding native-space parcellations as values.
"""
outputs = {}
anat_whole_brain, anat_gm_cropped = self.register_to_anatomical(
parcellation_scheme, participant_label, probseg_threshold, force
)
outputs["anat"] = {
"whole_brain": anat_whole_brain,
"gm_cropped": anat_gm_cropped,
}
sessions = self.subjects.get(participant_label) or session
if isinstance(sessions, str):
sessions = [sessions]
for session in sessions:
whole_brain, gm_cropped = self.register_dwi(
parcellation_scheme,
participant_label,
session,
anat_whole_brain,
anat_gm_cropped,
force,
)
outputs[session] = {
"whole_brain": whole_brain,
"gm_cropped": gm_cropped,
}
return outputs
def run_dataset(
self,
parcellation_scheme: str,
participant_label: Union[str, list] = None,
probseg_threshold: float = None,
force: bool = False,
):
"""
Register *parcellation_scheme* to all available (or requested) subjects' native space.
Parameters
----------
parcellation_scheme : str
A string representing existing key within *self.parcellation_manager.parcellations*. # noqa
participant_label : Union[str, list], optional
Specific subject/s within the dataset to run, by default None
probseg_threshold : float, optional
Threshold for probability segmentation masking, by default None
force : bool, optional
Whether to remove existing products and generate new ones, by default False # noqa
"""
native_parcellations = {}
if participant_label:
if isinstance(participant_label, str):
participant_labels = [participant_label]
elif isinstance(participant_label, list):
participant_labels = participant_label
else:
participant_labels = list(sorted(self.subjects.keys()))
for participant_label in tqdm(participant_labels):
try:
native_parcellations[
participant_label
] = self.run_single_subject(
parcellation_scheme,
participant_label,
probseg_threshold=probseg_threshold,
force=force,
)
except (FileNotFoundError, TraitError):
continue
return native_parcellations
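# --- Illustrative usage sketch (editor's addition, not part of this module) ---
# A minimal example of how NativeRegistration might be driven end-to-end; the
# derivatives path and the "brainnetome" parcellation key are assumptions and
# must correspond to an existing qsiprep derivatives layout and an existing key
# in parcellation_manager.parcellations.
registration = NativeRegistration(Path("/path/to/qsiprep/derivatives"))
native_parcellations = registration.run_dataset(
    parcellation_scheme="brainnetome",  # hypothetical parcellation key
    probseg_threshold=0.01,             # overrides PROBSEG_THRESHOLD if given
    force=False,                        # keep existing outputs
)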
| 32.970874 | 104 | 0.590989 |
794b9f871af88887cf121f2b83583fa9eecf4a09 | 17,779 | py | Python | train_cls.py | RamsteinWR/Diabetic-Retinopathy-Blindness-Detection | 24390aeefd197600255a961189872dd4dfc77092 | ["MIT"] | 68 | 2019-09-08T20:04:23.000Z | 2021-05-05T10:05:14.000Z |
train_cls.py | RamsteinWR/Diabetic-Retinopathy-Blindness-Detection | 24390aeefd197600255a961189872dd4dfc77092 | ["MIT"] | 1 | 2019-09-24T06:40:33.000Z | 2019-10-04T09:13:35.000Z |
train_cls.py | RamsteinWR/Diabetic-Retinopathy-Blindness-Detection | 24390aeefd197600255a961189872dd4dfc77092 | ["MIT"] | 25 | 2019-09-09T04:42:51.000Z | 2022-03-28T15:01:30.000Z |
from __future__ import absolute_import
import argparse
import collections
import json
import multiprocessing
import os
from datetime import datetime
import torch
from catalyst.dl import SupervisedRunner, EarlyStoppingCallback
from catalyst.utils import load_checkpoint, unpack_checkpoint
from pytorch_toolbelt.utils import fs
from pytorch_toolbelt.utils.random import set_manual_seed, get_random_name
from pytorch_toolbelt.utils.torch_utils import count_parameters, \
set_trainable
from retinopathy.callbacks import LPRegularizationCallback, \
CustomOptimizerCallback
from retinopathy.dataset import get_class_names, \
get_datasets, get_dataloaders
from retinopathy.factory import get_model, get_optimizer, \
get_optimizable_parameters, get_scheduler
from retinopathy.scripts.clean_checkpoint import clean_checkpoint
from retinopathy.train_utils import report_checkpoint, get_reg_callbacks, get_ord_callbacks, get_cls_callbacks
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=42, help='Random seed')
parser.add_argument('--fast', action='store_true')
parser.add_argument('--mixup', action='store_true')
parser.add_argument('--balance', action='store_true')
parser.add_argument('--balance-datasets', action='store_true')
parser.add_argument('--swa', action='store_true')
parser.add_argument('--show', action='store_true')
parser.add_argument('--use-idrid', action='store_true')
parser.add_argument('--use-messidor', action='store_true')
parser.add_argument('--use-aptos2015', action='store_true')
parser.add_argument('--use-aptos2019', action='store_true')
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument('--coarse', action='store_true')
parser.add_argument('-acc', '--accumulation-steps', type=int, default=1, help='Number of batches to accumulate gradients over')
parser.add_argument('-dd', '--data-dir', type=str, default='data', help='Data directory')
parser.add_argument('-m', '--model', type=str, default='resnet18_gap', help='')
parser.add_argument('-b', '--batch-size', type=int, default=8, help='Batch Size during training, e.g. -b 64')
parser.add_argument('-e', '--epochs', type=int, default=100, help='Epoch to run')
parser.add_argument('-es', '--early-stopping', type=int, default=None,
help='Maximum number of epochs without improvement')
parser.add_argument('-f', '--fold', action='append', type=int, default=None)
parser.add_argument('-ft', '--fine-tune', default=0, type=int)
parser.add_argument('-lr', '--learning-rate', type=float, default=1e-4, help='Initial learning rate')
parser.add_argument('--criterion-reg', type=str, default=None, nargs='+', help='Criterion')
parser.add_argument('--criterion-ord', type=str, default=None, nargs='+', help='Criterion')
parser.add_argument('--criterion-cls', type=str, default=['ce'], nargs='+', help='Criterion')
parser.add_argument('-l1', type=float, default=0, help='L1 regularization loss')
parser.add_argument('-l2', type=float, default=0, help='L2 regularization loss')
parser.add_argument('-o', '--optimizer', default='Adam', help='Name of the optimizer')
parser.add_argument('-p', '--preprocessing', default=None, help='Preprocessing method')
parser.add_argument('-c', '--checkpoint', type=str, default=None,
help='Checkpoint filename to use as initial model weights')
parser.add_argument('-w', '--workers', default=multiprocessing.cpu_count(), type=int, help='Num workers')
parser.add_argument('-a', '--augmentations', default='medium', type=str, help='')
parser.add_argument('-tta', '--tta', default=None, type=str, help='Type of TTA to use [fliplr, d4]')
parser.add_argument('-t', '--transfer', default=None, type=str, help='')
parser.add_argument('--fp16', action='store_true')
parser.add_argument('-s', '--scheduler', default='multistep', type=str, help='')
parser.add_argument('--size', default=512, type=int, help='Image size for training & inference')
parser.add_argument('-wd', '--weight-decay', default=0, type=float, help='L2 weight decay')
parser.add_argument('-wds', '--weight-decay-step', default=None, type=float,
help='L2 weight decay step to add after each epoch')
parser.add_argument('-d', '--dropout', default=0.0, type=float, help='Dropout before head layer')
parser.add_argument('--warmup', default=0, type=int,
help='Number of warmup epochs with 0.1 of the initial LR and a frozen encoder')
parser.add_argument('-x', '--experiment', default=None, type=str, help='Experiment name used as the checkpoint prefix')
args = parser.parse_args()
data_dir = args.data_dir
num_workers = args.workers
num_epochs = args.epochs
batch_size = args.batch_size
learning_rate = args.learning_rate
l1 = args.l1
l2 = args.l2
early_stopping = args.early_stopping
model_name = args.model
optimizer_name = args.optimizer
image_size = (args.size, args.size)
fast = args.fast
augmentations = args.augmentations
fp16 = args.fp16
fine_tune = args.fine_tune
criterion_reg_name = args.criterion_reg
criterion_cls_name = args.criterion_cls
criterion_ord_name = args.criterion_ord
folds = args.fold
mixup = args.mixup
balance = args.balance
balance_datasets = args.balance_datasets
use_swa = args.swa
show_batches = args.show
scheduler_name = args.scheduler
verbose = args.verbose
weight_decay = args.weight_decay
use_idrid = args.use_idrid
use_messidor = args.use_messidor
use_aptos2015 = args.use_aptos2015
use_aptos2019 = args.use_aptos2019
warmup = args.warmup
dropout = args.dropout
use_unsupervised = False
experiment = args.experiment
preprocessing = args.preprocessing
weight_decay_step = args.weight_decay_step
coarse_grading = args.coarse
class_names = get_class_names(coarse_grading)
assert use_aptos2015 or use_aptos2019 or use_idrid or use_messidor
current_time = datetime.now().strftime('%b%d_%H_%M')
random_name = get_random_name()
if folds is None or len(folds) == 0:
folds = [None]
for fold in folds:
torch.cuda.empty_cache()
checkpoint_prefix = f'{model_name}_{args.size}_{augmentations}'
if preprocessing is not None:
checkpoint_prefix += f'_{preprocessing}'
if use_aptos2019:
checkpoint_prefix += '_aptos2019'
if use_aptos2015:
checkpoint_prefix += '_aptos2015'
if use_messidor:
checkpoint_prefix += '_messidor'
if use_idrid:
checkpoint_prefix += '_idrid'
if coarse_grading:
checkpoint_prefix += '_coarse'
if fold is not None:
checkpoint_prefix += f'_fold{fold}'
checkpoint_prefix += f'_{random_name}'
if experiment is not None:
checkpoint_prefix = experiment
directory_prefix = f'{current_time}/{checkpoint_prefix}'
log_dir = os.path.join('runs', directory_prefix)
os.makedirs(log_dir, exist_ok=False)
config_fname = os.path.join(log_dir, f'{checkpoint_prefix}.json')
with open(config_fname, 'w') as f:
train_session_args = vars(args)
f.write(json.dumps(train_session_args, indent=2))
set_manual_seed(args.seed)
num_classes = len(class_names)
model = get_model(model_name, num_classes=num_classes, dropout=dropout).cuda()
if args.transfer:
transfer_checkpoint = fs.auto_file(args.transfer)
print("Transfering weights from model checkpoint",
transfer_checkpoint)
checkpoint = load_checkpoint(transfer_checkpoint)
pretrained_dict = checkpoint['model_state_dict']
for name, value in pretrained_dict.items():
try:
model.load_state_dict(
collections.OrderedDict([(name, value)]), strict=False)
except Exception as e:
print(e)
report_checkpoint(checkpoint)
if args.checkpoint:
checkpoint = load_checkpoint(fs.auto_file(args.checkpoint))
unpack_checkpoint(checkpoint, model=model)
report_checkpoint(checkpoint)
train_ds, valid_ds, train_sizes = get_datasets(data_dir=data_dir,
use_aptos2019=use_aptos2019,
use_aptos2015=use_aptos2015,
use_idrid=use_idrid,
use_messidor=use_messidor,
use_unsupervised=False,
coarse_grading=coarse_grading,
image_size=image_size,
augmentation=augmentations,
preprocessing=preprocessing,
target_dtype=int,
fold=fold,
folds=4)
train_loader, valid_loader = get_dataloaders(train_ds, valid_ds,
batch_size=batch_size,
num_workers=num_workers,
train_sizes=train_sizes,
balance=balance,
balance_datasets=balance_datasets,
balance_unlabeled=False)
loaders = collections.OrderedDict()
loaders["train"] = train_loader
loaders["valid"] = valid_loader
print('Datasets :', data_dir)
print(' Train size :', len(train_loader), len(train_loader.dataset))
print(' Valid size :', len(valid_loader), len(valid_loader.dataset))
print(' Aptos 2019 :', use_aptos2019)
print(' Aptos 2015 :', use_aptos2015)
print(' IDRID :', use_idrid)
print(' Messidor :', use_messidor)
print('Train session :', directory_prefix)
print(' FP16 mode :', fp16)
print(' Fast mode :', fast)
print(' Mixup :', mixup)
print(' Balance cls. :', balance)
print(' Balance ds. :', balance_datasets)
print(' Warmup epoch :', warmup)
print(' Train epochs :', num_epochs)
print(' Fine-tune ephs :', fine_tune)
print(' Workers :', num_workers)
print(' Fold :', fold)
print(' Log dir :', log_dir)
print(' Augmentations :', augmentations)
print('Model :', model_name)
print(' Parameters :', count_parameters(model))
print(' Image size :', image_size)
print(' Dropout :', dropout)
print(' Classes :', class_names, num_classes)
print('Optimizer :', optimizer_name)
print(' Learning rate :', learning_rate)
print(' Batch size :', batch_size)
print(' Criterion (cls):', criterion_cls_name)
print(' Criterion (reg):', criterion_reg_name)
print(' Criterion (ord):', criterion_ord_name)
print(' Scheduler :', scheduler_name)
print(' Weight decay :', weight_decay, weight_decay_step)
print(' L1 reg. :', l1)
print(' L2 reg. :', l2)
print(' Early stopping :', early_stopping)
# model training
callbacks = []
criterions = {}
main_metric = 'cls/kappa'
if criterion_reg_name is not None:
cb, crits = get_reg_callbacks(criterion_reg_name, class_names=class_names, show=show_batches)
callbacks += cb
criterions.update(crits)
if criterion_ord_name is not None:
cb, crits = get_ord_callbacks(criterion_ord_name, class_names=class_names, show=show_batches)
callbacks += cb
criterions.update(crits)
if criterion_cls_name is not None:
cb, crits = get_cls_callbacks(criterion_cls_name,
num_classes=num_classes,
num_epochs=num_epochs, class_names=class_names, show=show_batches)
callbacks += cb
criterions.update(crits)
if l1 > 0:
callbacks += [LPRegularizationCallback(start_wd=l1, end_wd=l1, schedule=None, prefix='l1', p=1)]
if l2 > 0:
callbacks += [LPRegularizationCallback(start_wd=l2, end_wd=l2, schedule=None, prefix='l2', p=2)]
callbacks += [
CustomOptimizerCallback()
]
runner = SupervisedRunner(input_key='image')
# Pretrain/warmup
if warmup:
set_trainable(model.encoder, False, False)
optimizer = get_optimizer('Adam', get_optimizable_parameters(model),
learning_rate=learning_rate * 0.1)
runner.train(
fp16=fp16,
model=model,
criterion=criterions,
optimizer=optimizer,
scheduler=None,
callbacks=callbacks,
loaders=loaders,
logdir=os.path.join(log_dir, 'warmup'),
num_epochs=warmup,
verbose=verbose,
main_metric=main_metric,
minimize_metric=False,
checkpoint_data={"cmd_args": vars(args)}
)
del optimizer
# Main train
if num_epochs:
set_trainable(model.encoder, True, False)
optimizer = get_optimizer(optimizer_name, get_optimizable_parameters(model),
learning_rate=learning_rate,
weight_decay=weight_decay)
if use_swa:
from torchcontrib.optim import SWA
optimizer = SWA(optimizer,
swa_start=len(train_loader),
swa_freq=512)
scheduler = get_scheduler(scheduler_name, optimizer,
lr=learning_rate,
num_epochs=num_epochs,
batches_in_epoch=len(train_loader))
# Additional callbacks specific to the main stage are added to a copy of the callbacks list
main_stage_callbacks = callbacks
if early_stopping:
es_callback = EarlyStoppingCallback(early_stopping,
min_delta=1e-4,
metric=main_metric, minimize=False)
main_stage_callbacks = callbacks + [es_callback]
runner.train(
fp16=fp16,
model=model,
criterion=criterions,
optimizer=optimizer,
scheduler=scheduler,
callbacks=main_stage_callbacks,
loaders=loaders,
logdir=os.path.join(log_dir, 'main'),
num_epochs=num_epochs,
verbose=verbose,
main_metric=main_metric,
minimize_metric=False,
checkpoint_data={"cmd_args": vars(args)}
)
del optimizer, scheduler
best_checkpoint = os.path.join(log_dir, 'main', 'checkpoints', 'best.pth')
model_checkpoint = os.path.join(log_dir, 'main', 'checkpoints', f'{checkpoint_prefix}.pth')
clean_checkpoint(best_checkpoint, model_checkpoint)
# Restoring best model from checkpoint
checkpoint = load_checkpoint(best_checkpoint)
unpack_checkpoint(checkpoint, model=model)
report_checkpoint(checkpoint)
# Stage 3 - Fine tuning
if fine_tune:
set_trainable(model.encoder, False, False)
optimizer = get_optimizer(optimizer_name, get_optimizable_parameters(model),
learning_rate=learning_rate)
scheduler = get_scheduler('multistep', optimizer,
lr=learning_rate,
num_epochs=fine_tune,
batches_in_epoch=len(train_loader))
runner.train(
fp16=fp16,
model=model,
criterion=criterions,
optimizer=optimizer,
scheduler=scheduler,
callbacks=callbacks,
loaders=loaders,
logdir=os.path.join(log_dir, 'finetune'),
num_epochs=fine_tune,
verbose=verbose,
main_metric=main_metric,
minimize_metric=False,
checkpoint_data={"cmd_args": vars(args)}
)
best_checkpoint = os.path.join(log_dir, 'finetune', 'checkpoints', 'best.pth')
model_checkpoint = os.path.join(log_dir, 'finetune', 'checkpoints', f'{checkpoint_prefix}.pth')
clean_checkpoint(best_checkpoint, model_checkpoint)
if __name__ == '__main__':
with torch.autograd.detect_anomaly():
main()
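# --- Example invocation (editor's addition; paths and flag values are placeholders,
# chosen only to show the argument parser defined in main()) ---
#   python train_cls.py -m resnet18_gap --size 512 -b 32 -e 100 -lr 1e-4 \
#       --use-aptos2019 --use-idrid -dd /path/to/data --fp16 -x my_experiment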
| 45.010127 | 113 | 0.584229 |
794b9faa507659b7057e2debf1173a8b3bc95ef5 | 38,423 | py | Python | pybind/slxos/v16r_1_00b/brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_instances_info/lsp_instances/lsp_cspf_exclude_hops/show_mpls_lsp_hop_list/hop/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | ["Apache-2.0"] | null | null | null |
pybind/slxos/v16r_1_00b/brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_instances_info/lsp_instances/lsp_cspf_exclude_hops/show_mpls_lsp_hop_list/hop/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | ["Apache-2.0"] | null | null | null |
pybind/slxos/v16r_1_00b/brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_instances_info/lsp_instances/lsp_cspf_exclude_hops/show_mpls_lsp_hop_list/hop/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | ["Apache-2.0"] | 1 | 2021-11-05T22:15:42.000Z | 2021-11-05T22:15:42.000Z |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class hop(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-mpls - based on the path /brocade_mpls_rpc/show-mpls-lsp-extensive/output/lsp/show-mpls-lsp-extensive-info/show-mpls-lsp-instances-info/lsp-instances/lsp-cspf-exclude-hops/show-mpls-lsp-hop-list/hop. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__lsp_hop_address','__lsp_hop_strict_hop','__lsp_hop_loose_hop','__lsp_hop_is_router_id','__lsp_hop_has_protection','__lsp_hop_has_node_protection','__lsp_hop_has_bandwidth_protection','__lsp_hop_has_protection_in_use','__lsp_hop_avoid_node','__lsp_hop_avoid_local','__lsp_hop_avoid_remote',)
_yang_name = 'hop'
_rest_name = 'hop'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__lsp_hop_avoid_remote = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-hop-avoid-remote", rest_name="lsp-hop-avoid-remote", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
self.__lsp_hop_avoid_node = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-hop-avoid-node", rest_name="lsp-hop-avoid-node", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
self.__lsp_hop_has_protection_in_use = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-hop-has-protection-in-use", rest_name="lsp-hop-has-protection-in-use", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
self.__lsp_hop_has_protection = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-hop-has-protection", rest_name="lsp-hop-has-protection", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
self.__lsp_hop_avoid_local = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-hop-avoid-local", rest_name="lsp-hop-avoid-local", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
self.__lsp_hop_has_bandwidth_protection = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-hop-has-bandwidth-protection", rest_name="lsp-hop-has-bandwidth-protection", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
self.__lsp_hop_strict_hop = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-hop-strict-hop", rest_name="lsp-hop-strict-hop", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
self.__lsp_hop_is_router_id = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-hop-is-router-id", rest_name="lsp-hop-is-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
self.__lsp_hop_address = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="lsp-hop-address", rest_name="lsp-hop-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='inet:ipv4-address', is_config=True)
self.__lsp_hop_has_node_protection = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-hop-has-node-protection", rest_name="lsp-hop-has-node-protection", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
self.__lsp_hop_loose_hop = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-hop-loose-hop", rest_name="lsp-hop-loose-hop", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'brocade_mpls_rpc', u'show-mpls-lsp-extensive', u'output', u'lsp', u'show-mpls-lsp-extensive-info', u'show-mpls-lsp-instances-info', u'lsp-instances', u'lsp-cspf-exclude-hops', u'show-mpls-lsp-hop-list', u'hop']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'show-mpls-lsp-extensive', u'output', u'lsp', u'lsp-instances', u'lsp-cspf-exclude-hops', u'hop']
def _get_lsp_hop_address(self):
"""
Getter method for lsp_hop_address, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_instances_info/lsp_instances/lsp_cspf_exclude_hops/show_mpls_lsp_hop_list/hop/lsp_hop_address (inet:ipv4-address)
YANG Description: Hop IP address
"""
return self.__lsp_hop_address
def _set_lsp_hop_address(self, v, load=False):
"""
Setter method for lsp_hop_address, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_instances_info/lsp_instances/lsp_cspf_exclude_hops/show_mpls_lsp_hop_list/hop/lsp_hop_address (inet:ipv4-address)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_hop_address is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_hop_address() directly.
YANG Description: Hop IP address
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="lsp-hop-address", rest_name="lsp-hop-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='inet:ipv4-address', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_hop_address must be of a type compatible with inet:ipv4-address""",
'defined-type': "inet:ipv4-address",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="lsp-hop-address", rest_name="lsp-hop-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='inet:ipv4-address', is_config=True)""",
})
self.__lsp_hop_address = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_hop_address(self):
self.__lsp_hop_address = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="lsp-hop-address", rest_name="lsp-hop-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='inet:ipv4-address', is_config=True)
def _get_lsp_hop_strict_hop(self):
"""
Getter method for lsp_hop_strict_hop, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_instances_info/lsp_instances/lsp_cspf_exclude_hops/show_mpls_lsp_hop_list/hop/lsp_hop_strict_hop (boolean)
YANG Description: CSPF path Strict hop
"""
return self.__lsp_hop_strict_hop
def _set_lsp_hop_strict_hop(self, v, load=False):
"""
Setter method for lsp_hop_strict_hop, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_instances_info/lsp_instances/lsp_cspf_exclude_hops/show_mpls_lsp_hop_list/hop/lsp_hop_strict_hop (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_hop_strict_hop is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_hop_strict_hop() directly.
YANG Description: CSPF path Strict hop
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="lsp-hop-strict-hop", rest_name="lsp-hop-strict-hop", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_hop_strict_hop must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-hop-strict-hop", rest_name="lsp-hop-strict-hop", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)""",
})
self.__lsp_hop_strict_hop = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_hop_strict_hop(self):
self.__lsp_hop_strict_hop = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-hop-strict-hop", rest_name="lsp-hop-strict-hop", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
def _get_lsp_hop_loose_hop(self):
"""
Getter method for lsp_hop_loose_hop, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_instances_info/lsp_instances/lsp_cspf_exclude_hops/show_mpls_lsp_hop_list/hop/lsp_hop_loose_hop (boolean)
YANG Description: CSPF path Loose hop
"""
return self.__lsp_hop_loose_hop
def _set_lsp_hop_loose_hop(self, v, load=False):
"""
Setter method for lsp_hop_loose_hop, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_instances_info/lsp_instances/lsp_cspf_exclude_hops/show_mpls_lsp_hop_list/hop/lsp_hop_loose_hop (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_hop_loose_hop is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_hop_loose_hop() directly.
YANG Description: CSPF path Loose hop
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="lsp-hop-loose-hop", rest_name="lsp-hop-loose-hop", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_hop_loose_hop must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-hop-loose-hop", rest_name="lsp-hop-loose-hop", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)""",
})
self.__lsp_hop_loose_hop = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_hop_loose_hop(self):
self.__lsp_hop_loose_hop = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-hop-loose-hop", rest_name="lsp-hop-loose-hop", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
def _get_lsp_hop_is_router_id(self):
"""
Getter method for lsp_hop_is_router_id, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_instances_info/lsp_instances/lsp_cspf_exclude_hops/show_mpls_lsp_hop_list/hop/lsp_hop_is_router_id (boolean)
YANG Description: Hop is a router id hop
"""
return self.__lsp_hop_is_router_id
def _set_lsp_hop_is_router_id(self, v, load=False):
"""
Setter method for lsp_hop_is_router_id, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_instances_info/lsp_instances/lsp_cspf_exclude_hops/show_mpls_lsp_hop_list/hop/lsp_hop_is_router_id (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_hop_is_router_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_hop_is_router_id() directly.
YANG Description: Hop is a router id hop
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="lsp-hop-is-router-id", rest_name="lsp-hop-is-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_hop_is_router_id must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-hop-is-router-id", rest_name="lsp-hop-is-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)""",
})
self.__lsp_hop_is_router_id = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_hop_is_router_id(self):
self.__lsp_hop_is_router_id = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-hop-is-router-id", rest_name="lsp-hop-is-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
def _get_lsp_hop_has_protection(self):
"""
Getter method for lsp_hop_has_protection, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_instances_info/lsp_instances/lsp_cspf_exclude_hops/show_mpls_lsp_hop_list/hop/lsp_hop_has_protection (boolean)
YANG Description: RRO hop Protection available
"""
return self.__lsp_hop_has_protection
def _set_lsp_hop_has_protection(self, v, load=False):
"""
Setter method for lsp_hop_has_protection, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_instances_info/lsp_instances/lsp_cspf_exclude_hops/show_mpls_lsp_hop_list/hop/lsp_hop_has_protection (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_hop_has_protection is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_hop_has_protection() directly.
YANG Description: RRO hop Protection available
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="lsp-hop-has-protection", rest_name="lsp-hop-has-protection", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_hop_has_protection must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-hop-has-protection", rest_name="lsp-hop-has-protection", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)""",
})
self.__lsp_hop_has_protection = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_hop_has_protection(self):
self.__lsp_hop_has_protection = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-hop-has-protection", rest_name="lsp-hop-has-protection", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
def _get_lsp_hop_has_node_protection(self):
"""
Getter method for lsp_hop_has_node_protection, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_instances_info/lsp_instances/lsp_cspf_exclude_hops/show_mpls_lsp_hop_list/hop/lsp_hop_has_node_protection (boolean)
YANG Description: RRO hop Node Protection available
"""
return self.__lsp_hop_has_node_protection
def _set_lsp_hop_has_node_protection(self, v, load=False):
"""
Setter method for lsp_hop_has_node_protection, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_instances_info/lsp_instances/lsp_cspf_exclude_hops/show_mpls_lsp_hop_list/hop/lsp_hop_has_node_protection (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_hop_has_node_protection is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_hop_has_node_protection() directly.
YANG Description: RRO hop Node Protection available
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="lsp-hop-has-node-protection", rest_name="lsp-hop-has-node-protection", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_hop_has_node_protection must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-hop-has-node-protection", rest_name="lsp-hop-has-node-protection", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)""",
})
self.__lsp_hop_has_node_protection = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_hop_has_node_protection(self):
self.__lsp_hop_has_node_protection = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-hop-has-node-protection", rest_name="lsp-hop-has-node-protection", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
def _get_lsp_hop_has_bandwidth_protection(self):
"""
Getter method for lsp_hop_has_bandwidth_protection, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_instances_info/lsp_instances/lsp_cspf_exclude_hops/show_mpls_lsp_hop_list/hop/lsp_hop_has_bandwidth_protection (boolean)
YANG Description: RRO hop bandwidth Protection available
"""
return self.__lsp_hop_has_bandwidth_protection
def _set_lsp_hop_has_bandwidth_protection(self, v, load=False):
"""
Setter method for lsp_hop_has_bandwidth_protection, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_instances_info/lsp_instances/lsp_cspf_exclude_hops/show_mpls_lsp_hop_list/hop/lsp_hop_has_bandwidth_protection (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_hop_has_bandwidth_protection is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_hop_has_bandwidth_protection() directly.
YANG Description: RRO hop bandwidth Protection available
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="lsp-hop-has-bandwidth-protection", rest_name="lsp-hop-has-bandwidth-protection", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_hop_has_bandwidth_protection must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-hop-has-bandwidth-protection", rest_name="lsp-hop-has-bandwidth-protection", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)""",
})
self.__lsp_hop_has_bandwidth_protection = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_hop_has_bandwidth_protection(self):
self.__lsp_hop_has_bandwidth_protection = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-hop-has-bandwidth-protection", rest_name="lsp-hop-has-bandwidth-protection", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
def _get_lsp_hop_has_protection_in_use(self):
"""
Getter method for lsp_hop_has_protection_in_use, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_instances_info/lsp_instances/lsp_cspf_exclude_hops/show_mpls_lsp_hop_list/hop/lsp_hop_has_protection_in_use (boolean)
YANG Description: RRO hop protection is in use
"""
return self.__lsp_hop_has_protection_in_use
def _set_lsp_hop_has_protection_in_use(self, v, load=False):
"""
Setter method for lsp_hop_has_protection_in_use, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_instances_info/lsp_instances/lsp_cspf_exclude_hops/show_mpls_lsp_hop_list/hop/lsp_hop_has_protection_in_use (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_hop_has_protection_in_use is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_hop_has_protection_in_use() directly.
YANG Description: RRO hop protection is in use
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="lsp-hop-has-protection-in-use", rest_name="lsp-hop-has-protection-in-use", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_hop_has_protection_in_use must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-hop-has-protection-in-use", rest_name="lsp-hop-has-protection-in-use", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)""",
})
self.__lsp_hop_has_protection_in_use = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_hop_has_protection_in_use(self):
self.__lsp_hop_has_protection_in_use = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-hop-has-protection-in-use", rest_name="lsp-hop-has-protection-in-use", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
def _get_lsp_hop_avoid_node(self):
"""
Getter method for lsp_hop_avoid_node, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_instances_info/lsp_instances/lsp_cspf_exclude_hops/show_mpls_lsp_hop_list/hop/lsp_hop_avoid_node (boolean)
YANG Description: Avoid address type is node
"""
return self.__lsp_hop_avoid_node
def _set_lsp_hop_avoid_node(self, v, load=False):
"""
Setter method for lsp_hop_avoid_node, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_instances_info/lsp_instances/lsp_cspf_exclude_hops/show_mpls_lsp_hop_list/hop/lsp_hop_avoid_node (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_hop_avoid_node is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_hop_avoid_node() directly.
YANG Description: Avoid address type is node
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="lsp-hop-avoid-node", rest_name="lsp-hop-avoid-node", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_hop_avoid_node must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-hop-avoid-node", rest_name="lsp-hop-avoid-node", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)""",
})
self.__lsp_hop_avoid_node = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_hop_avoid_node(self):
self.__lsp_hop_avoid_node = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-hop-avoid-node", rest_name="lsp-hop-avoid-node", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
def _get_lsp_hop_avoid_local(self):
"""
Getter method for lsp_hop_avoid_local, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_instances_info/lsp_instances/lsp_cspf_exclude_hops/show_mpls_lsp_hop_list/hop/lsp_hop_avoid_local (boolean)
YANG Description: Avoid address type is local
"""
return self.__lsp_hop_avoid_local
def _set_lsp_hop_avoid_local(self, v, load=False):
"""
Setter method for lsp_hop_avoid_local, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_instances_info/lsp_instances/lsp_cspf_exclude_hops/show_mpls_lsp_hop_list/hop/lsp_hop_avoid_local (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_hop_avoid_local is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_hop_avoid_local() directly.
YANG Description: Avoid address type is local
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="lsp-hop-avoid-local", rest_name="lsp-hop-avoid-local", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_hop_avoid_local must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-hop-avoid-local", rest_name="lsp-hop-avoid-local", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)""",
})
self.__lsp_hop_avoid_local = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_hop_avoid_local(self):
self.__lsp_hop_avoid_local = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-hop-avoid-local", rest_name="lsp-hop-avoid-local", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
def _get_lsp_hop_avoid_remote(self):
"""
Getter method for lsp_hop_avoid_remote, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_instances_info/lsp_instances/lsp_cspf_exclude_hops/show_mpls_lsp_hop_list/hop/lsp_hop_avoid_remote (boolean)
YANG Description: Avoid address type is remote
"""
return self.__lsp_hop_avoid_remote
def _set_lsp_hop_avoid_remote(self, v, load=False):
"""
Setter method for lsp_hop_avoid_remote, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_instances_info/lsp_instances/lsp_cspf_exclude_hops/show_mpls_lsp_hop_list/hop/lsp_hop_avoid_remote (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_hop_avoid_remote is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_hop_avoid_remote() directly.
YANG Description: Avoid address type is remote
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="lsp-hop-avoid-remote", rest_name="lsp-hop-avoid-remote", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_hop_avoid_remote must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-hop-avoid-remote", rest_name="lsp-hop-avoid-remote", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)""",
})
self.__lsp_hop_avoid_remote = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_hop_avoid_remote(self):
self.__lsp_hop_avoid_remote = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-hop-avoid-remote", rest_name="lsp-hop-avoid-remote", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
lsp_hop_address = __builtin__.property(_get_lsp_hop_address, _set_lsp_hop_address)
lsp_hop_strict_hop = __builtin__.property(_get_lsp_hop_strict_hop, _set_lsp_hop_strict_hop)
lsp_hop_loose_hop = __builtin__.property(_get_lsp_hop_loose_hop, _set_lsp_hop_loose_hop)
lsp_hop_is_router_id = __builtin__.property(_get_lsp_hop_is_router_id, _set_lsp_hop_is_router_id)
lsp_hop_has_protection = __builtin__.property(_get_lsp_hop_has_protection, _set_lsp_hop_has_protection)
lsp_hop_has_node_protection = __builtin__.property(_get_lsp_hop_has_node_protection, _set_lsp_hop_has_node_protection)
lsp_hop_has_bandwidth_protection = __builtin__.property(_get_lsp_hop_has_bandwidth_protection, _set_lsp_hop_has_bandwidth_protection)
lsp_hop_has_protection_in_use = __builtin__.property(_get_lsp_hop_has_protection_in_use, _set_lsp_hop_has_protection_in_use)
lsp_hop_avoid_node = __builtin__.property(_get_lsp_hop_avoid_node, _set_lsp_hop_avoid_node)
lsp_hop_avoid_local = __builtin__.property(_get_lsp_hop_avoid_local, _set_lsp_hop_avoid_local)
lsp_hop_avoid_remote = __builtin__.property(_get_lsp_hop_avoid_remote, _set_lsp_hop_avoid_remote)
_pyangbind_elements = {'lsp_hop_address': lsp_hop_address, 'lsp_hop_strict_hop': lsp_hop_strict_hop, 'lsp_hop_loose_hop': lsp_hop_loose_hop, 'lsp_hop_is_router_id': lsp_hop_is_router_id, 'lsp_hop_has_protection': lsp_hop_has_protection, 'lsp_hop_has_node_protection': lsp_hop_has_node_protection, 'lsp_hop_has_bandwidth_protection': lsp_hop_has_bandwidth_protection, 'lsp_hop_has_protection_in_use': lsp_hop_has_protection_in_use, 'lsp_hop_avoid_node': lsp_hop_avoid_node, 'lsp_hop_avoid_local': lsp_hop_avoid_local, 'lsp_hop_avoid_remote': lsp_hop_avoid_remote, }
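# Hedged usage sketch (not part of the generated module): the setter docstrings above
# state that backends populate these leaves by calling the _set_* methods directly,
# while ordinary callers go through the generated properties. Assuming `hop_obj` is an
# instance of the hop container class generated earlier in this module (name assumed):
#
#   hop_obj.lsp_hop_loose_hop = True           # routed through _set_lsp_hop_loose_hop()
#   hop_obj._set_lsp_hop_is_router_id(False)   # direct setter call, as the docstrings describe
#   bool(hop_obj.lsp_hop_loose_hop)            # the stored value is a YANGBool wrapper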
| 73.60728
| 566
| 0.767145
|
794b9fc0bd3a2bbf84e50b8326190554d02780f9
| 591
|
py
|
Python
|
maths/fibonacci_sequence_recursion.py
|
jenia90/Python
|
696fb4a681ad9e4d84e0d2b894daf449a3e30b24
|
[
"MIT"
] | 21
|
2020-11-29T11:34:44.000Z
|
2021-06-04T05:50:33.000Z
|
maths/fibonacci_sequence_recursion.py
|
Agha-Muqarib/Python
|
04f156a8973d6156a4357e0717d9eb0aa264d086
|
[
"MIT"
] | 279
|
2020-02-12T20:51:09.000Z
|
2021-07-20T11:25:19.000Z
|
maths/fibonacci_sequence_recursion.py
|
Agha-Muqarib/Python
|
04f156a8973d6156a4357e0717d9eb0aa264d086
|
[
"MIT"
] | 12
|
2020-10-03T05:44:19.000Z
|
2022-01-16T05:37:54.000Z
|
# Fibonacci Sequence Using Recursion
def recur_fibo(n: int) -> int:
"""
>>> [recur_fibo(i) for i in range(12)]
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
"""
return n if n <= 1 else recur_fibo(n - 1) + recur_fibo(n - 2)
def main() -> None:
limit = int(input("How many terms to include in fibonacci series: "))
if limit > 0:
print(f"The first {limit} terms of the fibonacci series are as follows:")
print([recur_fibo(n) for n in range(limit)])
else:
print("Please enter a positive integer: ")
if __name__ == "__main__":
main()
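# Hedged addition (not in the original file): the plain recursion above recomputes
# subproblems and is exponential in n. A memoised variant using functools.lru_cache
# keeps the same interface while running in linear time; shown only as an
# illustrative alternative.
from functools import lru_cache
@lru_cache(maxsize=None)
def recur_fibo_cached(n: int) -> int:
    """
    >>> [recur_fibo_cached(i) for i in range(12)]
    [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
    """
    return n if n <= 1 else recur_fibo_cached(n - 1) + recur_fibo_cached(n - 2)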
| 25.695652
| 81
| 0.58714
|
794b9ffdcd804e4faf789bf479a655340581cafa
| 1,071
|
py
|
Python
|
udemy/Machine Learning A-Z/Part 2 - Regression/Section 7 - Support Vector Regression (SVR)/svr.py
|
balazssimon/ml-playground
|
c2eba497bebc53e5a03807bdd8873c55f0ec73e1
|
[
"Apache-2.0"
] | null | null | null |
udemy/Machine Learning A-Z/Part 2 - Regression/Section 7 - Support Vector Regression (SVR)/svr.py
|
balazssimon/ml-playground
|
c2eba497bebc53e5a03807bdd8873c55f0ec73e1
|
[
"Apache-2.0"
] | null | null | null |
udemy/Machine Learning A-Z/Part 2 - Regression/Section 7 - Support Vector Regression (SVR)/svr.py
|
balazssimon/ml-playground
|
c2eba497bebc53e5a03807bdd8873c55f0ec73e1
|
[
"Apache-2.0"
] | null | null | null |
# SVR
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values.reshape(-1,1)
# Splitting the dataset into the Training set and Test set
"""from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)"""
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X = sc_X.fit_transform(X)
sc_y = StandardScaler()
y = sc_y.fit_transform(y)
# Fitting SVR to the dataset
from sklearn.svm import SVR
regressor = SVR(kernel='rbf')
regressor.fit(X, y)
# Predicting a new result
y_pred = sc_y.inverse_transform(regressor.predict(sc_X.transform(np.array([[6.5]]))))
# Visualising the Regression results
plt.scatter(X, y, color = 'red')
plt.plot(X, regressor.predict(X), color = 'blue')
plt.title('Truth or Bluff (SVR)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
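# Hedged addition (not in the original script): newer scikit-learn versions warn about a
# column-vector target in SVR.fit and require a 2-D input for StandardScaler.inverse_transform,
# so a version-tolerant form of the prediction step above looks like this (same objects,
# purely illustrative):
regressor.fit(X, y.ravel())
y_pred_compat = sc_y.inverse_transform(
    regressor.predict(sc_X.transform(np.array([[6.5]]))).reshape(-1, 1)
)
print(y_pred_compat.ravel())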
| 26.775
| 95
| 0.746032
|
794ba053de9a3d0898364888d77f04903e464997
| 15,273
|
py
|
Python
|
trainval_model.py
|
spyflying/Referring-Image-Segmentation-via-Cross-Modal-Progressive-Comprehension
|
094639b8bf00cc169ea7b49cdf9c87fdfc70d963
|
[
"MIT"
] | 52
|
2020-06-17T08:35:36.000Z
|
2022-03-22T08:04:12.000Z
|
trainval_model.py
|
spyflying/Referring-Image-Segmentation-via-Cross-Modal-Progressive-Comprehension
|
094639b8bf00cc169ea7b49cdf9c87fdfc70d963
|
[
"MIT"
] | 6
|
2020-06-09T11:06:23.000Z
|
2022-01-07T05:40:59.000Z
|
trainval_model.py
|
spyflying/Referring-Image-Segmentation-via-Cross-Modal-Progressive-Comprehension
|
094639b8bf00cc169ea7b49cdf9c87fdfc70d963
|
[
"MIT"
] | 15
|
2020-06-23T22:51:59.000Z
|
2021-08-07T10:40:57.000Z
|
from __future__ import division
import sys
import os
import argparse
import tensorflow as tf
import skimage
from skimage import io as sio
import time
# import matplotlib.pyplot as plt
from get_model import get_segmentation_model
from pydensecrf import densecrf
from util import data_reader
from util.processing_tools import *
from util import im_processing, eval_tools, MovingAverage
def train(max_iter, snapshot, dataset, setname, mu, lr, bs, tfmodel_folder,
conv5, model_name, stop_iter, pre_emb=False):
iters_per_log = 100
data_folder = './' + dataset + '/' + setname + '_batch/'
data_prefix = dataset + '_' + setname
snapshot_file = os.path.join(tfmodel_folder, dataset + '_iter_%d.tfmodel')
if not os.path.isdir(tfmodel_folder):
os.makedirs(tfmodel_folder)
cls_loss_avg = 0
avg_accuracy_all, avg_accuracy_pos, avg_accuracy_neg = 0, 0, 0
decay = 0.99
vocab_size = 8803 if dataset == 'referit' else 12112
emb_name = 'referit' if dataset == 'referit' else 'Gref'
if pre_emb:
print("Use pretrained Embeddings.")
model = get_segmentation_model(model_name, mode='train',
vocab_size=vocab_size, start_lr=lr,
batch_size=bs, conv5=conv5, emb_name=emb_name)
else:
model = get_segmentation_model(model_name, mode='train',
vocab_size=vocab_size, start_lr=lr,
batch_size=bs, conv5=conv5)
weights = './data/weights/deeplab_resnet_init.ckpt'
print("Loading pretrained weights from {}".format(weights))
load_var = {var.op.name: var for var in tf.global_variables()
if var.name.startswith('res') or var.name.startswith('bn') or var.name.startswith('conv1')}
snapshot_loader = tf.train.Saver(load_var)
snapshot_saver = tf.train.Saver(max_to_keep=4)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
sess.run(tf.global_variables_initializer())
snapshot_loader.restore(sess, weights)
im_h, im_w, num_steps = model.H, model.W, model.num_steps
text_batch = np.zeros((bs, num_steps), dtype=np.float32)
image_batch = np.zeros((bs, im_h, im_w, 3), dtype=np.float32)
mask_batch = np.zeros((bs, im_h, im_w, 1), dtype=np.float32)
valid_idx_batch = np.zeros((bs, 1), dtype=np.int32)
reader = data_reader.DataReader(data_folder, data_prefix)
# for time calculate
last_time = time.time()
time_avg = MovingAverage()
for n_iter in range(max_iter):
for n_batch in range(bs):
batch = reader.read_batch(is_log=(n_batch == 0 and n_iter % iters_per_log == 0))
text = batch['text_batch']
im = batch['im_batch'].astype(np.float32)
mask = np.expand_dims(batch['mask_batch'].astype(np.float32), axis=2)
im = im[:, :, ::-1]
im -= mu
text_batch[n_batch, ...] = text
image_batch[n_batch, ...] = im
mask_batch[n_batch, ...] = mask
for idx in range(text.shape[0]):
if text[idx] != 0:
valid_idx_batch[n_batch, :] = idx
break
_, cls_loss_val, lr_val, scores_val, label_val = sess.run([model.train_step,
model.cls_loss,
model.learning_rate,
model.pred,
model.target],
feed_dict={
model.words: text_batch,
# np.expand_dims(text, axis=0),
model.im: image_batch,
# np.expand_dims(im, axis=0),
model.target_fine: mask_batch,
# np.expand_dims(mask, axis=0)
model.valid_idx: valid_idx_batch
})
cls_loss_avg = decay * cls_loss_avg + (1 - decay) * cls_loss_val
# Accuracy
accuracy_all, accuracy_pos, accuracy_neg = compute_accuracy(scores_val, label_val)
avg_accuracy_all = decay * avg_accuracy_all + (1 - decay) * accuracy_all
avg_accuracy_pos = decay * avg_accuracy_pos + (1 - decay) * accuracy_pos
avg_accuracy_neg = decay * avg_accuracy_neg + (1 - decay) * accuracy_neg
# timing
cur_time = time.time()
elapsed = cur_time - last_time
last_time = cur_time
if n_iter % iters_per_log == 0:
print('iter = %d, loss (cur) = %f, loss (avg) = %f, lr = %f'
% (n_iter, cls_loss_val, cls_loss_avg, lr_val))
print('iter = %d, accuracy (cur) = %f (all), %f (pos), %f (neg)'
% (n_iter, accuracy_all, accuracy_pos, accuracy_neg))
print('iter = %d, accuracy (avg) = %f (all), %f (pos), %f (neg)'
% (n_iter, avg_accuracy_all, avg_accuracy_pos, avg_accuracy_neg))
time_avg.add(elapsed)
print('iter = %d, cur time = %.5f, avg time = %.5f, model_name: %s' % (n_iter, elapsed, time_avg.get_avg(), model_name))
# Save snapshot
if (n_iter + 1) % snapshot == 0 or (n_iter + 1) >= max_iter:
snapshot_saver.save(sess, snapshot_file % (n_iter + 1))
print('snapshot saved to ' + snapshot_file % (n_iter + 1))
if (n_iter + 1) >= stop_iter:
print('stop training at iter ' + str(stop_iter))
break
print('Optimization done.')
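# Hedged sketch (not in the original file): MovingAverage is imported from util above but
# not included in this snippet; a minimal stand-in with the add()/get_avg() interface used
# by train() could look like this.
class _MovingAverageSketch(object):
    """Running mean of every value passed to add(); interface assumed from usage above."""
    def __init__(self):
        self.total = 0.0
        self.count = 0
    def add(self, value):
        self.total += value
        self.count += 1
    def get_avg(self):
        return self.total / max(self.count, 1)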
def test(iter, dataset, visualize, setname, dcrf, mu, tfmodel_folder, model_name, pre_emb=False):
data_folder = './' + dataset + '/' + setname + '_batch/'
data_prefix = dataset + '_' + setname
if visualize:
save_dir = './' + dataset + '/visualization/' + str(iter) + '/'
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
weights = os.path.join(tfmodel_folder, dataset + '_iter_' + str(iter) + '.tfmodel')
print("Loading trained weights from {}".format(weights))
score_thresh = 1e-9
eval_seg_iou_list = [.5, .6, .7, .8, .9]
cum_I, cum_U = 0, 0
mean_IoU, mean_dcrf_IoU = 0, 0
seg_correct = np.zeros(len(eval_seg_iou_list), dtype=np.int32)
if dcrf:
cum_I_dcrf, cum_U_dcrf = 0, 0
seg_correct_dcrf = np.zeros(len(eval_seg_iou_list), dtype=np.int32)
seg_total = 0.
H, W = 320, 320
vocab_size = 8803 if dataset == 'referit' else 12112
emb_name = 'referit' if dataset == 'referit' else 'Gref'
IU_result = list()
if pre_emb:
# use pretrained embbeding
print("Use pretrained Embeddings.")
model = get_segmentation_model(model_name, H=H, W=W,
mode='eval', vocab_size=vocab_size, emb_name=emb_name)
else:
model = get_segmentation_model(model_name, H=H, W=W,
mode='eval', vocab_size=vocab_size)
# Load pretrained model
snapshot_restorer = tf.train.Saver()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
sess.run(tf.global_variables_initializer())
snapshot_restorer.restore(sess, weights)
reader = data_reader.DataReader(data_folder, data_prefix, shuffle=False)
NN = reader.num_batch
for n_iter in range(reader.num_batch):
if n_iter % (NN // 50) == 0:
if n_iter / (NN // 50) % 5 == 0:
sys.stdout.write(str(n_iter / (NN // 50) // 5))
else:
sys.stdout.write('.')
sys.stdout.flush()
batch = reader.read_batch(is_log=False)
text = batch['text_batch']
im = batch['im_batch']
mask = batch['mask_batch'].astype(np.float32)
valid_idx = np.zeros([1], dtype=np.int32)
for idx in range(text.shape[0]):
if text[idx] != 0:
valid_idx[0] = idx
break
proc_im = skimage.img_as_ubyte(im_processing.resize_and_pad(im, H, W))
proc_im_ = proc_im.astype(np.float32)
proc_im_ = proc_im_[:, :, ::-1]
proc_im_ -= mu
scores_val, up_val, sigm_val = sess.run([model.pred, model.up, model.sigm],
feed_dict={
model.words: np.expand_dims(text, axis=0),
model.im: np.expand_dims(proc_im_, axis=0),
model.valid_idx: np.expand_dims(valid_idx, axis=0)
})
# scores_val = np.squeeze(scores_val)
# pred_raw = (scores_val >= score_thresh).astype(np.float32)
up_val = np.squeeze(up_val)
pred_raw = (up_val >= score_thresh).astype(np.float32)
predicts = im_processing.resize_and_crop(pred_raw, mask.shape[0], mask.shape[1])
if dcrf:
# Dense CRF post-processing
sigm_val = np.squeeze(sigm_val)
d = densecrf.DenseCRF2D(W, H, 2)
U = np.expand_dims(-np.log(sigm_val), axis=0)
U_ = np.expand_dims(-np.log(1 - sigm_val), axis=0)
unary = np.concatenate((U_, U), axis=0)
unary = unary.reshape((2, -1))
d.setUnaryEnergy(unary)
d.addPairwiseGaussian(sxy=3, compat=3)
d.addPairwiseBilateral(sxy=20, srgb=3, rgbim=proc_im, compat=10)
Q = d.inference(5)
pred_raw_dcrf = np.argmax(Q, axis=0).reshape((H, W)).astype(np.float32)
predicts_dcrf = im_processing.resize_and_crop(pred_raw_dcrf, mask.shape[0], mask.shape[1])
if visualize:
sent = batch['sent_batch'][0]
visualize_seg(im, mask, predicts, sent)
if dcrf:
visualize_seg(im, mask, predicts_dcrf, sent)
I, U = eval_tools.compute_mask_IU(predicts, mask)
IU_result.append({'batch_no': n_iter, 'I': I, 'U': U})
mean_IoU += float(I) / U
cum_I += I
cum_U += U
msg = 'cumulative IoU = %f' % (cum_I / cum_U)
for n_eval_iou in range(len(eval_seg_iou_list)):
eval_seg_iou = eval_seg_iou_list[n_eval_iou]
seg_correct[n_eval_iou] += (I / U >= eval_seg_iou)
if dcrf:
I_dcrf, U_dcrf = eval_tools.compute_mask_IU(predicts_dcrf, mask)
mean_dcrf_IoU += float(I_dcrf) / U_dcrf
cum_I_dcrf += I_dcrf
cum_U_dcrf += U_dcrf
msg += '\tcumulative IoU (dcrf) = %f' % (cum_I_dcrf / cum_U_dcrf)
for n_eval_iou in range(len(eval_seg_iou_list)):
eval_seg_iou = eval_seg_iou_list[n_eval_iou]
seg_correct_dcrf[n_eval_iou] += (I_dcrf / U_dcrf >= eval_seg_iou)
# print(msg)
seg_total += 1
# Print results
print('Segmentation evaluation (without DenseCRF):')
result_str = ''
for n_eval_iou in range(len(eval_seg_iou_list)):
result_str += 'precision@%s = %f\n' % \
(str(eval_seg_iou_list[n_eval_iou]), seg_correct[n_eval_iou] / seg_total)
result_str += 'overall IoU = %f; mean IoU = %f\n' % (cum_I / cum_U, mean_IoU / seg_total)
print(result_str)
if dcrf:
print('Segmentation evaluation (with DenseCRF):')
result_str = ''
for n_eval_iou in range(len(eval_seg_iou_list)):
result_str += 'precision@%s = %f\n' % \
(str(eval_seg_iou_list[n_eval_iou]), seg_correct_dcrf[n_eval_iou] / seg_total)
result_str += 'overall IoU = %f; mean IoU = %f\n' % (cum_I_dcrf / cum_U_dcrf, mean_dcrf_IoU / seg_total)
print(result_str)
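# Hedged sketch (not in the original file): eval_tools.compute_mask_IU is imported from util
# and not shown in this snippet; the intersection/union pixel counts consumed by test() are
# typically computed along these lines (illustrative stand-in only, name assumed).
def _compute_mask_IU_sketch(pred_mask, gt_mask):
    import numpy as np
    pred = np.asarray(pred_mask) > 0
    gt = np.asarray(gt_mask) > 0
    intersection = np.logical_and(pred, gt).sum()
    union = np.logical_or(pred, gt).sum()
    return intersection, union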
def visualize_seg(im, mask, predicts, sent):
# print("visualizing")
vis_dir = "./visualize/lgcr_best_c5map/unc/testA"
sent_dir = os.path.join(vis_dir, sent)
if not os.path.exists(sent_dir):
os.makedirs(sent_dir)
# Ignore sio warnings of low-contrast image.
import warnings
warnings.filterwarnings('ignore')
sio.imsave(os.path.join(sent_dir, "im.png"), im)
im_gt = np.zeros_like(im)
im_gt[:, :, 2] = 170
im_gt[:, :, 0] += mask.astype('uint8') * 170
im_gt = im_gt.astype('int16')
im_gt[:, :, 2] += mask.astype('int16') * (-170)
im_gt = im_gt.astype('uint8')
sio.imsave(os.path.join(sent_dir, "gt.png"), im_gt)
im_seg = im / 2
im_seg[:, :, 0] += predicts.astype('uint8') * 100
im_seg = im_seg.astype('uint8')
sio.imsave(os.path.join(sent_dir, "pred.png"), im_seg)
# plt.imshow(im_seg.astype('uint8'))
# plt.title(sent)
# plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-g', type=str, default='0')
parser.add_argument('-i', type=int, default=800000)
parser.add_argument('-s', type=int, default=100000)
parser.add_argument('-st', type=int, default=700000) # stop training when get st iters
parser.add_argument('-m', type=str) # 'train' 'test'
parser.add_argument('-d', type=str, default='referit') # 'Gref' 'unc' 'unc+' 'referit'
parser.add_argument('-t', type=str) # 'train' 'trainval' 'val' 'test' 'testA' 'testB'
parser.add_argument('-f', type=str) # directory to save models
parser.add_argument('-lr', type=float, default=0.00025) # start learning rate
parser.add_argument('-bs', type=int, default=1) # batch size
parser.add_argument('-v', default=False, action='store_true') # visualization
parser.add_argument('-c', default=False, action='store_true') # whether or not apply DenseCRF
parser.add_argument('-emb', default=False, action='store_true') # whether or not use Pretrained Embeddings
parser.add_argument('-n', type=str, default='') # select model
parser.add_argument('-conv5', default=False, action='store_true') # finetune conv layers
args = parser.parse_args()
# os.environ['CUDA_VISIBLE_DEVICES'] = args.g
mu = np.array((104.00698793, 116.66876762, 122.67891434))
if args.m == 'train':
train(max_iter=args.i,
snapshot=args.s,
dataset=args.d,
setname=args.t,
mu=mu,
lr=args.lr,
bs=args.bs,
tfmodel_folder=args.f,
conv5=args.conv5,
model_name=args.n,
stop_iter=args.st,
pre_emb=args.emb)
elif args.m == 'test':
test(iter=args.i,
dataset=args.d,
visualize=args.v,
setname=args.t,
dcrf=args.c,
mu=mu,
tfmodel_folder=args.f,
model_name=args.n,
pre_emb=args.emb)
| 43.266289
| 132
| 0.562103
|
794ba0cf6f5797caf5f4cdcfd039ef2b32fd32ba
| 5,365
|
py
|
Python
|
main.py
|
hernikplays/PythonPrsty
|
b596e49010378ce50847657aac694247a33f5b5b
|
[
"MIT"
] | null | null | null |
main.py
|
hernikplays/PythonPrsty
|
b596e49010378ce50847657aac694247a33f5b5b
|
[
"MIT"
] | 1
|
2021-06-21T18:46:36.000Z
|
2021-06-21T18:46:36.000Z
|
main.py
|
hernikplays/PythonPrsty
|
b596e49010378ce50847657aac694247a33f5b5b
|
[
"MIT"
] | null | null | null |
from time import time
from pynput import keyboard
from colorama import Fore,init,Back
import os
import utils
from sys import stdin
import re
init()
# pre-define variables
text = ""
napsano = f"{Fore.GREEN}^{Fore.RESET}" # what the user has already typed
listener = None # variable for the key-press listener
radek = 0 # line currently being typed
pismeno = 0 # character that should be typed next
slovo = 0 # word currently being typed
ctrl = False # whether Ctrl is currently pressed
predchozi_napsano = "" # stores the previous typed line
soubor = ""
chybna_neopakuj = []
start = 0 # typing start time
konec = 0 # typing end time
chyby = 0 # error count
def main_menu(): # function that displays the main menu
global text,start,chyby,soubor,slovo,radek,pismeno,napsano,predchozi_napsano,chybna_neopakuj
print(f"{Back.WHITE}{Fore.BLACK}Vyberte co chcete dělat:{Back.RESET}{Fore.RESET}")
print("1 - načíst soubor s textem")
if text != "":
print("2 - Začít psat")
else:
print(f"{Fore.RED}2 - Začít psat{Fore.RESET}")
choose = input()
print(choose)
if(choose == "1"):
path = input("Zadejte cestu k souboru s textem\n")
if not path.endswith(".txt"):
print("Program podporuje pouze formát .txt")
else:
soubor = re.findall(r"[a-zA-Z_]+\.txt",path)[-1]
text = utils.load_text(path)
os.system("cls||clear")
if(text == ""):
print(f"{Fore.RED}Při otevírání souboru došlo k chybě{Fore.RESET}\n")
main_menu()
elif(choose == "2" and text == ""):
os.system("cls||clear")
print(f"{Fore.RED}Není načtený žádný text")
main_menu()
elif(choose == "2"):
chyby = 0
chybnaslova = utils.otevri_chyby(soubor)
if(chybnaslova != ""):
text.insert(0,chybnaslova)
napsano = f"{Fore.GREEN}^{Fore.RESET}"
predchozi_napsano = ""
pismeno = 0
radek = 0
slovo = 0
chybna_neopakuj = []
pis()
        start = time() # record the time when the user started typing
        listener = keyboard.Listener(on_release=on_key_release,on_press=on_key_press)
        listener.start() # start the key-press listener
        stdin.read() # keeps the program from closing
def pis(): # simply prints the current line and the typed text
if radek > 0:
print(text[radek-1])
print(predchozi_napsano)
print(text[radek])
print(napsano)
if radek+1 != len(text):
print(text[radek+1])
def on_key_press(key): # check whether Ctrl is pressed
global ctrl
if(key == keyboard.Key.ctrl_l):
ctrl = True
def on_key_release(key): # function that runs when a key is released
global napsano,pismeno,radek,text,chyby,ctrl,predchozi_napsano,slovo,soubor,listener,chybna_neopakuj
    p = text[radek][pismeno] # character that should be typed now
    s = text[radek].split(" ")[slovo] # word currently being typed
napsano = napsano.replace(f"{Fore.GREEN}^{Fore.RESET}","")
try:
if(key.vk == 90 and ctrl): # ctrl+z
os.system("cls||clear")
main_menu()
listener.join()
return False
        elif(p == key.char): napsano += key.char+f"{Fore.GREEN}^{Fore.RESET}" # if the pressed key matches the expected character, append it normally
else:
napsano += f"{Fore.RED}{key.char}{Fore.RESET}"+f"{Fore.GREEN}^{Fore.RESET}" # jinak vložíme červeně
chyby+=1
if s not in chybna_neopakuj:
utils.zapis_chybu(s,soubor)
chybna_neopakuj.append(s)
    except AttributeError: # special keys such as Space have no ".char" attribute and raise AttributeError
        if(key == keyboard.Key.space): # if the key is Space
slovo+=1
if(p == " "): napsano += " "+f"{Fore.GREEN}^{Fore.RESET}"
else:
napsano += f"{Fore.RED}_{Fore.RESET}"+f"{Fore.GREEN}^{Fore.RESET}"
chyby+=1
        elif(key == keyboard.Key.enter and pismeno != 0): # if the key is Enter
if(p == "⤶"): napsano += "\n"
else:
napsano += f"{Fore.RED}⤶{Fore.RESET}"+f"{Fore.GREEN}^{Fore.RESET}"
chyby+=1
elif key == keyboard.Key.ctrl_l:
ctrl = False
        else: return # otherwise ignore the key
    if(pismeno+1 == len(text[radek]) and radek+1 != len(text)): # end of the line but not the end of the text
radek+=1
pismeno = 0
slovo = 0
predchozi_napsano = napsano.replace(f"{Fore.GREEN}^{Fore.RESET}","")
napsano = f"{Fore.GREEN}^{Fore.RESET}"
os.system("cls||clear")
pis()
    elif(pismeno+1 == len(text[radek]) and radek+1 == len(text)): # end of the line and the end of the text
hotovo()
    else: # otherwise continue character by character
pismeno+=1
os.system("cls||clear")
pis()
def hotovo(): # final evaluation
global konec
konec = time()
os.system("cls||clear")
print("Úspěšně dopsáno")
print()
print(f"Chybné úhozy: {Fore.RED}{chyby}{Fore.RESET}")
print()
print(f"Průměrná rychlost: {Fore.CYAN}{(utils.delka_textu(text)/(konec-start))*60}{Fore.RESET} úhozů za minutu")
#print(f"\nStiskni {Fore.GREEN}Ctrl+Z{Fore.RESET} pro navrácení do menu")
utils.welcome()
main_menu()
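# Hedged sketch (not part of the original main.py): the utils module used above is not
# included in this snippet. Judging from how it is called, load_text returns the file's
# lines as a list (or "" on failure); an illustrative stand-in could be:
def _load_text_sketch(path):
    try:
        with open(path, encoding="utf-8") as f:
            return [line.rstrip("\n") for line in f]
    except OSError:
        return ""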
| 36.25
| 144
| 0.604846
|
794ba0d050d52b3480f71a883a903cc804bc0a9d
| 2,706
|
py
|
Python
|
tests/utests/voltha/core/test_logical_device_agent_venet.py
|
jonohart/voltha
|
87314cd53cb4c61e7e62b0ed3fc6da94603cc507
|
[
"Apache-2.0"
] | null | null | null |
tests/utests/voltha/core/test_logical_device_agent_venet.py
|
jonohart/voltha
|
87314cd53cb4c61e7e62b0ed3fc6da94603cc507
|
[
"Apache-2.0"
] | null | null | null |
tests/utests/voltha/core/test_logical_device_agent_venet.py
|
jonohart/voltha
|
87314cd53cb4c61e7e62b0ed3fc6da94603cc507
|
[
"Apache-2.0"
] | null | null | null |
from unittest import main
from mock import Mock
from tests.utests.voltha.core.test_logical_device_agent import \
test_logical_device_agent
from voltha.protos.device_pb2 import Port
class test_logical_device_agent_venet(test_logical_device_agent):
def setUp(self):
#calling the parent class setUp
test_logical_device_agent.setUp(self)
#re-initializing ports to VENET topology
self.ports = {
'olt': [
Port(port_no=0, type=Port.ETHERNET_NNI, device_id='olt'),
Port(port_no=1, type=Port.VENET_OLT, device_id='olt',
peers=[
Port.PeerPort(device_id='onu1', port_no=1),
Port.PeerPort(device_id='onu2', port_no=1)
]
)
],
'onu1': [
Port(port_no=0, type=Port.ETHERNET_UNI, device_id='onu1'),
Port(port_no=1, type=Port.VENET_ONU, device_id='onu1',
peers=[
Port.PeerPort(device_id='olt', port_no=1),
]
)
],
'onu2': [
Port(port_no=0, type=Port.ETHERNET_UNI, device_id='onu2'),
Port(port_no=1, type=Port.VENET_ONU, device_id='onu2',
peers=[
Port.PeerPort(device_id='olt', port_no=1),
]
)
],
}
#resetting root_proxy for VENET topology
self.root_proxy = Mock()
def get_devices(path):
if path == '':
return self.devices.values()
if path.endswith('/ports'):
return self.ports[path[:-len('/ports')]]
elif path.find('/') == -1:
return self.devices[path]
else:
raise Exception(
'Nothing to yield for path /devices/{}'.format(path))
def update_devices(path, data):
if path.endswith('/flows'):
self.device_flows[path[:-len('/flows')]] = data
elif path.endswith('/flow_groups'):
self.device_groups[path[:-len('/flow_groups')]] = data
else:
raise NotImplementedError(
'not handling path /devices/{}'.format(path))
self.root_proxy.get = lambda p: \
get_devices(p[len('/devices/'):]) if p.startswith('/devices') \
else None
self.root_proxy.update = lambda p, d: \
update_devices(p[len('/devices/'):], d) \
if p.startswith('/devices') \
else None
if __name__ == '__main__':
main()
| 38.657143
| 75
| 0.504065
|
794ba2ce1d5dffb3a5e251f92f782bc561a198b9
| 8,622
|
py
|
Python
|
src/problem3.py
|
brammetc/24-Exam3-201920
|
e28d0f6e48928b4d55da568455d757f2fe500118
|
[
"MIT"
] | null | null | null |
src/problem3.py
|
brammetc/24-Exam3-201920
|
e28d0f6e48928b4d55da568455d757f2fe500118
|
[
"MIT"
] | null | null | null |
src/problem3.py
|
brammetc/24-Exam3-201920
|
e28d0f6e48928b4d55da568455d757f2fe500118
|
[
"MIT"
] | null | null | null |
"""
Exam 3, problem 3.
Authors: Vibha Alangar, Aaron Wilkin, David Mutchler, Dave Fisher,
Matt Boutell, Amanda Stouder, their colleagues and
Tanner Brammeier. January 2019.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
import testing_helper
import time
def main():
""" Calls the TEST functions in this module. """
run_test_problem3()
def is_prime(n):
"""
What comes in: An integer n.
What goes out:
-- Returns True if the given integer is prime,
False if the given integer is NOT prime.
Treats integers less than 2 as NOT prime.
Side effects: None.
Examples:
-- is_prime(11) returns True
-- is_prime(12) returns False
-- is_prime(2) returns True
-- is_prime(1) returns False
Note: The algorithm used here is simple and clear but slow.
"""
if n < 2:
return False
for k in range(2, (n // 2) + 1):
if n % k == 0:
return False
return True
# ------------------------------------------------------------------
# Students:
# Do NOT touch the above is_prime function - it has no TO DO.
# Do NOT copy code from this function.
#
# Instead, ** CALL ** this function as needed in the problems below.
# ------------------------------------------------------------------
def run_test_problem3():
""" Tests the problem3 function. """
print()
print('--------------------------------------------------')
print('Testing the problem3 function:')
print('--------------------------------------------------')
format_string = ' problem3( {} )'
test_results = [0, 0] # Number of tests passed, failed.
number_passed = test_results[0]
number_failed = test_results[1]
# Test 1:
sequence = [4, 5, 6, 7, 8, 9, 10]
expected = 2
copy_of_sequence = [4, 5, 6, 7, 8, 9, 10]
expected_mutation = [4, 8, 6, 10, 8, 9, 10]
print_expected_result_of_test([sequence], expected,
test_results, format_string)
actual = problem3(sequence) # Run the code to test
print_actual_result_of_test(expected, actual, test_results)
print()
if sequence != expected_mutation:
print('FAILED part of the above test!', color='red')
print('BEFORE the function call, the argument is:')
print(' ', copy_of_sequence, color='red')
print('AFTER the function call, the argument should be:')
print(' ', expected_mutation, color='red')
print('Running your code, the argument after the function call is:')
print(' ', sequence, color='red')
test_results[0] = number_passed
test_results[1] = number_failed + 1
else:
print('PASSED the MUTATION part of the above test -- good!',
color='blue')
number_passed = test_results[0]
number_failed = test_results[1]
# Test 2:
sequence = [10, 11, 20, 21, 30, 31, 40, 41]
expected = 3
copy_of_sequence = [10, 11, 20, 21, 30, 31, 40, 41]
expected_mutation = [10, 14, 20, 21, 30, 34, 40, 44]
print_expected_result_of_test([sequence], expected,
test_results, format_string)
actual = problem3(sequence) # Run the code to test
print_actual_result_of_test(expected, actual, test_results)
print()
if sequence != expected_mutation:
print('FAILED part of the above test!', color='red')
print('BEFORE the function call, the argument is:')
print(' ', copy_of_sequence, color='red')
print('AFTER the function call, the argument should be:')
print(' ', expected_mutation, color='red')
print('Running your code, the argument after the function call is:')
print(' ', sequence, color='red')
test_results[0] = number_passed
test_results[1] = number_failed + 1
else:
print('PASSED the MUTATION part of the above test -- good!',
color='blue')
number_passed = test_results[0]
number_failed = test_results[1]
# Test 3:
sequence = [14, 16, 18]
expected = 0
copy_of_sequence = [14, 16, 18]
expected_mutation = [14, 16, 18]
print_expected_result_of_test([sequence], expected,
test_results, format_string)
actual = problem3(sequence) # Run the code to test
print_actual_result_of_test(expected, actual, test_results)
print()
if sequence != expected_mutation:
print('FAILED part of the above test!', color='red')
print('BEFORE the function call, the argument is:')
print(' ', copy_of_sequence, color='red')
print('AFTER the function call, the argument should be:')
print(' ', expected_mutation, color='red')
print('Running your code, the argument after the function call is:')
print(' ', sequence, color='red')
test_results[0] = number_passed
test_results[1] = number_failed + 1
else:
print('PASSED the MUTATION part of the above test -- good!',
color='blue')
# number_passed = test_results[0]
# number_failed = test_results[1]
# SUMMARY of the test results:
print_summary_of_test_results(test_results)
def problem3(sequence):
"""
What comes in:
-- A sequence of integers.
What goes out:
-- The number of integers that are prime.
Side effects:
Replaces each prime number with a number that is 3 greater than it.
Examples:
If sequence = [4, 5, 6, 7, 8, 9, 10]
        problem3(sequence)
RETURNS 2
(because 5 and 7 are the only prime numbers in the sequence)
MUTATES sequence so that its new value is [4, 8, 6, 10, 8, 9, 10]
If sequence = [10, 11, 20, 21, 30, 31, 40, 41]
        problem3(sequence)
RETURNS 3
(because 11, 31, and 41 are the only prime numbers in the sequence)
MUTATES sequence so that its new value is:
[10, 14, 20, 21, 30, 34, 40, 44]
If sequence = [14, 16, 18]
        problem3(sequence)
RETURNS 0 (because no number in the sequence is prime)
and leaves sequence unchanged: [14, 16, 18]
Type hints:
:type sequence: [int]
:rtype: int
"""
# -------------------------------------------------------------------------
# TODO: 3. Implement and test this function.
# Tests have been written for you (above).
# -------------------------------------------------------------------------
    total = 0
    for k in range(len(sequence)):
        if is_prime(sequence[k]):
            sequence[k] = sequence[k] + 3
            total = total + 1
    return total
###############################################################################
# Our tests use the following to print error messages in red.
# Do NOT change it. You do NOT have to do anything with it.
###############################################################################
def print_expected_result_of_test(arguments, expected,
test_results, format_string):
testing_helper.print_expected_result_of_test(arguments, expected,
test_results,
format_string)
def print_actual_result_of_test(expected, actual, test_results):
testing_helper.print_actual_result_of_test(expected, actual,
test_results)
def print_summary_of_test_results(test_results):
testing_helper.print_summary_of_test_results(test_results)
# To allow color-coding the output to the console:
USE_COLORING = True # Change to False to revert to OLD style coloring
testing_helper.USE_COLORING = USE_COLORING
if USE_COLORING:
# noinspection PyShadowingBuiltins
print = testing_helper.print_colored
else:
# noinspection PyShadowingBuiltins
print = testing_helper.print_uncolored
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling.
# The try .. except prevents error messages on the console from being
# intermingled with ordinary output to the console.
# -----------------------------------------------------------------------------
try:
main()
except Exception:
print('ERROR - While running this test,', color='red')
print('your code raised the following exception:', color='red')
print()
time.sleep(1)
raise
| 35.481481
| 79
| 0.563906
|
794ba35b1f559f7983ad2a493f06b797b6f69cab
| 3,377
|
py
|
Python
|
examples/daal4py/decision_forest_classification_default_dense_batch.py
|
yumorozov/scikit-learn-intelex
|
7a39c0a0e208b49f209168b01fb50206f962175f
|
[
"Apache-2.0"
] | 1
|
2021-12-24T16:53:01.000Z
|
2021-12-24T16:53:01.000Z
|
examples/daal4py/decision_forest_classification_default_dense_batch.py
|
yumorozov/scikit-learn-intelex
|
7a39c0a0e208b49f209168b01fb50206f962175f
|
[
"Apache-2.0"
] | null | null | null |
examples/daal4py/decision_forest_classification_default_dense_batch.py
|
yumorozov/scikit-learn-intelex
|
7a39c0a0e208b49f209168b01fb50206f962175f
|
[
"Apache-2.0"
] | null | null | null |
#===============================================================================
# Copyright 2014 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
# daal4py Decision Forest Classification example for shared memory systems
import daal4py as d4p
import numpy as np
# let's try to use pandas' fast csv reader
try:
import pandas
def read_csv(f, c, t=np.float64):
return pandas.read_csv(f, usecols=c, delimiter=',', header=None, dtype=t)
except ImportError:
# fall back to numpy loadtxt
def read_csv(f, c, t=np.float64):
return np.loadtxt(f, usecols=c, delimiter=',', ndmin=2, dtype=t)
def main(readcsv=read_csv, method='defaultDense'):
# input data file
infile = "./data/batch/df_classification_train.csv"
testfile = "./data/batch/df_classification_test.csv"
# Configure a training object (5 classes)
train_algo = d4p.decision_forest_classification_training(
5,
method=method,
nTrees=10,
minObservationsInLeafNode=8,
featuresPerNode=3,
engine=d4p.engines_mt19937(seed=777),
varImportance='MDI',
bootstrap=True,
resultsToCompute='computeOutOfBagError'
)
# Read data. Let's use 3 features per observation
data = readcsv(infile, range(3), t=np.float32)
labels = readcsv(infile, range(3, 4), t=np.float32)
train_result = train_algo.compute(data, labels)
    # Training result provides (depending on parameters) model,
# outOfBagError, outOfBagErrorPerObservation and/or variableImportance
# Now let's do some prediction
predict_algo = d4p.decision_forest_classification_prediction(
nClasses=5,
resultsToEvaluate="computeClassLabels|computeClassProbabilities",
votingMethod="unweighted"
)
# read test data (with same #features)
pdata = readcsv(testfile, range(3), t=np.float32)
plabels = readcsv(testfile, range(3, 4), t=np.float32)
# now predict using the model from the training above
predict_result = predict_algo.compute(pdata, train_result.model)
# Prediction result provides prediction
assert(predict_result.prediction.shape == (pdata.shape[0], 1))
return (train_result, predict_result, plabels)
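# Hedged addition (not part of the original example): a small helper to compare the
# predicted class labels against the ground truth read from the test file; the name is
# illustrative and not part of daal4py.
def _fraction_correct(predictions, ground_truth):
    pred = np.asarray(predictions).ravel()
    truth = np.asarray(ground_truth).ravel()
    return float(np.mean(pred == truth))
# Example (inside the __main__ block below one could print the share of correct labels):
#   print("accuracy:", _fraction_correct(predict_result.prediction, plabels))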
if __name__ == "__main__":
(train_result, predict_result, plabels) = main()
print("\nVariable importance results:\n", train_result.variableImportance)
print("\nOOB error:\n", train_result.outOfBagError)
print(
"\nDecision forest prediction results (first 10 rows):\n",
predict_result.prediction[0:10]
)
print(
"\nDecision forest probabilities results (first 10 rows):\n",
predict_result.probabilities[0:10]
)
print("\nGround truth (first 10 rows):\n", plabels[0:10])
print('All looks good!')
| 37.10989
| 81
| 0.671898
|
794ba5f1d193fb6a562353e2429e727af42f24af
| 1,270
|
py
|
Python
|
util/io_util.py
|
CxrImagePreProcessing/CheXaid
|
d815afbdfdea63cd3aa9151f1f8a1093b7c02412
|
[
"MIT"
] | 8
|
2020-03-04T22:16:06.000Z
|
2022-02-13T20:04:49.000Z
|
util/io_util.py
|
CxrImagePreProcessing/CheXaid
|
d815afbdfdea63cd3aa9151f1f8a1093b7c02412
|
[
"MIT"
] | 2
|
2020-03-23T21:43:40.000Z
|
2020-05-06T13:17:39.000Z
|
util/io_util.py
|
CxrImagePreProcessing/CheXaid
|
d815afbdfdea63cd3aa9151f1f8a1093b7c02412
|
[
"MIT"
] | 5
|
2020-08-01T01:07:36.000Z
|
2021-12-14T17:28:09.000Z
|
import argparse
from sys import stderr
def args_to_list(csv, allow_empty, arg_type=int, allow_negative=True):
"""Convert comma-separated arguments to a list.
Args:
csv: Comma-separated list of arguments as a string.
allow_empty: If True, allow the list to be empty. Otherwise return None instead of empty list.
arg_type: Argument type in the list.
allow_negative: If True, allow negative inputs.
Returns:
List of arguments, converted to `arg_type`.
"""
arg_vals = [arg_type(d) for d in str(csv).split(',')]
if not allow_negative:
arg_vals = [v for v in arg_vals if v >= 0]
if not allow_empty and len(arg_vals) == 0:
return None
return arg_vals
def print_err(*args, **kwargs):
"""Print a message to stderr."""
print(*args, file=stderr, **kwargs)
def str_to_bool(arg):
"""Convert an argument string into its boolean value.
Args:
arg: String representing a bool.
Returns:
Boolean value for the string.
"""
if arg.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif arg.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
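# Hedged usage sketch (not part of the original module): str_to_bool is normally wired
# into argparse as a `type` callable; running this file directly shows it in action.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--flag', type=str_to_bool, default=True)
    print(parser.parse_args(['--flag', 'no']).flag)  # prints: False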
| 28.222222
| 102
| 0.630709
|
794ba622663cd57a0b2963c494f038eb05c9a606
| 2,017
|
py
|
Python
|
venv/Lib/site-packages/pyrogram/raw/types/input_privacy_value_allow_all.py
|
iamgeorgiy/heroku-userbot
|
5a92417d16f8ead949d88cb38da213fc2da5d3a4
|
[
"Apache-2.0"
] | null | null | null |
venv/Lib/site-packages/pyrogram/raw/types/input_privacy_value_allow_all.py
|
iamgeorgiy/heroku-userbot
|
5a92417d16f8ead949d88cb38da213fc2da5d3a4
|
[
"Apache-2.0"
] | null | null | null |
venv/Lib/site-packages/pyrogram/raw/types/input_privacy_value_allow_all.py
|
iamgeorgiy/heroku-userbot
|
5a92417d16f8ead949d88cb38da213fc2da5d3a4
|
[
"Apache-2.0"
] | null | null | null |
# Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2020 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector
from pyrogram.raw.core import TLObject
from pyrogram import raw
from typing import List, Union, Any
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
class InputPrivacyValueAllowAll(TLObject): # type: ignore
"""This object is a constructor of the base type :obj:`~pyrogram.raw.base.InputPrivacyRule`.
Details:
- Layer: ``117``
- ID: ``0x184b35ce``
**No parameters required.**
"""
__slots__: List[str] = []
ID = 0x184b35ce
QUALNAME = "types.InputPrivacyValueAllowAll"
def __init__(self) -> None:
pass
@staticmethod
def read(data: BytesIO, *args: Any) -> "InputPrivacyValueAllowAll":
# No flags
return InputPrivacyValueAllowAll()
def write(self) -> bytes:
data = BytesIO()
data.write(Int(self.ID, False))
# No flags
return data.getvalue()
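# Usage sketch (not part of the generated file): a minimal serialization
# round-trip, assuming the usual Pyrogram TL wire format in which the 4-byte
# little-endian constructor ID precedes the (here empty) body.
if __name__ == "__main__":
    rule = InputPrivacyValueAllowAll()
    raw = rule.write()
    # The serialized form of this parameterless constructor is just its ID.
    assert raw == Int(InputPrivacyValueAllowAll.ID, False)
    # The framework normally consumes the constructor ID before dispatching to
    # read(), so read() receives only the remaining (empty) body.
    parsed = InputPrivacyValueAllowAll.read(BytesIO(raw[4:]))
    assert isinstance(parsed, InputPrivacyValueAllowAll)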
| 31.515625
| 103
| 0.637581
|
794ba649899fae402387ab592e6fa3cda8cfca62
| 3,381
|
py
|
Python
|
ESMF/src/addon/ESMPy/examples/locstream_grid_regrid.py
|
joeylamcy/gchp
|
0e1676300fc91000ecb43539cabf1f342d718fb3
|
[
"NCSA",
"Apache-2.0",
"MIT"
] | 1
|
2018-07-05T16:48:58.000Z
|
2018-07-05T16:48:58.000Z
|
ESMF/src/addon/ESMPy/examples/locstream_grid_regrid.py
|
joeylamcy/gchp
|
0e1676300fc91000ecb43539cabf1f342d718fb3
|
[
"NCSA",
"Apache-2.0",
"MIT"
] | 1
|
2022-03-04T16:12:02.000Z
|
2022-03-04T16:12:02.000Z
|
ESMF/src/addon/ESMPy/examples/locstream_grid_regrid.py
|
joeylamcy/gchp
|
0e1676300fc91000ecb43539cabf1f342d718fb3
|
[
"NCSA",
"Apache-2.0",
"MIT"
] | null | null | null |
# This example demonstrates how to regrid between a LocStream and a Grid.
# The data files can be retrieved from the ESMF data repository by uncommenting the
# following block of code:
#
# import os
# DD = os.path.join(os.getcwd(), "examples/data")
# if not os.path.isdir(DD):
# os.makedirs(DD)
# from ESMF.util.cache_data import cache_data_file
# cache_data_file(os.path.join(DD, "ll1deg_grid.nc"))
import ESMF
import numpy
import ESMF.util.helpers as helpers
import ESMF.api.constants as constants
# This call enables debug logging
ESMF.Manager(debug=True)
grid1 = "examples/data/ll1deg_grid.nc"
grid = ESMF.Grid(filename=grid1, filetype=ESMF.FileFormat.SCRIP)
from ESMF.util.locstream_utilities import create_locstream_spherical_16, create_locstream_spherical_16_parallel
coord_sys=ESMF.CoordSys.SPH_DEG
domask=True
if ESMF.pet_count() == 1:
locstream = create_locstream_spherical_16(coord_sys=coord_sys, domask=domask)
else:
if ESMF.pet_count() != 4:
raise ValueError("processor count must be 4 or 1 for this example")
else:
locstream = create_locstream_spherical_16_parallel(coord_sys=coord_sys, domask=domask)
# create a field
srcfield = ESMF.Field(locstream, name='srcfield')
dstfield = ESMF.Field(grid, name='dstfield')
xctfield = ESMF.Field(grid, name='xctfield')
# initialize the fields
[x, y] = [0, 1]
deg2rad = 3.14159/180
gridXCoord = locstream["ESMF:Lon"]
gridYCoord = locstream["ESMF:Lat"]
if coord_sys == ESMF.CoordSys.SPH_DEG:
srcfield.data[...] = 10.0 + numpy.cos(gridXCoord * deg2rad) ** 2 + numpy.cos(2 * gridYCoord * deg2rad)
elif coord_sys == ESMF.CoordSys.SPH_RAD:
srcfield.data[...] = 10.0 + numpy.cos(gridXCoord) ** 2 + numpy.cos(2 * gridYCoord)
else:
raise ValueError("coordsys value does not apply in this example")
gridXCoord = xctfield.grid.get_coords(x)
gridYCoord = xctfield.grid.get_coords(y)
xctfield.data[...] = 10.0 + numpy.cos(gridXCoord * deg2rad) ** 2 + numpy.cos(2 * gridYCoord * deg2rad)
dstfield.data[...] = 1e20
# create an object to regrid data from the source to the destination field
mask_values=None
if domask:
mask_values=numpy.array([0])
regrid = ESMF.Regrid(srcfield, dstfield,
regrid_method=ESMF.RegridMethod.NEAREST_DTOS,
unmapped_action=ESMF.UnmappedAction.ERROR,
src_mask_values=mask_values)
# do the regridding from source to destination field
dstfield = regrid(srcfield, dstfield, zero_region=ESMF.Region.SELECT)
# compute the mean relative error
from operator import mul
num_nodes = numpy.prod(xctfield.data.shape[:])
relerr = 0
meanrelerr = 0
dstfield = numpy.ravel(dstfield.data)
xctfield = numpy.ravel(xctfield.data)
if num_nodes != 0:
ind = numpy.where((dstfield != 1e20) & (xctfield != 0))[0]
relerr = numpy.sum(numpy.abs(dstfield[ind] - xctfield[ind]) / numpy.abs(xctfield[ind]))
meanrelerr = relerr / num_nodes
# handle the parallel case
if ESMF.pet_count() > 1:
relerr = helpers.reduce_val(relerr, op=constants.Reduce.SUM)
num_nodes = helpers.reduce_val(num_nodes, op=constants.Reduce.SUM)
# output the results from one processor only
if ESMF.local_pet() == 0:
meanrelerr = relerr / num_nodes
print ("ESMPy LocStream Grid Regridding Example")
print (" interpolation mean relative error = {0}".format(meanrelerr))
assert (meanrelerr < 9e-5)
| 33.81
| 111
| 0.729666
|
794ba66dfc96bb5b12403590690562f4dd0e65ee
| 24,831
|
py
|
Python
|
rdr_service/lib_fhir/fhirclient_1_0_6/models/valueset.py
|
all-of-us/raw-data-repository
|
d28ad957557587b03ff9c63d55dd55e0508f91d8
|
[
"BSD-3-Clause"
] | 39
|
2017-10-13T19:16:27.000Z
|
2021-09-24T16:58:21.000Z
|
fhirclient/models/valueset.py
|
NematiLab/Streaming-Sepsis-Prediction-System-for-Intensive-Care-Units
|
fb5ad260fb8d264d85aea9e6c895d1700eea4d11
|
[
"Apache-2.0"
] | 312
|
2017-09-08T15:42:13.000Z
|
2022-03-23T18:21:40.000Z
|
rdr_service/lib_fhir/fhirclient_1_0_6/models/valueset.py
|
all-of-us/raw-data-repository
|
d28ad957557587b03ff9c63d55dd55e0508f91d8
|
[
"BSD-3-Clause"
] | 19
|
2017-09-15T13:58:00.000Z
|
2022-02-07T18:33:20.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 (http://hl7.org/fhir/StructureDefinition/ValueSet) on 2016-06-23.
# 2016, SMART Health IT.
from . import domainresource
class ValueSet(domainresource.DomainResource):
""" A set of codes drawn from one or more code systems.
A value set specifies a set of codes drawn from one or more code systems.
"""
resource_name = "ValueSet"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.codeSystem = None
""" An inline code system, which is part of this value set.
Type `ValueSetCodeSystem` (represented as `dict` in JSON). """
self.compose = None
""" When value set includes codes from elsewhere.
Type `ValueSetCompose` (represented as `dict` in JSON). """
self.contact = None
""" Contact details of the publisher.
List of `ValueSetContact` items (represented as `dict` in JSON). """
self.copyright = None
""" Use and/or publishing restrictions.
Type `str`. """
self.date = None
""" Date for given status.
Type `FHIRDate` (represented as `str` in JSON). """
self.description = None
""" Human language description of the value set.
Type `str`. """
self.expansion = None
""" Used when the value set is "expanded".
Type `ValueSetExpansion` (represented as `dict` in JSON). """
self.experimental = None
""" If for testing purposes, not real usage.
Type `bool`. """
self.extensible = None
""" Whether this is intended to be used with an extensible binding.
Type `bool`. """
self.identifier = None
""" Additional identifier for the value set (e.g. HL7 v2 / CDA).
Type `Identifier` (represented as `dict` in JSON). """
self.immutable = None
""" Indicates whether or not any change to the content logical
definition may occur.
Type `bool`. """
self.lockedDate = None
""" Fixed date for all referenced code systems and value sets.
Type `FHIRDate` (represented as `str` in JSON). """
self.name = None
""" Informal name for this value set.
Type `str`. """
self.publisher = None
""" Name of the publisher (organization or individual).
Type `str`. """
self.requirements = None
""" Why needed.
Type `str`. """
self.status = None
""" draft | active | retired.
Type `str`. """
self.url = None
""" Globally unique logical identifier for value set.
Type `str`. """
self.useContext = None
""" Content intends to support these contexts.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.version = None
""" Logical identifier for this version of the value set.
Type `str`. """
super(ValueSet, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ValueSet, self).elementProperties()
js.extend([
("codeSystem", "codeSystem", ValueSetCodeSystem, False, None, False),
("compose", "compose", ValueSetCompose, False, None, False),
("contact", "contact", ValueSetContact, True, None, False),
("copyright", "copyright", str, False, None, False),
("date", "date", fhirdate.FHIRDate, False, None, False),
("description", "description", str, False, None, False),
("expansion", "expansion", ValueSetExpansion, False, None, False),
("experimental", "experimental", bool, False, None, False),
("extensible", "extensible", bool, False, None, False),
("identifier", "identifier", identifier.Identifier, False, None, False),
("immutable", "immutable", bool, False, None, False),
("lockedDate", "lockedDate", fhirdate.FHIRDate, False, None, False),
("name", "name", str, False, None, False),
("publisher", "publisher", str, False, None, False),
("requirements", "requirements", str, False, None, False),
("status", "status", str, False, None, True),
("url", "url", str, False, None, False),
("useContext", "useContext", codeableconcept.CodeableConcept, True, None, False),
("version", "version", str, False, None, False),
])
return js
from . import backboneelement
class ValueSetCodeSystem(backboneelement.BackboneElement):
""" An inline code system, which is part of this value set.
A definition of a code system, inlined into the value set (as a packaging
convenience). Note that the inline code system may be used from other value
sets by referring to its (codeSystem.system) directly.
"""
resource_name = "ValueSetCodeSystem"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.caseSensitive = None
""" If code comparison is case sensitive.
Type `bool`. """
self.concept = None
""" Concepts in the code system.
List of `ValueSetCodeSystemConcept` items (represented as `dict` in JSON). """
self.system = None
""" URI to identify the code system (e.g. in Coding.system).
Type `str`. """
self.version = None
""" Version (for use in Coding.version).
Type `str`. """
super(ValueSetCodeSystem, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ValueSetCodeSystem, self).elementProperties()
js.extend([
("caseSensitive", "caseSensitive", bool, False, None, False),
("concept", "concept", ValueSetCodeSystemConcept, True, None, True),
("system", "system", str, False, None, True),
("version", "version", str, False, None, False),
])
return js
class ValueSetCodeSystemConcept(backboneelement.BackboneElement):
""" Concepts in the code system.
Concepts that are in the code system. The concept definitions are
inherently hierarchical, but the definitions must be consulted to determine
what the meaning of the hierarchical relationships are.
"""
resource_name = "ValueSetCodeSystemConcept"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.abstract = None
""" If this code is not for use as a real concept.
Type `bool`. """
self.code = None
""" Code that identifies concept.
Type `str`. """
self.concept = None
""" Child Concepts (is-a/contains/categorizes).
List of `ValueSetCodeSystemConcept` items (represented as `dict` in JSON). """
self.definition = None
""" Formal definition.
Type `str`. """
self.designation = None
""" Additional representations for the concept.
List of `ValueSetCodeSystemConceptDesignation` items (represented as `dict` in JSON). """
self.display = None
""" Text to display to the user.
Type `str`. """
super(ValueSetCodeSystemConcept, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ValueSetCodeSystemConcept, self).elementProperties()
js.extend([
("abstract", "abstract", bool, False, None, False),
("code", "code", str, False, None, True),
("concept", "concept", ValueSetCodeSystemConcept, True, None, False),
("definition", "definition", str, False, None, False),
("designation", "designation", ValueSetCodeSystemConceptDesignation, True, None, False),
("display", "display", str, False, None, False),
])
return js
class ValueSetCodeSystemConceptDesignation(backboneelement.BackboneElement):
""" Additional representations for the concept.
Additional representations for the concept - other languages, aliases,
specialized purposes, used for particular purposes, etc.
"""
resource_name = "ValueSetCodeSystemConceptDesignation"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.language = None
""" Human language of the designation.
Type `str`. """
self.use = None
""" Details how this designation would be used.
Type `Coding` (represented as `dict` in JSON). """
self.value = None
""" The text value for this designation.
Type `str`. """
super(ValueSetCodeSystemConceptDesignation, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ValueSetCodeSystemConceptDesignation, self).elementProperties()
js.extend([
("language", "language", str, False, None, False),
("use", "use", coding.Coding, False, None, False),
("value", "value", str, False, None, True),
])
return js
class ValueSetCompose(backboneelement.BackboneElement):
""" When value set includes codes from elsewhere.
A set of criteria that provide the content logical definition of the value
set by including or excluding codes from outside this value set.
"""
resource_name = "ValueSetCompose"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.exclude = None
""" Explicitly exclude codes.
List of `ValueSetComposeInclude` items (represented as `dict` in JSON). """
self.import_fhir = None
""" Import the contents of another value set.
List of `str` items. """
self.include = None
""" Include one or more codes from a code system.
List of `ValueSetComposeInclude` items (represented as `dict` in JSON). """
super(ValueSetCompose, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ValueSetCompose, self).elementProperties()
js.extend([
("exclude", "exclude", ValueSetComposeInclude, True, None, False),
("import_fhir", "import", str, True, None, False),
("include", "include", ValueSetComposeInclude, True, None, False),
])
return js
class ValueSetComposeInclude(backboneelement.BackboneElement):
""" Include one or more codes from a code system.
"""
resource_name = "ValueSetComposeInclude"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.concept = None
""" A concept defined in the system.
List of `ValueSetComposeIncludeConcept` items (represented as `dict` in JSON). """
self.filter = None
""" Select codes/concepts by their properties (including relationships).
List of `ValueSetComposeIncludeFilter` items (represented as `dict` in JSON). """
self.system = None
""" The system the codes come from.
Type `str`. """
self.version = None
""" Specific version of the code system referred to.
Type `str`. """
super(ValueSetComposeInclude, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ValueSetComposeInclude, self).elementProperties()
js.extend([
("concept", "concept", ValueSetComposeIncludeConcept, True, None, False),
("filter", "filter", ValueSetComposeIncludeFilter, True, None, False),
("system", "system", str, False, None, True),
("version", "version", str, False, None, False),
])
return js
class ValueSetComposeIncludeConcept(backboneelement.BackboneElement):
""" A concept defined in the system.
Specifies a concept to be included or excluded.
"""
resource_name = "ValueSetComposeIncludeConcept"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.code = None
""" Code or expression from system.
Type `str`. """
self.designation = None
""" Additional representations for this valueset.
List of `ValueSetCodeSystemConceptDesignation` items (represented as `dict` in JSON). """
self.display = None
""" Test to display for this code for this value set.
Type `str`. """
super(ValueSetComposeIncludeConcept, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ValueSetComposeIncludeConcept, self).elementProperties()
js.extend([
("code", "code", str, False, None, True),
("designation", "designation", ValueSetCodeSystemConceptDesignation, True, None, False),
("display", "display", str, False, None, False),
])
return js
class ValueSetComposeIncludeFilter(backboneelement.BackboneElement):
""" Select codes/concepts by their properties (including relationships).
Select concepts by specifying matching criteria based on the properties
(including relationships) defined by the system. If multiple filters are
specified, they SHALL all be true.
"""
resource_name = "ValueSetComposeIncludeFilter"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.op = None
""" = | is-a | is-not-a | regex | in | not-in.
Type `str`. """
self.property = None
""" A property defined by the code system.
Type `str`. """
self.value = None
""" Code from the system, or regex criteria.
Type `str`. """
super(ValueSetComposeIncludeFilter, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ValueSetComposeIncludeFilter, self).elementProperties()
js.extend([
("op", "op", str, False, None, True),
("property", "property", str, False, None, True),
("value", "value", str, False, None, True),
])
return js
class ValueSetContact(backboneelement.BackboneElement):
""" Contact details of the publisher.
Contacts to assist a user in finding and communicating with the publisher.
"""
resource_name = "ValueSetContact"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.name = None
""" Name of an individual to contact.
Type `str`. """
self.telecom = None
""" Contact details for individual or publisher.
List of `ContactPoint` items (represented as `dict` in JSON). """
super(ValueSetContact, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ValueSetContact, self).elementProperties()
js.extend([
("name", "name", str, False, None, False),
("telecom", "telecom", contactpoint.ContactPoint, True, None, False),
])
return js
class ValueSetExpansion(backboneelement.BackboneElement):
""" Used when the value set is "expanded".
A value set can also be "expanded", where the value set is turned into a
simple collection of enumerated codes. This element holds the expansion, if
it has been performed.
"""
resource_name = "ValueSetExpansion"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.contains = None
""" Codes in the value set.
List of `ValueSetExpansionContains` items (represented as `dict` in JSON). """
self.identifier = None
""" Uniquely identifies this expansion.
Type `str`. """
self.offset = None
""" Offset at which this resource starts.
Type `int`. """
self.parameter = None
""" Parameter that controlled the expansion process.
List of `ValueSetExpansionParameter` items (represented as `dict` in JSON). """
self.timestamp = None
""" Time ValueSet expansion happened.
Type `FHIRDate` (represented as `str` in JSON). """
self.total = None
""" Total number of codes in the expansion.
Type `int`. """
super(ValueSetExpansion, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ValueSetExpansion, self).elementProperties()
js.extend([
("contains", "contains", ValueSetExpansionContains, True, None, False),
("identifier", "identifier", str, False, None, True),
("offset", "offset", int, False, None, False),
("parameter", "parameter", ValueSetExpansionParameter, True, None, False),
("timestamp", "timestamp", fhirdate.FHIRDate, False, None, True),
("total", "total", int, False, None, False),
])
return js
class ValueSetExpansionContains(backboneelement.BackboneElement):
""" Codes in the value set.
The codes that are contained in the value set expansion.
"""
resource_name = "ValueSetExpansionContains"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.abstract = None
""" If user cannot select this entry.
Type `bool`. """
self.code = None
""" Code - if blank, this is not a selectable code.
Type `str`. """
self.contains = None
""" Codes contained under this entry.
List of `ValueSetExpansionContains` items (represented as `dict` in JSON). """
self.display = None
""" User display for the concept.
Type `str`. """
self.system = None
""" System value for the code.
Type `str`. """
self.version = None
""" Version in which this code/display is defined.
Type `str`. """
super(ValueSetExpansionContains, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ValueSetExpansionContains, self).elementProperties()
js.extend([
("abstract", "abstract", bool, False, None, False),
("code", "code", str, False, None, False),
("contains", "contains", ValueSetExpansionContains, True, None, False),
("display", "display", str, False, None, False),
("system", "system", str, False, None, False),
("version", "version", str, False, None, False),
])
return js
class ValueSetExpansionParameter(backboneelement.BackboneElement):
""" Parameter that controlled the expansion process.
A parameter that controlled the expansion process. These parameters may be
used by users of expanded value sets to check whether the expansion is
suitable for a particular purpose, or to pick the correct expansion.
"""
resource_name = "ValueSetExpansionParameter"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.name = None
""" Name as assigned by the server.
Type `str`. """
self.valueBoolean = None
""" Value of the named parameter.
Type `bool`. """
self.valueCode = None
""" Value of the named parameter.
Type `str`. """
self.valueDecimal = None
""" Value of the named parameter.
Type `float`. """
self.valueInteger = None
""" Value of the named parameter.
Type `int`. """
self.valueString = None
""" Value of the named parameter.
Type `str`. """
self.valueUri = None
""" Value of the named parameter.
Type `str`. """
super(ValueSetExpansionParameter, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ValueSetExpansionParameter, self).elementProperties()
js.extend([
("name", "name", str, False, None, True),
("valueBoolean", "valueBoolean", bool, False, "value", False),
("valueCode", "valueCode", str, False, "value", False),
("valueDecimal", "valueDecimal", float, False, "value", False),
("valueInteger", "valueInteger", int, False, "value", False),
("valueString", "valueString", str, False, "value", False),
("valueUri", "valueUri", str, False, "value", False),
])
return js
from . import codeableconcept
from . import coding
from . import contactpoint
from . import fhirdate
from . import identifier
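# Usage sketch (not part of the generated model file): a minimal example of
# initializing the model from a JSON dictionary. The system URL and codes
# below are illustrative assumptions; the 'resourceType' key is omitted for
# brevity.
if __name__ == '__main__':
    example = {
        "status": "draft",
        "name": "example-colors",
        "compose": {
            "include": [{
                "system": "http://example.org/colors",
                "concept": [{"code": "red", "display": "Red"}],
            }],
        },
    }
    vs = ValueSet(jsondict=example, strict=True)
    # Nested elements are instantiated as the corresponding model classes.
    print(vs.name, vs.status, vs.compose.include[0].concept[0].code)
    # as_json() serializes the populated fields back to a plain dict.
    print(vs.as_json()["compose"]["include"][0]["system"])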
| 37.737082
| 100
| 0.603278
|
794ba6b6de142250827338e7e52d2155b49a9b50
| 551
|
py
|
Python
|
apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/payment_payumoney/__manifest__.py
|
gtfarng/Odoo_migrade
|
9cc28fae4c379e407645248a29d22139925eafe7
|
[
"Apache-2.0"
] | 1
|
2019-12-19T01:53:13.000Z
|
2019-12-19T01:53:13.000Z
|
apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/payment_payumoney/__manifest__.py
|
gtfarng/Odoo_migrade
|
9cc28fae4c379e407645248a29d22139925eafe7
|
[
"Apache-2.0"
] | null | null | null |
apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/payment_payumoney/__manifest__.py
|
gtfarng/Odoo_migrade
|
9cc28fae4c379e407645248a29d22139925eafe7
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'PayuMoney Payment Acquirer',
'category': 'Payment Acquirer',
'summary': 'Payment Acquirer: PayuMoney Implementation',
'description': """
PayuMoney Payment Acquirer for India.
PayUmoney payment gateway supports only INR currency.
""",
'depends': ['payment'],
'data': [
'views/payment_views.xml',
'views/payment_payumoney_templates.xml',
'data/payment_acquirer_data.xml',
],
}
| 27.55
| 74
| 0.646098
|
794ba7b196c510cffc7a15136925ecc6104fed5b
| 2,051
|
py
|
Python
|
rack/paths.py
|
tkaneko0204/rack
|
2b459e0c870ad2cb936b458e7597bd6ebb9f031e
|
[
"Apache-2.0"
] | null | null | null |
rack/paths.py
|
tkaneko0204/rack
|
2b459e0c870ad2cb936b458e7597bd6ebb9f031e
|
[
"Apache-2.0"
] | null | null | null |
rack/paths.py
|
tkaneko0204/rack
|
2b459e0c870ad2cb936b458e7597bd6ebb9f031e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from oslo.config import cfg
path_opts = [
cfg.StrOpt('pybasedir',
default=os.path.abspath(os.path.join(os.path.dirname(__file__),
'../')),
help='Directory where the rack python module is installed'),
cfg.StrOpt('bindir',
default=os.path.join(sys.prefix, 'local', 'bin'),
help='Directory where rack binaries are installed'),
cfg.StrOpt('state_path',
default='$pybasedir',
help="Top-level directory for maintaining rack's state"),
]
CONF = cfg.CONF
CONF.register_opts(path_opts)
def basedir_def(*args):
"""Return an uninterpolated path relative to $pybasedir."""
return os.path.join('$pybasedir', *args)
def bindir_def(*args):
"""Return an uninterpolated path relative to $bindir."""
return os.path.join('$bindir', *args)
def state_path_def(*args):
"""Return an uninterpolated path relative to $state_path."""
return os.path.join('$state_path', *args)
def basedir_rel(*args):
"""Return a path relative to $pybasedir."""
return os.path.join(CONF.pybasedir, *args)
def bindir_rel(*args):
"""Return a path relative to $bindir."""
return os.path.join(CONF.bindir, *args)
def state_path_rel(*args):
"""Return a path relative to $state_path."""
return os.path.join(CONF.state_path, *args)
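# Usage sketch (not part of the original module): illustrates the difference
# between the *_def helpers, which keep the '$pybasedir'/'$bindir'/'$state_path'
# placeholders uninterpolated (handy as oslo.config option defaults), and the
# *_rel helpers, which resolve against the registered CONF values.
if __name__ == '__main__':
    print(basedir_def('etc', 'rack.conf'))     # -> '$pybasedir/etc/rack.conf'
    print(state_path_def('lock'))              # -> '$state_path/lock'
    print(basedir_rel('etc', 'rack.conf'))     # resolved under CONF.pybasedir
    print(state_path_rel('lock'))              # resolved under CONF.state_path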
| 31.553846
| 78
| 0.658703
|
794ba86c86c412b9d3c52178607f8b63dd5d1a4b
| 35,835
|
py
|
Python
|
src/k8s-extension/azext_k8s_extension/vendored_sdks/operations/_extensions_operations.py
|
ravithanneeru/azure-cli-extensions
|
e0de87f3563ae39525370e9912589aac33e7bded
|
[
"MIT"
] | 207
|
2017-11-29T06:59:41.000Z
|
2022-03-31T10:00:53.000Z
|
src/k8s-extension/azext_k8s_extension/vendored_sdks/operations/_extensions_operations.py
|
ravithanneeru/azure-cli-extensions
|
e0de87f3563ae39525370e9912589aac33e7bded
|
[
"MIT"
] | 4,061
|
2017-10-27T23:19:56.000Z
|
2022-03-31T23:18:30.000Z
|
src/k8s-extension/azext_k8s_extension/vendored_sdks/operations/_extensions_operations.py
|
ravithanneeru/azure-cli-extensions
|
e0de87f3563ae39525370e9912589aac33e7bded
|
[
"MIT"
] | 802
|
2017-10-11T17:36:26.000Z
|
2022-03-31T22:24:32.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ExtensionsOperations(object):
"""ExtensionsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.kubernetesconfiguration.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _create_initial(
self,
resource_group_name, # type: str
cluster_rp, # type: Union[str, "_models.Enum0"]
cluster_resource_name, # type: Union[str, "_models.Enum1"]
cluster_name, # type: str
extension_name, # type: str
extension, # type: "_models.Extension"
**kwargs # type: Any
):
# type: (...) -> "_models.Extension"
cls = kwargs.pop('cls', None) # type: ClsType["_models.Extension"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'clusterRp': self._serialize.url("cluster_rp", cluster_rp, 'str'),
'clusterResourceName': self._serialize.url("cluster_resource_name", cluster_resource_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'extensionName': self._serialize.url("extension_name", extension_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(extension, 'Extension')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Extension', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Extension', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/extensions/{extensionName}'} # type: ignore
def begin_create(
self,
resource_group_name, # type: str
cluster_rp, # type: Union[str, "_models.Enum0"]
cluster_resource_name, # type: Union[str, "_models.Enum1"]
cluster_name, # type: str
extension_name, # type: str
extension, # type: "_models.Extension"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.Extension"]
"""Create a new Kubernetes Cluster Extension.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param cluster_rp: The Kubernetes cluster RP - either Microsoft.ContainerService (for AKS
clusters) or Microsoft.Kubernetes (for OnPrem K8S clusters).
:type cluster_rp: str or ~azure.mgmt.kubernetesconfiguration.models.Enum0
:param cluster_resource_name: The Kubernetes cluster resource name - either managedClusters
(for AKS clusters) or connectedClusters (for OnPrem K8S clusters).
:type cluster_resource_name: str or ~azure.mgmt.kubernetesconfiguration.models.Enum1
:param cluster_name: The name of the kubernetes cluster.
:type cluster_name: str
:param extension_name: Name of the Extension.
:type extension_name: str
:param extension: Properties necessary to Create an Extension.
:type extension: ~azure.mgmt.kubernetesconfiguration.models.Extension
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either Extension or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.kubernetesconfiguration.models.Extension]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Extension"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_initial(
resource_group_name=resource_group_name,
cluster_rp=cluster_rp,
cluster_resource_name=cluster_resource_name,
cluster_name=cluster_name,
extension_name=extension_name,
extension=extension,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Extension', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'clusterRp': self._serialize.url("cluster_rp", cluster_rp, 'str'),
'clusterResourceName': self._serialize.url("cluster_resource_name", cluster_resource_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'extensionName': self._serialize.url("extension_name", extension_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/extensions/{extensionName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
cluster_rp, # type: Union[str, "_models.Enum0"]
cluster_resource_name, # type: Union[str, "_models.Enum1"]
cluster_name, # type: str
extension_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.Extension"
"""Gets Kubernetes Cluster Extension.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param cluster_rp: The Kubernetes cluster RP - either Microsoft.ContainerService (for AKS
clusters) or Microsoft.Kubernetes (for OnPrem K8S clusters).
:type cluster_rp: str or ~azure.mgmt.kubernetesconfiguration.models.Enum0
:param cluster_resource_name: The Kubernetes cluster resource name - either managedClusters
(for AKS clusters) or connectedClusters (for OnPrem K8S clusters).
:type cluster_resource_name: str or ~azure.mgmt.kubernetesconfiguration.models.Enum1
:param cluster_name: The name of the kubernetes cluster.
:type cluster_name: str
:param extension_name: Name of the Extension.
:type extension_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Extension, or the result of cls(response)
:rtype: ~azure.mgmt.kubernetesconfiguration.models.Extension
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Extension"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-09-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'clusterRp': self._serialize.url("cluster_rp", cluster_rp, 'str'),
'clusterResourceName': self._serialize.url("cluster_resource_name", cluster_resource_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'extensionName': self._serialize.url("extension_name", extension_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Extension', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/extensions/{extensionName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
cluster_rp, # type: Union[str, "_models.Enum0"]
cluster_resource_name, # type: Union[str, "_models.Enum1"]
cluster_name, # type: str
extension_name, # type: str
force_delete=None, # type: Optional[bool]
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-09-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'clusterRp': self._serialize.url("cluster_rp", cluster_rp, 'str'),
'clusterResourceName': self._serialize.url("cluster_resource_name", cluster_resource_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'extensionName': self._serialize.url("extension_name", extension_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if force_delete is not None:
query_parameters['forceDelete'] = self._serialize.query("force_delete", force_delete, 'bool')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/extensions/{extensionName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
cluster_rp, # type: Union[str, "_models.Enum0"]
cluster_resource_name, # type: Union[str, "_models.Enum1"]
cluster_name, # type: str
extension_name, # type: str
force_delete=None, # type: Optional[bool]
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Delete a Kubernetes Cluster Extension. This will cause the Agent to Uninstall the extension
from the cluster.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param cluster_rp: The Kubernetes cluster RP - either Microsoft.ContainerService (for AKS
clusters) or Microsoft.Kubernetes (for OnPrem K8S clusters).
:type cluster_rp: str or ~azure.mgmt.kubernetesconfiguration.models.Enum0
:param cluster_resource_name: The Kubernetes cluster resource name - either managedClusters
(for AKS clusters) or connectedClusters (for OnPrem K8S clusters).
:type cluster_resource_name: str or ~azure.mgmt.kubernetesconfiguration.models.Enum1
:param cluster_name: The name of the kubernetes cluster.
:type cluster_name: str
:param extension_name: Name of the Extension.
:type extension_name: str
:param force_delete: Delete the extension resource in Azure - not the normal asynchronous
delete.
:type force_delete: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
cluster_rp=cluster_rp,
cluster_resource_name=cluster_resource_name,
cluster_name=cluster_name,
extension_name=extension_name,
force_delete=force_delete,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'clusterRp': self._serialize.url("cluster_rp", cluster_rp, 'str'),
'clusterResourceName': self._serialize.url("cluster_resource_name", cluster_resource_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'extensionName': self._serialize.url("extension_name", extension_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/extensions/{extensionName}'} # type: ignore
def _update_initial(
self,
resource_group_name, # type: str
cluster_rp, # type: Union[str, "_models.Enum0"]
cluster_resource_name, # type: Union[str, "_models.Enum1"]
cluster_name, # type: str
extension_name, # type: str
patch_extension, # type: "_models.PatchExtension"
**kwargs # type: Any
):
# type: (...) -> "_models.Extension"
cls = kwargs.pop('cls', None) # type: ClsType["_models.Extension"]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: lambda response: ResourceExistsError(response=response, model=self._deserialize(_models.ErrorResponse, response), error_format=ARMErrorFormat),
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'clusterRp': self._serialize.url("cluster_rp", cluster_rp, 'str'),
'clusterResourceName': self._serialize.url("cluster_resource_name", cluster_resource_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'extensionName': self._serialize.url("extension_name", extension_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(patch_extension, 'PatchExtension')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Extension', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/extensions/{extensionName}'} # type: ignore
def begin_update(
self,
resource_group_name, # type: str
cluster_rp, # type: Union[str, "_models.Enum0"]
cluster_resource_name, # type: Union[str, "_models.Enum1"]
cluster_name, # type: str
extension_name, # type: str
patch_extension, # type: "_models.PatchExtension"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.Extension"]
"""Patch an existing Kubernetes Cluster Extension.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param cluster_rp: The Kubernetes cluster RP - either Microsoft.ContainerService (for AKS
clusters) or Microsoft.Kubernetes (for OnPrem K8S clusters).
:type cluster_rp: str or ~azure.mgmt.kubernetesconfiguration.models.Enum0
:param cluster_resource_name: The Kubernetes cluster resource name - either managedClusters
(for AKS clusters) or connectedClusters (for OnPrem K8S clusters).
:type cluster_resource_name: str or ~azure.mgmt.kubernetesconfiguration.models.Enum1
:param cluster_name: The name of the kubernetes cluster.
:type cluster_name: str
:param extension_name: Name of the Extension.
:type extension_name: str
:param patch_extension: Properties to Patch in an existing Extension.
:type patch_extension: ~azure.mgmt.kubernetesconfiguration.models.PatchExtension
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either Extension or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.kubernetesconfiguration.models.Extension]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Extension"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_initial(
resource_group_name=resource_group_name,
cluster_rp=cluster_rp,
cluster_resource_name=cluster_resource_name,
cluster_name=cluster_name,
extension_name=extension_name,
patch_extension=patch_extension,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Extension', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'clusterRp': self._serialize.url("cluster_rp", cluster_rp, 'str'),
'clusterResourceName': self._serialize.url("cluster_resource_name", cluster_resource_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'extensionName': self._serialize.url("extension_name", extension_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/extensions/{extensionName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
cluster_rp, # type: Union[str, "_models.Enum0"]
cluster_resource_name, # type: Union[str, "_models.Enum1"]
cluster_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ExtensionsList"]
"""List all Extensions in the cluster.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param cluster_rp: The Kubernetes cluster RP - either Microsoft.ContainerService (for AKS
clusters) or Microsoft.Kubernetes (for OnPrem K8S clusters).
:type cluster_rp: str or ~azure.mgmt.kubernetesconfiguration.models.Enum0
:param cluster_resource_name: The Kubernetes cluster resource name - either managedClusters
(for AKS clusters) or connectedClusters (for OnPrem K8S clusters).
:type cluster_resource_name: str or ~azure.mgmt.kubernetesconfiguration.models.Enum1
:param cluster_name: The name of the kubernetes cluster.
:type cluster_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExtensionsList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.kubernetesconfiguration.models.ExtensionsList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExtensionsList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'clusterRp': self._serialize.url("cluster_rp", cluster_rp, 'str'),
'clusterResourceName': self._serialize.url("cluster_resource_name", cluster_resource_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ExtensionsList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/extensions'} # type: ignore
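# --- Editor's usage sketch (not part of the generated SDK) ---
# A minimal, hedged example of driving the two operations above: patch an
# existing extension and then enumerate every extension on the cluster. It
# assumes a service client that exposes this operations group as
# ``client.extensions`` and a ``PatchExtension`` model in the package models;
# those names and all literal values are assumptions, only the method
# signatures are taken from the code above.
def _example_patch_and_list(client, models):
    poller = client.extensions.begin_update(
        resource_group_name="my-rg",
        cluster_rp="Microsoft.Kubernetes",
        cluster_resource_name="connectedClusters",
        cluster_name="my-cluster",
        extension_name="my-extension",
        patch_extension=models.PatchExtension(),  # fields to change would be set here
    )
    extension = poller.result()  # block until the long-running update completes
    # ``list`` returns an ItemPaged iterator, so paging is handled transparently.
    for ext in client.extensions.list(
        resource_group_name="my-rg",
        cluster_rp="Microsoft.Kubernetes",
        cluster_resource_name="connectedClusters",
        cluster_name="my-cluster",
    ):
        print(ext.name)
    return extension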
| 54.460486
| 253
| 0.672248
|
794ba8955ef9ebb51572be3471879e3708bcefdc
| 1,888
|
py
|
Python
|
setup.py
|
ryukinix/rentalcar
|
fbcba57ae87ed9252e1ea6cb4ad401c176785e5b
|
[
"Apache-2.0"
] | 4
|
2017-12-13T18:02:54.000Z
|
2021-10-29T20:19:00.000Z
|
setup.py
|
ryukinix/rentalcar
|
fbcba57ae87ed9252e1ea6cb4ad401c176785e5b
|
[
"Apache-2.0"
] | 1
|
2017-12-13T18:28:57.000Z
|
2019-01-03T05:37:20.000Z
|
setup.py
|
ryukinix/rentalcar
|
fbcba57ae87ed9252e1ea6cb4ad401c176785e5b
|
[
"Apache-2.0"
] | 2
|
2019-05-07T20:01:09.000Z
|
2019-11-16T22:08:05.000Z
|
#!/usr/bin/env python
# coding=utf-8
#
# Python Script
#
# Copyright © Manoel Vilela
#
#
from setuptools import setup, find_packages
from codecs import open # To use a consistent encoding
from os import path
import project
here = path.abspath(path.dirname(__file__))
readme = path.join(here, 'README.md')
with open(readme, encoding='utf-8') as f:
long_description = f.read()
with open('requirements.txt') as f:
install_requires = list(map(str.strip, f.readlines()))
setup(
name=project.__name__,
version=project.__version__,
description="A software for managing the business of car rentals using PyQt5",
long_description=long_description,
classifiers=[
"Environment :: Console",
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"Operating System :: Unix",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
],
# Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='project animation decorators decorator',
author=project.__author__,
author_email=project.__email__,
url=project.__url__,
download_url="{u}/archive/v{v}.tar.gz".format(u=project.__url__,
v=project.__version__),
zip_safe=False,
license='MIT',
packages=find_packages(exclude=['ez_setup', 'examples',
'tests', 'docs', '__pycache__']),
platforms='unix',
install_requires=install_requires,
extras_require={
"Requires-Dist": ["pypandoc"]
},
entry_points={ # no entry-points yet
# 'console_scripts': [
# 'project = project.cli:main'
# ]
},
)
| 29.046154
| 82
| 0.623411
|
794ba94b070335497086b6c31e7aade02d9f77fb
| 1,502
|
py
|
Python
|
setup.py
|
afatt/effmass
|
dac39d404f2417ba0618c8b2183363698ffee69b
|
[
"MIT"
] | 1
|
2021-09-02T03:20:31.000Z
|
2021-09-02T03:20:31.000Z
|
setup.py
|
afatt/effmass
|
dac39d404f2417ba0618c8b2183363698ffee69b
|
[
"MIT"
] | null | null | null |
setup.py
|
afatt/effmass
|
dac39d404f2417ba0618c8b2183363698ffee69b
|
[
"MIT"
] | 1
|
2021-11-29T07:48:16.000Z
|
2021-11-29T07:48:16.000Z
|
import os
from effmass import __version__
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except ImportError:
long_description = open('README.md').read()
config = {
'description': 'An effective mass package',
'long_description': long_description,
'long_description_content_type': 'text/markdown',
'author': 'Lucy Whalley',
'author_email': 'l.whalley@northumbria.ac.uk',
'url': 'https://github.com/lucydot/effmass',
'download_url': "https://github.com/lucydot/effmass/archive/%s.tar.gz" % (__version__),
'version': __version__,
'install_requires': [ 'vasppy>=0.5.0.0',
'scipy',
'numpy',
'matplotlib',
'adjustText',
'ase>=3.21.1',
'octopuspy>=1.0.2',
'questionary>=1.9.0',
'prettytable>=2.1.0'],
'extras_require': {
"docs": [
"sphinx >=3.2.1",
"sphinx_rtd_theme>=0.5.0",
],
"tests": [
"pytest",
"pytest-lazy-fixture",
"code-climate-test-reporter",
"coverage==4.3.4"
],
"dev": ["black"],
},
'python_requires': '>=3.6',
'license': 'MIT',
'packages': [ 'effmass' ],
'scripts': [],
'name': 'effmass',
'entry_points': {"console_scripts": ["effmass = effmass.cli:cli"]}
}
setup(**config)
| 27.309091
| 91
| 0.553262
|
794baa155dfb849b538cb74f838d14d40d2ba0a4
| 2,442
|
py
|
Python
|
DRIVE/perception/infers/segmention_infer.py
|
rohit9934/DRIVE-Digital-Retinal-Images-for-Vessel-Extraction
|
8ed44675500bdd60b2ba58cdac79b6902fbe4f99
|
[
"MIT"
] | 24
|
2019-07-18T02:12:54.000Z
|
2022-01-20T14:49:31.000Z
|
DRIVE/perception/infers/segmention_infer.py
|
LeadingIndiaAI/Digital-Retinal-Images-for-Vessel-Extraction
|
8ed44675500bdd60b2ba58cdac79b6902fbe4f99
|
[
"MIT"
] | 2
|
2019-11-14T08:57:49.000Z
|
2021-05-16T00:28:20.000Z
|
DRIVE/perception/infers/segmention_infer.py
|
Arvin117/DRIVE-Digital-Retinal-Images-for-Vessel-Extraction
|
8ed44675500bdd60b2ba58cdac79b6902fbe4f99
|
[
"MIT"
] | 5
|
2020-04-28T16:18:45.000Z
|
2022-03-21T01:35:37.000Z
|
# -- coding: utf-8 --
"""
Copyright (c) 2018. All rights reserved.
Created by Rohit Sharma, Abdul Mugeesh and Kanishk Nama.
"""
# The purpose of this segmentation inference module is to support the testing process and show how prediction is done.
# Import all required libraries.
import glob,cv2,numpy as np
import matplotlib.pyplot as plt
from keras.models import model_from_json
from perception.bases.infer_base import InferBase
from configs.utils.img_utils import get_test_patches,pred_to_patches,recompone_overlap
from configs.utils.utils import visualize,gray2binary
class SegmentionInfer(InferBase):
def __init__(self,config):
super(SegmentionInfer, self).__init__(config)
self.load_model()
# The model is restored from its architecture JSON and the best weights saved in the experiments/VesselNet/hdf5 folder.
def load_model(self):
self.model = model_from_json(open(self.config.hdf5_path+self.config.exp_name + '_architecture.json').read())
self.model.load_weights(self.config.hdf5_path+self.config.exp_name+ '_best_weights.h5')
# Extract the base filename (without extension) from an image path.
def analyze_name(self,path):
return (path.split('\\')[-1]).split(".")[0]
# Prediction is done on patches. Each image is first reduced to a single grey-scale channel to speed up processing, then predicted patch by patch.
# Afterwards the result is visualised and written to disk.
def predict(self):
predList=glob.glob(self.config.test_img_path+"*."+self.config.test_datatype)
for path in predList:
orgImg_temp=plt.imread(path)
orgImg=orgImg_temp[:,:,1]*0.75+orgImg_temp[:,:,0]*0.25
print("[Info] Analyze filename...",self.analyze_name(path))
height,width=orgImg.shape[:2]
orgImg = np.reshape(orgImg, (height,width,1))
patches_pred,new_height,new_width,adjustImg=get_test_patches(orgImg,self.config)
predictions = self.model.predict(patches_pred, batch_size=32, verbose=1)
pred_patches=pred_to_patches(predictions,self.config)
pred_imgs=recompone_overlap(pred_patches,self.config,new_height,new_width)
pred_imgs=pred_imgs[:,0:height,0:width,:]
adjustImg=adjustImg[0,0:height,0:width,:]
print(adjustImg.shape)
probResult=pred_imgs[0,:,:,0]
binaryResult=gray2binary(probResult)
resultMerge=visualize([adjustImg,binaryResult],[1,2])
resultMerge=cv2.cvtColor(resultMerge,cv2.COLOR_RGB2BGR)
cv2.imwrite(self.config.test_result_path+self.analyze_name(path)+"_merge.jpg",resultMerge)
cv2.imwrite(self.config.test_result_path + self.analyze_name(path) + "_prob.bmp", (probResult*255).astype(np.uint8))
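# --- Editor's usage sketch (assumptions noted below) ---
# A minimal, hedged example of running the class above. The config object only
# needs the attributes referenced in this file (hdf5_path, exp_name,
# test_img_path, test_datatype, test_result_path) plus whatever
# get_test_patches expects; every literal value below is a placeholder.
from types import SimpleNamespace

def _example_run_inference():
    config = SimpleNamespace(
        hdf5_path="experiments/VesselNet/hdf5/",
        exp_name="VesselNet",
        test_img_path="data/test/images/",
        test_datatype="tif",
        test_result_path="experiments/VesselNet/results/",
    )
    infer = SegmentionInfer(config)  # loads the architecture JSON and best weights
    infer.predict()                  # writes *_merge.jpg and *_prob.bmp per image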
| 38.761905
| 125
| 0.765356
|
794baa9cbf2c40184c88765f9fe39e838d5e15ba
| 76
|
py
|
Python
|
elastipy/plot/__init__.py
|
defgsus/elastipy
|
c1144ab39fa70571ba0e02ccf41d380a8a1bd730
|
[
"Apache-2.0"
] | 1
|
2021-02-17T17:50:28.000Z
|
2021-02-17T17:50:28.000Z
|
elastipy/plot/__init__.py
|
defgsus/elastipy
|
c1144ab39fa70571ba0e02ccf41d380a8a1bd730
|
[
"Apache-2.0"
] | 2
|
2021-03-29T02:09:41.000Z
|
2022-03-01T20:09:48.000Z
|
elastipy/plot/__init__.py
|
netzkolchose/elastipy
|
c1144ab39fa70571ba0e02ccf41d380a8a1bd730
|
[
"Apache-2.0"
] | null | null | null |
from .backend import get_backend, set_backend
from .heatmap_ import heatmap
| 25.333333
| 45
| 0.842105
|
794bab652e939ea003869cbbd2334c56bc026ea6
| 2,320
|
py
|
Python
|
academicstoday_project/student/tests/test_announcements.py
|
LeeDoona/EasyGrading
|
8a3b7a95e328a5b710bd98934dcde7556731bd72
|
[
"Apache-2.0"
] | 146
|
2017-02-04T11:14:50.000Z
|
2021-12-30T20:54:50.000Z
|
academicstoday_project/student/tests/test_announcements.py
|
LeeDoona/EasyGrading
|
8a3b7a95e328a5b710bd98934dcde7556731bd72
|
[
"Apache-2.0"
] | 139
|
2015-02-21T21:40:34.000Z
|
2016-02-20T13:34:25.000Z
|
academicstoday_project/student/tests/test_announcements.py
|
topsit143/acda
|
c2a20ffd1dcf8668d1fe401d114d32d9e686f1fd
|
[
"Apache-2.0"
] | 88
|
2017-01-20T20:32:44.000Z
|
2022-02-07T05:32:44.000Z
|
from django.core.urlresolvers import resolve
from django.http import HttpRequest
from django.http import QueryDict
from django.test import TestCase
from django.test import Client
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
import json
from registrar.models import Course
from registrar.models import Teacher
from registrar.models import Announcement
from student.views import announcement
TEST_USER_EMAIL = "ledo@gah.com"
TEST_USER_USERNAME = "Ledo"
TEST_USER_PASSWORD = "password"
class AnnouncementTestCase(TestCase):
def tearDown(self):
Announcement.objects.all().delete()
courses = Course.objects.all()
for course in courses:
course.delete()
User.objects.get(email=TEST_USER_EMAIL).delete()
def setUp(self):
# Create our user.
User.objects.create_user(
email=TEST_USER_EMAIL,
username=TEST_USER_USERNAME,
password=TEST_USER_PASSWORD
)
user = User.objects.get(email=TEST_USER_EMAIL)
teacher = Teacher.objects.create(user=user)
# Create a test course
course = Course.objects.create(
id=1,
title="Comics Book Course",
sub_title="The definitive course on comics!",
category="",
teacher=teacher,
)
# Create our announcement(s)
Announcement.objects.create(
course=course,
title='Hello world!',
body='This is the body of the message.',
)
def test_url_resolves_to_announcements_page_view(self):
found = resolve('/course/1/announcements')
self.assertEqual(found.func, announcement.announcements_page)
def test_announcements_page_returns_correct_html(self):
client = Client()
client.login(
username=TEST_USER_USERNAME,
password=TEST_USER_PASSWORD
)
response = client.post('/course/1/announcements')
self.assertEqual(response.status_code, 200)
self.assertIn(b'Comics Book Course',response.content)
self.assertIn(b'Hello world!',response.content)
self.assertIn(b'This is the body of the message.',response.content)
| 32.222222
| 75
| 0.676293
|
794babb6067cbf2502ef6a75292a3ae8d3bd4cce
| 839
|
py
|
Python
|
Repositories/Raspbian/cv07.py
|
Dong-gi/Dong-gi.github.io
|
2c3d083db72e06032a1daf528ee9b175219aa554
|
[
"MIT"
] | 5
|
2018-02-27T16:19:35.000Z
|
2020-08-25T13:09:49.000Z
|
Repositories/Raspbian/cv07.py
|
Dong-gi/Dong-gi.github.io
|
2c3d083db72e06032a1daf528ee9b175219aa554
|
[
"MIT"
] | 25
|
2019-03-28T00:36:04.000Z
|
2021-08-12T01:42:41.000Z
|
Repositories/Raspbian/cv07.py
|
Dong-gi/Dong-gi.github.io
|
2c3d083db72e06032a1daf528ee9b175219aa554
|
[
"MIT"
] | 1
|
2021-11-28T11:28:29.000Z
|
2021-11-28T11:28:29.000Z
|
import numpy as np
import cv2
img1 = cv2.imread('start05.jpg')
img2 = cv2.imread('start05-2.jpg')
print(cv2.add(np.uint8([250]), np.uint8([20])))
# dst = a*img1 + b*img2 + c
cv2.imshow('Image Blending', cv2.addWeighted(img1, 0.3, img2, 0.7, 0))
cv2.waitKey(0)
# put logo on top-left
logo = cv2.imread('logo.jpg', cv2.IMREAD_COLOR)
rows, cols, channels = logo.shape
roi = img2[0:rows, 0:cols]
# create a mask of logo
logoGray = cv2.cvtColor(logo, cv2.COLOR_BGR2GRAY)
ret, mask = cv2.threshold(logoGray, 200, 255, cv2.THRESH_BINARY)
maskInv = cv2.bitwise_not(mask)
# black-out the area of logo in ROI
img2Back = cv2.bitwise_and(roi, roi, mask = mask)
# pure logo
logoFore = cv2.bitwise_and(logo, logo, mask = maskInv)
# put logo
dst = cv2.add(img2Back, logoFore)
img2[0:rows, 0:cols] = dst
cv2.imshow('result', img2)
cv2.waitKey(0)
| 23.971429
| 70
| 0.702026
|
794babbf29d54296e11c9414429982436ffba2d2
| 5,886
|
py
|
Python
|
pysurf/logger.py
|
MFSJMenger/pysurf
|
99c6a94d4cb5046f16a0961b907061d989ffb6dc
|
[
"Apache-2.0"
] | 7
|
2020-10-28T13:46:08.000Z
|
2021-05-27T06:41:56.000Z
|
pysurf/logger.py
|
MFSJMenger/pysurf
|
99c6a94d4cb5046f16a0961b907061d989ffb6dc
|
[
"Apache-2.0"
] | 2
|
2020-10-27T19:15:12.000Z
|
2020-10-27T19:15:25.000Z
|
pysurf/logger.py
|
MFSJMenger/pysurf
|
99c6a94d4cb5046f16a0961b907061d989ffb6dc
|
[
"Apache-2.0"
] | 2
|
2021-04-15T05:54:30.000Z
|
2022-02-08T00:10:10.000Z
|
from datetime import datetime
from functools import partial
#
from .utils.context_utils import DoOnException
from .utils.context_utils import ExitOnException
from .utils.context_utils import BaseContextDecorator
__all__ = ["get_logger", "Logger"]
def get_logger(filename, name, sublogger=None, mode="w"):
"""initialize a new logger"""
fhandle = Logger.get_fhandle(filename, mode)
return Logger(name, fhandle, sublogger)
class LogBlock(BaseContextDecorator):
def __init__(self, logger, txt=None):
self.txt = txt
self.logger = logger
def set_text(self, txt):
self.txt = txt
def __enter__(self):
self.logger.info(f"\nEnter '{self.txt}' at: "
f"{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
def __exit__(self, exception_type, exception_value, traceback):
self.logger.info(f"Leave '{self.txt}' at: "
f"{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
class _LoggerBase(object):
def __init__(self, name, fhandle):
self.name = name
self.fhandle = fhandle
self._info_block = LogBlock(self)
self._error_block = DoOnException(self.error)
@staticmethod
def get_fhandle(filename, mode="w"):
"""Generate a new file handle"""
return _TextHandle(filename, mode)
def set_fhandle(self, handle):
"""set fhandle to new handle"""
if isinstance(handle, _TextHandle):
pass
elif isinstance(handle, str) or handle is None:
handle = self.get_fhandle(handle)
else:
raise Exception("new file handle can only be: \n"
"None -> Console logger\n"
"filename -> File logger \n"
"logger -> logger \n")
self.fhandle = handle
def debug(self, txt):
self.fhandle.write(f"Debug: {txt}\n")
def header(self, name, dct=None):
txt = '********************************************************************************\n'
txt += '*{:^78}*\n'.format(name)
txt += '*{:^78}*\n'.format(' ')
txt += f"* {'Date':15}: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}".ljust(79) + '*\n'
txt += '*{:^78}*\n'.format(' ')
if dct is not None:
for key in dct:
inter = f"* {key:15}: {dct[key]}"
if len(inter) < 80:
inter = inter.ljust(79) + '*\n'
else:
inter_new = inter[0:79] + '*\n'
inter = inter[79:]
while len(inter) > 60:
inter_new += "* {:^15} ".format(' ') + inter[0:60] + '*\n'
inter = inter[60:]
inter_new += "* {:^15} ".format(' ') + inter.ljust(60) + '*\n'
inter = inter_new
txt += inter
txt += '*{:^78}*\n'.format(' ')
txt += '********************************************************************************\n\n\n'
self.fhandle.write(f"{txt}\n")
def info(self, txt):
self.fhandle.write(f"{txt}\n")
def warning(self, txt):
self.fhandle.write(f"Warning: {txt}\n")
def info_block(self, txt):
self._info_block.set_text(txt)
return self._info_block
def exit_on_exception(self, txt):
self._error_block.set_args(txt)
return self._error_block
def error(self, txt):
error = (f"in '{self.name}' at "
f"{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}:\n"
f"{txt}\n\n")
#
self.fhandle.write(f"Error Termination {error}")
# raise Exception
with ExitOnException():
raise Exception(error)
class Logger(_LoggerBase):
def __init__(self, name, fhandle=None, handles=None):
if fhandle is None:
# create an terminal logger
fhandle = self.get_fhandle(None)
_LoggerBase.__init__(self, name, fhandle)
self.sublogger = self._generate_sublogger(handles)
def __getitem__(self, key):
return self.sublogger[key]
def add_sublogger(self, sublogger):
"""add new sublogger to logger"""
if sublogger is None:
return
if isinstance(sublogger, str):
sublogger = [sublogger]
#
sublogger = self._generate_sublogger(sublogger)
# register new keys
for key, value in sublogger.items():
self.sublogger[key] = value
def _generate_sublogger(self, sublogger):
"""create subloggers"""
if sublogger is None:
return
if isinstance(sublogger, list):
return {logger: Logger(f"{self.name.upper()}-{logger}", self.fhandle)
for logger in sublogger}
if isinstance(sublogger, dict):
return {logger_name: Logger(f"{self.name.upper()}-{logger_name}",
self.fhandle, sub_logger)
for logger_name, sub_logger in sublogger.items()}
if isinstance(sublogger, tuple):
return {logger: Logger(f"{self.name.upper()}-{logger}", self.fhandle)
for logger in sublogger}
raise Exception("Sublogger can only be tuple, dict, list or None!")
class _TextHandle(object):
def __init__(self, filename=None, mode="w"):
self._setup(filename, mode=mode)
def _setup(self, filename, mode="w"):
if filename is None:
self._f = None
self.write = partial(print, end='')
else:
self._f = open(filename, mode=mode)
self.write = self._write
def _write(self, txt):
self._f.write(txt)
self._f.flush()
def __del__(self):
if self._f is not None:
self._f.close()
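# --- Editor's usage sketch (assumptions noted below) ---
# A minimal, hedged example of the public API defined above: build a logger
# with get_logger, write a header, group messages with info_block and route a
# message through a named sublogger. The file name and sublogger name are
# placeholders.
def _example_logging():
    logger = get_logger("run.log", "pysurf", sublogger=["sampling"])
    logger.header("Example Run", {"method": "demo", "steps": 10})
    with logger.info_block("initialisation"):
        logger.info("setting things up")
        logger.warning("this block is only a sketch")
    logger["sampling"].info("message routed through the sublogger")
    with logger.exit_on_exception("fatal error during sampling"):
        pass  # code that may raise would go here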
| 33.443182
| 103
| 0.528203
|
794babd62cb65edb618f1cad43fa04ab371d9b97
| 20,806
|
py
|
Python
|
tests/test_attr.py
|
simonyangme/picamera
|
cfd637f4aeede4b50698bb6963b99d0d9f0fe4c1
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_attr.py
|
simonyangme/picamera
|
cfd637f4aeede4b50698bb6963b99d0d9f0fe4c1
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_attr.py
|
simonyangme/picamera
|
cfd637f4aeede4b50698bb6963b99d0d9f0fe4c1
|
[
"BSD-3-Clause"
] | 1
|
2020-04-21T02:40:37.000Z
|
2020-04-21T02:40:37.000Z
|
# vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Python camera library for the Raspberry Pi camera module
# Copyright (c) 2013-2017 Dave Jones <dave@waveform.org.uk>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
print_function,
division,
absolute_import,
)
# Make Py2's str equivalent to Py3's
str = type('')
import picamera
from picamera.color import Color
import pytest
import time
from fractions import Fraction
from decimal import Decimal
from verify import isclose
def numeric_attr(camera, attr, value_min, value_max, step=1):
save_value = getattr(camera, attr)
try:
for value in range(value_min, value_max + 1, step):
setattr(camera, attr, value)
assert value == getattr(camera, attr)
with pytest.raises(picamera.PiCameraError):
setattr(camera, attr, value_min - 1)
with pytest.raises(picamera.PiCameraError):
setattr(camera, attr, value_max + 1)
finally:
setattr(camera, attr, save_value)
def keyword_attr(camera, attr, values):
save_value = getattr(camera, attr)
try:
for value in values:
setattr(camera, attr, value)
assert value == getattr(camera, attr)
with pytest.raises(picamera.PiCameraError):
setattr(camera, attr, 'foobar')
finally:
setattr(camera, attr, save_value)
def boolean_attr(camera, attr):
save_value = getattr(camera, attr)
try:
setattr(camera, attr, False)
assert not getattr(camera, attr)
setattr(camera, attr, True)
assert getattr(camera, attr)
finally:
setattr(camera, attr, save_value)
def test_analog_gain(camera, previewing):
# Just test the read-only property returns something sensible
assert 0.0 <= camera.analog_gain <= 8.0
def test_annotate_text(camera, previewing):
save_value = camera.annotate_text
try:
camera.annotate_text = ''
assert camera.annotate_text == ''
camera.annotate_text = 'foo'
assert camera.annotate_text == 'foo'
camera.annotate_text = 'foo bar baz quux xyzzy'
assert camera.annotate_text == 'foo bar baz quux xyzzy'
with pytest.raises(picamera.PiCameraValueError):
camera.annotate_text = ('abcd' * 64) + 'a'
with pytest.raises(picamera.PiCameraValueError):
camera.annotate_text = 'Oh lá lá'
finally:
camera.annotate_text = save_value
def test_annotate_text_size(camera, previewing):
numeric_attr(camera, 'annotate_text_size', 6, 160)
def test_annotate_foreground(camera, previewing):
save_value = camera.annotate_foreground
try:
camera.annotate_foreground = Color('black')
camera.annotate_foreground = Color('white')
camera.annotate_foreground = Color.from_yuv(0.5, 0, 0)
with pytest.raises(picamera.PiCameraValueError):
camera.annotate_foreground = 'white'
with pytest.raises(picamera.PiCameraValueError):
camera.annotate_foreground = 0
with pytest.raises(picamera.PiCameraValueError):
camera.annotate_foreground = None
finally:
camera.annotate_foreground = save_value
def test_annotate_background(camera, previewing):
save_value = camera.annotate_background
try:
camera.annotate_background = Color('black')
camera.annotate_background = Color('white')
camera.annotate_background = Color(128, 128, 0)
with pytest.raises(picamera.PiCameraValueError):
camera.annotate_background = 'black'
with pytest.raises(picamera.PiCameraValueError):
camera.annotate_background = 0
camera.annotate_background = None
finally:
camera.annotate_background = save_value
def test_annotate_frame_num(camera, previewing):
boolean_attr(camera, 'annotate_frame_num')
def test_awb_mode(camera, previewing):
keyword_attr(camera, 'awb_mode', camera.AWB_MODES)
def test_awb_gains(camera, previewing):
def check_gains(red, blue):
# The camera needs some time to let the AWB gains adjust
time.sleep(0.4)
# The gains we get back aren't absolutely precise, but they're
# close (+/- 0.05)
r, b = camera.awb_gains
assert red - 0.05 <= r <= red + 0.05
assert blue - 0.05 <= b <= blue + 0.05
save_mode = camera.awb_mode
try:
# Can't use numeric_attr here as awb_gains is a (red, blue) tuple
camera.awb_mode = 'off'
for i in range (1, 9):
camera.awb_gains = i
check_gains(i, i)
camera.awb_gains = 1.5
check_gains(1.5, 1.5)
camera.awb_gains = (0.5, 0.5)
check_gains(0.5, 0.5)
camera.awb_gains = (Fraction(16, 10), 1.9)
check_gains(1.6, 1.9)
with pytest.raises(picamera.PiCameraError):
camera.awb_gains = Fraction(20, 1)
finally:
camera.awb_mode = save_mode
def test_brightness(camera, previewing):
numeric_attr(camera, 'brightness', 0, 100)
def test_color_effects(camera, previewing):
save_value = camera.color_effects
try:
camera.color_effects = None
assert camera.color_effects is None
camera.color_effects = (128, 128)
assert camera.color_effects == (128, 128)
camera.color_effects = (0, 255)
assert camera.color_effects == (0, 255)
camera.color_effects = (255, 0)
assert camera.color_effects == (255, 0)
with pytest.raises(picamera.PiCameraError):
camera.color_effects = (-1, -1)
with pytest.raises(picamera.PiCameraError):
camera.color_effects = (0, 300)
finally:
camera.color_effects = save_value
def test_contrast(camera, previewing):
numeric_attr(camera, 'contrast', -100, 100)
def test_digital_gain(camera, previewing):
# Just test the read-only property returns something sensible
assert 0.0 <= camera.digital_gain <= 8.0
def test_exposure_compensation(camera, previewing):
numeric_attr(camera, 'exposure_compensation', -25, 25)
def test_exposure_mode(camera, previewing):
keyword_attr(camera, 'exposure_mode', camera.EXPOSURE_MODES)
def test_flash_mode(camera, previewing):
keyword_attr(camera, 'flash_mode', camera.FLASH_MODES)
def test_image_effects1(camera, previewing):
valid_combinations = {
'solarize': [
(False, 128, 128, 128, 0),
(True, 128, 128, 128, 0),
(False, 16, 192, 0, 0),
(128, 128, 128, 0),
0,
],
'colorbalance': [
(0, 1, 1, 1, 0, 0),
(0, 0.5, 0.5, 0.5),
(0.451, 1, 1),
(0, 1.0, 0.5, 0.75, -64, 64),
],
'colorpoint': [0, (1,), (2,), 3],
'colorswap': [0, (1,)],
'posterise': [2, (30,), 16],
'blur': [1, (2,)],
'film': [(0, 0, 0), (50, 128, 128)],
'watercolor': [(), (128, 128)],
}
try:
for effect in camera.IMAGE_EFFECTS:
camera.image_effect = effect
assert camera.image_effect_params is None
if effect in valid_combinations:
for params in valid_combinations[effect]:
camera.image_effect_params = params
assert camera.image_effect_params == params
finally:
camera.image_effect = 'none'
def test_image_effects2(camera, previewing):
invalid_combinations = {
'solarize': [(3, 3, 3), ()],
'colorpoint': [(1, 1), ()],
'colorbalance': [(1,), False, ()],
'colorswap': [(1, 1), ()],
'posterise': [(1, 1), ()],
'blur': [(1, 1), ()],
'film': [(1, 1), (), (12, 2, 3, 4)],
'watercolor': [1, (1, 2, 3)],
}
try:
for effect, params_sets in invalid_combinations.items():
camera.image_effect = effect
for params in params_sets:
with pytest.raises(picamera.PiCameraValueError):
camera.image_effect_params = params
finally:
camera.image_effect = 'none'
def test_drc_strength(camera, previewing):
keyword_attr(camera, 'drc_strength', camera.DRC_STRENGTHS)
def test_meter_mode(camera, previewing):
keyword_attr(camera, 'meter_mode', camera.METER_MODES)
def test_rotation(camera, previewing):
save_value = camera.rotation
try:
for value in range(0, 360):
camera.rotation = value
assert camera.rotation == [0, 90, 180, 270][value // 90]
camera.rotation = 360
assert camera.rotation == 0
finally:
camera.rotation = save_value
def test_saturation(camera, previewing):
numeric_attr(camera, 'saturation', -100, 100)
def test_sharpness(camera, previewing):
numeric_attr(camera, 'sharpness', -100, 100)
def test_iso(camera, previewing):
numeric_attr(camera, 'iso', 0, 1600)
def test_video_stabilization(camera, previewing):
boolean_attr(camera, 'video_stabilization')
def test_video_denoise(camera, previewing):
boolean_attr(camera, 'video_denoise')
def test_image_denoise(camera, previewing):
boolean_attr(camera, 'image_denoise')
def test_still_stats(camera, previewing):
boolean_attr(camera, 'still_stats')
def test_hflip(camera, previewing):
boolean_attr(camera, 'hflip')
def test_vflip(camera, previewing):
boolean_attr(camera, 'vflip')
def test_shutter_speed(camera, previewing):
# Shutter speed is now clamped by frame-rate; set frame-rate to something
# nice and low to enable the test to run correctly
save_framerate = camera.framerate
camera.framerate = 1
try:
# When setting shutter speed manually, ensure the actual shutter speed
# is within 50usec of the specified amount (the value+1 accounts for
# a rounding error)
for value in range(0, 700000, 50):
camera.shutter_speed = value
assert (value - 50) <= camera.shutter_speed <= (value + 1)
# Test the shutter speed clamping by framerate
camera.framerate = 30
assert 33000 <= camera.shutter_speed <= 33333
finally:
camera.framerate = save_framerate
camera.shutter_speed = 0
def test_zoom(camera, previewing):
save_zoom = camera.zoom
try:
camera.zoom = (0.0, 0.0, 1.0, 1.0)
assert camera.zoom == (0.0, 0.0, 1.0, 1.0)
camera.zoom = (0.2, 0.2, 0.6, 0.6)
assert camera.zoom == (0.2, 0.2, 0.6, 0.6)
camera.zoom = (0.1, 0.1, 0.8, 0.8)
# 0.1 doesn't quite make the round trip...
assert camera.zoom == (int(0.1*65535.0)/65535.0, int(0.1*65535.0)/65535.0, 0.8, 0.8)
finally:
camera.zoom = save_zoom
# XXX The preview properties work, but don't return correct values unless the
# preview is actually running; if this isn't expected behaviour then we should
# xfail these tests instead of simply testing for previewing...
def test_preview_alpha(camera, previewing):
if previewing:
numeric_attr(camera.preview, 'alpha', 0, 255)
def test_preview_layer(camera, previewing):
if previewing:
numeric_attr(camera.preview, 'layer', 0, 255)
def test_preview_fullscreen(camera, previewing):
if previewing:
boolean_attr(camera.preview, 'fullscreen')
def test_preview_window(camera, previewing):
if previewing:
camera.preview.window = (0, 0, 320, 240)
assert camera.preview.window == (0, 0, 320, 240)
camera.preview.window = (1280-320, 720-240, 320, 240)
assert camera.preview.window == (1280-320, 720-240, 320, 240)
camera.preview.window = (0, 0, 640, 360)
assert camera.preview.window == (0, 0, 640, 360)
camera.preview.window = (0, 720-360, 640, 360)
assert camera.preview.window == (0, 720-360, 640, 360)
camera.preview.window = (1280-640, 0, 640, 360)
assert camera.preview.window == (1280-640, 0, 640, 360)
camera.preview.window = (1280-640, 720-360, 640, 360)
assert camera.preview.window == (1280-640, 720-360, 640, 360)
camera.preview.window = (0, 0, 1920, 1080)
assert camera.preview.window == (0, 0, 1920, 1080)
def test_preview_resolution(camera, previewing):
if previewing:
save_resolution = camera.resolution
try:
camera.resolution = (640, 480)
assert camera.preview.resolution is None
camera.preview.resolution = (320, 240)
assert camera.preview.resolution == (320, 240)
assert camera._camera.outputs[0].framesize == (320, 240)
assert camera._camera.outputs[2].framesize == (640, 480)
camera.resolution = (320, 240)
assert camera.preview.resolution is None
assert camera._camera.outputs[0].framesize == (320, 240)
assert camera._camera.outputs[2].framesize == (320, 240)
camera.resolution = (1280, 720)
assert camera.resolution == (1280, 720)
assert camera.preview.resolution is None
assert camera._camera.outputs[0].framesize == (1280, 720)
assert camera._camera.outputs[2].framesize == (1280, 720)
with pytest.raises(picamera.PiCameraValueError):
camera.preview.resolution = (1281, 720)
with pytest.raises(picamera.PiCameraValueError):
camera.preview.resolution = (1280, 721)
finally:
camera.resolution = save_resolution
def test_preview_rotation(camera, previewing):
if previewing:
save_value = camera.preview.rotation
try:
for value in range(0, 360):
camera.preview.rotation = value
assert camera.preview.rotation == [0, 90, 180, 270][value // 90]
camera.preview.rotation = 360
assert camera.preview.rotation == 0
finally:
camera.preview.rotation = save_value
def test_preview_vflip(camera, previewing):
if previewing:
boolean_attr(camera.preview, 'vflip')
def test_preview_hflip(camera, previewing):
if previewing:
boolean_attr(camera.preview, 'hflip')
def test_sensor_mode(camera, previewing):
save_mode = camera.sensor_mode
try:
for mode in range(8):
camera.sensor_mode = mode
assert camera.sensor_mode == mode
with pytest.raises(picamera.PiCameraError):
camera.sensor_mode = 10
finally:
camera.sensor_mode = save_mode
def test_framerate_delta(camera, previewing):
for num in range(-10, 11):
camera.framerate_delta = num / 10
assert isclose(camera.framerate_delta, Fraction(num, 10), rel_tol=Fraction(1, 256))
def test_framerate(camera, previewing):
save_framerate = camera.framerate
try:
assert len(camera.framerate) == 2
camera.framerate = (30, 1)
n, d = camera.framerate
assert n/d == 30
camera.framerate = (15, 1)
n, d = camera.framerate
assert n/d == 15
camera.framerate = 30
n, d = camera.framerate
assert n/d == 30
camera.framerate = 15.0
n, d = camera.framerate
assert n/d == 15
camera.framerate = Fraction(30, 2)
n, d = camera.framerate
assert n/d == 15
camera.framerate = Decimal(30)
n, d = camera.framerate
assert n/d == 30
camera.framerate = 60
n, d = camera.framerate
assert n/d == 60
camera.framerate = 90
n, d = camera.framerate
assert n/d == 90
with pytest.raises(picamera.PiCameraError):
camera.framerate = (30, 0)
with pytest.raises(picamera.PiCameraError):
camera.framerate = -1
with pytest.raises(picamera.PiCameraError):
camera.framerate = 200
with pytest.raises(picamera.PiCameraError):
camera.framerate = 0
finally:
camera.framerate = save_framerate
def test_framerate_range(camera, previewing):
save_framerate = camera.framerate
try:
tol = Fraction(1, 256)
camera.framerate_range = (Fraction(1, 6), 1)
assert camera.framerate == 0
assert isclose(camera.framerate_range.low, Fraction(1, 6), rel_tol=tol)
assert isclose(camera.framerate_range.high, 1, rel_tol=tol)
camera.framerate_range = (Fraction(50, 1000), Fraction(166, 1000))
assert camera.framerate == 0
assert isclose(camera.framerate_range.low, Fraction(50, 1000), rel_tol=tol)
assert isclose(camera.framerate_range.high, Fraction(166, 1000), rel_tol=tol)
camera.framerate_range = (1, 5)
assert camera.framerate == 0
assert camera.framerate_range == (1, 5)
camera.framerate_range = (1, 30.0)
assert camera.framerate == 0
assert camera.framerate_range == (1, 30)
with pytest.raises(picamera.PiCameraError):
camera.framerate_delta = 1
with pytest.raises(picamera.PiCameraError):
camera.framerate_range = (1, 200)
with pytest.raises(picamera.PiCameraError):
camera.framerate_range = (0, 30)
with pytest.raises(picamera.PiCameraError):
camera.framerate_range = (2, 1)
finally:
camera.framerate = save_framerate
def test_resolution(camera, previewing):
save_resolution = camera.resolution
try:
# Test setting some regular resolutions
camera.resolution = (320, 240)
assert camera.resolution == (320, 240)
assert camera._camera.outputs[2].framesize == (320, 240)
camera.resolution = (640, 480)
assert camera.resolution == (640, 480)
assert camera._camera.outputs[2].framesize == (640, 480)
camera.resolution = (1280, 720)
assert camera.resolution == (1280, 720)
assert camera._camera.outputs[2].framesize == (1280, 720)
camera.resolution = (1920, 1080)
assert camera.resolution == (1920, 1080)
# Camera's vertical resolution is always a multiple of 16, and
# horizontal is a multiple of 32, hence the difference in the video
# formats here and below
assert camera._camera.outputs[2]._port[0].format[0].es[0].video.width == 1920
assert camera._camera.outputs[2]._port[0].format[0].es[0].video.height == 1088
camera.resolution = (2592, 1944)
assert camera.resolution == (2592, 1944)
assert camera._camera.outputs[2]._port[0].format[0].es[0].video.width == 2592
assert camera._camera.outputs[2]._port[0].format[0].es[0].video.height == 1952
# Test some irregular resolutions
camera.resolution = (100, 100)
assert camera.resolution == (100, 100)
assert camera._camera.outputs[2]._port[0].format[0].es[0].video.width == 128
assert camera._camera.outputs[2]._port[0].format[0].es[0].video.height == 112
# Anything below 16,16 will fail (because the camera's vertical
# resolution works in increments of 16)
with pytest.raises(picamera.PiCameraError):
camera.resolution = (0, 0)
with pytest.raises(picamera.PiCameraError):
camera.resolution = (15, 15)
finally:
camera.resolution = save_resolution
| 38.601113
| 92
| 0.640729
|
794bac09d3f97d0ae5537c27ae5a3e0ce8ead475
| 137
|
py
|
Python
|
fastwlk/utils/validation.py
|
pjhartout/fastwlk
|
deb78923c9a8450099c26bac09da94ae87892d0d
|
[
"BSD-3-Clause"
] | null | null | null |
fastwlk/utils/validation.py
|
pjhartout/fastwlk
|
deb78923c9a8450099c26bac09da94ae87892d0d
|
[
"BSD-3-Clause"
] | 7
|
2022-03-21T08:46:44.000Z
|
2022-03-25T16:20:48.000Z
|
fastwlk/utils/validation.py
|
pjhartout/fastwlk
|
deb78923c9a8450099c26bac09da94ae87892d0d
|
[
"BSD-3-Clause"
] | null | null | null |
from typing import Iterable, Union
def check_wl_input(X: Iterable) -> Union[Iterable, None]:
"""TODO: implement checks"""
pass
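# --- Editor's sketch of possible checks (an assumption, not the library's
# actual implementation) ---
# check_wl_input above is still a stub; a minimal validation consistent with
# its signature could look like the following. What counts as a valid graph
# object is not specified here, so only generic checks are sketched.
def _sketch_check_wl_input(X: Iterable) -> Iterable:
    if X is None:
        raise ValueError("X must not be None")
    graphs = list(X)
    if not graphs:
        raise ValueError("X must contain at least one graph")
    return graphs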
| 19.571429
| 57
| 0.693431
|
794bac1a447b859444eb47e25706feb4035573f7
| 5,773
|
py
|
Python
|
flink-python/pyflink/table/table_result.py
|
markcho/flink
|
600ace21aad941367db82539dce993c444c0a1d7
|
[
"Apache-2.0"
] | 2
|
2020-06-17T06:10:12.000Z
|
2020-12-11T15:12:14.000Z
|
flink-python/pyflink/table/table_result.py
|
markcho/flink
|
600ace21aad941367db82539dce993c444c0a1d7
|
[
"Apache-2.0"
] | 2
|
2021-03-30T11:03:02.000Z
|
2021-12-14T20:58:33.000Z
|
flink-python/pyflink/table/table_result.py
|
markcho/flink
|
600ace21aad941367db82539dce993c444c0a1d7
|
[
"Apache-2.0"
] | null | null | null |
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.common.job_client import JobClient
from pyflink.table.result_kind import ResultKind
from pyflink.table.table_schema import TableSchema
__all__ = ['TableResult']
class TableResult(object):
"""
A :class:`~pyflink.table.TableResult` is the representation of the statement execution result.
.. versionadded:: 1.11.0
"""
def __init__(self, j_table_result):
self._j_table_result = j_table_result
def get_job_client(self):
"""
For DML and DQL statements, return the JobClient associated with the submitted Flink job.
For other statements (e.g. DDL, DCL), return None.
:return: The job client, optional.
:rtype: pyflink.common.JobClient
.. versionadded:: 1.11.0
"""
job_client = self._j_table_result.getJobClient()
if job_client.isPresent():
return JobClient(job_client.get())
else:
return None
def get_table_schema(self):
"""
Get the schema of result.
The schema of DDL, USE, SHOW, EXPLAIN:
::
+-------------+-------------+----------+
| column name | column type | comments |
+-------------+-------------+----------+
| result | STRING | |
+-------------+-------------+----------+
The schema of DESCRIBE:
::
+------------------+-------------+-------------------------------------------------+
| column name | column type | comments |
+------------------+-------------+-------------------------------------------------+
| name | STRING | field name |
+------------------+-------------+-------------------------------------------------+
| type | STRING | field type expressed as a String |
+------------------+-------------+-------------------------------------------------+
| null | BOOLEAN | field nullability: true if a field is nullable, |
| | | else false |
+------------------+-------------+-------------------------------------------------+
| key | BOOLEAN | key constraint: 'PRI' for primary keys, |
| | | 'UNQ' for unique keys, else null |
+------------------+-------------+-------------------------------------------------+
| computed column | STRING | computed column: string expression |
| | | if a field is computed column, else null |
+------------------+-------------+-------------------------------------------------+
| watermark | STRING | watermark: string expression if a field is |
| | | watermark, else null |
+------------------+-------------+-------------------------------------------------+
The schema of INSERT: (one column per one sink)
::
+----------------------------+-------------+-----------------------+
| column name | column type | comments |
+----------------------------+-------------+-----------------------+
| (name of the insert table) | BIGINT | the insert table name |
+----------------------------+-------------+-----------------------+
The schema of SELECT is the selected field names and types.
:return: The schema of result.
:rtype: pyflink.table.TableSchema
.. versionadded:: 1.11.0
"""
return TableSchema(j_table_schema=self._j_table_result.getTableSchema())
def get_result_kind(self):
"""
Return the ResultKind which represents the result type.
For DDL operation and USE operation, the result kind is always SUCCESS.
For other operations, the result kind is always SUCCESS_WITH_CONTENT.
:return: The result kind.
:rtype: pyflink.table.ResultKind
.. versionadded:: 1.11.0
"""
return ResultKind._from_j_result_kind(self._j_table_result.getResultKind())
def print(self):
"""
Print the result contents as tableau form to client console.
NOTE: please make sure the result data to print is small,
because all data will be collected locally first and then printed to the console.
.. versionadded:: 1.11.0
"""
self._j_table_result.print()
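# --- Editor's usage sketch (assumptions noted below) ---
# A minimal, hedged example of consuming a TableResult. It assumes a
# TableEnvironment ``t_env`` created elsewhere, that statements are submitted
# through ``t_env.execute_sql``, and that the returned JobClient offers
# ``get_job_id`` (standard PyFlink APIs not shown in this file); the
# TableResult methods themselves are taken from the class above.
def _example_table_result(t_env):
    result = t_env.execute_sql("SELECT 1")
    print(result.get_result_kind())       # SUCCESS_WITH_CONTENT for a query
    print(result.get_table_schema())      # selected field names and types
    job_client = result.get_job_client()  # None for DDL/DCL statements
    if job_client is not None:
        print(job_client.get_job_id())
    result.print()  # tableau-style console output; keep the result small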
| 44.751938
| 98
| 0.440499
|
794bac95dae681cbc11af1aac24e7175b4e3bff0
| 1,778
|
py
|
Python
|
pybpodgui_plugin/models/project/project_uibusy.py
|
ckarageorgkaneen/pybpod-gui-plugin
|
ef9ca8a7094b9d225dde8e3db58d94ae084aaac5
|
[
"MIT"
] | null | null | null |
pybpodgui_plugin/models/project/project_uibusy.py
|
ckarageorgkaneen/pybpod-gui-plugin
|
ef9ca8a7094b9d225dde8e3db58d94ae084aaac5
|
[
"MIT"
] | null | null | null |
pybpodgui_plugin/models/project/project_uibusy.py
|
ckarageorgkaneen/pybpod-gui-plugin
|
ef9ca8a7094b9d225dde8e3db58d94ae084aaac5
|
[
"MIT"
] | 1
|
2021-02-22T21:32:03.000Z
|
2021-02-22T21:32:03.000Z
|
# !/usr/bin/python3
# -*- coding: utf-8 -*-
import logging
from confapp import conf
from AnyQt.QtGui import QIcon
from pybpodgui_plugin.models.board import Board
from pybpodgui_plugin.models.project.project_dockwindow import ProjectDockWindow
logger = logging.getLogger(__name__)
class ProjectUIBusy(ProjectDockWindow):
"""
"""
def update_ui(self):
"""
Update user interface
"""
busy_status = Board.STATUS_READY
# This boolean makes it easy to disable run buttons on subjects
sessionrunning = False
logger.debug('Project [{0}] status:{1}'.format(self.name, busy_status))
for board in self.boards:
if board.status > Board.STATUS_READY:
busy_status = board.status
break
if busy_status == Board.STATUS_READY:
self.node.setIcon(0, QIcon(conf.PROJECT_SMALL_ICON))
self.experiments_node.setIcon(0, QIcon(conf.EXPERIMENTS_SMALL_ICON))
self.boards_node.setIcon(0, QIcon(conf.BOARDS_SMALL_ICON))
self.subjects_node.setIcon(0, QIcon(conf.SUBJECTS_SMALL_ICON))
elif busy_status in [Board.STATUS_RUNNING_TASK]:
self.node.setIcon(0, QIcon(conf.PLAY_SMALL_ICON))
self.experiments_node.setIcon(0, QIcon(conf.PLAY_SMALL_ICON))
self.boards_node.setIcon(0, QIcon(conf.PLAY_SMALL_ICON))
self.subjects_node.setIcon(0, QIcon(conf.PLAY_SMALL_ICON))
# Flag this true so we can disable 'Run' buttons
sessionrunning = True
for exp in self.experiments:
exp.update_ui()
for board in self.boards:
board.update_ui()
for subj in self.subjects:
subj.update_ui(sessionrunning)
| 30.135593
| 80
| 0.651294
|
794bae6bbf231a94145647ff288700c43055c656
| 1,978
|
py
|
Python
|
Cartwheel/lib/Python26/Lib/site-packages/OpenGL/raw/GL/INGR/color_clamp.py
|
MontyThibault/centre-of-mass-awareness
|
58778f148e65749e1dfc443043e9fc054ca3ff4d
|
[
"MIT"
] | null | null | null |
Cartwheel/lib/Python26/Lib/site-packages/OpenGL/raw/GL/INGR/color_clamp.py
|
MontyThibault/centre-of-mass-awareness
|
58778f148e65749e1dfc443043e9fc054ca3ff4d
|
[
"MIT"
] | null | null | null |
Cartwheel/lib/Python26/Lib/site-packages/OpenGL/raw/GL/INGR/color_clamp.py
|
MontyThibault/centre-of-mass-awareness
|
58778f148e65749e1dfc443043e9fc054ca3ff4d
|
[
"MIT"
] | null | null | null |
'''OpenGL extension INGR.color_clamp
Overview (from the spec)
Various RGBA color space conversions require clamping to values
in a more constrained range than [0, 1]. This extension allows
the definition of independent color clamp values for each of the
four color components as part of the Final Conversion in the pixel
transfer path for draws, reads, and copies.
The official definition of this extension is available here:
http://oss.sgi.com/projects/ogl-sample/registry/INGR/color_clamp.txt
Automatically generated by the get_gl_extensions script, do not edit!
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_INGR_color_clamp'
GL_RED_MIN_CLAMP_INGR = constant.Constant( 'GL_RED_MIN_CLAMP_INGR', 0x8560 )
glget.addGLGetConstant( GL_RED_MIN_CLAMP_INGR, (1,) )
GL_GREEN_MIN_CLAMP_INGR = constant.Constant( 'GL_GREEN_MIN_CLAMP_INGR', 0x8561 )
glget.addGLGetConstant( GL_GREEN_MIN_CLAMP_INGR, (1,) )
GL_BLUE_MIN_CLAMP_INGR = constant.Constant( 'GL_BLUE_MIN_CLAMP_INGR', 0x8562 )
glget.addGLGetConstant( GL_BLUE_MIN_CLAMP_INGR, (1,) )
GL_ALPHA_MIN_CLAMP_INGR = constant.Constant( 'GL_ALPHA_MIN_CLAMP_INGR', 0x8563 )
glget.addGLGetConstant( GL_ALPHA_MIN_CLAMP_INGR, (1,) )
GL_RED_MAX_CLAMP_INGR = constant.Constant( 'GL_RED_MAX_CLAMP_INGR', 0x8564 )
glget.addGLGetConstant( GL_RED_MAX_CLAMP_INGR, (1,) )
GL_GREEN_MAX_CLAMP_INGR = constant.Constant( 'GL_GREEN_MAX_CLAMP_INGR', 0x8565 )
glget.addGLGetConstant( GL_GREEN_MAX_CLAMP_INGR, (1,) )
GL_BLUE_MAX_CLAMP_INGR = constant.Constant( 'GL_BLUE_MAX_CLAMP_INGR', 0x8566 )
glget.addGLGetConstant( GL_BLUE_MAX_CLAMP_INGR, (1,) )
GL_ALPHA_MAX_CLAMP_INGR = constant.Constant( 'GL_ALPHA_MAX_CLAMP_INGR', 0x8567 )
glget.addGLGetConstant( GL_ALPHA_MAX_CLAMP_INGR, (1,) )
def glInitColorClampINGR():
'''Return boolean indicating whether this extension is available'''
return extensions.hasGLExtension( EXTENSION_NAME )
| 47.095238
| 80
| 0.813448
|
794bb1375c02a620c3a7dbda974eb9d604239969
| 3,426
|
py
|
Python
|
nipyapi/nifi/models/funnels_entity.py
|
iMajna/nipyapi
|
5480af8fe8c6b470249837835cb1a067abb6678e
|
[
"Apache-2.0"
] | null | null | null |
nipyapi/nifi/models/funnels_entity.py
|
iMajna/nipyapi
|
5480af8fe8c6b470249837835cb1a067abb6678e
|
[
"Apache-2.0"
] | 1
|
2020-03-16T10:02:46.000Z
|
2020-03-16T13:37:42.000Z
|
nipyapi/nifi/models/funnels_entity.py
|
iMajna/nipyapi
|
5480af8fe8c6b470249837835cb1a067abb6678e
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.12.1
Contact: dev@nifi.apache.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class FunnelsEntity(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'funnels': 'list[FunnelEntity]'
}
attribute_map = {
'funnels': 'funnels'
}
def __init__(self, funnels=None):
"""
FunnelsEntity - a model defined in Swagger
"""
self._funnels = None
if funnels is not None:
self.funnels = funnels
@property
def funnels(self):
"""
Gets the funnels of this FunnelsEntity.
:return: The funnels of this FunnelsEntity.
:rtype: list[FunnelEntity]
"""
return self._funnels
@funnels.setter
def funnels(self, funnels):
"""
Sets the funnels of this FunnelsEntity.
:param funnels: The funnels of this FunnelsEntity.
:type: list[FunnelEntity]
"""
self._funnels = funnels
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, FunnelsEntity):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
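# --- Editor's usage sketch (trivial, but grounded in the class above) ---
def _example_funnels_entity():
    entity = FunnelsEntity(funnels=[])
    assert entity.to_dict() == {'funnels': []}
    print(entity)  # __repr__ delegates to to_str()/pformat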
| 27.629032
| 479
| 0.539988
|
794bb21dd784cf0421f3152d2dc3782c2e4946bb
| 107,104
|
py
|
Python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_07_01/operations/_network_watchers_operations.py
|
praveenkuttappan/azure-sdk-for-python
|
4b79413667b7539750a6c7dde15737013a3d4bd5
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_07_01/operations/_network_watchers_operations.py
|
v-xuto/azure-sdk-for-python
|
9c6296d22094c5ede410bc83749e8df8694ccacc
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_07_01/operations/_network_watchers_operations.py
|
v-xuto/azure-sdk-for-python
|
9c6296d22094c5ede410bc83749e8df8694ccacc
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class NetworkWatchersOperations(object):
"""NetworkWatchersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def create_or_update(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "_models.NetworkWatcher"
**kwargs # type: Any
):
# type: (...) -> "_models.NetworkWatcher"
"""Creates or updates a network watcher in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param parameters: Parameters that define the network watcher resource.
:type parameters: ~azure.mgmt.network.v2020_07_01.models.NetworkWatcher
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkWatcher, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_07_01.models.NetworkWatcher
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkWatcher"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'NetworkWatcher')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('NetworkWatcher', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('NetworkWatcher', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'} # type: ignore
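    # Illustrative usage (a sketch added for readability, not part of the generated source).
    # The comment examples in this operations group assume an authenticated client created
    # roughly as below; the credential, subscription ID, and resource names are placeholders:
    #
    #   from azure.identity import DefaultAzureCredential
    #   from azure.mgmt.network import NetworkManagementClient
    #
    #   network_client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
    #
    #   watcher = network_client.network_watchers.create_or_update(
    #       resource_group_name="example-rg",
    #       network_watcher_name="example-watcher",
    #       parameters=_models.NetworkWatcher(location="westus2"),
    #   )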
def get(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.NetworkWatcher"
"""Gets the specified network watcher by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkWatcher, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_07_01.models.NetworkWatcher
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkWatcher"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkWatcher', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'} # type: ignore
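    # Illustrative usage (placeholder names, reusing the `network_client` sketched above):
    #
    #   watcher = network_client.network_watchers.get("example-rg", "example-watcher")
    #   print(watcher.provisioning_state)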
def _delete_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified network watcher resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'} # type: ignore
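    # Illustrative usage: begin_delete returns an LROPoller; call .result() (or .wait())
    # to block until the long-running delete finishes. Names are placeholders:
    #
    #   poller = network_client.network_watchers.begin_delete("example-rg", "example-watcher")
    #   poller.result()   # returns None once the deletion completes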
def update_tags(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.NetworkWatcher"
"""Updates a network watcher tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param parameters: Parameters supplied to update network watcher tags.
:type parameters: ~azure.mgmt.network.v2020_07_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkWatcher, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_07_01.models.NetworkWatcher
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkWatcher"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkWatcher', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'} # type: ignore
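    # Illustrative usage: update_tags sends a PATCH with a TagsObject body, replacing only
    # the resource tags:
    #
    #   updated = network_client.network_watchers.update_tags(
    #       "example-rg", "example-watcher",
    #       _models.TagsObject(tags={"env": "test"}),
    #   )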
def list(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.NetworkWatcherListResult"]
"""Gets all network watchers by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkWatcherListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_07_01.models.NetworkWatcherListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkWatcherListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkWatcherListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers'} # type: ignore
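    # Illustrative usage: list returns an ItemPaged iterator, so paging (following the
    # service's nextLink) happens transparently while iterating:
    #
    #   for watcher in network_client.network_watchers.list("example-rg"):
    #       print(watcher.name, watcher.location)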
def list_all(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.NetworkWatcherListResult"]
"""Gets all network watchers by subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkWatcherListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_07_01.models.NetworkWatcherListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkWatcherListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkWatcherListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkWatchers'} # type: ignore
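    # Illustrative usage: list_all enumerates every watcher in the subscription rather
    # than one resource group:
    #
    #   names = [w.name for w in network_client.network_watchers.list_all()]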
def get_topology(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "_models.TopologyParameters"
**kwargs # type: Any
):
# type: (...) -> "_models.Topology"
"""Gets the current network topology by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param parameters: Parameters that define the representation of topology.
:type parameters: ~azure.mgmt.network.v2020_07_01.models.TopologyParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Topology, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_07_01.models.Topology
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Topology"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.get_topology.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TopologyParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Topology', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_topology.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/topology'} # type: ignore
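    # Illustrative usage: get_topology is a synchronous POST whose TopologyParameters body
    # scopes the query; the target resource group name is a placeholder:
    #
    #   topology = network_client.network_watchers.get_topology(
    #       "example-rg", "example-watcher",
    #       _models.TopologyParameters(target_resource_group_name="scoped-rg"),
    #   )
    #   for resource in topology.resources:
    #       print(resource.name)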
def _verify_ip_flow_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "_models.VerificationIPFlowParameters"
**kwargs # type: Any
):
# type: (...) -> "_models.VerificationIPFlowResult"
cls = kwargs.pop('cls', None) # type: ClsType["_models.VerificationIPFlowResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._verify_ip_flow_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'VerificationIPFlowParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VerificationIPFlowResult', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('VerificationIPFlowResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_verify_ip_flow_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/ipFlowVerify'} # type: ignore
def begin_verify_ip_flow(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "_models.VerificationIPFlowParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VerificationIPFlowResult"]
"""Verify IP flow from the specified VM to a location given the currently configured NSG rules.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param parameters: Parameters that define the IP flow to be verified.
:type parameters: ~azure.mgmt.network.v2020_07_01.models.VerificationIPFlowParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VerificationIPFlowResult or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_07_01.models.VerificationIPFlowResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VerificationIPFlowResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._verify_ip_flow_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VerificationIPFlowResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_verify_ip_flow.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/ipFlowVerify'} # type: ignore
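    # Illustrative usage: begin_verify_ip_flow is a long-running POST; the resource ID and
    # 5-tuple below are placeholders:
    #
    #   poller = network_client.network_watchers.begin_verify_ip_flow(
    #       "example-rg", "example-watcher",
    #       _models.VerificationIPFlowParameters(
    #           target_resource_id="/subscriptions/.../virtualMachines/example-vm",
    #           direction="Outbound", protocol="TCP",
    #           local_port="80", remote_port="443",
    #           local_ip_address="10.0.0.4", remote_ip_address="13.107.21.200",
    #       ),
    #   )
    #   result = poller.result()   # VerificationIPFlowResult with .access and .rule_name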
def _get_next_hop_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "_models.NextHopParameters"
**kwargs # type: Any
):
# type: (...) -> "_models.NextHopResult"
cls = kwargs.pop('cls', None) # type: ClsType["_models.NextHopResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._get_next_hop_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'NextHopParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('NextHopResult', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('NextHopResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_next_hop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/nextHop'} # type: ignore
def begin_get_next_hop(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "_models.NextHopParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.NextHopResult"]
"""Gets the next hop from the specified VM.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param parameters: Parameters that define the source and destination endpoint.
:type parameters: ~azure.mgmt.network.v2020_07_01.models.NextHopParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either NextHopResult or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_07_01.models.NextHopResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.NextHopResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._get_next_hop_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('NextHopResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_next_hop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/nextHop'} # type: ignore
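    # Illustrative usage: NextHopParameters names a source VM (or NIC) plus a source and
    # destination IP address; all values below are placeholders:
    #
    #   poller = network_client.network_watchers.begin_get_next_hop(
    #       "example-rg", "example-watcher",
    #       _models.NextHopParameters(
    #           target_resource_id="/subscriptions/.../virtualMachines/example-vm",
    #           source_ip_address="10.0.0.4",
    #           destination_ip_address="10.0.1.5",
    #       ),
    #   )
    #   hop = poller.result()   # NextHopResult exposing .next_hop_type and .next_hop_ip_address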
def _get_vm_security_rules_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "_models.SecurityGroupViewParameters"
**kwargs # type: Any
):
# type: (...) -> "_models.SecurityGroupViewResult"
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecurityGroupViewResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._get_vm_security_rules_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'SecurityGroupViewParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('SecurityGroupViewResult', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('SecurityGroupViewResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_vm_security_rules_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/securityGroupView'} # type: ignore
def begin_get_vm_security_rules(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "_models.SecurityGroupViewParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.SecurityGroupViewResult"]
"""Gets the configured and effective security group rules on the specified VM.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param parameters: Parameters that define the VM to check security groups for.
:type parameters: ~azure.mgmt.network.v2020_07_01.models.SecurityGroupViewParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either SecurityGroupViewResult or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_07_01.models.SecurityGroupViewResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecurityGroupViewResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._get_vm_security_rules_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('SecurityGroupViewResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_vm_security_rules.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/securityGroupView'} # type: ignore
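    # Illustrative usage: SecurityGroupViewParameters only needs the target VM resource ID
    # (a placeholder here):
    #
    #   poller = network_client.network_watchers.begin_get_vm_security_rules(
    #       "example-rg", "example-watcher",
    #       _models.SecurityGroupViewParameters(
    #           target_resource_id="/subscriptions/.../virtualMachines/example-vm"),
    #   )
    #   view = poller.result()   # SecurityGroupViewResult grouped per network interface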
def _get_troubleshooting_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "_models.TroubleshootingParameters"
**kwargs # type: Any
):
# type: (...) -> "_models.TroubleshootingResult"
cls = kwargs.pop('cls', None) # type: ClsType["_models.TroubleshootingResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._get_troubleshooting_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TroubleshootingParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('TroubleshootingResult', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('TroubleshootingResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_troubleshooting_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/troubleshoot'} # type: ignore
def begin_get_troubleshooting(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "_models.TroubleshootingParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.TroubleshootingResult"]
"""Initiate troubleshooting on a specified resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param parameters: Parameters that define the resource to troubleshoot.
:type parameters: ~azure.mgmt.network.v2020_07_01.models.TroubleshootingParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either TroubleshootingResult or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_07_01.models.TroubleshootingResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.TroubleshootingResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._get_troubleshooting_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('TroubleshootingResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_troubleshooting.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/troubleshoot'} # type: ignore
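    # Illustrative usage: troubleshooting writes its output to a storage account, so the
    # body needs a target resource plus storage account ID and container URL (placeholders):
    #
    #   poller = network_client.network_watchers.begin_get_troubleshooting(
    #       "example-rg", "example-watcher",
    #       _models.TroubleshootingParameters(
    #           target_resource_id="/subscriptions/.../connections/example-vpn-connection",
    #           storage_id="/subscriptions/.../storageAccounts/examplestorage",
    #           storage_path="https://examplestorage.blob.core.windows.net/troubleshooting",
    #       ),
    #   )
    #   result = poller.result()   # TroubleshootingResult with .code and per-check .results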
def _get_troubleshooting_result_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "_models.QueryTroubleshootingParameters"
**kwargs # type: Any
):
# type: (...) -> "_models.TroubleshootingResult"
cls = kwargs.pop('cls', None) # type: ClsType["_models.TroubleshootingResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._get_troubleshooting_result_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'QueryTroubleshootingParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('TroubleshootingResult', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('TroubleshootingResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_troubleshooting_result_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/queryTroubleshootResult'} # type: ignore
def begin_get_troubleshooting_result(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "_models.QueryTroubleshootingParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.TroubleshootingResult"]
"""Get the last completed troubleshooting result on a specified resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param parameters: Parameters that define the resource to query the troubleshooting result.
:type parameters: ~azure.mgmt.network.v2020_07_01.models.QueryTroubleshootingParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either TroubleshootingResult or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_07_01.models.TroubleshootingResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.TroubleshootingResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._get_troubleshooting_result_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('TroubleshootingResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_troubleshooting_result.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/queryTroubleshootResult'} # type: ignore
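    # Illustrative usage: fetching the last completed run only needs the target resource ID
    # (a placeholder here):
    #
    #   poller = network_client.network_watchers.begin_get_troubleshooting_result(
    #       "example-rg", "example-watcher",
    #       _models.QueryTroubleshootingParameters(
    #           target_resource_id="/subscriptions/.../connections/example-vpn-connection"),
    #   )
    #   last_result = poller.result()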
def _set_flow_log_configuration_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "_models.FlowLogInformation"
**kwargs # type: Any
):
# type: (...) -> "_models.FlowLogInformation"
cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowLogInformation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._set_flow_log_configuration_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'FlowLogInformation')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('FlowLogInformation', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('FlowLogInformation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_set_flow_log_configuration_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/configureFlowLog'} # type: ignore
def begin_set_flow_log_configuration(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "_models.FlowLogInformation"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.FlowLogInformation"]
"""Configures flow log and traffic analytics (optional) on a specified resource.
:param resource_group_name: The name of the network watcher resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param parameters: Parameters that define the configuration of flow log.
:type parameters: ~azure.mgmt.network.v2020_07_01.models.FlowLogInformation
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either FlowLogInformation or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_07_01.models.FlowLogInformation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowLogInformation"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._set_flow_log_configuration_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('FlowLogInformation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_set_flow_log_configuration.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/configureFlowLog'} # type: ignore
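    # Illustrative usage: FlowLogInformation ties an NSG to a storage account and toggles
    # flow logging; the resource IDs below are placeholders:
    #
    #   poller = network_client.network_watchers.begin_set_flow_log_configuration(
    #       "example-rg", "example-watcher",
    #       _models.FlowLogInformation(
    #           target_resource_id="/subscriptions/.../networkSecurityGroups/example-nsg",
    #           storage_id="/subscriptions/.../storageAccounts/examplestorage",
    #           enabled=True,
    #       ),
    #   )
    #   flow_log = poller.result()   # FlowLogInformation reflecting the applied settings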
def _get_flow_log_status_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "_models.FlowLogStatusParameters"
**kwargs # type: Any
):
# type: (...) -> "_models.FlowLogInformation"
cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowLogInformation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._get_flow_log_status_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'FlowLogStatusParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('FlowLogInformation', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('FlowLogInformation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_flow_log_status_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/queryFlowLogStatus'} # type: ignore
def begin_get_flow_log_status(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "_models.FlowLogStatusParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.FlowLogInformation"]
"""Queries status of flow log and traffic analytics (optional) on a specified resource.
:param resource_group_name: The name of the network watcher resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param parameters: Parameters that define a resource to query flow log and traffic analytics
(optional) status.
:type parameters: ~azure.mgmt.network.v2020_07_01.models.FlowLogStatusParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a custom polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either FlowLogInformation or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_07_01.models.FlowLogInformation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowLogInformation"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._get_flow_log_status_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('FlowLogInformation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_flow_log_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/queryFlowLogStatus'} # type: ignore
def _check_connectivity_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "_models.ConnectivityParameters"
**kwargs # type: Any
):
# type: (...) -> "_models.ConnectivityInformation"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectivityInformation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._check_connectivity_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ConnectivityParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ConnectivityInformation', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('ConnectivityInformation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_check_connectivity_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectivityCheck'} # type: ignore
def begin_check_connectivity(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "_models.ConnectivityParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ConnectivityInformation"]
"""Verifies the possibility of establishing a direct TCP connection from a virtual machine to a
given endpoint including another VM or an arbitrary remote server.
:param resource_group_name: The name of the network watcher resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param parameters: Parameters that determine how the connectivity check will be performed.
:type parameters: ~azure.mgmt.network.v2020_07_01.models.ConnectivityParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a custom polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ConnectivityInformation or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_07_01.models.ConnectivityInformation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectivityInformation"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._check_connectivity_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ConnectivityInformation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_check_connectivity.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectivityCheck'} # type: ignore
def _get_azure_reachability_report_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "_models.AzureReachabilityReportParameters"
**kwargs # type: Any
):
# type: (...) -> "_models.AzureReachabilityReport"
cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureReachabilityReport"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._get_azure_reachability_report_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'AzureReachabilityReportParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('AzureReachabilityReport', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('AzureReachabilityReport', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_azure_reachability_report_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/azureReachabilityReport'} # type: ignore
def begin_get_azure_reachability_report(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "_models.AzureReachabilityReportParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.AzureReachabilityReport"]
"""NOTE: This feature is currently in preview and still being tested for stability. Gets the
relative latency score for internet service providers from a specified location to Azure
regions.
:param resource_group_name: The name of the network watcher resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param parameters: Parameters that determine Azure reachability report configuration.
:type parameters: ~azure.mgmt.network.v2020_07_01.models.AzureReachabilityReportParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a custom polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either AzureReachabilityReport or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_07_01.models.AzureReachabilityReport]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureReachabilityReport"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._get_azure_reachability_report_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('AzureReachabilityReport', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_azure_reachability_report.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/azureReachabilityReport'} # type: ignore
def _list_available_providers_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "_models.AvailableProvidersListParameters"
**kwargs # type: Any
):
# type: (...) -> "_models.AvailableProvidersList"
cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailableProvidersList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._list_available_providers_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'AvailableProvidersListParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('AvailableProvidersList', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('AvailableProvidersList', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_list_available_providers_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/availableProvidersList'} # type: ignore
def begin_list_available_providers(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "_models.AvailableProvidersListParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.AvailableProvidersList"]
"""NOTE: This feature is currently in preview and still being tested for stability. Lists all
available internet service providers for a specified Azure region.
:param resource_group_name: The name of the network watcher resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param parameters: Parameters that scope the list of available providers.
:type parameters: ~azure.mgmt.network.v2020_07_01.models.AvailableProvidersListParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a custom polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either AvailableProvidersList or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_07_01.models.AvailableProvidersList]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailableProvidersList"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._list_available_providers_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('AvailableProvidersList', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_list_available_providers.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/availableProvidersList'} # type: ignore
def _get_network_configuration_diagnostic_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "_models.NetworkConfigurationDiagnosticParameters"
**kwargs # type: Any
):
# type: (...) -> "_models.NetworkConfigurationDiagnosticResponse"
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkConfigurationDiagnosticResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._get_network_configuration_diagnostic_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'NetworkConfigurationDiagnosticParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('NetworkConfigurationDiagnosticResponse', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('NetworkConfigurationDiagnosticResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_network_configuration_diagnostic_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/networkConfigurationDiagnostic'} # type: ignore
def begin_get_network_configuration_diagnostic(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "_models.NetworkConfigurationDiagnosticParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.NetworkConfigurationDiagnosticResponse"]
"""Gets Network Configuration Diagnostic data to help customers understand and debug network
behavior. It provides detailed information on what security rules were applied to a specified
traffic flow and the result of evaluating these rules. Customers must provide details of a flow
like source, destination, protocol, etc. The API returns whether traffic was allowed or denied,
the rules evaluated for the specified flow and the evaluation results.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param parameters: Parameters to get network configuration diagnostic.
:type parameters: ~azure.mgmt.network.v2020_07_01.models.NetworkConfigurationDiagnosticParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a custom polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either NetworkConfigurationDiagnosticResponse or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_07_01.models.NetworkConfigurationDiagnosticResponse]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkConfigurationDiagnosticResponse"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._get_network_configuration_diagnostic_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('NetworkConfigurationDiagnosticResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_network_configuration_diagnostic.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/networkConfigurationDiagnostic'} # type: ignore
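# --- Editor's usage sketch (not part of the generated SDK) -------------------
# Every begin_* operation above returns an azure.core.polling.LROPoller; the
# typical pattern is to call the operation and then block on .result(). The
# subscription id, resource group, watcher name and NSG id below are
# hypothetical placeholders, and the sketch assumes azure-identity and
# azure-mgmt-network are installed:
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.network import NetworkManagementClient
#     from azure.mgmt.network.v2020_07_01.models import FlowLogStatusParameters
#
#     client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
#     poller = client.network_watchers.begin_get_flow_log_status(
#         "my-resource-group",
#         "my-network-watcher",
#         FlowLogStatusParameters(target_resource_id="<nsg-resource-id>"),
#     )
#     flow_log_info = poller.result()        # blocks until the LRO finishes
#     token = poller.continuation_token()    # can be passed back via continuation_token=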
| 53.074331
| 250
| 0.679526
|
794bb2c40e5551d7b1820d6101b8f9aaa877dd5f
| 7,518
|
py
|
Python
|
src/monitoring/notifications/notifier.py
|
gkovacs81/argus_server
|
97ebf705ed3e61a69bd561faf495e2c19bda510d
|
[
"MIT"
] | null | null | null |
src/monitoring/notifications/notifier.py
|
gkovacs81/argus_server
|
97ebf705ed3e61a69bd561faf495e2c19bda510d
|
[
"MIT"
] | 3
|
2021-06-02T04:07:35.000Z
|
2021-12-27T20:21:46.000Z
|
src/monitoring/notifications/notifier.py
|
gkovacs81/argus_server
|
97ebf705ed3e61a69bd561faf495e2c19bda510d
|
[
"MIT"
] | null | null | null |
import json
import logging
import os
import smtplib
from queue import Empty, Queue
from smtplib import SMTPException
from threading import Thread
from time import sleep
from models import Option
from monitoring.broadcast import Broadcaster
from monitoring.constants import LOG_NOTIFIER, MONITOR_STOP, MONITOR_UPDATE_CONFIG, THREAD_NOTIFIER
from monitoring.database import Session
from monitoring.notifications.templates import (
ALERT_STARTED_EMAIL,
ALERT_STARTED_SMS,
ALERT_STOPPED_EMAIL,
ALERT_STOPPED_SMS,
)
# check if running on a Raspberry Pi (ARM CPU)
if os.uname()[4][:3] == "arm":
from monitoring.adapters.gsm import GSM
else:
from monitoring.adapters.mock.gsm import GSM
"""
Messages
{
"type": "alert_started" / "alert_stopped",
"id": "alert id",
"sensors": ["Sensor name"],
"time": "start time",
}
"""
ALERT_STARTED = "alert_started"
ALERT_STOPPED = "alert_stopped"
"""
options = {
"subscriptions": {
"sms": {
ALERT_STARTED: True,
ALERT_STOPPED: True,
WEEKLY_REPORT: False
},
"email": {
ALERT_STARTED: False,
ALERT_STOPPED: False,
WEEKLY_REPORT: False
}
},
"email": {
'smtp_username': 'smtp_username',
'smtp_password': 'smtp_password',
'email_address': 'email_address'
},
"gsm": {
"phone_number": "phone number"
}
}
"""
class Notifier(Thread):
MAX_RETRY = 5
RETRY_WAIT = 30
_notifications = []
    # TODO: consider being notified via broadcast actions instead of having these
    # class methods called directly, and retrieving the details from the database
@classmethod
def notify_alert_started(cls, alert_id, sensors, time):
cls._notifications.append({
"type": ALERT_STARTED,
"id": alert_id,
"sensors": sensors,
"time": time,
"retry": 0
})
@classmethod
def notify_alert_stopped(cls, alert_id, time):
cls._notifications.append({
"type": ALERT_STOPPED,
"id": alert_id,
"time": time,
"retry": 0
})
def __init__(self, broadcaster: Broadcaster):
super(Notifier, self).__init__(name=THREAD_NOTIFIER)
self._actions = Queue()
self._broadcaster = broadcaster
self._logger = logging.getLogger(LOG_NOTIFIER)
self._gsm = GSM()
self._notifications = []
self._options = None
self._db_session = None
self._broadcaster.register_queue(id(self), self._actions)
self._logger.info("Notifier created")
def run(self):
self._logger.info("Notifier started...")
# --------------------------------------------------------------
# Workaround to avoid hanging of keypad process on create_engine
sleep(5)
# --------------------------------------------------------------
self._db_session = Session()
self._options = self.get_options()
self._logger.info("Subscription configuration: %s", self._options["subscriptions"])
self._gsm.setup()
while True:
message = None
try:
message = self._actions.get(timeout=Notifier.RETRY_WAIT)
except Empty:
# self._logger.debug("No message found")
pass
# handle monitoring and notification actions
if message and "action" in message:
if message["action"] == MONITOR_STOP:
break
elif message["action"] == MONITOR_UPDATE_CONFIG:
self._options = self.get_options()
self._gsm.destroy()
self._gsm = GSM()
self._gsm.setup()
# try to send the message but not forever
if len(self._notifications) > 0:
            notification = self._notifications[0]
            # send the oldest pending notification, not the monitoring action message
            if self.send_message(notification):
self._notifications.pop(0)
else:
notification["retry"] += 1
if notification["retry"] >= Notifier.MAX_RETRY:
self._logger.debug(
"Deleted message after max retry (%s): %s", Notifier.MAX_RETRY, self._notifications.pop(0)
)
self._db_session.close()
self._logger.info("Notifier stopped")
def get_options(self):
options = {}
for section_name in ("email", "gsm", "subscriptions"):
section = self._db_session.query(Option).filter_by(name="notifications", section=section_name).first()
options[section_name] = json.loads(section.value) if section else ""
self._logger.debug("Notifier loaded subscriptions: {}".format(options))
return options
def send_message(self, message):
self._logger.info("Sending message: %s", message)
success = False
has_subscription = False
try:
if self._options["subscriptions"]["sms"][message["type"]]:
if message["type"] == ALERT_STARTED:
has_subscription = True
success |= self.notify_alert_started_SMS(message)
elif message["type"] == ALERT_STOPPED:
has_subscription = True
success |= self.notify_alert_stopped_SMS(message)
if self._options["subscriptions"]["email"][message["type"]]:
if message["type"] == ALERT_STARTED:
has_subscription = True
success |= self.notify_alert_started_email(message)
elif message["type"] == ALERT_STOPPED:
has_subscription = True
success |= self.notify_alert_stopped_email(message)
except (KeyError, TypeError) as error:
self._logger.info("Failed to send message: '%s'! (%s)", message, error)
except Exception:
self._logger.exception("Sending message failed!")
return success or not has_subscription
def notify_alert_started_SMS(self, message):
return self.notify_SMS(ALERT_STARTED_SMS.format(**message))
def notify_alert_stopped_SMS(self, message):
return self.notify_SMS(ALERT_STOPPED_SMS.format(**message))
def notify_alert_started_email(self, message):
return self.notify_email("Alert started", ALERT_STARTED_EMAIL.format(**message))
def notify_alert_stopped_email(self, message):
return self.notify_email("Alert stopped", ALERT_STOPPED_EMAIL.format(**message))
def notify_SMS(self, message):
return self._gsm.sendSMS(self._options["gsm"]["phone_number"], message)
def notify_email(self, subject, content):
self._logger.info("Sending email ...")
try:
server = smtplib.SMTP("smtp.gmail.com:587")
server.ehlo()
server.starttls()
server.login(self._options["email"]["smtp_username"], self._options["email"]["smtp_password"])
message = "Subject: {}\n\n{}".format(subject, content).encode(encoding="utf_8", errors="strict")
server.sendmail(from_addr="info@argus", to_addrs=self._options["email"]["email_address"], msg=message)
server.quit()
except SMTPException as error:
self._logger.error("Can't send email %s ", error)
return False
self._logger.info("Sent email")
return True
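# --- Editor's wiring sketch (hypothetical; the Broadcaster constructor arguments
# are an assumption, not taken from this file) --------------------------------
#
#     broadcaster = Broadcaster()
#     notifier = Notifier(broadcaster)
#     notifier.start()  # run() consumes broadcast actions and pending notifications
#     Notifier.notify_alert_started(1, ["Front door"], "2021-06-02 04:07:35")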
| 33.864865
| 118
| 0.58606
|
794bb30cdaf3231eaa3b6ffc815950532b159867
| 8,474
|
py
|
Python
|
shapely/affinity.py
|
gisjedi/Shapely
|
2555c8ba80cf275783df8a927a19bef2c8283206
|
[
"BSD-3-Clause"
] | 1
|
2015-04-29T18:09:02.000Z
|
2015-04-29T18:09:02.000Z
|
shapely/affinity.py
|
gisjedi/Shapely
|
2555c8ba80cf275783df8a927a19bef2c8283206
|
[
"BSD-3-Clause"
] | null | null | null |
shapely/affinity.py
|
gisjedi/Shapely
|
2555c8ba80cf275783df8a927a19bef2c8283206
|
[
"BSD-3-Clause"
] | null | null | null |
"""Affine transforms, both in general and specific, named transforms."""
from math import sin, cos, tan, pi
__all__ = ['affine_transform', 'rotate', 'scale', 'skew', 'translate']
def affine_transform(geom, matrix):
"""Returns a transformed geometry using an affine transformation matrix.
The coefficient matrix is provided as a list or tuple with 6 or 12 items
for 2D or 3D transformations, respectively.
For 2D affine transformations, the 6 parameter matrix is:
[a, b, d, e, xoff, yoff]
which represents the augmented matrix:
/ a b xoff \
[x' y' 1] = [x y 1] | d e yoff |
\ 0 0 1 /
or the equations for the transformed coordinates:
x' = a * x + b * y + xoff
y' = d * x + e * y + yoff
For 3D affine transformations, the 12 parameter matrix is:
[a, b, c, d, e, f, g, h, i, xoff, yoff, zoff]
which represents the augmented matrix:
/ a b c xoff \
[x' y' z' 1] = [x y z 1] | d e f yoff |
| g h i zoff |
\ 0 0 0 1 /
or the equations for the transformed coordinates:
x' = a * x + b * y + c * z + xoff
y' = d * x + e * y + f * z + yoff
z' = g * x + h * y + i * z + zoff
"""
if geom.is_empty:
return geom
if len(matrix) == 6:
ndim = 2
a, b, d, e, xoff, yoff = matrix
if geom.has_z:
ndim = 3
i = 1.0
c = f = g = h = zoff = 0.0
matrix = a, b, c, d, e, f, g, h, i, xoff, yoff, zoff
elif len(matrix) == 12:
ndim = 3
a, b, c, d, e, f, g, h, i, xoff, yoff, zoff = matrix
if not geom.has_z:
ndim = 2
matrix = a, b, d, e, xoff, yoff
else:
raise ValueError("'matrix' expects either 6 or 12 coefficients")
def affine_pts(pts):
"""Internal function to yield affine transform of coordinate tuples"""
if ndim == 2:
for x, y in pts:
xp = a * x + b * y + xoff
yp = d * x + e * y + yoff
yield (xp, yp)
elif ndim == 3:
for x, y, z in pts:
xp = a * x + b * y + c * z + xoff
yp = d * x + e * y + f * z + yoff
zp = g * x + h * y + i * z + zoff
yield (xp, yp, zp)
# Process coordinates from each supported geometry type
if geom.type in ('Point', 'LineString'):
return type(geom)(list(affine_pts(geom.coords)))
elif geom.type == 'Polygon':
ring = geom.exterior
shell = type(ring)(list(affine_pts(ring.coords)))
holes = list(geom.interiors)
for pos, ring in enumerate(holes):
holes[pos] = type(ring)(list(affine_pts(ring.coords)))
return type(geom)(shell, holes)
elif geom.type.startswith('Multi') or geom.type == 'GeometryCollection':
# Recursive call
# TODO: fix GeometryCollection constructor
return type(geom)([affine_transform(part, matrix)
for part in geom.geoms])
else:
raise ValueError('Type %r not recognized' % geom.type)
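# Editor's worked example of the 2D matrix form documented in affine_transform:
# with [a, b, d, e, xoff, yoff] = [1, 0, 0, 1, 5, 5] the transform is a pure
# translation, so Point(1, 2) maps to (1*1 + 0*2 + 5, 0*1 + 1*2 + 5) = (6, 7).
#
#     >>> from shapely.geometry import Point
#     >>> affine_transform(Point(1, 2), [1, 0, 0, 1, 5, 5]).coords[0]
#     (6.0, 7.0)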
def interpret_origin(geom, origin, ndim):
"""Returns interpreted coordinate tuple for origin parameter.
This is a helper function for other transform functions.
The point of origin can be a keyword 'center' for the 2D bounding box
center, 'centroid' for the geometry's 2D centroid, a Point object or a
coordinate tuple (x0, y0, z0).
"""
# get coordinate tuple from 'origin' from keyword or Point type
if origin == 'center':
# bounding box center
minx, miny, maxx, maxy = geom.bounds
origin = ((maxx + minx)/2.0, (maxy + miny)/2.0)
elif origin == 'centroid':
origin = geom.centroid.coords[0]
elif isinstance(origin, str):
raise ValueError("'origin' keyword %r is not recognized" % origin)
elif hasattr(origin, 'type') and origin.type == 'Point':
origin = origin.coords[0]
# origin should now be tuple-like
if len(origin) not in (2, 3):
raise ValueError("Expected number of items in 'origin' to be "
"either 2 or 3")
if ndim == 2:
return origin[0:2]
else: # 3D coordinate
if len(origin) == 2:
return origin + (0.0,)
else:
return origin
def rotate(geom, angle, origin='center', use_radians=False):
"""Returns a rotated geometry on a 2D plane.
The angle of rotation can be specified in either degrees (default) or
radians by setting ``use_radians=True``. Positive angles are
counter-clockwise and negative are clockwise rotations.
The point of origin can be a keyword 'center' for the bounding box
center (default), 'centroid' for the geometry's centroid, a Point object
or a coordinate tuple (x0, y0).
The affine transformation matrix for 2D rotation is:
/ cos(r) -sin(r) xoff \
| sin(r) cos(r) yoff |
\ 0 0 1 /
where the offsets are calculated from the origin Point(x0, y0):
xoff = x0 - x0 * cos(r) + y0 * sin(r)
yoff = y0 - x0 * sin(r) - y0 * cos(r)
"""
if not use_radians: # convert from degrees
angle *= pi/180.0
cosp = cos(angle)
sinp = sin(angle)
if abs(cosp) < 2.5e-16:
cosp = 0.0
if abs(sinp) < 2.5e-16:
sinp = 0.0
x0, y0 = interpret_origin(geom, origin, 2)
matrix = (cosp, -sinp, 0.0,
sinp, cosp, 0.0,
0.0, 0.0, 1.0,
x0 - x0 * cosp + y0 * sinp, y0 - x0 * sinp - y0 * cosp, 0.0)
return affine_transform(geom, matrix)
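# Editor's worked example: a 90 degree counter-clockwise rotation about (0, 0)
# sends (1, 0) to (0, 1); note that cos(90 deg) is snapped to exactly 0.0 by the
# 2.5e-16 tolerance above.
#
#     >>> from shapely.geometry import LineString
#     >>> list(rotate(LineString([(0, 0), (1, 0)]), 90, origin=(0, 0)).coords)
#     [(0.0, 0.0), (0.0, 1.0)]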
def scale(geom, xfact=1.0, yfact=1.0, zfact=1.0, origin='center'):
"""Returns a scaled geometry, scaled by factors along each dimension.
The point of origin can be a keyword 'center' for the 2D bounding box
center (default), 'centroid' for the geometry's 2D centroid, a Point
object or a coordinate tuple (x0, y0, z0).
Negative scale factors will mirror or reflect coordinates.
The general 3D affine transformation matrix for scaling is:
/ xfact 0 0 xoff \
| 0 yfact 0 yoff |
| 0 0 zfact zoff |
\ 0 0 0 1 /
where the offsets are calculated from the origin Point(x0, y0, z0):
xoff = x0 - x0 * xfact
yoff = y0 - y0 * yfact
zoff = z0 - z0 * zfact
"""
x0, y0, z0 = interpret_origin(geom, origin, 3)
matrix = (xfact, 0.0, 0.0,
0.0, yfact, 0.0,
0.0, 0.0, zfact,
x0 - x0 * xfact, y0 - y0 * yfact, z0 - z0 * zfact)
return affine_transform(geom, matrix)
def skew(geom, xs=0.0, ys=0.0, origin='center', use_radians=False):
"""Returns a skewed geometry, sheared by angles along x and y dimensions.
The shear angle can be specified in either degrees (default) or radians
by setting ``use_radians=True``.
The point of origin can be a keyword 'center' for the bounding box
center (default), 'centroid' for the geometry's centroid, a Point object
or a coordinate tuple (x0, y0).
The general 2D affine transformation matrix for skewing is:
/ 1 tan(xs) xoff \
| tan(ys) 1 yoff |
\ 0 0 1 /
where the offsets are calculated from the origin Point(x0, y0):
xoff = -y0 * tan(xs)
yoff = -x0 * tan(ys)
"""
if not use_radians: # convert from degrees
xs *= pi/180.0
ys *= pi/180.0
tanx = tan(xs)
tany = tan(ys)
if abs(tanx) < 2.5e-16:
tanx = 0.0
if abs(tany) < 2.5e-16:
tany = 0.0
x0, y0 = interpret_origin(geom, origin, 2)
matrix = (1.0, tanx, 0.0,
tany, 1.0, 0.0,
0.0, 0.0, 1.0,
-y0 * tanx, -x0 * tany, 0.0)
return affine_transform(geom, matrix)
def translate(geom, xoff=0.0, yoff=0.0, zoff=0.0):
"""Returns a translated geometry shifted by offsets along each dimension.
The general 3D affine transformation matrix for translation is:
/ 1 0 0 xoff \
| 0 1 0 yoff |
| 0 0 1 zoff |
\ 0 0 0 1 /
"""
matrix = (1.0, 0.0, 0.0,
0.0, 1.0, 0.0,
0.0, 0.0, 1.0,
xoff, yoff, zoff)
return affine_transform(geom, matrix)
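# Editor's worked example: scale and translate compose as expected; doubling x
# about the origin and then shifting by (1, 2) sends (3, 4) to (7, 6).
#
#     >>> from shapely.geometry import Point
#     >>> translate(scale(Point(3, 4), xfact=2.0, origin=(0, 0)), xoff=1.0, yoff=2.0).coords[0]
#     (7.0, 6.0)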
| 33.101563
| 78
| 0.547439
|
794bb30ef46251f70b96a4dc008b4b2ba2c63fb8
| 6,875
|
py
|
Python
|
codebase/train.py
|
vishalbelsare/AmortizedCausalDiscovery
|
eaea1e4be3583b896bd9c2653e87869b302dd7c4
|
[
"MIT"
] | null | null | null |
codebase/train.py
|
vishalbelsare/AmortizedCausalDiscovery
|
eaea1e4be3583b896bd9c2653e87869b302dd7c4
|
[
"MIT"
] | null | null | null |
codebase/train.py
|
vishalbelsare/AmortizedCausalDiscovery
|
eaea1e4be3583b896bd9c2653e87869b302dd7c4
|
[
"MIT"
] | null | null | null |
from __future__ import division
from __future__ import print_function
from collections import defaultdict
import time
import numpy as np
import torch
from model.modules import *
from utils import arg_parser, logger, data_loader, forward_pass_and_eval
from model import utils, model_loader
def train():
best_val_loss = np.inf
best_epoch = 0
for epoch in range(args.epochs):
t_epoch = time.time()
train_losses = defaultdict(list)
for batch_idx, minibatch in enumerate(train_loader):
data, relations, temperatures = data_loader.unpack_batches(args, minibatch)
optimizer.zero_grad()
losses, _, _, _ = forward_pass_and_eval.forward_pass_and_eval(
args,
encoder,
decoder,
data,
relations,
rel_rec,
rel_send,
args.hard,
edge_probs=edge_probs,
log_prior=log_prior,
temperatures=temperatures,
)
loss = losses["loss"]
loss.backward()
optimizer.step()
train_losses = utils.append_losses(train_losses, losses)
string = logs.result_string("train", epoch, train_losses, t=t_epoch)
logs.write_to_log_file(string)
logs.append_train_loss(train_losses)
scheduler.step()
if args.validate:
val_losses = val(epoch)
val_loss = np.mean(val_losses["loss"])
if val_loss < best_val_loss:
print("Best model so far, saving...")
logs.create_log(
args,
encoder=encoder,
decoder=decoder,
optimizer=optimizer,
accuracy=np.mean(val_losses["acc"]),
)
best_val_loss = val_loss
best_epoch = epoch
elif (epoch + 1) % 100 == 0:
logs.create_log(
args,
encoder=encoder,
decoder=decoder,
optimizer=optimizer,
accuracy=np.mean(train_losses["acc"]),
)
logs.draw_loss_curves()
return best_epoch, epoch
def val(epoch):
t_val = time.time()
val_losses = defaultdict(list)
if args.use_encoder:
encoder.eval()
decoder.eval()
for batch_idx, minibatch in enumerate(valid_loader):
data, relations, temperatures = data_loader.unpack_batches(args, minibatch)
with torch.no_grad():
losses, _, _, _ = forward_pass_and_eval.forward_pass_and_eval(
args,
encoder,
decoder,
data,
relations,
rel_rec,
rel_send,
True,
edge_probs=edge_probs,
log_prior=log_prior,
testing=True,
temperatures=temperatures,
)
val_losses = utils.append_losses(val_losses, losses)
string = logs.result_string("validate", epoch, val_losses, t=t_val)
logs.write_to_log_file(string)
logs.append_val_loss(val_losses)
if args.use_encoder:
encoder.train()
decoder.train()
return val_losses
def test(encoder, decoder, epoch):
args.shuffle_unobserved = False
# args.prediction_steps = 49
test_losses = defaultdict(list)
if args.load_folder == "":
## load model that had the best validation performance during training
if args.use_encoder:
encoder.load_state_dict(torch.load(args.encoder_file))
decoder.load_state_dict(torch.load(args.decoder_file))
if args.use_encoder:
encoder.eval()
decoder.eval()
for batch_idx, minibatch in enumerate(test_loader):
data, relations, temperatures = data_loader.unpack_batches(args, minibatch)
with torch.no_grad():
assert (data.size(2) - args.timesteps) >= args.timesteps
data_encoder = data[:, :, : args.timesteps, :].contiguous()
data_decoder = data[:, :, args.timesteps : -1, :].contiguous()
            losses, _, _, _ = forward_pass_and_eval.forward_pass_and_eval(
args,
encoder,
decoder,
data,
relations,
rel_rec,
rel_send,
True,
data_encoder=data_encoder,
data_decoder=data_decoder,
edge_probs=edge_probs,
log_prior=log_prior,
testing=True,
temperatures=temperatures,
)
test_losses = utils.append_losses(test_losses, losses)
string = logs.result_string("test", epoch, test_losses)
logs.write_to_log_file(string)
logs.append_test_loss(test_losses)
logs.create_log(
args,
decoder=decoder,
encoder=encoder,
optimizer=optimizer,
final_test=True,
test_losses=test_losses,
)
if __name__ == "__main__":
args = arg_parser.parse_args()
logs = logger.Logger(args)
if args.GPU_to_use is not None:
logs.write_to_log_file("Using GPU #" + str(args.GPU_to_use))
(
train_loader,
valid_loader,
test_loader,
loc_max,
loc_min,
vel_max,
vel_min,
) = data_loader.load_data(args)
rel_rec, rel_send = utils.create_rel_rec_send(args, args.num_atoms)
encoder, decoder, optimizer, scheduler, edge_probs = model_loader.load_model(
args, loc_max, loc_min, vel_max, vel_min
)
logs.write_to_log_file(encoder)
logs.write_to_log_file(decoder)
if args.prior != 1:
assert 0 <= args.prior <= 1, "args.prior not in the right range"
prior = np.array(
[args.prior]
+ [
(1 - args.prior) / (args.edge_types - 1)
for _ in range(args.edge_types - 1)
]
)
logs.write_to_log_file("Using prior")
logs.write_to_log_file(prior)
log_prior = torch.FloatTensor(np.log(prior))
log_prior = log_prior.unsqueeze(0).unsqueeze(0)
if args.cuda:
log_prior = log_prior.cuda()
else:
log_prior = None
if args.global_temp:
args.categorical_temperature_prior = utils.get_categorical_temperature_prior(
args.alpha, args.num_cats, to_cuda=args.cuda
)
    # Train model
try:
if args.test_time_adapt:
raise KeyboardInterrupt
best_epoch, epoch = train()
except KeyboardInterrupt:
best_epoch, epoch = -1, -1
print("Optimization Finished!")
logs.write_to_log_file("Best Epoch: {:04d}".format(best_epoch))
if args.test:
test(encoder, decoder, epoch)
| 27.721774
| 87
| 0.574836
|
794bb351e234495c2fa7d594ae3aa09e6a3ae9cc
| 575
|
py
|
Python
|
python-while/desafio2.py
|
MaylaAlves/phyton_1
|
520a0489366df33c8cc233f9487f93f0b305c69f
|
[
"MIT"
] | null | null | null |
python-while/desafio2.py
|
MaylaAlves/phyton_1
|
520a0489366df33c8cc233f9487f93f0b305c69f
|
[
"MIT"
] | null | null | null |
python-while/desafio2.py
|
MaylaAlves/phyton_1
|
520a0489366df33c8cc233f9487f93f0b305c69f
|
[
"MIT"
] | null | null | null |
import random
valor = random.randint(1, 10)
contador = 0
chute = 0
print('Guess a number between 1 and 10')
while chute != valor:
    contador += 1
    chute = input(f'Enter guess #{contador}: ')
    if chute.isnumeric():
        chute = int(chute)
    else:
        print('Numbers only, please!')
        continue
    if chute > valor:
        print('Your guess is too high, try again!')
    elif chute < valor:
        print('Your guess is too low, please try again!')
    else:
        print(f'You guessed {contador} times!')
| 23.958333
| 79
| 0.627826
|
794bb41551132b6a6c58319f0cf295db30cc9c45
| 177
|
py
|
Python
|
SimTracker/TrackerMaterialAnalysis/python/trackingMaterialAnalyser_ForHGCalPhaseII_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
SimTracker/TrackerMaterialAnalysis/python/trackingMaterialAnalyser_ForHGCalPhaseII_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
SimTracker/TrackerMaterialAnalysis/python/trackingMaterialAnalyser_ForHGCalPhaseII_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
# Analyze and plot the tracking material
from SimTracker.TrackerMaterialAnalysis.trackingMaterialAnalyser_ForHGCalPhaseII_cfi import *
| 35.4
| 93
| 0.875706
|
794bb4337eef7c7ffa4a2181fd12fcbaf2a49cb5
| 14,666
|
py
|
Python
|
cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_base.py
|
potsmaster/cinder
|
275c2acdfb4282b0ec0314c9875b759958c093f8
|
[
"Apache-2.0"
] | null | null | null |
cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_base.py
|
potsmaster/cinder
|
275c2acdfb4282b0ec0314c9875b759958c093f8
|
[
"Apache-2.0"
] | null | null | null |
cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_base.py
|
potsmaster/cinder
|
275c2acdfb4282b0ec0314c9875b759958c093f8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2014 Andrew Kerr. All rights reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit tests for the NetApp NFS storage driver
"""
import os
import copy
import mock
from os_brick.remotefs import remotefs as remotefs_brick
from oslo_utils import units
from cinder import exception
from cinder import test
from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake
from cinder import utils
from cinder.volume.drivers.netapp.dataontap import nfs_base
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume.drivers import nfs
class NetAppNfsDriverTestCase(test.TestCase):
def setUp(self):
super(NetAppNfsDriverTestCase, self).setUp()
configuration = mock.Mock()
configuration.nfs_mount_point_base = '/mnt/test'
configuration.nfs_used_ratio = 0.89
configuration.nfs_oversub_ratio = 3.0
kwargs = {'configuration': configuration}
with mock.patch.object(utils, 'get_root_helper',
return_value=mock.Mock()):
with mock.patch.object(remotefs_brick, 'RemoteFsClient',
return_value=mock.Mock()):
self.driver = nfs_base.NetAppNfsDriver(**kwargs)
self.driver.ssc_enabled = False
self.driver.db = mock.Mock()
@mock.patch.object(nfs.NfsDriver, 'do_setup')
@mock.patch.object(na_utils, 'check_flags')
def test_do_setup(self, mock_check_flags, mock_super_do_setup):
self.driver.do_setup(mock.Mock())
self.assertTrue(mock_check_flags.called)
self.assertTrue(mock_super_do_setup.called)
def test_get_share_capacity_info(self):
mock_get_capacity = self.mock_object(self.driver, '_get_capacity_info')
mock_get_capacity.return_value = fake.CAPACITY_VALUES
expected_total_capacity_gb = (na_utils.round_down(
(fake.TOTAL_BYTES *
self.driver.configuration.nfs_oversub_ratio) /
units.Gi, '0.01'))
expected_free_capacity_gb = (na_utils.round_down(
(fake.AVAILABLE_BYTES *
self.driver.configuration.nfs_oversub_ratio) /
units.Gi, '0.01'))
expected_reserved_percentage = round(
100 * (1 - self.driver.configuration.nfs_used_ratio))
result = self.driver._get_share_capacity_info(fake.NFS_SHARE)
self.assertEqual(expected_total_capacity_gb,
result['total_capacity_gb'])
self.assertEqual(expected_free_capacity_gb,
result['free_capacity_gb'])
self.assertEqual(expected_reserved_percentage,
result['reserved_percentage'])
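    # Editor's note on the arithmetic above: with the setUp configuration
    # (nfs_used_ratio=0.89, nfs_oversub_ratio=3.0) the expected reserved
    # percentage is round(100 * (1 - 0.89)) == 11, and the reported capacities
    # are the fake byte counts scaled by 3.0 and rounded down to 0.01 GiB.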
def test_get_capacity_info_ipv4_share(self):
expected = fake.CAPACITY_VALUES
self.driver.zapi_client = mock.Mock()
get_capacity = self.driver.zapi_client.get_flexvol_capacity
get_capacity.return_value = fake.CAPACITY_VALUES
result = self.driver._get_capacity_info(fake.NFS_SHARE_IPV4)
self.assertEqual(expected, result)
get_capacity.assert_has_calls([
mock.call(fake.EXPORT_PATH)])
def test_get_capacity_info_ipv6_share(self):
expected = fake.CAPACITY_VALUES
self.driver.zapi_client = mock.Mock()
get_capacity = self.driver.zapi_client.get_flexvol_capacity
get_capacity.return_value = fake.CAPACITY_VALUES
result = self.driver._get_capacity_info(fake.NFS_SHARE_IPV6)
self.assertEqual(expected, result)
get_capacity.assert_has_calls([
mock.call(fake.EXPORT_PATH)])
def test_create_volume(self):
self.mock_object(self.driver, '_ensure_shares_mounted')
self.mock_object(na_utils, 'get_volume_extra_specs')
self.mock_object(self.driver, '_do_create_volume')
self.mock_object(self.driver, '_do_qos_for_volume')
update_ssc = self.mock_object(self.driver, '_update_stale_vols')
expected = {'provider_location': fake.NFS_SHARE}
result = self.driver.create_volume(fake.NFS_VOLUME)
self.assertEqual(expected, result)
self.assertEqual(0, update_ssc.call_count)
def test_create_volume_no_pool(self):
volume = copy.deepcopy(fake.NFS_VOLUME)
volume['host'] = '%s@%s' % (fake.HOST_NAME, fake.BACKEND_NAME)
self.mock_object(self.driver, '_ensure_shares_mounted')
self.assertRaises(exception.InvalidHost,
self.driver.create_volume,
volume)
def test_create_volume_exception(self):
self.mock_object(self.driver, '_ensure_shares_mounted')
self.mock_object(na_utils, 'get_volume_extra_specs')
mock_create = self.mock_object(self.driver, '_do_create_volume')
mock_create.side_effect = Exception
update_ssc = self.mock_object(self.driver, '_update_stale_vols')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume,
fake.NFS_VOLUME)
self.assertEqual(0, update_ssc.call_count)
def test_create_volume_from_snapshot(self):
provider_location = fake.POOL_NAME
snapshot = fake.CLONE_SOURCE
self.mock_object(self.driver, '_clone_source_to_destination_volume',
mock.Mock(return_value=provider_location))
result = self.driver.create_cloned_volume(fake.NFS_VOLUME,
snapshot)
self.assertEqual(provider_location, result)
def test_clone_source_to_destination_volume(self):
self.mock_object(self.driver, '_get_volume_location', mock.Mock(
return_value=fake.POOL_NAME))
self.mock_object(na_utils, 'get_volume_extra_specs', mock.Mock(
return_value=fake.EXTRA_SPECS))
self.mock_object(
self.driver,
'_clone_with_extension_check')
self.mock_object(self.driver, '_do_qos_for_volume')
expected = {'provider_location': fake.POOL_NAME}
result = self.driver._clone_source_to_destination_volume(
fake.CLONE_SOURCE, fake.CLONE_DESTINATION)
self.assertEqual(expected, result)
def test_clone_source_to_destination_volume_with_do_qos_exception(self):
self.mock_object(self.driver, '_get_volume_location', mock.Mock(
return_value=fake.POOL_NAME))
self.mock_object(na_utils, 'get_volume_extra_specs', mock.Mock(
return_value=fake.EXTRA_SPECS))
self.mock_object(
self.driver,
'_clone_with_extension_check')
self.mock_object(self.driver, '_do_qos_for_volume', mock.Mock(
side_effect=Exception))
self.assertRaises(
exception.VolumeBackendAPIException,
self.driver._clone_source_to_destination_volume,
fake.CLONE_SOURCE,
fake.CLONE_DESTINATION)
def test_clone_with_extension_check_equal_sizes(self):
clone_source = copy.deepcopy(fake.CLONE_SOURCE)
clone_source['size'] = fake.VOLUME['size']
self.mock_object(self.driver, '_clone_backing_file_for_volume')
self.mock_object(self.driver, 'local_path')
mock_discover = self.mock_object(self.driver,
'_discover_file_till_timeout')
mock_discover.return_value = True
self.mock_object(self.driver, '_set_rw_permissions')
mock_extend_volume = self.mock_object(self.driver, 'extend_volume')
self.driver._clone_with_extension_check(clone_source, fake.NFS_VOLUME)
self.assertEqual(0, mock_extend_volume.call_count)
def test_clone_with_extension_check_unequal_sizes(self):
clone_source = copy.deepcopy(fake.CLONE_SOURCE)
clone_source['size'] = fake.VOLUME['size'] + 1
self.mock_object(self.driver, '_clone_backing_file_for_volume')
self.mock_object(self.driver, 'local_path')
mock_discover = self.mock_object(self.driver,
'_discover_file_till_timeout')
mock_discover.return_value = True
self.mock_object(self.driver, '_set_rw_permissions')
mock_extend_volume = self.mock_object(self.driver, 'extend_volume')
self.driver._clone_with_extension_check(clone_source, fake.NFS_VOLUME)
self.assertEqual(1, mock_extend_volume.call_count)
def test_clone_with_extension_check_extend_exception(self):
clone_source = copy.deepcopy(fake.CLONE_SOURCE)
clone_source['size'] = fake.VOLUME['size'] + 1
self.mock_object(self.driver, '_clone_backing_file_for_volume')
self.mock_object(self.driver, 'local_path')
mock_discover = self.mock_object(self.driver,
'_discover_file_till_timeout')
mock_discover.return_value = True
self.mock_object(self.driver, '_set_rw_permissions')
mock_extend_volume = self.mock_object(self.driver, 'extend_volume')
mock_extend_volume.side_effect = Exception
mock_cleanup = self.mock_object(self.driver,
'_cleanup_volume_on_failure')
self.assertRaises(exception.CinderException,
self.driver._clone_with_extension_check,
clone_source,
fake.NFS_VOLUME)
self.assertEqual(1, mock_cleanup.call_count)
def test_clone_with_extension_check_no_discovery(self):
self.mock_object(self.driver, '_clone_backing_file_for_volume')
self.mock_object(self.driver, 'local_path')
self.mock_object(self.driver, '_set_rw_permissions')
mock_discover = self.mock_object(self.driver,
'_discover_file_till_timeout')
mock_discover.return_value = False
self.assertRaises(exception.CinderException,
self.driver._clone_with_extension_check,
fake.CLONE_SOURCE,
fake.NFS_VOLUME)
def test_create_cloned_volume(self):
provider_location = fake.POOL_NAME
src_vref = fake.CLONE_SOURCE
self.mock_object(self.driver, '_clone_source_to_destination_volume',
mock.Mock(return_value=provider_location))
result = self.driver.create_cloned_volume(fake.NFS_VOLUME,
src_vref)
self.assertEqual(provider_location, result)
def test_do_qos_for_volume(self):
self.assertRaises(NotImplementedError,
self.driver._do_qos_for_volume,
fake.NFS_VOLUME,
fake.EXTRA_SPECS)
def test_cleanup_volume_on_failure(self):
path = '%s/%s' % (fake.NFS_SHARE, fake.NFS_VOLUME['name'])
mock_local_path = self.mock_object(self.driver, 'local_path')
mock_local_path.return_value = path
mock_exists_check = self.mock_object(os.path, 'exists')
mock_exists_check.return_value = True
mock_delete = self.mock_object(self.driver, '_delete_file_at_path')
self.driver._cleanup_volume_on_failure(fake.NFS_VOLUME)
mock_delete.assert_has_calls([mock.call(path)])
def test_cleanup_volume_on_failure_no_path(self):
self.mock_object(self.driver, 'local_path')
mock_exists_check = self.mock_object(os.path, 'exists')
mock_exists_check.return_value = False
mock_delete = self.mock_object(self.driver, '_delete_file_at_path')
self.driver._cleanup_volume_on_failure(fake.NFS_VOLUME)
self.assertEqual(0, mock_delete.call_count)
def test_get_vol_for_share(self):
self.assertRaises(NotImplementedError,
self.driver._get_vol_for_share,
fake.NFS_SHARE)
def test_get_export_ip_path_volume_id_provided(self):
mock_get_host_ip = self.mock_object(self.driver, '_get_host_ip')
mock_get_host_ip.return_value = fake.IPV4_ADDRESS
mock_get_export_path = self.mock_object(
self.driver, '_get_export_path')
mock_get_export_path.return_value = fake.EXPORT_PATH
expected = (fake.IPV4_ADDRESS, fake.EXPORT_PATH)
result = self.driver._get_export_ip_path(fake.VOLUME_ID)
self.assertEqual(expected, result)
def test_get_export_ip_path_share_provided(self):
expected = (fake.SHARE_IP, fake.EXPORT_PATH)
result = self.driver._get_export_ip_path(share=fake.NFS_SHARE)
self.assertEqual(expected, result)
def test_get_export_ip_path_volume_id_and_share_provided(self):
mock_get_host_ip = self.mock_object(self.driver, '_get_host_ip')
mock_get_host_ip.return_value = fake.IPV4_ADDRESS
mock_get_export_path = self.mock_object(
self.driver, '_get_export_path')
mock_get_export_path.return_value = fake.EXPORT_PATH
expected = (fake.IPV4_ADDRESS, fake.EXPORT_PATH)
result = self.driver._get_export_ip_path(
fake.VOLUME_ID, fake.NFS_SHARE)
self.assertEqual(expected, result)
def test_get_export_ip_path_no_args(self):
self.assertRaises(exception.InvalidInput,
self.driver._get_export_ip_path)
def test_get_host_ip(self):
mock_get_provider_location = self.mock_object(
self.driver, '_get_provider_location')
mock_get_provider_location.return_value = fake.NFS_SHARE
expected = fake.SHARE_IP
result = self.driver._get_host_ip(fake.VOLUME_ID)
self.assertEqual(expected, result)
def test_get_export_path(self):
mock_get_provider_location = self.mock_object(
self.driver, '_get_provider_location')
mock_get_provider_location.return_value = fake.NFS_SHARE
expected = fake.EXPORT_PATH
result = self.driver._get_export_path(fake.VOLUME_ID)
self.assertEqual(expected, result)
| 41.429379
| 79
| 0.676326
|
794bb5a4a32801e59a0b29705dcf4a8e73d795df
| 10,257
|
py
|
Python
|
src/api/dataflow/batch/views/debug_views.py
|
Chromico/bk-base
|
be822d9bbee544a958bed4831348185a75604791
|
[
"MIT"
] | 84
|
2021-06-30T06:20:23.000Z
|
2022-03-22T03:05:49.000Z
|
src/api/dataflow/batch/views/debug_views.py
|
Chromico/bk-base
|
be822d9bbee544a958bed4831348185a75604791
|
[
"MIT"
] | 7
|
2021-06-30T06:21:16.000Z
|
2022-03-29T07:36:13.000Z
|
src/api/dataflow/batch/views/debug_views.py
|
Chromico/bk-base
|
be822d9bbee544a958bed4831348185a75604791
|
[
"MIT"
] | 40
|
2021-06-30T06:21:26.000Z
|
2022-03-29T12:42:26.000Z
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from common.bklanguage import BkLanguage
from common.decorators import detail_route, params_valid
from common.local import get_request_username
from common.views import APIViewSet
from rest_framework.response import Response
from dataflow.batch.debug.debug_driver import (
create_debug,
get_basic_info,
get_node_info,
set_error_data,
set_result_data,
stop_debug,
update_metric_info,
)
from dataflow.batch.serializer.serializers import (
DebugCreateSerializer,
DebugSerializer,
GetNodeInfoSerializer,
MetricInfoSerializer,
SaveErrorDataSerializer,
SaveResultDataSerializer,
)
from dataflow.shared.jobnavi.jobnavi_helper import JobNaviHelper
class DebugViewSet(APIViewSet):
"""
Debug API
"""
lookup_field = "debug_id"
# lookup_value_regex = '\d+'
@detail_route(methods=["get"], url_path="basic_info")
def basic_info(self, request, debug_id):
"""
@api {get} /dataflow/batch/debugs/:debug_id/basic_info Get debug basic info
@apiName debugs/:debug_id/basic_info
@apiGroup batch
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"message": "ok",
"code": "1500200",
"data": {
"result_tables":{
"123_parser":{
"output_total_count":2190
},
"123_filter":{
"output_total_count":200
}
},
"debug_error":{
"error_result_table_id":"123_filter"
}
},
"result": true
}
"""
basic_info = get_basic_info(debug_id)
return Response(basic_info)
@detail_route(methods=["get"], url_path="node_info")
@params_valid(serializer=GetNodeInfoSerializer)
def node_info(self, request, debug_id, params):
"""
@api {get} /dataflow/batch/debugs/:debug_id/node_info Get debug node info
@apiName debugs/:debug_id/node_info
@apiGroup batch
@apiParam {string} job_id
@apiParam {string} result_table_id
@apiParamExample {json} Request example:
{
"result_table_id": "xxxx"
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"message": "ok",
"code": "1500200",
"data": {
"debug_errcode":{
"error_code":101,
"error_message":"在【123_test】中的字段【aaa】转换失败"
},
"debug_metric":{
"input_total_count":45210,
"output_total_count":0,
},
"debug_data":{
"result_data":[
{"ip":"x.x.x.x","cc_set":"test","cc_module":"test"},
{"ip":"x.x.x.x","cc_set":"test","cc_module":"test"}
]
}
},
"result": true
}
"""
language = BkLanguage.get_user_language(get_request_username())
node_info = get_node_info(params, debug_id, language)
return Response(node_info)
@params_valid(serializer=DebugCreateSerializer)
def create(self, request, params):
"""
@api {post} /dataflow/batch/debugs/ Create a debug task
@apiName debugs/
@apiGroup batch
@apiParam {String} heads heads of the result_tables, multiple heads separated by commas
@apiParam {String} tails tails of the result_tables, multiple tails separated by commas
@apiParam {String} jobserver_config
@apiParamExample {json} Request example:
{
"heads": "123_filter1,123_batch_2",
"tails": "123_batch_3,123_batch_4",
"jobserver_config": "stream"
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"message": "ok",
"code": "1500200",
"data": {
"debug_id": "debug_XXXX"
},
"result": true
}
"""
debug_id = create_debug(params)
return Response({"debug_id": debug_id})
@detail_route(methods=["post"], url_path="stop")
@params_valid(serializer=DebugSerializer)
def stop(self, request, debug_id, params):
"""
@api {post} /dataflow/batch/debugs/:debug_id/stop Stop a debug task
@apiName debugs/:debug_id/stop
@apiGroup batch
@apiParamExample {json} Request example:
{
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"message": "ok",
"code": "1500200",
"data": null,
"result": true
}
"""
geog_area_code = params["geog_area_code"]
cluster_id = JobNaviHelper.get_jobnavi_cluster("batch")
stop_debug(debug_id, geog_area_code, cluster_id)
return Response()
@detail_route(methods=["post"], url_path="error_data")
@params_valid(serializer=SaveErrorDataSerializer)
def error_data(self, request, debug_id, params):
"""
@api {post} /dataflow/batch/debugs/:debug_id/error_data Set error info
@apiName debugs/:debug_id/error_data
@apiGroup batch
@apiParam {string} job_id
@apiParam {string} result_table_id
@apiParam {string} error_code
@apiParam {string} error_message
@apiParam {string} debug_date
@apiParamExample {json} Request example:
{
"job_id": "1234",
"result_table_id": "1_abc",
"error_code": "xxx",
"error_message": "xxxxx",
"debug_date": "1537152075939"
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"message": "ok",
"code": "1500200",
"data": null,
"result": true
}
"""
set_error_data(params, debug_id)
return Response()
@detail_route(methods=["post"], url_path="metric_info")
@params_valid(serializer=MetricInfoSerializer)
def metric_info(self, request, debug_id, params):
"""
@api {post} /dataflow/batch/debugs/:debug_id/metric_info Set node metric info
@apiName debugs/:debug_id/metric_info
@apiGroup batch
@apiParam {string} job_id
@apiParam {long} input_total_count
@apiParam {long} output_total_count
@apiParam {long} filter_discard_count
@apiParam {long} transformer_discard_count
@apiParam {long} aggregator_discard_count
@apiParam {string} result_table_id
@apiParamExample {json} Request example:
{
"job_id": "1234",
"input_total_count": 100,
"output_total_count": 100,
"result_table_id": "2_abc"
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"message": "ok",
"code": "1500200",
"data": null,
"result": true
}
"""
update_metric_info(params, debug_id)
return Response()
@detail_route(methods=["post"], url_path="result_data")
@params_valid(serializer=SaveResultDataSerializer)
def result_data(self, request, debug_id, params):
"""
@api {post} /dataflow/batch/debugs/:debug_id/result_data Set node result data
@apiName debugs/:debug_id/result_data
@apiGroup batch
@apiParam {string} job_id
@apiParam {string} result_table_id
@apiParam {string} result_data
@apiParam {long} debug_date
@apiParam {int} thedate
@apiParamExample {json} Request example:
{
"job_id": "1234",
"result_table_id": "1_abc",
"result_data": "[{"gsid": 130011101},{"gsid": "xxxxxx"}]",
"debug_date": 1537152075939,
"thedate": 20180917
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"message": "ok",
"code": "1500200",
"data": null,
"result": true
}
"""
set_result_data(params, debug_id)
return Response()
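# Editor's note (illustrative sketch, not part of the original module): a viewset like
# this is normally exposed through a DRF router; the registration below is an assumed
# example for illustration only, not the project's real URL configuration.
#
#   from rest_framework.routers import DefaultRouter
#   router = DefaultRouter()
#   router.register(r"debugs", DebugViewSet, basename="batch_debug")
#   urlpatterns = router.urls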
| 36.37234
| 111
| 0.542654
|
794bb68ea588aa83cbd56880c05eed3da723b0ea
| 48,199
|
py
|
Python
|
src/beanmachine/ppl/compiler/special_function_caller.py
|
facebookresearch/beanmachine
|
225114d9964b90c3a49adddc4387b4a47d1b4262
|
[
"MIT"
] | 177
|
2021-12-12T14:19:05.000Z
|
2022-03-24T05:48:10.000Z
|
src/beanmachine/ppl/compiler/special_function_caller.py
|
facebookresearch/beanmachine
|
225114d9964b90c3a49adddc4387b4a47d1b4262
|
[
"MIT"
] | 171
|
2021-12-11T06:12:05.000Z
|
2022-03-31T20:26:29.000Z
|
src/beanmachine/ppl/compiler/special_function_caller.py
|
facebookresearch/beanmachine
|
225114d9964b90c3a49adddc4387b4a47d1b4262
|
[
"MIT"
] | 31
|
2021-12-11T06:27:19.000Z
|
2022-03-25T13:31:56.000Z
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import inspect
import math
import operator
from types import MethodType
from typing import Any, Callable, Dict, List, NoReturn, Optional, Set, Tuple
import beanmachine.ppl.compiler.bmg_nodes as bn
import torch
import torch.distributions as dist
from beanmachine.ppl.compiler.beanstalk_common import allowed_functions
from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder
from beanmachine.ppl.compiler.bmg_nodes import BMGNode
from beanmachine.ppl.compiler.hint import log1mexp, math_log1mexp
_in_place_operator_names = {
operator.iadd: "__iadd__",
operator.iand: "__iand__",
operator.ifloordiv: "__ifloordiv__",
operator.ilshift: "__ilshift__",
operator.imatmul: "__imatmul__",
operator.imod: "__imod__",
operator.imul: "__imul__",
operator.ior: "__ior__",
operator.ipow: "__ipow__",
operator.irshift: "__irshift__",
operator.isub: "__isub__",
operator.itruediv: "__idiv__",
operator.ixor: "__ixor__",
}
_in_place_to_regular = {
operator.iadd: operator.add,
operator.iand: operator.and_,
operator.ifloordiv: operator.floordiv,
operator.ilshift: operator.lshift,
operator.imatmul: operator.matmul,
operator.imod: operator.mod,
operator.imul: operator.mul,
operator.ior: operator.or_,
operator.ipow: operator.pow,
operator.irshift: operator.rshift,
operator.isub: operator.sub,
operator.itruediv: operator.truediv,
operator.ixor: operator.xor,
}
def _raise_unsupported(func: Any) -> NoReturn:
if inspect.ismethoddescriptor(func) or isinstance(
func, _builtin_function_or_method
):
func = func.__name__
raise ValueError(f"Function {func} is not supported by Bean Machine Graph.")
def _is_in_place_operator(func: Callable) -> bool:
return func in _in_place_to_regular
def _ordinary_arg_or_const(arg: Any) -> bool:
return isinstance(arg, bn.ConstantNode) or not isinstance(arg, BMGNode)
def only_ordinary_arguments(args, kwargs) -> bool:
if any(isinstance(arg, BMGNode) for arg in args):
return False
if any(isinstance(arg, BMGNode) for arg in kwargs.values()):
return False
return True
def _only_ordinary_arguments_or_constants(
args: List[Any], kwargs: Dict[str, Any]
) -> bool:
return all(_ordinary_arg_or_const(arg) for arg in args) and all(
_ordinary_arg_or_const(arg) for arg in kwargs.values()
)
def _get_ordinary_value(x: Any) -> Any:
return x.value if isinstance(x, bn.ConstantNode) else x
def _is_standard_normal(x: Any) -> bool:
return isinstance(x, dist.Normal) and x.mean == 0.0 and x.stddev == 1.0
def _is_phi_bound(f: Any, arguments: List[Any], kwargs: Dict[str, Any]) -> bool:
# Is this Normal(0.0, 1.0).cdf(x) ?
# TODO: Support kwargs
return (
isinstance(f, MethodType)
and f.__func__ is dist.Normal.cdf
and len(arguments) == 1
and _is_standard_normal(f.__self__)
)
def _is_phi_unbound(f: Any, arguments: List[Any], kwargs: Dict[str, Any]) -> bool:
# Is this Normal.cdf(Normal(0.0, 1.0), x)?
# TODO: Support kwargs
return (
f is dist.Normal.cdf
and len(arguments) == 2
and _is_standard_normal(arguments[0])
)
def _is_phi(f: Any, arguments: List[Any], kwargs: Dict[str, Any]) -> bool:
return _is_phi_unbound(f, arguments, kwargs) or _is_phi_bound(f, arguments, kwargs)
def _flatten_all_lists(xs):
"""Takes a list-of-lists, with arbitrary nesting level;
returns an iteration of all elements."""
if isinstance(xs, list):
for x in xs:
yield from _flatten_all_lists(x)
else:
yield xs
def _list_to_zeros(xs):
"""Takes a list-of-lists, with arbitrary nesting level;
returns a list-of-lists of the same shape but with every non-list
element replaced with zero."""
if isinstance(xs, list):
return [_list_to_zeros(x) for x in xs]
return 0
def _hashable(x: Any) -> bool:
# Oddly enough, Python does not allow you to test for set inclusion
# if the object is not hashable. Since it is impossible for an unhashable
# object to be in a set, Python could simply say no when asked if a set
# contains any unhashable object. It does not, so we are forced to do so.
# All hashable objects have a callable __hash__ attribute.
if not hasattr(x, "__hash__"):
return False
if not isinstance(x.__hash__, Callable):
return False
# It is possible that callable __hash__ exists but throws, which makes it
# unhashable. Eliminate that possibility as well.
try:
hash(x)
except Exception:
return False
return True
_empty_args = []
_empty_kwargs = {}
# Oddly enough there does not appear to be an easy way to obtain the type
# of builtin methods.
_builtin_function_or_method = type(abs)
def _is_any_torch_function(f: Callable) -> bool:
# Torch functions we either know about or we reject them immediately;
# we do not attempt to extract a graph of a model which contains
# a call to an unknown torch function with stochastic arguments.
#
# Given a reference to a function, how can we know if it is
# a torch function? Torch does not make it very easy on us to figure
# out what module a function is from. Let's choose some typical
# methods as examples, like arccos or erf:
#
# * torch.Tensor.arccos has no __module__ attribute.
# * torch.arccos.__module__ is None but .__objclass__ has a module string.
# * torch.special.erf.__module__ is the string "torch.special.erf.__module__"
# * torch.tensor(1).arccos.__module__ is None and has no .__objclass__, but
# does have a __self__ with a module.
#
# Our first step then is to see if we have a module.
m = getattr(f, "__module__", None)
if m is None:
# We don't have a module. Do we have an __objclass__ with a module?
oc = getattr(f, "__objclass__", None)
if oc is not None:
m = getattr(oc, "__module__", None)
if m is None:
# We still don't have a module. Maybe __self__ has a module.
s = getattr(f, "__self__", None)
if s is not None:
m = getattr(s, "__module__", None)
if m is not None:
return isinstance(m, str) and (m == "torch" or m.startswith("torch."))
# We don't have a module or an objclass.
#
# If we have something like torch.arccos then we can simply
# check the torch module to see if we can find this exact reference.
return any(item is f for _, item in torch.__dict__.items())
def _is_tensor_unbound_instance_method(f: Callable) -> bool:
# This identifies if a function object is a method *descriptor*
# such as torch.Tensor.add; that is, the method before it is bound
# to a particular self. This function does NOT identify if a function
# is a bound instance method, such as torch.tensor(1.0).add. See below.
if not inspect.ismethoddescriptor(f):
return False
objc = getattr(f, "__objclass__", None)
return objc is torch.Tensor or objc in torch.Tensor.__bases__
def _is_tensor_bound_instance_method(f: Callable) -> bool:
# This identifies if a function object is an instance method of
# a tensor already bound to a particular self. All such functions
# in torch are marked as builtin.
return isinstance(f, _builtin_function_or_method) and isinstance(
getattr(f, "__self__", None), torch.Tensor
)
def _get_unbound_tensor_method(f: Callable) -> Callable:
# Given a bound-to-self tensor instance method, obtain its corresponding
# unbound descriptor. In normal Python, the protocol is that the bound
# method has attribute __func__ pointing back to the descriptor but
# torch does not follow this protocol. Rather, we'll look it up by name.
assert _is_tensor_bound_instance_method(f)
unbound = getattr(torch.Tensor, f.__name__, None)
assert _is_tensor_unbound_instance_method(unbound)
return unbound
def canonicalize_function(
function: Any, arguments: List[Any]
) -> Tuple[Callable, List[Any]]:
# In Python a function that is a member of a class can be in either a "bound"
# or "unbound" form. Suppose c is of type C and we are calling foo with argument
# x. We could have:
#
# bound: c.foo(x)
# unbound: C.foo(c, x)
#
# The bound version calls the unbound version. How? In the bound case the fetch
# of c.foo returns a method object with attribute __self__ set to c and attribute
# __func__ set to C.foo. The call on the method object then invokes
# __func__(__self__, x).
#
# Unfortunately, calls to torch tensor methods do not follow this convention;
# instead of returning a method object with __func__ and __self__, it returns
# a builtin method object with __self__ but no __func__, so we call special helpers
# for those.
#
# It is useful when analyzing calls to have them in a consistent form. This function
# turns bound function calls into the equivalent unbound function call.
if isinstance(function, MethodType):
f = function.__func__
args = [function.__self__] + arguments
assert isinstance(f, Callable)
elif _is_tensor_bound_instance_method(function):
f = _get_unbound_tensor_method(function)
args = [function.__self__] + arguments
elif isinstance(function, Callable):
f = function
args = arguments
else:
_raise_unsupported(function)
assert isinstance(f, Callable), ( # pyre-ignore
"_canonicalize_function should return callable "
+ f"but got {type(f)} {str(f)}" # pyre-ignore
)
return (f, args) # pyre-ignore
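# Editor's note (illustrative sketch, not part of the original module): how
# canonicalize_function behaves. The class C below is a made-up example.
#
#   class C:
#       def foo(self, x):
#           return x
#
#   c = C()
#   canonicalize_function(c.foo, [10])      # -> (C.foo, [c, 10]); bound method unwrapped
#   canonicalize_function(C.foo, [c, 10])   # -> (C.foo, [c, 10]); already unbound, unchanged
#   canonicalize_function(torch.tensor(1.0).add, [2.0])
#   # -> (torch.Tensor.add, [tensor(1.), 2.0]) via _get_unbound_tensor_method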
# This helper class is to solve a problem in the simulated
# execution of the model during graph accumulation. Consider
# a model fragment like:
#
# n = normal()
# y = n.exp()
#
# During graph construction, n will be a SampleNode whose
# operand is a NormalNode, but SampleNode does not have a
# method "exp".
#
# The lifted program we execute will be something like:
#
# n = bmg.handle_function(normal, [])
# func = bmg.handle_dot(n, "exp")
# y = bmg.handle_function(func, [])
#
# The "func" that is returned is one of these KnownFunction
# objects, which captures the notion "I am an invocation
# of known function Tensor.exp on a receiver that is a BMG
# node". We then turn that into a exp node in handle_function.
class KnownFunction:
receiver: BMGNode
function: Callable
def __init__(self, receiver: BMGNode, function: Callable) -> None:
if not isinstance(receiver, BMGNode):
raise TypeError(
f"KnownFunction receiver must be BMGNode but is {type(receiver)}"
)
if not isinstance(function, Callable):
raise TypeError(
f"KnownFunction function must be Callable but is {type(function)}"
)
self.receiver = receiver
self.function = function
class SpecialFunctionCaller:
# As we execute the lifted program, we accumulate graph nodes in the
# graph builder, and the program passes around graph nodes instead of
# regular values. What happens when a graph node is passed to a
# function, or used as the receiver of a function? That function will be
# expecting a regular value as its argument or receiver.
#
# Certain function calls are special because they cause graph nodes to
# be created; we have a dictionary here that maps Python function objects
# to the graph builder method that knows how to create the appropriate
# node type.
#
# There are also some functions which we know can be passed a graph node
# and will treat it correctly even though it is a graph node and not
# a value. For example, the function which constructs a dictionary
# or the function which constructs a list. When we encounter one of
# these functions in the lifted program, we do not create a graph node
# or call a special helper function; we simply allow it to be called normally.
_bmg: BMGraphBuilder
_function_map: Dict[Callable, Callable]
_special_tensor_instance_function_names: Set[str]
def __init__(self, bmg: BMGraphBuilder) -> None:
self._bmg = bmg
self._function_map = {
#
# Built-in functions
#
float: self._builtin_float,
#
# Math functions
#
math.exp: self._math_exp,
math.log: self._math_log,
#
# Hints
#
log1mexp: self._hint_log1mexp,
math_log1mexp: self._hint_log1mexp,
#
# Operators as functions
#
operator.add: self._operator_add,
operator.and_: self._operator_and,
operator.contains: self._operator_contains,
operator.eq: self._operator_eq,
operator.floordiv: self._operator_floordiv,
operator.ge: self._operator_ge,
operator.gt: self._operator_gt,
operator.inv: self._operator_inv,
operator.is_: self._operator_is,
operator.is_not: self._operator_is_not,
operator.le: self._operator_le,
operator.lshift: self._operator_lshift,
operator.lt: self._operator_lt,
operator.matmul: self._operator_matmul,
operator.mod: self._operator_mod,
operator.mul: self._operator_mul,
operator.ne: self._operator_ne,
operator.neg: self._operator_neg,
operator.not_: self._operator_not,
operator.or_: self._operator_or,
operator.pos: self._operator_pos,
operator.pow: self._operator_pow,
operator.rshift: self._operator_rshift,
operator.sub: self._operator_sub,
operator.truediv: self._operator_truediv,
operator.xor: self._operator_xor,
#
#
# Torch distributions
#
# (Remember to add a case to distribution_to_node.)
#
dist.Bernoulli: self._dist_bernoulli,
dist.Beta: self._dist_beta,
dist.Binomial: self._dist_binomial,
dist.Categorical: self._dist_categorical,
# TODO: Cauchy
dist.Chi2: self._dist_chi2,
# TODO: ContinuousBernoulli
dist.Dirichlet: self._dist_dirichlet,
# TODO: Exponential
# TODO: FisherSnedecor
dist.Gamma: self._dist_gamma,
# TODO: Geometric
# TODO: Gumbel
dist.HalfCauchy: self._dist_halfcauchy,
dist.HalfNormal: self._dist_halfnormal,
# TODO: Independent
# TODO: Kumaraswamy
# TODO: LKJCholesky
# TODO: Laplace
# TODO: LogNormal
# TODO: LowRankMultivariateNormal
# TODO: MixtureSameFamily
# TODO: Multinomial
# TODO: MultivariateNormal
# TODO: NegativeBinomial
dist.Normal: self._dist_normal,
# TODO: OneHotCategorical
# TODO: Pareto
# TODO: Poisson
dist.Poisson: self._dist_poisson,
# TODO: RelaxedBernoulli
# TODO: LogitRelaxedBernoulli
# TODO: RelaxedOneHotCategorical
dist.StudentT: self._dist_studentt,
# TODO: TransformedDistribution
dist.Uniform: self._dist_uniform,
# TODO: VonMises
# TODO: Weibull
#
# Torch functions
#
torch.Tensor.add: self._torch_add,
torch.add: self._torch_add,
torch.Tensor.bitwise_and: self._torch_bitwise_and,
torch.bitwise_and: self._torch_bitwise_and,
torch.Tensor.bitwise_not: self._torch_bitwise_not,
torch.bitwise_not: self._torch_bitwise_not,
torch.Tensor.bitwise_or: self._torch_bitwise_or,
torch.bitwise_or: self._torch_bitwise_or,
torch.Tensor.bitwise_xor: self._torch_bitwise_xor,
torch.bitwise_xor: self._torch_bitwise_xor,
torch.Tensor.bitwise_left_shift: self._torch_bitwise_left_shift,
torch.bitwise_left_shift: self._torch_bitwise_left_shift,
torch.Tensor.bitwise_right_shift: self._torch_bitwise_right_shift,
torch.bitwise_right_shift: self._torch_bitwise_right_shift,
torch.Tensor.cholesky: self._torch_cholesky,
torch.linalg.cholesky: self._torch_cholesky,
torch.Tensor.div: self._torch_div,
torch.div: self._torch_div,
torch.Tensor.divide: self._torch_div,
torch.divide: self._torch_div,
torch.Tensor.eq: self._torch_eq,
torch.eq: self._torch_eq,
torch.Tensor.equal: self._torch_eq,
torch.equal: self._torch_eq,
torch.Tensor.exp: self._torch_exp,
torch.exp: self._torch_exp,
torch.Tensor.exp2: self._torch_exp2,
torch.exp2: self._torch_exp2,
torch.special.exp2: self._torch_exp2,
torch.Tensor.expm1: self._torch_expm1,
torch.expm1: self._torch_expm1,
torch.special.expm1: self._torch_expm1,
torch.Tensor.float: self._torch_float,
# TODO: float_power
torch.Tensor.floor_divide: self._torch_floor_divide,
torch.floor_divide: self._torch_floor_divide,
torch.Tensor.fmod: self._torch_fmod,
torch.fmod: self._torch_fmod,
torch.Tensor.ge: self._torch_ge,
torch.ge: self._torch_ge,
torch.Tensor.greater: self._torch_gt,
torch.greater: self._torch_gt,
torch.Tensor.greater_equal: self._torch_ge,
torch.greater_equal: self._torch_ge,
torch.Tensor.gt: self._torch_gt,
torch.gt: self._torch_gt,
torch.Tensor.int: self._torch_int,
torch.Tensor.item: self._torch_item,
torch.Tensor.le: self._torch_le,
torch.le: self._torch_le,
torch.Tensor.less: self._torch_lt,
torch.less: self._torch_lt,
torch.Tensor.less_equal: self._torch_le,
torch.less_equal: self._torch_le,
torch.Tensor.log: self._torch_log,
torch.log: self._torch_log,
torch.Tensor.log10: self._torch_log10,
torch.log10: self._torch_log10,
torch.Tensor.log1p: self._torch_log1p,
torch.log1p: self._torch_log1p,
torch.special.log1p: self._torch_log1p,
torch.Tensor.log2: self._torch_log2,
torch.log2: self._torch_log2,
# TODO: logical_and
# TODO: special.logit
torch.Tensor.logical_not: self._torch_logical_not,
torch.logical_not: self._torch_logical_not,
# TODO: logical_or
# TODO: logical_xor
torch.Tensor.logsumexp: self._torch_logsumexp,
torch.logsumexp: self._torch_logsumexp,
torch.special.logsumexp: self._torch_logsumexp,
torch.Tensor.lt: self._torch_lt,
torch.lt: self._torch_lt,
torch.Tensor.matmul: self._torch_matmul,
torch.matmul: self._torch_matmul,
torch.Tensor.mm: self._torch_mm,
torch.mm: self._torch_mm,
torch.Tensor.mul: self._torch_mul,
torch.mul: self._torch_mul,
torch.Tensor.multiply: self._torch_mul,
torch.multiply: self._torch_mul,
torch.Tensor.ne: self._torch_ne,
torch.ne: self._torch_ne,
torch.Tensor.not_equal: self._torch_ne,
torch.not_equal: self._torch_ne,
torch.Tensor.neg: self._torch_neg,
torch.neg: self._torch_neg,
torch.Tensor.negative: self._torch_neg,
torch.negative: self._torch_neg,
torch.Tensor.pow: self._torch_pow,
torch.pow: self._torch_pow,
torch.Tensor.remainder: self._torch_fmod,
torch.remainder: self._torch_fmod,
torch.sigmoid: self._torch_sigmoid,
torch.Tensor.sigmoid: self._torch_sigmoid,
torch.special.expit: self._torch_sigmoid,
torch.Tensor.sqrt: self._torch_sqrt,
torch.sqrt: self._torch_sqrt,
torch.Tensor.sub: self._torch_sub,
torch.sub: self._torch_sub,
torch.Tensor.subtract: self._torch_sub,
torch.subtract: self._torch_sub,
torch.Tensor.sum: self._torch_sum,
torch.sum: self._torch_sum,
torch.Tensor.true_divide: self._torch_div,
torch.true_divide: self._torch_div,
}
self._special_tensor_instance_function_names = {
f.__name__
for f in self._function_map
if _is_tensor_unbound_instance_method(f)
}
def _is_special_tensor_bound_instance_method_name(self, name: str) -> bool:
return name in self._special_tensor_instance_function_names
def bind_tensor_instance_function(
self, receiver: BMGNode, name: str
) -> KnownFunction:
# TODO: What if the node represents a distribution, not a tensor?
# Should we produce a better error message?
if hasattr(torch.Tensor, name):
return KnownFunction(receiver, getattr(torch.Tensor, name))
_raise_unsupported(name)
def is_special_tensor_bound_instance_method(self, f: Callable) -> bool:
return self._is_special_tensor_bound_instance_method_name(
f.__name__
) and _is_tensor_bound_instance_method(f)
def get_special_tensor_unbound_instance_method(self, f: Callable) -> Callable:
assert self.is_special_tensor_bound_instance_method(f)
return _get_unbound_tensor_method(f)
def _make_constant(self, arg: Any) -> BMGNode:
return arg if isinstance(arg, BMGNode) else self._bmg.add_constant(arg)
def is_special_function(
self,
func: Callable,
args: List[Any] = _empty_args, # TODO: Unused
kwargs: Dict[str, Any] = _empty_kwargs, # TODO: Unused
) -> bool:
if isinstance(func, KnownFunction):
return True
if _is_any_torch_function(func):
return True
if not _hashable(func):
return False
if func in allowed_functions:
return True
if func in self._function_map:
return True
# All in-place operators are special functions.
if _is_in_place_operator(func):
return True
return False
def _canonicalize_function(
self, func: Callable, args: List[Any]
) -> Tuple[Callable, List[Any]]:
if isinstance(func, KnownFunction):
args = [func.receiver] + args
func = func.function
else:
func, args = canonicalize_function(func, args)
return func, args
def do_special_call_maybe_stochastic(
self,
func: Any,
args: List[Any],
kwargs: Dict[str, Any] = _empty_kwargs,
) -> Any:
# If we possibly can, just call the original function with ordinary arguments.
# Otherwise, convert everything to a graph node and call our helper which
# does node construction.
assert self.is_special_function(func, args, kwargs)
func, args = self._canonicalize_function(func, args)
if func is torch.tensor:
return self._tensor_constructor(*args, **kwargs)
if (
_only_ordinary_arguments_or_constants(args, kwargs)
or func in allowed_functions
):
new_args = (_get_ordinary_value(arg) for arg in args)
new_kwargs = {key: _get_ordinary_value(arg) for key, arg in kwargs.items()}
return func(*new_args, **new_kwargs)
if _is_in_place_operator(func):
return self._in_place_operator(func, *args)
return self.do_special_call_always_stochastic(func, args, kwargs)
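# Editor's note (illustrative only): the effect of the dispatch above, assuming
# `caller` is a SpecialFunctionCaller and `sample` is a BMGNode built earlier:
#
#   caller.do_special_call_maybe_stochastic(torch.add, [torch.tensor(1.0), torch.tensor(2.0)])
#   # -> torch.tensor(3.0); only ordinary arguments, so the real torch.add runs.
#   caller.do_special_call_maybe_stochastic(torch.add, [torch.tensor(1.0), sample])
#   # -> a BMG addition node; a stochastic argument forces graph-node construction
#   #    via do_special_call_always_stochastic and self._bmg.add_addition.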
def do_special_call_always_stochastic(
self,
func: Callable,
args: List[Any],
kwargs: Dict[str, Any] = _empty_kwargs,
) -> BMGNode:
# Never call the original function with ordinary arguments. Convert everything
# to a graph node and call our helper which does node construction.
assert self.is_special_function(func, args, kwargs)
# We should never call do_special_call_always_stochastic on (1) a tensor
# constructor, or (2) a function known to be allowed to take any values.
assert func not in allowed_functions
assert func is not torch.tensor
func, args = self._canonicalize_function(func, args)
if _is_phi_unbound(func, args, kwargs):
args = args[1:]
node_constructor = self._phi
elif _hashable(func) and func in self._function_map:
node_constructor = self._function_map[func]
else:
# We are trying to do an always-stochastic call on a function that
# we do not yet know how to handle.
_raise_unsupported(func)
new_args = (self._make_constant(arg) for arg in args)
new_kwargs = {key: self._make_constant(arg) for key, arg in kwargs.items()}
return node_constructor(*new_args, **new_kwargs) # pyre-ignore
#
# Builtins; these must have the same signature as their corresponding
# builtin functions.
#
def _builtin_float(self, input: BMGNode) -> BMGNode:
# TODO: Do we want to do this at all? Why should float(t) insert a
# TO_REAL node into the graph? We can simply insert TO_REAL where required
# by the BMG type system.
return self._bmg.add_to_real(input)
#
# Math functions
#
def _math_exp(self, input: BMGNode) -> BMGNode:
# TODO: Right signature?
return self._bmg.add_exp(input)
def _math_log(self, input: BMGNode) -> BMGNode:
return self._bmg.add_log(input)
#
# Hints
# TODO: Eliminate this hack. Write a problem fixer which detects these
# patterns and rewrites them into the more efficient operator.
#
def _hint_log1mexp(self, x: BMGNode) -> BMGNode:
return self._bmg.add_log1mexp(x)
#
# Distributions; these must have the same signature as the corresponding
# constructor.
#
def distribution_to_node( # noqa
self, distribution: dist.Distribution
) -> bn.DistributionNode:
t = type(distribution)
if isinstance(distribution, dist.Bernoulli):
args = [distribution.probs]
elif isinstance(distribution, dist.Beta):
args = [distribution.concentration1, distribution.concentration0]
elif isinstance(distribution, dist.Binomial):
args = [distribution.total_count, distribution.probs]
elif isinstance(distribution, dist.Categorical):
args = [distribution.probs]
elif isinstance(distribution, dist.Chi2):
args = [distribution.df]
elif isinstance(distribution, dist.Dirichlet):
args = [distribution.concentration]
elif isinstance(distribution, dist.Gamma):
args = [distribution.concentration, distribution.rate]
elif isinstance(distribution, dist.HalfCauchy):
args = [distribution.scale]
elif isinstance(distribution, dist.HalfNormal):
args = [distribution.scale]
elif isinstance(distribution, dist.Normal):
args = [distribution.mean, distribution.stddev]
elif isinstance(distribution, dist.Poisson):
args = [distribution.rate]
elif isinstance(distribution, dist.StudentT):
args = [distribution.df, distribution.loc, distribution.scale]
elif isinstance(distribution, dist.Uniform):
args = [distribution.low, distribution.high]
else:
# TODO: Better error
raise TypeError(
f"Distribution '{t.__name__}' is not supported by Bean Machine Graph."
)
d = self.do_special_call_always_stochastic(t, args, {})
assert isinstance(d, bn.DistributionNode)
return d
def _dist_bernoulli(
self,
probs: Optional[BMGNode] = None,
logits: Optional[BMGNode] = None,
validate_args: Any = None,
) -> BMGNode:
if (probs is None and logits is None) or (
probs is not None and logits is not None
):
raise ValueError("Bernoulli requires exactly one of probs or logits")
if logits is not None:
return self._bmg.add_bernoulli_logit(logits)
return self._bmg.add_bernoulli(probs)
def _dist_beta(
self,
concentration1: BMGNode,
concentration0: BMGNode,
validate_args: Any = None,
) -> BMGNode:
return self._bmg.add_beta(concentration1, concentration0)
def _dist_binomial(
self,
total_count: Optional[BMGNode] = None,
probs: Optional[BMGNode] = None,
logits: Optional[BMGNode] = None,
validate_args: Any = None,
) -> BMGNode:
if (probs is None and logits is None) or (
probs is not None and logits is not None
):
raise ValueError("Binomial requires exactly one of probs or logits")
# TODO: Create a test case for Binomial(probs=0.5) where total_count
# is omitted.
if total_count is None:
total_count = self._make_constant(1)
if logits is not None:
return self._bmg.add_binomial_logit(total_count, logits)
return self._bmg.add_binomial(total_count, probs)
def _dist_categorical(
self,
probs: Optional[BMGNode] = None,
logits: Optional[BMGNode] = None,
validate_args: Any = None,
) -> BMGNode:
if (probs is None and logits is None) or (
probs is not None and logits is not None
):
raise ValueError("Categorical requires exactly one of probs or logits")
if logits is not None:
return self._bmg.add_categorical_logit(logits)
return self._bmg.add_categorical(probs)
def _dist_chi2(self, df: BMGNode, validate_args: Any = None) -> BMGNode:
return self._bmg.add_chi2(df)
def _dist_dirichlet(self, concentration: BMGNode, validate_args=None) -> BMGNode:
return self._bmg.add_dirichlet(concentration)
def _dist_gamma(
self, concentration: BMGNode, rate: BMGNode, validate_args=None
) -> BMGNode:
return self._bmg.add_gamma(concentration, rate)
def _dist_halfcauchy(self, scale: BMGNode, validate_args=None) -> BMGNode:
return self._bmg.add_halfcauchy(scale)
def _dist_halfnormal(self, scale: Any, validate_args=None) -> BMGNode:
return self._bmg.add_halfnormal(scale)
def _dist_normal(self, loc: BMGNode, scale: BMGNode, validate_args=None) -> BMGNode:
return self._bmg.add_normal(loc, scale)
def _dist_poisson(self, rate: BMGNode) -> BMGNode:
return self._bmg.add_poisson(rate)
def _dist_studentt(
self,
df: BMGNode,
loc: Optional[BMGNode] = None,
scale: Optional[BMGNode] = None,
validate_args=None,
) -> BMGNode:
if loc is None:
loc = self._make_constant(0)
if scale is None:
scale = self._make_constant(1)
return self._bmg.add_studentt(df, loc, scale)
def _dist_uniform(self, low: BMGNode, high: BMGNode, validate_args=None) -> BMGNode:
return self._bmg.add_uniform(low, high)
#
# Tensor constructor
#
def _tensor_constructor(self, data: Any) -> Any:
# The tensor constructor is a bit tricky because it takes a single
# argument that is either a value or a list of values. We need:
# (1) a flattened list of all the arguments, and
# (2) the size of the original tensor.
flattened_args = list(_flatten_all_lists(data))
if not any(isinstance(arg, BMGNode) for arg in flattened_args):
# None of the arguments are graph nodes. We can just
# construct the tensor normally.
return torch.tensor(data)
# At least one of the arguments is a graph node.
#
# If we're constructing a singleton tensor and the single value
# is a graph node, we can just keep it as that graph node.
if len(flattened_args) == 1:
return flattened_args[0]
# We have two or more arguments and at least one is a graph node.
# Convert them all to graph nodes.
for index, arg in enumerate(flattened_args):
if not isinstance(arg, BMGNode):
flattened_args[index] = self._bmg.add_constant(arg)
# What shape is this tensor? Rather than duplicating the logic in the
# tensor class, let's just construct the same shape made of entirely
# zeros and then ask what shape it is.
size = torch.tensor(_list_to_zeros(data)).size()
return self._bmg.add_tensor(size, *flattened_args)
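# Editor's note (illustrative only), assuming `sample` is a BMGNode:
#
#   _tensor_constructor([[1.0, 2.0], [3.0, 4.0]])  # no graph nodes -> ordinary torch.tensor
#   _tensor_constructor(sample)                    # single stochastic value -> returns sample as-is
#   _tensor_constructor([[1.0, sample], [3.0, 4.0]])
#   # -> flattens to [1.0, sample, 3.0, 4.0], wraps the constants in graph nodes,
#   #    and calls add_tensor with the original shape torch.Size([2, 2]).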
#
# Tensor functions; these must have the same signature as the
# corresponding torch function.
#
# TODO: We do not support mutation of stochastic tensors; we should produce an
# error if there are any "out" values.
def _phi(self, value: BMGNode) -> BMGNode:
return self._bmg.add_phi(value)
def _torch_add(
self,
input: BMGNode,
other: BMGNode,
alpha: Optional[BMGNode] = None,
out: Any = None,
) -> BMGNode:
# TODO: tensor add has the semantics input + alpha * other; if alpha is present
# then we need to generate a multiply and an addition.
return self._bmg.add_addition(input, other)
def _torch_bitwise_and(
self, input: BMGNode, other: BMGNode, out: Any = None
) -> BMGNode:
return self._bmg.add_bitand(input, other)
def _torch_bitwise_left_shift(
self, input: BMGNode, other: BMGNode, out: Any = None
) -> BMGNode:
# TODO: In torch, a << b is not bitwise at all. Rather it is simply
# an alias for a * (2 ** b). Make a rewriter that turns shifts into
# this operation.
return self._bmg.add_lshift(input, other)
def _torch_bitwise_not(self, input: BMGNode, out: Any = None) -> BMGNode:
return self._bmg.add_invert(input)
def _torch_bitwise_or(
self, input: BMGNode, other: BMGNode, out: Any = None
) -> BMGNode:
return self._bmg.add_bitor(input, other)
def _torch_bitwise_right_shift(
self, input: BMGNode, other: BMGNode, out: Any = None
) -> BMGNode:
# TODO: In torch, a >> b is not bitwise at all. Rather it is simply
# an alias for a * (2 ** -b). Make a rewriter that turns shifts into
# this operation.
return self._bmg.add_rshift(input, other)
def _torch_bitwise_xor(
self, input: BMGNode, other: BMGNode, out: Any = None
) -> BMGNode:
return self._bmg.add_bitxor(input, other)
def _torch_cholesky(
self,
input: BMGNode,
upper: Optional[BMGNode] = None,
out: Any = None,
) -> BMGNode:
# TODO: What to do with upper?
return self._bmg.add_cholesky(input)
def _torch_div(
self,
input: BMGNode,
other: BMGNode,
rounding_mode: Optional[BMGNode] = None,
out: Any = None,
) -> BMGNode:
# TODO: Should we give an error if there is a rounding mode?
return self._bmg.add_division(input, other)
def _torch_eq(self, input: BMGNode, other: BMGNode, out: Any = None) -> BMGNode:
return self._bmg.add_equal(input, other)
def _torch_exp(self, input: BMGNode, out: Any = None) -> BMGNode:
return self._bmg.add_exp(input)
def _torch_exp2(self, input: BMGNode, out: Any = None) -> BMGNode:
return self._bmg.add_exp2(input)
def _torch_expm1(self, input: BMGNode, out: Any = None) -> BMGNode:
return self._bmg.add_expm1(input)
def _torch_float(
self, input: BMGNode, memory_format: Optional[BMGNode] = None
) -> BMGNode:
# TODO: Do we want to do this at all? Why should t.float() insert a
# TO_REAL node into the graph? We can simply insert TO_REAL where required
# by the BMG type system.
# TODO: If we do keep this, what should we do with memory_format?
return self._bmg.add_to_real(input)
def _torch_floor_divide(
self,
input: BMGNode,
other: BMGNode,
out: Any = None,
) -> BMGNode:
return self._bmg.add_floordiv(input, other)
def _torch_fmod(
self,
input: BMGNode,
other: BMGNode,
out: Any = None,
) -> BMGNode:
return self._bmg.add_mod(input, other)
def _torch_ge(self, input: BMGNode, other: BMGNode, out: Any = None) -> BMGNode:
return self._bmg.add_greater_than_equal(input, other)
def _torch_gt(self, input: BMGNode, other: BMGNode, out: Any = None) -> BMGNode:
return self._bmg.add_greater_than(input, other)
def _torch_int(
self, input: BMGNode, memory_format: Optional[BMGNode] = None
) -> BMGNode:
# TODO: What should we do with memory_format?
return self._bmg.add_to_int(input)
def _torch_item(self, input: BMGNode) -> Any:
return self._bmg.add_item(input)
def _torch_le(self, input: BMGNode, other: BMGNode, out: Any = None) -> BMGNode:
return self._bmg.add_less_than_equal(input, other)
def _torch_log(self, input: BMGNode, out: Any = None) -> Any:
return self._bmg.add_log(input)
def _torch_log10(self, input: BMGNode, out: Any = None) -> Any:
return self._bmg.add_log10(input)
def _torch_log1p(self, input: BMGNode, out: Any = None) -> Any:
return self._bmg.add_log1p(input)
def _torch_log2(self, input: BMGNode, out: Any = None) -> Any:
return self._bmg.add_log2(input)
def _torch_logical_not(self, input: BMGNode, out: Any = None) -> Any:
return self._bmg.add_not(input)
def _torch_logsumexp(
self,
input: BMGNode,
dim: BMGNode,
keepdim: Optional[BMGNode] = None,
out: Any = None,
) -> Any:
if keepdim is None:
keepdim = self._make_constant(False)
return self._bmg.add_logsumexp_torch(input, dim, keepdim)
def _torch_lt(self, input: BMGNode, other: BMGNode, out: Any = None) -> BMGNode:
return self._bmg.add_less_than(input, other)
def _torch_matmul(self, input: BMGNode, other: BMGNode, out: Any = None) -> BMGNode:
# TODO: mm and matmul have different behavior; we probably need to make
# a distinction here.
return self._bmg.add_matrix_multiplication(input, other)
def _torch_mm(self, input: BMGNode, mat2: BMGNode, out: Any = None) -> BMGNode:
# TODO: mm and matmul have different behavior; we probably need to make
# a distinction here.
return self._bmg.add_matrix_multiplication(input, mat2)
def _torch_mul(self, input: BMGNode, other: BMGNode, out: Any = None) -> BMGNode:
return self._bmg.add_multiplication(input, other)
def _torch_ne(self, input: BMGNode, other: BMGNode, out: Any = None) -> BMGNode:
return self._bmg.add_not_equal(input, other)
def _torch_neg(self, input: BMGNode, out: Any = None) -> BMGNode:
return self._bmg.add_negate(input)
def _torch_pow(self, input: BMGNode, exponent: BMGNode, out: Any = None) -> BMGNode:
return self._bmg.add_power(input, exponent)
def _torch_sigmoid(self, input: BMGNode, out: Any = None) -> BMGNode:
return self._bmg.add_logistic(input)
def _torch_sqrt(self, input: BMGNode, out: Any = None) -> Any:
return self._bmg.add_squareroot(input)
def _torch_sub(
self,
input: BMGNode,
other: BMGNode,
alpha: Optional[BMGNode] = None,
out: Any = None,
) -> BMGNode:
# TODO: tensor sub has the semantics input - alpha * other; if alpha is present
# then we need to generate a multiply and a subtraction.
return self._bmg.add_subtraction(input, other)
def _torch_sum(
self,
input: BMGNode,
dtype: Any = None,
) -> Any:
return self._bmg.add_sum(input)
#
# Operators as functions
#
def _operator_add(self, a: BMGNode, b: BMGNode) -> BMGNode:
return self._bmg.add_addition(a, b)
def _operator_and(self, a: BMGNode, b: BMGNode) -> BMGNode:
return self._bmg.add_bitand(a, b)
def _operator_contains(self, a: BMGNode, b: BMGNode) -> BMGNode:
# Note that "a" is the container and "b" is the query. That is,
# this means "b in a", NOT "a in b"
return self._bmg.add_in(b, a)
def _operator_eq(self, a: Any, b: Any) -> Any:
return self._bmg.add_equal(a, b)
def _operator_floordiv(self, a: BMGNode, b: BMGNode) -> BMGNode:
return self._bmg.add_floordiv(a, b)
def _operator_ge(self, a: Any, b: Any) -> Any:
return self._bmg.add_greater_than_equal(a, b)
def _operator_gt(self, a: Any, b: Any) -> Any:
return self._bmg.add_greater_than(a, b)
def _operator_inv(self, obj: BMGNode) -> BMGNode:
return self._bmg.add_invert(obj)
def _operator_is(self, a: Any, b: Any) -> Any:
return self._bmg.add_is(a, b)
def _operator_is_not(self, a: Any, b: Any) -> Any:
return self._bmg.add_is_not(a, b)
def _operator_le(self, a: Any, b: Any) -> Any:
return self._bmg.add_less_than_equal(a, b)
def _operator_lshift(self, a: BMGNode, b: BMGNode) -> BMGNode:
# TODO: In torch, a << b is not bitwise at all. Rather it is simply
# an alias for a * (2 ** b). Make a rewriter that turns shifts into
# this operation.
return self._bmg.add_lshift(a, b)
def _operator_lt(self, a: Any, b: Any) -> Any:
return self._bmg.add_less_than(a, b)
def _operator_matmul(self, a: BMGNode, b: BMGNode) -> BMGNode:
return self._bmg.add_matrix_multiplication(a, b)
def _operator_mod(self, a: BMGNode, b: BMGNode) -> BMGNode:
return self._bmg.add_mod(a, b)
def _operator_mul(self, a: BMGNode, b: BMGNode) -> BMGNode:
return self._bmg.add_multiplication(a, b)
def _operator_ne(self, a: Any, b: Any) -> Any:
return self._bmg.add_not_equal(a, b)
def _operator_neg(self, obj: BMGNode) -> BMGNode:
return self._bmg.add_negate(obj)
def _operator_not(self, obj: BMGNode) -> BMGNode:
return self._bmg.add_not(obj)
def _operator_or(self, a: BMGNode, b: BMGNode) -> BMGNode:
return self._bmg.add_bitor(a, b)
def _operator_pos(self, obj: BMGNode) -> BMGNode:
# unary + is an identity on graph nodes
return obj
def _operator_pow(self, a: BMGNode, b: BMGNode) -> BMGNode:
return self._bmg.add_power(a, b)
def _operator_rshift(self, a: BMGNode, b: BMGNode) -> BMGNode:
# TODO: In torch, a >> b is not bitwise at all. Rather it is simply
# an alias for a * (2 ** -b). Make a rewriter that turns shifts into
# this operation.
return self._bmg.add_rshift(a, b)
def _operator_sub(self, a: BMGNode, b: BMGNode) -> BMGNode:
return self._bmg.add_subtraction(a, b)
def _operator_truediv(self, a: BMGNode, b: BMGNode) -> BMGNode:
return self._bmg.add_division(a, b)
def _operator_xor(self, a: BMGNode, b: BMGNode) -> BMGNode:
return self._bmg.add_bitxor(a, b)
#
# Augmented assignment operators
#
def _in_place_operator(
self,
native_in_place: Callable, # operator.iadd, for example
left: Any,
right: Any,
) -> Any:
# Handling augmented assignments (+=, -=, *=, and so on) has a lot of cases;
# to cut down on code duplication we call this higher-level method. Throughout
# the comments below we assume that we're handling a +=; the logic is the same
# for all the operators.
# TODO: We have a problem that we need to resolve regarding compilation of models
# which have mutations of aliased tensors. Compare the action of these two similar:
# models in the original Bean Machine implementation:
#
# @functional def foo():
# x = flip() # 0 or 1
# y = x # y is an alias for x
# y += 1 # y is mutated in place and continues to alias x
# return x # returns 1 or 2
#
# vs
#
# @functional def foo():
# x = flip() # 0 or 1
# y = x # y is an alias for x
# y = y + 1 # y no longer aliases x; y is 1 or 2
# return x # returns 0 or 1
#
# Suppose we are asked to compile the first model; how should we execute
# the rewritten form of it so as to accumulate the correct graph? Unlike
# tensors, graph nodes are not mutable!
#
# Here's what we're going to do for now:
#
# If neither operand is a graph node then do exactly what the model would
# normally do:
#
if not isinstance(left, BMGNode) and not isinstance(right, BMGNode):
return native_in_place(left, right)
assert native_in_place in _in_place_to_regular
native_regular = _in_place_to_regular[native_in_place]
# At least one operand is a graph node. If we have tensor += graph_node
# or graph_node += anything then optimistically assume that there
# is NOT any alias of the mutated left side, and treat the += as though
# it is a normal addition.
#
# TODO: Should we produce some sort of warning here telling the user that
# the compiled model semantics might be different than the original model?
# Or is that too noisy? There are going to be a lot of models with += where
# one of the operands is an ordinary tensor and one is a graph node, but which
# do not have any aliasing problem.
if isinstance(left, torch.Tensor) or isinstance(left, BMGNode):
return self.do_special_call_always_stochastic(
native_regular, [left, right], {}
)
# If we've made it here then we have x += graph_node, where x is not a
# tensor. There are two possibilities: either x is some type which implements
# mutating in-place +=, or it is not. If it is, then just call the mutator
# and hope for the best.
#
# TODO: This scenario is another opportunity for a warning or error, since
# the model is probably not one that can be compiled if it is depending on
# in-place mutation of an object which has a stochastic quantity added to it.
assert isinstance(right, BMGNode)
assert native_in_place in _in_place_operator_names
if hasattr(left, _in_place_operator_names[native_in_place]):
# It is possible that the operator exists but either returns
# NotImplemented or raises NotImplementedError. In either case,
# assume that we can fall back to non-mutating addition.
try:
result = native_in_place(left, right)
if result is not NotImplemented:
return result
except NotImplementedError:
pass
# We have x += graph_node, and x is not mutating in place, so just
# do x + graph_node:
return self.do_special_call_maybe_stochastic(native_regular, [left, right], {})
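# Editor's note (illustrative only), assuming `node` is a BMGNode:
#
#   x = 1.0;               x += 1.0   # neither side is a graph node -> plain Python addition
#   t = torch.tensor(1.0); t += node  # rewritten as the stochastic t + node (new graph node)
#   node += torch.tensor(1.0)         # likewise rewritten as node + tensor(1.0)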
| 38.466879
| 91
| 0.640324
|
794bb6f43886e81a1ccff675bbab83fd20262f6f
| 1,699
|
py
|
Python
|
app/core/migrations/0001_initial.py
|
locnt1195/recipe-app-api
|
2bf23bf0c9693d450ea3318132c2d07875ba82d7
|
[
"MIT"
] | 1
|
2021-01-15T09:51:19.000Z
|
2021-01-15T09:51:19.000Z
|
app/core/migrations/0001_initial.py
|
locnt1195/recipe-app-api
|
2bf23bf0c9693d450ea3318132c2d07875ba82d7
|
[
"MIT"
] | 7
|
2021-01-15T09:51:47.000Z
|
2021-09-22T19:27:09.000Z
|
app/core/migrations/0001_initial.py
|
locnt1195/recipe-app-api
|
2bf23bf0c9693d450ea3318132c2d07875ba82d7
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0 on 2020-07-17 09:18
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| 49.970588
| 266
| 0.637434
|
794bb7f9982f79c18424d9e8c922c3f1bacc64d2
| 2,186
|
py
|
Python
|
app.py
|
Jigys123/OCE-heroku
|
3abd7a6ece0ba03553abfcc9e5422afea94baef1
|
[
"BSD-3-Clause"
] | 8
|
2017-12-11T12:18:30.000Z
|
2020-08-01T18:41:41.000Z
|
app.py
|
Jigys123/OCE-heroku
|
3abd7a6ece0ba03553abfcc9e5422afea94baef1
|
[
"BSD-3-Clause"
] | null | null | null |
app.py
|
Jigys123/OCE-heroku
|
3abd7a6ece0ba03553abfcc9e5422afea94baef1
|
[
"BSD-3-Clause"
] | 2
|
2019-07-22T23:44:12.000Z
|
2019-10-05T15:57:29.000Z
|
import os,cv2,pytesseract
from flask import Flask, render_template, request,jsonify
from PIL import Image
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files (x86)\Tesseract-OCR\tesseract'  # raw string keeps the Windows path backslashes literal
app = Flask(__name__)
UPLOAD_FOLDER = os.path.basename('.')
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
@app.route('/')
def index():
return render_template('index.html')
@app.route('/api/ocr', methods=['POST','GET'])
def upload_file():
if request.method == "GET":
return "This is the api BLah blah"
elif request.method == "POST":
file = request.files['image']
f = os.path.join(app.config['UPLOAD_FOLDER'], file.filename)
# add your custom code to check that the uploaded file is a valid image and not a malicious file (out-of-scope for this post)
file.save(f)
# print(file.filename)
image = cv2.imread(UPLOAD_FOLDER+"/"+file.filename)
os.remove(UPLOAD_FOLDER+"/"+file.filename)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# check to see if we should apply thresholding to preprocess the
# image
preprocess = request.form["preprocess"]
if preprocess == "thresh":
gray = cv2.threshold(gray, 0, 255,
cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
# make a check to see if median blurring should be done to remove
# noise
elif preprocess == "blur":
gray = cv2.medianBlur(gray, 3)
print(preprocess)
# write the grayscale image to disk as a temporary file so we can
# apply OCR to it
filename = "{}.png".format(os.getpid())
cv2.imwrite(filename, gray)
# load the image as a PIL/Pillow image, apply OCR, and then delete
# the temporary file
# print("C:/Users/mzm/PycharmProjects/My_website/ocr_using_video/"+filename,Image.open("C:\\Users\mzm\PycharmProjects\My_website\ocr_using_video\\"+filename))
text = pytesseract.image_to_string(Image.open(filename))
os.remove(filename)
print("Text in Image :\n",text)
return jsonify({"text" : text})
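# Example usage (editor's illustration; the field names match the handler above and
# the host/port match the app.run call below):
#   import requests
#   with open("sample.png", "rb") as fh:
#       r = requests.post("http://localhost:5000/api/ocr",
#                         files={"image": fh}, data={"preprocess": "thresh"})
#   print(r.json()["text"])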
app.run("0.0.0.0",5000,threaded=True,debug=True)
| 34.15625
| 166
| 0.643184
|