| hexsha (stringlengths 40–40) | size (int64 4–1.02M) | ext (stringclasses 8 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 4–209) | max_stars_repo_name (stringlengths 5–121) | max_stars_repo_head_hexsha (stringlengths 40–40) | max_stars_repo_licenses (listlengths 1–10) | max_stars_count (int64 1–191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24–24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24–24 ⌀) | max_issues_repo_path (stringlengths 4–209) | max_issues_repo_name (stringlengths 5–121) | max_issues_repo_head_hexsha (stringlengths 40–40) | max_issues_repo_licenses (listlengths 1–10) | max_issues_count (int64 1–67k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24–24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24–24 ⌀) | max_forks_repo_path (stringlengths 4–209) | max_forks_repo_name (stringlengths 5–121) | max_forks_repo_head_hexsha (stringlengths 40–40) | max_forks_repo_licenses (listlengths 1–10) | max_forks_count (int64 1–105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24–24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24–24 ⌀) | content (stringlengths 4–1.02M) | avg_line_length (float64 1.07–66.1k) | max_line_length (int64 4–266k) | alphanum_fraction (float64 0.01–1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fb8816a7b683c68b5302c23202f9078fcba7c711 | 2,270 | py | Python | chialite/consensus/block_rewards.py | Storch-Network/chialite | 587fc53e8ef452e07c6f3f266f58962d065feb5c | ["Apache-2.0"] | 2 | 2021-06-29T14:05:41.000Z | 2021-07-15T19:28:26.000Z | chialite/consensus/block_rewards.py | Storch-Network/chialite | 587fc53e8ef452e07c6f3f266f58962d065feb5c | ["Apache-2.0"] | 31 | 2021-06-26T23:11:46.000Z | 2022-03-29T00:12:30.000Z | chialite/consensus/block_rewards.py | Storch-Network/chialite | 587fc53e8ef452e07c6f3f266f58962d065feb5c | ["Apache-2.0"] | null | null | null |
from chialite.util.ints import uint32, uint64
# 1 Chialite coin = 1,000,000,000,000 = 1 trillion mojo.
_mojo_per_chialite = 1000000000000
_blocks_per_year = 1681920 # 32 * 6 * 24 * 365
def calculate_pool_reward(height: uint32) -> uint64:
"""
Returns the pool reward at a certain block height. The pool earns 7/8 of the reward in each block. If the farmer
is solo farming, they act as the pool, and therefore earn the entire block reward.
These halving events will not be hit at the exact times
(3 years, etc), due to fluctuations in difficulty. They will likely come early, if the network space and VDF
rates increase continuously.
"""
if height == 0:
return uint64(int((7 / 8) * 21000000 * _mojo_per_chialite))
elif height < 3 * _blocks_per_year:
return uint64(int((7 / 8) * 2 * _mojo_per_chialite))
elif height < 6 * _blocks_per_year:
return uint64(int((7 / 8) * 1 * _mojo_per_chialite))
elif height < 9 * _blocks_per_year:
return uint64(int((7 / 8) * 0.5 * _mojo_per_chialite))
elif height < 12 * _blocks_per_year:
return uint64(int((7 / 8) * 0.25 * _mojo_per_chialite))
else:
return uint64(int((7 / 8) * 0.125 * _mojo_per_chialite))
def calculate_base_farmer_reward(height: uint32) -> uint64:
"""
Returns the base farmer reward at a certain block height.
The base fee reward is 1/8 of total block reward
Returns the coinbase reward at a certain block height. These halving events will not be hit at the exact times
(3 years, etc), due to fluctuations in difficulty. They will likely come early, if the network space and VDF
rates increase continuously.
"""
if height == 0:
return uint64(int((1 / 8) * 21000000 * _mojo_per_chialite))
elif height < 3 * _blocks_per_year:
return uint64(int((1 / 8) * 2 * _mojo_per_chialite))
elif height < 6 * _blocks_per_year:
return uint64(int((1 / 8) * 1 * _mojo_per_chialite))
elif height < 9 * _blocks_per_year:
return uint64(int((1 / 8) * 0.5 * _mojo_per_chialite))
elif height < 12 * _blocks_per_year:
return uint64(int((1 / 8) * 0.25 * _mojo_per_chialite))
else:
return uint64(int((1 / 8) * 0.125 * _mojo_per_chialite))
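# Illustrative usage sketch, not part of the original file: a quick consistency check of
# the 7/8 pool / 1/8 farmer split described in the docstrings above. Assumes the chialite
# package imported at the top of this module is available.
if __name__ == "__main__":
    _h = uint32(1)  # any height in the first era (before the first halving)
    assert calculate_pool_reward(_h) + calculate_base_farmer_reward(_h) == uint64(2 * _mojo_per_chialite)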
| 43.653846 | 116 | 0.670925 |
e6db2e36daf3e3ed90df3f904ba160d9b58978f0 | 322 | py | Python | pyramda/iterable/filter_test.py | sergiors/pyramda | 5bf200888809b1bc946e813e29460f204bccd13e | ["MIT"] | 124 | 2015-07-30T21:34:25.000Z | 2022-02-19T08:45:50.000Z | pyramda/iterable/filter_test.py | sergiors/pyramda | 5bf200888809b1bc946e813e29460f204bccd13e | ["MIT"] | 37 | 2015-08-31T23:02:20.000Z | 2022-02-04T04:45:28.000Z | pyramda/iterable/filter_test.py | sergiors/pyramda | 5bf200888809b1bc946e813e29460f204bccd13e | ["MIT"] | 20 | 2015-08-04T18:59:09.000Z | 2021-12-13T08:08:59.000Z |
from .filter import filter
from pyramda.private.asserts import assert_iterables_equal
def positive(x):
return x > 0
def filter_nocurry_test():
assert_iterables_equal(filter(positive, [2, -1, 0, 3, -2]), [2, 3])
def filter_curry_test():
assert_iterables_equal(filter(positive)([2, -1, 0, 3, -2]), [2, 3])
| 21.466667 | 71 | 0.692547 |
0fc025c381959fcb2594cc73163ed65ce456b4f4 | 2,598 | py | Python | tests/test_slots.py | Mome/pdir2 | 57882ab902b089d86f862ccc9d655b8296b3078b | ["MIT"] | 1,394 | 2017-03-10T01:58:24.000Z | 2022-03-30T19:15:48.000Z | tests/test_slots.py | Mome/pdir2 | 57882ab902b089d86f862ccc9d655b8296b3078b | ["MIT"] | 61 | 2017-03-10T15:12:36.000Z | 2022-03-31T10:20:54.000Z | tests/test_slots.py | Mome/pdir2 | 57882ab902b089d86f862ccc9d655b8296b3078b | ["MIT"] | 67 | 2017-03-10T13:40:50.000Z | 2022-03-07T07:11:54.000Z |
"""
Test classes with __slots__
"""
from typing import List
import pdir
from pdir.attr_category import AttrCategory, category_match
BASE = 'base'
DERIVE = 'derive'
class BaseNoSlot:
pass
class BaseEmptySlot:
__slots__: List[str] = []
class BaseSlot:
__slots__: List[str] = [BASE]
class DeriveNoSlotBaseEmpty(BaseEmptySlot):
pass
class DeriveNoSlotBaseSlot(BaseSlot):
pass
class DeriveEmptySlotBaseNo(BaseNoSlot):
__slots__: List[str] = []
class DeriveEmptySlotBaseEmpty(BaseEmptySlot):
__slots__: List[str] = []
class DeriveEmptySlotBaseSlot(BaseSlot):
__slots__: List[str] = []
class DeriveSlotBaseNo(BaseNoSlot):
__slots__ = [DERIVE]
class DeriveSlotBaseEmpty(BaseEmptySlot):
__slots__ = [DERIVE]
class DeriveSlotBaseSlot(BaseSlot):
__slots__ = [DERIVE]
def test_not_set():
    expected_res = [  # (class type, expected number of slot attrs)
(DeriveNoSlotBaseEmpty, 0),
(DeriveNoSlotBaseSlot, 1),
(DeriveEmptySlotBaseNo, 0),
(DeriveEmptySlotBaseEmpty, 0),
(DeriveEmptySlotBaseSlot, 1),
(DeriveSlotBaseNo, 1),
(DeriveSlotBaseEmpty, 1),
(DeriveSlotBaseSlot, 2),
]
for c_type, attr_num in expected_res:
attr_count = 0
for attr in pdir(c_type()).pattrs:
if attr.name in [BASE, DERIVE]:
attr_count += 1
assert category_match(attr.category, AttrCategory.DESCRIPTOR)
assert category_match(attr.category, AttrCategory.SLOT)
assert attr_count == attr_num
def test_set_derive():
c_types = [DeriveSlotBaseNo, DeriveSlotBaseEmpty, DeriveSlotBaseSlot]
for c_type in c_types:
instance = c_type()
instance.derive = 'foo'
for attr in pdir(instance).pattrs:
if attr.name == DERIVE:
assert category_match(attr.category, AttrCategory.DESCRIPTOR)
assert category_match(attr.category, AttrCategory.SLOT)
break
else:
# No derive attribute found
assert False
def test_set_base():
c_types = [DeriveNoSlotBaseSlot, DeriveEmptySlotBaseSlot, DeriveSlotBaseSlot]
for c_type in c_types:
instance = c_type()
instance.base = 'foo'
for attr in pdir(instance).pattrs:
if attr.name == BASE:
assert category_match(attr.category, AttrCategory.DESCRIPTOR)
assert category_match(attr.category, AttrCategory.SLOT)
break
else:
# No base attribute found
assert False
| 24.055556 | 81 | 0.644727 |
3379a64d8b9484128adf3032e012ca1eef7e56b9 | 1,625 | py | Python | setup.py | agoragames/locust | 465100c903cc8558e408760f4e49792798dd7b16 | ["MIT"] | 1 | 2015-07-08T15:37:15.000Z | 2015-07-08T15:37:15.000Z | setup.py | agoragames/locust | 465100c903cc8558e408760f4e49792798dd7b16 | ["MIT"] | null | null | null | setup.py | agoragames/locust | 465100c903cc8558e408760f4e49792798dd7b16 | ["MIT"] | null | null | null |
# encoding: utf-8
from setuptools import setup, find_packages, Command
import sys, os
version = '0.7.2.ag0'
class Unit2Discover(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
import sys, subprocess
basecmd = ['unit2', 'discover']
errno = subprocess.call(basecmd)
raise SystemExit(errno)
setup(
name='locustio-agora',
version=version,
description="Website load testing framework",
long_description="""Locust is a python utility for doing easy, distributed load testing of a web site""",
classifiers=[
"Topic :: Software Development :: Testing :: Traffic Generation",
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
],
keywords='',
author='Jonatan Heyman, Carl Bystrom, Joakim Hamrén, Hugo Heyman, Matt Wilson',
author_email='mwilson@agoragames.com',
url='http://locust.io',
license='MIT',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=["gevent>=1.0.1", "flask>=0.10.1", "requests>=2.4.1", "msgpack-python>=0.4.2"],
tests_require=['unittest2', 'mock', 'pyzmq'],
entry_points={
'console_scripts': [
'locust = locust.main:main',
]
},
test_suite='unittest2.collector',
)
| 29.017857 | 109 | 0.633231 |
5c9ca776e05d2f5cdc00ae2f2f25d548f9aad8de | 1,298 | py | Python | src/dataset/preprocess.py | yulun-rayn/DGAPN | 6d87376fa933a0a5efff180ebe1fe5772a060987 | ["MIT"] | 5 | 2022-01-21T21:15:59.000Z | 2022-01-24T20:02:46.000Z | src/dataset/preprocess.py | yulun-rayn/DGAPN | 6d87376fa933a0a5efff180ebe1fe5772a060987 | ["MIT"] | null | null | null | src/dataset/preprocess.py | yulun-rayn/DGAPN | 6d87376fa933a0a5efff180ebe1fe5772a060987 | ["MIT"] | null | null | null |
import csv
def main(dataset_path):
with open(dataset_path, newline='') as csvfile:
reader = csv.reader(csvfile)
nb_col = len(next(reader))
if nb_col == 2:
scores, smiles = read_2col(reader)
elif nb_col == 4:
scores, smiles = read_4col(reader)
return scores, smiles
def read_2col(reader):
all_score = []
all_smiles = []
    for i, (score, smiles) in enumerate(reader):
        # Some fields are empty: an empty score raises in float() and is caught by the
        # exception; an empty smiles string is skipped by the conditional below.
        try:
            if smiles:
all_score.append(float(score))
all_smiles.append(smiles)
else:
continue
except:
print("Row " + str(i) + " was not read.")
continue
return all_score, all_smiles
def read_4col(reader):
all_score = []
all_smiles = []
    for i, (_, smiles, _, score) in enumerate(reader):
        try:
            if smiles:
all_score.append(float(score))
all_smiles.append(smiles)
else:
continue
except:
print("Row " + str(i) + " was not read.")
continue
return all_score, all_smiles
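# Illustrative usage sketch, not part of the original file: feed read_2col an in-memory
# two-column CSV. The row with an empty score is reported and skipped.
if __name__ == "__main__":
    import io
    _demo = csv.reader(io.StringIO("-7.3,CCO\n,C1CC1\n-6.1,CCN\n"))
    print(read_2col(_demo))  # prints "Row 1 was not read." then ([-7.3, -6.1], ['CCO', 'CCN'])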
| 30.186047 | 127 | 0.546225 |
0925f7fec46862b9d3fe822c5d8a834cc951206d | 3,437 | py | Python | tests/test_schema.py | repo-helper/repo_helper_pycharm | 7ae51f4a015edc6dcc7cfc0640e417ba82e7eee9 | ["MIT"] | null | null | null | tests/test_schema.py | repo-helper/repo_helper_pycharm | 7ae51f4a015edc6dcc7cfc0640e417ba82e7eee9 | ["MIT"] | 24 | 2020-12-21T16:46:30.000Z | 2022-01-25T09:49:39.000Z | tests/test_schema.py | repo-helper/repo_helper_pycharm | 7ae51f4a015edc6dcc7cfc0640e417ba82e7eee9 | ["MIT"] | null | null | null |
# stdlib
import os
import re
from abc import abstractmethod
# 3rd party
import pytest
from click.testing import CliRunner, Result # type: ignore
from coincidence import check_file_regression
from domdf_python_tools.paths import PathPlus, in_directory
from pytest_regressions.file_regression import FileRegressionFixture
# this package
from repo_helper_pycharm import schema
from repo_helper_pycharm.register_schema import register_schema
class BaseTest:
# TODO: check when file exists
if os.sep == '/':
def check_output(self, tmp_pathplus, file_regression: FileRegressionFixture, stdout: str):
assert re.match(
r"Wrote schema to .*/repo_helper/repo_helper_schema\.json",
stdout.splitlines()[0],
)
file_content = re.sub(
'value=".*/repo_helper/repo_helper_schema.json"',
'value="repo_helper/repo_helper_schema.json"',
(tmp_pathplus / ".idea/jsonSchemas.xml").read_text(),
)
check_file_regression(file_content, file_regression, extension=".xml")
else:
def check_output(self, tmp_pathplus, file_regression: FileRegressionFixture, stdout: str):
assert re.match(
r"Wrote schema to .*\\repo_helper\\repo_helper_schema\.json",
stdout.splitlines()[0],
)
file_content = re.sub(
r'value=".*\\repo_helper\\repo_helper_schema.json"',
r'value="repo_helper\\repo_helper_schema.json"',
(tmp_pathplus / ".idea/jsonSchemas.xml").read_text(),
)
check_file_regression(file_content, file_regression, extension=".xml")
@pytest.mark.usefixtures("tmp_project")
@pytest.mark.skipif(condition=os.sep == '\\', reason="Different test for platforms where os.sep == \\")
def test_pycharm_schema_forward(self, tmp_pathplus, file_regression: FileRegressionFixture, capsys):
self.run_test(tmp_pathplus, file_regression, capsys)
@pytest.mark.skipif(condition=os.sep == '/', reason="Different test for platforms where os.sep == /")
@pytest.mark.usefixtures("tmp_project")
def test_pycharm_schema_back(self, tmp_pathplus, file_regression: FileRegressionFixture, capsys):
self.run_test(tmp_pathplus, file_regression, capsys)
@abstractmethod
def run_test(self, tmp_pathplus, file_regression, capsys):
raise NotImplementedError
class TestCommand(BaseTest):
@pytest.mark.usefixtures("tmp_project")
def test_pycharm_schema_not_project(self, no_idea, tmp_pathplus):
with in_directory(tmp_pathplus):
runner = CliRunner(mix_stderr=False)
result: Result = runner.invoke(schema, catch_exceptions=False)
assert result.exit_code == 1
assert result.stderr == f"{no_idea}\nAborted!\n"
assert not result.stdout
def run_test(self, tmp_pathplus: PathPlus, file_regression: FileRegressionFixture, capsys):
(tmp_pathplus / ".idea").maybe_make()
with in_directory(tmp_pathplus):
runner = CliRunner()
result: Result = runner.invoke(schema, catch_exceptions=False)
assert result.exit_code == 0
self.check_output(tmp_pathplus, file_regression, result.stdout)
class TestFunction(BaseTest):
@pytest.mark.usefixtures("tmp_project")
def test_pycharm_schema_not_project(self, tmp_pathplus, no_idea):
with pytest.raises(FileNotFoundError, match=no_idea):
register_schema(tmp_pathplus)
def run_test(self, tmp_pathplus: PathPlus, file_regression: FileRegressionFixture, capsys):
(tmp_pathplus / ".idea").maybe_make()
register_schema(tmp_pathplus)
self.check_output(tmp_pathplus, file_regression, capsys.readouterr().out)
| 34.717172 | 104 | 0.764038 |
81b028ed5f5caad5f59c68b7f82c1a4661cf4d6f | 21,737 | py | Python | mmpose/models/backbones/vipnas_resnet.py | pallgeuer/mmpose | d3c17d5e6bdb9dbaca19f3bf53aa2802105355fd | ["Apache-2.0"] | 1 | 2022-02-13T12:27:40.000Z | 2022-02-13T12:27:40.000Z | mmpose/models/backbones/vipnas_resnet.py | pallgeuer/mmpose | d3c17d5e6bdb9dbaca19f3bf53aa2802105355fd | ["Apache-2.0"] | 1 | 2022-03-13T12:52:47.000Z | 2022-03-13T12:52:47.000Z | mmpose/models/backbones/vipnas_resnet.py | pallgeuer/mmpose | d3c17d5e6bdb9dbaca19f3bf53aa2802105355fd | ["Apache-2.0"] | null | null | null |
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import ConvModule, build_conv_layer, build_norm_layer
from mmcv.cnn.bricks import ContextBlock
from mmcv.utils.parrots_wrapper import _BatchNorm
from ..builder import BACKBONES
from .base_backbone import BaseBackbone
class ViPNAS_Bottleneck(nn.Module):
"""Bottleneck block for ViPNAS_ResNet.
Args:
in_channels (int): Input channels of this block.
out_channels (int): Output channels of this block.
expansion (int): The ratio of ``out_channels/mid_channels`` where
``mid_channels`` is the input/output channels of conv2. Default: 4.
stride (int): stride of the block. Default: 1
dilation (int): dilation of convolution. Default: 1
downsample (nn.Module): downsample operation on identity branch.
Default: None.
style (str): ``"pytorch"`` or ``"caffe"``. If set to "pytorch", the
stride-two layer is the 3x3 conv layer, otherwise the stride-two
layer is the first 1x1 conv layer. Default: "pytorch".
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
conv_cfg (dict): dictionary to construct and config conv layer.
Default: None
norm_cfg (dict): dictionary to construct and config norm layer.
Default: dict(type='BN')
        kernel_size (int): kernel size of conv2 searched in ViPNAS.
groups (int): group number of conv2 searched in ViPNAS.
attention (bool): whether to use attention module in the end of
the block.
"""
def __init__(self,
in_channels,
out_channels,
expansion=4,
stride=1,
dilation=1,
downsample=None,
style='pytorch',
with_cp=False,
conv_cfg=None,
norm_cfg=dict(type='BN'),
kernel_size=3,
groups=1,
attention=False):
# Protect mutable default arguments
norm_cfg = copy.deepcopy(norm_cfg)
super().__init__()
assert style in ['pytorch', 'caffe']
self.in_channels = in_channels
self.out_channels = out_channels
self.expansion = expansion
assert out_channels % expansion == 0
self.mid_channels = out_channels // expansion
self.stride = stride
self.dilation = dilation
self.style = style
self.with_cp = with_cp
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
if self.style == 'pytorch':
self.conv1_stride = 1
self.conv2_stride = stride
else:
self.conv1_stride = stride
self.conv2_stride = 1
self.norm1_name, norm1 = build_norm_layer(
norm_cfg, self.mid_channels, postfix=1)
self.norm2_name, norm2 = build_norm_layer(
norm_cfg, self.mid_channels, postfix=2)
self.norm3_name, norm3 = build_norm_layer(
norm_cfg, out_channels, postfix=3)
self.conv1 = build_conv_layer(
conv_cfg,
in_channels,
self.mid_channels,
kernel_size=1,
stride=self.conv1_stride,
bias=False)
self.add_module(self.norm1_name, norm1)
self.conv2 = build_conv_layer(
conv_cfg,
self.mid_channels,
self.mid_channels,
kernel_size=kernel_size,
stride=self.conv2_stride,
padding=kernel_size // 2,
groups=groups,
dilation=dilation,
bias=False)
self.add_module(self.norm2_name, norm2)
self.conv3 = build_conv_layer(
conv_cfg,
self.mid_channels,
out_channels,
kernel_size=1,
bias=False)
self.add_module(self.norm3_name, norm3)
if attention:
self.attention = ContextBlock(out_channels,
max(1.0 / 16, 16.0 / out_channels))
else:
self.attention = None
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
@property
def norm1(self):
"""nn.Module: the normalization layer named "norm1" """
return getattr(self, self.norm1_name)
@property
def norm2(self):
"""nn.Module: the normalization layer named "norm2" """
return getattr(self, self.norm2_name)
@property
def norm3(self):
"""nn.Module: the normalization layer named "norm3" """
return getattr(self, self.norm3_name)
def forward(self, x):
"""Forward function."""
def _inner_forward(x):
identity = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.norm3(out)
if self.attention is not None:
out = self.attention(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out
def get_expansion(block, expansion=None):
"""Get the expansion of a residual block.
The block expansion will be obtained by the following order:
1. If ``expansion`` is given, just return it.
2. If ``block`` has the attribute ``expansion``, then return
``block.expansion``.
    3. Return the default value according to the block type:
       1 for ``ViPNAS_Bottleneck``.
Args:
block (class): The block class.
expansion (int | None): The given expansion ratio.
Returns:
int: The expansion of the block.
"""
if isinstance(expansion, int):
assert expansion > 0
elif expansion is None:
if hasattr(block, 'expansion'):
expansion = block.expansion
elif issubclass(block, ViPNAS_Bottleneck):
expansion = 1
else:
raise TypeError(f'expansion is not specified for {block.__name__}')
else:
raise TypeError('expansion must be an integer or None')
return expansion
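# Illustrative sketch, not part of the original file: the resolution order implemented
# above, shown doctest-style with a hypothetical stand-in class. Assumes this module is
# importable (torch and mmcv installed).
#
#     >>> class _FakeBlock:            # hypothetical block exposing an expansion attribute
#     ...     expansion = 2
#     >>> get_expansion(_FakeBlock, 4)      # 1. an explicit ``expansion`` argument wins
#     4
#     >>> get_expansion(_FakeBlock, None)   # 2. otherwise fall back to ``block.expansion``
#     2
#     >>> get_expansion(ViPNAS_Bottleneck)  # 3. otherwise the default for the block type
#     1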
class ViPNAS_ResLayer(nn.Sequential):
"""ViPNAS_ResLayer to build ResNet style backbone.
Args:
block (nn.Module): Residual block used to build ViPNAS ResLayer.
num_blocks (int): Number of blocks.
in_channels (int): Input channels of this block.
out_channels (int): Output channels of this block.
expansion (int, optional): The expansion for BasicBlock/Bottleneck.
If not specified, it will firstly be obtained via
``block.expansion``. If the block has no attribute "expansion",
the following default values will be used: 1 for BasicBlock and
4 for Bottleneck. Default: None.
stride (int): stride of the first block. Default: 1.
avg_down (bool): Use AvgPool instead of stride conv when
downsampling in the bottleneck. Default: False
conv_cfg (dict): dictionary to construct and config conv layer.
Default: None
norm_cfg (dict): dictionary to construct and config norm layer.
Default: dict(type='BN')
downsample_first (bool): Downsample at the first block or last block.
False for Hourglass, True for ResNet. Default: True
kernel_size (int): Kernel Size of the corresponding convolution layer
searched in the block.
groups (int): Group number of the corresponding convolution layer
searched in the block.
attention (bool): Whether to use attention module in the end of the
block.
"""
def __init__(self,
block,
num_blocks,
in_channels,
out_channels,
expansion=None,
stride=1,
avg_down=False,
conv_cfg=None,
norm_cfg=dict(type='BN'),
downsample_first=True,
kernel_size=3,
groups=1,
attention=False,
**kwargs):
# Protect mutable default arguments
norm_cfg = copy.deepcopy(norm_cfg)
self.block = block
self.expansion = get_expansion(block, expansion)
downsample = None
if stride != 1 or in_channels != out_channels:
downsample = []
conv_stride = stride
if avg_down and stride != 1:
conv_stride = 1
downsample.append(
nn.AvgPool2d(
kernel_size=stride,
stride=stride,
ceil_mode=True,
count_include_pad=False))
downsample.extend([
build_conv_layer(
conv_cfg,
in_channels,
out_channels,
kernel_size=1,
stride=conv_stride,
bias=False),
build_norm_layer(norm_cfg, out_channels)[1]
])
downsample = nn.Sequential(*downsample)
layers = []
if downsample_first:
layers.append(
block(
in_channels=in_channels,
out_channels=out_channels,
expansion=self.expansion,
stride=stride,
downsample=downsample,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
kernel_size=kernel_size,
groups=groups,
attention=attention,
**kwargs))
in_channels = out_channels
for _ in range(1, num_blocks):
layers.append(
block(
in_channels=in_channels,
out_channels=out_channels,
expansion=self.expansion,
stride=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
kernel_size=kernel_size,
groups=groups,
attention=attention,
**kwargs))
else: # downsample_first=False is for HourglassModule
for i in range(0, num_blocks - 1):
layers.append(
block(
in_channels=in_channels,
out_channels=in_channels,
expansion=self.expansion,
stride=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
kernel_size=kernel_size,
groups=groups,
attention=attention,
**kwargs))
layers.append(
block(
in_channels=in_channels,
out_channels=out_channels,
expansion=self.expansion,
stride=stride,
downsample=downsample,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
kernel_size=kernel_size,
groups=groups,
attention=attention,
**kwargs))
super().__init__(*layers)
@BACKBONES.register_module()
class ViPNAS_ResNet(BaseBackbone):
"""ViPNAS_ResNet backbone.
"ViPNAS: Efficient Video Pose Estimation via Neural Architecture Search"
More details can be found in the `paper
<https://arxiv.org/abs/2105.10154>`__ .
Args:
depth (int): Network depth, from {18, 34, 50, 101, 152}.
in_channels (int): Number of input image channels. Default: 3.
num_stages (int): Stages of the network. Default: 4.
strides (Sequence[int]): Strides of the first block of each stage.
Default: ``(1, 2, 2, 2)``.
dilations (Sequence[int]): Dilation of each stage.
Default: ``(1, 1, 1, 1)``.
        out_indices (Sequence[int]): Output from which stages. If only one
            stage is specified, a single tensor (feature map) is returned;
            if multiple stages are specified, a tuple of tensors will be
            returned. Default: ``(3, )``.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer.
deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv.
Default: False.
avg_down (bool): Use AvgPool instead of stride conv when
downsampling in the bottleneck. Default: False.
frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
-1 means not freezing any parameters. Default: -1.
conv_cfg (dict | None): The config dict for conv layers. Default: None.
norm_cfg (dict): The config dict for norm layers.
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only. Default: False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
zero_init_residual (bool): Whether to use zero init for last norm layer
in resblocks to let them behave as identity. Default: True.
wid (list(int)): Searched width config for each stage.
expan (list(int)): Searched expansion ratio config for each stage.
dep (list(int)): Searched depth config for each stage.
ks (list(int)): Searched kernel size config for each stage.
group (list(int)): Searched group number config for each stage.
att (list(bool)): Searched attention config for each stage.
"""
arch_settings = {
50: ViPNAS_Bottleneck,
}
def __init__(self,
depth,
in_channels=3,
num_stages=4,
strides=(1, 2, 2, 2),
dilations=(1, 1, 1, 1),
out_indices=(3, ),
style='pytorch',
deep_stem=False,
avg_down=False,
frozen_stages=-1,
conv_cfg=None,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=False,
with_cp=False,
zero_init_residual=True,
wid=[48, 80, 160, 304, 608],
expan=[None, 1, 1, 1, 1],
dep=[None, 4, 6, 7, 3],
ks=[7, 3, 5, 5, 5],
group=[None, 16, 16, 16, 16],
att=[None, True, False, True, True]):
# Protect mutable default arguments
norm_cfg = copy.deepcopy(norm_cfg)
super().__init__()
if depth not in self.arch_settings:
raise KeyError(f'invalid depth {depth} for resnet')
self.depth = depth
self.stem_channels = dep[0]
self.num_stages = num_stages
assert 1 <= num_stages <= 4
self.strides = strides
self.dilations = dilations
assert len(strides) == len(dilations) == num_stages
self.out_indices = out_indices
assert max(out_indices) < num_stages
self.style = style
self.deep_stem = deep_stem
self.avg_down = avg_down
self.frozen_stages = frozen_stages
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.with_cp = with_cp
self.norm_eval = norm_eval
self.zero_init_residual = zero_init_residual
self.block = self.arch_settings[depth]
self.stage_blocks = dep[1:1 + num_stages]
self._make_stem_layer(in_channels, wid[0], ks[0])
self.res_layers = []
_in_channels = wid[0]
for i, num_blocks in enumerate(self.stage_blocks):
expansion = get_expansion(self.block, expan[i + 1])
_out_channels = wid[i + 1] * expansion
stride = strides[i]
dilation = dilations[i]
res_layer = self.make_res_layer(
block=self.block,
num_blocks=num_blocks,
in_channels=_in_channels,
out_channels=_out_channels,
expansion=expansion,
stride=stride,
dilation=dilation,
style=self.style,
avg_down=self.avg_down,
with_cp=with_cp,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
kernel_size=ks[i + 1],
groups=group[i + 1],
attention=att[i + 1])
_in_channels = _out_channels
layer_name = f'layer{i + 1}'
self.add_module(layer_name, res_layer)
self.res_layers.append(layer_name)
self._freeze_stages()
self.feat_dim = res_layer[-1].out_channels
def make_res_layer(self, **kwargs):
"""Make a ViPNAS ResLayer."""
return ViPNAS_ResLayer(**kwargs)
@property
def norm1(self):
"""nn.Module: the normalization layer named "norm1" """
return getattr(self, self.norm1_name)
def _make_stem_layer(self, in_channels, stem_channels, kernel_size):
"""Make stem layer."""
if self.deep_stem:
self.stem = nn.Sequential(
ConvModule(
in_channels,
stem_channels // 2,
kernel_size=3,
stride=2,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
inplace=True),
ConvModule(
stem_channels // 2,
stem_channels // 2,
kernel_size=3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
inplace=True),
ConvModule(
stem_channels // 2,
stem_channels,
kernel_size=3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
inplace=True))
else:
self.conv1 = build_conv_layer(
self.conv_cfg,
in_channels,
stem_channels,
kernel_size=kernel_size,
stride=2,
padding=kernel_size // 2,
bias=False)
self.norm1_name, norm1 = build_norm_layer(
self.norm_cfg, stem_channels, postfix=1)
self.add_module(self.norm1_name, norm1)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
def _freeze_stages(self):
"""Freeze parameters."""
if self.frozen_stages >= 0:
if self.deep_stem:
self.stem.eval()
for param in self.stem.parameters():
param.requires_grad = False
else:
self.norm1.eval()
for m in [self.conv1, self.norm1]:
for param in m.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, f'layer{i}')
m.eval()
for param in m.parameters():
param.requires_grad = False
def init_weights(self, pretrained=None):
"""Initialize model weights."""
super().init_weights(pretrained)
if pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.001)
for name, _ in m.named_parameters():
if name in ['bias']:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
"""Forward function."""
if self.deep_stem:
x = self.stem(x)
else:
x = self.conv1(x)
x = self.norm1(x)
x = self.relu(x)
x = self.maxpool(x)
outs = []
for i, layer_name in enumerate(self.res_layers):
res_layer = getattr(self, layer_name)
x = res_layer(x)
if i in self.out_indices:
outs.append(x)
if len(outs) == 1:
return outs[0]
return tuple(outs)
def train(self, mode=True):
"""Convert the model into training mode."""
super().train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
# trick: eval have effect on BatchNorm only
if isinstance(m, _BatchNorm):
m.eval()
| 36.842373 | 79 | 0.538483 |
c9575c00e220ab8c03fd2544c69483477217d825 | 8,886 | py | Python | contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/HIVE/package/alerts/alert_webhcat_server.py | likenamehaojie/Apache-Ambari-ZH | 5973025bd694cdbb4b49fb4c4e0d774782811ff6 | ["Apache-2.0"] | 1,664 | 2015-01-03T09:35:21.000Z | 2022-03-31T04:55:24.000Z | contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/HIVE/package/alerts/alert_webhcat_server.py | likenamehaojie/Apache-Ambari-ZH | 5973025bd694cdbb4b49fb4c4e0d774782811ff6 | ["Apache-2.0"] | 3,018 | 2015-02-19T20:16:10.000Z | 2021-11-13T20:47:48.000Z | contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/HIVE/package/alerts/alert_webhcat_server.py | likenamehaojie/Apache-Ambari-ZH | 5973025bd694cdbb4b49fb4c4e0d774782811ff6 | ["Apache-2.0"] | 1,673 | 2015-01-06T14:14:42.000Z | 2022-03-31T07:22:30.000Z |
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import ambari_simplejson as json  # simplejson is much faster compared to the Python 2.6 json module and has the same set of functions.
import socket
import time
import urllib2
import traceback
import logging
from resource_management.core.environment import Environment
from resource_management.libraries.functions.curl_krb_request import curl_krb_request
from resource_management.libraries.functions.curl_krb_request import DEFAULT_KERBEROS_KINIT_TIMER_MS
from resource_management.libraries.functions.curl_krb_request import KERBEROS_KINIT_TIMER_PARAMETER
RESULT_CODE_OK = "OK"
RESULT_CODE_CRITICAL = "CRITICAL"
RESULT_CODE_UNKNOWN = "UNKNOWN"
OK_MESSAGE = "WebHCat status was OK ({0:.3f}s response from {1})"
CRITICAL_CONNECTION_MESSAGE = "Connection failed to {0} + \n{1}"
CRITICAL_HTTP_MESSAGE = "HTTP {0} response from {1} \n{2}"
CRITICAL_WEBHCAT_STATUS_MESSAGE = 'WebHCat returned an unexpected status of "{0}"'
CRITICAL_WEBHCAT_UNKNOWN_JSON_MESSAGE = "Unable to determine WebHCat health from unexpected JSON response"
TEMPLETON_PORT_KEY = '{{webhcat-site/templeton.port}}'
SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
WEBHCAT_PRINCIPAL_KEY = '{{webhcat-site/templeton.kerberos.principal}}'
WEBHCAT_KEYTAB_KEY = '{{webhcat-site/templeton.kerberos.keytab}}'
SMOKEUSER_KEYTAB_KEY = '{{cluster-env/smokeuser_keytab}}'
SMOKEUSER_PRINCIPAL_KEY = '{{cluster-env/smokeuser_principal_name}}'
SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'
# The configured Kerberos executable search paths, if any
KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY = '{{kerberos-env/executable_search_paths}}'
WEBHCAT_OK_RESPONSE = 'ok'
WEBHCAT_PORT_DEFAULT = 50111
CONNECTION_TIMEOUT_KEY = 'connection.timeout'
CONNECTION_TIMEOUT_DEFAULT = 5.0
CURL_CONNECTION_TIMEOUT_DEFAULT = str(int(CONNECTION_TIMEOUT_DEFAULT))
# default keytab location
SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY = 'default.smoke.keytab'
SMOKEUSER_KEYTAB_DEFAULT = '/etc/security/keytabs/smokeuser.headless.keytab'
# default smoke principal
SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY = 'default.smoke.principal'
SMOKEUSER_PRINCIPAL_DEFAULT = 'ambari-qa@EXAMPLE.COM'
# default smoke user
SMOKEUSER_DEFAULT = 'ambari-qa'
logger = logging.getLogger('ambari_alerts')
def get_tokens():
"""
Returns a tuple of tokens in the format {{site/property}} that will be used
to build the dictionary passed into execute
"""
return (TEMPLETON_PORT_KEY, SECURITY_ENABLED_KEY, SMOKEUSER_KEYTAB_KEY,SMOKEUSER_PRINCIPAL_KEY,
KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY, SMOKEUSER_KEY)
def execute(configurations={}, parameters={}, host_name=None):
"""
Returns a tuple containing the result code and a pre-formatted result label
Keyword arguments:
configurations (dictionary): a mapping of configuration key to value
parameters (dictionary): a mapping of script parameter key to value
host_name (string): the name of this host where the alert is running
"""
result_code = RESULT_CODE_UNKNOWN
if configurations is None:
return (result_code, ['There were no configurations supplied to the script.'])
webhcat_port = WEBHCAT_PORT_DEFAULT
if TEMPLETON_PORT_KEY in configurations:
webhcat_port = int(configurations[TEMPLETON_PORT_KEY])
security_enabled = False
if SECURITY_ENABLED_KEY in configurations:
security_enabled = configurations[SECURITY_ENABLED_KEY].lower() == 'true'
# parse script arguments
connection_timeout = CONNECTION_TIMEOUT_DEFAULT
curl_connection_timeout = CURL_CONNECTION_TIMEOUT_DEFAULT
if CONNECTION_TIMEOUT_KEY in parameters:
connection_timeout = float(parameters[CONNECTION_TIMEOUT_KEY])
curl_connection_timeout = str(int(connection_timeout))
# the alert will always run on the webhcat host
if host_name is None:
host_name = socket.getfqdn()
smokeuser = SMOKEUSER_DEFAULT
if SMOKEUSER_KEY in configurations:
smokeuser = configurations[SMOKEUSER_KEY]
# webhcat always uses http, never SSL
query_url = "http://{0}:{1}/templeton/v1/status?user.name={2}".format(host_name, webhcat_port, smokeuser)
# initialize
total_time = 0
json_response = {}
if security_enabled:
try:
# defaults
smokeuser_keytab = SMOKEUSER_KEYTAB_DEFAULT
smokeuser_principal = SMOKEUSER_PRINCIPAL_DEFAULT
# check script params
if SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY in parameters:
smokeuser_principal = parameters[SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY]
if SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY in parameters:
smokeuser_keytab = parameters[SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY]
# check configurations last as they should always take precedence
if SMOKEUSER_PRINCIPAL_KEY in configurations:
smokeuser_principal = configurations[SMOKEUSER_PRINCIPAL_KEY]
if SMOKEUSER_KEYTAB_KEY in configurations:
smokeuser_keytab = configurations[SMOKEUSER_KEYTAB_KEY]
# Get the configured Kerberos executable search paths, if any
kerberos_executable_search_paths = None
if KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY in configurations:
kerberos_executable_search_paths = configurations[KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY]
kinit_timer_ms = parameters.get(KERBEROS_KINIT_TIMER_PARAMETER, DEFAULT_KERBEROS_KINIT_TIMER_MS)
env = Environment.get_instance()
stdout, stderr, time_millis = curl_krb_request(env.tmp_dir, smokeuser_keytab, smokeuser_principal,
query_url, "webhcat_alert_cc_", kerberos_executable_search_paths, True,
"WebHCat Server Status", smokeuser, connection_timeout=curl_connection_timeout,
kinit_timer_ms = kinit_timer_ms)
# check the response code
response_code = int(stdout)
# 0 indicates no connection
if response_code == 0:
label = CRITICAL_CONNECTION_MESSAGE.format(query_url, traceback.format_exc())
return (RESULT_CODE_CRITICAL, [label])
# any other response aside from 200 is a problem
if response_code != 200:
label = CRITICAL_HTTP_MESSAGE.format(response_code, query_url, traceback.format_exc())
return (RESULT_CODE_CRITICAL, [label])
# now that we have the http status and it was 200, get the content
stdout, stderr, total_time = curl_krb_request(env.tmp_dir, smokeuser_keytab, smokeuser_principal,
query_url, "webhcat_alert_cc_", kerberos_executable_search_paths,
False, "WebHCat Server Status", smokeuser, connection_timeout=curl_connection_timeout,
kinit_timer_ms = kinit_timer_ms)
json_response = json.loads(stdout)
except:
return (RESULT_CODE_CRITICAL, [traceback.format_exc()])
else:
url_response = None
try:
# execute the query for the JSON that includes WebHCat status
start_time = time.time()
url_response = urllib2.urlopen(query_url, timeout=connection_timeout)
total_time = time.time() - start_time
json_response = json.loads(url_response.read())
except urllib2.HTTPError as httpError:
label = CRITICAL_HTTP_MESSAGE.format(httpError.code, query_url, traceback.format_exc())
return (RESULT_CODE_CRITICAL, [label])
except:
label = CRITICAL_CONNECTION_MESSAGE.format(query_url, traceback.format_exc())
return (RESULT_CODE_CRITICAL, [label])
finally:
if url_response is not None:
try:
url_response.close()
except:
pass
# if status is not in the response, we can't do any check; return CRIT
if 'status' not in json_response:
return (RESULT_CODE_CRITICAL, [CRITICAL_WEBHCAT_UNKNOWN_JSON_MESSAGE + str(json_response)])
# URL response received, parse it
try:
webhcat_status = json_response['status']
except:
return (RESULT_CODE_CRITICAL, [CRITICAL_WEBHCAT_UNKNOWN_JSON_MESSAGE + "\n" + traceback.format_exc()])
# proper JSON received, compare against known value
if webhcat_status.lower() == WEBHCAT_OK_RESPONSE:
result_code = RESULT_CODE_OK
label = OK_MESSAGE.format(total_time, query_url)
else:
result_code = RESULT_CODE_CRITICAL
label = CRITICAL_WEBHCAT_STATUS_MESSAGE.format(webhcat_status)
return (result_code, [label])
| 38.803493 | 128 | 0.770876 |
2aa77ecdc3eb52ae3dffd876310a7e5a5c3a837b | 69 | py | Python | src/eda/__init__.py | jonas-mika/ml-project | c052c33010033cd9fd596eb5ac4d270d1bf98ee3 | ["MIT"] | null | null | null | src/eda/__init__.py | jonas-mika/ml-project | c052c33010033cd9fd596eb5ac4d270d1bf98ee3 | ["MIT"] | null | null | null | src/eda/__init__.py | jonas-mika/ml-project | c052c33010033cd9fd596eb5ac4d270d1bf98ee3 | ["MIT"] | 1 | 2022-01-29T17:23:15.000Z | 2022-01-29T17:23:15.000Z |
from ._feature_inspection import run_eda
__all__ = [
'run_eda'
]
| 13.8 | 40 | 0.724638 |
10e04465a4a6f8d10f7ec2fbb91aa3c03053a83f | 3,890 | py | Python | Archive/Current_Software_Modules/R&D/RETIRED_StitchAPI/main.py | Jual6332/OTheRS_IP | 420cfde562249941f4a85f936d549d11edadc36f | ["MIT"] | null | null | null | Archive/Current_Software_Modules/R&D/RETIRED_StitchAPI/main.py | Jual6332/OTheRS_IP | 420cfde562249941f4a85f936d549d11edadc36f | ["MIT"] | 17 | 2018-11-13T04:13:26.000Z | 2019-04-29T21:00:26.000Z | Archive/Current_Software_Modules/R&D/RETIRED_StitchAPI/main.py | Jual6332/OTheRS_IP | 420cfde562249941f4a85f936d549d11edadc36f | ["MIT"] | 1 | 2018-10-29T18:14:45.000Z | 2018-10-29T18:14:45.000Z |
#!/usr/bin/env python3
################################################################################
################################################################################
### "main.py" ##################################################################
################################################################################
## Justin Alvey ####################################################
## OTheRS IP Lead ####################################################
## Date Created: 1/20/19 ####################################################
## Date Modified: 2/16/19 ####################################################
################################################################################
# Main Purpose: Continue to explore image stitching techniques. Return StitchAPI
# resulting image data.
# Action Item: Test with 160X120 images.
# Caution: Prevent temperature data from being compromised.
#####################---------Libraries---------################################
import numpy as np
import cv2
import math
import time
import sys
from PIL import Image
from matplotlib import pyplot as plt
from random import randrange
#####################---------Main Code---------################################
def main():
# Read in images
img = cv2.imread('Inputs/stitch3.png',cv2.IMREAD_COLOR) # Left image for stitch
img2 = cv2.imread('Inputs/stitch4.png',cv2.IMREAD_COLOR) # Right image for stitch
# Align Images
align_images(img,img2) # Check # matches, quality of matches
result = call_stitchAPI(img,img2) # (img1,img2) -> single image
# Write Image
cv2.imwrite("Outputs/Low-ResThermalOutput.jpg",result) # Output
def align_images(img1,img2):
# Otsu Thresholding Test 1
img1Gray = gray_scale(img1)
thr = Otsu_thresholding(img1Gray)
# Otsu Thresholding Test 2
img2Gray = gray_scale(img2)
thr2 = Otsu_thresholding(img2Gray)
# Initialization of the Feature Detector
orb = cv2.ORB_create()
# Locate keypoints in the image, calculate descriptors
kp, desc = orb.detectAndCompute(img1Gray,None)
kp2, desc2 = orb.detectAndCompute(img2Gray,None)
    # Create BFMatcher object for matching images (Brute-Force Hamming matcher)
bf = cv2.BFMatcher(cv2.NORM_HAMMING,crossCheck=True)
# Match Descriptor, Similar Features between 2 Images
matches = bf.match(desc,desc2)
    # Sort the matches by score for stitching/meshing
matches_final = sorted(matches, key = lambda x:x.distance)
print("Total matches:"+str(len(matches_final)))
    # Remove the weakest matches (keep only the top 90% of matches by distance)
goodMatches = int(len(matches_final)*0.90)
matches_final = matches_final[:goodMatches]
print("Top matches:"+str(len(matches_final)))
# Draw the top Matches in the Image
img3 = cv2.drawMatches(img1,kp,img2,kp2,matches_final,None,flags=2)
cv2.imwrite("Outputs/FindMatches.jpg",img3)
# Initialize Image Objects
points1 = np.zeros((len(matches),2), dtype=np.float32) # Initialization
points2 = np.zeros((len(matches),2), dtype=np.float32) # Initialization
# Extract Location of Good Matches
for x, match in enumerate(matches):
points1[x,:] = kp[match.queryIdx].pt
points2[x,:] = kp2[match.trainIdx].pt
def call_stitchAPI(img1,img2):
# Call Stitch API, return resulting image data
stitcher = cv2.createStitcher(False)
result = stitcher.stitch((img1,img2))
return result[1]
# Grayscale
def gray_scale(img):
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
return(gray)
# Otsu Thresholding
def Otsu_thresholding(blur):
retval, threshold = cv2.threshold(blur,10,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
return(threshold)
if __name__ == '__main__':
main()
#####################-----------Close-----------################################
| 37.403846 | 85 | 0.560154 |
adbd1b6b1d983e326fe2071d4431cd8fe12897ac | 17,040 | py | Python | core/dbt/parser/models.py | xshadowlegendx/dbt-core | c690ecc1fd0f1239db9ddd26c4e9cd14ea508b77 | ["Apache-2.0"] | null | null | null | core/dbt/parser/models.py | xshadowlegendx/dbt-core | c690ecc1fd0f1239db9ddd26c4e9cd14ea508b77 | ["Apache-2.0"] | null | null | null | core/dbt/parser/models.py | xshadowlegendx/dbt-core | c690ecc1fd0f1239db9ddd26c4e9cd14ea508b77 | ["Apache-2.0"] | null | null | null |
from copy import deepcopy
from dbt.context.context_config import ContextConfig
from dbt.contracts.graph.parsed import ParsedModelNode
import dbt.flags as flags
from dbt.logger import GLOBAL_LOGGER as logger
from dbt.node_types import NodeType
from dbt.parser.base import SimpleSQLParser
from dbt.parser.search import FileBlock
import dbt.tracking as tracking
from dbt import utils
from dbt_extractor import ExtractionError, py_extract_from_source # type: ignore
from functools import reduce
from itertools import chain
import random
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
class ModelParser(SimpleSQLParser[ParsedModelNode]):
def parse_from_dict(self, dct, validate=True) -> ParsedModelNode:
if validate:
ParsedModelNode.validate(dct)
return ParsedModelNode.from_dict(dct)
@property
def resource_type(self) -> NodeType:
return NodeType.Model
@classmethod
def get_compiled_path(cls, block: FileBlock):
return block.path.relative_path
def render_update(
self, node: ParsedModelNode, config: ContextConfig
) -> None:
self.manifest._parsing_info.static_analysis_path_count += 1
if not flags.STATIC_PARSER:
# jinja rendering
super().render_update(node, config)
logger.debug(f"1605: jinja rendering because of STATIC_PARSER flag. file: {node.path}")
return
# only sample for experimental parser correctness on normal runs,
# not when the experimental parser flag is on.
exp_sample: bool = False
# sampling the stable static parser against jinja is significantly
        # more expensive and therefore done far less frequently.
stable_sample: bool = False
# there are two samples above, and it is perfectly fine if both happen
# at the same time. If that happens, the experimental parser, stable
# parser, and jinja rendering will run on the same model file and
# send back codes for experimental v stable, and stable v jinja.
if not flags.USE_EXPERIMENTAL_PARSER:
# `True` roughly 1/5000 times this function is called
# sample = random.randint(1, 5001) == 5000
stable_sample = random.randint(1, 5001) == 5000
# sampling the experimental parser is explicitly disabled here, but use the following
# commented code to sample a fraction of the time when new
# experimental features are added.
# `True` roughly 1/100 times this function is called
# exp_sample = random.randint(1, 101) == 100
# top-level declaration of variables
statically_parsed: Optional[Union[str, Dict[str, List[Any]]]] = None
experimental_sample: Optional[Union[str, Dict[str, List[Any]]]] = None
exp_sample_node: Optional[ParsedModelNode] = None
exp_sample_config: Optional[ContextConfig] = None
jinja_sample_node: Optional[ParsedModelNode] = None
jinja_sample_config: Optional[ContextConfig] = None
result: List[str] = []
# sample the experimental parser only during a normal run
if exp_sample and not flags.USE_EXPERIMENTAL_PARSER:
logger.debug(f"1610: conducting experimental parser sample on {node.path}")
experimental_sample = self.run_experimental_parser(node)
# if the experimental parser succeeded, make a full copy of model parser
# and populate _everything_ into it so it can be compared apples-to-apples
# with a fully jinja-rendered project. This is necessary because the experimental
# parser will likely add features that the existing static parser will fail on
# so comparing those directly would give us bad results. The comparison will be
# conducted after this model has been fully rendered either by the static parser
# or by full jinja rendering
if isinstance(experimental_sample, dict):
model_parser_copy = self.partial_deepcopy()
exp_sample_node = deepcopy(node)
exp_sample_config = deepcopy(config)
model_parser_copy.populate(
exp_sample_node,
exp_sample_config,
experimental_sample
)
# use the experimental parser exclusively if the flag is on
if flags.USE_EXPERIMENTAL_PARSER:
statically_parsed = self.run_experimental_parser(node)
# run the stable static parser unless it is explicitly turned off
else:
statically_parsed = self.run_static_parser(node)
# if the static parser succeeded, extract some data in easy-to-compare formats
if isinstance(statically_parsed, dict):
# only sample jinja for the purpose of comparing with the stable static parser
# if we know we don't need to fall back to jinja (i.e. - nothing to compare
# with jinja v jinja).
# This means we skip sampling for 40% of the 1/5000 samples. We could run the
# sampling rng here, but the effect would be the same since we would only roll
# it 40% of the time. So I've opted to keep all the rng code colocated above.
if stable_sample and not flags.USE_EXPERIMENTAL_PARSER:
logger.debug(f"1611: conducting full jinja rendering sample on {node.path}")
# if this will _never_ mutate anything `self` we could avoid these deep copies,
# but we can't really guarantee that going forward.
model_parser_copy = self.partial_deepcopy()
jinja_sample_node = deepcopy(node)
jinja_sample_config = deepcopy(config)
# rendering mutates the node and the config
super(ModelParser, model_parser_copy) \
.render_update(jinja_sample_node, jinja_sample_config)
# update the unrendered config with values from the static parser.
# values from yaml files are in there already
self.populate(
node,
config,
statically_parsed
)
# if we took a jinja sample, compare now that the base node has been populated
if jinja_sample_node is not None and jinja_sample_config is not None:
result = _get_stable_sample_result(
jinja_sample_node,
jinja_sample_config,
node,
config
)
# if we took an experimental sample, compare now that the base node has been populated
if exp_sample_node is not None and exp_sample_config is not None:
result = _get_exp_sample_result(
exp_sample_node,
exp_sample_config,
node,
config,
)
self.manifest._parsing_info.static_analysis_parsed_path_count += 1
# if the static parser failed, add the correct messages for tracking
elif isinstance(statically_parsed, str):
if statically_parsed == "cannot_parse":
result += ["01_stable_parser_cannot_parse"]
elif statically_parsed == "has_banned_macro":
result += ["08_has_banned_macro"]
super().render_update(node, config)
logger.debug(
f"1602: parser fallback to jinja rendering on {node.path}"
)
# if the static parser didn't succeed, fall back to jinja
else:
# jinja rendering
super().render_update(node, config)
logger.debug(
f"1602: parser fallback to jinja rendering on {node.path}"
)
# only send the tracking event if there is at least one result code
if result:
# fire a tracking event. this fires one event for every sample
# so that we have data on a per file basis. Not only can we expect
# no false positives or misses, we can expect the number model
# files parseable by the experimental parser to match our internal
# testing.
if tracking.active_user is not None: # None in some tests
tracking.track_experimental_parser_sample({
"project_id": self.root_project.hashed_name(),
"file_id": utils.get_hash(node),
"status": result
})
def run_static_parser(
self, node: ParsedModelNode
) -> Optional[Union[str, Dict[str, List[Any]]]]:
# if any banned macros have been overridden by the user, we cannot use the static parser.
if self._has_banned_macro(node):
# this log line is used for integration testing. If you change
# the code at the beginning of the line change the tests in
# test/integration/072_experimental_parser_tests/test_all_experimental_parser.py
logger.debug(
f"1601: detected macro override of ref/source/config in the scope of {node.path}"
)
return "has_banned_macro"
# run the stable static parser and return the results
try:
statically_parsed = py_extract_from_source(
node.raw_sql
)
logger.debug(f"1699: static parser successfully parsed {node.path}")
return _shift_sources(statically_parsed)
# if we want information on what features are barring the static
# parser from reading model files, this is where we would add that
# since that information is stored in the `ExtractionError`.
except ExtractionError:
logger.debug(
f"1603: static parser failed on {node.path}"
)
return "cannot_parse"
def run_experimental_parser(
self, node: ParsedModelNode
) -> Optional[Union[str, Dict[str, List[Any]]]]:
# if any banned macros have been overridden by the user, we cannot use the static parser.
if self._has_banned_macro(node):
# this log line is used for integration testing. If you change
# the code at the beginning of the line change the tests in
# test/integration/072_experimental_parser_tests/test_all_experimental_parser.py
logger.debug(
f"1601: detected macro override of ref/source/config in the scope of {node.path}"
)
return "has_banned_macro"
# run the experimental parser and return the results
try:
# for now, this line calls the stable static parser since there are no
# experimental features. Change `py_extract_from_source` to the new
# experimental call when we add additional features.
experimentally_parsed = py_extract_from_source(
node.raw_sql
)
logger.debug(f"1698: experimental parser successfully parsed {node.path}")
return _shift_sources(experimentally_parsed)
# if we want information on what features are barring the experimental
# parser from reading model files, this is where we would add that
# since that information is stored in the `ExtractionError`.
except ExtractionError:
logger.debug(
f"1604: experimental parser failed on {node.path}"
)
return "cannot_parse"
# checks for banned macros
def _has_banned_macro(
self, node: ParsedModelNode
) -> bool:
# first check if there is a banned macro defined in scope for this model file
root_project_name = self.root_project.project_name
project_name = node.package_name
banned_macros = ['ref', 'source', 'config']
all_banned_macro_keys: Iterator[str] = chain.from_iterable(
map(
lambda name: [
f"macro.{project_name}.{name}",
f"macro.{root_project_name}.{name}"
],
banned_macros
)
)
return reduce(
lambda z, key: z or (key in self.manifest.macros),
all_banned_macro_keys,
False
)
# this method updates the model node rendered and unrendered config as well
# as the node object. Used to populate these values when circumventing jinja
# rendering like the static parser.
def populate(
self,
node: ParsedModelNode,
config: ContextConfig,
statically_parsed: Dict[str, Any]
):
# manually fit configs in
config._config_call_dict = _get_config_call_dict(statically_parsed)
        # if there are hooks present, this WILL render jinja. Will need to change
        # when the experimental parser supports hooks
self.update_parsed_node_config(node, config)
# update the unrendered config with values from the file.
# values from yaml files are in there already
node.unrendered_config.update(dict(statically_parsed['configs']))
# set refs and sources on the node object
node.refs += statically_parsed['refs']
node.sources += statically_parsed['sources']
# configs don't need to be merged into the node because they
# are read from config._config_call_dict
# the manifest is often huge so this method avoids deepcopying it
def partial_deepcopy(self):
return ModelParser(
deepcopy(self.project),
self.manifest,
deepcopy(self.root_project)
)
# pure function. safe to use elsewhere, but unlikely to be useful outside this file.
def _get_config_call_dict(
static_parser_result: Dict[str, Any]
) -> Dict[str, Any]:
config_call_dict: Dict[str, Any] = {}
for c in static_parser_result['configs']:
ContextConfig._add_config_call(config_call_dict, {c[0]: c[1]})
return config_call_dict
# TODO if we format sources in the extractor to match this type, we won't need this function.
def _shift_sources(
static_parser_result: Dict[str, List[Any]]
) -> Dict[str, List[Any]]:
shifted_result = deepcopy(static_parser_result)
source_calls = []
for s in static_parser_result['sources']:
source_calls.append([s[0], s[1]])
shifted_result['sources'] = source_calls
return shifted_result
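# Illustrative sketch, not part of the original file: what the shift looks like on a
# hypothetical extractor result, doctest-style.
#
#     >>> _shift_sources({'refs': [], 'sources': [('raw', 'orders')], 'configs': []})
#     {'refs': [], 'sources': [['raw', 'orders']], 'configs': []}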
# returns a list of string codes to be sent as a tracking event
def _get_exp_sample_result(
sample_node: ParsedModelNode,
sample_config: ContextConfig,
node: ParsedModelNode,
config: ContextConfig
) -> List[str]:
result: List[Tuple[int, str]] = _get_sample_result(sample_node, sample_config, node, config)
def process(codemsg):
code, msg = codemsg
return f"0{code}_experimental_{msg}"
return list(map(process, result))
# returns a list of string codes to be sent as a tracking event
def _get_stable_sample_result(
sample_node: ParsedModelNode,
sample_config: ContextConfig,
node: ParsedModelNode,
config: ContextConfig
) -> List[str]:
result: List[Tuple[int, str]] = _get_sample_result(sample_node, sample_config, node, config)
def process(codemsg):
code, msg = codemsg
return f"8{code}_stable_{msg}"
return list(map(process, result))
# returns a list of string codes that need a single digit prefix to be prepended
# before being sent as a tracking event
def _get_sample_result(
sample_node: ParsedModelNode,
sample_config: ContextConfig,
node: ParsedModelNode,
config: ContextConfig
) -> List[Tuple[int, str]]:
result: List[Tuple[int, str]] = []
# look for false positive configs
for k in sample_config._config_call_dict.keys():
if k not in config._config_call_dict.keys():
result += [(2, "false_positive_config_value")]
break
# look for missed configs
for k in config._config_call_dict.keys():
if k not in sample_config._config_call_dict.keys():
result += [(3, "missed_config_value")]
break
# look for false positive sources
for s in sample_node.sources:
if s not in node.sources:
result += [(4, "false_positive_source_value")]
break
# look for missed sources
for s in node.sources:
if s not in sample_node.sources:
result += [(5, "missed_source_value")]
break
# look for false positive refs
for r in sample_node.refs:
if r not in node.refs:
result += [(6, "false_positive_ref_value")]
break
# look for missed refs
for r in node.refs:
if r not in sample_node.refs:
result += [(7, "missed_ref_value")]
break
# if there are no errors, return a success value
if not result:
result = [(0, "exact_match")]
return result
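# Illustration (hypothetical tuples, not real dbt parser objects) of how the tracking
# codes above are assembled: _get_sample_result yields (code, message) pairs and the
# experimental/stable wrappers prepend a single-digit prefix before the event is sent.
_example_result = [(2, "false_positive_config_value"), (5, "missed_source_value")]
print([f"0{code}_experimental_{msg}" for code, msg in _example_result])
# -> ['02_experimental_false_positive_config_value', '05_experimental_missed_source_value']
print([f"8{code}_stable_{msg}" for code, msg in _example_result])
# -> ['82_stable_false_positive_config_value', '85_stable_missed_source_value']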
| 41.764706
| 99
| 0.640669
|
0d5dd289f31d63e5dc968a4c5df9e5bdfa427fd7
| 1,119
|
py
|
Python
|
b_cfn_lambda_layer/dependency.py
|
Biomapas/B.CfnLambdaLayer
|
b2ddb2e233d626e4591235a0174b70fda9807063
|
[
"Apache-2.0"
] | 1
|
2021-10-04T12:23:53.000Z
|
2021-10-04T12:23:53.000Z
|
b_cfn_lambda_layer/dependency.py
|
Biomapas/B.CfnLambdaLayer
|
b2ddb2e233d626e4591235a0174b70fda9807063
|
[
"Apache-2.0"
] | null | null | null |
b_cfn_lambda_layer/dependency.py
|
Biomapas/B.CfnLambdaLayer
|
b2ddb2e233d626e4591235a0174b70fda9807063
|
[
"Apache-2.0"
] | null | null | null |
from typing import Optional
from b_cfn_lambda_layer.package_version import PackageVersion
class Dependency:
def __init__(self, name: str, version: Optional[PackageVersion] = None):
self.__name = name
self.__version = version or PackageVersion.latest()
def build_string(self) -> str:
"""
Creates string representation of the dependency that can be used by PIP tool.
For example, if you initiate this class:
>>> dep = Dependency('jwt', PackageVersion.from_string_version('123'))
Then when you execute this method, you will get:
>>> dep.build_string() == 'jwt==123'
:return: String representation.
"""
if self.__version.version_type == PackageVersion.VersionType.LATEST:
return self.__name
if self.__version.version_type == PackageVersion.VersionType.SPECIFIC:
return f'{self.__name}=={self.__version.version_string}'
if self.__version.version_type == PackageVersion.VersionType.NONE:
return ''
else:
raise ValueError('Unsupported enum value.')
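# Usage sketch (based on the docstring above; PackageVersion is the class imported
# from b_cfn_lambda_layer.package_version and is assumed to behave as documented):
#   Dependency('jwt').build_string()                                               -> 'jwt'
#   Dependency('jwt', PackageVersion.from_string_version('1.2.3')).build_string()  -> 'jwt==1.2.3'
# A NONE-type version yields '' and any other version type raises ValueError.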
| 33.909091
| 85
| 0.662198
|
d56f82baeaa668baa3a9e6a76a6f6926208ed2bb
| 2,490
|
py
|
Python
|
tutorials/workshop/lib/lfs.py
|
Conengmo/snorkel
|
36868e8a84de19b94e1c4b8eceaa64969a61a46b
|
[
"Apache-2.0"
] | 30
|
2019-08-22T19:27:59.000Z
|
2022-03-13T22:03:15.000Z
|
tutorials/workshop/lib/lfs.py
|
Conengmo/snorkel
|
36868e8a84de19b94e1c4b8eceaa64969a61a46b
|
[
"Apache-2.0"
] | 2
|
2019-08-22T16:51:58.000Z
|
2022-03-21T02:59:18.000Z
|
tutorials/workshop/lib/lfs.py
|
Conengmo/snorkel
|
36868e8a84de19b94e1c4b8eceaa64969a61a46b
|
[
"Apache-2.0"
] | 31
|
2019-08-22T19:28:08.000Z
|
2022-03-23T12:50:49.000Z
|
#
# PLACE YOUR LFs HERE
#
import re
from snorkel.lf_helpers import (
get_left_tokens, get_right_tokens, get_between_tokens,
get_text_between, get_tagged_text,
)
from lib.dbpedia import known_spouses
# Helper function to get last name
def last_name(s):
name_parts = s.split(' ')
return name_parts[-1] if len(name_parts) > 1 else None
# Last name pairs for known spouses
last_names = set([(last_name(x), last_name(y)) for x, y in known_spouses if last_name(x) and last_name(y)])
spouses = {'spouse', 'wife', 'husband', 'ex-wife', 'ex-husband'}
family = {'father', 'mother', 'sister', 'brother', 'son', 'daughter',
'grandfather', 'grandmother', 'uncle', 'aunt', 'cousin'}
family = family | {f + '-in-law' for f in family}
other = {'boyfriend', 'girlfriend', 'boss', 'employee', 'secretary', 'co-worker'}
#
# Pattern LFs
#
def LF_husband_wife(c):
return 1 if len(spouses.intersection(get_between_tokens(c))) > 0 else 0
def LF_husband_wife_left_window(c):
if len(spouses.intersection(get_left_tokens(c[0], window=2))) > 0:
return 1
elif len(spouses.intersection(get_left_tokens(c[1], window=2))) > 0:
return 1
else:
return 0
def LF_same_last_name(c):
p1_last_name = last_name(c.person1.get_span())
p2_last_name = last_name(c.person2.get_span())
if p1_last_name and p2_last_name and p1_last_name == p2_last_name:
if c.person1.get_span() != c.person2.get_span():
return 1
return 0
def LF_and_married(c):
return 1 if 'and' in get_between_tokens(c) and 'married' in get_right_tokens(c) else 0
def LF_familial_relationship(c):
return -1 if len(family.intersection(get_between_tokens(c))) > 0 else 0
def LF_family_left_window(c):
if len(family.intersection(get_left_tokens(c[0], window=2))) > 0:
return -1
elif len(family.intersection(get_left_tokens(c[1], window=2))) > 0:
return -1
else:
return 0
def LF_other_relationship(c):
return -1 if len(other.intersection(get_between_tokens(c))) > 0 else 0
#
# Distant Supervision
#
def LF_distant_supervision(c):
p1, p2 = c.person1.get_span(), c.person2.get_span()
return 1 if (p1, p2) in known_spouses or (p2, p1) in known_spouses else 0
def LF_distant_supervision_last_names(c):
p1, p2 = c.person1.get_span(), c.person2.get_span()
p1n, p2n = last_name(p1), last_name(p2)
return 1 if (p1 != p2) and ((p1n, p2n) in last_names or (p2n, p1n) in last_names) else 0
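# Illustration (hypothetical stub objects, not the real snorkel Candidate API) of the
# labeling convention used by the LFs above: 1 = spouse, -1 = not spouse, 0 = abstain.
class _StubSpan:
    def __init__(self, text):
        self.text = text
    def get_span(self):
        return self.text
class _StubCandidate:
    def __init__(self, p1, p2):
        self.person1, self.person2 = _StubSpan(p1), _StubSpan(p2)
print(LF_same_last_name(_StubCandidate('Barack Obama', 'Michelle Obama')))  # -> 1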
| 33.2
| 107
| 0.681526
|
bf06561f5fae36e6ce062f5919fd45afe17283cc
| 440
|
py
|
Python
|
setup.py
|
rbiswas4/basicio
|
9e6698ea9519bdb5ff316bc1c811d694a50f9f4e
|
[
"MIT"
] | null | null | null |
setup.py
|
rbiswas4/basicio
|
9e6698ea9519bdb5ff316bc1c811d694a50f9f4e
|
[
"MIT"
] | 1
|
2015-10-21T16:40:33.000Z
|
2015-10-21T16:40:33.000Z
|
setup.py
|
rbiswas4/basicio
|
9e6698ea9519bdb5ff316bc1c811d694a50f9f4e
|
[
"MIT"
] | null | null | null |
#from ez_setup import use_setuptools
#use_setuptools()
from setuptools import setup, find_packages
setup(# package information
name="io",
version="0.0.1dev",
description='utilities for io',
long_description=''' ''',
# What code to include as packages
packages=['basicio'],
# What data to include as packages
include_package_data=True,
package_data={'': ['example_data/*.dat']}
)
| 27.5
| 47
| 0.652273
|
c97460cd4adf16a253d923a1611dcf4916c47336
| 243
|
py
|
Python
|
Aula09/chef024.py
|
AdryanPablo/Python
|
d469d394b41f44dbd753bf9a7f7eebaa81096562
|
[
"MIT"
] | null | null | null |
Aula09/chef024.py
|
AdryanPablo/Python
|
d469d394b41f44dbd753bf9a7f7eebaa81096562
|
[
"MIT"
] | null | null | null |
Aula09/chef024.py
|
AdryanPablo/Python
|
d469d394b41f44dbd753bf9a7f7eebaa81096562
|
[
"MIT"
] | null | null | null |
# Write a program that reads the name of a city and says whether or not it starts with "Santo".
cidade = str(input('Enter the name of a city: ')).strip()
print(f'Does this city start with "Santo"? {"Santo" in (cidade.title().split()[0])}')
| 40.5
| 96
| 0.683128
|
90e072e33a5f0f0fae1139ff5dedc42a6eda7103
| 1,363
|
py
|
Python
|
Hackerrank/Algorithms/Graph Theory/Roads and libraries.py
|
udayan14/Competitive_Coding
|
79e23fdeb909b4161a193d88697a4fe5f4fbbdce
|
[
"MIT"
] | null | null | null |
Hackerrank/Algorithms/Graph Theory/Roads and libraries.py
|
udayan14/Competitive_Coding
|
79e23fdeb909b4161a193d88697a4fe5f4fbbdce
|
[
"MIT"
] | null | null | null |
Hackerrank/Algorithms/Graph Theory/Roads and libraries.py
|
udayan14/Competitive_Coding
|
79e23fdeb909b4161a193d88697a4fe5f4fbbdce
|
[
"MIT"
] | null | null | null |
#!/bin/python3
import queue
def roadsAndLibraries(n, c_lib, c_road, cities):
# Complete this function
if c_road >= c_lib:
return c_lib * n
else:
comp = 0
visited = [False]*(n+1)
for c in range(1,n+1):
# print(c)
if(not visited[c]):
comp = comp + 1
q = queue.Queue(maxsize = 10**5)
q.put(c)
visited[c] = True
while(not q.empty()):
w = q.get()
l = [x for x in cities if w in x]
l1 = [x[0] for x in l if x[1] == w] + [x[1] for x in l if x[0] == w]
for x in l1:
if not visited[x]:
visited[x] = True
q.put(x)
        # one library per connected component plus (n - comp) repaired roads in total
        return n * c_road + (c_lib - c_road) * comp
if __name__ == "__main__":
q = int(input().strip())
for a0 in range(q):
n, m, c_lib, c_road = input().strip().split(' ')
n, m, c_lib, c_road = [int(n), int(m), int(c_lib), int(c_road)]
cities = []
for cities_i in range(m):
cities_t = [int(cities_temp) for cities_temp in input().strip().split(' ')]
cities.append(cities_t)
result = roadsAndLibraries(n, c_lib, c_road, cities)
print(result)
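# Worked example (hypothetical input, not from the judge's tests): with n=6 cities,
# c_lib=2, c_road=1 and a road list whose graph splits into two connected components,
# comp = 2 and the cost is comp*c_lib + (n - comp)*c_road = 2*2 + 4*1 = 8, versus
# n*c_lib = 12 if every city were simply given its own library.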
| 34.948718
| 88
| 0.448276
|
0d8fb2a4acb45d1c4fbae73d8202b17e0173fcb9
| 10,826
|
py
|
Python
|
mpc_tracking.py
|
urosolia/SLIP
|
07d091070b59df0c42ba0f0d277b97f0002ce6fe
|
[
"MIT"
] | 1
|
2021-03-04T01:10:08.000Z
|
2021-03-04T01:10:08.000Z
|
mpc_tracking.py
|
urosolia/SLIP
|
07d091070b59df0c42ba0f0d277b97f0002ce6fe
|
[
"MIT"
] | null | null | null |
mpc_tracking.py
|
urosolia/SLIP
|
07d091070b59df0c42ba0f0d277b97f0002ce6fe
|
[
"MIT"
] | 1
|
2022-02-22T20:12:24.000Z
|
2022-02-22T20:12:24.000Z
|
from casadi import *
from numpy import *
import pdb
import itertools
import numpy as np
from cvxpy import *
import time
##### MPC ######
class MPC_tracking(object):
""" Model Predictive Controller (MPC)
Methods:
        - solve: picks the terminal reference and solves the FTOCP given the initial condition x0
        - buildFTOCP: builds the finite-time optimal control problem solved by solveFTOCP
        - dynamics_SS / dynamics_DS: given x_t and u_t, compute x_{t+1} = f( x_t, u_t ) in single/double support
"""
def __init__(self, N, n, d, Q, R, Qf, xlb, xub, ulb, uub, dt):
# Define variables
self.xlb = xlb
self.xub = xub
self.ulb = ulb
self.uub = uub
self.Ninit = N
self.n = n
self.nl = 2
self.d = d
self.Q = Q
self.Qf = Qf
self.R = R
self.dt = dt
self.l = 0.7
self.m = 1
self.g = 9.81
self.k0 = self.m*self.g/0.15
self.Sslack = 10**8 * np.diag([1.0,1.0])
self.Qslack = 10**8 * np.diag([1.0,1.0,1.0,1.0])
self.xf = np.zeros(n)
self.solverTime = []
# Define Terminal Constraint Set
self.xSS = []
self.uSS = []
self.sSS = []
self.rSS = []
self.cSS = []
self.timeReset()
def timeReset(self):
self.time = 0
self.N = self.Ninit
self.idxSS = self.N
def addTraj(self, x_cl, u_cl, s_cl, region):
self.xSS.append(x_cl)
self.uSS.append(u_cl)
self.rSS.append(region)
self.sSS.append(s_cl)
self.cSS.append(self.computeCost(x_cl, u_cl))
def computeCost(self, x, u):
        # Compute the cost of the roll-out (sum of realized cost over the closed-loop trajectory)
for i in range(0, x.shape[0]):
idx = x.shape[0] - 1 - i
xt = x[idx,:]
if i == 0:
c = [np.dot(xt, np.dot(self.Q, xt))]
else:
ut = u[idx,:]
c.append(c[-1] + np.dot(xt, np.dot(self.Q, xt)) + np.dot(ut, np.dot(self.R, ut)) + 1)
costVector = np.array([np.flip(c)]).T
return costVector
def solve(self, x0, s0, r0, verbose=False):
self.solverTime = 0
self.buildTermComp(r0)
self.solveFTOCP(x0, s0, verbose=verbose)
self.time += 1
self.idxSS = int(np.min([self.idxSS + 1, self.xSS[-1].shape[0]-1]))
print("Done Solving MPC")
def buildTermComp(self, r0, deltaTime = 0):
# Select Terminal Constraint
self.idxToUse = np.min([self.idxSS + deltaTime, self.xSS[-1].shape[0]-1])
self.xref = self.xSS[-1][self.idxToUse-self.N:self.idxToUse, :]
self.uref = self.uSS[-1][self.idxToUse-self.N:self.idxToUse, :]
self.sref = self.sSS[-1][self.idxToUse-self.N:self.idxToUse, :]
self.xf = self.xSS[-1][self.idxToUse, :]
self.uf = self.uSS[-1][self.idxToUse-1, :]
self.cf = self.cSS[-1][self.idxToUse, :]
self.sf = self.sSS[-1][self.idxToUse, :]
self.region = self.rSS[-1][(self.idxToUse-self.N):(self.idxToUse)]
self.region[0] = r0
print("xf: ", self.xf)
print("self.sf: ", self.sf)
self.buildFTOCP()
def solveFTOCP(self, x0, s0, warmStart=True, verbose=False):
# print("s0: ", s0)
# Set initial condition + state and input box constraints
self.lbx = x0.tolist() + self.xlb.tolist()*(self.N) + self.ulb.tolist()*self.N + s0.tolist() + [-100.0]*self.nl*self.N
self.ubx = x0.tolist() + self.xub.tolist()*(self.N) + self.uub.tolist()*self.N + s0.tolist() + [ 100.0]*self.nl*self.N
# Pick warm start
if self.time == 0:
xGuess = self.xSS[-1][self.time:(self.time+self.N+1), :].reshape(self.n*(self.N+1))
uGuess = self.uSS[-1][self.time:(self.time+self.N+0), :].reshape(self.d*(self.N+0))
sGuess = self.sSS[-1][self.time:(self.time+self.N+1), :].reshape(self.nl*(self.N+1))
else:
# xGuess = self.xPred[0:(self.N+1), :].reshape(self.n*(self.N+1))
# uGuess = self.uPred[0:(self.N+0), :].reshape(self.d*(self.N+0))
xGuess = np.append(self.xPred[1:(self.N+1), :].reshape(self.n*(self.N-0)), self.xf)
uGuess = np.append(self.uPred[1:(self.N+0), :].reshape(self.d*(self.N-1)), self.uf)
sGuess = np.append(self.sPred[1:(self.N+1), :].reshape(self.nl*(self.N-0)), self.sf)
zGuess = np.append(np.append(xGuess,uGuess), sGuess)
# Solve nonlinear programm
start = time.time()
if warmStart == False:
sol = self.solver(lbx=self.lbx, ubx=self.ubx, lbg=self.lbg_dyanmics, ubg=self.ubg_dyanmics)
else:
sol = self.solver(lbx=self.lbx, ubx=self.ubx, lbg=self.lbg_dyanmics, ubg=self.ubg_dyanmics, x0=zGuess.tolist())
end = time.time()
self.solverTime += end - start
# Check if the solution is feasible
if (self.solver.stats()['success']):
self.feasible = 1
x = sol["x"]
self.cPred = sol['f']
self.xPred = np.array(x[0:(self.N+1)*self.n].reshape((self.n,self.N+1))).T
self.uPred = np.array(x[(self.N+1)*self.n:((self.N+1)*self.n + self.d*self.N)].reshape((self.d,self.N))).T
self.sPred = np.array(x[((self.N+1)*self.n + self.d*self.N):((self.N+1)*self.n + self.d*self.N)+self.nl*(self.N+1)].reshape((self.nl,self.N+1))).T
self.mpcInput = self.uPred[0][0]
# print("xPred[0]:", self.xPred[0,:])
# print("x0[0]:", x0)
# print("xPred:")
# print(self.xPred)
# print("xref:")
# print(self.xref)
# print("uPred:")
# print(self.uPred)
# print("sPred:")
# print(self.sPred)
# print("Term state: ", self.xPred[-1,:])
            # print("Success")
# if np.dot( (self.xPred[0,:] - self.xref[0,:]), (self.xPred[0,:] - self.xref[0,:])) > 0.1:
# pdb.set_trace()
else:
self.cPred = 10**10
self.xPred = np.zeros((self.N+1,self.n) )
self.uPred = np.zeros((self.N,self.d))
self.mpcInput = []
self.feasible = 0
print("Unfeasible")
print("x0[0]:", x0)
pdb.set_trace()
# sol = self.solver(lbx=self.lbx, ubx=self.ubx, lbg=self.lbg_dyanmics, ubg=self.ubg_dyanmics)
# if (self.solver.stats()['success']):
        #     print("Success")
# self.feasible = 1
# x = sol["x"]
# self.xPred = np.array(x[0:(self.N+1)*self.n].reshape((self.n,self.N+1))).T
# self.uPred = np.array(x[(self.N+1)*self.n:((self.N+1)*self.n + self.d*self.N)].reshape((self.d,self.N))).T
# self.sPred = np.array(x[((self.N+1)*self.n + self.d*self.N):((self.N+1)*self.n + self.d*self.N)+self.s])
# self.mpcInput = self.uPred[0][0]
# print("xPred:")
# print(self.xPred)
# print("uPred:")
# print(self.uPred)
# print("sPred:")
# print(self.sPred)
# else:
# print("Also new problem not feasible")
# print(self.xSS[-1][self.time:self.time+self.N+1,:])
# print(self.uSS[-1][self.time:self.time+self.N,:])
def buildFTOCP(self):
# Define variables
n = self.n
d = self.d
nl = self.nl
# Define variables
X = SX.sym('X', n*(self.N+1));
U = SX.sym('U', d*self.N);
C = SX.sym('C', nl*(self.N+1));
# Define dynamic constraints
self.constraint = []
for i in range(0, self.N):
if self.region[i] < 2:
legUsed = self.region[i]
legNotUsed = 1 - legUsed
# print("SS, leg: ", legUsed, 'i: ', i, ", legNotUsed: ", legNotUsed)
X_next = self.dynamics_SS(X[n*i:n*(i+1)], U[d*i], C[i*nl + legUsed])
# Foot constraints
self.constraint = vertcat(self.constraint, C[(i+1)*nl + legUsed] - C[i*nl + legUsed] )
self.constraint = vertcat(self.constraint, C[(i+1)*nl + legNotUsed] - (C[i*nl + legNotUsed] + U[d*i+1]) )
else:
# print("DS, i: ", i)
X_next = self.dynamics_DS(X[n*i:n*(i+1)], U[d*i], C[i*nl:(i+1)*nl])
# Foot constraints
for j in range(0,2):
self.constraint = vertcat(self.constraint, C[(i+1)*nl + j] - C[i*nl + j] )
            # Dynamic update
for j in range(0, self.n):
self.constraint = vertcat(self.constraint, X_next[j] - X[n*(i+1)+j] )
# Constraints on length
lbg_leg = []
ubg_leg = []
for i in range(0, self.N):
if self.region[i] == 0:
self.constraint = vertcat(self.constraint, (X[n*i + 0] - C[i*nl+0])**2 + X[n*i + 1]**2 )
# Leg 0 length <= 1
lbg_leg.append(0)
ubg_leg.append(self.l**2)
elif self.region[i] == 1:
self.constraint = vertcat(self.constraint, (X[n*i + 0] - C[i*nl+1])**2 + X[n*i + 1]**2 )
# Leg 1 length <= 1
lbg_leg.append(0)
ubg_leg.append(self.l**2)
else:
self.constraint = vertcat(self.constraint, (X[n*i + 0] - C[i*nl+0])**2 + X[n*i + 1]**2 )
self.constraint = vertcat(self.constraint, (X[n*i + 0] - C[i*nl+1])**2 + X[n*i + 1]**2 )
# Leg 0 length <= 1
lbg_leg.append(0)
ubg_leg.append(self.l**2)
# Leg 1 length <= 1
lbg_leg.append(0)
ubg_leg.append(self.l**2)
# Defining Cost
self.cost = 0
for i in range(0, self.N):
self.cost = self.cost + (X[n*i:n*(i+1)] -self.xref[i,:]).T @ self.Q @ (X[n*i:n*(i+1)] -self.xref[i,:])
self.cost = self.cost + (C[nl*i:nl*(i+1)]-self.sref[i,:]).T @ self.Q[0:2,0:2] @ (C[nl*i:nl*(i+1)]-self.sref[i,:])
self.cost = self.cost + (U[d*i:d*(i+1)] -self.uref[i,:]).T @ self.R @ (U[d*i:d*(i+1)] -self.uref[i,:])
# Terminal Constraints
self.cost = self.cost + (X[n*self.N:n*(self.N+1)] -self.xf).T @ self.Q @ (X[n*self.N:n*(self.N+1)]-self.xf)
self.cost = self.cost + (C[nl*self.N:nl*(self.N+1)]-self.sf).T @ self.Q[0:2,0:2] @ (C[nl*self.N:nl*(self.N+1)]-self.sf)
# Set IPOPT options
# opts = {"verbose":False,"ipopt.print_level":0,"print_time":0,"ipopt.mu_strategy":"adaptive","ipopt.mu_init":1e-5,"ipopt.mu_min":1e-15,"ipopt.barrier_tol_factor":1}#, "ipopt.acceptable_constr_viol_tol":0.001}#,"ipopt.acceptable_tol":1e-4}#, "expand":True}
opts = {"verbose":False,"ipopt.print_level":0,"print_time":0, "ipopt.max_cpu_time":0.5}#\\, "ipopt.acceptable_constr_viol_tol":0.001}#,"ipopt.acceptable_tol":1e-4}#, "expand":True}
nlp = {'x':vertcat(X,U,C), 'f':self.cost, 'g':self.constraint}
self.solver = nlpsol('solver', 'ipopt', nlp, opts)
# Set lower bound of inequality constraint to zero to force n*N state dynamics
self.lbg_dyanmics = [0]*((n+nl)*self.N) + lbg_leg
self.ubg_dyanmics = [0]*((n+nl)*self.N) + ubg_leg
def dynamics_SS(self, x, u, c):
theta = np.arctan( (x[0] - c)/ x[1] )
leng = np.sqrt( (x[0] - c)**2 + x[1]**2 )
# state x = [x,y, vx, vy]
x_next = x[0] + self.dt * x[2]
y_next = x[1] + self.dt * x[3]
vx_next = x[2] + self.dt * ( sin(theta)*((u+self.k0)*(self.l - leng) ) )
vy_next = x[3] + self.dt * (-self.m*self.g + cos(theta)*((u+self.k0)*(self.l - leng) ) ) #+ cos(theta)*u[1] )
state_next = [x_next, y_next, vx_next, vy_next]
return state_next
def dynamics_DS(self, x, u, c):
theta = []
leng = []
for i in [0, 1]:
theta.append(np.arctan( (x[0] - c[i])/ x[1] ))
leng.append(np.sqrt( (x[0] - c[i])**2 + x[1]**2 ))
# state x = [x,y, vx, vy]
x_next = x[0] + self.dt * x[2]
y_next = x[1] + self.dt * x[3]
vx_next = x[2] + self.dt * (sin(theta[0])*((u[0]+self.k0)*(self.l - leng[0]) ) + sin(theta[1])*((u+self.k0)*(self.l - leng[1]) ))
vy_next = x[3] + self.dt * (cos(theta[0])*((u[0]+self.k0)*(self.l - leng[0]) ) + cos(theta[1])*((u+self.k0)*(self.l - leng[1]) - self.m*self.g))
state_next = [x_next, y_next, vx_next, vy_next]
return state_next
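# For reference (restating dynamics_SS above, not an additional modeling assumption):
# the single-support update is a forward-Euler discretization with step dt of
#   x_dd = sin(theta) * (u + k0) * (l - leng)
#   y_dd = -g + cos(theta) * (u + k0) * (l - leng)
# where theta = atan((x - c) / y), leng = sqrt((x - c)^2 + y^2) and the unit mass
# m = 1 is folded in; dynamics_DS sums the same spring-leg force over both stance feet.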
| 33.310769
| 258
| 0.592555
|
26495c561833a1a79ce44b54fb70773873b55c81
| 214
|
py
|
Python
|
solutions/1820.py
|
nxexox/acm.timus
|
9548d2a0b54fdd99bd60071f3be2fb7f897a7303
|
[
"MIT"
] | null | null | null |
solutions/1820.py
|
nxexox/acm.timus
|
9548d2a0b54fdd99bd60071f3be2fb7f897a7303
|
[
"MIT"
] | null | null | null |
solutions/1820.py
|
nxexox/acm.timus
|
9548d2a0b54fdd99bd60071f3be2fb7f897a7303
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
n, k = [int(i) for i in input().split()]
# TODO: the judge rejected rounding to nearest on test 16; each steak needs one minute
# per side, so use the ceiling of 2*n/k instead.
def function(n, k):
    return (2 * n + k - 1) // k if k < n else 2
print(function(n, k))
| 17.833333
| 59
| 0.616822
|
c307d841a736d2fdbab380e113b1e97ac70ae151
| 3,819
|
py
|
Python
|
src/utils/tools.py
|
oulkaid/Sudoku-SAT-Solver
|
a26f6fad10a7f36d40fe88697e132248286fc562
|
[
"MIT"
] | null | null | null |
src/utils/tools.py
|
oulkaid/Sudoku-SAT-Solver
|
a26f6fad10a7f36d40fe88697e132248286fc562
|
[
"MIT"
] | null | null | null |
src/utils/tools.py
|
oulkaid/Sudoku-SAT-Solver
|
a26f6fad10a7f36d40fe88697e132248286fc562
|
[
"MIT"
] | null | null | null |
from math import sqrt
def input_parser(lines):
n = int(lines[0])
grid = []
for i in range(1, len(lines)):
item = 0
end_of_line = False
new_line = []
while not end_of_line:
if lines[i][item] == '\n':
end_of_line = True
elif lines[i][item] == ' ':
pass
elif lines[i][item] == '-' or lines[i][item] == '_' or lines[i][item] == '.':
new_line.append(0)
elif (lines[i][item] >= 'A' and lines[i][item] <= 'P') or (lines[i][item] >= 'a' and lines[i][item] <= 'p'):
new_line.append( hex_to_int( lines[i][item] ) )
elif int(lines[i][item]) <= n and int(lines[i][item]) > 0:
new_line.append(int(lines[i][item]))
item += 1
if len(new_line) == n:
end_of_line = True
if len(new_line) == n:
grid.append(new_line)
return n, grid
def hex_to_int(c):
if c == 'A' or c == 'a':
return 10
elif c == 'B' or c == 'b':
return 11
elif c == 'C' or c == 'c':
return 12
elif c == 'D' or c == 'd':
return 13
elif c == 'E' or c == 'e':
return 14
elif c == 'F' or c == 'f':
return 15
elif c == 'G' or c == 'g':
return 16
elif c == 'H' or c == 'h':
return 17
elif c == 'I' or c == 'i':
return 18
elif c == 'J' or c == 'j':
return 19
elif c == 'K' or c == 'k':
return 20
elif c == 'L' or c == 'l':
return 21
elif c == 'M' or c == 'm':
return 22
elif c == 'N' or c == 'n':
return 23
elif c == 'O' or c == 'o':
return 24
elif c == 'P' or c == 'p':
return 25
def int_to_hex(n):
if n == 10:
return 'A'
elif n == 11:
return 'B'
elif n == 12:
return 'C'
elif n == 13:
return 'D'
elif n == 14:
return 'E'
elif n == 15:
return 'F'
elif n == 16:
return 'G'
elif n == 17:
return 'H'
elif n == 18:
return 'I'
elif n == 19:
return 'J'
elif n == 20:
return 'K'
elif n == 21:
return 'L'
elif n == 22:
return 'M'
elif n == 23:
return 'N'
elif n == 24:
return 'O'
elif n == 25:
return 'P'
else: return n
def print_grid(grid, n):
print("+" + ("-"*(int(sqrt(n))*2+1) + "+")*int(sqrt(n)))
for i, row in enumerate(grid):
if (n == 4):
print(("|" + " {} {} |"*int(sqrt(n))).format(*[x if x != 0 else "." for x in row]))
elif (n == 9):
print(("|" + " {} {} {} |"*int(sqrt(n))).format(*[x if x != 0 else "." for x in row]))
elif (n == 16):
print(("|" + " {} {} {} {} |"*int(sqrt(n))).format(*[int_to_hex(x) if x != 0 else "." for x in row]))
elif (n == 25):
print(("|" + " {} {} {} {} {} |"*int(sqrt(n))).format(*[int_to_hex(x) if x != 0 else "." for x in row]))
if i == n:
print("+" + ("-"*(int(sqrt(n))*2+1) + "+")*int(sqrt(n)))
elif i % int(sqrt(n)) == int(sqrt(n))-1:
print("+" + ("-"*(int(sqrt(n))*2+1) + "+")*int(sqrt(n)))
def print_solution(m, n):
print("+" + ("-"*(int(sqrt(n))*2+1) + "+")*int(sqrt(n)))
for i in range(n*n):
if i%n == 0:
print("| ", end="")
if (n == 16 or n == 25):
print( str(int_to_hex(m[i])) + " ", end="" )
else:
print( str(m[i]) + " ", end="" )
if (i+1)%(n*int(sqrt(n))) == 0:
print("|\n+" + ("-"*(int(sqrt(n))*2+1) + "+")*int(sqrt(n)))
elif (i+1)%n == 0:
print("|")
elif (i+1)%int(sqrt(n)) == 0:
print("| ", end="")
| 27.673913
| 120
| 0.396701
|
676cba1ba2697d2ede73a09dfd7077a8397c87b4
| 41,861
|
py
|
Python
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/isisLevel1LANHelloPDU_template.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 20
|
2019-05-07T01:59:14.000Z
|
2022-02-11T05:24:47.000Z
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/isisLevel1LANHelloPDU_template.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 60
|
2019-04-03T18:59:35.000Z
|
2022-02-22T12:05:05.000Z
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/isisLevel1LANHelloPDU_template.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 13
|
2019-05-20T10:48:31.000Z
|
2021-10-06T07:45:44.000Z
|
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class IsisLevel1LANHelloPDU(Base):
__slots__ = ()
_SDM_NAME = 'isisLevel1LANHelloPDU'
_SDM_ATT_MAP = {
'CommonHeaderIntradomainRoutingProtocolDiscriminator': 'isisLevel1LANHelloPDU.isisHeader.commonHeader.intradomainRoutingProtocolDiscriminator-1',
'CommonHeaderLengthIndicator': 'isisLevel1LANHelloPDU.isisHeader.commonHeader.lengthIndicator-2',
'CommonHeaderVersionProtocolIDExtension': 'isisLevel1LANHelloPDU.isisHeader.commonHeader.versionProtocolIDExtension-3',
'CommonHeaderIdLength': 'isisLevel1LANHelloPDU.isisHeader.commonHeader.idLength-4',
'CommonHeaderReservedBit': 'isisLevel1LANHelloPDU.isisHeader.commonHeader.reservedBit-5',
'CommonHeaderPduType': 'isisLevel1LANHelloPDU.isisHeader.commonHeader.pduType-6',
'CommonHeaderVersion': 'isisLevel1LANHelloPDU.isisHeader.commonHeader.version-7',
'CommonHeaderReserved': 'isisLevel1LANHelloPDU.isisHeader.commonHeader.reserved-8',
'CommonHeaderMaximumAreaAddresses': 'isisLevel1LANHelloPDU.isisHeader.commonHeader.maximumAreaAddresses-9',
'FixedHeaderReservedCircuitType': 'isisLevel1LANHelloPDU.isisHeader.fixedHeader.reservedCircuitType-10',
'FixedHeaderSourceID': 'isisLevel1LANHelloPDU.isisHeader.fixedHeader.sourceID-11',
'FixedHeaderHoldingTime': 'isisLevel1LANHelloPDU.isisHeader.fixedHeader.holdingTime-12',
'FixedHeaderPduLength': 'isisLevel1LANHelloPDU.isisHeader.fixedHeader.pduLength-13',
'FixedHeaderReservedBit': 'isisLevel1LANHelloPDU.isisHeader.fixedHeader.reservedBit-14',
'FixedHeaderPriority': 'isisLevel1LANHelloPDU.isisHeader.fixedHeader.priority-15',
'FixedHeaderLanID': 'isisLevel1LANHelloPDU.isisHeader.fixedHeader.lanID-16',
'Tlv1AreaAddressesTlvCode': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv1AreaAddresses.tlvCode-17',
'Tlv1AreaAddressesTlvLength': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv1AreaAddresses.tlvLength-18',
'ValueFieldsAddressLength': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv1AreaAddresses.valueFields.addressLength-19',
'ValueFieldsAreaAddress': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv1AreaAddresses.valueFields.areaAddress-20',
'Tlv6ISNeighborswith6OctetMACAddressTlvCode': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv6ISNeighborswith6OctetMACAddress.tlvCode-21',
'Tlv6ISNeighborswith6OctetMACAddressTlvLength': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv6ISNeighborswith6OctetMACAddress.tlvLength-22',
'Tlv6ISNeighborswith6OctetMACAddressLanAddress': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv6ISNeighborswith6OctetMACAddress.lanAddress-23',
'Tlv7ISNeighborswithVariableLengthSNPAAddressTlvCode': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv7ISNeighborswithVariableLengthSNPAAddress.tlvCode-24',
'Tlv7ISNeighborswithVariableLengthSNPAAddressTlvLength': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv7ISNeighborswithVariableLengthSNPAAddress.tlvLength-25',
'ValueFieldsLanAddressLength': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv7ISNeighborswithVariableLengthSNPAAddress.valueFields.lanAddressLength-26',
'ValueFieldsLanAddress': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv7ISNeighborswithVariableLengthSNPAAddress.valueFields.lanAddress-27',
'Tlv8PaddingTlvCode': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv8Padding.tlvCode-28',
'Tlv8PaddingTlvLength': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv8Padding.tlvLength-29',
'ValueFieldsPadding': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv8Padding.valueFields.padding-30',
'Tlv10AuthenticationInformationTlvCode': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv10AuthenticationInformation.tlvCode-31',
'Tlv10AuthenticationInformationTlvLength': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv10AuthenticationInformation.tlvLength-32',
'ValueFieldsAuthenticationType': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv10AuthenticationInformation.valueFields.authenticationType-33',
'ValueFieldsAuthenticatorLengthoctets': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv10AuthenticationInformation.valueFields.authenticatorLengthoctets-34',
'ValueFieldsAuthenticator': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv10AuthenticationInformation.valueFields.authenticator-35',
'Tlv129ProtocolsSupportedCode': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv129ProtocolsSupported.code-36',
'Tlv129ProtocolsSupportedLength': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv129ProtocolsSupported.length-37',
'NlpidEntriesEntryLength': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv129ProtocolsSupported.nlpidEntries.entryLength-38',
'NlpidEntriesEntryID': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv129ProtocolsSupported.nlpidEntries.entryID-39',
'Tlv132IPInterfaceAddressCode': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv132IPInterfaceAddress.code-40',
'Tlv132IPInterfaceAddressLength': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv132IPInterfaceAddress.length-41',
'IpAddressEntriesIpAddress': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv132IPInterfaceAddress.ipAddressEntries.ipAddress-42',
'Tlv143MTAwarePortCapType': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv143MTAwarePortCap.type-43',
'Tlv143MTAwarePortCapLength': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv143MTAwarePortCap.length-44',
'Tlv143MTAwarePortCapResvBit': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv143MTAwarePortCap.resvBit-45',
'Tlv143MTAwarePortCapTopologyID': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv143MTAwarePortCap.topologyID-46',
'SubTLVHeaderTypeNoSubTLVs': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv143MTAwarePortCap.subTLVHeader.subTLVHeaderType.noSubTLVs-47',
'SpecialVLANsandFlagsSubTLVCode': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv143MTAwarePortCap.subTLVHeader.subTLVHeaderType.specialVLANsandFlags.subTLVCode-48',
'SpecialVLANsandFlagsSubTLVLength': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv143MTAwarePortCap.subTLVHeader.subTLVHeaderType.specialVLANsandFlags.subTLVLength-49',
'SpecialVLANsandFlagsPortID': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv143MTAwarePortCap.subTLVHeader.subTLVHeaderType.specialVLANsandFlags.portID-50',
'SpecialVLANsandFlagsSenderNickname': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv143MTAwarePortCap.subTLVHeader.subTLVHeaderType.specialVLANsandFlags.senderNickname-51',
'SpecialVLANsandFlagsAppointedForwarder': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv143MTAwarePortCap.subTLVHeader.subTLVHeaderType.specialVLANsandFlags.appointedForwarder-52',
'SpecialVLANsandFlagsAccessPort': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv143MTAwarePortCap.subTLVHeader.subTLVHeaderType.specialVLANsandFlags.accessPort-53',
'SpecialVLANsandFlagsVlanMapping': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv143MTAwarePortCap.subTLVHeader.subTLVHeaderType.specialVLANsandFlags.vlanMapping-54',
'SpecialVLANsandFlagsBypassPseudonode': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv143MTAwarePortCap.subTLVHeader.subTLVHeaderType.specialVLANsandFlags.bypassPseudonode-55',
'SpecialVLANsandFlagsOuterVLAN': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv143MTAwarePortCap.subTLVHeader.subTLVHeaderType.specialVLANsandFlags.outerVLAN-56',
'SpecialVLANsandFlagsTrunkPort': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv143MTAwarePortCap.subTLVHeader.subTLVHeaderType.specialVLANsandFlags.trunkPort-57',
'SpecialVLANsandFlagsResvBit': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv143MTAwarePortCap.subTLVHeader.subTLVHeaderType.specialVLANsandFlags.resvBit-58',
'SpecialVLANsandFlagsDesigVLAN': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv143MTAwarePortCap.subTLVHeader.subTLVHeaderType.specialVLANsandFlags.desigVLAN-59',
'EnabledVLANsSubTLVCode': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv143MTAwarePortCap.subTLVHeader.subTLVHeaderType.enabledVLANs.subTLVCode-60',
'EnabledVLANsSubTLVLength': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv143MTAwarePortCap.subTLVHeader.subTLVHeaderType.enabledVLANs.subTLVLength-61',
'EnabledVLANsResvBit': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv143MTAwarePortCap.subTLVHeader.subTLVHeaderType.enabledVLANs.resvBit-62',
'EnabledVLANsStartVLANID': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv143MTAwarePortCap.subTLVHeader.subTLVHeaderType.enabledVLANs.startVLANID-63',
'VlanBitMapDefault': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv143MTAwarePortCap.subTLVHeader.subTLVHeaderType.enabledVLANs.vlanBitMap.-64',
'AppointedForwardersSubTLVCode': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv143MTAwarePortCap.subTLVHeader.subTLVHeaderType.appointedForwarders.subTLVCode-65',
'AppointedForwardersSubTLVLength': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv143MTAwarePortCap.subTLVHeader.subTLVHeaderType.appointedForwarders.subTLVLength-66',
'AppointmentInformationAppointeeNickname': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv143MTAwarePortCap.subTLVHeader.subTLVHeaderType.appointedForwarders.appointmentInformationEntries.appointmentInformation.appointeeNickname-67',
'AppointmentInformationResvBit': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv143MTAwarePortCap.subTLVHeader.subTLVHeaderType.appointedForwarders.appointmentInformationEntries.appointmentInformation.resvBit-68',
'AppointmentInformationStartVLAN': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv143MTAwarePortCap.subTLVHeader.subTLVHeaderType.appointedForwarders.appointmentInformationEntries.appointmentInformation.startVLAN-69',
'AppointmentInformationResvBit2': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv143MTAwarePortCap.subTLVHeader.subTLVHeaderType.appointedForwarders.appointmentInformationEntries.appointmentInformation.resvBit2-70',
'AppointmentInformationEndVLAN': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv143MTAwarePortCap.subTLVHeader.subTLVHeaderType.appointedForwarders.appointmentInformationEntries.appointmentInformation.endVLAN-71',
'Tlv145TRILLNeighborType': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv145TRILLNeighbor.type-72',
'Tlv145TRILLNeighborLength': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv145TRILLNeighbor.length-73',
'Tlv145TRILLNeighborSBit': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv145TRILLNeighbor.sBit-74',
'Tlv145TRILLNeighborLBit': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv145TRILLNeighbor.lBit-75',
'Tlv145TRILLNeighborResvBit': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv145TRILLNeighbor.resvBit-76',
'NeighborRecordFBit': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv145TRILLNeighbor.neighborRecordEntries.neighborRecord.fBit-77',
'NeighborRecordResvBit2': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv145TRILLNeighbor.neighborRecordEntries.neighborRecord.resvBit2-78',
'NeighborRecordMtu': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv145TRILLNeighbor.neighborRecordEntries.neighborRecord.mtu-79',
'NeighborRecordMac': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv145TRILLNeighbor.neighborRecordEntries.neighborRecord.mac-80',
'Tlv232IPv6InterfaceAddressCode': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv232IPv6InterfaceAddress.code-81',
'Tlv232IPv6InterfaceAddressLength': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv232IPv6InterfaceAddress.length-82',
'Tlv232ipv6interfaceaddressIpAddressEntriesIpAddress': 'isisLevel1LANHelloPDU.isisHeader.tlvHeader.tlvHeaderType.tlv232IPv6InterfaceAddress.ipAddressEntries.ipAddress-83',
}
def __init__(self, parent, list_op=False):
super(IsisLevel1LANHelloPDU, self).__init__(parent, list_op)
@property
def CommonHeaderIntradomainRoutingProtocolDiscriminator(self):
"""
Display Name: Intradomain routing protocol discriminator
Default Value: 0x83
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CommonHeaderIntradomainRoutingProtocolDiscriminator']))
@property
def CommonHeaderLengthIndicator(self):
"""
Display Name: Length indicator
Default Value: 8
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CommonHeaderLengthIndicator']))
@property
def CommonHeaderVersionProtocolIDExtension(self):
"""
Display Name: Version/Protocol ID extension
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CommonHeaderVersionProtocolIDExtension']))
@property
def CommonHeaderIdLength(self):
"""
Display Name: ID length
Default Value: 0
Value Format: decimal
Available enum values: One, 1, Two, 2, Three, 3, Four, 4, Five, 5, Six, 6, Seven, 7, Eight, 8, 6 Octet ID field, 0, Null ID field, 255
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CommonHeaderIdLength']))
@property
def CommonHeaderReservedBit(self):
"""
Display Name: Reserved bit
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CommonHeaderReservedBit']))
@property
def CommonHeaderPduType(self):
"""
Display Name: PDU type
Default Value: 15
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CommonHeaderPduType']))
@property
def CommonHeaderVersion(self):
"""
Display Name: Version
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CommonHeaderVersion']))
@property
def CommonHeaderReserved(self):
"""
Display Name: Reserved
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CommonHeaderReserved']))
@property
def CommonHeaderMaximumAreaAddresses(self):
"""
Display Name: Maximum area addresses
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CommonHeaderMaximumAreaAddresses']))
@property
def FixedHeaderReservedCircuitType(self):
"""
Display Name: Reserved/Circuit type
Default Value: 1
Value Format: decimal
Available enum values: Reserved value, 0, Level 1 only, 1, Level 2 only, 2, Both level 1 and 2, 3
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FixedHeaderReservedCircuitType']))
@property
def FixedHeaderSourceID(self):
"""
Display Name: Source ID
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FixedHeaderSourceID']))
@property
def FixedHeaderHoldingTime(self):
"""
Display Name: Holding time
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FixedHeaderHoldingTime']))
@property
def FixedHeaderPduLength(self):
"""
Display Name: PDU length
Default Value: 1497
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FixedHeaderPduLength']))
@property
def FixedHeaderReservedBit(self):
"""
Display Name: Reserved bit
Default Value: 0
Value Format: decimal
Available enum values: Must be Zero, 0
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FixedHeaderReservedBit']))
@property
def FixedHeaderPriority(self):
"""
Display Name: Priority
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FixedHeaderPriority']))
@property
def FixedHeaderLanID(self):
"""
Display Name: LAN ID
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FixedHeaderLanID']))
@property
def Tlv1AreaAddressesTlvCode(self):
"""
Display Name: TLV code
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tlv1AreaAddressesTlvCode']))
@property
def Tlv1AreaAddressesTlvLength(self):
"""
Display Name: TLV length
Default Value: 2
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tlv1AreaAddressesTlvLength']))
@property
def ValueFieldsAddressLength(self):
"""
Display Name: Address length
Default Value: 8
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ValueFieldsAddressLength']))
@property
def ValueFieldsAreaAddress(self):
"""
Display Name: Area address
Default Value: 0x0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ValueFieldsAreaAddress']))
@property
def Tlv6ISNeighborswith6OctetMACAddressTlvCode(self):
"""
Display Name: TLV code
Default Value: 6
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tlv6ISNeighborswith6OctetMACAddressTlvCode']))
@property
def Tlv6ISNeighborswith6OctetMACAddressTlvLength(self):
"""
Display Name: TLV length
Default Value: 6
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tlv6ISNeighborswith6OctetMACAddressTlvLength']))
@property
def Tlv6ISNeighborswith6OctetMACAddressLanAddress(self):
"""
Display Name: LAN address
Default Value: 0x0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tlv6ISNeighborswith6OctetMACAddressLanAddress']))
@property
def Tlv7ISNeighborswithVariableLengthSNPAAddressTlvCode(self):
"""
Display Name: TLV code
Default Value: 7
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tlv7ISNeighborswithVariableLengthSNPAAddressTlvCode']))
@property
def Tlv7ISNeighborswithVariableLengthSNPAAddressTlvLength(self):
"""
Display Name: TLV length
Default Value: 2
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tlv7ISNeighborswithVariableLengthSNPAAddressTlvLength']))
@property
def ValueFieldsLanAddressLength(self):
"""
Display Name: LAN address length
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ValueFieldsLanAddressLength']))
@property
def ValueFieldsLanAddress(self):
"""
Display Name: LAN address
Default Value: 0x0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ValueFieldsLanAddress']))
@property
def Tlv8PaddingTlvCode(self):
"""
Display Name: TLV code
Default Value: 8
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tlv8PaddingTlvCode']))
@property
def Tlv8PaddingTlvLength(self):
"""
Display Name: TLV length
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tlv8PaddingTlvLength']))
@property
def ValueFieldsPadding(self):
"""
Display Name: Padding
Default Value: 8
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ValueFieldsPadding']))
@property
def Tlv10AuthenticationInformationTlvCode(self):
"""
Display Name: TLV code
Default Value: 10
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tlv10AuthenticationInformationTlvCode']))
@property
def Tlv10AuthenticationInformationTlvLength(self):
"""
Display Name: TLV length
Default Value: 2
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tlv10AuthenticationInformationTlvLength']))
@property
def ValueFieldsAuthenticationType(self):
"""
Display Name: Authentication type
Default Value: 1
Value Format: decimal
Available enum values: Cleartext password, 1, Routing domain private authentication method, 255
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ValueFieldsAuthenticationType']))
@property
def ValueFieldsAuthenticatorLengthoctets(self):
"""
Display Name: Authenticator Length (octets)
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ValueFieldsAuthenticatorLengthoctets']))
@property
def ValueFieldsAuthenticator(self):
"""
Display Name: Authenticator
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ValueFieldsAuthenticator']))
@property
def Tlv129ProtocolsSupportedCode(self):
"""
Display Name: Code
Default Value: 129
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tlv129ProtocolsSupportedCode']))
@property
def Tlv129ProtocolsSupportedLength(self):
"""
Display Name: Length
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tlv129ProtocolsSupportedLength']))
@property
def NlpidEntriesEntryLength(self):
"""
Display Name: Entry Length
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['NlpidEntriesEntryLength']))
@property
def NlpidEntriesEntryID(self):
"""
Display Name: Entry ID
Default Value: 0x0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['NlpidEntriesEntryID']))
@property
def Tlv132IPInterfaceAddressCode(self):
"""
Display Name: Code
Default Value: 132
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tlv132IPInterfaceAddressCode']))
@property
def Tlv132IPInterfaceAddressLength(self):
"""
Display Name: Length
Default Value: 4
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tlv132IPInterfaceAddressLength']))
@property
def IpAddressEntriesIpAddress(self):
"""
Display Name: IP Address
Default Value: 0.0.0.0
Value Format: iPv4
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IpAddressEntriesIpAddress']))
@property
def Tlv143MTAwarePortCapType(self):
"""
Display Name: Type
Default Value: 143
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tlv143MTAwarePortCapType']))
@property
def Tlv143MTAwarePortCapLength(self):
"""
Display Name: Length
Default Value: 2
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tlv143MTAwarePortCapLength']))
@property
def Tlv143MTAwarePortCapResvBit(self):
"""
Display Name: RESV
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tlv143MTAwarePortCapResvBit']))
@property
def Tlv143MTAwarePortCapTopologyID(self):
"""
Display Name: Topology-ID
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tlv143MTAwarePortCapTopologyID']))
@property
def SubTLVHeaderTypeNoSubTLVs(self):
"""
Display Name: No sub-TLVs
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SubTLVHeaderTypeNoSubTLVs']))
@property
def SpecialVLANsandFlagsSubTLVCode(self):
"""
Display Name: Sub-TLV code
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SpecialVLANsandFlagsSubTLVCode']))
@property
def SpecialVLANsandFlagsSubTLVLength(self):
"""
Display Name: Sub-TLV length
Default Value: 8
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SpecialVLANsandFlagsSubTLVLength']))
@property
def SpecialVLANsandFlagsPortID(self):
"""
Display Name: Port ID
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SpecialVLANsandFlagsPortID']))
@property
def SpecialVLANsandFlagsSenderNickname(self):
"""
Display Name: Sender Nickname
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SpecialVLANsandFlagsSenderNickname']))
@property
def SpecialVLANsandFlagsAppointedForwarder(self):
"""
Display Name: Appointed Forwarder
Default Value: 0
Value Format: decimal
Available enum values: Appointed Forwarder bit not set, 0, Appointed Forwarder bit set, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SpecialVLANsandFlagsAppointedForwarder']))
@property
def SpecialVLANsandFlagsAccessPort(self):
"""
Display Name: Access Port
Default Value: 0
Value Format: decimal
Available enum values: Access Port bit not set, 0, Access Port bit set, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SpecialVLANsandFlagsAccessPort']))
@property
def SpecialVLANsandFlagsVlanMapping(self):
"""
Display Name: VLAN Mapping
Default Value: 0
Value Format: decimal
Available enum values: VLAN Mapping bit not set, 0, VLAN Mapping bit set, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SpecialVLANsandFlagsVlanMapping']))
@property
def SpecialVLANsandFlagsBypassPseudonode(self):
"""
Display Name: Bypass pseudonode
Default Value: 0
Value Format: decimal
Available enum values: Bypass pseudonode bit not set, 0, Bypass pseudonode bit set, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SpecialVLANsandFlagsBypassPseudonode']))
@property
def SpecialVLANsandFlagsOuterVLAN(self):
"""
Display Name: Outer.VLAN
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SpecialVLANsandFlagsOuterVLAN']))
@property
def SpecialVLANsandFlagsTrunkPort(self):
"""
Display Name: Trunk port
Default Value: 0
Value Format: decimal
Available enum values: Trunk port bit not set, 0, Trunk port bit set, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SpecialVLANsandFlagsTrunkPort']))
@property
def SpecialVLANsandFlagsResvBit(self):
"""
Display Name: RESV
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SpecialVLANsandFlagsResvBit']))
@property
def SpecialVLANsandFlagsDesigVLAN(self):
"""
Display Name: Desig.VLAN
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SpecialVLANsandFlagsDesigVLAN']))
@property
def EnabledVLANsSubTLVCode(self):
"""
Display Name: Sub-TLV code
Default Value: 2
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnabledVLANsSubTLVCode']))
@property
def EnabledVLANsSubTLVLength(self):
"""
Display Name: Sub-TLV length
Default Value: 3
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnabledVLANsSubTLVLength']))
@property
def EnabledVLANsResvBit(self):
"""
Display Name: RESV
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnabledVLANsResvBit']))
@property
def EnabledVLANsStartVLANID(self):
"""
Display Name: Start VLAN ID
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnabledVLANsStartVLANID']))
@property
def VlanBitMapDefault(self):
"""
Display Name: BitMap
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['VlanBitMapDefault']))
@property
def AppointedForwardersSubTLVCode(self):
"""
Display Name: Sub-TLV code
Default Value: 3
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AppointedForwardersSubTLVCode']))
@property
def AppointedForwardersSubTLVLength(self):
"""
Display Name: Sub-TLV length
Default Value: 6
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AppointedForwardersSubTLVLength']))
@property
def AppointmentInformationAppointeeNickname(self):
"""
Display Name: Appointee Nickname
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AppointmentInformationAppointeeNickname']))
@property
def AppointmentInformationResvBit(self):
"""
Display Name: RESV
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AppointmentInformationResvBit']))
@property
def AppointmentInformationStartVLAN(self):
"""
Display Name: Start.VLAN
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AppointmentInformationStartVLAN']))
@property
def AppointmentInformationResvBit2(self):
"""
Display Name: RESV2
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AppointmentInformationResvBit2']))
@property
def AppointmentInformationEndVLAN(self):
"""
Display Name: End.VLAN
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AppointmentInformationEndVLAN']))
@property
def Tlv145TRILLNeighborType(self):
"""
Display Name: Type
Default Value: 145
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tlv145TRILLNeighborType']))
@property
def Tlv145TRILLNeighborLength(self):
"""
Display Name: Length
Default Value: 10
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tlv145TRILLNeighborLength']))
@property
def Tlv145TRILLNeighborSBit(self):
"""
Display Name: Smallest flag
Default Value: 0
Value Format: decimal
Available enum values: Smallest Flag bit not set, 0, Smallest Flag bit set, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tlv145TRILLNeighborSBit']))
@property
def Tlv145TRILLNeighborLBit(self):
"""
Display Name: Largest flag
Default Value: 0
Value Format: decimal
Available enum values: Largest Flag bit not set, 0, Largest Flag bit set, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tlv145TRILLNeighborLBit']))
@property
def Tlv145TRILLNeighborResvBit(self):
"""
Display Name: RESV
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tlv145TRILLNeighborResvBit']))
@property
def NeighborRecordFBit(self):
"""
Display Name: Failed
Default Value: 0
Value Format: decimal
Available enum values: Failed bit not set, 0, Failed bit set, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['NeighborRecordFBit']))
@property
def NeighborRecordResvBit2(self):
"""
Display Name: RESV2
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['NeighborRecordResvBit2']))
@property
def NeighborRecordMtu(self):
"""
Display Name: MTU
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['NeighborRecordMtu']))
@property
def NeighborRecordMac(self):
"""
Display Name: MAC Address
Default Value: 0
Value Format: mAC
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['NeighborRecordMac']))
@property
def Tlv232IPv6InterfaceAddressCode(self):
"""
Display Name: Code
Default Value: 232
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tlv232IPv6InterfaceAddressCode']))
@property
def Tlv232IPv6InterfaceAddressLength(self):
"""
Display Name: Length
Default Value: 4
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tlv232IPv6InterfaceAddressLength']))
@property
def Tlv232ipv6interfaceaddressIpAddressEntriesIpAddress(self):
"""
Display Name: IP Address
Default Value: 0:0:0:0:0:0:0:0
Value Format: iPv6
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tlv232ipv6interfaceaddressIpAddressEntriesIpAddress']))
def add(self):
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
| 44.485654
| 255
| 0.721626
|
b8dc9f0e66a94c4a79d6e8c91cffa44d98bcc8e9
| 423
|
py
|
Python
|
apps/work_order/migrations/0002_auto_20190821_2331.py
|
joewen85/devops_study
|
6bbfbac7e70f295ef6068393bd9cf7d418ab4417
|
[
"Apache-2.0"
] | null | null | null |
apps/work_order/migrations/0002_auto_20190821_2331.py
|
joewen85/devops_study
|
6bbfbac7e70f295ef6068393bd9cf7d418ab4417
|
[
"Apache-2.0"
] | null | null | null |
apps/work_order/migrations/0002_auto_20190821_2331.py
|
joewen85/devops_study
|
6bbfbac7e70f295ef6068393bd9cf7d418ab4417
|
[
"Apache-2.0"
] | 1
|
2020-10-28T09:12:47.000Z
|
2020-10-28T09:12:47.000Z
|
# Generated by Django 2.2.4 on 2019-08-21 15:31
import ckeditor.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('work_order', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='workorder',
name='order_contents',
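# editor's note: verbose_name '工单内容' means 'work order content'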
field=ckeditor.fields.RichTextField(verbose_name='工单内容'),
),
]
| 21.15
| 69
| 0.624113
|
fd4470931d850ff863ffd047d7c90a80e92004d8
| 1,781
|
py
|
Python
|
run.py
|
hesamrasouli/CrawlerKing
|
54c6a435cda61f3e43ba8ba9ae81bb1d6f096a84
|
[
"Apache-2.0"
] | null | null | null |
run.py
|
hesamrasouli/CrawlerKing
|
54c6a435cda61f3e43ba8ba9ae81bb1d6f096a84
|
[
"Apache-2.0"
] | null | null | null |
run.py
|
hesamrasouli/CrawlerKing
|
54c6a435cda61f3e43ba8ba9ae81bb1d6f096a84
|
[
"Apache-2.0"
] | null | null | null |
from spider_runner_base import SpiderRunnerBase
# ! Do not delete these !
import sys
from utils.helpers.arg_helper import ArgHelper
"""
This file is the main entry point and is responsible for running the spider(s).
It is the CrawlerKing runner that decides which spider(s) must be run, and with which parameters,
based on the user-provided arguments.
If no parameter is provided (default behavior), it will run all spiders of all kinds, e.g. Scrapy, Selenium, etc.
The user is allowed to pass any number of parameters;
the runner parses them and passes them to the desired spider (if specified) or to all spiders.
Parameters must be given in GNU/POSIX convention: a parameter name must begin with '--'
and its value must be specified right next to it,
e.g --page 2
--page 2 --verbose true
Note: the runner doesn't support default values, because they would not make much sense in this case. For instance, if the user
provides the '--page' parameter, they must specify which page they want.
"""
args = sys.argv
params = None
spider_name = None
if len(args) > 0:
# removing the script name (run.py) from args
args = args[1:len(args)]
spider_name = ArgHelper.get_parameter_value('--spider', args)
params = ArgHelper.append_all_parameters(args)
# retrieving all SpiderRunnerBase subclasses, e.g. ScrapySpiderRunner, SeleniumSpiderRunner
runners = [cls() for cls in SpiderRunnerBase.__subclasses__()]
for runner in runners:
spider_given = spider_name is not None
params_given = params is not None and len(params) > 0
if spider_given and params_given:
runner.run(spider_name, params)
elif spider_given and not params_given:
runner.run(spider_name=spider_name)
elif not spider_given and params_given:
runner.run(params=params)
else:
runner.run()
| 37.104167
| 114
| 0.747333
|
c16e63e74b85f4370d487607f70e330fe7d31fe7
| 30,514
|
py
|
Python
|
My_AutoML/_hpo/_ML.py
|
PanyiDong/My_AutoML
|
510727bd797e4f6fa213939c62d1d7601952e491
|
[
"MIT"
] | 2
|
2022-03-03T16:24:08.000Z
|
2022-03-03T17:17:28.000Z
|
My_AutoML/_hpo/_ML.py
|
PanyiDong/My_AutoML
|
510727bd797e4f6fa213939c62d1d7601952e491
|
[
"MIT"
] | null | null | null |
My_AutoML/_hpo/_ML.py
|
PanyiDong/My_AutoML
|
510727bd797e4f6fa213939c62d1d7601952e491
|
[
"MIT"
] | null | null | null |
"""
File: _ML.py
Author: Panyi Dong
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_hpo/_ML.py
File Created: Tuesday, 5th April 2022 10:50:27 pm
Author: Panyi Dong (panyid2@illinois.edu)
-----
Last Modified: Sunday, 17th April 2022 1:21:39 pm
Modified By: Panyi Dong (panyid2@illinois.edu)
-----
MIT License
Copyright (c) 2022 - 2022, Panyi Dong
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
import pandas as pd
from ._base import AutoTabularBase
from My_AutoML._utils._base import type_of_task
class AutoTabularRegressor(AutoTabularBase):
""" "
AutoTabular for regression tasks build on top of AutoTabularBase.
Parameters
----------
timeout: Total time limit for the job in seconds, default = 360
max_evals: Maximum number of function evaluations allowed, default = 64
allow_error_prop: proportion of tasks allowed to fail, default = 0.1
allowed number of failures is int(max_evals * allow_error_prop)
temp_directory: folder path to store temporary model, default = 'tmp'
delete_temp_after_terminate: whether to delete temporary information, default = False
save: whether to save model after training, default = True
model_name: saved model name, default = 'model'
ignore_warning: whether to ignore warning, default = True
encoder: Encoders selected for the job, default = 'auto'
support ('DataEncoding')
'auto' will select all default encoders, or use a list to select
imputer: Imputers selected for the job, default = 'auto'
support ('SimpleImputer', 'JointImputer', 'ExpectationMaximization', 'KNNImputer',
'MissForestImputer', 'MICE', 'GAIN')
'auto' will select all default imputers, or use a list to select
balancing: Balancings selected for the job, default = 'auto'
support ('no_processing', 'SimpleRandomOverSampling', 'SimpleRandomUnderSampling',
'TomekLink', 'EditedNearestNeighbor', 'CondensedNearestNeighbor', 'OneSidedSelection',
'CNN_TomekLink', 'Smote', 'Smote_TomekLink', 'Smote_ENN')
'auto' will select all default balancings, or use a list to select
scaling: Scalings selected for the job, default = 'auto'
support ('no_processing', 'MinMaxScale', 'Standardize', 'Normalize', 'RobustScale',
'PowerTransformer', 'QuantileTransformer', 'Winsorization')
'auto' will select all default scalings, or use a list to select
feature_selection: Feature selections selected for the job, default = 'auto'
support ('no_processing', 'LDASelection', 'PCA_FeatureSelection', 'RBFSampler',
'FeatureFilter', 'ASFFS', 'GeneticAlgorithm', 'extra_trees_preproc_for_classification',
'fast_ica', 'feature_agglomeration', 'kernel_pca', 'kitchen_sinks',
'liblinear_svc_preprocessor', 'nystroem_sampler', 'pca', 'polynomial',
'random_trees_embedding', 'select_percentile_classification','select_rates_classification',
'truncatedSVD')
'auto' will select all default feature selections, or use a list to select
models: Models selected for the job, default = 'auto'
support regressors ("AdaboostRegressor", "ARDRegression", "DecisionTree",
"ExtraTreesRegressor", "GaussianProcess", "GradientBoosting",
"KNearestNeighborsRegressor", "LibLinear_SVR", "LibSVM_SVR",
"MLPRegressor", "RandomForest", "SGD")
'auto' will select all default models, or use a list to select
validation: Whether to use train_test_split to test performance on test set, default = True
valid_size: Test percentage used to evaluate the performance, default = 0.15
only effective when validation = True
objective: Objective function to test performance, default = 'MSE'
support metrics for regression ("MSE", "MAE", "MSLE", "R2", "MAX")
search_algo: search algorithm used for hyperparameter optimization, default = "HyperOpt"
support ("RandomSearch", "GridSearch", "BayesOptSearch", "AxSearch", "BOHB",
"BlendSearch", "CFO", "DragonflySearch", "HEBO", "HyperOpt", "Nevergrad",
"Optuna", "SigOpt", "Scikit-Optimize", "ZOOpt", "Reapter",
"ConcurrencyLimiter", callable)
search_algo_settings: search algorithm settings, default = {}
need manual configuration for each search algorithm
search_scheduler: search scheduler used, default = "FIFOScheduler"
support ("FIFOScheduler", "ASHAScheduler", "HyperBandScheduler", "MedianStoppingRule"
"PopulationBasedTraining", "PopulationBasedTrainingReplay", "PB2",
"HyperBandForBOHB", callable)
search_scheduler_settings: search scheduler settings, default = {}
need manual configuration for each search scheduler
logger: callback logger, default = ["Logger"]
list of supported callbacks, support ("Logger", "TBX", "JSON", "CSV", "MLflow", "Wandb")
progress_reporter: progress reporter, default = "CLIReporter"
support ("CLIReporter", "JupyterNotebookReporter")
full_status: whether to print full status, default = False
verbose: display for output, default = 1
support (0, 1, 2, 3)
cpu_threads: number of cpu threads to use, default = None
if None, get all available cpu threads
use_gpu: whether to use gpu, default = None
if None, will use gpu if available, otherwise False (not to use gpu)
reset_index: whether to reset the index during training, default = True
some methods are index independent (they ignore/reset the index, e.g. GAIN);
if you wish to use these methods with reset_index = False, please make sure
all input indexes are ordered and start from 0
seed: random seed, default = 1
"""
def __init__(
self,
timeout=360,
max_evals=64,
allow_error_prop=0.1,
temp_directory="tmp",
delete_temp_after_terminate=False,
save=True,
model_name="model",
ignore_warning=True,
encoder="auto",
imputer="auto",
balancing="auto",
scaling="auto",
feature_selection="auto",
models="auto",
validation=True,
valid_size=0.15,
objective="MSE",
search_algo="HyperOpt",
search_algo_settings={},
search_scheduler="FIFOScheduler",
search_scheduler_settings={},
logger=["Logger"],
progress_reporter="CLIReporter",
full_status=False,
verbose=1,
cpu_threads=None,
use_gpu=None,
reset_index=True,
seed=1,
):
self.timeout = timeout
self.max_evals = max_evals
self.allow_error_prop = allow_error_prop
self.temp_directory = temp_directory
self.delete_temp_after_terminate = delete_temp_after_terminate
self.save = save
self.model_name = model_name
self.ignore_warning = ignore_warning
self.encoder = encoder
self.imputer = imputer
self.balancing = balancing
self.scaling = scaling
self.feature_selection = feature_selection
self.models = models
self.validation = validation
self.valid_size = valid_size
self.objective = objective
self.search_algo = search_algo
self.search_algo_settings = search_algo_settings
self.search_scheduler = search_scheduler
self.search_scheduler_settings = search_scheduler_settings
self.logger = logger
self.progress_reporter = progress_reporter
self.full_status = full_status
self.verbose = verbose
self.cpu_threads = cpu_threads
self.use_gpu = use_gpu
self.reset_index = reset_index
self.seed = seed
self._fitted = False # whether the model has been fitted
super().__init__(
task_mode="regression",
timeout=self.timeout,
max_evals=self.max_evals,
allow_error_prop=self.allow_error_prop,
temp_directory=self.temp_directory,
delete_temp_after_terminate=self.delete_temp_after_terminate,
save=self.save,
model_name=self.model_name,
ignore_warning=self.ignore_warning,
encoder=self.encoder,
imputer=self.imputer,
balancing=self.balancing,
scaling=self.scaling,
feature_selection=self.feature_selection,
models=self.models,
validation=self.validation,
valid_size=self.valid_size,
objective=self.objective,
search_algo=self.search_algo,
search_algo_settings=self.search_algo_settings,
search_scheduler=self.search_scheduler,
search_scheduler_settings=self.search_scheduler_settings,
logger=self.logger,
progress_reporter=self.progress_reporter,
full_status=self.full_status,
verbose=self.verbose,
cpu_threads=self.cpu_threads,
use_gpu=self.use_gpu,
reset_index=self.reset_index,
seed=self.seed,
)
def fit(self, X, y):
super().fit(X, y)
self._fitted = True
return self
def predict(self, X):
return super().predict(X)
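# Minimal usage sketch (editor's note; X_train/y_train/X_test are placeholder
# pandas or numpy data, not part of this module):
#
#   reg = AutoTabularRegressor(timeout=600, objective="MAE")
#   reg.fit(X_train, y_train)
#   y_pred = reg.predict(X_test)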
class AutoTabularClassifier(AutoTabularBase):
""" "
AutoTabular for classification tasks build on top of AutoTabularBase
Parameters
----------
timeout: Total time limit for the job in seconds, default = 360
max_evals: Maximum number of function evaluations allowed, default = 64
allow_error_prop: proportion of tasks allowed to fail, default = 0.1
allowed number of failures is int(max_evals * allow_error_prop)
temp_directory: folder path to store temporary model, default = 'tmp'
delete_temp_after_terminate: whether to delete temporary information, default = False
save: whether to save model after training, default = True
model_name: saved model name, default = 'model'
ignore_warning: whether to ignore warning, default = True
encoder: Encoders selected for the job, default = 'auto'
support ('DataEncoding')
'auto' will select all default encoders, or use a list to select
imputer: Imputers selected for the job, default = 'auto'
support ('SimpleImputer', 'JointImputer', 'ExpectationMaximization', 'KNNImputer',
'MissForestImputer', 'MICE', 'GAIN')
'auto' will select all default imputers, or use a list to select
balancing: Balancings selected for the job, default = 'auto'
support ('no_processing', 'SimpleRandomOverSampling', 'SimpleRandomUnderSampling',
'TomekLink', 'EditedNearestNeighbor', 'CondensedNearestNeighbor', 'OneSidedSelection',
'CNN_TomekLink', 'Smote', 'Smote_TomekLink', 'Smote_ENN')
'auto' will select all default balancings, or use a list to select
scaling: Scalings selected for the job, default = 'auto'
support ('no_processing', 'MinMaxScale', 'Standardize', 'Normalize', 'RobustScale',
'PowerTransformer', 'QuantileTransformer', 'Winsorization')
'auto' will select all default scalings, or use a list to select
feature_selection: Feature selections selected for the job, default = 'auto'
support ('no_processing', 'LDASelection', 'PCA_FeatureSelection', 'RBFSampler',
'FeatureFilter', 'ASFFS', 'GeneticAlgorithm', 'extra_trees_preproc_for_classification',
'fast_ica', 'feature_agglomeration', 'kernel_pca', 'kitchen_sinks',
'liblinear_svc_preprocessor', 'nystroem_sampler', 'pca', 'polynomial',
'random_trees_embedding', 'select_percentile_classification','select_rates_classification',
'truncatedSVD')
'auto' will select all default feature selections, or use a list to select
models: Models selected for the job, default = 'auto'
support classifiers ('AdaboostClassifier', 'BernoulliNB', 'DecisionTree',
'ExtraTreesClassifier', 'GaussianNB', 'GradientBoostingClassifier',
'KNearestNeighborsClassifier', 'LDA', 'LibLinear_SVC', 'LibSVM_SVC',
'MLPClassifier', 'MultinomialNB','PassiveAggressive', 'QDA',
'RandomForest', 'SGD')
'auto' will select all default models, or use a list to select
validation: Whether to use train_test_split to test performance on test set, default = True
valid_size: Test percentage used to evaluate the performance, default = 0.15
only effective when validation = True
objective: Objective function to test performance, default = 'accuracy'
support metrics for classification ("accuracy", "precision", "auc", "hinge", "f1")
search_algo: search algorithm used for hyperparameter optimization, default = "HyperOpt"
support ("RandomSearch", "GridSearch", "BayesOptSearch", "AxSearch", "BOHB",
"BlendSearch", "CFO", "DragonflySearch", "HEBO", "HyperOpt", "Nevergrad",
"Optuna", "SigOpt", "Scikit-Optimize", "ZOOpt", "Reapter",
"ConcurrencyLimiter", callable)
search_algo_settings: search algorithm settings, default = {}
need manual configuration for each search algorithm
search_scheduler: search scheduler used, default = "FIFOScheduler"
support ("FIFOScheduler", "ASHAScheduler", "HyperBandScheduler", "MedianStoppingRule"
"PopulationBasedTraining", "PopulationBasedTrainingReplay", "PB2",
"HyperBandForBOHB", callable)
search_scheduler_settings: search scheduler settings, default = {}
need manual configuration for each search scheduler
logger: callback logger, default = ["Logger"]
list of supported callbacks, support ("Logger", "TBX", "JSON", "CSV", "MLflow", "Wandb")
progress_reporter: progress reporter, default = "CLIReporter"
support ("CLIReporter", "JupyterNotebookReporter")
full_status: whether to print full status, default = False
verbose: display for output, default = 1
support (0, 1, 2, 3)
cpu_threads: number of cpu threads to use, default = None
if None, get all available cpu threads
use_gpu: whether to use gpu, default = None
if None, will use gpu if available, otherwise False (not to use gpu)
reset_index: whether to reset the index during training, default = True
some methods are index independent (they ignore/reset the index, e.g. GAIN);
if you wish to use these methods with reset_index = False, please make sure
all input indexes are ordered and start from 0
seed: random seed, default = 1
"""
def __init__(
self,
timeout=360,
max_evals=64,
allow_error_prop=0.1,
temp_directory="tmp",
delete_temp_after_terminate=False,
save=True,
model_name="model",
ignore_warning=True,
encoder="auto",
imputer="auto",
balancing="auto",
scaling="auto",
feature_selection="auto",
models="auto",
validation=True,
valid_size=0.15,
objective="accuracy",
search_algo="HyperOpt",
search_algo_settings={},
search_scheduler="FIFOScheduler",
search_scheduler_settings={},
logger=["Logger"],
progress_reporter="CLIReporter",
full_status=False,
verbose=1,
cpu_threads=None,
use_gpu=None,
reset_index=True,
seed=1,
):
self.timeout = timeout
self.max_evals = max_evals
self.allow_error_prop = allow_error_prop
self.temp_directory = temp_directory
self.delete_temp_after_terminate = delete_temp_after_terminate
self.save = save
self.model_name = model_name
self.ignore_warning = ignore_warning
self.encoder = encoder
self.imputer = imputer
self.balancing = balancing
self.scaling = scaling
self.feature_selection = feature_selection
self.models = models
self.validation = validation
self.valid_size = valid_size
self.objective = objective
self.search_algo = search_algo
self.search_algo_settings = search_algo_settings
self.search_scheduler = search_scheduler
self.search_scheduler_settings = search_scheduler_settings
self.logger = logger
self.progress_reporter = progress_reporter
self.full_status = full_status
self.verbose = verbose
self.cpu_threads = cpu_threads
self.use_gpu = use_gpu
self.reset_index = reset_index
self.seed = seed
self._fitted = False # whether the model has been fitted
super().__init__(
task_mode="classification",
timeout=self.timeout,
max_evals=self.max_evals,
allow_error_prop=self.allow_error_prop,
temp_directory=self.temp_directory,
delete_temp_after_terminate=self.delete_temp_after_terminate,
save=self.save,
model_name=self.model_name,
ignore_warning=self.ignore_warning,
encoder=self.encoder,
imputer=self.imputer,
balancing=self.balancing,
scaling=self.scaling,
feature_selection=self.feature_selection,
models=self.models,
validation=self.validation,
valid_size=self.valid_size,
objective=self.objective,
search_algo=self.search_algo,
search_algo_settings=self.search_algo_settings,
search_scheduler=self.search_scheduler,
search_scheduler_settings=self.search_scheduler_settings,
logger=self.logger,
progress_reporter=self.progress_reporter,
full_status=self.full_status,
verbose=self.verbose,
cpu_threads=self.cpu_threads,
use_gpu=self.use_gpu,
reset_index=self.reset_index,
seed=self.seed,
)
def fit(self, X, y):
super().fit(X, y)
self._fitted = True
return self
def predict(self, X):
return super().predict(X)
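# Usage mirrors the regressor (editor's note; data names are placeholders):
#
#   clf = AutoTabularClassifier(objective="f1")
#   clf.fit(X_train, y_train)
#   y_pred = clf.predict(X_test)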
class AutoTabular(AutoTabularClassifier, AutoTabularRegressor):
"""
AutoTabular automatically dispatches to AutoTabularClassifier or AutoTabularRegressor
Parameters
----------
timeout: Total time limit for the job in seconds, default = 360
max_evals: Maximum number of function evaluations allowed, default = 64
allow_error_prop: proportion of tasks allowed to fail, default = 0.1
allowed number of failures is int(max_evals * allow_error_prop)
temp_directory: folder path to store temporary model, default = 'tmp'
delete_temp_after_terminate: whether to delete temporary information, default = False
save: whether to save model after training, default = True
model_name: saved model name, default = 'model'
ignore_warning: whether to ignore warning, default = True
encoder: Encoders selected for the job, default = 'auto'
support ('DataEncoding')
'auto' will select all default encoders, or use a list to select
imputer: Imputers selected for the job, default = 'auto'
support ('SimpleImputer', 'JointImputer', 'ExpectationMaximization', 'KNNImputer',
'MissForestImputer', 'MICE', 'GAIN')
'auto' will select all default imputers, or use a list to select
balancing: Balancings selected for the job, default = 'auto'
support ('no_processing', 'SimpleRandomOverSampling', 'SimpleRandomUnderSampling',
'TomekLink', 'EditedNearestNeighbor', 'CondensedNearestNeighbor', 'OneSidedSelection',
'CNN_TomekLink', 'Smote', 'Smote_TomekLink', 'Smote_ENN')
'auto' will select all default balancings, or use a list to select
scaling: Scalings selected for the job, default = 'auto'
support ('no_processing', 'MinMaxScale', 'Standardize', 'Normalize', 'RobustScale',
'PowerTransformer', 'QuantileTransformer', 'Winsorization')
'auto' will select all default scalings, or use a list to select
feature_selection: Feature selections selected for the job, default = 'auto'
support ('no_processing', 'LDASelection', 'PCA_FeatureSelection', 'RBFSampler',
'FeatureFilter', 'ASFFS', 'GeneticAlgorithm', 'extra_trees_preproc_for_classification',
'fast_ica', 'feature_agglomeration', 'kernel_pca', 'kitchen_sinks',
'liblinear_svc_preprocessor', 'nystroem_sampler', 'pca', 'polynomial',
'random_trees_embedding', 'select_percentile_classification','select_rates_classification',
'truncatedSVD')
'auto' will select all default feature selections, or use a list to select
models: Models selected for the job, default = 'auto'
support classifiers ('AdaboostClassifier', 'BernoulliNB', 'DecisionTree',
'ExtraTreesClassifier', 'GaussianNB', 'GradientBoostingClassifier',
'KNearestNeighborsClassifier', 'LDA', 'LibLinear_SVC', 'LibSVM_SVC',
'MLPClassifier', 'MultinomialNB','PassiveAggressive', 'QDA',
'RandomForest', 'SGD')
support regressors ("AdaboostRegressor", "ARDRegression", "DecisionTree",
"ExtraTreesRegressor", "GaussianProcess", "GradientBoosting",
"KNearestNeighborsRegressor", "LibLinear_SVR", "LibSVM_SVR",
"MLPRegressor", "RandomForest", "SGD")
'auto' will select all default models, or use a list to select
validation: Whether to use train_test_split to test performance on test set, default = True
valid_size: Test percentage used to evaluate the performance, default = 0.15
only effective when validation = True
objective: Objective function to test performance, default = None
(if None, 'accuracy' is used for classification and 'MSE' for regression)
support metrics for regression ("MSE", "MAE", "MSLE", "R2", "MAX")
support metrics for classification ("accuracy", "precision", "auc", "hinge", "f1")
search_algo: search algorithm used for hyperparameter optimization, default = "HyperOpt"
support ("RandomSearch", "GridSearch", "BayesOptSearch", "AxSearch", "BOHB",
"BlendSearch", "CFO", "DragonflySearch", "HEBO", "HyperOpt", "Nevergrad",
"Optuna", "SigOpt", "Scikit-Optimize", "ZOOpt", "Reapter",
"ConcurrencyLimiter", callable)
search_algo_settings: search algorithm settings, default = {}
need manual configuration for each search algorithm
search_scheduler: search scheduler used, default = "FIFOScheduler"
support ("FIFOScheduler", "ASHAScheduler", "HyperBandScheduler", "MedianStoppingRule"
"PopulationBasedTraining", "PopulationBasedTrainingReplay", "PB2",
"HyperBandForBOHB", callable)
search_scheduler_settings: search scheduler settings, default = {}
need manual configuration for each search scheduler
logger: callback logger, default = ["Logger"]
list of supported callbacks, support ("Logger", "TBX", "JSON", "CSV", "MLflow", "Wandb")
progress_reporter: progress reporter, default = "CLIReporter"
support ("CLIReporter", "JupyterNotebookReporter")
full_status: whether to print full status, default = False
verbose: display for output, default = 1
support (0, 1, 2, 3)
cpu_threads: number of cpu threads to use, default = None
if None, get all available cpu threads
use_gpu: whether to use gpu, default = None
if None, will use gpu if available, otherwise False (not to use gpu)
reset_index: whether to reset the index during training, default = True
some methods are index independent (they ignore/reset the index, e.g. GAIN);
if you wish to use these methods with reset_index = False, please make sure
all input indexes are ordered and start from 0
seed: random seed, default = 1
"""
def __init__(
self,
timeout=360,
max_evals=64,
allow_error_prop=0.1,
temp_directory="tmp",
delete_temp_after_terminate=False,
save=True,
model_name="model",
ignore_warning=True,
encoder="auto",
imputer="auto",
balancing="auto",
scaling="auto",
feature_selection="auto",
models="auto",
validation=True,
valid_size=0.15,
objective=None,
search_algo="HyperOpt",
search_algo_settings={},
search_scheduler="FIFOScheduler",
search_scheduler_settings={},
logger = ["Logger"],
progress_reporter="CLIReporter",
full_status=False,
verbose=1,
cpu_threads=None,
use_gpu=None,
reset_index=True,
seed=1,
):
self.timeout = timeout
self.max_evals = max_evals
self.allow_error_prop = allow_error_prop
self.temp_directory = temp_directory
self.delete_temp_after_terminate = delete_temp_after_terminate
self.save = save
self.model_name = model_name
self.ignore_warning = ignore_warning
self.encoder = encoder
self.imputer = imputer
self.balancing = balancing
self.scaling = scaling
self.feature_selection = feature_selection
self.models = models
self.validation = validation
self.valid_size = valid_size
self.objective = objective
self.search_algo = search_algo
self.search_algo_settings = search_algo_settings
self.search_scheduler = search_scheduler
self.search_scheduler_settings = search_scheduler_settings
self.logger = logger
self.progress_reporter = progress_reporter
self.full_status = full_status
self.verbose = verbose
self.cpu_threads = cpu_threads
self.use_gpu = use_gpu
self.reset_index = reset_index
self.seed = seed
self._fitted = False # whether the model has been fitted
def fit(self, X, y=None):
if isinstance(y, pd.DataFrame) or isinstance(y, np.ndarray):
self._type = type_of_task(y)
elif y is None:
self._type = "Unsupervised"
if self._type in ["binary", "multiclass"]: # assign classification tasks
self.model = AutoTabularClassifier(
timeout=self.timeout,
max_evals=self.max_evals,
allow_error_prop=self.allow_error_prop,
temp_directory=self.temp_directory,
delete_temp_after_terminate=self.delete_temp_after_terminate,
save=self.save,
model_name=self.model_name,
ignore_warning=self.ignore_warning,
encoder=self.encoder,
imputer=self.imputer,
balancing=self.balancing,
scaling=self.scaling,
feature_selection=self.feature_selection,
models=self.models,
validation=self.validation,
valid_size=self.valid_size,
objective="accuracy" if not self.objective else self.objective,
search_algo=self.search_algo,
search_algo_settings=self.search_algo_settings,
search_scheduler=self.search_scheduler,
search_scheduler_settings=self.search_scheduler_settings,
logger=self.logger,
progress_reporter=self.progress_reporter,
full_status=self.full_status,
verbose=self.verbose,
cpu_threads=self.cpu_threads,
use_gpu=self.use_gpu,
reset_index=self.reset_index,
seed=self.seed,
)
elif self._type in ["integer", "continuous"]: # assign regression tasks
self.model = AutoTabularRegressor(
timeout=self.timeout,
max_evals=self.max_evals,
allow_error_prop=self.allow_error_prop,
temp_directory=self.temp_directory,
delete_temp_after_terminate=self.delete_temp_after_terminate,
save=self.save,
model_name=self.model_name,
ignore_warning=self.ignore_warning,
encoder=self.encoder,
imputer=self.imputer,
balancing=self.balancing,
scaling=self.scaling,
feature_selection=self.feature_selection,
models=self.models,
validation=self.validation,
valid_size=self.valid_size,
objective="MSE" if not self.objective else self.objective,
search_algo=self.search_algo,
search_algo_settings=self.search_algo_settings,
search_scheduler=self.search_scheduler,
search_scheduler_settings=self.search_scheduler_settings,
logger=self.logger,
progress_reporter=self.progress_reporter,
full_status=self.full_status,
verbose=self.verbose,
cpu_threads=self.cpu_threads,
use_gpu=self.use_gpu,
reset_index=self.reset_index,
seed=self.seed,
)
else:
raise ValueError(
'Not recognizing type, only ["binary", "multiclass", "integer", "continuous"] accepted, get {}!'.format(
self._type
)
)
self.model.fit(X, y)
self._fitted = True
return self
def predict(self, X):
if self.model:
return self.model.predict(X)
else:
raise ValueError("No tasks found! Need to fit first.")
| 40.202899
| 120
| 0.669037
|
8ee04a61f03dd472b36c75a4b44acee8402faf41
| 6,460
|
py
|
Python
|
others/top500/pinyin/__init__.py
|
galletitaoreo/PythonPentest
|
abd46cc251abfc3fba02d7f03ddd049803dc6047
|
[
"MIT"
] | 5
|
2019-08-07T08:59:53.000Z
|
2021-05-14T19:35:57.000Z
|
others/top500/pinyin/__init__.py
|
Harusenpai/PythonPentest
|
abd46cc251abfc3fba02d7f03ddd049803dc6047
|
[
"MIT"
] | null | null | null |
others/top500/pinyin/__init__.py
|
Harusenpai/PythonPentest
|
abd46cc251abfc3fba02d7f03ddd049803dc6047
|
[
"MIT"
] | 2
|
2021-03-31T21:20:19.000Z
|
2021-08-28T04:21:12.000Z
|
# -*- coding: utf-8 -*-
import os
import re
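# Editor's note: this package converts Chinese text to pinyin. A per-character
# mapping is loaded from chars.txt, Chinese punctuation is mapped to ASCII
# equivalents, and an optional word file can refine multi-character readings.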
_punctuation_mapper = dict(zip(
u'?!。,、:《》“”‘’ ',
u'?!.,,:<>""\'\' '))
def _load_character_mapper():
mapper = dict()
filename = os.path.dirname(__file__)
filename += '/chars.txt'
f = open(filename)
try:
for line in f:
if re.match('^[\s]*#', line):
continue
line = line.strip()
columns = re.split('[\s(,)]+', line)
ch = unichr(int(columns[0], 16))
pinyin = columns[1:-1]
if len(pinyin) > 1:
mapper[ch] = pinyin
else:
mapper[ch] = pinyin[0]
finally:
f.close()
return mapper
_character_mapper = _load_character_mapper()
class Tokenizer:
def __init__(self, text):
assert isinstance(text, unicode)
self._text = text
self._pos = 0
self._length = len(text)
def __iter__(self):
return self
def next(self):
if self._pos >= self._length:
raise StopIteration
i = self._pos
type = self._char_type(self._text[i])
while True:
i += 1
if i >= self._length or self._char_type(self._text[i]) != type:
break
try:
return type, self._text[self._pos:i]
finally:
self._pos = i
def _char_type(self, ch):
if re.match('[\s]', ch):
return 4
elif ch in _punctuation_mapper:
return 3
elif ord(ch) <= 255:
return 1
else:
return 2
class WordMapper:
def __init__(self):
self._mapper = dict()
def load_from_file(self, filename):
f = open(filename)
try:
for line in f:
if re.match('^[\s]*#', line):
continue
line = line.strip()
columns = re.split('[\s]+', line)
word = unicode(columns[0], 'UTF-8')
pinyin = columns[1:]
self[word] = pinyin
finally:
f.close()
def __setitem__(self, word, pinyin):
assert isinstance(word, unicode)
mapper = self._mapper
for ch in word:
if not ch in mapper:
mapper[ch] = dict()
mapper = mapper[ch]
mapper['PY'] = pinyin
def __getitem__(self, word):
assert isinstance(word, unicode)
length = len(word)
pinyin = []
pos = 0
last_pinyin = None
last_pos = 0
mapper = self._mapper
while pos < length:
ch = word[pos]
if ch in mapper:
mapper = mapper[ch]
if 'PY' in mapper:
last_pinyin = mapper['PY']
last_pos = pos
pos += 1
if pos < length:
continue
if last_pinyin is None:
ch = word[last_pos]
if ch in _character_mapper:
last_pinyin = _character_mapper[ch]
else:
last_pinyin = ch
if len(last_pinyin) > 1:
pinyin.append(last_pinyin)
else:
pinyin.extend(last_pinyin)
else:
pinyin.extend(last_pinyin)
pos = last_pos + 1
mapper = self._mapper
last_pinyin = None
last_pos = pos
if last_pinyin is not None:
pinyin.extend(last_pinyin)
return pinyin
class Converter:
def __init__(self, word_mapper=WordMapper()):
self._word_mapper = word_mapper
def load_word_file(self, filename):
self._word_mapper.load_from_file(filename)
def convert(self, text, fmt='df', sc=True, pp=False, fuzzy=0):
if not isinstance(text, unicode):
text = unicode(text, 'UTF-8')
tokenizer = Tokenizer(text)
tokens = map(self._convert_token, tokenizer)
pinyin = ''
last_type = 4
for type, word in tokens:
if type == 2:
if last_type != 4:
pinyin += ' '
pinyin += self._format_word(word, fmt, sc, pp, fuzzy)
pass
elif type == 3:
pinyin += word
elif type == 4:
pinyin += word
else:
if last_type == 2:
pinyin += ' '
pinyin += word
last_type = type
return pinyin
def _convert_token(self, token):
type, word = token
if type == 2:
return type, self._word_mapper[word]
elif type == 3:
return type, _punctuation_mapper[word]
else:
return type, word.encode('UTF-8')
def _format_word(self, word, fmt, sc, pp, fuzzy):
if pp and not sc:
pinyin_set = set()
pinyin_list = [None] * len(word)
def func(idx):
if idx >= len(word):
pinyin_set.add(''.join(pinyin_list))
return
ch = word[idx]
if isinstance(ch, list):
for c in ch:
pinyin_list[idx] = self._format_ch(c, fmt, fuzzy)
func(idx+1)
else:
pinyin_list[idx] = self._format_ch(ch, fmt, fuzzy)
func(idx+1)
func(0)
return '|'.join(pinyin_set)
def func(ch):
if isinstance(ch, list):
pinyin_list = []
if pp:
for c in ch:
pinyin_list.append(self._format_ch(c, fmt, fuzzy))
else:
pinyin_list.append(self._format_ch(ch[0], fmt, fuzzy))
return '|'.join(set(pinyin_list))
else:
return self._format_ch(ch, fmt, fuzzy)
pinyin_list = map(func, word)
if sc:
return ' '.join(pinyin_list)
else:
return ''.join(pinyin_list)
def _format_ch(self, ch, fmt, fuzzy):
if fuzzy > 0:
raise Exception('Not implemented')
if fmt == 'df':
return ch[:-1]
if fmt == 'tn':
return ch
if fmt == 'fl':
return ch[0]
raise Exception('Not implemented')
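# Usage sketch (editor's note): this module targets Python 2 (it relies on
# unicode/unichr and the old iterator protocol), so run it under Python 2:
#
#   converter = Converter()
#   # converter.load_word_file('words.txt')  # optional, hypothetical word file
#   print converter.convert('中文')           # space-separated pinyin (sc=True, fmt='df')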
| 26.916667
| 75
| 0.464551
|
15765ef8e39f40d0550763b86f868ae444159adb
| 2,997
|
py
|
Python
|
predict-images.py
|
bryner18/machine-learning-image-rec
|
dcd6f5ad9e1ec435b609680470748b084d4c8e41
|
[
"MIT"
] | null | null | null |
predict-images.py
|
bryner18/machine-learning-image-rec
|
dcd6f5ad9e1ec435b609680470748b084d4c8e41
|
[
"MIT"
] | null | null | null |
predict-images.py
|
bryner18/machine-learning-image-rec
|
dcd6f5ad9e1ec435b609680470748b084d4c8e41
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import sys
import time
import queue
import threading
import numpy as np
import tensorflow as tf
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf.logging.set_verbosity(tf.logging.ERROR)
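# Editor's note on the overall flow: the script loads a retrained TensorFlow
# graph and its label file from /tmp/retrain_tmp, decodes and resizes every
# image found in the 'unknown_images' directory to 299x299 (Inception-style
# input), runs each prediction in a worker thread (roughly ten threads at a
# time), and collects the results through a thread-safe queue before printing
# the top class and its score for each image.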
def load_labels(label_file):
label = []
proto_as_ascii_lines = tf.gfile.GFile(label_file).readlines()
for l in proto_as_ascii_lines:
label.append(l.rstrip())
return label
def predict_image(q, sess, graph, image_bytes, img_full_path, labels, input_operation, output_operation):
image = read_tensor_from_image_bytes(image_bytes)
results = sess.run(output_operation.outputs[0], {
input_operation.outputs[0]: image
})
results = np.squeeze(results)
prediction = results.argsort()[-5:][::-1][0]
q.put({'img_full_path': img_full_path, 'prediction': labels[prediction].title(
), 'percent': results[prediction]})
def load_graph(model_file):
graph = tf.Graph()
graph_def = tf.GraphDef()
with open(model_file, "rb") as f:
graph_def.ParseFromString(f.read())
with graph.as_default():
tf.import_graph_def(graph_def)
return graph
def read_tensor_from_image_bytes(imagebytes, input_height=299, input_width=299, input_mean=0, input_std=255):
image_reader = tf.image.decode_png(
imagebytes, channels=3, name="png_reader")
float_caster = tf.cast(image_reader, tf.float32)
dims_expander = tf.expand_dims(float_caster, 0)
resized = tf.image.resize_bilinear(
dims_expander, [input_height, input_width])
normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
sess = tf.compat.v1.Session()
result = sess.run(normalized)
return result
def main():
graph = load_graph('/tmp/retrain_tmp/output_graph.pb')
labels = load_labels("/tmp/retrain_tmp/output_labels.txt")
input_operation = graph.get_operation_by_name("import/Placeholder")
output_operation = graph.get_operation_by_name("import/final_result")
sess = tf.compat.v1.Session(graph=graph)
q = queue.Queue()
unknown_images_dir = 'unknown_images'
unknown_images = os.listdir(unknown_images_dir)
for image in unknown_images:
img_full_path = '{}/{}'.format(unknown_images_dir, image)
print('Processing Image {}'.format(img_full_path))
while len(threading.enumerate()) > 10:
time.sleep(0.0001)
image_bytes = open(img_full_path, 'rb').read()
threading.Thread(target=predict_image, args=(q, sess, graph, image_bytes,
img_full_path, labels, input_operation, output_operation)).start()
print('Waiting For Threads to Finish...')
while q.qsize() < len(unknown_images):
time.sleep(0.001)
prediction_results = [q.get() for x in range(q.qsize())]
for prediction in prediction_results:
print('TensorFlow Predicted {img_full_path} is a {prediction} with {percent:.2%} Accuracy'.format(
**prediction))
if __name__ == "__main__":
main()
| 32.934066
| 109
| 0.694361
|
dcdb5614cb7f3ac73b319cb7d65590d7edcff32f
| 32,427
|
py
|
Python
|
rainforest/database/retrieve_radar_data.py
|
gugerlir/rainforest
|
85a9d51acf2036245f0cebf7232e735c2cf2dfc4
|
[
"BSD-3-Clause"
] | 3
|
2020-03-03T19:58:02.000Z
|
2021-11-02T08:22:22.000Z
|
rainforest/database/retrieve_radar_data.py
|
gugerlir/rainforest
|
85a9d51acf2036245f0cebf7232e735c2cf2dfc4
|
[
"BSD-3-Clause"
] | null | null | null |
rainforest/database/retrieve_radar_data.py
|
gugerlir/rainforest
|
85a9d51acf2036245f0cebf7232e735c2cf2dfc4
|
[
"BSD-3-Clause"
] | 5
|
2020-03-25T15:25:25.000Z
|
2021-06-11T22:15:58.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Main routine for retrieving radar data
This is meant to be run as a command line command from a slurm script
i.e. ./retrieve_radar_data -t <task_file_name> -c <config_file_name> -o <output_folder>
IMPORTANT: this function is called by the main routine in database.py
so you should never have to call it manually
"""
import numpy as np
import pandas as pd
import datetime
import logging
import gc
logging.basicConfig(level=logging.INFO)
import os
from collections import OrderedDict
from optparse import OptionParser
from rainforest.common import constants
from rainforest.common.lookup import get_lookup
from rainforest.common.utils import split_by_time, read_task_file, envyaml
from rainforest.common.utils import aggregate_multi, nested_dict_values
from rainforest.common.radarprocessing import Radar, hydroClass_single
from rainforest.common.retrieve_data import retrieve_prod, get_COSMO_T, get_COSMO_variables
IGNORE_ERRORS = True
class Updater(object):
def __init__(self, task_file, config_file, output_folder):
"""
Creates an Updater class instance that allows to add new radar data
to the database
Parameters
----------
task_file : str
The full path to a task file, i.e. a file with the following format
timestamp, station1, station2, station3...stationN
These files are generated by the database.py module so normally you
shouldn't have to create them yourself
config_file : str
The full path of a configuration file written in yaml format
that indicates how the radar retrieval must be done
output_folder: str
The full path where the generated files will be stored
"""
self.config = envyaml(config_file)
self.tasks = read_task_file(task_file)
self.output_folder = output_folder
# These are parameters that are used in many subfunctions
self.radar_cfg = self.config['RADAR_RETRIEVAL']
self.radars = self.radar_cfg['RADARS']
self.radar_variables = self.radar_cfg['RADAR_VARIABLES']
self.radar_variables.append('TCOUNT')
self.cosmo_variables = self.radar_cfg['COSMO_VARIABLES']
self.other_variables = self.radar_cfg['OTHER_VARIABLES']
self.agg_methods = self.radar_cfg['AGGREGATION_METHODS']
self.neighb_x = self.radar_cfg['NEIGHBOURS_X']
self.neighb_y = self.radar_cfg['NEIGHBOURS_Y']
self.sweeps = self.radar_cfg['SWEEPS']
self.dims = {'nr':len(self.radars),
'nc':len(self.cosmo_variables),
'nrv':len(self.radar_variables),
'nnx':len(self.neighb_x),
'nny':len(self.neighb_y),
'no':len(self.other_variables),
'nm':len(self.agg_methods),
'ns':len(self.sweeps)}
self.lut = {'coords':{}, 'heights':{}, 'visib': {}}
for r in self.radars:
coords, _, heights = get_lookup('station_to_rad', radar = r)
self.lut['coords'][r], self.lut['heights'][r] = coords, heights
self.lut['visib'][r] = get_lookup('visibility_rad', radar = r)
if 'HYDRO' in self.radar_variables:
# Note that hydro is treated a bit differently as it is computed
# after aggregation to save time
self.dims['nrv'] -= 1
def retrieve_radar_files(self, radar, start_time, end_time,
include_vpr = True, include_status = True):
"""
Retrieves a set of radar files for a given time range
Parameters
----------
radar : char
The name of the radar, i.e either 'A','D','L','P','W'
start_time : datetime.datetime instance
starting time of the time range
end_time : datetime.datetime instance
end time of the time range
include_vpr : bool (optional)
Whether or not to also include VPR files
include_status : bool (optional)
Whether or not to also include status files
"""
sweeps = self.config['RADAR_RETRIEVAL']['SWEEPS']
files_rad = {}
files_rad['radar'] = {}
if include_vpr:
files_rad['vpr'] = {}
try:
files_r = retrieve_prod(self.config['TMP_FOLDER'],
start_time, end_time,
product_name = 'ML' + radar,
sweeps = sweeps)
files_rad['radar'] = files_r
if include_vpr:
if radar in ['L','A','D']:
radar_vpr = radar
else:
radar_vpr = 'A'
# Take only one out of two since we work at 5 min
files_v = retrieve_prod(self.config['TMP_FOLDER'],
start_time, end_time,
product_name = 'ZZ' + radar_vpr)
files_rad['vpr'] = files_v[::2]
if include_status:
files_s = retrieve_prod(self.config['TMP_FOLDER'],
start_time, end_time,
product_name = 'ST' + radar,
pattern = 'ST*')
files_rad['status'] = files_s
files_rad = split_by_time(files_rad)
except:
logging.error("""Retrieval for radar {:s} at timesteps {:s}-{:s}
failed""".format(radar, str(start_time), str(end_time)))
raise
return files_rad
def process_single_timestep(self, list_stations, radar_object, tidx):
"""
Processes a single 5 min timestep for a set of stations
Parameters
----------
list_stations : list of str
Names of all SMN or pluvio stations for which to retrieve the radar
data
radar_object : Radar object instance as defined in common.radarprocessing
a radar object which contains all radar variables in polar format
tidx : int
indicates if a radar 5 min timestep is the first or the second
in the corresponding 10 min gauge period, 1 = first, 2 = second
"""
# Some global parameters
radar = radar_object.radname
radsweeps = radar_object.radsweeps
lut_coords = self.lut['coords'][radar]
lut_heights = self.lut['heights'][radar]
# For every timestep, we have this many columns per sweep
ncols_by_sweep = (self.dims['no'] + self.dims['nc']
+ (self.dims['nnx'] * self.dims['nny'] * self.dims['nrv'])
* self.dims['nm'])
# Initialize output
N, M = len(list_stations), ncols_by_sweep * len(self.sweeps)
all_data = np.zeros((N,M), dtype = np.float32) + np.nan
#################
# Check if no processing is required, i.e. if no ZH at any station
valid_data = False
for sweep in radar_object.sweeps:
ZH = radsweeps[sweep]
for j, sta in enumerate(list_stations):
if sta not in lut_coords.keys():
# Station not visible from given radar for any sweep
continue
if sweep not in lut_coords[sta].keys():
# Station not visible from given radar for that sweep
continue
for x in self.neighb_x:
for y in self.neighb_y:
strneighb = str(x)+str(y)
if strneighb not in lut_coords[sta][sweep].keys():
continue
if not len(lut_coords[sta][sweep][strneighb]):
continue
idx = lut_coords[sta][sweep][strneighb]
if ZH.get_field(0,'ZH')[idx[:,0], idx[:,1]].count() > 0:
valid_data = True # at least one valid measurement
#################
if not valid_data:
logging.info('No need to process radar {:s}: no measurements above stations...'.format(radar))
return all_data
# Censor file for SNR and visib, except for the visib field, which is kept as is
if 'ZH_VISIB' in self.radar_variables or 'ZV_VISIB' in self.radar_variables:
radar_object.visib_mask(self.radar_cfg['VISIB_CORR']['MIN_VISIB'],
self.radar_cfg['VISIB_CORR']['MAX_CORR'])
radar_object.snr_mask(self.radar_cfg['SNR_THRESHOLD'])
# Compute KDP if needed
if 'KDP' in self.radar_variables:
radar_object.compute_kdp(self.radar_cfg['KDP_PARAMETERS'])
# Compute attenuation correction if needed
if 'ZH_CORR' in self.radar_variables or 'ZDR_CORR' in self.radar_variables:
radar_object.correct_attenuation()
for sweep in radar_object.sweeps:
# sweep is the actual sweep number, anything from 1 to 20
logging.info('Sweep = ' + str(sweep))
for j, sta in enumerate(list_stations):
idx0_col = (sweep-1) * ncols_by_sweep
if sta not in lut_coords.keys():
# Station not visible from given radar for all sweeps
continue
if sweep not in lut_coords[sta].keys():
# Station not visible from given radar for that sweep
continue
try:
if 'HEIGHT' in self.other_variables:
height = lut_heights[sta][sweep]
all_data[j, idx0_col] = height
idx0_col += 1
if 'VPR' in self.other_variables:
all_data[j,idx0_col] = float(radar_object.vpr(height))
idx0_col += 1
if 'RADPRECIP' in self.other_variables:
# Get wet radome from status file
try:
wetradome = (radar_object.status['status']['sweep']
[-1]['RADAR']['STAT']['WET_RADOME'])
if wetradome == None:
radprecip = 0
else:
radprecip = float(wetradome['wetradome_mmh']['@value'])
except:
radprecip = np.nan
all_data[j,idx0_col] = radprecip
idx0_col += 1
# COSMO data
idx = lut_coords[sta][sweep]['00']
tmp = _data_at_station(radsweeps[sweep],
self.cosmo_variables,
idx)
all_data[j, idx0_col : idx0_col + self.dims['nc']] = tmp
idx0_col += self.dims['nc']
for x in self.neighb_x:
for y in self.neighb_y:
strneighb = str(x)+str(y)
if strneighb not in lut_coords[sta][sweep].keys():
continue
if not len(lut_coords[sta][sweep][strneighb]):
continue
idx = lut_coords[sta][sweep][strneighb]
# Note: radsweeps is keyed by the actual sweep number; each
# entry holds a single sweep, so field index 0 is used when
# reading data from it (pyart knows nothing about missing sweeps)
tmp = _data_at_station(radsweeps[sweep],
self.radar_variables,
idx,
methods = self.agg_methods,
tidx = tidx)
all_data[j, idx0_col: idx0_col + len(tmp)] = tmp
idx0_col += len(tmp)
except Exception as e:
logging.error(e)
logging.info('Ignoring exception...')
if IGNORE_ERRORS:
pass # can fail if only missing data
else:
raise
return all_data
def process_all_timesteps(self):
"""
Processes all timesteps that are in the task file
"""
if 'HYDRO' in self.radar_variables:
# Hydrometeor class is computed in a bit different way, only
# after spatial and temporal aggregation
compute_hydro = True
self.radar_variables.remove('HYDRO')
else:
compute_hydro = False
if 'VPR' in self.other_variables:
include_vpr = True
else:
include_vpr = False
if ('PRECIPRAD' in self.other_variables or
'NH' in self.radar_variables or
'NV' in self.radar_variables):
include_status = True
else:
include_status = False
# COSMO retrieval for T only is much faster...
if self.cosmo_variables == ['T']:
only_cosmo_T = True
else:
only_cosmo_T = False
current_hour = None # Initialize current hour
colnames = None # Initialize column names
# Create the list of aggregation operators used to aggregate the two
# 5 min radar timesteps to 10 min, for every radar
temp_agg_op = self.get_agg_operators()
all_timesteps = list(self.tasks.keys())
all_data_daily = []
for i, tstep in enumerate(all_timesteps):
logging.info('Processing timestep '+str(tstep))
# Works at 10 min resolution
# retrieve radar data
tstart = datetime.datetime.utcfromtimestamp(float(tstep))
# Retrieving over [tstart, tstart + 5 min] returns the two 5 min radar timesteps that make up the 10 min gauge period
tend = tstart + datetime.timedelta(minutes = 5)
tstep_end = tstep + 10 * 60
stations_to_get = self.tasks[tstep]
hour_of_year = datetime.datetime.strftime(tstart,'%Y%m%d%H')
day_of_year = hour_of_year[0:-2]
if i == 0:
current_day = day_of_year
logging.info('---')
if day_of_year != current_day or i == len(all_timesteps) - 1:
logging.info('Saving new table for day {:s}'.format(str(current_day)))
name = self.output_folder + current_day + '.parquet'
try:
# Save data to file if end of loop or new day
# Store data in new file
data = np.array(all_data_daily)
dic = OrderedDict()
for c, col in enumerate(colnames):
data_col = data[:,c]
# Check required column type
isin_listcols = [c == col.split('_')[0] for
c in constants.COL_TYPES.keys()]
if any(isin_listcols):
idx = np.where(isin_listcols)[0][0]
coltype = list(constants.COL_TYPES.values())[idx]
try:
data_col = data_col.astype(coltype)
except:# for int
data_col = data_col.astype(np.float).astype(coltype)
else:
data_col = data_col.astype(np.float32)
dic[col] = data_col
df = pd.DataFrame(dic)
# Keep a single TCOUNT column (identical across aggregation methods)
idx = 0
for m in self.agg_methods:
if idx == 0:
df['TCOUNT'] = df['TCOUNT_' + m]
del df['TCOUNT_' + m]
logging.info('Saving file ' + name)
df.to_parquet(name, compression = 'gzip', index = False)
except Exception as e:
logging.info('Could not save file ' + name)
logging.error(e)
if IGNORE_ERRORS:
pass # can fail if only missing data
else:
raise
# Reset list
all_data_daily = []
# Reset day counter
current_day = day_of_year
if len(self.cosmo_variables):
if hour_of_year != current_hour:
current_hour = hour_of_year
try:
if only_cosmo_T :
cosmo_data = get_COSMO_T(tstart, self.sweeps)
else:
cosmo_data = get_COSMO_variables(tstart,
self.cosmo_variables,
self.sweeps,
tmp_folder = self.config['TMP_FOLDER'])
except Exception as e:
logging.error(e)
logging.info('Ignoring exception...')
if IGNORE_ERRORS:
pass # can fail if only missing data
else:
raise
else:
cosmo_data = None
data_one_tstep = np.empty((len(stations_to_get),0),
dtype = np.float32)
for r in self.radars: # Main loop
# Check if we need to process the radar
# If none of the stations we want is in the list of stations seen by the radar
visible_stations = list(self.lut['coords']['A'].keys())
if not np.any(np.isin(stations_to_get, visible_stations)):
logging.info('No need to process radar {:s} for these stations...'.format(r))
continue
logging.info('Processing radar ' + r)
try:
data_one_rad = []
rad_files = self.retrieve_radar_files(r, tstart, tend,
include_vpr,
include_status)
for tidx, tstamp in enumerate(rad_files['radar'].keys()): # 2 timesteps to make 10 min
# Create radar object
radar = Radar(r, rad_files['radar'][tstamp],
rad_files['status'][tstamp],
rad_files['vpr'][tstamp])
if len(self.cosmo_variables):
radar.add_cosmo_data(cosmo_data[r])
tmp = self.process_single_timestep(stations_to_get,
radar, tidx + 1)
data_one_rad.append(tmp)
del radar
gc.collect()
# Now we aggregate in time over two periods of 5 min
# and add it in column direction
data_one_tstep = np.append(data_one_tstep,
aggregate_multi(np.array(data_one_rad),
temp_agg_op),
axis = 1)
except Exception as e:
logging.error(e)
logging.error("""Data retrieval for radar {:s} and timestep
{:s} failed, assigning missing data
""".format(r, str(tstep)))
empty = np.zeros((len(stations_to_get),
len(temp_agg_op)),
dtype = np.float32) + np.nan
data_one_tstep = np.append(data_one_tstep, empty, axis = 1)
if IGNORE_ERRORS:
pass # can fail if only missing data
else:
raise
# cleanup
try:
all_files = nested_dict_values(rad_files)
for files in all_files:
if os.path.exists(files):
os.remove(files)
except:
logging.error('Cleanup of radar data failed')
raise
try:
data_remapped, colnames = self._remap(data_one_tstep, tstep_end,
stations_to_get,
compute_hydro)
all_data_daily.extend(data_remapped)
del data_remapped
except Exception as e:
logging.error(e)
logging.info('Ignoring exception...')
if IGNORE_ERRORS:
pass # can fail if only missing data
else:
raise
del data_one_tstep
gc.collect()
def _remap(self, data, tstep, stations, compute_hydro = True):
'''
Remaps data from a format where all data from all sweeps and neighbours
are in the same row to a format where every sweep is on a different row
Original format
|sweep 1|,|sweep 2|,|sweep 3|,...|sweep 20|
where |...| = |OTHER_VARIABLES_SWEEPX, COSMO_VARIABLES_SWEEPX, RADAR_VARIABLES_SWEEPX|
Output format
one row per (sweep, neighbour) combination:
TSTEP, STATION, SWEEP1, NX, NY, OTHER_VARIABLES_SWEEP1, COSMO_VARIABLES_SWEEP1, RADAR_VARIABLES_SWEEP1
TSTEP, STATION, SWEEP2, NX, NY, OTHER_VARIABLES_SWEEP2, COSMO_VARIABLES_SWEEP2, RADAR_VARIABLES_SWEEP2
...
TSTEP, STATION, SWEEP20, NX, NY, OTHER_VARIABLES_SWEEP20, COSMO_VARIABLES_SWEEP20, RADAR_VARIABLES_SWEEP20
Note that the timestep and station information are also added to the
data
Parameters
----------
data : 2D numpy array
data in original format with all sweeps and neighbours on one row
tstep : str
timestamp in str format
stations : list of str
list of all stations; must have the same length as the data
compute_hydro (optional):
whether or not to compute the hydrometeor classification and add it
to the data
'''
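# Size illustration (editor's note, using the self.dims keys defined in
# __init__): each input row (one station) holds
#   nr * ns * (no + nc + nnx * nny * nrv * nm)
# values, and _remap expands it into at most nr * ns * nnx * nny output rows,
# each prefixed with TIMESTAMP, STATION, RADAR, SWEEP, NX, NY; slices
# containing NaN are dropped.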
logging.info('Remapping to tabular format')
rearranged = []
for ii, row in enumerate(data): # Loop on stations
cur_idx = 0
for i in range(self.dims['nr']): # Loop on radar
for j in range(self.dims['ns']): # Loop on sweeps
idx_sweep_start = cur_idx # idx of the beginning of the sweep
cur_idx += self.dims['nc'] + self.dims['no'] # Current idx in sweep
for k in range(self.dims['nnx']):
for l in range(self.dims['nny']):
dslice = []
# Add to each row COSMO and OTHER vars from nx = ny = 0
dslice.extend(row[idx_sweep_start:
idx_sweep_start + self.dims['nc'] + self.dims['no']])
# and radar variables from nx = k, ny = l
dslice.extend(row[cur_idx: cur_idx + self.dims['nrv'] *
self.dims['nm']])
dslice = np.array(dslice).astype(float)
if not np.any(np.isnan(dslice)) :
# Add constant info (timestamp, radars, sweep,
# nx, ny)
toAdd = [tstep,stations[ii]]
toAdd.extend([self.radars[i],self.sweeps[j],
self.neighb_x[k],
self.neighb_y[l]])
toAdd.extend(dslice)
rearranged.append(toAdd)
# Update index
cur_idx += self.dims['nrv'] * self.dims['nm']
cols = ['TIMESTAMP','STATION','RADAR','SWEEP','NX','NY']
cols.extend(self.other_variables)
cols.extend(self.cosmo_variables)
for r in self.radar_variables:
for m in self.agg_methods:
cols.extend([r + '_' + m])
rearranged = np.array(rearranged)
if len(rearranged.shape) == 1:
# If rearranged has only one line, expand to 2D
rearranged = np.expand_dims(rearranged, axis = 0)
if compute_hydro:
logging.info('Computing hydrometeor classif')
try:
for m in self.agg_methods:
zh_idx = cols.index('ZH_'+m)
zdr_idx = cols.index('ZDR_'+m)
kdp_idx = cols.index('KDP_'+m)
rhohv_idx = cols.index('RHOHV_'+m)
T_idx = cols.index('T')
hydro = hydroClass_single(rearranged[:,2], # radar
rearranged[:,zh_idx].astype(float),
rearranged[:,zdr_idx].astype(float),
rearranged[:,kdp_idx].astype(float),
rearranged[:,rhohv_idx].astype(float),
rearranged[:,T_idx].astype(float))
rearranged = np.column_stack((rearranged, hydro))
cols.append('HYDRO_'+m)
except:
logging.error("""Could not compute hydrometeor classes, make
sure that the variables ZH, ZDR, KDP, RHOHV and
T (COSMO temp) are specified in the config file
""")
raise # it will be caught later on
return rearranged, cols
def get_agg_operators(self):
'''
        Returns all aggregation operator codes needed to aggregate all columns to
10 min resolution, 0 = mean, 1 = log mean
'''
operators = []
for o in self.other_variables:
if o in constants.AVG_BY_VAR:
operators.append(constants.AVG_BY_VAR[o])
else:
operators.append(0)
for c in self.cosmo_variables:
if c in constants.AVG_BY_VAR:
operators.append(constants.AVG_BY_VAR[c])
else:
operators.append(0)
operators_per_neighb = []
for n1 in self.neighb_x:
for n2 in self.neighb_y:
for r in self.radar_variables:
for m in self.agg_methods:
if r in constants.AVG_BY_VAR:
operators_per_neighb.append(constants.AVG_BY_VAR[r])
else:
operators_per_neighb.append(0)
operators.extend(operators_per_neighb)
operators = operators * len(self.sweeps)
return operators
def _data_at_station(radar_object, variables, idx, methods = ['mean'], tidx = None):
'''
Gets polar data at the location of a station, using the indexes of the
lookup table
Parameters
----------
radar_object : Radar object instance as defined in common.radarprocessing
a radar object which contains all radar variables in polar format
variables : list of str
list of all variables to get
idx : list
list of all polar indexes that correspond to the station
methods (optional):
which methods to use to aggregate polar data over the Cartesian
pixel, available methods are 'mean', 'max', 'min'
tidx : int
indicates if a radar 5 min timestep is the first or the second
in the corresponding 10 min gauge period, 1 = first, 2 = second
'''
out = []
if 'max' in methods or 'min' in methods or 'tcount' in variables:
kdp = radar_object.get_field(0, 'KDP')[idx[:,0],idx[:,1]]
zh = radar_object.get_field(0, 'ZH')[idx[:,0],idx[:,1]]
locmaxzh = np.ma.argmax(zh)
locminzh = np.ma.argmin(zh)
locmaxkdp = np.ma.argmax(kdp)
locminkdp = np.ma.argmin(kdp)
for v in variables:
if v == 'HYDRO':
continue # skip hydro is computed only after aggregation
if v == 'TCOUNT':
for m in methods:
out.append(int(tidx * (zh.count() > 0)))
else:
data = np.ma.filled(radar_object.get_field(0,v)[idx[:,0],idx[:,1]],
fill_value = np.nan)
for m in methods:
if m == 'mean':
if v in constants.AVG_BY_VAR:
avg_method = constants.AVG_METHODS[constants.AVG_BY_VAR[v]]
else:
avg_method = constants.AVG_METHODS[0]
tmp = avg_method(data, axis = None)
out.append(float(tmp))
if m == 'max':
if v == 'KDP':
out.append(float(data[locmaxkdp]))
else:
out.append(float(data[locmaxzh]))
if m == 'min':
if v == 'KDP':
out.append(float(data[locminkdp]))
else:
out.append(float(data[locminzh]))
return out
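# Added sketch (not part of the original module): get_agg_operators() above returns one
# code per column, with 0 = arithmetic mean and 1 = "log" mean. The helper below shows
# one plausible reading of those codes when collapsing the two 5 min radar timesteps
# into a 10 min value; the actual mapping lives in constants.AVG_METHODS and is applied
# by aggregate_multi, so treat this purely as an illustration with assumed conventions.
def _apply_agg_code_demo(values, code):
    values = np.asarray(values, dtype=float)
    if code == 1:
        # assumed "log mean": average in linear units, then convert back to dB
        return 10.0 * np.log10(np.nanmean(10.0 ** (values / 10.0)))
    return np.nanmean(values)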
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-c", "--configfile", dest = "config_file",
help="Specify the user configuration file to use",
metavar="CONFIG")
parser.add_option("-t", "--taskfile", dest = "task_file", default = None,
help="Specify the task file to process", metavar="TASK")
parser.add_option("-o", "--output", dest = "output_folder", default = '/tmp/',
help="Specify the output directory", metavar="FOLDER")
(options, args) = parser.parse_args()
u = Updater(options.task_file, options.config_file, options.output_folder)
u.process_all_timesteps()
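# Added sketch (not part of the original script): reproduces, on toy dimensions, the
# index walk that the _remap method above uses to cut one wide row into per-neighbour
# slices for a single radar and sweep. The dims values and the synthetic row are
# assumptions chosen only for illustration.
def _remap_index_walk_demo():
    dims = {'nc': 2, 'no': 1, 'nrv': 2, 'nm': 1, 'nnx': 2, 'nny': 2}
    sweep_len = (dims['nc'] + dims['no']
                 + dims['nnx'] * dims['nny'] * dims['nrv'] * dims['nm'])
    row = list(range(sweep_len))                 # one radar, one sweep
    cur_idx = 0
    idx_sweep_start = cur_idx                    # start of the sweep block
    cur_idx += dims['nc'] + dims['no']           # skip the COSMO + OTHER variables
    slices = []
    for k in range(dims['nnx']):                 # loop on neighbour offsets in x
        for l in range(dims['nny']):             # loop on neighbour offsets in y
            const_part = row[idx_sweep_start:
                             idx_sweep_start + dims['nc'] + dims['no']]
            radar_part = row[cur_idx: cur_idx + dims['nrv'] * dims['nm']]
            slices.append(const_part + radar_part)
            cur_idx += dims['nrv'] * dims['nm']  # advance to the next neighbour block
    return slices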
| 42.949669
| 114
| 0.4759
|
28c7404485aca8f6cb316e8f946f730ae18c8a32
| 2,910
|
py
|
Python
|
tests/snuba/eventstream/test_eventstream.py
|
uandco/sentry
|
5b8d45cb71c6617dac8e64265848623fbfce9c99
|
[
"BSD-3-Clause"
] | 2
|
2019-03-04T12:45:54.000Z
|
2019-03-04T12:45:55.000Z
|
tests/snuba/eventstream/test_eventstream.py
|
uandco/sentry
|
5b8d45cb71c6617dac8e64265848623fbfce9c99
|
[
"BSD-3-Clause"
] | 196
|
2019-06-10T08:34:10.000Z
|
2022-02-22T01:26:13.000Z
|
tests/snuba/eventstream/test_eventstream.py
|
uandco/sentry
|
5b8d45cb71c6617dac8e64265848623fbfce9c99
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
from datetime import datetime, timedelta
import six
import time
import logging
from mock import patch, Mock
from sentry.event_manager import EventManager
from sentry.eventstream.kafka import KafkaEventStream
from sentry.eventstream.snuba import SnubaEventStream
from sentry.testutils import SnubaTestCase, TestCase
from sentry.utils import snuba, json
class SnubaEventStreamTest(TestCase, SnubaTestCase):
def setUp(self):
super(SnubaEventStreamTest, self).setUp()
self.kafka_eventstream = KafkaEventStream()
self.kafka_eventstream.producer = Mock()
@patch('sentry.eventstream.insert')
@patch('sentry.tagstore.delay_index_event_tags')
def test(self, mock_delay_index_event_tags, mock_eventstream_insert):
now = datetime.utcnow()
def _get_event_count():
return snuba.query(
start=now - timedelta(days=1),
end=now + timedelta(days=1),
groupby=['project_id'],
filter_keys={'project_id': [self.project.id]},
).get(self.project.id, 0)
assert _get_event_count() == 0
raw_event = {
'event_id': 'a' * 32,
'message': 'foo',
'timestamp': time.mktime(now.timetuple()),
'level': logging.ERROR,
'logger': 'default',
'tags': [],
}
manager = EventManager(raw_event)
manager.normalize()
event = manager.save(self.project.id)
# verify eventstream was called by EventManager
insert_args, insert_kwargs = list(mock_eventstream_insert.call_args)
assert not insert_args
assert insert_kwargs == {
'event': event,
'group': event.group,
'is_new_group_environment': True,
'is_new': True,
'is_regression': False,
'is_sample': False,
'primary_hash': 'acbd18db4cc2f85cedef654fccc4a4d8',
'skip_consume': False
}
assert mock_delay_index_event_tags.call_count == 1
# pass arguments on to Kafka EventManager
self.kafka_eventstream.insert(*insert_args, **insert_kwargs)
produce_args, produce_kwargs = list(self.kafka_eventstream.producer.produce.call_args)
assert not produce_args
assert produce_kwargs['topic'] == 'events'
assert produce_kwargs['key'] == six.text_type(self.project.id)
version, type_, payload1, payload2 = json.loads(produce_kwargs['value'])
assert version == 2
assert type_ == 'insert'
# insert what would have been the Kafka payload directly
# into Snuba, expect an HTTP 200 and for the event to now exist
snuba_eventstream = SnubaEventStream()
snuba_eventstream._send(self.project.id, 'insert', (payload1, payload2))
assert _get_event_count() == 1
| 34.642857
| 94
| 0.644674
|
e73fad439f0fb0b926d12c3dfd6f25632d777d30
| 9,830
|
py
|
Python
|
test/functional/p2p_blockfilters.py
|
apokalyzr/bitcoin
|
5b4b8f76f3ae11064d4aa3ac157558e364751fd2
|
[
"MIT"
] | 65,371
|
2015-01-01T06:52:02.000Z
|
2022-03-31T23:29:23.000Z
|
test/functional/p2p_blockfilters.py
|
apokalyzr/bitcoin
|
5b4b8f76f3ae11064d4aa3ac157558e364751fd2
|
[
"MIT"
] | 18,259
|
2015-01-01T03:22:57.000Z
|
2022-03-31T23:18:25.000Z
|
test/functional/p2p_blockfilters.py
|
apokalyzr/bitcoin
|
5b4b8f76f3ae11064d4aa3ac157558e364751fd2
|
[
"MIT"
] | 28,827
|
2015-01-01T00:25:45.000Z
|
2022-03-31T23:48:15.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2019-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests NODE_COMPACT_FILTERS (BIP 157/158).
Tests that a node configured with -blockfilterindex and -peerblockfilters signals
NODE_COMPACT_FILTERS and can serve cfilters, cfheaders and cfcheckpts.
"""
from test_framework.messages import (
FILTER_TYPE_BASIC,
NODE_COMPACT_FILTERS,
hash256,
msg_getcfcheckpt,
msg_getcfheaders,
msg_getcfilters,
ser_uint256,
uint256_from_str,
)
from test_framework.p2p import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
)
class FiltersClient(P2PInterface):
def __init__(self):
super().__init__()
# Store the cfilters received.
self.cfilters = []
def pop_cfilters(self):
cfilters = self.cfilters
self.cfilters = []
return cfilters
def on_cfilter(self, message):
"""Store cfilters received in a list."""
self.cfilters.append(message)
class CompactFiltersTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.rpc_timeout = 480
self.num_nodes = 2
self.extra_args = [
["-blockfilterindex", "-peerblockfilters"],
["-blockfilterindex"],
]
def run_test(self):
# Node 0 supports COMPACT_FILTERS, node 1 does not.
peer_0 = self.nodes[0].add_p2p_connection(FiltersClient())
peer_1 = self.nodes[1].add_p2p_connection(FiltersClient())
# Nodes 0 & 1 share the same first 999 blocks in the chain.
self.generate(self.nodes[0], 999)
# Stale blocks by disconnecting nodes 0 & 1, mining, then reconnecting
self.disconnect_nodes(0, 1)
stale_block_hash = self.generate(self.nodes[0], 1, sync_fun=self.no_op)[0]
self.nodes[0].syncwithvalidationinterfacequeue()
assert_equal(self.nodes[0].getblockcount(), 1000)
self.generate(self.nodes[1], 1001, sync_fun=self.no_op)
assert_equal(self.nodes[1].getblockcount(), 2000)
# Check that nodes have signalled NODE_COMPACT_FILTERS correctly.
assert peer_0.nServices & NODE_COMPACT_FILTERS != 0
assert peer_1.nServices & NODE_COMPACT_FILTERS == 0
# Check that the localservices is as expected.
assert int(self.nodes[0].getnetworkinfo()['localservices'], 16) & NODE_COMPACT_FILTERS != 0
assert int(self.nodes[1].getnetworkinfo()['localservices'], 16) & NODE_COMPACT_FILTERS == 0
self.log.info("get cfcheckpt on chain to be re-orged out.")
request = msg_getcfcheckpt(
filter_type=FILTER_TYPE_BASIC,
stop_hash=int(stale_block_hash, 16),
)
peer_0.send_and_ping(message=request)
response = peer_0.last_message['cfcheckpt']
assert_equal(response.filter_type, request.filter_type)
assert_equal(response.stop_hash, request.stop_hash)
assert_equal(len(response.headers), 1)
self.log.info("Reorg node 0 to a new chain.")
self.connect_nodes(0, 1)
self.sync_blocks(timeout=600)
self.nodes[0].syncwithvalidationinterfacequeue()
main_block_hash = self.nodes[0].getblockhash(1000)
assert main_block_hash != stale_block_hash, "node 0 chain did not reorganize"
self.log.info("Check that peers can fetch cfcheckpt on active chain.")
tip_hash = self.nodes[0].getbestblockhash()
request = msg_getcfcheckpt(
filter_type=FILTER_TYPE_BASIC,
stop_hash=int(tip_hash, 16),
)
peer_0.send_and_ping(request)
response = peer_0.last_message['cfcheckpt']
assert_equal(response.filter_type, request.filter_type)
assert_equal(response.stop_hash, request.stop_hash)
main_cfcheckpt = self.nodes[0].getblockfilter(main_block_hash, 'basic')['header']
tip_cfcheckpt = self.nodes[0].getblockfilter(tip_hash, 'basic')['header']
assert_equal(
response.headers,
[int(header, 16) for header in (main_cfcheckpt, tip_cfcheckpt)],
)
self.log.info("Check that peers can fetch cfcheckpt on stale chain.")
request = msg_getcfcheckpt(
filter_type=FILTER_TYPE_BASIC,
stop_hash=int(stale_block_hash, 16),
)
peer_0.send_and_ping(request)
response = peer_0.last_message['cfcheckpt']
stale_cfcheckpt = self.nodes[0].getblockfilter(stale_block_hash, 'basic')['header']
assert_equal(
response.headers,
[int(header, 16) for header in (stale_cfcheckpt, )],
)
self.log.info("Check that peers can fetch cfheaders on active chain.")
request = msg_getcfheaders(
filter_type=FILTER_TYPE_BASIC,
start_height=1,
stop_hash=int(main_block_hash, 16),
)
peer_0.send_and_ping(request)
response = peer_0.last_message['cfheaders']
main_cfhashes = response.hashes
assert_equal(len(main_cfhashes), 1000)
assert_equal(
compute_last_header(response.prev_header, response.hashes),
int(main_cfcheckpt, 16),
)
self.log.info("Check that peers can fetch cfheaders on stale chain.")
request = msg_getcfheaders(
filter_type=FILTER_TYPE_BASIC,
start_height=1,
stop_hash=int(stale_block_hash, 16),
)
peer_0.send_and_ping(request)
response = peer_0.last_message['cfheaders']
stale_cfhashes = response.hashes
assert_equal(len(stale_cfhashes), 1000)
assert_equal(
compute_last_header(response.prev_header, response.hashes),
int(stale_cfcheckpt, 16),
)
self.log.info("Check that peers can fetch cfilters.")
stop_hash = self.nodes[0].getblockhash(10)
request = msg_getcfilters(
filter_type=FILTER_TYPE_BASIC,
start_height=1,
stop_hash=int(stop_hash, 16),
)
peer_0.send_and_ping(request)
response = peer_0.pop_cfilters()
assert_equal(len(response), 10)
self.log.info("Check that cfilter responses are correct.")
for cfilter, cfhash, height in zip(response, main_cfhashes, range(1, 11)):
block_hash = self.nodes[0].getblockhash(height)
assert_equal(cfilter.filter_type, FILTER_TYPE_BASIC)
assert_equal(cfilter.block_hash, int(block_hash, 16))
computed_cfhash = uint256_from_str(hash256(cfilter.filter_data))
assert_equal(computed_cfhash, cfhash)
self.log.info("Check that peers can fetch cfilters for stale blocks.")
request = msg_getcfilters(
filter_type=FILTER_TYPE_BASIC,
start_height=1000,
stop_hash=int(stale_block_hash, 16),
)
peer_0.send_and_ping(request)
response = peer_0.pop_cfilters()
assert_equal(len(response), 1)
cfilter = response[0]
assert_equal(cfilter.filter_type, FILTER_TYPE_BASIC)
assert_equal(cfilter.block_hash, int(stale_block_hash, 16))
computed_cfhash = uint256_from_str(hash256(cfilter.filter_data))
assert_equal(computed_cfhash, stale_cfhashes[999])
self.log.info("Requests to node 1 without NODE_COMPACT_FILTERS results in disconnection.")
requests = [
msg_getcfcheckpt(
filter_type=FILTER_TYPE_BASIC,
stop_hash=int(main_block_hash, 16),
),
msg_getcfheaders(
filter_type=FILTER_TYPE_BASIC,
start_height=1000,
stop_hash=int(main_block_hash, 16),
),
msg_getcfilters(
filter_type=FILTER_TYPE_BASIC,
start_height=1000,
stop_hash=int(main_block_hash, 16),
),
]
for request in requests:
peer_1 = self.nodes[1].add_p2p_connection(P2PInterface())
peer_1.send_message(request)
peer_1.wait_for_disconnect()
self.log.info("Check that invalid requests result in disconnection.")
requests = [
# Requesting too many filters results in disconnection.
msg_getcfilters(
filter_type=FILTER_TYPE_BASIC,
start_height=0,
stop_hash=int(main_block_hash, 16),
),
# Requesting too many filter headers results in disconnection.
msg_getcfheaders(
filter_type=FILTER_TYPE_BASIC,
start_height=0,
stop_hash=int(tip_hash, 16),
),
# Requesting unknown filter type results in disconnection.
msg_getcfcheckpt(
filter_type=255,
stop_hash=int(main_block_hash, 16),
),
# Requesting unknown hash results in disconnection.
msg_getcfcheckpt(
filter_type=FILTER_TYPE_BASIC,
stop_hash=123456789,
),
]
for request in requests:
peer_0 = self.nodes[0].add_p2p_connection(P2PInterface())
peer_0.send_message(request)
peer_0.wait_for_disconnect()
def compute_last_header(prev_header, hashes):
"""Compute the last filter header from a starting header and a sequence of filter hashes."""
header = ser_uint256(prev_header)
for filter_hash in hashes:
header = hash256(ser_uint256(filter_hash) + header)
return uint256_from_str(header)
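# Added sketch (not part of the original test): unrolls compute_last_header for two toy
# filter hashes to make the chaining explicit (header_i = hash256(filter_hash_i ||
# header_{i-1})). The numeric values are assumptions used only for illustration.
def _compute_last_header_unrolled_demo():
    prev_header, h1, h2 = 0, 1, 2
    step1 = hash256(ser_uint256(h1) + ser_uint256(prev_header))
    step2 = hash256(ser_uint256(h2) + step1)
    assert uint256_from_str(step2) == compute_last_header(prev_header, [h1, h2])
    return uint256_from_str(step2)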
if __name__ == '__main__':
CompactFiltersTest().main()
| 38.100775
| 99
| 0.643133
|
700711c55e04e147ef552d2de2c20b30f445e707
| 30,527
|
py
|
Python
|
var/spack/repos/builtin/packages/openmpi/package.py
|
greschd/spack
|
ce8cf5d9b037079ae3fd77fcaf84cf2eafbc9fbd
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2020-05-24T15:23:12.000Z
|
2020-05-24T15:23:12.000Z
|
var/spack/repos/builtin/packages/openmpi/package.py
|
bbbbpage/spack
|
bc145b2b3f87da8276e4c2e0665e9830de3d0322
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/openmpi/package.py
|
bbbbpage/spack
|
bc145b2b3f87da8276e4c2e0665e9830de3d0322
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import sys
import llnl.util.tty as tty
def _verbs_dir():
"""Try to find the directory where the OpenFabrics verbs package is
installed. Return None if not found.
"""
try:
# Try to locate Verbs by looking for a utility in the path
ibv_devices = which("ibv_devices")
# Run it (silently) to ensure it works
ibv_devices(output=str, error=str)
# Get path to executable
path = ibv_devices.exe[0]
# Remove executable name and "bin" directory
path = os.path.dirname(path)
path = os.path.dirname(path)
# There's usually no "/include" on Unix; use "/usr/include" instead
if path == "/":
path = "/usr"
return path
except TypeError:
return None
except ProcessError:
return None
def _mxm_dir():
"""Look for default directory where the Mellanox package is
installed. Return None if not found.
"""
# Only using default directory; make this more flexible in the future
path = "/opt/mellanox/mxm"
if os.path.isdir(path):
return path
else:
return None
def _tm_dir():
"""Look for default directory where the PBS/TM package is
installed. Return None if not found.
"""
# /opt/pbs from PBS 18+; make this more flexible in the future
paths_list = ("/opt/pbs", )
for path in paths_list:
if os.path.isdir(path) and os.path.isfile(path + "/include/tm.h"):
return path
return None
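# Added sketch (not part of the Spack package): shows the directory stripping performed
# in _verbs_dir above for a hypothetical location of the ibv_devices utility; the path
# is an assumption used only for illustration.
def _verbs_prefix_demo(exe_path="/usr/local/bin/ibv_devices"):
    path = os.path.dirname(exe_path)  # drop the executable name -> .../bin
    path = os.path.dirname(path)      # drop the bin directory   -> install prefix
    if path == "/":
        path = "/usr"                 # headers usually live under /usr/include
    return path                       # -> "/usr/local" for the example path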
class Openmpi(AutotoolsPackage):
"""An open source Message Passing Interface implementation.
The Open MPI Project is an open source Message Passing Interface
implementation that is developed and maintained by a consortium
of academic, research, and industry partners. Open MPI is
therefore able to combine the expertise, technologies, and
resources from all across the High Performance Computing
community in order to build the best MPI library available.
Open MPI offers advantages for system and software vendors,
application developers and computer science researchers.
"""
homepage = "http://www.open-mpi.org"
url = "https://www.open-mpi.org/software/ompi/v4.0/downloads/openmpi-4.0.0.tar.bz2"
list_url = "http://www.open-mpi.org/software/ompi/"
git = "https://github.com/open-mpi/ompi.git"
maintainers = ['hppritcha']
version('master', branch='master')
# Current
version('4.0.3', sha256='1402feced8c3847b3ab8252165b90f7d1fa28c23b6b2ca4632b6e4971267fd03') # libmpi.so.40.20.3
# Still supported
version('4.0.2', sha256='900bf751be72eccf06de9d186f7b1c4b5c2fa9fa66458e53b77778dffdfe4057') # libmpi.so.40.20.2
version('4.0.1', sha256='cce7b6d20522849301727f81282201d609553103ac0b09162cf28d102efb9709') # libmpi.so.40.20.1
version('4.0.0', sha256='2f0b8a36cfeb7354b45dda3c5425ef8393c9b04115570b615213faaa3f97366b') # libmpi.so.40.20.0
version('3.1.6', preferred=True, sha256='50131d982ec2a516564d74d5616383178361c2f08fdd7d1202b80bdf66a0d279') # libmpi.so.40.10.4
version('3.1.5', sha256='fbf0075b4579685eec8d56d34d4d9c963e6667825548554f5bf308610af72133') # libmpi.so.40.10.4
version('3.1.4', sha256='17a69e0054db530c7dc119f75bd07d079efa147cf94bf27e590905864fe379d6') # libmpi.so.40.10.4
version('3.1.3', sha256='8be04307c00f51401d3fb9d837321781ea7c79f2a5a4a2e5d4eaedc874087ab6') # libmpi.so.40.10.3
version('3.1.2', sha256='c654ed847f34a278c52a15c98add40402b4a90f0c540779f1ae6c489af8a76c5') # libmpi.so.40.10.2
version('3.1.1', sha256='3f11b648dd18a8b878d057e9777f2c43bf78297751ad77ae2cef6db0fe80c77c') # libmpi.so.40.10.1
version('3.1.0', sha256='b25c044124cc859c0b4e6e825574f9439a51683af1950f6acda1951f5ccdf06c') # libmpi.so.40.10.0
version('3.0.5', sha256='f8976b95f305efc435aa70f906b82d50e335e34cffdbf5d78118a507b1c6efe8') # libmpi.so.40.00.5
version('3.0.4', sha256='2ff4db1d3e1860785295ab95b03a2c0f23420cda7c1ae845c419401508a3c7b5') # libmpi.so.40.00.5
version('3.0.3', sha256='fb228e42893fe6a912841a94cd8a0c06c517701ae505b73072409218a12cf066') # libmpi.so.40.00.4
version('3.0.2', sha256='d2eea2af48c1076c53cabac0a1f12272d7470729c4e1cb8b9c2ccd1985b2fb06') # libmpi.so.40.00.2
version('3.0.1', sha256='663450d1ee7838b03644507e8a76edfb1fba23e601e9e0b5b2a738e54acd785d') # libmpi.so.40.00.1
version('3.0.0', sha256='f699bff21db0125d8cccfe79518b77641cd83628725a1e1ed3e45633496a82d7') # libmpi.so.40.00.0
# Retired
version('2.1.6', sha256='98b8e1b8597bbec586a0da79fcd54a405388190247aa04d48e8c40944d4ca86e') # libmpi.so.20.10.3
version('2.1.5', sha256='b807ccab801f27c3159a5edf29051cd3331d3792648919f9c4cee48e987e7794') # libmpi.so.20.10.3
version('2.1.4', sha256='3e03695ca8bd663bc2d89eda343c92bb3d4fc79126b178f5ddcb68a8796b24e2') # libmpi.so.20.10.3
version('2.1.3', sha256='285b3e2a69ed670415524474496043ecc61498f2c63feb48575f8469354d79e8') # libmpi.so.20.10.2
version('2.1.2', sha256='3cc5804984c5329bdf88effc44f2971ed244a29b256e0011b8deda02178dd635') # libmpi.so.20.10.2
version('2.1.1', sha256='bd7badd4ff3afa448c0d7f3ca0ee6ce003b957e9954aa87d8e4435759b5e4d16') # libmpi.so.20.10.1
version('2.1.0', sha256='b169e15f5af81bf3572db764417670f508c0df37ce86ff50deb56bd3acb43957') # libmpi.so.20.10.0
version('2.0.4', sha256='4f82d5f7f294becbd737319f74801206b08378188a95b70abe706fdc77a0c20b') # libmpi.so.20.0.4
version('2.0.3', sha256='b52c0204c0e5954c9c57d383bb22b4181c09934f97783292927394d29f2a808a') # libmpi.so.20.0.3
version('2.0.2', sha256='cae396e643f9f91f0a795f8d8694adf7bacfb16f967c22fb39e9e28d477730d3') # libmpi.so.20.0.2
version('2.0.1', sha256='fed74f4ae619b7ebcc18150bb5bdb65e273e14a8c094e78a3fea0df59b9ff8ff') # libmpi.so.20.0.1
version('2.0.0', sha256='08b64cf8e3e5f50a50b4e5655f2b83b54653787bd549b72607d9312be44c18e0') # libmpi.so.20.0.0
version('1.10.7', sha256='a089ece151fec974905caa35b0a59039b227bdea4e7933069e94bee4ed0e5a90') # libmpi.so.12.0.7
version('1.10.6', sha256='65606184a084a0eda6102b01e5a36a8f02d3195d15e91eabbb63e898bd110354') # libmpi.so.12.0.6
version('1.10.5', sha256='a95fa355ed3a905c7c187bc452529a9578e2d6bae2559d8197544ab4227b759e') # libmpi.so.12.0.5
version('1.10.4', sha256='fb3c0c4c77896185013b6091b306d29ba592eb40d8395533da5c8bc300d922db') # libmpi.so.12.0.4
version('1.10.3', sha256='7484bb664312082fd12edc2445b42362089b53b17fb5fce12efd4fe452cc254d') # libmpi.so.12.0.3
version('1.10.2', sha256='8846e7e69a203db8f50af90fa037f0ba47e3f32e4c9ccdae2db22898fd4d1f59') # libmpi.so.12.0.2
version('1.10.1', sha256='7919ecde15962bab2e26d01d5f5f4ead6696bbcacb504b8560f2e3a152bfe492') # libmpi.so.12.0.1
version('1.10.0', sha256='26b432ce8dcbad250a9787402f2c999ecb6c25695b00c9c6ee05a306c78b6490') # libmpi.so.12.0.0
version('1.8.8', sha256='a28382d1e6a36f4073412dc00836ff2524e42b674da9caf6ca7377baad790b94') # libmpi.so.1.6.3
version('1.8.7', sha256='da629e9bd820a379cfafe15f842ee9b628d7451856085ccc23ee75ab3e1b48c7') # libmpi.so.1.6.2
version('1.8.6', sha256='b9fe3bdfb86bd42cc53448e17f11278531b989b05ff9513bc88ba1a523f14e87') # libmpi.so.1.6.1
version('1.8.5', sha256='4cea06a9eddfa718b09b8240d934b14ca71670c2dc6e6251a585ce948a93fbc4') # libmpi.so.1.6.0
version('1.8.4', sha256='23158d916e92c80e2924016b746a93913ba7fae9fff51bf68d5c2a0ae39a2f8a') # libmpi.so.1.6.0
version('1.8.3', sha256='2ef02dab61febeb74714ff80d508c00b05defc635b391ed2c8dcc1791fbc88b3') # libmpi.so.1.6.0
version('1.8.2', sha256='ab70770faf1bac15ef44301fe2186b02f857646545492dd7331404e364a7d131') # libmpi.so.1.5.2
version('1.8.1', sha256='171427ebc007943265f33265ec32e15e786763952e2bfa2eac95e3e192c1e18f') # libmpi.so.1.5.0
version('1.8', sha256='35d5db86f49c0c64573b2eaf6d51c94ed8a06a9bb23dda475e602288f05e4ecf') # libmpi.so.1.5.0
version('1.7.5', sha256='cb3eef6880537d341d5d098511d390ec853716a6ec94007c03a0d1491b2ac8f2') # libmpi.so.1.4.0
version('1.7.4', sha256='ff8e31046c5bacfc6202d67f2479731ccd8542cdd628583ae75874000975f45c') # libmpi.so.1.3.0
version('1.7.3', sha256='438d96c178dbf5a1bc92fa1d238a8225d87b64af26ce2a07789faaf312117e45') # libmpi.so.1.2.0
version('1.7.2', sha256='82a1c477dcadad2032ab24d9be9e39c1042865965841911f072c49aa3544fd85') # libmpi.so.1.1.2
version('1.7.1', sha256='554583008fa34ecdfaca22e46917cc3457a69cba08c29ebbf53eef4f4b8be171') # libmpi.so.1.1.1
version('1.7', sha256='542e44aaef6546798c0d39c0fd849e9fbcd04a762f0ab100638499643253a513') # libmpi.so.1.1.0
version('1.6.5', sha256='fe37bab89b5ef234e0ac82dc798282c2ab08900bf564a1ec27239d3f1ad1fc85') # libmpi.so.1.0.8
version('1.6.4', sha256='40cb113a27d76e1e915897661579f413564c032dc6e703073e6a03faba8093fa') # libmpi.so.1.0.7
version('1.6.3', sha256='0c30cfec0e420870630fdc101ffd82f7eccc90276bc4e182f8282a2448668798') # libmpi.so.1.0.6
version('1.6.2', sha256='5cc7744c6cc4ec2c04bc76c8b12717c4011822a2bd7236f2ea511f09579a714a') # libmpi.so.1.0.3
version('1.6.1', sha256='077240dd1ab10f0caf26931e585db73848e9815c7119b993f91d269da5901e3a') # libmpi.so.1.0.3
version('1.6', sha256='6e0d8b336543fb9ab78c97d364484923167857d30b266dfde1ccf60f356b9e0e') # libmpi.so.1.0.3
# Ancient
version('1.5.5', sha256='660e6e49315185f88a87b6eae3d292b81774eab7b29a9b058b10eb35d892ff23') # libmpi.so.1.0.3
version('1.5.4', sha256='81126a95a51b8af4bb0ad28790f852c30d22d989713ec30ad22e9e0a79587ef6') # libmpi.so.1.0.2
version('1.5.3', sha256='70745806cdbe9b945d47d9fa058f99e072328e41e40c0ced6dd75220885c5263') # libmpi.so.1.0.1
version('1.5.2', sha256='7123b781a9fd21cc79870e7fe01e9c0d3f36935c444922661e24af523b116ab1') # libmpi.so.1.0.1
version('1.5.1', sha256='c28bb0bd367ceeec08f739d815988fca54fc4818762e6abcaa6cfedd6fd52274') # libmpi.so.1.0.0
version('1.5', sha256='1882b1414a94917ec26b3733bf59da6b6db82bf65b5affd7f0fcbd96efaca506') # libmpi.so.1.0.0
version('1.4.5', sha256='a3857bc69b7d5258cf7fc1ed1581d9ac69110f5c17976b949cb7ec789aae462d') # libmpi.so.0.0.4
version('1.4.4', sha256='9ad125304a89232d5b04da251f463fdbd8dcd997450084ba4227e7f7a095c3ed') # libmpi.so.0.0.3
version('1.4.3', sha256='220b72b1c7ee35469ff74b4cfdbec457158ac6894635143a33e9178aa3981015') # libmpi.so.0.0.2
version('1.4.2', sha256='19129e3d51860ad0a7497ede11563908ba99c76b3a51a4d0b8801f7e2db6cd80') # libmpi.so.0.0.2
version('1.4.1', sha256='d4d71d7c670d710d2d283ea60af50d6c315318a4c35ec576bedfd0f3b7b8c218') # libmpi.so.0.0.1
version('1.4', sha256='fa55edef1bd8af256e459d4d9782516c6998b9d4698eda097d5df33ae499858e') # libmpi.so.0.0.1
version('1.3.4', sha256='fbfe4b99b0c98f81a4d871d02f874f84ea66efcbb098f6ad84ebd19353b681fc') # libmpi.so.0.0.1
version('1.3.3', sha256='e1425853282da9237f5b41330207e54da1dc803a2e19a93dacc3eca1d083e422') # libmpi.so.0.0.0
version('1.3.2', sha256='c93ed90962d879a2923bed17171ed9217036ee1279ffab0925ea7eead26105d8') # libmpi.so.0.0.0
version('1.3.1', sha256='22d18919ddc5f49d55d7d63e2abfcdac34aa0234427e861e296a630c6c11632c') # libmpi.so.0.0.0
version('1.3', sha256='864706d88d28b586a045461a828962c108f5912671071bc3ef0ca187f115e47b') # libmpi.so.0.0.0
version('1.2.9', sha256='0eb36abe09ba7bf6f7a70255974e5d0a273f7f32d0e23419862c6dcc986f1dff') # libmpi.so.0.0.0
version('1.2.8', sha256='75b286cb3b1bf6528a7e64ee019369e0601b8acb5c3c167a987f755d1e41c95c') # libmpi.so.0.0.0
version('1.2.7', sha256='d66c7f0bb11494023451651d0e61afaef9d2199ed9a91ed08f0dedeb51541c36') # libmpi.so.0.0.0
version('1.2.6', sha256='e5b27af5a153a257b1562a97bbf7164629161033934558cefd8e1e644a9f73d3') # libmpi.so.0.0.0
version('1.2.5', sha256='3c3aed872c17165131c77bd7a12fe8aec776cb23da946b7d12840db93ab79322') # libmpi.so.0.0.0
version('1.2.4', sha256='594a3a0af69cc7895e0d8f9add776a44bf9ed389794323d0b1b45e181a97e538') # libmpi.so.0.0.0
version('1.2.3', sha256='f936ca3a197e5b2d1a233b7d546adf07898127683b03c4b37cf31ae22a6f69bb') # libmpi.so.0.0.0
version('1.2.2', sha256='aa763e0e6a6f5fdff8f9d3fc988a4ba0ed902132d292c85aef392cc65bb524e6') # libmpi.so.0.0.0
version('1.2.1', sha256='a94731d84fb998df33960e0b57ea5661d35e7c7cd9d03d900f0b6a5a72e4546c') # libmpi.so.0.0.0
version('1.2', sha256='ba0bfa3dec2ead38a3ed682ca36a0448617b8e29191ab3f48c9d0d24d87d14c0') # libmpi.so.0.0.0
version('1.1.5', sha256='913deaedf3498bd5d06299238ec4d048eb7af9c3afd8e32c12f4257c8b698a91') # libmpi.so.0.0.0
version('1.1.4', sha256='21c37f85df7e959f17cc7cb5571d8db2a94ed2763e3e96e5d052aff2725c1d18') # libmpi.so.0.0.0
version('1.1.3', sha256='c33f8f5e65cfe872173616cca27ae8dc6d93ea66e0708118b9229128aecc174f') # libmpi.so.0.0.0
version('1.1.2', sha256='3bd8d9fe40b356be7f9c3d336013d3865f8ca6a79b3c6e7ef28784f6c3a2b8e6') # libmpi.so.0.0.0
version('1.1.1', sha256='dc31aaec986c4ce436dbf31e73275ed1a9391432dcad7609de8d0d3a78d2c700') # libmpi.so.0.0.0
version('1.1', sha256='ebe14801d2caeeaf47b0e437b43f73668b711f4d3fcff50a0d665d4bd4ea9531') # libmpi.so.0.0.0
version('1.0.2', sha256='ccd1074d7dd9566b73812d9882c84e446a8f4c77b6f471d386d3e3b9467767b8') # libmpi.so.0.0.0
version('1.0.1', sha256='f801b7c8ea6c485ac0709a628a479aeafa718a205ed6bc0cf2c684bc0cc73253') # libmpi.so.0.0.0
version('1.0', sha256='cf75e56852caebe90231d295806ac3441f37dc6d9ad17b1381791ebb78e21564') # libmpi.so.0.0.0
patch('ad_lustre_rwcontig_open_source.patch', when="@1.6.5")
patch('llnl-platforms.patch', when="@1.6.5")
patch('configure.patch', when="@1.10.1")
patch('fix_multidef_pmi_class.patch', when="@2.0.0:2.0.1")
patch('fix-ucx-1.7.0-api-instability.patch', when='@4.0.0:4.0.2')
# Vader Bug: https://github.com/open-mpi/ompi/issues/5375
    # No fix has been released for the 2.1.x series
patch('btl_vader.patch', when='@2.1.3:2.1.5')
# Fixed in 3.0.3 and 3.1.3
patch('btl_vader.patch', when='@3.0.1:3.0.2')
patch('btl_vader.patch', when='@3.1.0:3.1.2')
# Reported upstream: https://github.com/open-mpi/ompi/pull/6378
# We support only versions based on Libtool 2.4.6.
patch('nag_ltmain_1.patch', when='@2.1.4:2.1.999,3.0.1:4%nag')
patch('nag_ltmain_2.patch', when='@2.1.2:2.1.3,3.0.0%nag')
patch('nag_ltmain_3.patch', when='@2.0.0:2.1.1%nag')
patch('nag_ltmain_4.patch', when='@1.10.4:1.10.999%nag')
variant(
'fabrics',
values=disjoint_sets(
('auto',), ('psm', 'psm2', 'verbs', 'mxm', 'ucx', 'libfabric')
).with_non_feature_values('auto', 'none'),
description="List of fabrics that are enabled; "
"'auto' lets openmpi determine",
)
variant(
'schedulers',
values=disjoint_sets(
('auto',), ('alps', 'lsf', 'tm', 'slurm', 'sge', 'loadleveler')
).with_non_feature_values('auto', 'none'),
description="List of schedulers for which support is enabled; "
"'auto' lets openmpi determine",
)
# Additional support options
variant('atomics', default=False, description='Enable built-in atomics')
variant('java', default=False, description='Build Java support')
variant('static', default=True, description='Build static libraries')
variant('sqlite3', default=False, description='Build SQLite3 support')
variant('vt', default=True, description='Build VampirTrace support')
variant('thread_multiple', default=False,
description='Enable MPI_THREAD_MULTIPLE support')
variant('cuda', default=False, description='Enable CUDA support')
variant('pmi', default=False, description='Enable PMI support')
variant('runpath', default=True, description='Enable wrapper runpath')
variant('cxx', default=False, description='Enable C++ MPI bindings')
variant('cxx_exceptions', default=False, description='Enable C++ Exception support')
variant('gpfs', default=True, description='Enable GPFS support (if present)')
# Adding support to build a debug version of OpenMPI that activates
# Memchecker, as described here:
#
# https://www.open-mpi.org/faq/?category=debugging#memchecker_what
#
# This option degrades run-time support, and thus is disabled by default
variant(
'memchecker',
default=False,
description='Memchecker support for debugging [degrades performance]'
)
variant(
'legacylaunchers',
default=False,
description='Do not remove mpirun/mpiexec when building with slurm'
)
provides('mpi')
provides('mpi@:2.2', when='@1.6.5')
provides('mpi@:3.0', when='@1.7.5:')
provides('mpi@:3.1', when='@2.0.0:')
if sys.platform != 'darwin':
depends_on('numactl')
depends_on('autoconf', type='build', when='@develop')
depends_on('automake', type='build', when='@develop')
depends_on('libtool', type='build', when='@develop')
depends_on('m4', type='build', when='@develop')
depends_on('perl', type='build', when='@develop')
depends_on('hwloc')
# ompi@:3.0.0 doesn't support newer hwloc releases:
# "configure: error: OMPI does not currently support hwloc v2 API"
# Future ompi releases may support it, needs to be verified.
# See #7483 for context.
depends_on('hwloc@:1.999')
depends_on('hwloc +cuda', when='+cuda')
depends_on('java', when='+java')
depends_on('sqlite', when='+sqlite3@:1.11')
depends_on('zlib', when='@3.0.0:')
depends_on('valgrind~mpi', when='+memchecker')
depends_on('ucx', when='fabrics=ucx')
depends_on('ucx +thread_multiple', when='fabrics=ucx +thread_multiple')
depends_on('ucx +thread_multiple', when='@3.0.0: fabrics=ucx')
depends_on('libfabric', when='fabrics=libfabric')
depends_on('slurm', when='schedulers=slurm')
depends_on('lsf', when='schedulers=lsf')
depends_on('binutils+libiberty', when='fabrics=mxm')
conflicts('+cuda', when='@:1.6') # CUDA support was added in 1.7
conflicts('fabrics=psm2', when='@:1.8') # PSM2 support was added in 1.10.0
conflicts('fabrics=mxm', when='@:1.5.3') # MXM support was added in 1.5.4
conflicts('+pmi', when='@:1.5.4') # PMI support was added in 1.5.5
conflicts('schedulers=slurm ~pmi', when='@1.5.4:',
msg='+pmi is required for openmpi(>=1.5.5) to work with SLURM.')
conflicts('schedulers=loadleveler', when='@3.0.0:',
msg='The loadleveler scheduler is not supported with '
'openmpi(>=3.0.0).')
conflicts('+cxx', when='@5:',
msg='C++ MPI bindings are removed in 5.0.X release')
conflicts('+cxx_exceptions', when='@5:',
msg='C++ exceptions are removed in 5.0.X release')
filter_compiler_wrappers('openmpi/*-wrapper-data*', relative_root='share')
conflicts('fabrics=libfabric', when='@:1.8') # libfabric support was added in 1.10.0
# It may be worth considering making libfabric an exclusive fabrics choice
def url_for_version(self, version):
url = "http://www.open-mpi.org/software/ompi/v{0}/downloads/openmpi-{1}.tar.bz2"
return url.format(version.up_to(2), version)
@property
def headers(self):
hdrs = HeaderList(find(self.prefix.include, 'mpi.h', recursive=False))
if not hdrs:
hdrs = HeaderList(find(self.prefix, 'mpi.h', recursive=True))
return hdrs or None
@property
def libs(self):
query_parameters = self.spec.last_query.extra_parameters
libraries = ['libmpi']
if 'cxx' in query_parameters:
libraries = ['libmpi_cxx'] + libraries
return find_libraries(
libraries, root=self.prefix, shared=True, recursive=True
)
def setup_dependent_build_environment(self, env, dependent_spec):
env.set('MPICC', join_path(self.prefix.bin, 'mpicc'))
env.set('MPICXX', join_path(self.prefix.bin, 'mpic++'))
env.set('MPIF77', join_path(self.prefix.bin, 'mpif77'))
env.set('MPIF90', join_path(self.prefix.bin, 'mpif90'))
env.set('OMPI_CC', spack_cc)
env.set('OMPI_CXX', spack_cxx)
env.set('OMPI_FC', spack_fc)
env.set('OMPI_F77', spack_f77)
def setup_dependent_package(self, module, dependent_spec):
self.spec.mpicc = join_path(self.prefix.bin, 'mpicc')
self.spec.mpicxx = join_path(self.prefix.bin, 'mpic++')
self.spec.mpifc = join_path(self.prefix.bin, 'mpif90')
self.spec.mpif77 = join_path(self.prefix.bin, 'mpif77')
self.spec.mpicxx_shared_libs = [
join_path(self.prefix.lib, 'libmpi_cxx.{0}'.format(dso_suffix)),
join_path(self.prefix.lib, 'libmpi.{0}'.format(dso_suffix))
]
def with_or_without_verbs(self, activated):
# Up through version 1.6, this option was previously named
# --with-openib
opt = 'openib'
# In version 1.7, it was renamed to be --with-verbs
if self.spec.satisfies('@1.7:'):
opt = 'verbs'
# If the option has not been activated return
# --without-openib or --without-verbs
if not activated:
return '--without-{0}'.format(opt)
line = '--with-{0}'.format(opt)
path = _verbs_dir()
if (path is not None) and (path not in ('/usr', '/usr/local')):
line += '={0}'.format(path)
return line
def with_or_without_mxm(self, activated):
opt = 'mxm'
# If the option has not been activated return --without-mxm
if not activated:
return '--without-{0}'.format(opt)
line = '--with-{0}'.format(opt)
path = _mxm_dir()
if path is not None:
line += '={0}'.format(path)
return line
def with_or_without_tm(self, activated):
opt = 'tm'
# If the option has not been activated return --without-tm
if not activated:
return '--without-{0}'.format(opt)
line = '--with-{0}'.format(opt)
path = _tm_dir()
if path is not None:
line += '={0}'.format(path)
return line
@run_before('autoreconf')
def die_without_fortran(self):
# Until we can pass variants such as +fortran through virtual
# dependencies depends_on('mpi'), require Fortran compiler to
# avoid delayed build errors in dependents.
if (self.compiler.f77 is None) or (self.compiler.fc is None):
raise InstallError(
'OpenMPI requires both C and Fortran compilers!'
)
@when('@develop')
def autoreconf(self, spec, prefix):
perl = which('perl')
perl('autogen.pl')
def setup_build_environment(self, env):
if '~gpfs' in self.spec:
env.set('ac_cv_header_gpfs_h', 'no')
env.set('ac_cv_header_gpfs_fcntl_h', 'no')
def configure_args(self):
spec = self.spec
config_args = [
'--enable-shared',
'--disable-silent-rules'
]
# Add extra_rpaths dirs from compilers.yaml into link wrapper
rpaths = [self.compiler.cc_rpath_arg + path
for path in self.compiler.extra_rpaths]
config_args.extend([
'--with-wrapper-ldflags={0}'.format(' '.join(rpaths))
])
if '+atomics' in spec:
config_args.append('--enable-builtin-atomics')
else:
config_args.append('--disable-builtin-atomics')
# According to this comment on github:
#
# https://github.com/open-mpi/ompi/issues/4338#issuecomment-383982008
#
# adding --enable-static silently disables slurm support via pmi/pmi2
# for versions older than 3.0.3,3.1.3,4.0.0
# Presumably future versions after 11/2018 should support slurm+static
if spec.satisfies('schedulers=slurm'):
config_args.append('--with-pmi={0}'.format(spec['slurm'].prefix))
if spec.satisfies('@3.1.3:') or spec.satisfies('@3.0.3'):
if '+static' in spec:
config_args.append('--enable-static')
else:
if '+static' in spec:
config_args.append('--enable-static')
else:
config_args.append('--disable-static')
config_args.extend(self.with_or_without('pmi'))
if spec.satisfies('@3.0.0:', strict=True):
config_args.append('--with-zlib={0}'.format(spec['zlib'].prefix))
if spec.satisfies('@4.0.0:4.0.2'):
# uct btl doesn't work with some UCX versions so just disable
config_args.append('--enable-mca-no-build=btl-uct')
        # some scientific packages ignore deprecated/removed symbols. Re-enable
# them for now, for discussion see
# https://github.com/open-mpi/ompi/issues/6114#issuecomment-446279495
if spec.satisfies('@4.0.1:'):
config_args.append('--enable-mpi1-compatibility')
# Fabrics
if 'fabrics=auto' not in spec:
config_args.extend(self.with_or_without('fabrics'))
# The wrappers fail to automatically link libfabric. This will cause
# undefined references unless we add the appropriate flags.
if 'fabrics=libfabric' in spec:
config_args.append('--with-wrapper-ldflags=-L{0} -Wl,-rpath={0}'
.format(spec['libfabric'].prefix.lib))
config_args.append('--with-wrapper-libs=-lfabric')
# Schedulers
if 'schedulers=auto' not in spec:
config_args.extend(self.with_or_without('schedulers'))
config_args.extend(self.enable_or_disable('memchecker'))
if spec.satisfies('+memchecker', strict=True):
config_args.extend([
'--enable-debug',
'--with-valgrind={0}'.format(spec['valgrind'].prefix),
])
# Hwloc support
if spec.satisfies('@1.5.2:'):
config_args.append('--with-hwloc={0}'.format(spec['hwloc'].prefix))
# Java support
if spec.satisfies('@1.7.4:'):
if '+java' in spec:
config_args.extend([
'--enable-java',
'--enable-mpi-java',
'--with-jdk-dir={0}'.format(spec['java'].home)
])
else:
config_args.extend([
'--disable-java',
'--disable-mpi-java'
])
# SQLite3 support
if spec.satisfies('@1.7.3:1.999'):
if '+sqlite3' in spec:
config_args.append('--with-sqlite3')
else:
config_args.append('--without-sqlite3')
# VampirTrace support
if spec.satisfies('@1.3:1.999'):
if '+vt' not in spec:
config_args.append('--enable-contrib-no-build=vt')
# Multithreading support
if spec.satisfies('@1.5.4:2.999'):
if '+thread_multiple' in spec:
config_args.append('--enable-mpi-thread-multiple')
else:
config_args.append('--disable-mpi-thread-multiple')
# CUDA support
# See https://www.open-mpi.org/faq/?category=buildcuda
if spec.satisfies('@1.7:'):
if '+cuda' in spec:
# OpenMPI dynamically loads libcuda.so, requires dlopen
config_args.append('--enable-dlopen')
# Searches for header files in DIR/include
config_args.append('--with-cuda={0}'.format(
spec['cuda'].prefix))
if spec.satisfies('@1.7:1.7.2'):
# This option was removed from later versions
config_args.append('--with-cuda-libdir={0}'.format(
spec['cuda'].libs.directories[0]))
if spec.satisfies('@1.7.2'):
# There was a bug in 1.7.2 when --enable-static is used
config_args.append('--enable-mca-no-build=pml-bfo')
if spec.satisfies('%pgi^cuda@7.0:7.999'):
# OpenMPI has problems with CUDA 7 and PGI
config_args.append(
'--with-wrapper-cflags=-D__LP64__ -ta:tesla')
if spec.satisfies('%pgi@:15.8'):
# With PGI 15.9 and later compilers, the
# CFLAGS=-D__LP64__ is no longer needed.
config_args.append('CFLAGS=-D__LP64__')
else:
config_args.append('--without-cuda')
if '+runpath' in spec:
config_args.append('--enable-wrapper-rpath')
config_args.append('--enable-wrapper-runpath')
else:
config_args.append('--disable-wrapper-rpath')
config_args.append('--disable-wrapper-runpath')
if spec.satisfies('@:4'):
if '+cxx' in spec:
config_args.append('--enable-mpi-cxx')
else:
config_args.append('--disable-mpi-cxx')
if '+cxx_exceptions' in spec:
config_args.append('--enable-cxx-exceptions')
else:
config_args.append('--disable-cxx-exceptions')
return config_args
@run_after('install')
def delete_mpirun_mpiexec(self):
# The preferred way to run an application when Slurm is the
# scheduler is to let Slurm manage process spawning via PMI.
#
# Deleting the links to orterun avoids users running their
# applications via mpirun or mpiexec, and leaves srun as the
# only sensible choice (orterun is still present, but normal
# users don't know about that).
if '@1.6: ~legacylaunchers schedulers=slurm' in self.spec:
exe_list = [self.prefix.bin.mpirun,
self.prefix.bin.mpiexec,
self.prefix.bin.shmemrun,
self.prefix.bin.oshrun
]
script_stub = join_path(os.path.dirname(__file__),
"nolegacylaunchers.sh")
for exe in exe_list:
try:
os.remove(exe)
except OSError:
tty.debug("File not present: " + exe)
else:
copy(script_stub, exe)
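# Added sketch (not part of the Spack package): mirrors, outside of Spack, the configure
# flag construction performed by with_or_without_verbs above. The argument names and the
# example path are assumptions used only for illustration.
def _verbs_flag_demo(version_is_1_7_or_newer=True, activated=True,
                     detected_path='/opt/ofed'):
    opt = 'verbs' if version_is_1_7_or_newer else 'openib'
    if not activated:
        return '--without-{0}'.format(opt)
    line = '--with-{0}'.format(opt)
    if detected_path is not None and detected_path not in ('/usr', '/usr/local'):
        line += '={0}'.format(detected_path)
    return line  # e.g. '--with-verbs=/opt/ofed' for the defaults above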
| 50.878333
| 132
| 0.664723
|
51f6d08709e679824f88d724557ae6327740cddd
| 171
|
py
|
Python
|
pacote-download/PythonTeste/desafio060.py
|
Mariana02Santos/Python
|
a7bf3489b58d691578f888719c1ad6227ad39d74
|
[
"MIT"
] | null | null | null |
pacote-download/PythonTeste/desafio060.py
|
Mariana02Santos/Python
|
a7bf3489b58d691578f888719c1ad6227ad39d74
|
[
"MIT"
] | null | null | null |
pacote-download/PythonTeste/desafio060.py
|
Mariana02Santos/Python
|
a7bf3489b58d691578f888719c1ad6227ad39d74
|
[
"MIT"
] | null | null | null |
n = int(input('Enter an integer and I will calculate its factorial: '))
x = n
m = n
while n > 1:
m = m * (n-1)
n = n - 1
print('{}! = {}'.format(x,m))
| 21.375
| 83
| 0.54386
|
d0c743261507903f811f01c5d50da6a2c14fecff
| 278
|
py
|
Python
|
opac/models/transactions/__init__.py
|
rimphyd/Django-OPAC
|
d86f2e28fee7f2ec551aeeb98ec67caefc06a3fb
|
[
"MIT"
] | 1
|
2020-11-26T05:25:46.000Z
|
2020-11-26T05:25:46.000Z
|
opac/models/transactions/__init__.py
|
rimphyd/Django-OPAC
|
d86f2e28fee7f2ec551aeeb98ec67caefc06a3fb
|
[
"MIT"
] | null | null | null |
opac/models/transactions/__init__.py
|
rimphyd/Django-OPAC
|
d86f2e28fee7f2ec551aeeb98ec67caefc06a3fb
|
[
"MIT"
] | null | null | null |
from opac.models.transactions.holding import Holding # noqa: F401
from opac.models.transactions.lending import Lending # noqa: F401
from opac.models.transactions.renewing import Renewing # noqa: F401
from opac.models.transactions.reservation import Reservation # noqa: F401
| 55.6
| 74
| 0.81295
|
5582ebc5bb602cc9a5110b4266fb004c0dff70fd
| 1,030
|
py
|
Python
|
workspaces/migrations/0003_auto_20200427_0851.py
|
ralphleyga/sleeknotes
|
b92c8dd12021eb0bfddcdf5dc046b8173930b68b
|
[
"MIT"
] | null | null | null |
workspaces/migrations/0003_auto_20200427_0851.py
|
ralphleyga/sleeknotes
|
b92c8dd12021eb0bfddcdf5dc046b8173930b68b
|
[
"MIT"
] | 4
|
2021-03-19T02:32:55.000Z
|
2021-06-10T19:03:45.000Z
|
workspaces/migrations/0003_auto_20200427_0851.py
|
ralphleyga/sleeknotes
|
b92c8dd12021eb0bfddcdf5dc046b8173930b68b
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.5 on 2020-04-27 08:51
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('workspaces', '0002_workspacechannel_channel_id'),
]
operations = [
migrations.CreateModel(
name='WorkSpaceUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=200)),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AlterField(
model_name='note',
name='username',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='workspaces.WorkSpaceUser'),
),
]
| 34.333333
| 144
| 0.643689
|
fc2435f286d44770818ebf13c3f8ebae06ec3817
| 2,438
|
py
|
Python
|
tracker/dmlc_tracker/ssh.py
|
rasomuro/rabit
|
a19a15cfaabbc43aa886abcfce08e4b73b917354
|
[
"Apache-2.0"
] | 1
|
2020-07-31T15:32:43.000Z
|
2020-07-31T15:32:43.000Z
|
tracker/dmlc_tracker/ssh.py
|
rasomuro/rabit
|
a19a15cfaabbc43aa886abcfce08e4b73b917354
|
[
"Apache-2.0"
] | null | null | null |
tracker/dmlc_tracker/ssh.py
|
rasomuro/rabit
|
a19a15cfaabbc43aa886abcfce08e4b73b917354
|
[
"Apache-2.0"
] | 1
|
2020-07-31T15:32:44.000Z
|
2020-07-31T15:32:44.000Z
|
#!/usr/bin/env python
"""
DMLC submission script by ssh
One needs to make sure all slave machines are ssh-able.
"""
from __future__ import absolute_import
import os, subprocess, logging
from threading import Thread
from . import tracker
def sync_dir(local_dir, slave_node, slave_dir):
"""
    sync the working directory from the root node to a slave node
"""
remote = slave_node + ':' + slave_dir
logging.info('rsync %s -> %s', local_dir, remote)
prog = 'rsync -az --rsh="ssh -o StrictHostKeyChecking=no" %s %s' % (
local_dir, remote)
subprocess.check_call([prog], shell = True)
def get_env(pass_envs):
envs = []
# get system envs
keys = ['LD_LIBRARY_PATH', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY']
for k in keys:
v = os.getenv(k)
if v is not None:
envs.append('export ' + k + '=' + v + ';')
    # get pass_envs
for k, v in pass_envs.items():
envs.append('export ' + str(k) + '=' + str(v) + ';')
return (' '.join(envs))
def submit(args):
assert args.host_file is not None
with open(args.host_file) as f:
tmp = f.readlines()
assert len(tmp) > 0
hosts=[]
for h in tmp:
if len(h.strip()) > 0:
hosts.append(h.strip())
def ssh_submit(nworker, nserver, pass_envs):
"""
customized submit script
"""
# thread func to run the job
def run(prog):
subprocess.check_call(prog, shell = True)
# sync programs if necessary
local_dir = os.getcwd()+'/'
working_dir = local_dir
if args.sync_dst_dir is not None and args.sync_dst_dir != 'None':
working_dir = args.sync_dst_dir
for h in hosts:
sync_dir(local_dir, h, working_dir)
# launch jobs
for i in range(nworker + nserver):
pass_envs['DMLC_ROLE'] = 'server' if i < nserver else 'worker'
node = hosts[i % len(hosts)]
prog = get_env(pass_envs) + ' cd ' + working_dir + '; ' + (' '.join(args.command))
prog = 'ssh -o StrictHostKeyChecking=no ' + node + ' \'' + prog + '\''
thread = Thread(target = run, args=(prog,))
thread.setDaemon(True)
thread.start()
return ssh_submit
tracker.submit(args.num_workers, args.num_servers,
fun_submit=ssh_submit,
pscmd=(' '.join(args.command)))
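# Added sketch (not part of the original script): shows how the loop in ssh_submit above
# assigns roles and hosts when servers and workers are launched round robin over the
# host list. Host names and counts are assumptions chosen only for illustration.
def _role_assignment_demo(hosts=('host-a', 'host-b'), nworker=3, nserver=2):
    assignment = []
    for i in range(nworker + nserver):
        role = 'server' if i < nserver else 'worker'
        assignment.append((hosts[i % len(hosts)], role))
    # -> [('host-a', 'server'), ('host-b', 'server'), ('host-a', 'worker'),
    #     ('host-b', 'worker'), ('host-a', 'worker')]
    return assignment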
| 31.662338
| 94
| 0.578343
|
7a2ed416f9a18e3b193a4ddfce592da821af0aab
| 491
|
py
|
Python
|
ABC_C/ABC088_C.py
|
ryosuke0825/atcoder_python
|
185cdbe7db44ecca1aaf357858d16d31ce515ddb
|
[
"MIT"
] | null | null | null |
ABC_C/ABC088_C.py
|
ryosuke0825/atcoder_python
|
185cdbe7db44ecca1aaf357858d16d31ce515ddb
|
[
"MIT"
] | null | null | null |
ABC_C/ABC088_C.py
|
ryosuke0825/atcoder_python
|
185cdbe7db44ecca1aaf357858d16d31ce515ddb
|
[
"MIT"
] | null | null | null |
C = []
for _ in range(3):
c1, c2, c3 = map(int, input().split())
tmp = [c1, c2, c3]
C.append(tmp)
if not(C[0][0]-C[0][1] == C[1][0]-C[1][1] == C[2][0]-C[2][1]):
print('No')
exit(0)
if not(C[0][1]-C[0][2] == C[1][1]-C[1][2] == C[2][1]-C[2][2]):
print('No')
exit(0)
if not(C[0][0]-C[1][0] == C[0][1]-C[1][1] == C[0][2]-C[1][2]):
print('No')
exit(0)
if not(C[1][0]-C[2][0] == C[1][1]-C[2][1] == C[1][2]-C[2][2]):
print('No')
exit(0)
print('Yes')
| 21.347826
| 62
| 0.411405
|
d77ea06c1f9e89252d5f699a11cf94fdcbd58591
| 24,586
|
py
|
Python
|
scripts_for_improve_Elevation/smote.py
|
bm2-lab/CRISPR-off-target-data-imbalance
|
344dc93da988057c877d686a744afe740c416fd4
|
[
"MIT"
] | 2
|
2018-12-26T06:54:14.000Z
|
2018-12-26T06:54:20.000Z
|
scripts_for_improve_Elevation/smote.py
|
bm2-lab/CRISPR-off-target-data-imbalance
|
344dc93da988057c877d686a744afe740c416fd4
|
[
"MIT"
] | null | null | null |
scripts_for_improve_Elevation/smote.py
|
bm2-lab/CRISPR-off-target-data-imbalance
|
344dc93da988057c877d686a744afe740c416fd4
|
[
"MIT"
] | 1
|
2021-03-12T03:40:01.000Z
|
2021-03-12T03:40:01.000Z
|
import elevation.prediction_pipeline as pp
import elevation
import random
random.seed(233)
from sklearn.neighbors import NearestNeighbors
import numpy as np
import elevation
import pandas
import azimuth
import joblib
import logging
from joblib import Memory
from elevation.model_comparison import *
import copy
import scipy.stats as ss
from sklearn.grid_search import ParameterGrid
import sklearn.linear_model
import scipy as sp
import scipy.stats
import elevation.models
import elevation.features
# import GPy
import socket
from elevation.stacker import *
import elevation.util as ut
from sklearn.metrics import auc, roc_curve
from elevation import settings
import sklearn.isotonic
from sklearn.cross_validation import StratifiedKFold
import sklearn.pipeline
import sklearn.preprocessing
import pandas as pd
from elevation.cmds.predict import Predict
from elevation import options
import os
import pickle
import matplotlib
from sklearn.metrics import roc_auc_score, roc_curve, precision_recall_curve, average_precision_score
def filter_pam_out_of_muts(data, i):
tmp_muts = data['mut positions'].iloc[i]
# because Hsu-Zhang ignores alternate PAMs which we have encoded with '22'
pam_pos = 22
if pam_pos in tmp_muts:
tmp_muts.remove(pam_pos)
tmp_muts = np.array(tmp_muts)
num_m = len(tmp_muts)
return num_m, tmp_muts
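# Added sketch (not part of the original script): filter_pam_out_of_muts above removes
# position 22 (the encoding used for alternate PAMs) before the Hsu-Zhang and CCTOP
# scores are computed. The toy positions below are assumptions used only for
# illustration.
def _filter_pam_demo(mut_positions=(3, 12, 22)):
    muts = [p for p in mut_positions if p != 22]
    return len(muts), np.array(muts)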
def predict(model, data, learn_options, learn_options_override=None, verbose=False):
if learn_options_override is None:
learn_options_override = learn_options
predictions, model, learn_options, _tmpdata, feature_names, all_predictions_ind = predict_elevation(data=data,
model=(model,
learn_options),
model_file=None,
pam_audit=False,
learn_options_override=learn_options_override,
force_zero_intercept=False,
naive_bayes_combine=True,
verbose=verbose)
return predictions, all_predictions_ind
class Smote:
"""
SMOTE
Parameters:
-----------
    k: int
        number of nearest neighbours used when interpolating new samples.
    sampling_rate: int
        number of synthetic samples generated per positive sample;
        note that it should be smaller than k.
    newindex: int
        running index into the arrays of synthetic samples.
"""
def __init__(self, sampling_rate=5, k=5):
self.sampling_rate = sampling_rate
self.k = k
self.newindex = 0
    # generate self.sampling_rate synthetic samples around sample i by interpolating
    # towards its k nearest neighbours
def synthetic_samples(self, X, i, k_neighbors, y=None):
for j in range(self.sampling_rate):
            # randomly pick one of the k nearest neighbours of sample i
neighbor = np.random.choice(k_neighbors)
            # difference vector between the chosen neighbour and sample i
diff = X[neighbor] - X[i]
            # interpolate the features (and, with a separate random factor, the target)
            # between sample i and the neighbour
self.synthetic_X[self.newindex] = X[i] + random.random() * diff
self.synthetic_y[self.newindex] = y[i] + random.random() * (y[neighbor] - y[i])
self.newindex += 1
def fit(self, X, y=None):
if y is not None:
negative_X = X[y == 0]
X = X[y != 0]
n_samples, n_features = X.shape
        # allocate arrays for the synthetic samples and their target values
self.synthetic_X = np.zeros((n_samples * self.sampling_rate, n_features))
self.synthetic_y = np.zeros(n_samples * self.sampling_rate)
        # fit a nearest-neighbour search on the positive samples
knn = NearestNeighbors(n_neighbors=self.k).fit(X)
for i in range(len(X)):
print(i)
k_neighbors = knn.kneighbors(X[i].reshape(1, -1),
return_distance=False)[0]
            # generate sampling_rate synthetic samples around sample i
self.synthetic_samples(X, i, k_neighbors, y)
if y is not None:
return (np.concatenate((self.synthetic_X, X, negative_X), axis=0),
np.concatenate((self.synthetic_y, y[y != 0], y[y == 0]), axis=0))
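# Added usage sketch (not part of the original script): exercises the Smote class above
# on a small random data set. The shapes and the sampling_rate / k values are
# assumptions chosen only for illustration; below, the pipeline applies Smote to the
# guide-seq stacker features.
def _smote_demo():
    rng = np.random.RandomState(0)
    X_demo = rng.rand(50, 4)
    y_demo = np.concatenate([np.zeros(40), rng.rand(10) + 0.1])  # mostly zero targets
    sm = Smote(sampling_rate=3, k=5)
    X_aug, y_aug = sm.fit(X_demo, y_demo)
    # 10 positive rows * sampling_rate synthetic rows are prepended to the originals
    return X_aug.shape, y_aug.shape  # -> ((80, 4), (80,))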
def stacked_predictions(data, preds_base_model,
models=['product', 'CFD', 'constant-power', 'linear-raw-stacker', 'linreg-stacker',
'RF-stacker', 'GP-stacker', 'raw GP'],
truth=None, guideseq_data=None, preds_guideseq=None, prob_calibration_model=None,
learn_options=None, return_model=False, trained_model=None,
models_to_calibrate=None, return_residuals=False): # , dnase_train=None, dnase_test=None):
predictions = dict([(m, None) for m in models])
num_mismatches = np.array([len(t) for t in data["Annotation"].values])
# if ('use_mut_distances' in learn_options.keys() and learn_options['use_mut_distances']):
data = elevation.features.extract_mut_positions_stats(data)
if guideseq_data is not None:
y = guideseq_data['GUIDE-SEQ Reads'].values[:, None]
num_annot = np.array([len(t) for t in guideseq_data["Annotation"].values])
if 'logistic stacker' in models:
X = preds_guideseq.copy()
Xtest = preds_base_model.copy()
m = Stacker(y, X, warp_out=False)
m.maximize()
predictions['logistic stacker'] = m.predict(Xtest)
if 'CFD' in models:
# predicting
if 'cfd_table_file' not in learn_options.keys():
learn_options['cfd_table_file'] = settings.pj(settings.offtarget_data_dir,
"STable 19 FractionActive_dlfc_lookup.xlsx")
cfd = elevation.models.CFDModel(cfd_table_file=learn_options['cfd_table_file'])
predictions['CFD'] = cfd.predict(data["Annotation"].values, learn_options["num_proc"])[:, None]
if 'product' in models:
predictions['product'] = np.nanprod(preds_base_model, axis=1)[:, None]
if 'constant-power' in models:
predictions['constant-power'] = np.power(0.5, num_mismatches)
if 'CCTOP' in models:
# predicting
term1 = np.zeros((data.shape[0], 1))
for i in range(len(term1)):
num_m, tmp_muts = filter_pam_out_of_muts(data, i)
term1[i] = np.sum(1.2 ** np.array(tmp_muts))
predictions['CCTOP'] = -term1.flatten()
if 'HsuZhang' in models:
# predicting
W = [0.0, 0.0, 0.014, 0.0, 0.0, 0.395, 0.317, 0, 0.389, 0.079, 0.445, 0.508, 0.613, 0.851, 0.732, 0.828, 0.615,
0.804, 0.685, 0.583]
pred = np.zeros((data.shape[0], 1))
for i in range(len(pred)):
num_m, tmp_muts = filter_pam_out_of_muts(data, i)
if len(tmp_muts) == 0:
pred[i] = 1.0
else:
d = ut.get_pairwise_distance_mudra(tmp_muts)
term1 = np.prod(1. - np.array(W)[tmp_muts - 1])
if num_m > 1:
term2 = 1. / (((19 - d) / 19) * 4 + 1)
else:
term2 = 1
term3 = 1. / (num_m) ** 2
pred[i] = term1 * term2 * term3
predictions['HsuZhang'] = pred.flatten()
if 'linear-raw-stacker' in models or 'GBRT-raw-stacker' in models:
if trained_model is None:
# put together the training data
X = preds_guideseq.copy()
X[np.isnan(X)] = 1.0
feature_names = ['pos%d' % (i + 1) for i in range(X.shape[1])]
# adding product, num. annots and sum to log of itself
X = np.concatenate((np.log(X), np.prod(X, axis=1)[:, None], num_annot[:, None], np.sum(X, axis=1)[:, None]),
axis=1)
feature_names.extend(['product', 'num. annotations', 'sum'])
# X = np.log(X)
# Only product
# X = np.prod(X, axis=1)[:, None]
# feature_names = ['product']
Xtest = preds_base_model.copy()
Xtest[np.isnan(Xtest)] = 1.0
Xtest = np.concatenate(
(np.log(Xtest), np.prod(Xtest, axis=1)[:, None], num_mismatches[:, None], np.sum(Xtest, axis=1)[:, None]),
axis=1)
# Xtest = np.log(Xtest)
# Xtest = np.prod(Xtest, axis=1)[:, None]
if ('use_mut_distances' in learn_options.keys() and learn_options['use_mut_distances']):
guideseq_data = elevation.features.extract_mut_positions_stats(guideseq_data)
X_dist = guideseq_data[
['mut mean abs distance', 'mut min abs distance', 'mut max abs distance', 'mut sum abs distance',
'mean consecutive mut distance', 'min consecutive mut distance', 'max consecutive mut distance',
'sum consecutive mut distance']].values
Xtest_dist = data[
['mut mean abs distance', 'mut min abs distance', 'mut max abs distance', 'mut sum abs distance',
'mean consecutive mut distance', 'min consecutive mut distance', 'max consecutive mut distance',
'sum consecutive mut distance']].values
X = np.concatenate((X, X_dist), axis=1)
Xtest = np.concatenate((Xtest, Xtest_dist), axis=1)
if 'azimuth_score_in_stacker' in learn_options.keys() and learn_options['azimuth_score_in_stacker']:
azimuth_score = elevation.model_comparison.get_on_target_predictions(guideseq_data, ['WT'])[0]
X = np.concatenate((X, azimuth_score[:, None]), axis=1)
azimuth_score_test = elevation.model_comparison.get_on_target_predictions(data, ['WT'])[0]
Xtest = np.concatenate((Xtest, azimuth_score_test[:, None]), axis=1)
if 'linear-raw-stacker' in models:
dnase_type = [key for key in learn_options.keys() if 'dnase' in key]
assert len(dnase_type) <= 1
if len(dnase_type) == 1:
dnase_type = dnase_type[0]
use_dnase = learn_options[dnase_type]
else:
use_dnase = False
if use_dnase:
dnase_train = guideseq_data["dnase"].values
dnase_test = data["dnase"].values
assert dnase_train.shape[0] == X.shape[0]
assert dnase_test.shape[0] == Xtest.shape[0]
if dnase_type == 'dnase:default':
# simple appending (Melih)
X = np.concatenate((X, dnase_train[:, None]), axis=1)
Xtest = np.concatenate((Xtest, dnase_test[:, None]), axis=1)
elif dnase_type == 'dnase:interact':
# interaction with original features
X = np.concatenate((X, X * dnase_train[:, None]), axis=1)
Xtest = np.concatenate((Xtest, Xtest * dnase_test[:, None]), axis=1)
elif dnase_type == 'dnase:only':
# use only the dnase
X = dnase_train[:, None]
Xtest = dnase_test[:, None]
elif dnase_type == 'dnase:onlyperm':
# use only the dnase
pind = np.random.permutation(dnase_train.shape[0])
pind_test = np.random.permutation(dnase_test.shape[0])
X = dnase_train[pind, None]
Xtest = dnase_test[pind_test, None]
else:
raise NotImplementedError("no such dnase type: %s" % dnase_type)
normX = True
strength = 1.0
# train the model
if trained_model is None:
# subsample the data for more balanced training
ind_zero = np.where(y == 0)[0]
ind_keep = (y != 0).flatten()
nn = ind_keep.sum()
            # take every k-th zero
            increment = int(ind_zero.shape[0] / float(nn))
            sampling_rate = increment - 1  # choice of the over-sampling ratio
            k = 20  # choice of k for the k-nearest-neighbour step
            smote = Smote(sampling_rate=sampling_rate, k=k)
            X, y = smote.fit(X, y.flatten())  # data obtained after applying the SMOTE transformation
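            # Editor's note (assumption): Smote is the oversampler defined earlier in this
            # module; its fit(X, y) is assumed to append synthetic samples for the
            # under-represented non-zero class, interpolated between nearest neighbours,
            # so the zero / non-zero GUIDE-SEQ classes become more balanced.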
            print(X.shape)
            print(y.shape)
y = y.reshape(len(y), 1)
# ----- debug
# ind_zero = np.where(y==0)[0]
# ind_keep2 = (y!=0).flatten()
# ind_keep2[np.random.permutation(ind_zero)[0:nn]] = True
# -----
# from IPython.core.debugger import Tracer; Tracer()()
            # what had been used up until 9/12/2016
# clf = sklearn.linear_model.LassoCV(cv=10, fit_intercept=True, normalize=True)
# now using this:
num_fold = 10
kfold = StratifiedKFold(y.flatten() == 0, num_fold, random_state=learn_options['seed'])
# kfold2 = StratifiedKFold(y[ind_keep2].flatten()==0, num_fold, random_state=learn_options['seed'])
            clf = sklearn.linear_model.LassoCV(cv=kfold, fit_intercept=True, normalize=(not normX), n_jobs=num_fold,
random_state=learn_options['seed'])
# clf2 = sklearn.linear_model.LassoCV(cv=kfold2, fit_intercept=True, normalize=(~normX),n_jobs=num_fold, random_state=learn_options['seed'])
if normX:
clf = sklearn.pipeline.Pipeline(
[['scaling', sklearn.preprocessing.StandardScaler()], ['lasso', clf]])
# clf2 = sklearn.pipeline.Pipeline([['scaling', sklearn.preprocessing.StandardScaler()], ['lasso', clf2]])
# y_transf = st.boxcox(y[ind_keep] - y[ind_keep].min() + 0.001)[0]
# scale to be between 0 and 1 first
y_new = (y - np.min(y)) / (np.max(y) - np.min(y))
# plt.figure(); plt.plot(y_new[ind_keep], '.');
y_transf = st.boxcox(y_new - y_new.min() + 0.001)[0]
            # when we do renormalize, we know that these values are mostly negative (see Teams on 6/27/2017),
# so lets just make them go entirely negative(?)
# y_transf = y_transf - np.max(y_transf)
# plt.figure(); plt.plot(y_transf, '.'); #plt.title("w out renorm, w box cox, then making all negative"); plt.show()
# import ipdb; ipdb.set_trace()
# y_transf = np.log(y[ind_keep] - y[ind_keep].min() + 0.001)
# y_transf = y[ind_keep]
# debugging
# y_transf2 = st.boxcox(y[ind_keep2] - y[ind_keep2].min() + 0.001)[0]
# y_transf2 = y[ind_keep2]
            print("train data set size is N=%d" % len(y_transf))
clf.fit(X, y_transf)
# clf2.fit(X[ind_keep2], y_transf2)
# clf.fit(X_keep, tmpy)
# tmp = clf.predict(X)
# sp.stats.spearmanr(tmp[ind_keep],y_transf.flatten())[0]
# sp.stats.spearmanr(tmp[ind_keep], y[ind_keep])[0]
# sp.stats.spearmanr(tmp, y)[0]
# sp.stats.pearsonr(tmp[ind_keep],y_transf.flatten())[0]
# clf.fit(X, y.flatten())
# clf.fit(X, y, sample_weight=weights)
else:
clf = trained_model
# if normX:
# predictions['linear-raw-stacker'] = clf.predict(normalizeX(Xtest, strength, None))
# else:
predictions['linear-raw-stacker'] = clf.predict(Xtest)
# residuals = np.log(y[ind_keep].flatten()+0.001) - clf.predict(X[ind_keep])
if 'linreg-stacker' in models:
m_stacker = StackerFeat()
m_stacker.fit(preds_guideseq, y, model='linreg', normalize_feat=False)
predictions['linreg-stacker'] = m_stacker.predict(preds_base_model)
if 'RF-stacker' in models:
m_stacker = StackerFeat()
m_stacker.fit(preds_guideseq, y, model='RFR', normalize_feat=False)
predictions['RF-stacker'] = m_stacker.predict(preds_base_model)
if 'GP-stacker' in models:
m_stacker = StackerFeat()
m_stacker.fit(preds_guideseq, y, model='GP', normalize_feat=False)
predictions['GP-stacker'] = m_stacker.predict(preds_base_model)
if 'raw GP' in models:
X = preds_guideseq.copy()
X[np.isnan(X)] = 1.0
D_base_predictions = X.shape[1]
X = np.concatenate((np.prod(X, axis=1)[:, None],
num_annot[:, None],
np.sum(X, axis=1)[:, None],
X), axis=1)
Xtest = preds_base_model.copy()
Xtest[np.isnan(Xtest)] = 1.0
Xtest = np.concatenate((np.prod(Xtest, axis=1)[:, None],
num_mismatches[:, None],
np.sum(Xtest, axis=1)[:, None],
Xtest), axis=1)
K = GPy.kern.RBF(1, active_dims=[0]) + GPy.kern.RBF(1, active_dims=[1]) + GPy.kern.Linear(1, active_dims=[
2]) + GPy.kern.RBF(D_base_predictions, active_dims=range(3, D_base_predictions + 3))
m = GPy.models.GPRegression(X, np.log(y), kernel=K)
m.optimize_restarts(5, messages=0)
predictions['raw GP'] = m.predict(Xtest)[0]
if 'combine' in models:
        predictions['combine'] = np.ones_like(predictions[list(predictions.keys())[0]])
for c_model in models:
if c_model != 'combine':
predictions['combine'] += predictions[c_model].flatten()[:, None]
predictions['combine'] /= len(models) - 1
if 'ensemble' in models:
predictions['ensemble'] = (predictions['product'].flatten() + predictions['linear-raw-stacker'].flatten()) / 2.
if prob_calibration_model is not None:
if models_to_calibrate is None:
models_to_calibrate = ['linear-raw-stacker']
for m in models:
if False: # m == 'linear-raw-stacker':
pred = np.exp(predictions[m].flatten()[:, None]) - 0.001 # undo log transformation
else:
pred = predictions[m].flatten()[:, None]
if m in models_to_calibrate:
cal_pred = prob_calibration_model[m].predict_proba(pred)[:, 1]
# cal_pred = prob_calibration_model[m].predict_proba(pred)[:, 0]
if len(pred) > 10:
assert np.allclose(sp.stats.spearmanr(pred, cal_pred)[0],
1.0) # or np.allclose(sp.stats.spearmanr(pred, cal_pred)[0], -1.0)
predictions[m] = cal_pred
if truth is not None:
res_str = "Spearman r: "
for m in models:
res_str += "%s=%.3f " % (m, sp.stats.spearmanr(truth, predictions[m])[0])
        print(res_str)
res_str = "NDCG: "
for m in models:
res_str += "%s=%.3f " % (
m, azimuth.metrics.ndcg_at_k_ties(truth.values.flatten(), predictions[m].flatten(), truth.shape[0]))
        print(res_str)
if return_model:
if return_residuals:
return predictions, clf, feature_names, residuals
else:
return predictions, clf, feature_names
return predictions
def train_prob_calibration_model(cd33_data, guideseq_data, preds_guideseq, base_model, learn_options,
which_stacker_model='linear-raw-stacker', other_calibration_models=None):
assert which_stacker_model == 'linear-raw-stacker', "only LRS can be calibrated right now"
# import ipdb; ipdb.set_trace()
# if cd33_data is not None:
Y_bin = cd33_data['Day21-ETP-binarized'].values
Y = cd33_data['Day21-ETP'].values
# else:
# ind = np.zeros_like(guideseq_data['GUIDE-SEQ Reads'].values)
# ind[guideseq_data['GUIDE-SEQ Reads'].values > 0] = True
# ind_zero = np.where(guideseq_data['GUIDE-SEQ Reads'].values==0)[0]
# ind[ind_zero[::ind_zero.shape[0]/float(ind.sum())]] = True
# ind = ind==True
# Y = guideseq_data[ind]['GUIDE-SEQ Reads'].values
# cd33_data = guideseq_data[ind]
# X_guideseq = predict(base_model, cd33_data, learn_options)[0]
nb_pred, individual_mut_pred_cd33 = predict(base_model, cd33_data, learn_options)
    # # Thus the models in the ensemble have to be calibrated as well, so we rely on
# # having previously-calibrated models available in a dictionary
# if which_model == 'ensemble':
# models = ['CFD', 'HsuZhang', 'product', 'linear-raw-stacker', 'ensemble']
# models_to_calibrate = ['product', 'linear-raw-stacker']
# calibration_models = other_calibration_models
# else:
# models = [which_model]
# models_to_calibrate = None
# calibration_models = None
# get linear-raw-stacker (or other model==which_model) predictions, including training of that model if appropriate (e.g. linear-raw-stacker)
X_guideseq, clf_stacker_model, feature_names_stacker_model = stacked_predictions(cd33_data,
individual_mut_pred_cd33,
models=[which_stacker_model],
guideseq_data=guideseq_data,
preds_guideseq=preds_guideseq,
learn_options=learn_options,
models_to_calibrate=None,
prob_calibration_model=None,
return_model=True)
X_guideseq = X_guideseq[which_stacker_model]
clf = sklearn.linear_model.LogisticRegression(fit_intercept=True, solver='lbfgs')
# fit the linear-raw-stacker (or whatever model is being calibrated) predictions on cd33 to the actual binary cd33 values
clf.fit(X_guideseq[:, None], Y_bin)
y_pred = clf.predict_proba(X_guideseq[:, None])[:, 1]
# y_pred = clf.predict_proba(X_guideseq[:, None])[:, 0]
# import ipdb; ipdb.set_trace()
expected_sign = np.sign(sp.stats.spearmanr(X_guideseq, Y_bin)[0])
assert np.allclose(sp.stats.spearmanr(y_pred, X_guideseq)[0], 1.0 * expected_sign, atol=1e-2)
return clf
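# --- Editor's sketch (not part of the original module) -------------------------------------
# Minimal illustration of how the logistic-regression calibrator returned above would be
# applied to new raw stacker scores; `calibrator` and `raw_scores` are hypothetical names,
# and the only assumption is the sklearn predict_proba interface already used in
# train_prob_calibration_model.
def _apply_calibration_sketch(calibrator, raw_scores):
    import numpy as np
    scores = np.asarray(raw_scores).flatten()[:, None]  # sklearn expects a 2-D feature matrix
    return calibrator.predict_proba(scores)[:, 1]        # calibrated probability of the positive class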
def excute(wildtype, offtarget, calibration_models, base_model, guideseq_data, preds_guideseq,
           learn_options):  # the function used for testing the model.
start = time.time()
wt = wildtype
mut = offtarget
df = pd.DataFrame(columns=['30mer', '30mer_mut', 'Annotation'], index=range(len(wt)))
df['30mer'] = wt
df['30mer_mut'] = mut
annot = []
for i in range(len(wt)):
annot.append(elevation.load_data.annot_from_seqs(wt[i], mut[i]))
df['Annotation'] = annot
# print "Time spent parsing input: ", time.time() - start
base_model_time = time.time()
nb_pred, individual_mut_pred = elevation.prediction_pipeline.predict(base_model, df, learn_options)
# print "Time spent in base model predict(): ", time.time() - base_model_time
start = time.time()
pred = stacked_predictions(df, individual_mut_pred,
learn_options=learn_options,
guideseq_data=guideseq_data,
preds_guideseq=preds_guideseq,
prob_calibration_model=calibration_models,
models=['HsuZhang', 'CFD', 'CCTOP', 'linear-raw-stacker'])
return pred
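# Editor's note (hypothetical usage): excute is typically called with parallel lists of 30-mer
# wild-type and off-target sequences, e.g.
#   preds = excute(wt_list, offtarget_list, calibration_models, base_model,
#                  guideseq_data, preds_guideseq, learn_options)
#   preds['linear-raw-stacker']  # calibrated per-pair off-target scores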
| 44.299099
| 157
| 0.537338
|
866ab2f4841637b7d9dc5234f8c8f0a13969535c
| 634
|
py
|
Python
|
contrib/fate_script/utils/conf_variable.py
|
chenj133/FATE
|
7065fc73ab83f83e699efec69ff8efb499159ef4
|
[
"Apache-2.0"
] | 32
|
2020-06-12T08:39:58.000Z
|
2022-03-20T06:57:08.000Z
|
contrib/fate_script/utils/conf_variable.py
|
ErikSun2020/FATE
|
bdda535c7d8a974fc2c43102837964b7da199730
|
[
"Apache-2.0"
] | 14
|
2020-01-28T23:02:45.000Z
|
2022-02-10T00:22:08.000Z
|
contrib/fate_script/utils/conf_variable.py
|
ErikSun2020/FATE
|
bdda535c7d8a974fc2c43102837964b7da199730
|
[
"Apache-2.0"
] | 16
|
2020-06-12T06:51:46.000Z
|
2022-03-29T10:23:42.000Z
|
from arch.api.utils import file_utils
class ConfVar:
def __init__(self):
self.iter_num = 2
self.batch_num = 1
self.learning_rate = 0.15
self.eps = 1e-4
def init_conf(self, role):
conf_path = file_utils.load_json_conf('contrib/fate_script/conf/' + str(role) + '_runtime_conf.json')
self.iter_num = conf_path.get("FATEScriptLRParam").get("iter_num")
self.batch_num = conf_path.get("FATEScriptLRParam").get("batch_num")
self.learning_rate = conf_path.get("FATEScriptLRParam").get("learning_rate")
self.eps = conf_path.get("FATEScriptLRParam").get("eps")
| 39.625
| 109
| 0.670347
|
f4b2404cc3d18b6d0186244a3dff8bb08426023b
| 1,693
|
py
|
Python
|
src/signalflow_algorithms/algorithms/loop_group.py
|
naaeef/signalflowgrapher
|
0ac3b4a8f11830e5cd7665264ab4e723b5ddaad9
|
[
"Artistic-2.0"
] | 17
|
2020-09-15T11:25:32.000Z
|
2022-02-02T18:20:45.000Z
|
src/signalflow_algorithms/algorithms/loop_group.py
|
hzhikai97/signalflowgrapher
|
928a1c072d22bc606d03197ca3c326dd8e0ae89e
|
[
"Artistic-2.0"
] | 29
|
2020-09-16T16:23:28.000Z
|
2021-05-08T17:29:34.000Z
|
src/signalflow_algorithms/algorithms/loop_group.py
|
hzhikai97/signalflowgrapher
|
928a1c072d22bc606d03197ca3c326dd8e0ae89e
|
[
"Artistic-2.0"
] | 6
|
2020-09-21T06:32:19.000Z
|
2022-03-05T17:33:14.000Z
|
from typing import List
from signalflow_algorithms.algorithms.graph import Branch, Node
class LoopGroup(object):
def __init__(self, loop_count=0, loops=list(), nodes=list()):
self.loop_count = loop_count
self.loops: List[List[Branch]] = loops
self.nodes: List[Node] = nodes
def append_loop(self, loops: List[List[Branch]], nodes: List[Node]):
"""Append new loop to LoopGroup and return new group"""
next_loops = list(loops)
next_loops.extend(self.loops)
next_nodes = list(nodes)
next_nodes.extend(self.nodes)
return LoopGroup(self.loop_count + 1,
next_loops,
next_nodes)
def find_loop_groups(loops: List[List[Branch]]) -> List[LoopGroup]:
"""Build all groups of loops with no nodes in common"""
return list(__find_loop_groups(LoopGroup(), loops[:]))
def __find_loop_groups(loop_group: LoopGroup,
available_loops: List[List[Branch]]):
# Do not output empty results (on first call)
if loop_group.loop_count:
yield loop_group
while available_loops:
next_loop = available_loops.pop(0)
next_loop_nodes: List[Node] = []
for branch in next_loop:
next_loop_nodes.append(branch.start)
next_loop_nodes.append(branch.end)
if len(__intersect(loop_group.nodes, next_loop_nodes)) == 0:
yield from __find_loop_groups(
loop_group.append_loop([next_loop], next_loop_nodes),
available_loops[:])
def __intersect(a, b):
if a is None or b is None:
return list()
return [item for item in a if item in b]
| 31.943396
| 72
| 0.635558
|
1be3850f89ec8a01613339972c2abc63f2ad2cfe
| 5,781
|
py
|
Python
|
dh_utils/tei/refsdecl_generator.py
|
andredelft/dh-utils
|
6b4582de96de2e4a32e7b54ed850e03b888d353e
|
[
"MIT"
] | 1
|
2020-12-25T21:04:10.000Z
|
2020-12-25T21:04:10.000Z
|
dh_utils/tei/refsdecl_generator.py
|
andredelft/dh-utils
|
6b4582de96de2e4a32e7b54ed850e03b888d353e
|
[
"MIT"
] | null | null | null |
dh_utils/tei/refsdecl_generator.py
|
andredelft/dh-utils
|
6b4582de96de2e4a32e7b54ed850e03b888d353e
|
[
"MIT"
] | null | null | null |
import logging
import os
from contextlib import contextmanager
from copy import copy
from dataclasses import dataclass
from itertools import chain
from pathlib import Path
import argparse
from lxml import etree
from anytree import Node, LevelOrderGroupIter, RenderTree
from lxml.etree import Element, dump, ParseError
from typing import List
TEI_XPATH = "/tei:TEI/"
CTS_VERSION_XPATH = "tei:text/tei:body/tei:div[@type][@n]"
CTS_TEXTPART_XPATHS = ["tei:div[@type][@n]", "tei:l[@n]", "tei:ab/tei:l[@n]"]
NSMAP = {"tei": "http://www.tei-c.org/ns/1.0"}
class RefNode(Node):
element: Element
n: str
type: str = None
subtype: str = None
xpath_match: str = None
class TextpartNode(RefNode):
pass
@dataclass
class TextpartPath:
segments: List[str]
type: str
def is_textpart(node):
return isinstance(node, TextpartNode)
def findall(el, xpath):
"""Find all child elements that match xpath query"""
return el.findall(xpath, namespaces=NSMAP)
def build_node(element, *, parent=None, node_type=RefNode, xpath_match=None):
type_ = element.attrib.get("type") or node_type.type
subtype_ = element.attrib.get("subtype") or node_type.subtype
return node_type(
name=f"{type_} ({subtype_})" if subtype_ else type_,
element=element,
parent=parent,
type=type_ or "textpart",
subtype=subtype_ or "line",
n=element.attrib.get("n"),
xpath_match=xpath_match,
)
def build_ref_tree(el):
work = build_node(el, parent=None)
for version_el in findall(el, f"./{CTS_VERSION_XPATH}"):
version = build_node(version_el, parent=work)
build_textpart_tree(version_el, parent=version)
return work
def build_textpart_tree(el, parent):
for xpath_match, textpart_el in find_textparts(el):
textpart = build_node(
element=textpart_el,
parent=parent,
node_type=TextpartNode,
xpath_match=xpath_match,
)
build_textpart_tree(textpart_el, parent=textpart)
def find_textparts(el):
return chain(
*(
[(path, element) for element in findall(el, f"./{path}")]
for path in CTS_TEXTPART_XPATHS
)
)
def textpart_levels(tree):
levels, current_level = [], []
for group in filter(None, LevelOrderGroupIter(tree, filter_=is_textpart)):
current_level.append(group[0])
levels.append(copy(current_level))
return levels
def textpart_paths(tree):
for ref in textpart_levels(tree):
segments, type_ = [], None
for i, node_ in enumerate(ref, start=1):
type_ = node_.subtype
segments.append(node_.xpath_match.replace("[@n]", f"[@n='${i}']"))
yield TextpartPath(segments=segments, type=type_)
def debug_tree(tree):
for pre, fill, node in RenderTree(tree):
print("%s%s" % (pre, node.name))
def build_refs_decl(tree, path_root):
element = etree.Element("refsDecl", {"n": "CTS"})
for path in reversed([path for path in textpart_paths(tree)]):
attrib = {
"n": path.type,
"matchPattern": r"\.".join([r"(\w+)"] * len(path.segments)),
"replacementPattern": f"#xpath({os.path.join(path_root, *path.segments)})",
}
element.append(etree.Element("cRefPattern", attrib))
return element
def update_refsdecl(tree, refs_decl, path):
encoding_desc = tree.find("//tei:encodingDesc", NSMAP)
if encoding_desc is None:
raise Exception("Missing enveloping element 'encodingDesc'")
existing_refsdecl = encoding_desc.find("./tei:refsDecl", NSMAP)
if existing_refsdecl is not None:
encoding_desc.remove(existing_refsdecl)
encoding_desc.append(refs_decl)
with open(path, "wb") as writer:
tree.write(writer, encoding="utf-8")
@contextmanager
def read_xml(path):
with open(path, "rb") as reader:
try:
root_node = etree.fromstring(reader.read())
yield etree.ElementTree(root_node)
except ParseError as e:
logging.exception(Exception(f"Could not parse: {path}", e))
yield etree.ElementTree()
def xml_paths(path):
for subdir, dirs, files in os.walk(path):
yield from (
os.path.join(subdir, file) for file in files if file.endswith(".xml")
)
def is_tei_xml(tree):
return tree.getroot() is not None and tree.find("//tei:*", NSMAP) is not None
def generate_for_file(path, update):
with read_xml(path) as tree:
# filter out all non-tei files
if not is_tei_xml(tree):
return
element = build_refs_decl(
tree=build_ref_tree(el=tree.getroot()),
path_root=os.path.join(TEI_XPATH, CTS_VERSION_XPATH),
)
if update:
update_refsdecl(tree, element, path)
            print(f"Successfully updated {path}")
return element
def generate_for_path(path, update):
path_ = Path(path)
for path_ in ([path_] if path_.is_file() else xml_paths(path_)):
element = generate_for_file(path_, update)
if element is not None:
yield element
def generate(args):
for refsdecl in generate_for_path(path=args.path, update=args.update):
if not args.update:
dump(refsdecl)
def parse_args():
parser = argparse.ArgumentParser(
description="A tool to dynamically generate refsDecl definitions for TEI files"
)
parser.add_argument("path", help="Path to TEI file")
parser.add_argument(
"--update",
action="store_true",
help="Updates the file with the newly generated refsDecl",
)
return parser.parse_args()
if __name__ == "__main__":
generate(args=parse_args())
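# Editor's note (illustrative CLI usage, based on the argparse definition above):
#   python refsdecl_generator.py path/to/tei_files --update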
| 26.640553
| 87
| 0.64712
|
755058473ee8eb18185698c43462f2b34cdddab1
| 1,771
|
py
|
Python
|
amgut/handlers/download.py
|
zoechallacombe/american-gut-web
|
800f0045b98764b4ecfe5f442a03ca8938769eb5
|
[
"BSD-3-Clause"
] | 5
|
2015-02-10T18:01:12.000Z
|
2021-01-15T16:19:00.000Z
|
amgut/handlers/download.py
|
zoechallacombe/american-gut-web
|
800f0045b98764b4ecfe5f442a03ca8938769eb5
|
[
"BSD-3-Clause"
] | 346
|
2015-01-03T00:12:17.000Z
|
2019-11-20T00:51:16.000Z
|
amgut/handlers/download.py
|
zoechallacombe/american-gut-web
|
800f0045b98764b4ecfe5f442a03ca8938769eb5
|
[
"BSD-3-Clause"
] | 16
|
2015-11-10T21:53:52.000Z
|
2019-10-21T18:00:47.000Z
|
from tornado.web import authenticated, HTTPError
from os.path import join, exists
from .base_handlers import BaseHandler
from amgut import AMGUT_CONFIG
from amgut.connections import ag_data
# {filetype from get: (local path, file suffix)}
FILETYPES = {'taxa': ('taxa-summaries', '.txt'),
'result-pdf': ('pdfs', '.pdf')}
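# Editor's note (illustrative): a GET request carrying e.g. barcode=000001&filetype=result-pdf
# (barcode value hypothetical) resolves to pdfs/000001.pdf under AMGUT_CONFIG.base_data_dir,
# provided the authenticated user has access to that barcode.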
class DownloadHandler(BaseHandler):
@authenticated
def get(self, *args, **kwargs):
barcode = self.get_argument('barcode', None)
filetype = self.get_argument('filetype', None)
if barcode is None or filetype is None:
raise HTTPError(400, "Incorrectly formed GET request")
# Check access to file
has_access = ag_data.check_access(self.current_user, barcode)
if not has_access:
self.set_status(403)
self.render("403.html", skid=self.current_user)
return
if filetype not in FILETYPES:
raise HTTPError(400, "Unrecognized filetype")
filetype_path, filetype_suffix = FILETYPES[filetype]
fname = barcode + filetype_suffix
fullpath = join(filetype_path, fname)
if not exists(join(AMGUT_CONFIG.base_data_dir, fullpath)):
raise HTTPError(400, "File %s is not available" % fullpath)
self.set_header('Content-Description', 'File Transfer')
self.set_header('Content-Type', 'application/octet-stream')
self.set_header('Content-Transfer-Encoding', 'binary')
self.set_header('Expires', '0')
self.set_header('Cache-Control', 'no-cache')
self.set_header('X-Accel-Redirect', '/protected/' + fullpath)
self.set_header('Content-Disposition',
'attachment; filename=%s' % fname)
self.finish()
| 34.057692
| 71
| 0.648786
|
ce2d0782414a09cfbc3fdd46b68689e9164827a6
| 5,250
|
py
|
Python
|
desktop/core/ext-py/cryptography-2.1.4/src/_cffi_src/openssl/asn1.py
|
kokosing/hue
|
2307f5379a35aae9be871e836432e6f45138b3d9
|
[
"Apache-2.0"
] | 3
|
2018-01-29T14:16:02.000Z
|
2019-02-05T21:33:05.000Z
|
desktop/core/ext-py/cryptography-2.1.4/src/_cffi_src/openssl/asn1.py
|
zks888/hue
|
93a8c370713e70b216c428caa2f75185ef809deb
|
[
"Apache-2.0"
] | 4
|
2021-03-11T04:02:00.000Z
|
2022-03-27T08:31:56.000Z
|
desktop/core/ext-py/cryptography-2.1.4/src/_cffi_src/openssl/asn1.py
|
zks888/hue
|
93a8c370713e70b216c428caa2f75185ef809deb
|
[
"Apache-2.0"
] | 2
|
2019-12-05T17:24:36.000Z
|
2021-11-22T21:21:32.000Z
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
INCLUDES = """
#include <openssl/asn1.h>
"""
TYPES = """
typedef int... time_t;
typedef int ASN1_BOOLEAN;
typedef ... ASN1_INTEGER;
struct asn1_string_st {
int length;
int type;
unsigned char *data;
long flags;
};
typedef struct asn1_string_st ASN1_OCTET_STRING;
typedef struct asn1_string_st ASN1_IA5STRING;
typedef struct asn1_string_st ASN1_BIT_STRING;
typedef struct asn1_string_st ASN1_TIME;
typedef ... ASN1_OBJECT;
typedef struct asn1_string_st ASN1_STRING;
typedef struct asn1_string_st ASN1_UTF8STRING;
typedef ... ASN1_TYPE;
typedef ... ASN1_GENERALIZEDTIME;
typedef ... ASN1_ENUMERATED;
typedef ... ASN1_ITEM;
typedef ... ASN1_VALUE;
typedef ... ASN1_ITEM_EXP;
typedef ... ASN1_UTCTIME;
static const int V_ASN1_GENERALIZEDTIME;
static const int MBSTRING_FLAG;
static const int MBSTRING_ASC;
static const int MBSTRING_BMP;
static const int MBSTRING_UTF8;
static const int MBSTRING_UNIV;
"""
FUNCTIONS = """
ASN1_OBJECT *ASN1_OBJECT_new(void);
void ASN1_OBJECT_free(ASN1_OBJECT *);
/* ASN1 OBJECT IDENTIFIER */
ASN1_OBJECT *d2i_ASN1_OBJECT(ASN1_OBJECT **, const unsigned char **, long);
/* ASN1 STRING */
ASN1_STRING *ASN1_STRING_new(void);
ASN1_STRING *ASN1_STRING_type_new(int);
void ASN1_STRING_free(ASN1_STRING *);
unsigned char *ASN1_STRING_data(ASN1_STRING *);
int ASN1_STRING_set(ASN1_STRING *, const void *, int);
/* ASN1 OCTET STRING */
ASN1_OCTET_STRING *ASN1_OCTET_STRING_new(void);
void ASN1_OCTET_STRING_free(ASN1_OCTET_STRING *);
int ASN1_OCTET_STRING_set(ASN1_OCTET_STRING *, const unsigned char *, int);
/* ASN1 IA5STRING */
ASN1_IA5STRING *ASN1_IA5STRING_new(void);
/* ASN1 INTEGER */
ASN1_INTEGER *ASN1_INTEGER_new(void);
void ASN1_INTEGER_free(ASN1_INTEGER *);
int ASN1_INTEGER_set(ASN1_INTEGER *, long);
/* ASN1 TIME */
ASN1_TIME *ASN1_TIME_new(void);
void ASN1_TIME_free(ASN1_TIME *);
ASN1_TIME *ASN1_TIME_set(ASN1_TIME *, time_t);
int ASN1_TIME_set_string(ASN1_TIME *, const char *);
/* ASN1 UTCTIME */
ASN1_UTCTIME *ASN1_UTCTIME_new(void);
void ASN1_UTCTIME_free(ASN1_UTCTIME *);
int ASN1_UTCTIME_cmp_time_t(const ASN1_UTCTIME *, time_t);
ASN1_UTCTIME *ASN1_UTCTIME_set(ASN1_UTCTIME *, time_t);
/* ASN1 GENERALIZEDTIME */
int ASN1_GENERALIZEDTIME_set_string(ASN1_GENERALIZEDTIME *, const char *);
ASN1_GENERALIZEDTIME *ASN1_GENERALIZEDTIME_set(ASN1_GENERALIZEDTIME *, time_t);
void ASN1_GENERALIZEDTIME_free(ASN1_GENERALIZEDTIME *);
int i2d_ASN1_GENERALIZEDTIME(ASN1_GENERALIZEDTIME *, unsigned char **);
/* ASN1 ENUMERATED */
ASN1_ENUMERATED *ASN1_ENUMERATED_new(void);
void ASN1_ENUMERATED_free(ASN1_ENUMERATED *);
int ASN1_ENUMERATED_set(ASN1_ENUMERATED *, long);
int i2d_ASN1_ENUMERATED(ASN1_ENUMERATED *, unsigned char **);
ASN1_VALUE *ASN1_item_d2i(ASN1_VALUE **, const unsigned char **, long,
const ASN1_ITEM *);
int ASN1_BIT_STRING_set_bit(ASN1_BIT_STRING *, int, int);
/* These became const ASN1_* in 1.1.0 */
int i2d_ASN1_OBJECT(ASN1_OBJECT *, unsigned char **);
int ASN1_STRING_type(ASN1_STRING *);
int ASN1_STRING_to_UTF8(unsigned char **, ASN1_STRING *);
long ASN1_ENUMERATED_get(ASN1_ENUMERATED *);
int i2a_ASN1_INTEGER(BIO *, ASN1_INTEGER *);
/* This became const ASN1_TIME in 1.1.0f */
ASN1_GENERALIZEDTIME *ASN1_TIME_to_generalizedtime(ASN1_TIME *,
ASN1_GENERALIZEDTIME **);
ASN1_UTF8STRING *ASN1_UTF8STRING_new(void);
void ASN1_UTF8STRING_free(ASN1_UTF8STRING *);
ASN1_BIT_STRING *ASN1_BIT_STRING_new(void);
void ASN1_BIT_STRING_free(ASN1_BIT_STRING *);
int i2d_ASN1_BIT_STRING(ASN1_BIT_STRING *, unsigned char **);
int i2d_ASN1_OCTET_STRING(ASN1_OCTET_STRING *, unsigned char **);
int i2d_ASN1_INTEGER(ASN1_INTEGER *, unsigned char **);
/* This is not a macro, but is const on some versions of OpenSSL */
int ASN1_BIT_STRING_get_bit(ASN1_BIT_STRING *, int);
const ASN1_ITEM *ASN1_ITEM_ptr(ASN1_ITEM_EXP *);
/* These aren't macros these arguments are all const X on openssl > 1.0.x */
int ASN1_TIME_print(BIO *, ASN1_TIME *);
int ASN1_STRING_length(ASN1_STRING *);
ASN1_STRING *ASN1_STRING_dup(ASN1_STRING *);
int ASN1_STRING_cmp(ASN1_STRING *, ASN1_STRING *);
int ASN1_UTCTIME_print(BIO *, ASN1_UTCTIME *);
ASN1_OCTET_STRING *ASN1_OCTET_STRING_dup(ASN1_OCTET_STRING *);
int ASN1_OCTET_STRING_cmp(ASN1_OCTET_STRING *, ASN1_OCTET_STRING *);
ASN1_INTEGER *ASN1_INTEGER_dup(ASN1_INTEGER *);
int ASN1_INTEGER_cmp(ASN1_INTEGER *, ASN1_INTEGER *);
long ASN1_INTEGER_get(ASN1_INTEGER *);
BIGNUM *ASN1_INTEGER_to_BN(ASN1_INTEGER *, BIGNUM *);
ASN1_INTEGER *BN_to_ASN1_INTEGER(BIGNUM *, ASN1_INTEGER *);
/* These isn't a macro the arg is const on openssl 1.0.2+ */
int ASN1_GENERALIZEDTIME_check(ASN1_GENERALIZEDTIME *);
int ASN1_UTCTIME_check(ASN1_UTCTIME *);
int ASN1_TIME_check(ASN1_TIME *);
/* Not a macro, const on openssl 1.0 */
int ASN1_STRING_set_default_mask_asc(char *);
int i2d_ASN1_TYPE(ASN1_TYPE *, unsigned char **);
ASN1_TYPE *d2i_ASN1_TYPE(ASN1_TYPE **, const unsigned char **, long);
"""
CUSTOMIZATIONS = """
"""
| 33.018868
| 79
| 0.770857
|
c40822fcb6a4e725ec10f2777a19b82fbc2752a8
| 22,690
|
py
|
Python
|
cube4.py
|
Horia73/MultiCuberX
|
6087e2732c1d28c1d2244441ebb687b907ce86ac
|
[
"MIT"
] | null | null | null |
cube4.py
|
Horia73/MultiCuberX
|
6087e2732c1d28c1d2244441ebb687b907ce86ac
|
[
"MIT"
] | null | null | null |
cube4.py
|
Horia73/MultiCuberX
|
6087e2732c1d28c1d2244441ebb687b907ce86ac
|
[
"MIT"
] | null | null | null |
from subprocess import check_output
from time import sleep
import logging
import imutils
import serial
import time
import cv2
global Down, Medium1, Medium2, Up, FlUp, FlDown, coord, l, start, i
start = True
i = 0
l = []
coord = []
log = logging.getLogger(__name__)
Down = True
Medium1 = False
medium = False
Medium2 = False
Up = False
FlUp = True
FlDown = False
def prepare():
global s1, camera
print("Conectarea la Arduino Mega...")
s1 = serial.Serial('/dev/tty.usbmodem21401', 9600)
print("Conectat! - MegaA")
print(" ")
camera = cv2.VideoCapture(0)
sleep(1.5)
(retval, img) = camera.read()
cv2.imshow("Capture", img)
print("Gata! Introdu un cub 4x4x4 amestecat iar apoi apasa 'Solve' pentru a rezolva cubul!")
def photo(name, angle):
global camera
sleep(0.35)
(retval, img) = camera.read()
name = '/Users/horia/MultiCuber/CubeScan/' + str(name)
name = str(name)
print(name)
angle = int(angle)
img = imutils.rotate(img, angle)
cv2.imshow("Capture", img)
cv2.waitKey(1)
cv2.imwrite(name, img)
def status():
s1.write(b'M')
while (1):
r = s1.read()
r = r.decode()
if r == 'A':
break
def slow():
s1.write(b'4')
s1.write(b'7')
def normal():
s1.write(b'4')
s1.write(b'8')
def fast():
s1.write(b'4')
s1.write(b'9')
def ElevatorDown():
global Down, Medium1, medium, Medium2, Up
if Down:
pass
elif Medium1:
s1.write(b'g')
elif medium:
s1.write(b'n')
elif Medium2:
s1.write(b'd')
elif Up:
s1.write(b'e')
if Down == False:
Down = True
Medium1 = False
medium = False
Medium2 = False
Up = False
status()
def Elevator1():
global Down, Medium1, medium, Medium2, Up
if Down:
s1.write(b'G')
elif Medium1:
pass
elif medium:
s1.write(b'j')
elif Medium2:
s1.write(b'p')
elif Up:
s1.write(b'o')
if Medium1 == False:
Down = False
Medium1 = True
medium = False
Medium2 = False
Up = False
status()
def ElevatorMedium():
global Down, Medium1, medium, Medium2, Up
if Down:
s1.write(b'N')
elif Medium1:
s1.write(b'J')
elif medium:
pass
elif Medium2:
s1.write(b'j')
elif Up:
s1.write(b'p')
if medium == False:
Down = False
Medium1 = False
medium = True
Medium2 = False
Up = False
status()
def Elevator2():
global Down, Medium1, medium, Medium2, Up
if Down:
s1.write(b'D')
elif Medium1:
s1.write(b'P')
elif medium:
s1.write(b'J')
elif Medium2:
pass
elif Up:
s1.write(b'j')
if Medium2 == False:
Down = False
Medium1 = False
medium = False
Medium2 = True
Up = False
status()
def ElevatorUp():
global Down, Medium1, medium, Medium2, Up
if Down:
s1.write(b'E')
elif Medium1:
s1.write(b'O')
elif medium:
s1.write(b'P')
elif Medium2:
s1.write(b'J')
elif Up:
pass
if Up == False:
Down = False
Medium1 = False
medium = False
Medium2 = False
Up = True
status()
def ElevatorUpScan():
s1.write(b'S')
status()
def ElevatorDownScan():
s1.write(b's')
status()
def RotatorPositive():
s1.write(b'R')
if Medium2 or Up or (medium and (l[0] == 'D' or (l[0] == 'B' and start))):
for n, i in enumerate(l):
if i == 'F':
l[n] = 'L'
elif i == 'L':
l[n] = 'B'
elif i == 'B':
l[n] = 'R'
elif i == 'R':
l[n] = 'F'
status()
def RotatorNegative():
s1.write(b'r')
if Medium2 or Up or (medium and (l[0] == 'D' or (l[0] == 'B' and start))):
for n, i in enumerate(l):
if i == 'F':
l[n] = 'R'
elif i == 'L':
l[n] = 'F'
elif i == 'B':
l[n] = 'L'
elif i == 'R':
l[n] = 'B'
status()
def RotatorDouble():
s1.write(b'B')
if Medium2 or Up or (medium and (l[0] == 'D' or (l[0] == 'B' and start))):
for n, i in enumerate(l):
if i == 'F':
l[n] = 'B'
elif i == 'L':
l[n] = 'R'
elif i == 'B':
l[n] = 'F'
elif i == 'R':
l[n] = 'L'
status()
def FlipperUp():
global FlUp, FlDown
if FlDown:
if Down:
s1.write(b'F')
for n, i in enumerate(l):
if i == 'F':
l[n] = 'U'
elif i == 'U':
l[n] = 'B'
elif i == 'B':
l[n] = 'D'
elif i == 'D':
l[n] = 'F'
elif Down == False:
s1.write(b'X')
elif FlUp:
pass
FlUp = True
FlDown = False
status()
def FlipperDown():
global FlUp, FlDown
if FlUp:
if Down:
s1.write(b'f')
for n, i in enumerate(l):
if i == 'F':
l[n] = 'D'
elif i == 'U':
l[n] = 'F'
elif i == 'B':
l[n] = 'U'
elif i == 'D':
l[n] = 'B'
elif Down == False:
s1.write(b'x')
elif FlDown:
pass
FlDown = True
FlUp = False
status()
def close():
global camera
s1.write(b'H')
FlipperUp()
sleep(0.2)
s1.write(b'h')
del (camera)
camera = None
s1.write(b'Q')
def pattern1():
global l
l = []
l.extend(("B", "'", "R", "2", "R", "w", "2", "L", "w", "2", "L", "2", "U", "2", "R", "2", "R", "w", "2", "L", "w",
"2", "L", "2", "B", "F", "2", "R", "U", "'", "R", "U", "R", "2", "U", "R", "2", "F", "'", "U", "F", "'",
"U", "w", "L", "w", "U", "w", "'", "F", "w", "2", "D", "w", "R", "w", "'", "U", "w", "F", "w", "D", "w",
"2", "R", "w", "2"))
solver()
def pattern2():
global l
l = []
l.extend(("U", "D", "'", "R", "L", "'", "F", "B", "'", "U", "D", "'"))
solver()
def pattern3():
global l
l = []
l.extend(("U", "F", "B", "'", "L", "2", "U", "2", "L", "2", "F", "'", "B", "U", "2", "L", "2", "U"))
solver()
def pattern4():
global l
l = []
l.extend(("R", "2", "L", "'", "D", "F", "2", "R", "'", "D", "'", "R", "'", "L", "U", "'", "D", "R", "D", "B", "2",
"R", "'", "U", "D", "2"))
solver()
def scanner():
global l, coord, b1, a1, b2, a2, b3, a3
a1 = time.time()
s1.write(b'H')
FlipperDown()
photo('rubiks-side-U.png', '270')
ElevatorUpScan()
FlipperUp()
ElevatorDownScan()
FlipperDown()
photo('rubiks-side-R.png', '180')
ElevatorUpScan()
FlipperUp()
ElevatorDownScan()
FlipperDown()
photo('rubiks-side-D.png', '90')
ElevatorUpScan()
FlipperUp()
ElevatorDownScan()
FlipperDown()
photo('rubiks-side-L.png', '0')
ElevatorUp()
RotatorNegative()
ElevatorDown()
FlipperUp()
FlipperDown()
photo('rubiks-side-B.png', '0')
ElevatorUp()
RotatorDouble()
ElevatorDown()
FlipperUp()
FlipperDown()
photo('rubiks-side-F.png', '0')
s1.write(b'h')
b1 = time.time()
def analyzer():
global l, coord, b1, a1, b2, a2, b3, a3, q
a2 = time.time()
cmd1 = ("cd ~/MultiCuber/rubiks-cube-tracker/usr/bin; python3 rubiks-cube-tracker.py --directory ~/MultiCuber/CubeScan")
log.info(cmd1)
output1 = check_output(cmd1, shell=True)
output1 = str(output1)
output1 = output1[2:]
output1 = output1.rstrip(output1[-1])
output1 = output1.rstrip(output1[-1])
output1 = output1.rstrip(output1[-1])
cmd2 = ("cd ~/MultiCuber/rubiks-color-resolver/usr/bin; python3 rubiks-color-resolver.py --json --rgb" + " " + "'" + output1 + "'")
log.info(cmd2)
output2 = check_output(cmd2, shell=True)
output2 = str(output2)
contents = output2[22:118]
print(contents)
cmd = ("cd ~/MultiCuber/TPR-4x4x4-Solver-master; java -cp .:threephase.jar:twophase.jar solver " + contents)
output3 = check_output(cmd, shell=True)
output3 = str(output3)
l = list(output3)
del l[:124]
l = [ele for ele in l if ele.strip()]
l.pop()
l.pop()
l.pop()
l.append('Terminat!')
print(l)
print("Scanarea si gasirea algoritmului s-a finlizat!")
print("Incepem sa rezolvam cubul!")
c1 = l.count("w")
print("Mutari pentru stratul mijlociu (w):")
print(c1)
c2 = l.count("'")
print("Mutari prime ('):")
print(c2)
c3 = l.count('2')
print("Mutari duble:")
print(c3)
c4 = len(l)
q = c4 - c3 - c2 - c1
print("Mutari totale:")
print(q)
b2 = time.time()
def solver():
global l, coord, b1, a1, b2, a2, b3, a3, start, i
a3 = time.time()
s1.write(b'H')
for x in range(q):
if x > 1 and x < 3:
start = False
if l[0] == "F" and l[1] == "w" and l[2] == "'":
FlipperDown()
ElevatorDown()
FlipperUp()
ElevatorMedium()
RotatorNegative()
del l[0]
del l[0]
del l[0]
elif l[0] == "F" and l[1] == "w" and l[2] == "2":
FlipperDown()
ElevatorDown()
FlipperUp()
ElevatorMedium()
RotatorDouble()
del l[0]
del l[0]
del l[0]
elif l[0] == "R" and l[1] == "w" and l[2] == "'":
ElevatorUp()
RotatorPositive()
FlipperDown()
ElevatorDown()
FlipperUp()
ElevatorMedium()
RotatorNegative()
del l[0]
del l[0]
del l[0]
elif l[0] == "R" and l[1] == "w" and l[2] == "2":
ElevatorUp()
RotatorPositive()
FlipperDown()
ElevatorDown()
FlipperUp()
ElevatorMedium()
RotatorDouble()
del l[0]
del l[0]
del l[0]
elif l[0] == "U" and l[1] == "w" and l[2] == "'":
ElevatorMedium()
RotatorNegative()
del l[0]
del l[0]
del l[0]
elif l[0] == "U" and l[1] == "w" and l[2] == "2":
ElevatorMedium()
RotatorDouble()
del l[0]
del l[0]
del l[0]
elif l[0] == "B" and l[1] == "w" and l[2] == "'":
if start:
FlipperDown()
ElevatorDown()
FlipperUp()
ElevatorMedium()
RotatorNegative()
else:
FlipperUp()
ElevatorDown()
FlipperDown()
ElevatorMedium()
RotatorNegative()
del l[0]
del l[0]
del l[0]
elif l[0] == "B" and l[1] == "w" and l[2] == "2":
if start:
FlipperDown()
ElevatorDown()
FlipperUp()
ElevatorMedium()
RotatorDouble()
else:
FlipperUp()
ElevatorDown()
FlipperDown()
ElevatorMedium()
RotatorDouble()
del l[0]
del l[0]
del l[0]
elif l[0] == "D" and l[1] == "w" and l[2] == "'":
ElevatorMedium()
RotatorNegative()
del l[0]
del l[0]
del l[0]
elif l[0] == "D" and l[1] == "w" and l[2] == "2":
ElevatorMedium()
RotatorDouble()
del l[0]
del l[0]
del l[0]
elif l[0] == "L" and l[1] == "w" and l[2] == "'":
ElevatorUp()
RotatorNegative()
FlipperDown()
ElevatorDown()
FlipperUp()
ElevatorMedium()
RotatorNegative()
del l[0]
del l[0]
del l[0]
elif l[0] == "L" and l[1] == "w" and l[2] == "2":
ElevatorUp()
RotatorNegative()
FlipperDown()
ElevatorDown()
FlipperUp()
ElevatorMedium()
RotatorDouble()
del l[0]
del l[0]
del l[0]
elif l[0] == "F" and l[1] == "w":
FlipperDown()
ElevatorDown()
FlipperUp()
ElevatorMedium()
RotatorPositive()
del l[0]
del l[0]
elif l[0] == "R" and l[1] == "w":
ElevatorUp()
RotatorPositive()
FlipperDown()
ElevatorDown()
FlipperUp()
ElevatorMedium()
RotatorPositive()
del l[0]
del l[0]
elif l[0] == "U" and l[1] == "w":
ElevatorMedium()
RotatorPositive()
del l[0]
del l[0]
elif l[0] == "B" and l[1] == "w":
if start:
FlipperDown()
ElevatorDown()
FlipperUp()
ElevatorMedium()
RotatorPositive()
else:
FlipperUp()
ElevatorDown()
FlipperDown()
ElevatorMedium()
RotatorPositive()
del l[0]
del l[0]
elif l[0] == "D" and l[1] == "w":
ElevatorMedium()
RotatorPositive()
del l[0]
del l[0]
elif l[0] == "L" and l[1] == "w":
ElevatorUp()
RotatorNegative()
FlipperDown()
ElevatorDown()
FlipperUp()
ElevatorMedium()
RotatorPositive()
del l[0]
del l[0]
elif l[0] == "F" and l[1] == "'":
FlipperDown()
ElevatorDown()
FlipperUp()
Elevator1()
RotatorNegative()
del l[0]
del l[0]
elif l[0] == "F" and l[1] == "2":
FlipperDown()
ElevatorDown()
FlipperUp()
Elevator1()
RotatorDouble()
del l[0]
del l[0]
elif l[0] == "R" and l[1] == "'":
ElevatorUp()
RotatorPositive()
FlipperDown()
ElevatorDown()
FlipperUp()
Elevator1()
RotatorNegative()
del l[0]
del l[0]
elif l[0] == "R" and l[1] == "2":
ElevatorUp()
RotatorPositive()
FlipperDown()
ElevatorDown()
FlipperUp()
Elevator1()
RotatorDouble()
del l[0]
del l[0]
elif l[0] == "U" and l[1] == "'":
Elevator1()
RotatorNegative()
del l[0]
del l[0]
elif l[0] == "U" and l[1] == "2":
Elevator1()
RotatorDouble()
del l[0]
del l[0]
elif l[0] == "B" and l[1] == "'":
if start:
FlipperDown()
ElevatorDown()
FlipperUp()
Elevator2()
RotatorNegative()
else:
FlipperUp()
ElevatorDown()
FlipperDown()
Elevator1()
RotatorNegative()
del l[0]
del l[0]
elif l[0] == "B" and l[1] == "2":
if start:
FlipperDown()
ElevatorDown()
FlipperUp()
Elevator2()
RotatorDouble()
else:
FlipperUp()
ElevatorDown()
FlipperDown()
Elevator1()
RotatorDouble()
del l[0]
del l[0]
elif l[0] == "D" and l[1] == "'":
Elevator2()
RotatorNegative()
del l[0]
del l[0]
elif l[0] == "D" and l[1] == "2":
Elevator2()
RotatorDouble()
del l[0]
del l[0]
elif l[0] == "L" and l[1] == "'":
ElevatorUp()
RotatorNegative()
FlipperDown()
ElevatorDown()
FlipperUp()
Elevator1()
RotatorNegative()
del l[0]
del l[0]
elif l[0] == "L" and l[1] == "2":
ElevatorUp()
RotatorNegative()
FlipperDown()
ElevatorDown()
FlipperUp()
Elevator1()
RotatorDouble()
del l[0]
del l[0]
elif l[0] == "F":
FlipperDown()
ElevatorDown()
FlipperUp()
Elevator1()
RotatorPositive()
del l[0]
elif l[0] == "R":
ElevatorUp()
RotatorPositive()
FlipperDown()
ElevatorDown()
FlipperUp()
Elevator1()
RotatorPositive()
del l[0]
elif l[0] == "U":
Elevator1()
RotatorPositive()
del l[0]
elif l[0] == "B":
if start:
FlipperDown()
ElevatorDown()
FlipperUp()
Elevator2()
RotatorPositive()
else:
FlipperUp()
ElevatorDown()
FlipperDown()
Elevator1()
RotatorPositive()
del l[0]
elif l[0] == "D":
Elevator2()
RotatorPositive()
del l[0]
elif l[0] == "L":
ElevatorUp()
RotatorNegative()
FlipperDown()
ElevatorDown()
FlipperUp()
Elevator1()
RotatorPositive()
del l[0]
elif l[0] == "x" and l[1] == "'":
del l[0]
del l[0]
for n, i in enumerate(l):
if i == 'F':
l[n] = 'D'
elif i == 'U':
l[n] = 'F'
elif i == 'D':
l[n] = 'B'
elif i == 'B':
l[n] = 'U'
elif l[0] == "x" and l[1] == "2":
del l[0]
del l[0]
for n, i in enumerate(l):
if i == 'F':
l[n] = 'B'
elif i == 'U':
l[n] = 'D'
elif i == 'D':
l[n] = 'U'
elif i == 'B':
l[n] = 'F'
elif l[0] == "x":
del l[0]
for n, i in enumerate(l):
if i == 'F':
l[n] = 'U'
elif i == 'U':
l[n] = 'B'
elif i == 'D':
l[n] = 'F'
elif i == 'B':
l[n] = 'D'
elif l[0] == "y" and l[1] == "'":
del l[0]
del l[0]
for n, i in enumerate(l):
if i == 'F':
l[n] = 'R'
elif i == 'R':
l[n] = 'B'
elif i == 'L':
l[n] = 'F'
elif i == 'B':
l[n] = 'L'
elif l[0] == "y" and l[1] == "2":
del l[0]
del l[0]
for n, i in enumerate(l):
if i == 'F':
l[n] = 'B'
elif i == 'R':
l[n] = 'L'
elif i == 'L':
l[n] = 'R'
elif i == 'B':
l[n] = 'F'
elif l[0] == "y":
del l[0]
for n, i in enumerate(l):
if i == 'F':
l[n] = 'L'
elif i == 'R':
l[n] = 'F'
elif i == 'L':
l[n] = 'B'
elif i == 'B':
l[n] = 'R'
elif l[0] == "z" and l[1] == "'":
del l[0]
del l[0]
for n, i in enumerate(l):
if i == 'R':
l[n] = 'U'
elif i == 'U':
l[n] = 'L'
elif i == 'L':
l[n] = 'D'
elif i == 'D':
l[n] = 'R'
elif l[0] == "z" and l[1] == "2":
del l[0]
del l[0]
for n, i in enumerate(l):
if i == 'R':
l[n] = 'L'
elif i == 'U':
l[n] = 'D'
elif i == 'L':
l[n] = 'R'
elif i == 'D':
l[n] = 'U'
elif l[0] == "z":
del l[0]
for n, i in enumerate(l):
if i == 'R':
l[n] = 'D'
elif i == 'U':
l[n] = 'R'
elif i == 'L':
l[n] = 'U'
elif i == 'D':
l[n] = 'L'
elif l[0] == "Terminat!":
del l[0]
print("Cubul a fost rezolvat! Introdu alt cub si apasa 'Solve' pentru a-l rezolva!")
print(" ")
ElevatorDown()
FlipperDown()
FlipperUp()
status()
s1.write(b'h')
b3 = time.time()
t1 = b1 - a1
t2 = b2 - a2
t3 = b3 - a3
t = t1 + t2 + t3
if q == 1:
med = 0
else:
med = t3 / (q - 1)
print('Scanarea a durat ' + str(round(t1, 2)) + ' secunde.')
print('Analizarea imaginilor si cautarea solutiei a durat ' + str(round(t2, 2)) + ' secunde.')
print('Rezolvarea cubului a durat ' + str(round(t3, 2)) + ' secunde.')
print('Timp mediu pe mutare: ' + str(round(med, 2)) + ' secunde.')
print('Timp total: ' + str(round(t, 2)) + ' secunde.')
else:
i = i + 1
            print("Prea multe mutari: " + str(i))
return (t)
| 21.754554
| 135
| 0.385148
|
162053c27733768806f8dc45354942f7a5c0fba8
| 1,031
|
py
|
Python
|
SWIM-Executables/Windows/pyinstaller-2.0 for windows/buildtests/libraries/test_pyodbc.py
|
alexsigaras/SWIM
|
1a35df8acb26bdcb307a1b8f60e9feba68ed1715
|
[
"MIT"
] | 47
|
2020-03-08T08:43:28.000Z
|
2022-03-18T18:51:55.000Z
|
SWIM-Executables/Windows/pyinstaller-2.0 for windows/buildtests/libraries/test_pyodbc.py
|
alexsigaras/SWIM
|
1a35df8acb26bdcb307a1b8f60e9feba68ed1715
|
[
"MIT"
] | null | null | null |
SWIM-Executables/Windows/pyinstaller-2.0 for windows/buildtests/libraries/test_pyodbc.py
|
alexsigaras/SWIM
|
1a35df8acb26bdcb307a1b8f60e9feba68ed1715
|
[
"MIT"
] | 16
|
2020-03-08T08:43:30.000Z
|
2022-01-10T22:05:57.000Z
|
#
# Copyright (C) 2012, Martin Zibricky
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
# pyodbc is a binary Python module. On Windows when installed with easy_install
# it is installed as zipped Python egg. This binary module is extracted
# to PYTHON_EGG_CACHE directory. PyInstaller should find the binary there and
# include it with frozen executable.
import pyodbc
| 39.653846
| 79
| 0.774976
|
eebfc21f48d2bfe9b21efa4264c756a160a51112
| 486
|
py
|
Python
|
base/migrations/0003_auto_20211207_2325.py
|
SergioVzqz/Devmorize
|
e4e5ddaf6b8d0f66873e345e986661daae504cf7
|
[
"MIT"
] | null | null | null |
base/migrations/0003_auto_20211207_2325.py
|
SergioVzqz/Devmorize
|
e4e5ddaf6b8d0f66873e345e986661daae504cf7
|
[
"MIT"
] | null | null | null |
base/migrations/0003_auto_20211207_2325.py
|
SergioVzqz/Devmorize
|
e4e5ddaf6b8d0f66873e345e986661daae504cf7
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.8 on 2021-12-07 23:25
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('base', '0002_alter_task_options'),
]
operations = [
migrations.AlterModelOptions(
name='task',
options={'ordering': ['completed']},
),
migrations.RenameField(
model_name='task',
old_name='complete',
new_name='completed',
),
]
| 21.130435
| 48
| 0.557613
|
cbe0cfbd0de7186da4d1d64546c5f27e7c608d0b
| 736
|
py
|
Python
|
oops_fhir/r4/value_set/detected_issue_mitigation_action.py
|
Mikuana/oops_fhir
|
77963315d123756b7d21ae881f433778096a1d25
|
[
"MIT"
] | null | null | null |
oops_fhir/r4/value_set/detected_issue_mitigation_action.py
|
Mikuana/oops_fhir
|
77963315d123756b7d21ae881f433778096a1d25
|
[
"MIT"
] | null | null | null |
oops_fhir/r4/value_set/detected_issue_mitigation_action.py
|
Mikuana/oops_fhir
|
77963315d123756b7d21ae881f433778096a1d25
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from fhir.resources.valueset import ValueSet as _ValueSet
from oops_fhir.utils import ValueSet
from oops_fhir.r4.code_system.v3_act_code import v3ActCode
__all__ = ["DetectedIssueMitigationAction"]
_resource = _ValueSet.parse_file(Path(__file__).with_suffix(".json"))
class DetectedIssueMitigationAction(v3ActCode):
"""
Detected Issue Mitigation Action
Kinds of mitigating actions and observations that can be associated with
a detected issue or contraindication, such as 'added concurrent
therapy', 'prior therapy documented', etc.
Status: draft - Version: 4.0.1
http://hl7.org/fhir/ValueSet/detectedissue-mitigation-action
"""
class Meta:
resource = _resource
| 23.741935
| 76
| 0.764946
|
0abe0200fd3367a6edbbca2a09b6ec3245e0fab0
| 1,973
|
py
|
Python
|
python_files/import_and_normalize.py
|
AleksCipri/DeepMergeDomainAdaptation
|
2ffb6ce6ebdaae60a8cdc23c5a138dad3c47b4c8
|
[
"BSD-2-Clause"
] | 6
|
2020-06-15T16:22:07.000Z
|
2021-12-06T16:57:56.000Z
|
python_files/import_and_normalize.py
|
AleksCipri/DeepMergeDomainAdaptation
|
2ffb6ce6ebdaae60a8cdc23c5a138dad3c47b4c8
|
[
"BSD-2-Clause"
] | 17
|
2020-06-12T21:44:29.000Z
|
2022-02-10T03:36:02.000Z
|
python_files/import_and_normalize.py
|
AleksCipri/DeepMergeDomainAdaptation
|
2ffb6ce6ebdaae60a8cdc23c5a138dad3c47b4c8
|
[
"BSD-2-Clause"
] | 4
|
2020-06-23T18:32:34.000Z
|
2021-03-03T15:50:49.000Z
|
#Importing needed packages
import torch
import numpy as np
from torch.utils.data import Dataset, TensorDataset, DataLoader
import torchvision.transforms as transform
import matplotlib.pyplot as plt
def array_to_tensor(name):
data_list = list(np.load(str(name)))
return(torch.Tensor(data_list))
def normalization(t):
mean1 = t[:,0].mean().item()
mean2 = t[:,1].mean().item()
mean3 = t[:,2].mean().item()
std1 = t[:,0].std().item()
std2 = t[:,1].std().item()
std3 = t[:,2].std().item()
return np.array([[mean1, mean2, mean3], [std1, std2, std3]])
def update(t1, t2):
# find pixel means and stds for a given dataset
# Use this for regularr training without transfer learning
pristine = normalization(t1)
noisy = normalization(t2)
pr_trf = transform.Normalize(mean = pristine[0], std = pristine[1], inplace=True)
no_trf = transform.Normalize(mean = noisy[0], std = noisy[1], inplace=True)
## for transfer learning we have to normalize new images to old means and stds
## we just directly input these numbers here
# pristine = [[0.0375126, 0.03326255, 0.06786563],[1.30893517, 1.02839041, 1.12307501]]
# noisy = [[0.03749673, 0.03331003, 0.06772753],[1.37418461, 1.16330922, 1.19831419]]
# pr_trf = transform.Normalize(mean = pristine[0], std = pristine[1], inplace=True)
# no_trf = transform.Normalize(mean = noisy[0], std = noisy[1], inplace=True)
    for i in range(len(t1)):
        pr_trf(t1[i])
    for i in range(len(t2)):
        no_trf(t2[i])
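# --- Editor's sketch (not part of the original module) -------------------------------------
# Illustrates the transfer-learning path described in the comments above: new image tensors
# are normalized with fixed channel means/stds instead of their own statistics. `images` is a
# hypothetical (N, 3, H, W) float tensor; `transform` is the torchvision.transforms import
# already used in this module, and the default numbers are the z=2 values quoted below.
def normalize_with_fixed_stats(images,
                               mean=(0.0375126, 0.03326255, 0.06786563),
                               std=(1.30893517, 1.02839041, 1.12307501)):
    trf = transform.Normalize(mean=list(mean), std=list(std), inplace=True)
    for i in range(len(images)):
        trf(images[i])
    return images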
"""
Distant galaxies z=2:
pristine: mean [0.0375126  0.03326255 0.06786563], std [1.30893517 1.02839041 1.12307501]
noisy:    mean [0.03749673 0.03331003 0.06772753], std [1.37418461 1.16330922 1.19831419]
Nearby galaxies z=0 (0.25 time window dusty images) and SDSS (postmergers):
pristine: mean [0.60699224 0.63178635 0.56252038], std [1.8455255  1.84083951 1.4652102 ]
noisy:    mean [25.81263733 21.12583733 14.25884247], std [35.08432007 29.94885445 19.8165493 ]
"""
| 32.883333
| 89
| 0.687785
|
58876ceeae5c564b6b8aded4c396e99e0808d225
| 426
|
py
|
Python
|
python_trick/permutation_lib_largest_no.py
|
sandeepjindal/algos
|
e649c82bfb6b986c8462b09d28e07c2069e48792
|
[
"MIT"
] | 1
|
2019-09-10T17:45:58.000Z
|
2019-09-10T17:45:58.000Z
|
python_trick/permutation_lib_largest_no.py
|
sandeepjindal/algos
|
e649c82bfb6b986c8462b09d28e07c2069e48792
|
[
"MIT"
] | null | null | null |
python_trick/permutation_lib_largest_no.py
|
sandeepjindal/algos
|
e649c82bfb6b986c8462b09d28e07c2069e48792
|
[
"MIT"
] | null | null | null |
# Python3 implementation: the trick is to use
# itertools.permutations, as coded below:
from itertools import permutations
def largest(l):
lst = []
for i in permutations(l, len(l)):
# provides all permutations of the list values,
# store them in list to find max
lst.append("".join(map(str,i)))
return max(lst)
print(largest([54, 546, 548, 60])) #Output 6054854654
# This code is contributed by Raman Monga
| 25.058824
| 54
| 0.699531
|
68a03bb405a668c4b5ac5bead02c1e7f4aa7e571
| 2,951
|
py
|
Python
|
python/examples/example5B.py
|
JacekPierzchlewski/cosniwa
|
d7b86dd452ab3df06f42ac205c2d1bcfbae2f288
|
[
"BSD-2-Clause"
] | 2
|
2017-03-18T12:54:13.000Z
|
2017-05-02T09:51:17.000Z
|
python/examples/example5B.py
|
JacekPierzchlewski/cosniwa
|
d7b86dd452ab3df06f42ac205c2d1bcfbae2f288
|
[
"BSD-2-Clause"
] | null | null | null |
python/examples/example5B.py
|
JacekPierzchlewski/cosniwa
|
d7b86dd452ab3df06f42ac205c2d1bcfbae2f288
|
[
"BSD-2-Clause"
] | null | null | null |
#
# example5B.py [version 1.0]
# CoSniWa: COde SNIppet stopWAtch [Python port] - example 5B
#
# Example5B: Using Cosniwa with classes (version B)
#
# read more on: www.speedupcode.com
#
# (c) Jacek Pierzchlewski, 2017 jacek@pierzchlewski.com
# license: BSD-2-Clause.
#
try:
import cCosniwa as csw
except ImportError:
print("\nERROR: cCosniwa was not found! \n")
class Fibonacci():
"""
'Fibonacci': Class which computes n-th element
of the Fibonacci sequence.
"""
def __init__(self, iIndex_, Cosniwa_):
"""
INITIALISATION METHOD.
Arguments:
iIndex_: [int] Index of a class
Cosniwa_: [Cosniwa handle] Handle to Cosniwa object
"""
# Store the index of the class and the handle to Cosniwa
self.iIndex = iIndex_
self.hCsw = Cosniwa_
# Create a class name
self.strName = 'Fibonacci'
# Register the class in Cosniwa
self.iCswReg = csw.xreg_code(self.hCsw, self.strName)
def run(self, iNEl):
"""
run: COMPUTE THE n-TH ELEMENT OF THE FIBONACCI SEQUENCE.
Function computes the n-th element of the Fibonacci sequence by
iterating through all the sequence until n-th elements.
        It is assumed that the 1st (index 0) and the 2nd (index 1)
elements of the sequence equal 1.
Arguments:
            iNEl: [int] Index of the element to be computed
Returns:
iFibo: [int] n-th element of the Fibonacci sequence
"""
iFibo = 1
iFiboPrev = 1
iFiboPrevPrev = 1
# Start the Cosniwa stopwatch
csw.xcall_start(self.hCsw, self.iCswReg)
# 1st and 2nd element equals 1
if (iNEl < 2):
return 1
        # Loop until the n-th element
for inxFib in range(iNEl - 2):
iFiboPrevPrev = iFiboPrev
iFiboPrev = iFibo
iFibo = iFiboPrev + iFiboPrevPrev
# Stop the Cosniwa stopwatch
csw.xcall_stop(self.hCsw, self.iCswReg)
return iFibo
def main():
# Get handle to Cosniwa module
hCsw = csw.get_handle()
# Start the main CoSniWa time
csw.start()
# Generate 10 Fibonacci classes
f0 = Fibonacci(0, hCsw)
f1 = Fibonacci(1, hCsw)
f2 = Fibonacci(2, hCsw)
f3 = Fibonacci(3, hCsw)
f4 = Fibonacci(4, hCsw)
f5 = Fibonacci(5, hCsw)
f6 = Fibonacci(6, hCsw)
f7 = Fibonacci(7, hCsw)
f8 = Fibonacci(8, hCsw)
f9 = Fibonacci(9, hCsw)
# Compute Fibonacci sequences with different number of elements
f0.run(100000)
f1.run(90000)
f2.run(80000)
f3.run(70000)
f4.run(60000)
f5.run(50000)
f6.run(40000)
f7.run(30000)
f8.run(20000)
f9.run(1000)
# Stop the main CoSniWa time
csw.stop()
# Print out the timing results
csw.resultc()
if __name__ == '__main__':
main()
| 23.798387
| 72
| 0.596747
|
2141502c58faffe459b58c153fba8d8d2419825b
| 1,491
|
py
|
Python
|
Scapy Gui/Backend_Process.py
|
Ash-Shaun/PacketCrafter
|
f30d1ac664fdde1edd001e53cb8d33ac4207d6b7
|
[
"MIT"
] | null | null | null |
Scapy Gui/Backend_Process.py
|
Ash-Shaun/PacketCrafter
|
f30d1ac664fdde1edd001e53cb8d33ac4207d6b7
|
[
"MIT"
] | null | null | null |
Scapy Gui/Backend_Process.py
|
Ash-Shaun/PacketCrafter
|
f30d1ac664fdde1edd001e53cb8d33ac4207d6b7
|
[
"MIT"
] | null | null | null |
import netifaces
from scapy.all import *
from temp import text
IP_addr = ['172.18.0.1', '172.19.0.1', '172.17.0.1', '192.168.38.138', '127.0.0.1']
MAC = ['00:0c:29:79:14:67', '00:00:00:00:00:00', '02:42:2b:9e:1a:6d', '02:42:11:16:25:ea', 'ff:ff:ff:ff:ff:ff']
def getinterfaces():
iface = netifaces.interfaces()
return iface
def arp_send(ifc,IP,a):
if a is False:
success, fail = srp(Ether(dst = "ff:ff:ff:ff:ff:ff")/ARP(pdst = IP), timeout = 2, iface = ifc, inter = 0.1)
else:
success, fail = srp(Ether(dst="ff:ff:ff:ff:ff:ff") / ARP(pdst=IP, op = 2), timeout=2, iface=ifc, inter=0.1)
return success
def ip_header(ethsrc,ethdst,ipsrc,ipdst,ifc):
a = Ether(src = ethsrc, dst = ethdst)/IP(src = ipsrc, dst = ipdst)
success, fail = srp(a, iface= ifc, timeout= 2)
return success
def eth(source,dest,ifc):
success, fail= srp(Ether(src = source, dst= dest),iface= ifc, timeout=2)
return success
def transport_layer(protocol, portsrc, portdst, flag, payload, ipsrc, ipdes, ifc):
if protocol == "ICMP":
text = 'success, fail = srp(IP(src = "{}", dst = "{}")/ICMP(), timeout = 2, iface = "{}")'.format(ipsrc, ipdes, ifc)
elif protocol == "TCP":
        text = 'success, fail = srp(IP(src = "{}", dst = "{}")/TCP(sport = {}, dport = {}, flags = "{}"), timeout = 2, iface = "{}")'.format(ipsrc, ipdes, portsrc, portdst, flag, ifc)
elif protocol == "UDP":
text = 'success, fail = srp(IP()/UDP())'
return text
| 39.236842
| 180
| 0.599598
|
633e9e9dbafc775d697aa58bbe66d3cdf0942301
| 853
|
py
|
Python
|
Python/11.py
|
GeneralLi95/leetcode
|
f42392f2283e19ec76273d81b2912944f9039568
|
[
"MIT"
] | null | null | null |
Python/11.py
|
GeneralLi95/leetcode
|
f42392f2283e19ec76273d81b2912944f9039568
|
[
"MIT"
] | null | null | null |
Python/11.py
|
GeneralLi95/leetcode
|
f42392f2283e19ec76273d81b2912944f9039568
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# @Date : 2022/2/4
# @Filename : 11.py  Container With Most Water
# @Tag : greedy, two pointers, array
# @Author : LI YAO
# @Difficulty : Medium
from heapq import *
from typing import List, Optional
from collections import defaultdict, deque, Counter
from itertools import product,combinations,permutations,accumulate
from random import choice, randrange,randint
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
# -------------------------
class Solution:
def maxArea(self, height: List[int]) -> int:
l = 0
r = len(height) - 1
contain = 0
while l < r:
contain = max(contain, (r-l) * min(height[l], height[r]))
if height[l] <= height[r]:
l += 1
else:
r -= 1
return contain
# -------------------------
a = Solution()
b = [1,8,6,2,5,4,8,3,7]
print(a.maxArea(b))
| 21.871795
| 66
| 0.592028
|
cceac61f392a8c5fe59df2dc784746d80afdb17a
| 47,876
|
py
|
Python
|
pandas/tseries/period.py
|
vemel/pandas
|
1508491df35039ba2ca8b5c8ceecff28464c3bfe
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1
|
2021-02-16T10:01:48.000Z
|
2021-02-16T10:01:48.000Z
|
pandas/tseries/period.py
|
vemel/pandas
|
1508491df35039ba2ca8b5c8ceecff28464c3bfe
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
pandas/tseries/period.py
|
vemel/pandas
|
1508491df35039ba2ca8b5c8ceecff28464c3bfe
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
# pylint: disable=E1101,E1103,W0232
import operator
from datetime import datetime, date
import numpy as np
from pandas.core.base import PandasObject
from pandas.tseries.frequencies import (get_freq_code as _gfc,
_month_numbers, FreqGroup)
from pandas.tseries.index import DatetimeIndex, Int64Index, Index
from pandas.core.base import DatetimeIndexOpsMixin
from pandas.tseries.tools import parse_time_string
import pandas.tseries.frequencies as _freq_mod
import pandas.core.common as com
from pandas.core.common import (isnull, _INT64_DTYPE, _maybe_box,
_values_from_object)
from pandas import compat
from pandas.lib import Timestamp
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.algos as _algos
from pandas.compat import map, zip, u
#---------------
# Period logic
def _period_field_accessor(name, alias):
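    # Build a read-only property called `name` that extracts the field identified by the
    # integer code `alias` from a Period's ordinal via tslib.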
def f(self):
base, mult = _gfc(self.freq)
return tslib.get_period_field(alias, self.ordinal, base)
f.__name__ = name
return property(f)
def _field_accessor(name, alias):
def f(self):
base, mult = _gfc(self.freq)
return tslib.get_period_field_arr(alias, self.values, base)
f.__name__ = name
return property(f)
class Period(PandasObject):
"""
    Represents a period of time
Parameters
----------
value : Period or compat.string_types, default None
The time period represented (e.g., '4Q2005')
freq : str, default None
e.g., 'B' for businessday. Must be a singular rule-code (e.g. 5T is not
allowed).
year : int, default None
month : int, default 1
quarter : int, default None
day : int, default 1
hour : int, default 0
minute : int, default 0
second : int, default 0
"""
__slots__ = ['freq', 'ordinal']
_comparables = ['name','freqstr']
def __init__(self, value=None, freq=None, ordinal=None,
year=None, month=1, quarter=None, day=1,
hour=0, minute=0, second=0):
# freq points to a tuple (base, mult); base is one of the defined
# periods such as A, Q, etc. Every five minutes would be, e.g.,
# ('T', 5) but may be passed in as a string like '5T'
self.freq = None
# ordinal is the period offset from the gregorian proleptic epoch
self.ordinal = None
if ordinal is not None and value is not None:
            raise ValueError("Only value or ordinal should be given, "
                             "but not both")
elif ordinal is not None:
if not com.is_integer(ordinal):
raise ValueError("Ordinal must be an integer")
if freq is None:
raise ValueError('Must supply freq for ordinal value')
self.ordinal = ordinal
elif value is None:
if freq is None:
raise ValueError("If value is None, freq cannot be None")
self.ordinal = _ordinal_from_fields(year, month, quarter, day,
hour, minute, second, freq)
elif isinstance(value, Period):
other = value
if freq is None or _gfc(freq) == _gfc(other.freq):
self.ordinal = other.ordinal
freq = other.freq
else:
converted = other.asfreq(freq)
self.ordinal = converted.ordinal
elif com._is_null_datelike_scalar(value) or value in tslib._nat_strings:
self.ordinal = tslib.iNaT
if freq is None:
raise ValueError("If value is NaT, freq cannot be None "
"because it cannot be inferred")
elif isinstance(value, compat.string_types) or com.is_integer(value):
if com.is_integer(value):
value = str(value)
value = value.upper()
dt, _, reso = parse_time_string(value, freq)
if freq is None:
try:
freq = _freq_mod.Resolution.get_freq(reso)
except KeyError:
raise ValueError("Invalid frequency or could not infer: %s" % reso)
elif isinstance(value, datetime):
dt = value
if freq is None:
raise ValueError('Must supply freq for datetime value')
elif isinstance(value, date):
dt = datetime(year=value.year, month=value.month, day=value.day)
if freq is None:
raise ValueError('Must supply freq for datetime value')
else:
msg = "Value must be Period, string, integer, or datetime"
raise ValueError(msg)
base, mult = _gfc(freq)
if mult != 1:
# TODO: Better error message - this is slightly confusing
raise ValueError('Only mult == 1 supported')
if self.ordinal is None:
self.ordinal = tslib.period_ordinal(dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second, dt.microsecond, 0,
base)
self.freq = _freq_mod._get_freq_str(base)
def __eq__(self, other):
if isinstance(other, Period):
if other.freq != self.freq:
raise ValueError("Cannot compare non-conforming periods")
if self.ordinal == tslib.iNaT or other.ordinal == tslib.iNaT:
return False
return (self.ordinal == other.ordinal
and _gfc(self.freq) == _gfc(other.freq))
return NotImplemented
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self.ordinal, self.freq))
def __add__(self, other):
if com.is_integer(other):
if self.ordinal == tslib.iNaT:
ordinal = self.ordinal
else:
ordinal = self.ordinal + other
return Period(ordinal=ordinal, freq=self.freq)
else: # pragma: no cover
return NotImplemented
def __sub__(self, other):
if com.is_integer(other):
if self.ordinal == tslib.iNaT:
ordinal = self.ordinal
else:
ordinal = self.ordinal - other
return Period(ordinal=ordinal, freq=self.freq)
if isinstance(other, Period):
if other.freq != self.freq:
raise ValueError("Cannot do arithmetic with "
"non-conforming periods")
if self.ordinal == tslib.iNaT or other.ordinal == tslib.iNaT:
return Period(ordinal=tslib.iNaT, freq=self.freq)
return self.ordinal - other.ordinal
else: # pragma: no cover
return NotImplemented
def _comp_method(func, name):
def f(self, other):
if isinstance(other, Period):
if other.freq != self.freq:
raise ValueError("Cannot compare non-conforming periods")
if self.ordinal == tslib.iNaT or other.ordinal == tslib.iNaT:
return False
return func(self.ordinal, other.ordinal)
else:
raise TypeError(other)
f.__name__ = name
return f
__lt__ = _comp_method(operator.lt, '__lt__')
__le__ = _comp_method(operator.le, '__le__')
__gt__ = _comp_method(operator.gt, '__gt__')
__ge__ = _comp_method(operator.ge, '__ge__')
def asfreq(self, freq, how='E'):
"""
Convert Period to desired frequency, either at the start or end of the
interval
Parameters
----------
freq : string
how : {'E', 'S', 'end', 'start'}, default 'end'
Start or end of the timespan
Returns
-------
resampled : Period
"""
how = _validate_end_alias(how)
base1, mult1 = _gfc(self.freq)
base2, mult2 = _gfc(freq)
if mult2 != 1:
raise ValueError('Only mult == 1 supported')
end = how == 'E'
new_ordinal = tslib.period_asfreq(self.ordinal, base1, base2, end)
return Period(ordinal=new_ordinal, freq=base2)
@property
def start_time(self):
return self.to_timestamp(how='S')
@property
def end_time(self):
if self.ordinal == tslib.iNaT:
ordinal = self.ordinal
else:
ordinal = (self + 1).start_time.value - 1
return Timestamp(ordinal)
def to_timestamp(self, freq=None, how='start', tz=None):
"""
Return the Timestamp representation of the Period at the target
frequency at the specified end (how) of the Period
Parameters
----------
freq : string or DateOffset, default is 'D' if self.freq is week or
longer and 'S' otherwise
Target frequency
how: str, default 'S' (start)
'S', 'E'. Can be aliased as case insensitive
'Start', 'Finish', 'Begin', 'End'
Returns
-------
Timestamp
"""
how = _validate_end_alias(how)
if freq is None:
base, mult = _gfc(self.freq)
freq = _freq_mod.get_to_timestamp_base(base)
base, mult = _gfc(freq)
val = self.asfreq(freq, how)
dt64 = tslib.period_ordinal_to_dt64(val.ordinal, base)
return Timestamp(dt64, tz=tz)
year = _period_field_accessor('year', 0)
month = _period_field_accessor('month', 3)
day = _period_field_accessor('day', 4)
hour = _period_field_accessor('hour', 5)
minute = _period_field_accessor('minute', 6)
second = _period_field_accessor('second', 7)
weekofyear = _period_field_accessor('week', 8)
week = weekofyear
dayofweek = _period_field_accessor('dayofweek', 10)
weekday = dayofweek
dayofyear = _period_field_accessor('dayofyear', 9)
quarter = _period_field_accessor('quarter', 2)
qyear = _period_field_accessor('qyear', 1)
@classmethod
def now(cls, freq=None):
return Period(datetime.now(), freq=freq)
def __repr__(self):
base, mult = _gfc(self.freq)
formatted = tslib.period_format(self.ordinal, base)
freqstr = _freq_mod._reverse_period_code_map[base]
if not compat.PY3:
encoding = com.get_option("display.encoding")
formatted = formatted.encode(encoding)
return "Period('%s', '%s')" % (formatted, freqstr)
def __unicode__(self):
"""
Return a string representation for a particular DataFrame
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
base, mult = _gfc(self.freq)
formatted = tslib.period_format(self.ordinal, base)
value = ("%s" % formatted)
return value
def strftime(self, fmt):
"""
Returns the string representation of the :class:`Period`, depending
on the selected :keyword:`format`. :keyword:`format` must be a string
containing one or several directives. The method recognizes the same
directives as the :func:`time.strftime` function of the standard Python
distribution, as well as the specific additional directives ``%f``,
        ``%F``, ``%q``. (formatting & docs originally from scikits.timeseries)
+-----------+--------------------------------+-------+
| Directive | Meaning | Notes |
+===========+================================+=======+
| ``%a`` | Locale's abbreviated weekday | |
| | name. | |
+-----------+--------------------------------+-------+
| ``%A`` | Locale's full weekday name. | |
+-----------+--------------------------------+-------+
| ``%b`` | Locale's abbreviated month | |
| | name. | |
+-----------+--------------------------------+-------+
| ``%B`` | Locale's full month name. | |
+-----------+--------------------------------+-------+
| ``%c`` | Locale's appropriate date and | |
| | time representation. | |
+-----------+--------------------------------+-------+
| ``%d`` | Day of the month as a decimal | |
| | number [01,31]. | |
+-----------+--------------------------------+-------+
| ``%f`` | 'Fiscal' year without a | \(1) |
| | century as a decimal number | |
| | [00,99] | |
+-----------+--------------------------------+-------+
| ``%F`` | 'Fiscal' year with a century | \(2) |
| | as a decimal number | |
+-----------+--------------------------------+-------+
| ``%H`` | Hour (24-hour clock) as a | |
| | decimal number [00,23]. | |
+-----------+--------------------------------+-------+
| ``%I`` | Hour (12-hour clock) as a | |
| | decimal number [01,12]. | |
+-----------+--------------------------------+-------+
| ``%j`` | Day of the year as a decimal | |
| | number [001,366]. | |
+-----------+--------------------------------+-------+
| ``%m`` | Month as a decimal number | |
| | [01,12]. | |
+-----------+--------------------------------+-------+
| ``%M`` | Minute as a decimal number | |
| | [00,59]. | |
+-----------+--------------------------------+-------+
| ``%p`` | Locale's equivalent of either | \(3) |
| | AM or PM. | |
+-----------+--------------------------------+-------+
| ``%q`` | Quarter as a decimal number | |
| | [01,04] | |
+-----------+--------------------------------+-------+
| ``%S`` | Second as a decimal number | \(4) |
| | [00,61]. | |
+-----------+--------------------------------+-------+
| ``%U`` | Week number of the year | \(5) |
| | (Sunday as the first day of | |
| | the week) as a decimal number | |
| | [00,53]. All days in a new | |
| | year preceding the first | |
| | Sunday are considered to be in | |
| | week 0. | |
+-----------+--------------------------------+-------+
| ``%w`` | Weekday as a decimal number | |
| | [0(Sunday),6]. | |
+-----------+--------------------------------+-------+
| ``%W`` | Week number of the year | \(5) |
| | (Monday as the first day of | |
| | the week) as a decimal number | |
| | [00,53]. All days in a new | |
| | year preceding the first | |
| | Monday are considered to be in | |
| | week 0. | |
+-----------+--------------------------------+-------+
| ``%x`` | Locale's appropriate date | |
| | representation. | |
+-----------+--------------------------------+-------+
| ``%X`` | Locale's appropriate time | |
| | representation. | |
+-----------+--------------------------------+-------+
| ``%y`` | Year without century as a | |
| | decimal number [00,99]. | |
+-----------+--------------------------------+-------+
| ``%Y`` | Year with century as a decimal | |
| | number. | |
+-----------+--------------------------------+-------+
| ``%Z`` | Time zone name (no characters | |
| | if no time zone exists). | |
+-----------+--------------------------------+-------+
| ``%%`` | A literal ``'%'`` character. | |
+-----------+--------------------------------+-------+
.. note::
(1)
The ``%f`` directive is the same as ``%y`` if the frequency is
not quarterly.
Otherwise, it corresponds to the 'fiscal' year, as defined by
the :attr:`qyear` attribute.
(2)
The ``%F`` directive is the same as ``%Y`` if the frequency is
not quarterly.
Otherwise, it corresponds to the 'fiscal' year, as defined by
the :attr:`qyear` attribute.
(3)
The ``%p`` directive only affects the output hour field
if the ``%I`` directive is used to parse the hour.
(4)
The range really is ``0`` to ``61``; this accounts for leap
seconds and the (very rare) double leap seconds.
(5)
The ``%U`` and ``%W`` directives are only used in calculations
when the day of the week and the year are specified.
.. rubric:: Examples
>>> a = Period(freq='Q@JUL', year=2006, quarter=1)
>>> a.strftime('%F-Q%q')
'2006-Q1'
>>> # Output the last month in the quarter of this date
>>> a.strftime('%b-%Y')
'Oct-2005'
>>>
>>> a = Period(freq='D', year=2001, month=1, day=1)
>>> a.strftime('%d-%b-%Y')
        '01-Jan-2001'
>>> a.strftime('%b. %d, %Y was a %A')
'Jan. 01, 2001 was a Monday'
"""
base, mult = _gfc(self.freq)
return tslib.period_format(self.ordinal, base, fmt)
def _get_ordinals(data, freq):
f = lambda x: Period(x, freq=freq).ordinal
if isinstance(data[0], Period):
return tslib.extract_ordinals(data, freq)
else:
return lib.map_infer(data, f)
def dt64arr_to_periodarr(data, freq, tz):
if data.dtype != np.dtype('M8[ns]'):
raise ValueError('Wrong dtype: %s' % data.dtype)
base, mult = _gfc(freq)
return tslib.dt64arr_to_periodarr(data.view('i8'), base, tz)
# --- Period index sketch
def _period_index_cmp(opname, nat_result=False):
"""
    Wrap comparison operations to handle Period scalars, PeriodIndexes and
    period-like values, masking NaT in the result
"""
def wrapper(self, other):
if isinstance(other, Period):
func = getattr(self.values, opname)
if other.freq != self.freq:
raise AssertionError("Frequencies must be equal")
result = func(other.ordinal)
elif isinstance(other, PeriodIndex):
if other.freq != self.freq:
raise AssertionError("Frequencies must be equal")
result = getattr(self.values, opname)(other.values)
mask = (com.mask_missing(self.values, tslib.iNaT) |
com.mask_missing(other.values, tslib.iNaT))
if mask.any():
result[mask] = nat_result
return result
else:
other = Period(other, freq=self.freq)
func = getattr(self.values, opname)
result = func(other.ordinal)
if other.ordinal == tslib.iNaT:
result.fill(nat_result)
mask = self.values == tslib.iNaT
if mask.any():
result[mask] = nat_result
return result
return wrapper
class PeriodIndex(DatetimeIndexOpsMixin, Int64Index):
"""
Immutable ndarray holding ordinal values indicating regular periods in
time such as particular years, quarters, months, etc. A value of 1 is the
period containing the Gregorian proleptic datetime Jan 1, 0001 00:00:00.
This ordinal representation is from the scikits.timeseries project.
For instance,
# construct period for day 1/1/1 and get the first second
i = Period(year=1,month=1,day=1,freq='D').asfreq('S', 'S')
i.ordinal
===> 1
    Index keys are boxed to Period objects which carry the metadata (e.g.,
frequency information).
Parameters
----------
data : array-like (1-dimensional), optional
Optional period-like data to construct index with
dtype : NumPy dtype (default: i8)
copy : bool
Make a copy of input ndarray
freq : string or period object, optional
One of pandas period strings or corresponding objects
start : starting value, period-like, optional
If data is None, used as the start point in generating regular
period data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end value, period-like, optional
If periods is none, generated index will extend to first conforming
period on or just past end argument
year : int or array, default None
month : int or array, default None
quarter : int or array, default None
day : int or array, default None
hour : int or array, default None
minute : int or array, default None
second : int or array, default None
tz : object, default None
Timezone for converting datetime64 data to Periods
Examples
--------
>>> idx = PeriodIndex(year=year_arr, quarter=q_arr)
>>> idx2 = PeriodIndex(start='2000', end='2010', freq='A')
"""
_box_scalars = True
_allow_period_index_ops = True
__eq__ = _period_index_cmp('__eq__')
__ne__ = _period_index_cmp('__ne__', nat_result=True)
__lt__ = _period_index_cmp('__lt__')
__gt__ = _period_index_cmp('__gt__')
__le__ = _period_index_cmp('__le__')
__ge__ = _period_index_cmp('__ge__')
def __new__(cls, data=None, ordinal=None, freq=None, start=None, end=None,
periods=None, copy=False, name=None, year=None, month=None,
quarter=None, day=None, hour=None, minute=None, second=None,
tz=None):
freq = _freq_mod.get_standard_freq(freq)
if periods is not None:
if com.is_float(periods):
periods = int(periods)
elif not com.is_integer(periods):
raise ValueError('Periods must be a number, got %s' %
str(periods))
if data is None:
if ordinal is not None:
data = np.asarray(ordinal, dtype=np.int64)
else:
fields = [year, month, quarter, day, hour, minute, second]
data, freq = cls._generate_range(start, end, periods,
freq, fields)
else:
ordinal, freq = cls._from_arraylike(data, freq, tz)
data = np.array(ordinal, dtype=np.int64, copy=False)
subarr = data.view(cls)
subarr.name = name
subarr.freq = freq
return subarr
@classmethod
def _generate_range(cls, start, end, periods, freq, fields):
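        # A range can be built either from start/end/periods or from explicit field arrays,
        # never both; the checks below enforce that.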
field_count = com._count_not_none(*fields)
if com._count_not_none(start, end) > 0:
if field_count > 0:
raise ValueError('Can either instantiate from fields '
'or endpoints, but not both')
subarr, freq = _get_ordinal_range(start, end, periods, freq)
elif field_count > 0:
y, mth, q, d, h, minute, s = fields
subarr, freq = _range_from_fields(year=y, month=mth, quarter=q,
day=d, hour=h, minute=minute,
second=s, freq=freq)
else:
raise ValueError('Not enough parameters to construct '
'Period range')
return subarr, freq
@classmethod
def _from_arraylike(cls, data, freq, tz):
if not isinstance(data, np.ndarray):
if np.isscalar(data) or isinstance(data, Period):
raise ValueError('PeriodIndex() must be called with a '
'collection of some kind, %s was passed'
% repr(data))
# other iterable of some kind
if not isinstance(data, (list, tuple)):
data = list(data)
try:
data = com._ensure_int64(data)
if freq is None:
raise ValueError('freq not specified')
data = np.array([Period(x, freq=freq).ordinal for x in data],
dtype=np.int64)
except (TypeError, ValueError):
data = com._ensure_object(data)
if freq is None and len(data) > 0:
freq = getattr(data[0], 'freq', None)
if freq is None:
raise ValueError('freq not specified and cannot be '
'inferred from first element')
data = _get_ordinals(data, freq)
else:
if isinstance(data, PeriodIndex):
if freq is None or freq == data.freq:
freq = data.freq
data = data.values
else:
base1, _ = _gfc(data.freq)
base2, _ = _gfc(freq)
data = tslib.period_asfreq_arr(data.values, base1,
base2, 1)
else:
if freq is None and len(data) > 0:
freq = getattr(data[0], 'freq', None)
if freq is None:
raise ValueError('freq not specified and cannot be '
'inferred from first element')
if data.dtype != np.int64:
if np.issubdtype(data.dtype, np.datetime64):
data = dt64arr_to_periodarr(data, freq, tz)
else:
try:
data = com._ensure_int64(data)
except (TypeError, ValueError):
data = com._ensure_object(data)
data = _get_ordinals(data, freq)
return data, freq
@classmethod
def _simple_new(cls, values, name, freq=None, **kwargs):
result = values.view(cls)
result.name = name
result.freq = freq
return result
@property
def _na_value(self):
return self._box_func(tslib.iNaT)
def __contains__(self, key):
if not isinstance(key, Period) or key.freq != self.freq:
if isinstance(key, compat.string_types):
try:
self.get_loc(key)
return True
except Exception:
return False
return False
return key.ordinal in self._engine
@property
def _box_func(self):
return lambda x: Period(ordinal=x, freq=self.freq)
def asof_locs(self, where, mask):
"""
where : array of timestamps
mask : array of booleans where data is not NA
"""
where_idx = where
if isinstance(where_idx, DatetimeIndex):
where_idx = PeriodIndex(where_idx.values, freq=self.freq)
locs = self.values[mask].searchsorted(where_idx.values, side='right')
locs = np.where(locs > 0, locs - 1, 0)
result = np.arange(len(self))[mask].take(locs)
first = mask.argmax()
result[(locs == 0) & (where_idx.values < self.values[first])] = -1
return result
def _array_values(self):
return self.asobject
def astype(self, dtype):
dtype = np.dtype(dtype)
if dtype == np.object_:
return Index(np.array(list(self), dtype), dtype)
elif dtype == _INT64_DTYPE:
return Index(self.values, dtype)
raise ValueError('Cannot cast PeriodIndex to dtype %s' % dtype)
def __iter__(self):
for val in self.values:
yield Period(ordinal=val, freq=self.freq)
def searchsorted(self, key, side='left'):
if isinstance(key, compat.string_types):
key = Period(key, freq=self.freq).ordinal
return self.values.searchsorted(key, side=side)
@property
def is_all_dates(self):
return True
@property
def is_full(self):
"""
        Returns True if there are no missing periods from start to end
"""
if len(self) == 0:
return True
if not self.is_monotonic:
raise ValueError('Index is not monotonic')
values = self.values
return ((values[1:] - values[:-1]) < 2).all()
@property
def freqstr(self):
return self.freq
def asfreq(self, freq=None, how='E'):
how = _validate_end_alias(how)
freq = _freq_mod.get_standard_freq(freq)
base1, mult1 = _gfc(self.freq)
base2, mult2 = _gfc(freq)
if mult2 != 1:
raise ValueError('Only mult == 1 supported')
end = how == 'E'
new_data = tslib.period_asfreq_arr(self.values, base1, base2, end)
return self._simple_new(new_data, self.name, freq=freq)
def to_datetime(self, dayfirst=False):
return self.to_timestamp()
_year = _field_accessor('year', 0)
_month = _field_accessor('month', 3)
_day = _field_accessor('day', 4)
_hour = _field_accessor('hour', 5)
_minute = _field_accessor('minute', 6)
_second = _field_accessor('second', 7)
_weekofyear = _field_accessor('week', 8)
_week = _weekofyear
_dayofweek = _field_accessor('dayofweek', 10)
_weekday = _dayofweek
_dayofyear = day_of_year = _field_accessor('dayofyear', 9)
_quarter = _field_accessor('quarter', 2)
_qyear = _field_accessor('qyear', 1)
# Try to run function on index first, and then on elements of index
# Especially important for group-by functionality
def map(self, f):
try:
result = f(self)
if not isinstance(result, np.ndarray):
raise TypeError
return result
except Exception:
return _algos.arrmap_object(self.asobject, f)
def _get_object_array(self):
freq = self.freq
boxfunc = lambda x: Period(ordinal=x, freq=freq)
boxer = np.frompyfunc(boxfunc, 1, 1)
return boxer(self.values)
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return self._get_object_array()
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self.is_(other):
return True
return np.array_equal(self.asi8, other.asi8)
def to_timestamp(self, freq=None, how='start'):
"""
Cast to DatetimeIndex
Parameters
----------
freq : string or DateOffset, default 'D' for week or longer, 'S'
otherwise
Target frequency
how : {'s', 'e', 'start', 'end'}
Returns
-------
DatetimeIndex
"""
how = _validate_end_alias(how)
if freq is None:
base, mult = _gfc(self.freq)
freq = _freq_mod.get_to_timestamp_base(base)
base, mult = _gfc(freq)
new_data = self.asfreq(freq, how)
new_data = tslib.periodarr_to_dt64arr(new_data.values, base)
return DatetimeIndex(new_data, freq='infer', name=self.name)
def shift(self, n):
"""
        Specialized shift which produces a PeriodIndex
Parameters
----------
n : int
Periods to shift by
freq : freq string
Returns
-------
shifted : PeriodIndex
"""
mask = self.values == tslib.iNaT
values = self.values + n
values[mask] = tslib.iNaT
return PeriodIndex(data=values, name=self.name, freq=self.freq)
def __add__(self, other):
try:
return self.shift(other)
except TypeError:
# self.values + other raises TypeError for invalid input
return NotImplemented
def __sub__(self, other):
try:
return self.shift(-other)
except TypeError:
return NotImplemented
@property
def inferred_type(self):
# b/c data is represented as ints make sure we can't have ambiguous
# indexing
return 'period'
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
s = _values_from_object(series)
try:
return _maybe_box(self, super(PeriodIndex, self).get_value(s, key), series, key)
except (KeyError, IndexError):
try:
asdt, parsed, reso = parse_time_string(key, self.freq)
grp = _freq_mod._infer_period_group(reso)
freqn = _freq_mod._period_group(self.freq)
vals = self.values
# if our data is higher resolution than requested key, slice
if grp < freqn:
iv = Period(asdt, freq=(grp, 1))
ord1 = iv.asfreq(self.freq, how='S').ordinal
ord2 = iv.asfreq(self.freq, how='E').ordinal
if ord2 < vals[0] or ord1 > vals[-1]:
raise KeyError(key)
pos = np.searchsorted(self.values, [ord1, ord2])
key = slice(pos[0], pos[1] + 1)
return series[key]
elif grp == freqn:
key = Period(asdt, freq=self.freq).ordinal
return _maybe_box(self, self._engine.get_value(s, key), series, key)
else:
raise KeyError(key)
except TypeError:
pass
key = Period(key, self.freq).ordinal
return _maybe_box(self, self._engine.get_value(s, key), series, key)
def get_loc(self, key):
"""
Get integer location for requested label
Returns
-------
loc : int
"""
try:
return self._engine.get_loc(key)
except KeyError:
try:
asdt, parsed, reso = parse_time_string(key, self.freq)
key = asdt
except TypeError:
pass
key = Period(key, self.freq)
try:
return self._engine.get_loc(key.ordinal)
except KeyError:
raise KeyError(key)
def slice_locs(self, start=None, end=None):
"""
Index.slice_locs, customized to handle partial ISO-8601 string slicing
"""
if isinstance(start, compat.string_types) or isinstance(end, compat.string_types):
try:
if start:
start_loc = self._get_string_slice(start).start
else:
start_loc = 0
if end:
end_loc = self._get_string_slice(end).stop
else:
end_loc = len(self)
return start_loc, end_loc
except KeyError:
pass
if isinstance(start, datetime) and isinstance(end, datetime):
ordinals = self.values
t1 = Period(start, freq=self.freq)
t2 = Period(end, freq=self.freq)
left = ordinals.searchsorted(t1.ordinal, side='left')
right = ordinals.searchsorted(t2.ordinal, side='right')
return left, right
return Int64Index.slice_locs(self, start, end)
def _get_string_slice(self, key):
if not self.is_monotonic:
raise ValueError('Partial indexing only valid for '
'ordered time series')
key, parsed, reso = parse_time_string(key, self.freq)
grp = _freq_mod._infer_period_group(reso)
freqn = _freq_mod._period_group(self.freq)
if reso == 'year':
t1 = Period(year=parsed.year, freq='A')
elif reso == 'month':
t1 = Period(year=parsed.year, month=parsed.month, freq='M')
elif reso == 'quarter':
q = (parsed.month - 1) // 3 + 1
t1 = Period(year=parsed.year, quarter=q, freq='Q-DEC')
elif reso == 'day' and grp < freqn:
t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,
freq='D')
elif reso == 'hour' and grp < freqn:
t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,
hour=parsed.hour, freq='H')
elif reso == 'minute' and grp < freqn:
t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,
hour=parsed.hour, minute=parsed.minute, freq='T')
elif reso == 'second' and grp < freqn:
t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,
hour=parsed.hour, minute=parsed.minute, second=parsed.second,
freq='S')
else:
raise KeyError(key)
ordinals = self.values
t2 = t1.asfreq(self.freq, how='end')
t1 = t1.asfreq(self.freq, how='start')
left = ordinals.searchsorted(t1.ordinal, side='left')
right = ordinals.searchsorted(t2.ordinal, side='right')
return slice(left, right)
def join(self, other, how='left', level=None, return_indexers=False):
"""
See Index.join
"""
self._assert_can_do_setop(other)
result = Int64Index.join(self, other, how=how, level=level,
return_indexers=return_indexers)
if return_indexers:
result, lidx, ridx = result
return self._apply_meta(result), lidx, ridx
return self._apply_meta(result)
def _assert_can_do_setop(self, other):
if not isinstance(other, PeriodIndex):
raise ValueError('can only call with other PeriodIndex-ed objects')
if self.freq != other.freq:
raise ValueError('Only like-indexed PeriodIndexes compatible '
'for join (for now)')
def _wrap_union_result(self, other, result):
name = self.name if self.name == other.name else None
result = self._apply_meta(result)
result.name = name
return result
def _apply_meta(self, rawarr):
if not isinstance(rawarr, PeriodIndex):
rawarr = rawarr.view(PeriodIndex)
rawarr.freq = self.freq
return rawarr
def __getitem__(self, key):
"""Override numpy.ndarray's __getitem__ method to work as desired"""
arr_idx = self.view(np.ndarray)
if np.isscalar(key):
val = arr_idx[key]
return Period(ordinal=val, freq=self.freq)
else:
if com._is_bool_indexer(key):
key = np.asarray(key)
result = arr_idx[key]
if result.ndim > 1:
# MPL kludge
# values = np.asarray(list(values), dtype=object)
# return values.reshape(result.shape)
return PeriodIndex(result, name=self.name, freq=self.freq)
return PeriodIndex(result, name=self.name, freq=self.freq)
def _format_with_header(self, header, **kwargs):
return header + self._format_native_types(**kwargs)
def _format_native_types(self, na_rep=u('NaT'), **kwargs):
values = np.array(list(self), dtype=object)
mask = isnull(self.values)
values[mask] = na_rep
imask = ~mask
values[imask] = np.array([u('%s') % dt for dt in values[imask]])
return values.tolist()
def __array_finalize__(self, obj):
if not self.ndim: # pragma: no cover
return self.item()
self.freq = getattr(obj, 'freq', None)
self.name = getattr(obj, 'name', None)
self._reset_identity()
def _format_footer(self):
tagline = 'Length: %d, Freq: %s'
return tagline % (len(self), self.freqstr)
def take(self, indices, axis=None):
"""
Analogous to ndarray.take
"""
indices = com._ensure_platform_int(indices)
taken = self.values.take(indices, axis=axis)
return self._simple_new(taken, self.name, freq=self.freq)
def append(self, other):
"""
        Append a collection of Index objects together
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
"""
name = self.name
to_concat = [self]
if isinstance(other, (list, tuple)):
to_concat = to_concat + list(other)
else:
to_concat.append(other)
for obj in to_concat:
if isinstance(obj, Index) and obj.name != name:
name = None
break
to_concat = self._ensure_compat_concat(to_concat)
if isinstance(to_concat[0], PeriodIndex):
if len(set([x.freq for x in to_concat])) > 1:
# box
to_concat = [x.asobject for x in to_concat]
else:
cat_values = np.concatenate([x.values for x in to_concat])
return PeriodIndex(cat_values, freq=self.freq, name=name)
to_concat = [x.values if isinstance(x, Index) else x
for x in to_concat]
return Index(com._concat_compat(to_concat), name=name)
def __reduce__(self):
"""Necessary for making this object picklable"""
object_state = list(np.ndarray.__reduce__(self))
subclass_state = (self.name, self.freq)
object_state[2] = (object_state[2], subclass_state)
return tuple(object_state)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if len(state) == 2:
nd_state, own_state = state
np.ndarray.__setstate__(self, nd_state)
self.name = own_state[0]
try: # backcompat
self.freq = own_state[1]
except:
pass
else: # pragma: no cover
np.ndarray.__setstate__(self, state)
def _get_ordinal_range(start, end, periods, freq):
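    # Resolve the start/end/periods combination into a contiguous int64 array of
    # period ordinals, together with the frequency actually used.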
if com._count_not_none(start, end, periods) < 2:
raise ValueError('Must specify 2 of start, end, periods')
if start is not None:
start = Period(start, freq)
if end is not None:
end = Period(end, freq)
is_start_per = isinstance(start, Period)
is_end_per = isinstance(end, Period)
if is_start_per and is_end_per and start.freq != end.freq:
raise ValueError('Start and end must have same freq')
if ((is_start_per and start.ordinal == tslib.iNaT) or
(is_end_per and end.ordinal == tslib.iNaT)):
raise ValueError('Start and end must not be NaT')
if freq is None:
if is_start_per:
freq = start.freq
elif is_end_per:
freq = end.freq
else: # pragma: no cover
raise ValueError('Could not infer freq from start/end')
if periods is not None:
if start is None:
data = np.arange(end.ordinal - periods + 1,
end.ordinal + 1,
dtype=np.int64)
else:
data = np.arange(start.ordinal, start.ordinal + periods,
dtype=np.int64)
else:
data = np.arange(start.ordinal, end.ordinal + 1, dtype=np.int64)
return data, freq
def _range_from_fields(year=None, month=None, quarter=None, day=None,
hour=None, minute=None, second=None, freq=None):
if hour is None:
hour = 0
if minute is None:
minute = 0
if second is None:
second = 0
if day is None:
day = 1
ordinals = []
if quarter is not None:
if freq is None:
freq = 'Q'
base = FreqGroup.FR_QTR
else:
base, mult = _gfc(freq)
if mult != 1:
raise ValueError('Only mult == 1 supported')
if base != FreqGroup.FR_QTR:
raise AssertionError("base must equal FR_QTR")
year, quarter = _make_field_arrays(year, quarter)
for y, q in zip(year, quarter):
y, m = _quarter_to_myear(y, q, freq)
val = tslib.period_ordinal(y, m, 1, 1, 1, 1, 0, 0, base)
ordinals.append(val)
else:
base, mult = _gfc(freq)
if mult != 1:
raise ValueError('Only mult == 1 supported')
arrays = _make_field_arrays(year, month, day, hour, minute, second)
for y, mth, d, h, mn, s in zip(*arrays):
ordinals.append(tslib.period_ordinal(y, mth, d, h, mn, s, 0, 0, base))
return np.array(ordinals, dtype=np.int64), freq
def _make_field_arrays(*fields):
length = None
for x in fields:
if isinstance(x, (list, np.ndarray)):
if length is not None and len(x) != length:
raise ValueError('Mismatched Period array lengths')
elif length is None:
length = len(x)
arrays = [np.asarray(x) if isinstance(x, (np.ndarray, list))
else np.repeat(x, length) for x in fields]
return arrays
def _ordinal_from_fields(year, month, quarter, day, hour, minute,
second, freq):
base, mult = _gfc(freq)
if mult != 1:
raise ValueError('Only mult == 1 supported')
if quarter is not None:
year, month = _quarter_to_myear(year, quarter, freq)
return tslib.period_ordinal(year, month, day, hour, minute, second, 0, 0, base)
def _quarter_to_myear(year, quarter, freq):
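    # Map a (year, quarter) pair under the fiscal-year convention encoded in freq
    # (e.g. 'Q-DEC') to the calendar year and first month of that quarter.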
if quarter is not None:
if quarter <= 0 or quarter > 4:
raise ValueError('Quarter must be 1 <= q <= 4')
mnum = _month_numbers[_freq_mod._get_rule_month(freq)] + 1
month = (mnum + (quarter - 1) * 3) % 12 + 1
if month > mnum:
year -= 1
return year, month
def _validate_end_alias(how):
how_dict = {'S': 'S', 'E': 'E',
'START': 'S', 'FINISH': 'E',
'BEGIN': 'S', 'END': 'E'}
how = how_dict.get(str(how).upper())
if how not in set(['S', 'E']):
raise ValueError('How must be one of S or E')
return how
def pnow(freq=None):
return Period(datetime.now(), freq=freq)
def period_range(start=None, end=None, periods=None, freq='D', name=None):
"""
    Return a fixed frequency PeriodIndex, with day (calendar) as the default
frequency
Parameters
----------
    start : starting value, period-like, optional
    end : end value, period-like, optional
periods : int, default None
Number of periods in the index
freq : str/DateOffset, default 'D'
Frequency alias
name : str, default None
Name for the resulting PeriodIndex
Returns
-------
prng : PeriodIndex
"""
return PeriodIndex(start=start, end=end, periods=periods,
freq=freq, name=name)
| 35.781764
| 97
| 0.520741
|
e4efaaf075ac4b46c7729189645ae2dfba5ce561
| 3,252
|
py
|
Python
|
apps/users/tests/test_views.py
|
sgazda94/dj_schulx
|
c6d2086201b80bb007df34be6b623e3001bf7446
|
[
"MIT"
] | null | null | null |
apps/users/tests/test_views.py
|
sgazda94/dj_schulx
|
c6d2086201b80bb007df34be6b623e3001bf7446
|
[
"MIT"
] | 1
|
2022-03-28T22:19:01.000Z
|
2022-03-28T22:19:01.000Z
|
apps/users/tests/test_views.py
|
sgazda94/dj_schulx
|
c6d2086201b80bb007df34be6b623e3001bf7446
|
[
"MIT"
] | null | null | null |
import pytest
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.models import AnonymousUser
from django.contrib.messages.middleware import MessageMiddleware
from django.contrib.sessions.middleware import SessionMiddleware
from django.http import HttpRequest, HttpResponseRedirect
from django.test import RequestFactory
from django.urls import reverse
from apps.users.forms import UserAdminChangeForm
from apps.users.models import User
from apps.users.tests.factories import UserFactory
from apps.users.views import UserRedirectView, UserUpdateView, user_detail_view
pytestmark = pytest.mark.django_db
class TestUserUpdateView:
"""
TODO:
extracting view initialization code as class-scoped fixture
would be great if only pytest-django supported non-function-scoped
fixture db access -- this is a work-in-progress for now:
https://github.com/pytest-dev/pytest-django/pull/258
"""
def dummy_get_response(self, request: HttpRequest):
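        # Django middleware requires a get_response callable; only process_request is
        # exercised in these tests, so a no-op stand-in is enough.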
return None
def test_get_success_url(self, user: User, rf: RequestFactory):
view = UserUpdateView()
request = rf.get("/fake-url/")
request.user = user
view.request = request
assert view.get_success_url() == f"/users/{user.username}/"
def test_get_object(self, user: User, rf: RequestFactory):
view = UserUpdateView()
request = rf.get("/fake-url/")
request.user = user
view.request = request
assert view.get_object() == user
def test_form_valid(self, user: User, rf: RequestFactory):
view = UserUpdateView()
request = rf.get("/fake-url/")
# Add the session/message middleware to the request
SessionMiddleware(self.dummy_get_response).process_request(request)
MessageMiddleware(self.dummy_get_response).process_request(request)
request.user = user
view.request = request
# Initialize the form
form = UserAdminChangeForm()
form.cleaned_data = []
view.form_valid(form)
messages_sent = [m.message for m in messages.get_messages(request)]
assert messages_sent == ["Information successfully updated"]
class TestUserRedirectView:
def test_get_redirect_url(self, user: User, rf: RequestFactory):
view = UserRedirectView()
request = rf.get("/fake-url")
request.user = user
view.request = request
assert view.get_redirect_url() == f"/users/{user.username}/"
class TestUserDetailView:
def test_authenticated(self, user: User, rf: RequestFactory):
request = rf.get("/fake-url/")
request.user = UserFactory()
response = user_detail_view(request, username=user.username)
assert response.status_code == 200
def test_not_authenticated(self, user: User, rf: RequestFactory):
request = rf.get("/fake-url/")
request.user = AnonymousUser()
response = user_detail_view(request, username=user.username)
login_url = reverse(settings.LOGIN_URL)
assert isinstance(response, HttpResponseRedirect)
assert response.status_code == 302
assert response.url == f"{login_url}?next=/fake-url/"
| 32.848485
| 79
| 0.697417
|
24a67a56a2df6eb47bd8d9741e269c83e4f0035e
| 7,187
|
py
|
Python
|
airflow/providers/amazon/aws/transfers/redshift_to_s3.py
|
aliotta/airflow
|
bca097703233536b4d8e55cd500e9be14596fb7f
|
[
"Apache-2.0"
] | null | null | null |
airflow/providers/amazon/aws/transfers/redshift_to_s3.py
|
aliotta/airflow
|
bca097703233536b4d8e55cd500e9be14596fb7f
|
[
"Apache-2.0"
] | null | null | null |
airflow/providers/amazon/aws/transfers/redshift_to_s3.py
|
aliotta/airflow
|
bca097703233536b4d8e55cd500e9be14596fb7f
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Transfers data from AWS Redshift into a S3 Bucket."""
from typing import TYPE_CHECKING, Iterable, List, Mapping, Optional, Sequence, Union
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.redshift_sql import RedshiftSQLHook
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.providers.amazon.aws.utils.redshift import build_credentials_block
if TYPE_CHECKING:
from airflow.utils.context import Context
class RedshiftToS3Operator(BaseOperator):
"""
Executes an UNLOAD command to s3 as a CSV with headers
:param s3_bucket: reference to a specific S3 bucket
:type s3_bucket: str
:param s3_key: reference to a specific S3 key. If ``table_as_file_name`` is set
to False, this param must include the desired file name
:type s3_key: str
:param schema: reference to a specific schema in redshift database
Applicable when ``table`` param provided.
:type schema: str
:param table: reference to a specific table in redshift database
Used when ``select_query`` param not provided.
:type table: str
:param select_query: custom select query to fetch data from redshift database
:type select_query: str
:param redshift_conn_id: reference to a specific redshift database
:type redshift_conn_id: str
:param aws_conn_id: reference to a specific S3 connection
If the AWS connection contains 'aws_iam_role' in ``extras``
the operator will use AWS STS credentials with a token
https://docs.aws.amazon.com/redshift/latest/dg/copy-parameters-authorization.html#copy-credentials
:type aws_conn_id: str
:param verify: Whether or not to verify SSL certificates for S3 connection.
By default SSL certificates are verified.
You can provide the following values:
- ``False``: do not validate SSL certificates. SSL will still be used
(unless use_ssl is False), but SSL certificates will not be
verified.
- ``path/to/cert/bundle.pem``: A filename of the CA cert bundle to uses.
You can specify this argument if you want to use a different
CA cert bundle than the one used by botocore.
:type verify: bool or str
:param unload_options: reference to a list of UNLOAD options
:type unload_options: list
:param autocommit: If set to True it will automatically commit the UNLOAD statement.
Otherwise it will be committed right before the redshift connection gets closed.
:type autocommit: bool
:param include_header: If set to True the s3 file contains the header columns.
:type include_header: bool
:param parameters: (optional) the parameters to render the SQL query with.
:type parameters: dict or iterable
:param table_as_file_name: If set to True, the s3 file will be named as the table.
Applicable when ``table`` param provided.
:type table_as_file_name: bool
"""
template_fields: Sequence[str] = (
's3_bucket',
's3_key',
'schema',
'table',
'unload_options',
'select_query',
)
template_ext = ('.sql',)
template_fields_renderers = {'select_query': 'sql'}
ui_color = '#ededed'
def __init__(
self,
*,
s3_bucket: str,
s3_key: str,
schema: Optional[str] = None,
table: Optional[str] = None,
select_query: Optional[str] = None,
redshift_conn_id: str = 'redshift_default',
aws_conn_id: str = 'aws_default',
verify: Optional[Union[bool, str]] = None,
unload_options: Optional[List] = None,
autocommit: bool = False,
include_header: bool = False,
parameters: Optional[Union[Mapping, Iterable]] = None,
table_as_file_name: bool = True, # Set to True by default for not breaking current workflows
**kwargs,
) -> None:
super().__init__(**kwargs)
self.s3_bucket = s3_bucket
self.s3_key = f'{s3_key}/{table}_' if (table and table_as_file_name) else s3_key
self.schema = schema
self.table = table
self.redshift_conn_id = redshift_conn_id
self.aws_conn_id = aws_conn_id
self.verify = verify
self.unload_options = unload_options or [] # type: List
self.autocommit = autocommit
self.include_header = include_header
self.parameters = parameters
self.table_as_file_name = table_as_file_name
if select_query:
self.select_query = select_query
elif self.schema and self.table:
self.select_query = f"SELECT * FROM {self.schema}.{self.table}"
else:
raise ValueError(
'Please provide both `schema` and `table` params or `select_query` to fetch the data.'
)
if self.include_header and 'HEADER' not in [uo.upper().strip() for uo in self.unload_options]:
self.unload_options = list(self.unload_options) + [
'HEADER',
]
def _build_unload_query(
self, credentials_block: str, select_query: str, s3_key: str, unload_options: str
) -> str:
return f"""
UNLOAD ('{select_query}')
TO 's3://{self.s3_bucket}/{s3_key}'
credentials
'{credentials_block}'
{unload_options};
"""
def execute(self, context: 'Context') -> None:
redshift_hook = RedshiftSQLHook(redshift_conn_id=self.redshift_conn_id)
conn = S3Hook.get_connection(conn_id=self.aws_conn_id)
credentials_block = None
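        # Prefer an IAM role ARN stored in the connection extras; otherwise build a
        # credentials block from the keys/token resolved by the S3 hook.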
if conn.extra_dejson.get('role_arn', False):
credentials_block = f"aws_iam_role={conn.extra_dejson['role_arn']}"
else:
s3_hook = S3Hook(aws_conn_id=self.aws_conn_id, verify=self.verify)
credentials = s3_hook.get_credentials()
credentials_block = build_credentials_block(credentials)
unload_options = '\n\t\t\t'.join(self.unload_options)
unload_query = self._build_unload_query(
credentials_block, self.select_query, self.s3_key, unload_options
)
self.log.info('Executing UNLOAD command...')
redshift_hook.run(unload_query, self.autocommit, parameters=self.parameters)
self.log.info("UNLOAD command complete...")
| 42.526627
| 106
| 0.671212
|
b7bf5e39b22b9b1e49adfc0ed55a88092acb609b
| 806
|
py
|
Python
|
LeetCode/1-1000/101-200/101-125/108. Convert Sorted Array to Binary Search Tree/solution-python.py
|
adubois85/coding_challenge_websites
|
7867a05847a216661eff3b24b1cb1480fb7d3030
|
[
"Apache-2.0"
] | null | null | null |
LeetCode/1-1000/101-200/101-125/108. Convert Sorted Array to Binary Search Tree/solution-python.py
|
adubois85/coding_challenge_websites
|
7867a05847a216661eff3b24b1cb1480fb7d3030
|
[
"Apache-2.0"
] | null | null | null |
LeetCode/1-1000/101-200/101-125/108. Convert Sorted Array to Binary Search Tree/solution-python.py
|
adubois85/coding_challenge_websites
|
7867a05847a216661eff3b24b1cb1480fb7d3030
|
[
"Apache-2.0"
] | null | null | null |
from typing import List
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
# [1, 2, 3, 4, 5, 6, 7, 8, 9]
# 5
# 3 7
# 2 4 6 8
# 1 9
class Solution:
def sortedArrayToBST(self, nums: List[int]) -> TreeNode:
if not nums:
return None
end = len(nums)
mid = end//2
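        # Pick the middle element as the root so both halves feed height-balanced subtrees.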
temp = TreeNode()
        # go left (only when at least one element lies before mid)
if 0 < mid < end:
temp.left = self.sortedArrayToBST(nums[0:mid])
        # go right (the condition is an indirect check that at least one element lies after mid, i.e. mid + 1 < end)
if mid < (mid + end)//2 < end:
temp.right = self.sortedArrayToBST(nums[(mid + 1):end])
temp.val = nums[mid]
return temp
| 25.1875
| 67
| 0.485112
|
4253bbb40238e3f83ff6591d12a095d080034d2d
| 12,572
|
py
|
Python
|
python/ccxt/async_support/base/exchange.py
|
bitkub/ccxt
|
6bc708ba3d653c1f74fb7b72c705cb370dc6c498
|
[
"MIT"
] | 4
|
2021-01-10T09:14:17.000Z
|
2022-02-15T19:09:52.000Z
|
python/ccxt/async_support/base/exchange.py
|
bitkub/ccxt
|
6bc708ba3d653c1f74fb7b72c705cb370dc6c498
|
[
"MIT"
] | null | null | null |
python/ccxt/async_support/base/exchange.py
|
bitkub/ccxt
|
6bc708ba3d653c1f74fb7b72c705cb370dc6c498
|
[
"MIT"
] | 4
|
2021-06-02T16:40:35.000Z
|
2022-03-14T04:50:31.000Z
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
__version__ = '1.28.78'
# -----------------------------------------------------------------------------
import asyncio
import concurrent
import socket
import certifi
import aiohttp
import ssl
import sys
import yarl
# -----------------------------------------------------------------------------
from ccxt.async_support.base.throttle import throttle
# -----------------------------------------------------------------------------
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import RequestTimeout
from ccxt.base.errors import NotSupported
# -----------------------------------------------------------------------------
from ccxt.base.exchange import Exchange as BaseExchange
# -----------------------------------------------------------------------------
__all__ = [
'BaseExchange',
'Exchange',
]
# -----------------------------------------------------------------------------
class Exchange(BaseExchange):
def __init__(self, config={}):
if 'asyncio_loop' in config:
self.asyncio_loop = config['asyncio_loop']
self.asyncio_loop = self.asyncio_loop or asyncio.get_event_loop()
self.aiohttp_trust_env = config.get('aiohttp_trust_env', self.aiohttp_trust_env)
self.verify = config.get('verify', self.verify)
self.own_session = 'session' not in config
self.cafile = config.get('cafile', certifi.where())
super(Exchange, self).__init__(config)
self.init_rest_rate_limiter()
self.markets_loading = None
self.reloading_markets = False
def init_rest_rate_limiter(self):
self.throttle = throttle(self.extend({
'loop': self.asyncio_loop,
}, self.tokenBucket))
def __del__(self):
if self.session is not None:
self.logger.warning(self.id + " requires to release all resources with an explicit call to the .close() coroutine. If you are using the exchange instance with async coroutines, add exchange.close() to your code into a place when you're done with the exchange and don't need the exchange instance anymore (at the end of your async coroutine).")
if sys.version_info >= (3, 5):
async def __aenter__(self):
self.open()
return self
async def __aexit__(self, exc_type, exc, tb):
await self.close()
def open(self):
if self.own_session and self.session is None:
# Create our SSL context object with our CA cert file
context = ssl.create_default_context(cafile=self.cafile) if self.verify else self.verify
# Pass this SSL context to aiohttp and create a TCPConnector
connector = aiohttp.TCPConnector(ssl=context, loop=self.asyncio_loop)
self.session = aiohttp.ClientSession(loop=self.asyncio_loop, connector=connector, trust_env=self.aiohttp_trust_env)
async def close(self):
if self.session is not None:
if self.own_session:
await self.session.close()
self.session = None
async def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None):
"""A better wrapper over request for deferred signing"""
if self.enableRateLimit:
await self.throttle(self.rateLimit)
self.lastRestRequestTimestamp = self.milliseconds()
request = self.sign(path, api, method, params, headers, body)
return await self.fetch(request['url'], request['method'], request['headers'], request['body'])
async def fetch(self, url, method='GET', headers=None, body=None):
"""Perform a HTTP request and return decoded JSON data"""
request_headers = self.prepare_request_headers(headers)
url = self.proxy + url
if self.verbose:
self.print("\nRequest:", method, url, headers, body)
self.logger.debug("%s %s, Request: %s %s", method, url, headers, body)
request_body = body
encoded_body = body.encode() if body else None
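        # Keep the original body string for the error handlers further down; aiohttp needs bytes,
        # hence the separate encoded copy passed as the request data.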
self.open()
session_method = getattr(self.session, method.lower())
http_response = None
http_status_code = None
http_status_text = None
json_response = None
try:
async with session_method(yarl.URL(url, encoded=True),
data=encoded_body,
headers=request_headers,
timeout=(self.timeout / 1000),
proxy=self.aiohttp_proxy) as response:
http_response = await response.text()
http_status_code = response.status
http_status_text = response.reason
json_response = self.parse_json(http_response)
headers = response.headers
if self.enableLastHttpResponse:
self.last_http_response = http_response
if self.enableLastResponseHeaders:
self.last_response_headers = headers
if self.enableLastJsonResponse:
self.last_json_response = json_response
if self.verbose:
self.print("\nResponse:", method, url, http_status_code, headers, http_response)
self.logger.debug("%s %s, Response: %s %s %s", method, url, http_status_code, headers, http_response)
except socket.gaierror as e:
raise ExchangeNotAvailable(method + ' ' + url)
except concurrent.futures._base.TimeoutError as e:
raise RequestTimeout(method + ' ' + url)
except aiohttp.client_exceptions.ClientConnectionError as e:
raise ExchangeNotAvailable(method + ' ' + url)
except aiohttp.client_exceptions.ClientError as e: # base exception class
raise ExchangeError(method + ' ' + url)
self.handle_errors(http_status_code, http_status_text, url, method, headers, http_response, json_response, request_headers, request_body)
self.handle_rest_errors(http_status_code, http_status_text, http_response, url, method)
self.handle_rest_response(http_response, json_response, url, method)
if json_response is not None:
return json_response
if self.is_text_response(headers):
return http_response
return response.content
async def load_markets_helper(self, reload=False, params={}):
if not reload:
if self.markets:
if not self.markets_by_id:
return self.set_markets(self.markets)
return self.markets
currencies = None
if self.has['fetchCurrencies']:
currencies = await self.fetch_currencies()
markets = await self.fetch_markets(params)
return self.set_markets(markets, currencies)
async def load_markets(self, reload=False, params={}):
if (reload and not self.reloading_markets) or not self.markets_loading:
self.reloading_markets = True
coroutine = self.load_markets_helper(reload, params)
# coroutines can only be awaited once so we wrap it in a task
self.markets_loading = asyncio.ensure_future(coroutine)
try:
result = await self.markets_loading
except Exception as e:
self.reloading_markets = False
self.markets_loading = None
raise e
self.reloading_markets = False
return result
async def fetch_fees(self):
trading = {}
funding = {}
if self.has['fetchTradingFees']:
trading = await self.fetch_trading_fees()
if self.has['fetchFundingFees']:
funding = await self.fetch_funding_fees()
return {
'trading': trading,
'funding': funding,
}
async def load_fees(self, reload=False):
if not reload:
if self.loaded_fees != Exchange.loaded_fees:
return self.loaded_fees
self.loaded_fees = self.deep_extend(self.loaded_fees, await self.fetch_fees())
return self.loaded_fees
async def fetch_markets(self, params={}):
# markets are returned as a list
# currencies are returned as a dict
# this is for historical reasons
# and may be changed for consistency later
return self.to_array(self.markets)
async def fetch_currencies(self, params={}):
# markets are returned as a list
# currencies are returned as a dict
# this is for historical reasons
# and may be changed for consistency later
return self.currencies
async def fetch_status(self, params={}):
if self.has['fetchTime']:
updated = await self.fetch_time(params)
self.status['updated'] = updated
return self.status
async def fetch_order_status(self, id, symbol=None, params={}):
order = await self.fetch_order(id, symbol, params)
return order['status']
async def fetch_partial_balance(self, part, params={}):
balance = await self.fetch_balance(params)
return balance[part]
async def fetch_l2_order_book(self, symbol, limit=None, params={}):
orderbook = await self.fetch_order_book(symbol, limit, params)
return self.extend(orderbook, {
'bids': self.sort_by(self.aggregate(orderbook['bids']), 0, True),
'asks': self.sort_by(self.aggregate(orderbook['asks']), 0),
})
async def perform_order_book_request(self, market, limit=None, params={}):
raise NotSupported(self.id + ' performOrderBookRequest not supported yet')
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
orderbook = await self.perform_order_book_request(market, limit, params)
return self.parse_order_book(orderbook, market, limit, params)
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
if not self.has['fetchTrades']:
raise NotSupported('fetch_ohlcv() not implemented yet')
await self.load_markets()
trades = await self.fetch_trades(symbol, since, limit, params)
return self.build_ohlcv(trades, timeframe, since, limit)
async def fetchOHLCV(self, symbol, timeframe='1m', since=None, limit=None, params={}):
return await self.fetch_ohlcv(symbol, timeframe, since, limit, params)
async def fetch_full_tickers(self, symbols=None, params={}):
return await self.fetch_tickers(symbols, params)
async def edit_order(self, id, symbol, *args):
if not self.enableRateLimit:
raise ExchangeError('updateOrder() requires enableRateLimit = true')
await self.cancel_order(id, symbol)
return await self.create_order(symbol, *args)
async def fetch_trading_fees(self, params={}):
raise NotSupported('fetch_trading_fees() not supported yet')
async def fetch_trading_fee(self, symbol, params={}):
if not self.has['fetchTradingFees']:
raise NotSupported('fetch_trading_fee() not supported yet')
return await self.fetch_trading_fees(params)
async def load_trading_limits(self, symbols=None, reload=False, params={}):
if self.has['fetchTradingLimits']:
if reload or not('limitsLoaded' in list(self.options.keys())):
response = await self.fetch_trading_limits(symbols)
for i in range(0, len(symbols)):
symbol = symbols[i]
self.markets[symbol] = self.deep_extend(self.markets[symbol], response[symbol])
self.options['limitsLoaded'] = self.milliseconds()
return self.markets
async def load_accounts(self, reload=False, params={}):
if reload:
self.accounts = await self.fetch_accounts(params)
else:
if self.accounts:
return self.accounts
else:
self.accounts = await self.fetch_accounts(params)
self.accountsById = self.index_by(self.accounts, 'id')
return self.accounts
async def fetch_ticker(self, symbol, params={}):
raise NotSupported('fetch_ticker() not supported yet')
async def sleep(self, milliseconds):
return await asyncio.sleep(milliseconds / 1000)
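# A minimal usage sketch, assuming `exchange` is an instance of a concrete subclass that
# implements fetch_markets() and perform_order_book_request(); the symbol is hypothetical.
# It illustrates why load_markets() wraps its coroutine in a task: concurrent callers all
# await the same in-flight load instead of triggering duplicate requests.
async def _example_usage(exchange):
    await asyncio.gather(exchange.load_markets(), exchange.load_markets())
    orderbook = await exchange.fetch_l2_order_book('BTC/USDT', limit=10)
    await exchange.sleep(1000)  # argument is in milliseconds, per the helper above
    return orderbook['bids'][:3]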
| 41.906667
| 355
| 0.61287
|
637521a9f2c0bdad2a85184ebecf8113f817fd33
| 342
|
py
|
Python
|
108-Convert-Sorted-Array-to-Binary-Search-Tree/solution.py
|
alfmunny/leetcode
|
e35d2164c7e6e66410309fe1667ceab5a7689bef
|
[
"MIT"
] | null | null | null |
108-Convert-Sorted-Array-to-Binary-Search-Tree/solution.py
|
alfmunny/leetcode
|
e35d2164c7e6e66410309fe1667ceab5a7689bef
|
[
"MIT"
] | null | null | null |
108-Convert-Sorted-Array-to-Binary-Search-Tree/solution.py
|
alfmunny/leetcode
|
e35d2164c7e6e66410309fe1667ceab5a7689bef
|
[
"MIT"
] | null | null | null |
from typing import List
class TreeNode:
    # minimal stand-in for the TreeNode class that the LeetCode judge normally provides
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
class Solution:
def sortedArrayToBST(self, nums: List[int]) -> TreeNode:
if not nums:
return None
mid = len(nums)//2
root = TreeNode(nums[mid])
root.left = self.sortedArrayToBST(nums[:mid])
root.right = self.sortedArrayToBST(nums[mid+1:])
return root
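# A minimal sanity check for the construction above: an in-order traversal of the
# returned tree recovers the sorted input.
def _inorder(node):
    return _inorder(node.left) + [node.val] + _inorder(node.right) if node else []
print(_inorder(Solution().sortedArrayToBST([-10, -3, 0, 5, 9])))  # [-10, -3, 0, 5, 9]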
| 26.307692
| 60
| 0.54386
|
c46392868f58df9cdb2a918ee9de2cd8368e30b2
| 28
|
py
|
Python
|
sportsstats/tests/config.example.py
|
badfeatures/sports-stats
|
1bd953d2245008774943056c2e56e44d6dedd107
|
[
"MIT"
] | null | null | null |
sportsstats/tests/config.example.py
|
badfeatures/sports-stats
|
1bd953d2245008774943056c2e56e44d6dedd107
|
[
"MIT"
] | null | null | null |
sportsstats/tests/config.example.py
|
badfeatures/sports-stats
|
1bd953d2245008774943056c2e56e44d6dedd107
|
[
"MIT"
] | null | null | null |
app_key = ''
app_secret = ''
| 14
| 15
| 0.607143
|
f1b68a28c1926b02754a56cdfdb630eae271d1c2
| 6,861
|
py
|
Python
|
src/sagemaker/pytorch/model.py
|
matthewfollegot/sagemaker-python-sdk
|
5182be3c147e8d765208a3548a55df99e3013748
|
[
"Apache-2.0"
] | 1
|
2019-12-28T00:47:41.000Z
|
2019-12-28T00:47:41.000Z
|
src/sagemaker/pytorch/model.py
|
matthewfollegot/sagemaker-python-sdk
|
5182be3c147e8d765208a3548a55df99e3013748
|
[
"Apache-2.0"
] | null | null | null |
src/sagemaker/pytorch/model.py
|
matthewfollegot/sagemaker-python-sdk
|
5182be3c147e8d765208a3548a55df99e3013748
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Placeholder docstring"""
from __future__ import absolute_import
import logging
import pkg_resources
import sagemaker
from sagemaker.fw_utils import create_image_uri, model_code_key_prefix, python_deprecation_warning
from sagemaker.model import FrameworkModel, MODEL_SERVER_WORKERS_PARAM_NAME
from sagemaker.pytorch.defaults import PYTORCH_VERSION, PYTHON_VERSION
from sagemaker.predictor import RealTimePredictor, npy_serializer, numpy_deserializer
logger = logging.getLogger("sagemaker")
class PyTorchPredictor(RealTimePredictor):
"""A RealTimePredictor for inference against PyTorch Endpoints.
This is able to serialize Python lists, dictionaries, and numpy arrays to
multidimensional tensors for PyTorch inference.
"""
def __init__(self, endpoint_name, sagemaker_session=None):
"""Initialize an ``PyTorchPredictor``.
Args:
endpoint_name (str): The name of the endpoint to perform inference
on.
sagemaker_session (sagemaker.session.Session): Session object which
manages interactions with Amazon SageMaker APIs and any other
AWS services needed. If not specified, the estimator creates one
using the default AWS configuration chain.
"""
super(PyTorchPredictor, self).__init__(
endpoint_name, sagemaker_session, npy_serializer, numpy_deserializer
)
class PyTorchModel(FrameworkModel):
"""An PyTorch SageMaker ``Model`` that can be deployed to a SageMaker
``Endpoint``.
"""
__framework_name__ = "pytorch"
_LOWEST_MMS_VERSION = "1.2"
def __init__(
self,
model_data,
role,
entry_point,
image=None,
py_version=PYTHON_VERSION,
framework_version=PYTORCH_VERSION,
predictor_cls=PyTorchPredictor,
model_server_workers=None,
**kwargs
):
"""Initialize an PyTorchModel.
Args:
model_data (str): The S3 location of a SageMaker model data
``.tar.gz`` file.
role (str): An AWS IAM role (either name or full ARN). The Amazon
SageMaker training jobs and APIs that create Amazon SageMaker
endpoints use this role to access training data and model
artifacts. After the endpoint is created, the inference code
might use the IAM role, if it needs to access an AWS resource.
entry_point (str): Path (absolute or relative) to the Python source
file which should be executed as the entry point to model
hosting. This should be compatible with either Python 2.7 or
Python 3.5.
image (str): A Docker image URI (default: None). If not specified, a
default image for PyTorch will be used.
py_version (str): Python version you want to use for executing your
model training code (default: 'py3').
framework_version (str): PyTorch version you want to use for
executing your model training code.
predictor_cls (callable[str, sagemaker.session.Session]): A function
to call to create a predictor with an endpoint name and
SageMaker ``Session``. If specified, ``deploy()`` returns the
result of invoking this function on the created endpoint name.
model_server_workers (int): Optional. The number of worker processes
used by the inference server. If None, server will use one
worker per vCPU.
**kwargs: Keyword arguments passed to the ``FrameworkModel``
initializer.
.. tip::
You can find additional parameters for initializing this class at
:class:`~sagemaker.model.FrameworkModel` and
:class:`~sagemaker.model.Model`.
"""
super(PyTorchModel, self).__init__(
model_data, image, role, entry_point, predictor_cls=predictor_cls, **kwargs
)
if py_version == "py2":
logger.warning(python_deprecation_warning(self.__framework_name__))
self.py_version = py_version
self.framework_version = framework_version
self.model_server_workers = model_server_workers
def prepare_container_def(self, instance_type, accelerator_type=None):
"""Return a container definition with framework configuration set in
model environment variables.
Args:
instance_type (str): The EC2 instance type to deploy this Model to.
For example, 'ml.p2.xlarge'.
accelerator_type (str): The Elastic Inference accelerator type to
deploy to the instance for loading and making inferences to the
model. For example, 'ml.eia1.medium'.
Returns:
dict[str, str]: A container definition object usable with the
CreateModel API.
"""
lowest_mms_version = pkg_resources.parse_version(self._LOWEST_MMS_VERSION)
framework_version = pkg_resources.parse_version(self.framework_version)
is_mms_version = framework_version >= lowest_mms_version
deploy_image = self.image
if not deploy_image:
region_name = self.sagemaker_session.boto_session.region_name
framework_name = self.__framework_name__
if is_mms_version:
framework_name += "-serving"
deploy_image = create_image_uri(
region_name,
framework_name,
instance_type,
self.framework_version,
self.py_version,
accelerator_type=accelerator_type,
)
deploy_key_prefix = model_code_key_prefix(self.key_prefix, self.name, deploy_image)
self._upload_code(deploy_key_prefix, repack=is_mms_version)
deploy_env = dict(self.env)
deploy_env.update(self._framework_env_vars())
if self.model_server_workers:
deploy_env[MODEL_SERVER_WORKERS_PARAM_NAME.upper()] = str(self.model_server_workers)
return sagemaker.container_def(
deploy_image, self.repacked_model_data or self.model_data, deploy_env
)
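# A minimal deployment sketch, assuming AWS credentials and a SageMaker execution role are
# available; the S3 location, role ARN and entry point below are hypothetical.
if __name__ == "__main__":
    example_model = PyTorchModel(
        model_data="s3://my-bucket/model/model.tar.gz",
        role="arn:aws:iam::123456789012:role/SageMakerRole",
        entry_point="inference.py",
        framework_version="1.2",
    )
    predictor = example_model.deploy(initial_instance_count=1, instance_type="ml.c5.xlarge")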
| 42.092025
| 98
| 0.663752
|
739e6f943ea181c77e32bbe9c3200d8194424ecc
| 2,760
|
py
|
Python
|
tempest/api/network/base_routers.py
|
KiranPawar72/tempest
|
1fef3dd92b083055793065dd0693454735ec2c01
|
[
"Apache-2.0"
] | null | null | null |
tempest/api/network/base_routers.py
|
KiranPawar72/tempest
|
1fef3dd92b083055793065dd0693454735ec2c01
|
[
"Apache-2.0"
] | null | null | null |
tempest/api/network/base_routers.py
|
KiranPawar72/tempest
|
1fef3dd92b083055793065dd0693454735ec2c01
|
[
"Apache-2.0"
] | 1
|
2020-07-21T02:18:23.000Z
|
2020-07-21T02:18:23.000Z
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.network import base
class BaseRouterTest(base.BaseAdminNetworkTest):
# NOTE(salv-orlando): This class inherits from BaseAdminNetworkTest
# as some router operations, such as enabling or disabling SNAT
# require admin credentials by default
def _cleanup_router(self, router):
self.delete_router(router)
self.routers.remove(router)
def _create_router(self, name, admin_state_up=False,
external_network_id=None, enable_snat=None):
# associate a cleanup with created routers to avoid quota limits
router = self.create_router(name, admin_state_up,
external_network_id, enable_snat)
self.addCleanup(self._cleanup_router, router)
return router
def _delete_router(self, router_id, network_client=None):
client = network_client or self.client
client.delete_router(router_id)
# Asserting that the router is not found in the list
# after deletion
list_body = self.client.list_routers()
        routers_list = [router['id'] for router in list_body['routers']]
self.assertNotIn(router_id, routers_list)
def _add_router_interface_with_subnet_id(self, router_id, subnet_id):
interface = self.client.add_router_interface_with_subnet_id(
router_id, subnet_id)
self.addCleanup(self._remove_router_interface_with_subnet_id,
router_id, subnet_id)
self.assertEqual(subnet_id, interface['subnet_id'])
return interface
def _remove_router_interface_with_subnet_id(self, router_id, subnet_id):
body = self.client.remove_router_interface_with_subnet_id(
router_id, subnet_id)
self.assertEqual(subnet_id, body['subnet_id'])
def _remove_router_interface_with_port_id(self, router_id, port_id):
body = self.client.remove_router_interface_with_port_id(router_id,
port_id)
self.assertEqual(port_id, body['port_id'])
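# A minimal sketch of how a concrete test might build on the helpers above; the router
# name is hypothetical and the usual idempotent-id decorators are omitted.
class _ExampleRoutersTest(BaseRouterTest):
    def test_create_router(self):
        router = self._create_router('example-router', admin_state_up=True)
        self.assertTrue(router['admin_state_up'])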
| 43.125
| 78
| 0.68913
|
8463211945c4f1e45cd17dcc468233d447dee07b
| 1,155
|
py
|
Python
|
scripts/get_countour.py
|
jorenver/proyecto_investigacion_postes
|
7c483a0bad7359a890da8def566b301cdef584e4
|
[
"MIT"
] | null | null | null |
scripts/get_countour.py
|
jorenver/proyecto_investigacion_postes
|
7c483a0bad7359a890da8def566b301cdef584e4
|
[
"MIT"
] | null | null | null |
scripts/get_countour.py
|
jorenver/proyecto_investigacion_postes
|
7c483a0bad7359a890da8def566b301cdef584e4
|
[
"MIT"
] | null | null | null |
from osgeo import gdal
from skimage.morphology import erosion
from skimage.morphology import black_tophat, skeletonize, convex_hull_image
from skimage.morphology import disk
#import tifffile as tiff
import sys
from scipy import ndimage
import numpy as np
#filename = sys.argv[1]
#dst_filename=sys.argv[2]
filename="raster_intersects_recortado.tif"
dst_filename="raster_intersects_recortado_dist.tif"
raster =gdal.Open(filename)
#img = tiff.imread(filename)
band = raster.GetRasterBand(1)
img = band.ReadAsArray()
#tiff.imsave('test.tif',np.float32(img))
selem = disk(1)
eroded = erosion(img, selem)
distance_img = ndimage.distance_transform_edt(eroded)
#tiff.imsave('distance_img',np.float32(distance_img))
# georeference the image and set the projection
driver = gdal.GetDriverByName('GTiff')
y_pixels=len(distance_img)
x_pixels=len(distance_img[0])
dataset = driver.Create(dst_filename,x_pixels,y_pixels,1,gdal.GDT_Float32, )
transform= raster.GetGeoTransform()
proyection=raster.GetProjection()
dataset.SetProjection(proyection)
dataset.SetGeoTransform(transform)
dataset.GetRasterBand(1).WriteArray(distance_img)
dataset.FlushCache() # Write to disk.
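# A hedged note: restoring the commented-out sys.argv handling above lets the same
# erosion + distance-transform pipeline run on arbitrary rasters, e.g.
#   python get_countour.py input_mask.tif output_distance.tif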
| 33
| 76
| 0.813853
|
17cf9a0237b60b0fc0e1e7ee995da2cdaf985bf1
| 5,956
|
py
|
Python
|
extensions/donjayamanne.python-0.6.9/pythonFiles/preview/jedi/settings.py
|
Sunshengjin/RoboWare-Studio
|
aa2c309bd5e79f92ba7bf456ff86ad0573975c49
|
[
"BSD-3-Clause"
] | 239
|
2018-04-20T06:58:32.000Z
|
2022-03-22T18:06:08.000Z
|
extensions/donjayamanne.python-0.6.9/pythonFiles/preview/jedi/settings.py
|
Sunshengjin/RoboWare-Studio
|
aa2c309bd5e79f92ba7bf456ff86ad0573975c49
|
[
"BSD-3-Clause"
] | 10
|
2018-12-09T13:49:06.000Z
|
2021-07-03T00:38:53.000Z
|
extensions/donjayamanne.python-0.6.9/pythonFiles/preview/jedi/settings.py
|
Sunshengjin/RoboWare-Studio
|
aa2c309bd5e79f92ba7bf456ff86ad0573975c49
|
[
"BSD-3-Clause"
] | 99
|
2018-07-20T09:16:13.000Z
|
2022-03-20T11:58:56.000Z
|
"""
This module contains variables with global |jedi| settings. To change the
behavior of |jedi|, change the variables defined in :mod:`jedi.settings`.
Plugins should expose an interface so that the user can adjust the
configuration.
Example usage::
from jedi import settings
settings.case_insensitive_completion = True
Completion output
~~~~~~~~~~~~~~~~~
.. autodata:: case_insensitive_completion
.. autodata:: add_dot_after_module
.. autodata:: add_bracket_after_function
.. autodata:: no_completion_duplicates
Filesystem cache
~~~~~~~~~~~~~~~~
.. autodata:: cache_directory
.. autodata:: use_filesystem_cache
Parser
~~~~~~
.. autodata:: fast_parser
Dynamic stuff
~~~~~~~~~~~~~
.. autodata:: dynamic_array_additions
.. autodata:: dynamic_params
.. autodata:: dynamic_params_for_other_modules
.. autodata:: additional_dynamic_modules
.. autodata:: auto_import_modules
.. _settings-recursion:
Recursions
~~~~~~~~~~
Recursion settings are important if you don't want extremely
recursive Python code to go absolutely crazy. First of all, there is a
global limit, :data:`max_executions`. This limit is important because it caps
the maximum amount of time the completion may use.
The default values are based on experiments while completing the |jedi| library
itself (inception!). But I don't think there's any other Python library that
uses recursion in a similarly extreme way. These settings make the completion
definitely worse in some cases. But a completion should also be fast.
.. autodata:: max_until_execution_unique
.. autodata:: max_function_recursion_level
.. autodata:: max_executions_without_builtins
.. autodata:: max_executions
.. autodata:: max_dynamic_params_depth
.. autodata:: scale_call_signatures
Caching
~~~~~~~
.. autodata:: star_import_cache_validity
.. autodata:: call_signatures_validity
"""
import os
import platform
# ----------------
# completion output settings
# ----------------
case_insensitive_completion = True
"""
The completion is by default case insensitive.
"""
add_bracket_after_function = False
"""
Adds an opening bracket after a function, because that's normal behaviour.
Removed it again, because in VIM that is not very practical.
"""
no_completion_duplicates = True
"""
If set, completions with the same name don't appear in the output anymore,
but are in the `same_name_completions` attribute.
"""
# ----------------
# Filesystem cache
# ----------------
use_filesystem_cache = True
"""
Use filesystem cache to save once parsed files with pickle.
"""
if platform.system().lower() == 'windows':
_cache_directory = os.path.join(os.getenv('LOCALAPPDATA') or '~', 'Jedi',
'Jedi')
elif platform.system().lower() == 'darwin':
_cache_directory = os.path.join('~', 'Library', 'Caches', 'Jedi')
else:
_cache_directory = os.path.join(os.getenv('XDG_CACHE_HOME') or '~/.cache',
'jedi')
cache_directory = os.path.expanduser(_cache_directory)
"""
The path where all the caches can be found.
On Linux, this defaults to ``~/.cache/jedi/``, on OS X to
``~/Library/Caches/Jedi/`` and on Windows to ``%APPDATA%\\Jedi\\Jedi\\``.
On Linux, if environment variable ``$XDG_CACHE_HOME`` is set,
``$XDG_CACHE_HOME/jedi`` is used instead of the default one.
"""
# ----------------
# parser
# ----------------
fast_parser = True
"""
Use the fast parser. This means that reparsing is only being done if
something has been changed e.g. to a function. If this happens, only the
function is being reparsed.
"""
# ----------------
# dynamic stuff
# ----------------
dynamic_array_additions = True
"""
check for `append`, etc. on arrays: [], {}, () as well as list/set calls.
"""
dynamic_params = True
"""
A dynamic param completion, finds the callees of the function, which define
the params of a function.
"""
dynamic_params_for_other_modules = True
"""
Do the same for other modules.
"""
additional_dynamic_modules = []
"""
Additional modules in which |jedi| checks if statements are to be found. This
is practical for IDEs, that want to administrate their modules themselves.
"""
dynamic_flow_information = True
"""
Check for `isinstance` and other information to infer a type.
"""
auto_import_modules = [
'hashlib', # setattr
]
"""
Modules that are not analyzed but imported, although they contain Python code.
This improves autocompletion for libraries that use ``setattr`` or
``globals()`` modifications a lot.
"""
# ----------------
# recursions
# ----------------
max_until_execution_unique = 50
"""
This limit is probably the most important one, because if it is exceeded,
each function can only be executed once. New functions will still be
executed, but complex recursions that call the same functions again and
again are ignored.
"""
max_function_recursion_level = 5
"""
`max_function_recursion_level` is more about whether the recursions are
stopped in depth or in width. The ratio between this and
`max_until_execution_unique` is important here. It stops a recursion (after
the given number of function calls in the recursion) if it was already used
earlier.
"""
max_executions_without_builtins = 200
"""
.. todo:: Document this.
"""
max_executions = 250
"""
A maximum amount of time, the completion may use.
"""
scale_call_signatures = 0.1
"""
Because call_signatures is normally used on every single key hit, it has
to be faster than a normal completion. This is the factor that is used to
scale `max_executions` and `max_until_execution_unique`:
"""
# ----------------
# caching validity (time)
# ----------------
star_import_cache_validity = 60.0
"""
In huge packages like numpy, checking all star imports on every completion
might be slow, therefore we do a star import caching, that lasts a certain
time span (in seconds).
"""
call_signatures_validity = 3.0
"""
Finding function calls might be slow (0.1-0.5s). This is not acceptable for
normal writing. Therefore the result is cached for a short time.
"""
| 25.452991
| 79
| 0.711887
|
951b8bbc9e1b5a31b860216d4e7a0b1d13a0b732
| 4,865
|
py
|
Python
|
correlateSNPsMethylationPlusPlusRand.py
|
imk1/MethylationQTLCode
|
d193f518994074a29a39e081d470b1c67a95a527
|
[
"MIT"
] | null | null | null |
correlateSNPsMethylationPlusPlusRand.py
|
imk1/MethylationQTLCode
|
d193f518994074a29a39e081d470b1c67a95a527
|
[
"MIT"
] | null | null | null |
correlateSNPsMethylationPlusPlusRand.py
|
imk1/MethylationQTLCode
|
d193f518994074a29a39e081d470b1c67a95a527
|
[
"MIT"
] | null | null | null |
import gzip
import random
import scipy.stats
def makeIntDict(intDictFileName):
# Make a dictionary from pairs of ints in a file
intDictFile = open(intDictFileName)
intDict = {}
for line in intDictFile:
# Iterate through the lines of the int dictionary file and enter each into the dictionary
lineElements = line.strip().split("\t")
intDict[int(lineElements[0])] = int(lineElements[1])
intDictFile.close()
return intDict
def sufficientMinor(numFirst, numSecond, vecLen, minReadsCutoffSingleMinor, readsToMinMinorReads):
# Compute if there are enough reads with the minor allele or methylation state
if vecLen >= minReadsCutoffSingleMinor:
# Need only 1 minor allele or methylation state for a significant p-value to be possible
if (numFirst > 0) and (numSecond > 0):
# There are enough reads for a significant p-value to be possible
return True
else:
return False
else:
minMinorReads = readsToMinMinorReads[vecLen]
if (numFirst >= minMinorReads) and (numSecond >= minMinorReads):
# There are enough reads for a significant p-value to be possible
return True
return False
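# NOTE: readsToMinMinorReads is resolved from module scope inside this function; it is
# only defined when the script is run via the __main__ block at the bottom of the file.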
def outputCorrPlusPlusRand(SNP, methyl, SNPVec, methylVec, minReadsCutoff, minReadsCutoffSingleMinor, SNPMethylCorrsFile):
# Compute and output the correlation if the reads and MAF cutoffs are satisfied
vecLen = len(methylVec)
if vecLen >= minReadsCutoff:
# The minimum reads cutoff is satisfied
numRefAlleles = SNPVec.count(0)
numAltAlleles = SNPVec.count(1)
if sufficientMinor(numRefAlleles, numAltAlleles, vecLen, minReadsCutoffSingleMinor, readsToMinMinorReads) == True:
# Both alleles have sufficiently high numbers of reads
numMethyl = methylVec.count(1)
numUnmethyl = methylVec.count(0)
if sufficientMinor(numMethyl, numUnmethyl, vecLen, minReadsCutoffSingleMinor, readsToMinMinorReads) == True:
# C is methylated and unmethylated a sufficient fraction of the time
random.shuffle(methylVec) #Shuffles the methylation data
corr = scipy.stats.pearsonr(SNPVec, methylVec)[0] # REQUIRES SCIPY 12+ (scipy 8 maybe o.k.)
SNPMethylCorrsFile.write(SNP[0] + "\t" + str(SNP[1]) + "\t" + methyl[0] + "\t" + str(methyl[1]) + "\t" + str(corr) + "\n")
def correlateSNPsMethylationPlusPlusRand(SNPMethylFileName, SNPMethylCorrsFileName, readsToMinMinorReads):
# Compute the correlation between genotype and methylation
# ASSUMES THAT SNPMethylFile IS SORTED BY METHYLATION CHROM., METHYLATION POSITION, SNP CHROM., SNP POSITION
# SNPMethylCorrsFile will have the following information:
# 1. SNP chromosome
# 2. SNP position in chromosome
# 3. Methylation chromosome
# 4. Methylation position in chromosome
# 5. Correlation between genotype and methylation
# For each SNP, methylation location pair, there will be a line in SNPVecFile and a line in methylVecFile
# These lines will contain the SNP/methylation chromosome, position, and vector used in computing the correlation
# Reference allele will be recorded as 0; alternate allele will be recorded as 1
# Methylated will be recorded as 1; unmethylated will be recorded as 0
# Excludes SNP, methylation pairs that do not have at least the minimum number of reads with the minor allele and methylation status for their number of reads
SNPMethylFile = gzip.open(SNPMethylFileName, 'rb')
SNPMethylCorrsFile = open(SNPMethylCorrsFileName, 'wb')
minReadsCutoff = min(readsToMinMinorReads.keys())
minReadsCutoffSingleMinor = max(readsToMinMinorReads.keys()) + 1
lastSNP = ("", 0)
lastMethyl = ("", 0)
SNPVec = []
methylVec = []
random.seed()
for line in SNPMethylFile:
# Iterate through the lines of the SNP methylation file and compute the correlation for each SNP, C pair
lineElements = line.strip().split("\t")
currentSNP = (lineElements[1], int(lineElements[2]))
currentMethyl = (lineElements[4], int(lineElements[5]))
if (currentSNP != lastSNP) or (currentMethyl != lastMethyl):
# At a new SNP or methylation location, so find the correlation for the previous one
outputCorrPlusPlusRand(lastSNP, lastMethyl, SNPVec, methylVec, minReadsCutoff, minReadsCutoffSingleMinor, SNPMethylCorrsFile)
lastSNP = currentSNP
lastMethyl = currentMethyl
SNPVec = []
methylVec = []
SNPVec.append(int(lineElements[3]))
methylVec.append(int(lineElements[6]))
outputCorrPlusPlusRand(lastSNP, lastMethyl, SNPVec, methylVec, minReadsCutoff, minReadsCutoffSingleMinor, SNPMethylCorrsFile)
SNPMethylFile.close()
SNPMethylCorrsFile.close()
if __name__=="__main__":
import sys
import scipy
from scipy import stats
import gzip
import random
SNPMethylFileName = sys.argv[1] # Should end with .gz
SNPMethylCorrsFileName = sys.argv[2]
readsToMinMinorReadsFileName = sys.argv[3]
readsToMinMinorReads = makeIntDict(readsToMinMinorReadsFileName)
correlateSNPsMethylationPlusPlusRand(SNPMethylFileName, SNPMethylCorrsFileName, readsToMinMinorReads)
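# A hedged sketch of the expected inputs (file names are hypothetical):
# - the readsToMinMinorReads file is two tab-separated integers per line
#   (total reads -> minimum reads carrying the minor allele / methylation state);
# - the gzipped SNP/methylation file is tab-separated with SNP chrom/pos/allele in
#   columns 2-4 and methylation chrom/pos/state in columns 5-7 (1-based, column 1 unused).
#
#   python correlateSNPsMethylationPlusPlusRand.py SNPMethyl.tsv.gz corrs.tsv readsToMinMinorReads.tsv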
| 44.633028
| 159
| 0.767112
|
6311bc31c859f14edeea066eee01bbf396ebeff5
| 6,959
|
py
|
Python
|
datasets/dataset_prepare/models/counters/Res50_SCAR.py
|
Linfeng-Lee/IIM
|
c63bf8b023ccc6750e178112662972f721dcabe1
|
[
"MIT"
] | 81
|
2020-12-10T02:38:03.000Z
|
2022-03-23T04:27:39.000Z
|
datasets/dataset_prepare/models/counters/Res50_SCAR.py
|
Linfeng-Lee/IIM
|
c63bf8b023ccc6750e178112662972f721dcabe1
|
[
"MIT"
] | 29
|
2020-12-15T09:07:00.000Z
|
2022-03-22T10:00:28.000Z
|
datasets/dataset_prepare/models/counters/Res50_SCAR.py
|
Linfeng-Lee/IIM
|
c63bf8b023ccc6750e178112662972f721dcabe1
|
[
"MIT"
] | 24
|
2020-12-14T02:05:16.000Z
|
2022-03-10T01:26:54.000Z
|
import torch.nn as nn
import torch
from torchvision import models
import torch.nn.functional as F
from misc.utils import *
import pdb
# model_path = '../PyTorch_Pretrained/resnet101-5d3b4d8f.pth'
class Res50_SCAR(nn.Module):
def __init__(self, pretrained=True):
super(Res50_SCAR, self).__init__()
self.seen = 0
        self.backend_feat = [512, 512, 512, 256, 128, 64]
self.frontend = []
self.backend = make_layers(self.backend_feat,in_channels = 1024,dilation = True)
self.output_layer = SCAModule(64, 1)
# self.output_layer = nn.Sequential(nn.Conv2d(64, 1, kernel_size=1),nn.ReLU())
initialize_weights(self.modules())
res = models.resnet50(pretrained=pretrained)
self.frontend = nn.Sequential(
res.conv1, res.bn1, res.relu, res.maxpool, res.layer1, res.layer2
)
self.own_reslayer_3 = make_res_layer(Bottleneck, 256, 6, stride=1)
self.own_reslayer_3.load_state_dict(res.layer3.state_dict())
def forward(self,x):
x = self.frontend(x)
x = self.own_reslayer_3(x)
# pdb.set_trace()
x = self.backend(x)
x = self.output_layer(x)
x = F.interpolate(x,scale_factor=8, mode='nearest')
return x
def make_layers(cfg, in_channels = 3,batch_norm=False,dilation = False):
if dilation:
d_rate = 2
else:
d_rate = 1
layers = []
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=d_rate,dilation = d_rate)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
def make_res_layer(block, planes, blocks, stride=1):
downsample = None
inplanes=512
if stride != 1 or inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(inplanes, planes, stride, downsample))
inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(inplanes, planes))
return nn.Sequential(*layers)
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Conv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, NL='relu', same_padding=False, bn=True, bias=True):
super(Conv2d, self).__init__()
padding = int((kernel_size - 1) // 2) if same_padding else 0
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=padding, bias=bias)
self.bn = nn.BatchNorm2d(out_channels) if bn else None
if NL == 'relu' :
self.relu = nn.ReLU(inplace=True)
elif NL == 'prelu':
self.relu = nn.PReLU()
else:
self.relu = None
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
if self.relu is not None:
x = self.relu(x)
return x
class SCAModule(nn.Module):
def __init__(self, inn, out):
super(SCAModule, self).__init__()
base = inn // 4
self.conv_sa = nn.Sequential(Conv2d(inn, base, 3, same_padding=True, bias=False),
SAM(base),
Conv2d(base, base, 3, same_padding=True, bias=False)
)
self.conv_ca = nn.Sequential(Conv2d(inn, base, 3, same_padding=True, bias=False),
CAM(base),
Conv2d(base, base, 3, same_padding=True, bias=False)
)
self.conv_cat = Conv2d(base*2, out, 1, same_padding=True, bn=False)
def forward(self, x):
sa_feat = self.conv_sa(x)
ca_feat = self.conv_ca(x)
cat_feat = torch.cat((sa_feat,ca_feat),1)
cat_feat = self.conv_cat(cat_feat)
return cat_feat
class SAM(nn.Module):
def __init__(self, channel):
super(SAM, self).__init__()
self.para_lambda = nn.Parameter(torch.zeros(1))
self.query_conv = Conv2d(channel, channel//8, 1, NL='none')
self.key_conv = Conv2d(channel, channel//8, 1, NL='none')
self.value_conv = Conv2d(channel, channel, 1, NL='none')
def forward(self, x):
N, C, H, W = x.size()
proj_query = self.query_conv(x).view(N, -1, W*H).permute(0, 2, 1)
proj_key = self.key_conv(x).view(N, -1, W*H)
energy = torch.bmm(proj_query, proj_key)
attention = F.softmax(energy,dim=-1)
proj_value = self.value_conv(x).view(N, -1, W*H)
out = torch.bmm(proj_value, attention.permute(0, 2, 1))
out = out.view(N, C, H, W)
out = self.para_lambda*out + x
return out
class CAM(nn.Module):
def __init__(self, in_dim):
super(CAM, self).__init__()
self.para_mu = nn.Parameter(torch.zeros(1))
def forward(self,x):
N, C, H, W = x.size()
proj_query = x.view(N, C, -1)
proj_key = x.view(N, C, -1).permute(0, 2, 1)
energy = torch.bmm(proj_query, proj_key)
energy_new = torch.max(energy, -1, keepdim=True)[0].expand_as(energy)-energy
attention = F.softmax(energy,dim=-1)
proj_value = x.view(N, C, -1)
out = torch.bmm(attention, proj_value)
out = out.view(N, C, H, W)
out = self.para_mu*out + x
return out
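# A minimal smoke test, assuming the repo's misc.utils helpers imported above are on the
# path; pretrained=False avoids downloading torchvision weights.
if __name__ == "__main__":
    net = Res50_SCAR(pretrained=False)
    dummy = torch.randn(1, 3, 256, 256)
    density_map = net(dummy)
    print(density_map.shape)  # torch.Size([1, 1, 256, 256]) after the x8 upsampling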
| 32.671362
| 124
| 0.573215
|
9ec955f7d416dd1da673d13740cd90fc9e317029
| 732
|
py
|
Python
|
JumpScale9Portal/tools/codegentools/CodeGeneratorEveModel.py
|
Jumpscale/portal9
|
fd4c9718daf51b877a6a98bd0d1ff4bc0b272f67
|
[
"Apache-2.0"
] | 1
|
2017-06-07T08:12:09.000Z
|
2017-06-07T08:12:09.000Z
|
JumpScale9Portal/tools/codegentools/CodeGeneratorEveModel.py
|
Jumpscale/portal9
|
fd4c9718daf51b877a6a98bd0d1ff4bc0b272f67
|
[
"Apache-2.0"
] | 36
|
2017-05-18T10:54:44.000Z
|
2019-03-27T11:24:20.000Z
|
JumpScale9Portal/tools/codegentools/CodeGeneratorEveModel.py
|
Jumpscale/portal9
|
fd4c9718daf51b877a6a98bd0d1ff4bc0b272f67
|
[
"Apache-2.0"
] | 1
|
2018-06-12T05:18:01.000Z
|
2018-06-12T05:18:01.000Z
|
from js9 import j
from .CodeGeneratorBase import CodeGeneratorBase
class CodeGeneratorEveModel(CodeGeneratorBase):
def __init__(self, spec, typecheck=True, dieInGenCode=True, codepath=''):
CodeGeneratorBase.__init__(self, spec, typecheck, dieInGenCode)
self.type = "EveModel"
def generate(self):
properties = ''
for prop in self.spec.properties:
properties += self._generateProperty(prop)
schema = '''
%s = {
            'schema': {
%s
}
}
''' % (self.spec.name, properties)
return schema
def _generateProperty(self, prop):
result = "'%s': {" % prop.name
result += "'type': %s" % prop.type
result += '},'
return result
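# A minimal usage sketch, assuming `spec` is a parsed model spec exposing .name and
# .properties (each property with .name and .type), as required by CodeGeneratorBase.
def _example_generate(spec):
    generator = CodeGeneratorEveModel(spec, typecheck=True)
    return generator.generate()  # the Eve-style schema string for spec.name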
| 25.241379
| 77
| 0.603825
|
a421b654484cd3126340fff30868e0e6db5440ab
| 22,713
|
py
|
Python
|
setup2.py
|
OnApp/cdn-bsddb3-python
|
fbb1a877b652091ae0d060d835ec5bcf08595606
|
[
"BSD-3-Clause"
] | null | null | null |
setup2.py
|
OnApp/cdn-bsddb3-python
|
fbb1a877b652091ae0d060d835ec5bcf08595606
|
[
"BSD-3-Clause"
] | null | null | null |
setup2.py
|
OnApp/cdn-bsddb3-python
|
fbb1a877b652091ae0d060d835ec5bcf08595606
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
"""
Copyright (c) 2008-2020, Jesus Cea Avion <jcea@jcea.es>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
3. Neither the name of Jesus Cea Avion nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
"""
import os
import re
import sys
import glob
if (sys.version_info[0] < 3) and (sys.version_info >= (2, 6)) :
# Silence deprecation warnings during "setup"
import warnings
warnings.filterwarnings('ignore',
message='in 3.x, bsddb has been removed; ' \
'please use the pybsddb project instead',
category=DeprecationWarning)
warnings.filterwarnings('ignore',
message='in 3.x, the bsddb module has been removed; ' \
'please use the pybsddb project instead',
category=DeprecationWarning)
if sys.version_info[:2] == (2, 7) :
with warnings.catch_warnings() :
# Python 2.7.0
warnings.filterwarnings('ignore',
message='The CObject type is marked Pending Deprecation ' \
'in Python 2.7. Please use capsule objects instead.',
category=PendingDeprecationWarning)
# Python 2.7.1
warnings.filterwarnings('ignore',
message='CObject type is not supported in 3.x. ' \
'Please use capsule objects instead.',
category=DeprecationWarning)
try : # 'bsddb' library could be not available
import bsddb # Import the 2.7 version, that uses CObject
except ImportError :
pass
# setuptools warnings
warnings.filterwarnings('ignore',
message='tuple parameter unpacking has been removed in 3.x',
category=SyntaxWarning)
warnings.filterwarnings('ignore',
message='the dl module has been removed in Python 3.0; ' \
'use the ctypes module instead',
category=DeprecationWarning)
try :
from setuptools import setup, Extension
except :
from distutils.core import setup, Extension
from distutils.dep_util import newer
import distutils.ccompiler
# read the module version number out of the .c file
VERSION = None
_ver_re = re.compile('^#\s*define\s+PY_BSDDB_VERSION\s+"(\d+\.\d+\.\d+.*)"')
try:
_srcFile = open('Modules/bsddb.h', 'r')
except IOError:
print "Could not open module source to read the version number."
raise
for line in _srcFile.readlines():
m = _ver_re.match(line)
if m:
VERSION = m.group(1)
continue
# We close first, to avoid a "ResourceWarning"
_srcFile.close()
del _srcFile
del _ver_re
del m
if not VERSION:
raise RuntimeError, "could not find PY_BSDDB_VERSION in Modules/bsddb.h"
#----------------------------------------------------------------------
debug = '--debug' in sys.argv or '-g' in sys.argv
lflags_arg = []
if os.name == 'posix':
# Allow setting the DB dir and additional link flags either in
# the environment or on the command line.
# First check the environment...
BERKELEYDB_INCDIR = os.environ.get('BERKELEYDB_INCDIR', '')
BERKELEYDB_LIBDIR = os.environ.get('BERKELEYDB_LIBDIR', '')
BERKELEYDB_DIR = os.environ.get('BERKELEYDB_DIR', '')
LFLAGS = os.environ.get('LFLAGS', [])
LIBS = os.environ.get('LIBS', [])
# ...then the command line.
# Handle --berkeley-db=[PATH] and --lflags=[FLAGS]
args = sys.argv[:]
for arg in args:
if arg.startswith('--berkeley-db-incdir='):
BERKELEYDB_INCDIR = arg.split('=')[1]
sys.argv.remove(arg)
if arg.startswith('--berkeley-db-libdir='):
BERKELEYDB_LIBDIR = arg.split('=')[1]
sys.argv.remove(arg)
if arg.startswith('--berkeley-db='):
BERKELEYDB_DIR = arg.split('=')[1]
sys.argv.remove(arg)
elif arg.startswith('--lflags='):
LFLAGS = arg.split('=')[1].split()
sys.argv.remove(arg)
elif arg.startswith('--libs='):
LIBS = arg.split('=')[1].split()
sys.argv.remove(arg)
if LFLAGS or LIBS:
lflags_arg = LFLAGS + LIBS
# Supported Berkeley DB versions, in order of preference.
db_ver_list = ((6, 2), (6, 1),
(5, 3), (5, 1),
(4, 8), (4, 7))
db_ver = None
# If we were not told where it is, go looking for it.
dblib = 'db'
incdir = libdir = None
if not BERKELEYDB_DIR and not BERKELEYDB_LIBDIR and not BERKELEYDB_INCDIR:
# construct a list of paths to look for the header file in on
# top of the normal inc_dirs.
db_inc_paths = []
db_major = set()
bitness = ""
import platform
if (platform.architecture()[0] == "64bit") and \
(platform.platform(True, True).startswith("Solaris-")) and \
(not platform.uname()[3].startswith('joyent_')):
bitness = "/64"
for major, minor in db_ver_list :
if major not in db_major :
db_major.add(major)
db_inc_paths.extend([
'/usr/include/db%d%s' %(major, bitness),
'/usr/local/include/db%d%s' %(major, bitness),
'/opt/local/include/db%d%s' %(major, bitness),
'/opt/sfw/include/db%d%s' %(major, bitness),
'/sw/include/db%d%s' %(major, bitness),
'/usr/local/opt/berkeley-db@%s/include' % (major),
])
db_inc_paths.append('/usr/include/db%d%d%s' % \
(major, minor, bitness))
db_inc_paths.append('/usr/local/BerkeleyDB.%d.%d%s/include' % \
(major, minor, bitness))
db_inc_paths.append('/usr/local/include/db%d%d%s' % \
(major, minor, bitness))
db_inc_paths.append('/pkg/db-%d.%d%s/include' % \
(major, minor, bitness))
db_inc_paths.append('/opt/db-%d.%d%s/include' % \
(major, minor, bitness))
db_ver_inc_map = {}
class db_found(Exception): pass
try:
# this CCompiler object is only used to locate include files
compiler = distutils.ccompiler.new_compiler()
lib_dirs = compiler.library_dirs + [
'/lib64', '/usr/lib64',
'/lib', '/usr/lib',
'/usr/lib/x86_64-linux-gnu/',
]
inc_dirs = compiler.include_dirs + [
'/usr/include', '/usr/local/opt/berkeley-db/include',
]
# See whether there is a Oracle or Sleepycat header in the standard
# search path.
for d in inc_dirs + db_inc_paths:
f = os.path.join(d, "db.h")
if debug: print "db: looking for db.h in", f
if os.path.exists(f):
with open(f) as fichero :
f = fichero.read()
m = re.search(r"#define\WDB_VERSION_MAJOR\W(\d+)", f)
if m:
db_major = int(m.group(1))
m = re.search(r"#define\WDB_VERSION_MINOR\W(\d+)", f)
db_minor = int(m.group(1))
db_ver = (db_major, db_minor)
if ( (not db_ver_inc_map.has_key(db_ver)) and
(db_ver in db_ver_list) ):
# save the include directory with the db.h version
                        # (first occurrence only)
db_ver_inc_map[db_ver] = d
if debug: print "db.h: found", db_ver, "in", d
else:
# we already found a header for this library version
if debug: print "db.h: ignoring", d
else:
# ignore this header, it didn't contain a version number
if debug: print "db.h: unsupported version", db_ver, "in", d
db_found_vers = db_ver_inc_map.keys()
db_found_vers.sort()
while db_found_vers:
db_ver = db_found_vers.pop()
db_incdir = db_ver_inc_map[db_ver]
# check lib directories parallel to the location of the header
db_dirs_to_check = [
os.path.join(db_incdir, '..', 'lib64'),
os.path.join(db_incdir, '..', 'lib'),
os.path.join(db_incdir, '..', '..', 'lib64'),
os.path.join(db_incdir, '..', '..', 'lib'),
]
db_dirs_to_check = filter(os.path.isdir, db_dirs_to_check)
            # Look for a version-specific db-X.Y before an ambiguous dbX
# XXX should we -ever- look for a dbX name? Do any
# systems really not name their library by version and
# symlink to more general names?
for dblib in (
('db%d-%d.%d' % (db_ver[0], db_ver[0], db_ver[1])),
('db-%d.%d' % db_ver),
('db%d' % db_ver[0])
):
dblib_file = compiler.find_library_file(
db_dirs_to_check + lib_dirs, dblib )
if dblib_file:
db_libdir = os.path.abspath(os.path.dirname(dblib_file))
raise db_found
else:
if debug: print "db lib: ", dblib, "not found"
except db_found:
print "Found Berkeley DB %d.%d installation." % db_ver
print " include files in", db_incdir
print " library files in", db_libdir
print " library name is lib"+dblib
if debug: print "db: lib dir", db_libdir, "inc dir", db_incdir
incdir = db_incdir
libdir = db_libdir
else:
# this means Berkeley DB could not be found
pass
if BERKELEYDB_LIBDIR or BERKELEYDB_INCDIR:
libdir = BERKELEYDB_LIBDIR or None
incdir = BERKELEYDB_INCDIR or None
if not BERKELEYDB_DIR and not incdir and not libdir:
print "Can't find a local Berkeley DB installation."
print "(suggestion: try the --berkeley-db=/path/to/bsddb option)"
sys.exit(1)
# figure out from the base setting where the lib and .h are
if not incdir:
incdir = os.path.join(BERKELEYDB_DIR, 'include')
if not libdir:
libdir = os.path.join(BERKELEYDB_DIR, 'lib')
if not '-ldb' in LIBS:
libname = [dblib]
else:
if debug: print "LIBS already contains '-ldb' not adding our own", "'-l"+dblib+"'"
libname = []
utils = []
# Test if the old bsddb is built-in
static = 0
try:
# Possibly already imported in the "warning" section (python 2.7)
import bsddb
if str(bsddb).find('built-in') >= 0:
static = 1
except ImportError:
pass
# On Un*x, double check that no other built-in module pulls libdb in as a
# side-effect. TBD: how/what to do on other platforms?
fp = os.popen('ldd %s 2>&1' % sys.executable)
results = fp.read()
status = fp.close()
if not status and results.find('libdb.') >= 0:
static = 1
if static:
print """\
\aWARNING:
\tIt appears that the old bsddb module is statically linked in the
\tPython executable. This will cause various random problems for
\tbsddb3, up to and including segfaults. Please rebuild your
\tPython either with bsddb disabled, or with it built as a shared
\tdynamic extension. Watch out for other modules (e.g. dbm) that create
\tdependencies in the python executable to libdb as a side effect."""
st = raw_input("Build anyway? (yes/[no]) ")
if st != "yes":
sys.exit(1)
# read db.h to figure out what version of Berkeley DB this is
ver = None
with open(os.path.join(incdir, 'db.h'), 'r') as f :
db_h_lines = f.readlines()
db_ver_re = re.compile(
r'^#define\s+DB_VERSION_STRING\s.*Berkeley DB (\d+\.\d+).*')
db_ver2 = db_ver
if db_ver is None :
print "Trying to use the Berkeley DB you specified..."
for line in db_h_lines:
match = db_ver_re.match(line)
if not match:
continue
fullverstr = match.group(1)
ver = fullverstr[0] + fullverstr[2] # 31 == 3.1, 32 == 3.2, etc.
db_ver = (int(fullverstr[0]), int(fullverstr[2]))
if (db_ver2 is not None) and (db_ver != db_ver2) :
raise AssertionError("Detected Berkeley DB version is inconsistent")
if db_ver not in db_ver_list:
raise AssertionError("pybsddb untested with this Berkeley DB "
"version %d.%d" %db_ver)
print 'Detected Berkeley DB version %d.%d from db.h' %db_ver
elif os.name == 'nt':
# The default build of Berkeley DB for windows just leaves
# everything in the build dirs in the db source tree. That means
# that we either have to hunt around to find it, (which would be
# even more difficult than the mess above for Unix...) or we make
# the builder specify everything here. Compounding the problem is
# version numbers in default path names, and different library
# names for debug/release or dll/static.
#
# So to make things easier, I'm just going to expect that the DB stuff
# has been moved to the ./db directory. There's an updatedb.bat file to
# help.
#
# You'll need to edit the project file that comes with Berkeley DB so it
# uses "Multithreaded DLL" and "Debug Multithreaded DLL" (/MD and /MDd)
# settings as appropriate to build .lib file (the db_static project).
incdir = 'db/include'
libdir = 'db/lib'
# read db.h to figure out what version of Berkeley DB this is
ver = None
with open(os.path.join(incdir, 'db.h'), 'r') as f :
db_h_lines = f.readlines()
db_ver_re = re.compile(
r'^#define\s+DB_VERSION_STRING\s.*Berkeley DB (\d+\.\d+).*')
for line in db_h_lines:
match = db_ver_re.match(line)
if not match:
continue
fullverstr = match.group(1)
ver = fullverstr[0] + fullverstr[2] # 31 == 3.1, 32 == 3.2, etc.
db_ver = (int(fullverstr[0]), int(fullverstr[2]))
if db_ver not in db_ver_list:
raise AssertionError("pybsddb untested with this Berkeley DB "
"version %d.%d" %db_ver)
print 'Detected Berkeley DB version %d.%d from db.h' %db_ver
if debug:
libname = ['libdb%ssd' % ver] # Debug, static
else:
libname = ['libdb%ss' % ver] # Release, static
utils = [("bsddb3/utils",
["db/bin/db_archive.exe",
"db/bin/db_checkpoint.exe",
"db/bin/db_deadlock.exe",
"db/bin/db_dump.exe",
"db/bin/db_load.exe",
"db/bin/db_printlog.exe",
"db/bin/db_recover.exe",
"db/bin/db_stat.exe",
"db/bin/db_upgrade.exe",
"db/bin/db_verify.exe",
"db/bin/libdb%s.dll" % ver,
]),
("bsddb3/test", glob.glob("test/*.py"))
]
if (db_ver in ((6, 0), (6, 1), (6, 2))) and \
("YES_I_HAVE_THE_RIGHT_TO_USE_THIS_BERKELEY_DB_VERSION" not in os.environ) :
print (
"\n"
"******* COMPILATION ABORTED *******\n"
"\n"
"You are linking a Berkeley DB version licensed under "
"AGPL3 or have a commercial license.\n"
"\n"
"AGPL3 is a strong copyleft license and derivative "
"works must be equivalently licensed.\n"
"\n"
"You have two choices:\n"
"\n"
" 1. If your code is AGPL3 or you have a commercial Berkeley DB "
"license from Oracle, please, define the environment variable "
"'YES_I_HAVE_THE_RIGHT_TO_USE_THIS_BERKELEY_DB_VERSION' to "
"any value, and try to install this python library again.\n"
"\n"
" 2. In any other case, you have to link to a previous version "
"of Berkeley DB. Remove Berlekey DB version 6.x and let this "
"python library try to locate an older version of the "
"Berkeley DB library in your system. Alternatively, you can "
"define the environment variable 'BERKELEYDB_DIR', or "
"'BERKELEYDB_INCDIR' and 'BERKELEYDB_LIBDIR', with the path of "
"the Berkeley DB you want to use and try to install this "
"python library again.\n"
"\n"
"Sorry for the inconvenience. I am trying to protect you.\n"
"\n"
"More details:\n"
"\n"
" https://forums.oracle.com/message/11184885\n"
" http://lists.debian.org/debian-legal/2013/07/\n"
"\n"
"******* COMPILATION ABORTED *******\n"
)
sys.exit(1)
version_suffix = ""
if sys.version_info[0] > 2 :
version_suffix = "3"
# #############################
# Delete pkgsrc stale info
# This is something that eventually should not be necessary.
# XXX - jcea@jcea.es - 20170125
from distutils import sysconfig
# The same dictionary is always returned, so we abuse it
# and modify it in place.
config_vars = sysconfig.get_config_vars()
for k, v in list(config_vars.items()):
if isinstance(v, str) and ('/db4' in v):
j = ' '.join([i for i in v.split() if not i.endswith('/db4')])
config_vars[k] = j
del config_vars
# #############################
# do the actual build, install, whatever...
setup(name = 'bsddb3',
version = VERSION,
description = 'Python bindings for Oracle Berkeley DB',
long_description = """\
This module provides a nearly complete wrapping of the Oracle/Sleepycat C API
for the Database Environment, Database, Cursor, Log Cursor, Sequence and
Transaction objects, and each of these is exposed as a Python type in the
bsddb3.db module. The database objects can use various access methods: btree,
hash, recno, and queue. Complete support of Berkeley DB distributed
transactions. Complete support for Berkeley DB Replication Manager. Complete
support for Berkeley DB Base Replication. Support for RPC.
Please see the documents in the docs directory of the source distribution or at
the website for more details on the types and methods provided. The goal is to
mirror most of the real Berkeley DB API so fall back to the Oracle Berkeley DB
documentation as appropriate.
If you need to support ancient versions of Python and/or Berkeley DB, you can
use old releases of this bindings.
`Homepage <https://www.jcea.es/programacion/
pybsddb.htm>`__ --
`Releases (changelog) <https://www.jcea.es/programacion/
pybsddb.htm#Releases>`__ --
`Documentation <https://www.jcea.es/programacion/
pybsddb_doc/>`__ --
`Mailing List <https://mailman.jcea.es/listinfo/pybsddb>`__ --
`Donation <https://www.jcea.es/programacion/pybsddb_doc/donate.html>`__
""",
long_description_content_type = 'text/x-rst',
author = 'Jesus Cea, Robin Dunn, Gregory P. Smith, Andrew Kuchling, Barry Warsaw',
author_email = 'pybsddb@jcea.es',
url = 'https://www.jcea.es/programacion/pybsddb.htm',
license = "3-clause BSD License",
packages = ['bsddb3', 'bsddb3/tests'],
package_dir = {'bsddb3': 'Lib%s/bsddb' %version_suffix,
'bsddb3/tests': 'Lib%s/bsddb/test' %version_suffix},
ext_modules = [Extension('bsddb3._pybsddb',
sources = ['Modules/_bsddb.c'],
depends = ['Modules/bsddb.h'],
include_dirs = [ incdir ],
library_dirs = [ libdir ],
runtime_library_dirs = [ libdir ],
libraries = libname,
extra_link_args = lflags_arg,
)],
headers = ['Modules/bsddb.h'],
data_files = utils,
classifiers = [
'License :: OSI Approved :: BSD License',
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Natural Language :: English',
'Natural Language :: Spanish',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Database',
'Topic :: Software Development',
'Topic :: System :: Clustering',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
)
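# A hedged example of driving the configuration hooks parsed above; the Berkeley DB
# prefix is hypothetical:
#
#   BERKELEYDB_INCDIR=/opt/db-5.3/include BERKELEYDB_LIBDIR=/opt/db-5.3/lib python2 setup2.py build
#   python2 setup2.py --berkeley-db=/opt/db-5.3 build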
| 40.414591
| 90
| 0.578215
|
40718ee2ce5d24f893855b171b5090230a93f3d7
| 606
|
py
|
Python
|
samples/example_03/scenario_test.py
|
cair/deep-rts
|
7aa5dde0c5df10ae3a3d057e7b89641aec58e115
|
[
"MIT"
] | 144
|
2018-07-13T07:47:50.000Z
|
2022-03-31T06:29:50.000Z
|
samples/example_03/scenario_test.py
|
cair/DeepRTS
|
2ea4de0993ea0ca2677fdb36a172779db4ce7868
|
[
"MIT"
] | 18
|
2019-03-29T10:37:01.000Z
|
2022-03-02T12:47:34.000Z
|
samples/example_03/scenario_test.py
|
cair/DeepRTS
|
2ea4de0993ea0ca2677fdb36a172779db4ce7868
|
[
"MIT"
] | 23
|
2018-11-02T18:12:51.000Z
|
2022-02-15T20:32:18.000Z
|
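# NOTE: the imports are omitted in this sample; `Scenario` and the `python` game bindings
# are presumably provided by the DeepRTS package that ships this file.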
if __name__ == "__main__":
x = Scenario(
python.Game("15x15-2v2.json", n_players=2),
Scenario.GOLD_COLLECT(1),
Scenario.STONE_COLLECT(1),
Scenario.LUMBER_COLLECT(1),
Scenario.FOOD_CONSUMPTION(1),
Scenario.FOOD_COUNT(1),
Scenario.DAMAGE_DONE(1),
Scenario.DAMAGE_TAKEN(1),
Scenario.UNITS_CREATED(1),
Scenario.NUM_FOOTMAN(1),
Scenario.NUM_PEASANT(1),
Scenario.NUM_ARCHER(1),
Scenario.NUM_FARM(1),
Scenario.NUM_BARRACKS(1),
Scenario.NUM_TOWN_HALL(1)
)
print(x.evaluate())
| 23.307692
| 51
| 0.60231
|
cbf03c8f48fc87a8d28e12a9b82eb5c5a79b3687
| 10,987
|
py
|
Python
|
atom/nucleus/python/nucleus_api/models/node_relationship.py
|
ShekharPaatni/SDK
|
6534ffdb63af87c02c431df9add05a90370183cb
|
[
"Apache-2.0"
] | 11
|
2019-04-16T02:11:17.000Z
|
2021-12-16T22:51:40.000Z
|
atom/nucleus/python/nucleus_api/models/node_relationship.py
|
ShekharPaatni/SDK
|
6534ffdb63af87c02c431df9add05a90370183cb
|
[
"Apache-2.0"
] | 81
|
2019-11-19T23:24:28.000Z
|
2022-03-28T11:35:47.000Z
|
atom/nucleus/python/nucleus_api/models/node_relationship.py
|
ShekharPaatni/SDK
|
6534ffdb63af87c02c431df9add05a90370183cb
|
[
"Apache-2.0"
] | 11
|
2020-07-08T02:29:56.000Z
|
2022-03-28T10:05:33.000Z
|
# coding: utf-8
"""
Hydrogen Nucleus API
The Hydrogen Nucleus API # noqa: E501
OpenAPI spec version: 1.9.4
Contact: info@hydrogenplatform.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from nucleus_api.configuration import Configuration
class NodeRelationship(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'answer_id': 'str',
'create_date': 'datetime',
'decision_tree_id': 'str',
'id': 'str',
'is_leaf': 'bool',
'metadata': 'dict(str, str)',
'node_child_id': 'str',
'node_parent_id': 'str',
'secondary_id': 'str',
'update_date': 'datetime',
'value': 'str'
}
attribute_map = {
'answer_id': 'answer_id',
'create_date': 'create_date',
'decision_tree_id': 'decision_tree_id',
'id': 'id',
'is_leaf': 'is_leaf',
'metadata': 'metadata',
'node_child_id': 'node_child_id',
'node_parent_id': 'node_parent_id',
'secondary_id': 'secondary_id',
'update_date': 'update_date',
'value': 'value'
}
def __init__(self, answer_id=None, create_date=None, decision_tree_id=None, id=None, is_leaf=None, metadata=None, node_child_id=None, node_parent_id=None, secondary_id=None, update_date=None, value=None, _configuration=None): # noqa: E501
"""NodeRelationship - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._answer_id = None
self._create_date = None
self._decision_tree_id = None
self._id = None
self._is_leaf = None
self._metadata = None
self._node_child_id = None
self._node_parent_id = None
self._secondary_id = None
self._update_date = None
self._value = None
self.discriminator = None
self.answer_id = answer_id
if create_date is not None:
self.create_date = create_date
self.decision_tree_id = decision_tree_id
if id is not None:
self.id = id
if is_leaf is not None:
self.is_leaf = is_leaf
if metadata is not None:
self.metadata = metadata
if node_child_id is not None:
self.node_child_id = node_child_id
self.node_parent_id = node_parent_id
if secondary_id is not None:
self.secondary_id = secondary_id
if update_date is not None:
self.update_date = update_date
self.value = value
@property
def answer_id(self):
"""Gets the answer_id of this NodeRelationship. # noqa: E501
answer_id # noqa: E501
:return: The answer_id of this NodeRelationship. # noqa: E501
:rtype: str
"""
return self._answer_id
@answer_id.setter
def answer_id(self, answer_id):
"""Sets the answer_id of this NodeRelationship.
answer_id # noqa: E501
:param answer_id: The answer_id of this NodeRelationship. # noqa: E501
:type: str
"""
self._answer_id = answer_id
@property
def create_date(self):
"""Gets the create_date of this NodeRelationship. # noqa: E501
:return: The create_date of this NodeRelationship. # noqa: E501
:rtype: datetime
"""
return self._create_date
@create_date.setter
def create_date(self, create_date):
"""Sets the create_date of this NodeRelationship.
:param create_date: The create_date of this NodeRelationship. # noqa: E501
:type: datetime
"""
self._create_date = create_date
@property
def decision_tree_id(self):
"""Gets the decision_tree_id of this NodeRelationship. # noqa: E501
decision_tree_id # noqa: E501
:return: The decision_tree_id of this NodeRelationship. # noqa: E501
:rtype: str
"""
return self._decision_tree_id
@decision_tree_id.setter
def decision_tree_id(self, decision_tree_id):
"""Sets the decision_tree_id of this NodeRelationship.
decision_tree_id # noqa: E501
:param decision_tree_id: The decision_tree_id of this NodeRelationship. # noqa: E501
:type: str
"""
self._decision_tree_id = decision_tree_id
@property
def id(self):
"""Gets the id of this NodeRelationship. # noqa: E501
:return: The id of this NodeRelationship. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this NodeRelationship.
:param id: The id of this NodeRelationship. # noqa: E501
:type: str
"""
self._id = id
@property
def is_leaf(self):
"""Gets the is_leaf of this NodeRelationship. # noqa: E501
is_leaf # noqa: E501
:return: The is_leaf of this NodeRelationship. # noqa: E501
:rtype: bool
"""
return self._is_leaf
@is_leaf.setter
def is_leaf(self, is_leaf):
"""Sets the is_leaf of this NodeRelationship.
is_leaf # noqa: E501
:param is_leaf: The is_leaf of this NodeRelationship. # noqa: E501
:type: bool
"""
self._is_leaf = is_leaf
@property
def metadata(self):
"""Gets the metadata of this NodeRelationship. # noqa: E501
:return: The metadata of this NodeRelationship. # noqa: E501
:rtype: dict(str, str)
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this NodeRelationship.
:param metadata: The metadata of this NodeRelationship. # noqa: E501
:type: dict(str, str)
"""
self._metadata = metadata
@property
def node_child_id(self):
"""Gets the node_child_id of this NodeRelationship. # noqa: E501
nodeChildId # noqa: E501
:return: The node_child_id of this NodeRelationship. # noqa: E501
:rtype: str
"""
return self._node_child_id
@node_child_id.setter
def node_child_id(self, node_child_id):
"""Sets the node_child_id of this NodeRelationship.
nodeChildId # noqa: E501
:param node_child_id: The node_child_id of this NodeRelationship. # noqa: E501
:type: str
"""
self._node_child_id = node_child_id
@property
def node_parent_id(self):
"""Gets the node_parent_id of this NodeRelationship. # noqa: E501
nodeParentId # noqa: E501
:return: The node_parent_id of this NodeRelationship. # noqa: E501
:rtype: str
"""
return self._node_parent_id
@node_parent_id.setter
def node_parent_id(self, node_parent_id):
"""Sets the node_parent_id of this NodeRelationship.
nodeParentId # noqa: E501
:param node_parent_id: The node_parent_id of this NodeRelationship. # noqa: E501
:type: str
"""
self._node_parent_id = node_parent_id
@property
def secondary_id(self):
"""Gets the secondary_id of this NodeRelationship. # noqa: E501
:return: The secondary_id of this NodeRelationship. # noqa: E501
:rtype: str
"""
return self._secondary_id
@secondary_id.setter
def secondary_id(self, secondary_id):
"""Sets the secondary_id of this NodeRelationship.
:param secondary_id: The secondary_id of this NodeRelationship. # noqa: E501
:type: str
"""
self._secondary_id = secondary_id
@property
def update_date(self):
"""Gets the update_date of this NodeRelationship. # noqa: E501
:return: The update_date of this NodeRelationship. # noqa: E501
:rtype: datetime
"""
return self._update_date
@update_date.setter
def update_date(self, update_date):
"""Sets the update_date of this NodeRelationship.
:param update_date: The update_date of this NodeRelationship. # noqa: E501
:type: datetime
"""
self._update_date = update_date
@property
def value(self):
"""Gets the value of this NodeRelationship. # noqa: E501
value # noqa: E501
:return: The value of this NodeRelationship. # noqa: E501
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this NodeRelationship.
value # noqa: E501
:param value: The value of this NodeRelationship. # noqa: E501
:type: str
"""
if self._configuration.client_side_validation and value is None:
raise ValueError("Invalid value for `value`, must not be `None`") # noqa: E501
self._value = value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(NodeRelationship, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, NodeRelationship):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, NodeRelationship):
return True
return self.to_dict() != other.to_dict()
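A minimal usage sketch for the generated model above (not part of the swagger-codegen output): it builds a NodeRelationship from placeholder ids and shows the to_dict()/equality behaviour. It assumes the class and its Configuration dependency are importable from the generated client package.

example_node = NodeRelationship(
    answer_id="answer-123",
    decision_tree_id="tree-456",
    node_parent_id="parent-789",
    value="yes",
)
print(example_node.to_dict())   # plain dict keyed by the attribute names above
print(example_node == NodeRelationship(answer_id="answer-123", decision_tree_id="tree-456",
                                       node_parent_id="parent-789", value="yes"))   # True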
| 28.099744
| 243
| 0.596887
|
e1aab18d9cbeb6743c1b5f484171620f065fc3cc
| 4,349
|
py
|
Python
|
bootcamp/news/tests/test_views.py
|
nandkumar1996/sharebox-portal
|
1b4fb60c776d42271a03997ab47f4da67463ad91
|
[
"MIT"
] | 7
|
2019-11-25T10:43:31.000Z
|
2021-08-09T14:02:58.000Z
|
bootcamp/news/tests/test_views.py
|
nandkumar1996/sharebox-portal
|
1b4fb60c776d42271a03997ab47f4da67463ad91
|
[
"MIT"
] | 45
|
2020-03-29T20:01:38.000Z
|
2021-08-09T16:39:33.000Z
|
bootcamp/news/tests/test_views.py
|
nandkumar1996/sharebox-portal
|
1b4fb60c776d42271a03997ab47f4da67463ad91
|
[
"MIT"
] | 5
|
2020-03-30T15:33:42.000Z
|
2021-09-17T17:04:40.000Z
|
from django.test import Client
from django.urls import reverse
from test_plus.test import TestCase
from bootcamp.news.models import News
class NewsViewsTest(TestCase):
def setUp(self):
self.user = self.make_user("first_user")
self.other_user = self.make_user("second_user")
self.client = Client()
self.other_client = Client()
self.client.login(username="first_user", password="password")
self.other_client.login(username="second_user", password="password")
self.first_news = News.objects.create(
user=self.user,
content="This is a short content."
)
self.second_news = News.objects.create(
user=self.user,
content="This the second content."
)
self.third_news = News.objects.create(
user=self.other_user,
content="This is an answer to the first news.",
reply=True,
parent=self.first_news
)
def test_news_list(self):
response = self.client.get(reverse("news:list"))
assert response.status_code == 200
assert self.first_news in response.context["news_list"]
assert self.second_news in response.context["news_list"]
assert self.third_news not in response.context["news_list"]
def test_delete_news(self):
initial_count = News.objects.count()
response = self.client.post(
reverse("news:delete_news", kwargs={"pk": self.second_news.pk}))
assert response.status_code == 302
assert News.objects.count() == initial_count - 1
def test_post_news(self):
initial_count = News.objects.count()
response = self.client.post(
reverse("news:post_news"), {"post": "This a third element."},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
assert response.status_code == 200
assert News.objects.count() == initial_count + 1
def test_like_news(self):
response = self.client.post(
reverse("news:like_post"),
{"news": self.first_news.pk},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
assert response.status_code == 200
assert self.first_news.count_likers() == 1
assert self.user in self.first_news.get_likers()
assert response.json()["likes"] == 1
def test_thread(self):
response = self.client.get(
reverse("news:get_thread"),
{"news": self.first_news.pk},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
assert response.status_code == 200
assert response.json()["uuid"] == str(self.first_news.pk)
assert "This is a short content." in response.json()["news"]
assert "This is an answer to the first news." in response.json()["thread"]
def test_posting_comments(self):
response = self.client.post(
reverse("news:post_comments"),
{
"reply": "This a third element.",
"parent": self.second_news.pk
},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
assert response.status_code == 200
assert response.json()["comments"] == 1
def test_updating_interactions(self):
first_response = self.client.post(
reverse("news:like_post"),
{"news": self.first_news.pk},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
second_response = self.other_client.post(
reverse("news:like_post"),
{"news": self.first_news.pk},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
third_response = self.client.post(
reverse("news:post_comments"),
{
"reply": "This a third element.",
"parent": self.first_news.pk
},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
fourth_response = self.client.post(
reverse("news:update_interactions"),
{"id_value": self.first_news.pk},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
assert first_response.status_code == 200
assert second_response.status_code == 200
assert third_response.status_code == 200
assert fourth_response.status_code == 200
assert fourth_response.json()["likes"] == 2
assert fourth_response.json()["comments"] == 2
| 39.18018
| 82
| 0.614854
|
b8425713f8ad8d17daea34638d12335bb8f2be05
| 3,361
|
py
|
Python
|
g_python/hunitytools.py
|
luiz1n/G-Python
|
b5ae7d22cae1730193f7ec76dfd85dd1bf172f9e
|
[
"MIT"
] | null | null | null |
g_python/hunitytools.py
|
luiz1n/G-Python
|
b5ae7d22cae1730193f7ec76dfd85dd1bf172f9e
|
[
"MIT"
] | null | null | null |
g_python/hunitytools.py
|
luiz1n/G-Python
|
b5ae7d22cae1730193f7ec76dfd85dd1bf172f9e
|
[
"MIT"
] | null | null | null |
import threading
import time
from .gextension import Extension
from .hmessage import HMessage, Direction
from .hpacket import HPacket
from .hunityparsers import HUnityEntity, HFUnityFloorItem, HWallItem  # HWallItem assumed to be exported by hunityparsers; it is used by UnityRoomFurni below
class UnityRoomUsers:
def __init__(self, ext: Extension, users_in_room=28, get_guest_room=385, user_logged_out=29):
self.room_users = {}
self.__callback_new_users = None
self.__ext = ext
self.__lock = threading.Lock()
ext.intercept(Direction.TO_CLIENT, self.__load_room_users, users_in_room)
ext.intercept(Direction.TO_SERVER, self.__clear_room_users, get_guest_room)
ext.intercept(Direction.TO_CLIENT, self.__remove_user, user_logged_out)
def __remove_user(self, message: HMessage):
self.__start_remove_user_processing_thread(message.packet.read_int())
def __start_remove_user_processing_thread(self, index: int):
thread = threading.Thread(target=self.__process_remove_user, args=(index,))
thread.start()
def __process_remove_user(self, index: int):
self.__lock.acquire()
try:
if index in self.room_users:
del self.room_users[index]
finally:
self.__lock.release()
def __load_room_users(self, message: HMessage):
users = HUnityEntity.parse(message.packet)
self.__start_user_processing_thread(users)
if self.__callback_new_users is not None:
self.__callback_new_users(users)
def __process_users_in_room(self, entities):
self.__lock.acquire()
try:
for user in entities:
self.room_users[user.index] = user
finally:
self.__lock.release()
def __start_user_processing_thread(self, entities):
thread = threading.Thread(target=self.__process_users_in_room, args=(entities,))
thread.start()
def __clear_room_users(self, _):
self.__lock.acquire()
self.room_users.clear()
self.__lock.release()
def on_new_users(self, func):
self.__callback_new_users = func
class UnityRoomFurni:
def __init__(self, ext: Extension, floor_items=32, wall_items='RoomWallItems',
request='RequestRoomHeightmap'):
self.floor_furni = []
self.wall_furni = []
self.__callback_floor_furni = None
self.__callback_wall_furni = None
self.__ext = ext
self.__request_id = request
ext.intercept(Direction.TO_CLIENT, self.__floor_furni_load, floor_items)
ext.intercept(Direction.TO_CLIENT, self.__wall_furni_load, wall_items)
def __floor_furni_load(self, message):
self.floor_furni = HFUnityFloorItem.parse(message.packet)
if self.__callback_floor_furni is not None:
self.__callback_floor_furni(self.floor_furni)
def __wall_furni_load(self, message):
self.wall_furni = HWallItem.parse(message.packet)
if self.__callback_wall_furni is not None:
self.__callback_wall_furni(self.wall_furni)
def on_floor_furni_load(self, callback):
self.__callback_floor_furni = callback
def on_wall_furni_load(self, callback):
self.__callback_wall_furni = callback
def request(self):
self.floor_furni = []
self.wall_furni = []
self.__ext.send_to_server(HPacket(self.__request_id))
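A minimal wiring sketch for the two helpers above, assuming the usual G-Python entry point of Extension(extension_info, sys.argv) followed by ext.start(); the extension_info values are placeholders and the header ids are the defaults declared in the constructors.

import sys

if __name__ == "__main__":
    extension_info = {"title": "RoomTools", "description": "demo", "version": "1.0", "author": "example"}
    ext = Extension(extension_info, sys.argv)

    room_users = UnityRoomUsers(ext)       # registers its intercepts in the constructor
    room_users.on_new_users(lambda entities: print("entities loaded:", len(entities)))

    room_furni = UnityRoomFurni(ext)
    room_furni.on_floor_furni_load(lambda items: print("floor items:", len(items)))

    ext.start()
    room_furni.request()                   # once connected, clears the caches and asks the server to resend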
| 33.949495
| 97
| 0.688486
|
68b2275acf5a0f10a8e6207e731df3c332d5db8c
| 35,115
|
py
|
Python
|
ManagementService/python/deployWorkflow.py
|
chengyih001/knix_impl
|
9921f512a9a4fdad49d3f3ae77f4d26cce7ef619
|
[
"Apache-2.0"
] | null | null | null |
ManagementService/python/deployWorkflow.py
|
chengyih001/knix_impl
|
9921f512a9a4fdad49d3f3ae77f4d26cce7ef619
|
[
"Apache-2.0"
] | null | null | null |
ManagementService/python/deployWorkflow.py
|
chengyih001/knix_impl
|
9921f512a9a4fdad49d3f3ae77f4d26cce7ef619
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 The KNIX Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import base64
import hashlib
import os
import time
import traceback
import docker
from docker.types import LogConfig
import requests
import time
import random
WF_TYPE_SAND = 0
WF_TYPE_ASL = 1
def is_asl_workflow(wfobj):
return 'StartAt' in wfobj and 'States' in wfobj and isinstance(wfobj['States'], dict)
def check_workflow_functions(wf_type, wfobj, email, sapi):
# this function does sanity checking:
# check whether all resources/functions referred in the workflow have been uploaded
success = True
errmsg = ""
wfexit = "end"
resource_names_to_be_checked = {}
if wf_type == WF_TYPE_ASL:
wf_state_map = wfobj['States']
for state_names in list(wf_state_map.keys()):
if wf_state_map[state_names]["Type"] == "Parallel":
parallelStateName = state_names
for branches in wf_state_map[parallelStateName]['Branches']:
for state in list(branches['States'].keys()): # state is the key
wf_state_map[state] = branches['States'][state] # add the state to the state map root
if wf_state_map[state_names]["Type"] == "Map":
mapStateName = state_names
iterator = wf_state_map[mapStateName]['Iterator'] # this is a dict
states_dict = iterator['States'] # this is a also dict
print (json.dumps(states_dict))
for state in states_dict.keys():
print ("FOUND MAP STATE: "+str(state))
wf_state_map[state] = states_dict[state]
"""
for iterators in wf_state_map[mapStateName]['Iterator']:
for state in list(iterators['States'].keys()): # state is the key
wf_state_map[state] = iterators['States'][state] # add the state to the state map root
"""
for wfsname in wf_state_map:
wfs = wf_state_map[wfsname]
if wfs["Type"] == "Task":
resource_names_to_be_checked[wfs["Resource"]] = True
else:
if "exit" in wfobj:
wfexit = wfobj["exit"]
elif "end" in wfobj:
wfexit = wfobj["end"]
# ensure that the state names or function names are unique (i.e., no duplication)
function_map = {}
wffunctions = wfobj["functions"]
for wff in wffunctions:
state_name = wff["name"]
if wfexit == state_name:
errmsg = "End of the workflow MUST NOT be a function: " + state_name
success = False
break
if state_name in function_map.keys():
errmsg = "The function names should be unique: " + state_name
success = False
break
else:
# keep track, so that we can check whether we've seen it before
function_map[state_name] = True
resource_name = state_name
if "resource" in wff:
resource_name = wff["resource"]
resource_names_to_be_checked[resource_name] = True
if success:
if not resource_names_to_be_checked.keys():
uploaded_resources = {}
else:
uploaded_resources = sapi.get(email + "_list_grains", True)
if uploaded_resources is not None and uploaded_resources != "":
uploaded_resources = json.loads(uploaded_resources)
else:
success = False
errmsg = "Could not retrieve uploaded functions list."
if success:
for resource_name in list(resource_names_to_be_checked.keys()):
if resource_name not in uploaded_resources:
success = False
errmsg += "\nResource has not been uploaded yet: " + resource_name
return success, errmsg, resource_names_to_be_checked, uploaded_resources
def compile_resource_info_map(resource_names, uploaded_resources, email, sapi, dlc):
# this function compiles the resource info map used by the deployment
# initialization
resource_info_map = {}
for resource_name in list(resource_names.keys()):
resource_info = {}
if resource_name in uploaded_resources:
resource_info["id"] = uploaded_resources[resource_name]
resource_id = resource_info["id"]
resource_metadata = sapi.get(email + "_grain_" + resource_id, True)
if resource_metadata is not None and resource_metadata != "":
resource_metadata = json.loads(resource_metadata)
if "runtime" in resource_metadata:
resource_info["runtime"] = resource_metadata["runtime"]
num_chunks_str = dlc.get("grain_source_zip_num_chunks_" + resource_id)
try:
num_chunks = int(num_chunks_str)
is_zip = True
except Exception as exc:
is_zip = False
resource_info["type"] = "code"
resource_info["ref"] = "grain_source_" + resource_id
if is_zip:
resource_info["type"] = "zip"
resource_info["ref"] = "grain_source_zip_num_chunks_" + resource_id
resource_info_map[resource_name] = resource_info
return resource_info_map
def start_docker_sandbox(host_to_deploy, uid, sid, wid, wname, sandbox_image_name):
""" Launch the docker run command remotely
Parameters:
host_to_deploy set(hostname, ip): IP is used to connect docker, the pair is given as extra host (/etc/host) to the launched container
uid - user id, typically cleansed email address, e.g. jdoe_at_example_com
sid - sandbox id
    wid - workflow id
    wname - workflow name
    sandbox_image_name - docker image to run (e.g. microfn/sandbox)
    """
ulimit_nofile = docker.types.Ulimit(name='nofile', soft=262144, hard=262144)
ulimit_list = [ulimit_nofile]
# set up the env variables
env_vars = {}
env_vars["MFN_HOSTNAME"] = host_to_deploy[0]
env_vars["MFN_ELASTICSEARCH"] = os.getenv("MFN_ELASTICSEARCH")
env_vars["MFN_QUEUE"] = "127.0.0.1:"+os.getenv("MFN_QUEUE").split(':')[1]
env_vars["MFN_DATALAYER"] = host_to_deploy[0]+":"+os.getenv("MFN_DATALAYER").split(':')[1]
env_vars["USERID"] = uid
env_vars["SANDBOXID"] = sid
env_vars["WORKFLOWID"] = wid
env_vars["WORKFLOWNAME"] = wname
endpoint_key = hashlib.sha256(str(time.time()).encode()).hexdigest()
env_vars["MFN_ENDPOINT_KEY"] = endpoint_key
env_vars["HTTP_PROXY"] = os.getenv("HTTP_PROXY")
env_vars["HTTPS_PROXY"] = os.getenv("HTTPS_PROXY")
env_vars["http_proxy"] = os.getenv("http_proxy")
env_vars["https_proxy"] = os.getenv("https_proxy")
env_vars["no_proxy"] = os.getenv("no_proxy")
lc = LogConfig(type=LogConfig.types.JSON, config={"max-size": "50m", "max-file": "5"})
success = False
try:
client = docker.DockerClient(base_url="tcp://" + host_to_deploy[1] + ":2375") # use IP address
success = True
except Exception as exc:
print("Error launching sandbox; can't connect to: " + host_to_deploy[1] + ":2375")
print(traceback.format_exc())
success = False
if success:
try:
sandbox = client.containers.get(sid)
sandbox.stop()
sandbox.remove(force=True)
except Exception as exc:
pass
if success:
try:
print("Starting sandbox docker container for: " + uid + " " + sid + " " + wid + " " + sandbox_image_name)
print("Docker daemon: " + "tcp://" + host_to_deploy[1] + ":2375" + ", environment variables: " + str(env_vars))
client.containers.run(sandbox_image_name, init=True, detach=True, ports={"8080/tcp": None}, ulimits=ulimit_list, auto_remove=True, name=sid, environment=env_vars, extra_hosts={host_to_deploy[0]:host_to_deploy[1]}, log_config=lc)
# TEST/DEVELOPMENT: no auto_remove to access sandbox logs
#client.containers.run(sandbox_image_name, init=True, detach=True, ports={"8080/tcp": None}, ulimits=ulimit_list, name=sid, environment=env_vars, extra_hosts={host_to_deploy[0]:host_to_deploy[1]}, log_config=lc)
except Exception as exc:
print("Error launching sandbox: " + str(host_to_deploy) + " " + uid + " " + sid + " " + wid)
print(traceback.format_exc())
success = False
finally:
client.close()
return success, endpoint_key
def get_workflow_host_port(host_to_deploy, sid):
success = False
try:
apiclient = docker.APIClient(base_url="tcp://" + host_to_deploy[1] + ":2375") # use IP address
success = True
except Exception as exc:
print("Error updating workflow endpoints; " + host_to_deploy[1] + ":2375")
print(traceback.format_exc())
success = False
if success:
try:
settings = apiclient.inspect_container(sid)
ports = settings["NetworkSettings"]["Ports"]
port_map = ports["8080/tcp"][0]
host_port = port_map["HostPort"]
success = True
except Exception as exc:
print("Error updating workflow endpoints; can't connect to: " + str(host_to_deploy) + " " + sid)
print(traceback.format_exc())
success = False
finally:
apiclient.close()
return success, host_port
def create_k8s_deployment(email, workflow_info, runtime, management=False):
# KUBERNETES MODE
new_workflow_conf = {}
conf_file = '/opt/mfn/SandboxAgent/conf/new_workflow.conf'
try:
with open(conf_file, 'r') as fp:
new_workflow_conf = json.load(fp)
except IOError as e:
raise Exception("Unable to load "+conf_file+". Ensure that the configmap has been setup properly", e)
ksvc_file = '/opt/mfn/SandboxAgent/conf/kservice.json'
try:
with open(ksvc_file, 'r') as fp:
kservice = json.load(fp)
except IOError as e:
raise Exception("Unable to load "+ksvc_file+". Ensure that the configmap has been setup properly", e)
# Kubernetes labels cannot contain @ or _ and should start and end with alphanumeric characters (and not be greater than 63 chars)
workflowNameForLabel = workflow_info["workflowName"].replace('@', '-').replace('_', '-').lower()
wfNameSanitized = 'w-' + workflowNameForLabel[:59] + '-w'
emailForLabel = email.replace('@', '-').replace('_', '-').lower()
emailSanitized = 'u-' + emailForLabel[:59] + '-u'
# Pod, Deployment and Hpa names for the new workflow will have a prefix containing the workflow name and user name
app_fullname_prefix = ''
if 'app.fullname.prefix' in new_workflow_conf:
app_fullname_prefix = new_workflow_conf['app.fullname.prefix']+'-'# + wfNameSanitized + '-' + emailSanitized + '-'
# Create a Deployment
with open("/var/run/secrets/kubernetes.io/serviceaccount/token", "r") as f:
token = f.read()
with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r") as f:
namespace = f.read()
ksvcname = app_fullname_prefix + workflow_info["workflowId"].lower()
endpoint_key = hashlib.sha256(str(time.time()).encode()).hexdigest()
kservice['metadata']['name'] = ksvcname
kservice['metadata']['namespace'] = namespace
labels = kservice['metadata']['labels']
labels['user'] = emailSanitized
labels['workflow'] = wfNameSanitized
labels['workflowid'] = workflow_info["workflowId"]
labels = kservice['spec']['template']['metadata']['labels']
labels['user'] = emailSanitized
labels['workflowid'] = workflow_info["workflowId"]
kservice['spec']['template']['spec']['containers'][0]['image'] = new_workflow_conf['image.'+runtime]
env = kservice['spec']['template']['spec']['containers'][0]['env']
env.append({'name': 'MFN_ENDPOINT_KEY', 'value': endpoint_key})
env.append({'name': 'USERID', 'value': email})
env.append({'name': 'SANDBOXID','value': workflow_info["sandboxId"]})
env.append({'name': 'WORKFLOWID', 'value': workflow_info["workflowId"]})
env.append({'name': 'WORKFLOWNAME', 'value': workflow_info["workflowName"]})
# Special handling for the management container
if management:
management_workflow_conf = {}
conf_file = '/opt/mfn/SandboxAgent/conf/management_workflow.conf'
try:
with open(conf_file, 'r') as fp:
management_workflow_conf = json.load(fp)
except IOError as e:
raise Exception("Unable to load "+conf_file+". Ensure that the configmap has been setup properly", e)
kservice['spec']['template']['spec']['volumes'] = [{ 'name': 'new-workflow-conf', 'configMap': {'name': new_workflow_conf['configmap']}}]
kservice['spec']['template']['spec']['containers'][0]['volumeMounts'] = [{'name': 'new-workflow-conf', 'mountPath': '/opt/mfn/SandboxAgent/conf'}]
kservice['spec']['template']['spec']['containers'][0]['resources'] = management_workflow_conf['resources']
kservice['spec']['template']['spec']['serviceAccountName'] = new_workflow_conf['mgmtserviceaccount']
if 'HTTP_GATEWAYPORT' in new_workflow_conf:
env.append({'name': 'HTTP_GATEWAYPORT', 'value': new_workflow_conf['HTTP_GATEWAYPORT']})
if 'HTTPS_GATEWAYPORT' in new_workflow_conf:
env.append({'name': 'HTTPS_GATEWAYPORT', 'value': new_workflow_conf['HTTPS_GATEWAYPORT']})
for k in ['http_proxy', 'https_proxy', 'HTTP_PROXY', 'HTTPS_PROXY']:
if not k in os.environ:
continue
for container in kservice['spec']['template']['spec']['containers']:
container['env'].append({'name': k, 'value': os.getenv(k)})
print('Checking if kservice exists')
resp = requests.get(
"https://"+os.getenv("KUBERNETES_SERVICE_HOST")+":"+os.getenv("KUBERNETES_SERVICE_PORT_HTTPS")+"/apis/serving.knative.dev/v1/namespaces/"+namespace+"/services/"+ksvcname,
headers={"Authorization": "Bearer "+token, "Accept": "application/json"},
verify='/var/run/secrets/kubernetes.io/serviceaccount/ca.crt',
proxies={"https":""})
if resp.status_code == 200:
print('Deleting existing kservice')
resp = requests.delete(
"https://"+os.getenv("KUBERNETES_SERVICE_HOST")+":"+os.getenv("KUBERNETES_SERVICE_PORT_HTTPS")+"/apis/serving.knative.dev/v1/namespaces/"+namespace+"/services/"+ksvcname,
headers={"Authorization": "Bearer "+token, "Accept": "application/json"},
verify='/var/run/secrets/kubernetes.io/serviceaccount/ca.crt',
proxies={"https":""})
try:
resp.raise_for_status()
except Exception as e:
print("ERROR deleting existing kservice")
print(resp.text)
print('Creating new kservice')
resp = requests.post(
"https://"+os.getenv("KUBERNETES_SERVICE_HOST")+":"+os.getenv("KUBERNETES_SERVICE_PORT_HTTPS")+"/apis/serving.knative.dev/v1/namespaces/"+namespace+"/services",
headers={"Authorization": "Bearer "+token, "Content-Type": "application/yaml", "Accept": "application/json"},
verify='/var/run/secrets/kubernetes.io/serviceaccount/ca.crt',
data=json.dumps(kservice),
proxies={"https":""})
try:
resp.raise_for_status()
except requests.exceptions.HTTPError as e:
print(e)
print(json.dumps(kservice))
print(resp.text)
raise Exception("Error creating kubernetes deployment for "+email+" "+workflow_info["workflowId"], e)
# Wait for the URL
url = None
retry = 60
while retry > 0:
try:
resp = requests.get(
"https://"+os.getenv("KUBERNETES_SERVICE_HOST")+":"+os.getenv("KUBERNETES_SERVICE_PORT_HTTPS")+"/apis/serving.knative.dev/v1/namespaces/"+namespace+"/services/"+ksvcname,
headers={"Authorization": "Bearer "+token, "Accept": "application/json"},
verify='/var/run/secrets/kubernetes.io/serviceaccount/ca.crt',
proxies={"https":""})
resp.raise_for_status()
status = resp.json().get("status",{})
if "url" in status:
url = status["url"]
if "HTTPS_GATEWAYPORT" in os.environ:
url = "https://" + url.split("://",1)[1] + ":" + os.environ["HTTPS_GATEWAYPORT"]
elif "HTTP_GATEWAYPORT" in os.environ:
url = "http://" + url.split("://",1)[1] + ":" + os.environ["HTTP_GATEWAYPORT"]
break
except requests.exceptions.HTTPError as e:
print(e)
print(resp.text)
time.sleep(2)
retry -= 1
print("Workflow endpoint URL: "+str(url))
return url, endpoint_key
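# Illustrative helper (not referenced elsewhere in this file): the label sanitization performed
# in create_k8s_deployment above. '@' and '_' are mapped to '-', the value is lower-cased, and it
# is clamped so that, together with the alphanumeric prefix/suffix, the label stays within
# Kubernetes' 63-character limit. The example input is a placeholder.
def _sanitize_k8s_label(value, prefix, suffix):
    cleaned = value.replace('@', '-').replace('_', '-').lower()
    return prefix + cleaned[:59] + suffix
# e.g. _sanitize_k8s_label("jdoe@example_com", "u-", "-u") -> "u-jdoe-example-com-u"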
def handle(value, sapi):
assert isinstance(value, dict)
data = value
try:
if "email" not in data or "workflow" not in data:
raise Exception("malformed input")
email = data["email"]
# iea: I think it's okay to have the storage_userid inside the incoming value
# it is NOT coming from the client (e.g., browser) but actually derived
# in ManagementServiceEntry.py when the user has authenticated and put into the value
# that is passed to the next functions.
storage_userid = data["storage_userid"]
workflow = data["workflow"]
if "id" not in workflow:
raise Exception("malformed input")
sapi.log(json.dumps(workflow))
wfmeta = sapi.get(email + "_workflow_" + workflow["id"], True)
if wfmeta is None or wfmeta == "":
raise Exception("workflow metadata is not valid.")
try:
wfmeta = json.loads(wfmeta)
except:
raise Exception("workflow metadata is invalid json ("+wfmeta+")")
#if wfmeta["status"] != "undeployed" and wfmeta["status"] != "failed":
# raise Exception("workflow status is not undeployed: " + str(wfmeta["status"]))
dlc = sapi.get_privileged_data_layer_client(storage_userid)
# check workflow description and make sure that the functions are available
# compile the requirements for the workflow
#wfjson = sapi.get(email + "_workflow_json_" + wfmeta["id"], True)
wfjson = dlc.get("workflow_json_" + wfmeta["id"])
if wfjson is None or wfjson == "":
raise Exception("workflow JSON does not exist.")
wfjson = base64.b64decode(wfjson).decode()
try:
wfobj = json.loads(wfjson)
except:
raise Exception("workflow JSON is not valid.")
wf_type = WF_TYPE_SAND
if is_asl_workflow(wfobj):
wf_type = WF_TYPE_ASL
success, errmsg, resource_names, uploaded_resources = check_workflow_functions(wf_type, wfobj, email, sapi)
if not success:
raise Exception("Couldn't deploy workflow; " + errmsg)
# compile the single key workflow deployment info
# given such a key, the sandbox agent can retrieve the following information
# it is stored in the user's storage,
# just like all workflow and resource information
# it includes:
# - workflow info (ref, id)
# - map of resources
# - resource info for each (id, name, ref, type, runtime)
resource_info_map = compile_resource_info_map(resource_names, uploaded_resources, email, sapi, dlc)
workflow_info = {}
workflow_info["sandboxId"] = workflow["id"]
workflow_info["workflowId"] = workflow["id"]
workflow_info["workflowType"] = wf_type
workflow_info["json_ref"] = "workflow_json_" + wfmeta["id"]
workflow_info["workflowName"] = wfmeta["name"]
workflow_info["usertoken"] = data["usertoken"]
req = {}
req["installer"] = "pip"
workflow_info["sandbox_requirements"] = req
deployment_info = {}
deployment_info["workflow"] = workflow_info
deployment_info["resources"] = resource_info_map
#dlc.put("deployment_info_workflow_" + workflow["id"], json.dumps(deployment_info))
# _XXX_: important!
# put must not be queued as the function currently waits for the container to become ready
sapi.put("deployment_info_workflow_" + workflow["id"], json.dumps(deployment_info), True)
status = "deploying"
sapi.clearMap(workflow_info["workflowId"] + "_sandbox_status_map", is_private=True)
if 'KUBERNETES_SERVICE_HOST' in os.environ:
            if any(resource_info_map[res_name].get("runtime") == "Java" for res_name in resource_info_map):
runtime = "Java"
else:
runtime = "Python"
url, endpoint_key = create_k8s_deployment(email, workflow_info, runtime)
if url is not None and len(url) > 0:
status = "deploying"
sapi.addSetEntry(workflow_info["workflowId"] + "_workflow_endpoints", str(url), is_private=True)
sapi.putMapEntry(workflow_info["workflowId"] + "_workflow_endpoint_map", endpoint_key, str(url), is_private=True)
urlset = set(wfmeta.get("endpoints",[]))
urlset.add(url)
wfmeta["endpoints"] = list(urlset)
else:
status = "failed"
else:
# We're running BARE METAL mode
# _XXX_: due to the queue service still being in java in the sandbox
sandbox_image_name = "microfn/sandbox"
            if any(resource_info_map[res_name].get("runtime") == "Java" for res_name in resource_info_map):
sandbox_image_name = "microfn/sandbox_java"
# TODO: intelligence on how to pick hosts
hosts = sapi.get("available_hosts", True)
print("available_hosts: " + str(hosts))
if hosts is not None and hosts != "":
hosts = json.loads(hosts)
deployed_hosts = {}
# instruct hosts to start the sandbox and deploy workflow
for hostname in hosts:
hostip = hosts[hostname]
host_to_deploy = (hostname, hostip)
success, endpoint_key = start_docker_sandbox(host_to_deploy, email, workflow_info["sandboxId"], workflow_info["workflowId"], workflow_info["workflowName"], sandbox_image_name)
if success:
deployed_hosts[hostname] = hostip
success, host_port = get_workflow_host_port(host_to_deploy, workflow_info["sandboxId"])
if success:
#sapi.log(str(hostip) + ", host_port: " + str(host_port))
url="http://"+str(hostip)+":"+str(host_port)
sapi.addSetEntry(workflow_info["workflowId"] + "_workflow_endpoints", url, is_private=True)
sapi.putMapEntry(workflow_info["workflowId"] + "_workflow_endpoint_map", endpoint_key, str(url), is_private=True)
urlset = set(wfmeta.get("endpoints",[]))
urlset.add(url)
wfmeta["endpoints"] = list(urlset)
status = "deploying"
sbinfo = {}
sbinfo["status"] = "deploying"
sbinfo["errmsg"] = ""
sapi.putMapEntry(workflow_info["workflowId"] + "_sandbox_status_map", endpoint_key, json.dumps(sbinfo), is_private=True)
#endpoints = sapi.retrieveMap(workflow_info["workflowId"] + "_workflow_endpoints", True)
#sapi.log(str(endpoints))
if not bool(deployed_hosts):
status = "failed"
else:
#sapi.log("deployed on hosts: " + json.dumps(deployed_hosts))
sapi.put(email + "_workflow_hosts_" + workflow["id"], json.dumps(deployed_hosts), True)
else:
print("available_hosts is empty. Not deploying")
status = "failed"
# Update workflow status
wfmeta["status"] = status
# somebody needs to update the workflow deployment status after
# successfully starting a sandbox
# in BARE_METAL and KUBERNETES mode
# put the workflow's status to the user storage
# so that the sandbox agent running on any host can update it
#dlc.put("workflow_status_" + workflow["id"], wfmeta["status"])
sapi.put("workflow_status_" + workflow["id"], wfmeta["status"], True)
print("Current workflow metadata: " + str(wfmeta))
        if status != "failed" and "associatedTriggerableTables" in wfmeta:
for table in wfmeta["associatedTriggerableTables"]:
addWorkflowToTableMetadata(email, table, wfmeta["name"], wfmeta["endpoints"], dlc)
sapi.put(email + "_workflow_" + workflow["id"], json.dumps(wfmeta), True)
dlc.shutdown()
# deploy queued up triggers
        if status != "failed" and "associatedTriggers" in wfmeta and "endpoints" in wfmeta and len(wfmeta["endpoints"]) > 0:
associatedTriggers = wfmeta["associatedTriggers"].copy()
for trigger_name in associatedTriggers:
trigger_id = storage_userid + "_" + trigger_name
print("Adding trigger name: " + str(trigger_name) + " to workflow")
if isTriggerPresent(email, trigger_id, trigger_name, sapi) == True:
#trigger_info = get_trigger_info(sapi, trigger_id)
#if wfmeta["name"] in trigger_info["associated_workflows"]:
# print("[deployWorkflow] Strangely global trigger info already has workflow_name: " + str(wfmeta["name"]) + ", in associated_workflows")
workflow_state = associatedTriggers[trigger_name]
addWorkflowToTrigger(email, wfmeta["name"], workflow_state, wfmeta, trigger_id, trigger_name, sapi)
else:
# workflow has an associated trigger name, but the trigger may have been deleted
# so remove the associated trigger name
print("Trigger_id: " + str(trigger_id) + " info not found. Removing trigger name: " + str(trigger_name) + ", from workflow's associatedTriggers")
assocTriggers = wfmeta['associatedTriggers']
del assocTriggers[trigger_name]
wfmeta['associatedTriggers'] = assocTriggers
print("Updating workflow meta to: " + str(wfmeta))
sapi.put(email + "_workflow_" + wfmeta["id"], json.dumps(wfmeta), True)
#deleteTriggerFromWorkflowMetadata(email, trigger_name, wfmeta["name"], workflow["id"], sapi)
else:
print("Unable to associate queued up triggers with workflow. Workflow meta: " + str(wfmeta))
except Exception as e:
response = {}
response_data = {}
response["status"] = "failure"
response_data["message"] = "Couldn't deploy workflow; " + str(e)
response["data"] = response_data
sapi.log(traceback.format_exc())
return response
# Finish successfully
response = {}
response_data = {}
response_data["message"] = "Successfully deployed workflow " + workflow["id"] + "."
response_data["workflow"] = workflow
response["status"] = "success"
response["data"] = response_data
sapi.log(json.dumps(response))
return response
def addWorkflowToTableMetadata(email, tablename, workflowname, workflow_endpoints, dlc):
metadata_key = tablename
metadata_urls = workflow_endpoints
triggers_metadata_table = 'triggersInfoTable'
bucket_metadata = {"urltype": "url", "urls": metadata_urls, "wfname": workflowname}
print("[addWorkflowToTableMetadata] User: " + email + ", Workflow: " + workflowname + ", Table: " + tablename + ", Adding metadata: " + str(bucket_metadata))
current_meta = dlc.get(metadata_key, tableName=triggers_metadata_table)
if current_meta == None or current_meta == '':
meta_list = []
else:
meta_list = json.loads(current_meta)
    if isinstance(meta_list, list):
for i in range(len(meta_list)):
meta=meta_list[i]
if meta["wfname"] == bucket_metadata["wfname"]:
del meta_list[i]
break
meta_list.append(bucket_metadata)
dlc.put(metadata_key, json.dumps(meta_list), tableName=triggers_metadata_table)
time.sleep(0.2)
updated_meta = dlc.get(metadata_key, tableName=triggers_metadata_table)
updated_meta_list = json.loads(updated_meta)
print("[addWorkflowToTableMetadata] User: " + email + ", Workflow: " + workflowname + ", Table: " + tablename + ", Updated metadata: " + str(updated_meta_list))
MAP_AVAILABLE_FRONTENDS = "available_triggers_frontned_map"
MAP_TRIGGERS_TO_INFO = "triggers_to_info_map"
### Utility functions ###
def get_available_frontends(context):
tf_hosts = context.getMapKeys(MAP_AVAILABLE_FRONTENDS, True)
return tf_hosts
def get_frontend_info(context, frontend_ip_port):
ret = context.getMapEntry(MAP_AVAILABLE_FRONTENDS, frontend_ip_port, True)
    if ret == "" or ret is None:
return None
else:
return json.loads(ret)
def get_trigger_info(context, trigger_id):
ret = context.getMapEntry(MAP_TRIGGERS_TO_INFO, trigger_id, True)
    if ret == "" or ret is None:
return None
else:
return json.loads(ret)
def add_trigger_info(context, trigger_id, data):
print("add_trigger_info: " + trigger_id + ", data: " + data)
context.putMapEntry(MAP_TRIGGERS_TO_INFO, trigger_id, data, True)
def remove_trigger_info(context, trigger_id):
print("remove_trigger_info: " + trigger_id)
context.deleteMapEntry(MAP_TRIGGERS_TO_INFO, trigger_id, True)
def get_user_trigger_list(context, email):
user_triggers_list = context.get(email + "_list_triggers", True)
if user_triggers_list is not None and user_triggers_list != "":
user_triggers_list = json.loads(user_triggers_list)
else:
user_triggers_list = {}
return user_triggers_list
def isTriggerPresent(email, trigger_id, trigger_name, context):
# check if the global trigger is present
global_trigger_info = get_trigger_info(context, trigger_id)
print("[isTriggerPresent] global_trigger_info = " + str(global_trigger_info))
# check if the trigger does not exist in global and user's list
if global_trigger_info is None:
return False
return True
def addWorkflowToTrigger(email, workflow_name, workflow_state, workflow_details, trigger_id, trigger_name, context):
print("[addTriggerForWorkflow] called with workflow_name: " + str(workflow_name) + ", workflow_state: " + str(workflow_name) + ", workflow_details: " + str(workflow_details) + ", trigger_id: " + str(trigger_id) + ", trigger_name: " + trigger_name)
status_msg = ""
try:
workflow_endpoints = workflow_details["endpoints"]
if len(workflow_endpoints) == 0:
raise Exception("[addTriggerForWorkflow] No workflow endpoint available")
        # TODO: [For bare metal clusters] send all workflow endpoints to the frontend to let it load balance between wf endpoints. For k8s there will only be one name
selected_workflow_endpoint = workflow_endpoints[random.randint(0,len(workflow_endpoints)-1)]
print("[addTriggerForWorkflow] selected workflow endpoint: " + selected_workflow_endpoint)
workflow_to_add = \
{
"workflow_url": selected_workflow_endpoint,
"workflow_name": workflow_name,
"workflow_state": workflow_state
}
# get the list of available frontends.
tf_hosts = get_available_frontends(context)
if len(tf_hosts) == 0:
raise Exception("[addTriggerForWorkflow] No available TriggersFrontend found")
# if the frontend with the trigger is available
global_trigger_info = get_trigger_info(context, trigger_id)
tf_ip_port = global_trigger_info["frontend_ip_port"]
if tf_ip_port not in tf_hosts:
raise Exception("Frontend: " + tf_ip_port + " not available")
url = "http://" + tf_ip_port + "/add_workflows"
# send the request and wait for response
req_obj = {"trigger_id": trigger_id, "workflows": [workflow_to_add]}
print("[addTriggerForWorkflow] Contacting: " + url + ", with data: " + str(req_obj))
res_obj = {}
try:
res = requests.post(url, json=req_obj)
if res.status_code != 200:
raise Exception("status code: " + str(res.status_code) + " returned")
res_obj = res.json()
except Exception as e:
status_msg = "Error: trigger_id" + trigger_id + "," + str(e)
if "status" in res_obj and res_obj["status"].lower() == "success":
# if success then update the global trigger table to add a new workflow.
print("[addTriggerForWorkflow] Success response from " + url)
global_trigger_info["associated_workflows"][workflow_name] = workflow_to_add
add_trigger_info(context, trigger_id, json.dumps(global_trigger_info))
status_msg = "[addTriggerForWorkflow] Trigger " + trigger_name + " added successfully to workflow:" + workflow_name + ". Message: " + res_obj["message"]
else:
if "message" in res_obj:
status_msg = status_msg + ", message: " + res_obj["message"]
status_msg = "[addTriggerForWorkflow] Error: " + status_msg + ", response: " + str(res_obj)
raise Exception(status_msg)
except Exception as e:
print("[addTriggerForWorkflow] exception: " + str(e))
# TODO: why remove this?
#if 'associatedTriggers' in workflow_details and trigger_name in workflow_details['associatedTriggers']:
# associatedTriggers = workflow_details['associatedTriggers']
# del associatedTriggers[trigger_name]
# workflow_details['associatedTriggers'] = associatedTriggers
# print("Removing trigger_name: " + str(trigger_name) + ", from associatedTriggers for the workflow. Updated workflow metadata: " + str(workflow_details))
# context.put(email + "_workflow_" + workflow_details["id"], json.dumps(workflow_details), True)
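For reference, a sketch of the single-key deployment record that handle() stores under "deployment_info_workflow_<id>", assembled from the assignments and comments in that function; every concrete value below is a placeholder.

EXAMPLE_DEPLOYMENT_INFO = {
    "workflow": {
        "sandboxId": "<workflow-id>",
        "workflowId": "<workflow-id>",
        "workflowType": WF_TYPE_SAND,            # or WF_TYPE_ASL for ASL workflows
        "json_ref": "workflow_json_<workflow-id>",
        "workflowName": "<workflow-name>",
        "usertoken": "<user-token>",
        "sandbox_requirements": {"installer": "pip"},
    },
    "resources": {
        "<function-name>": {
            "id": "<resource-id>",
            "runtime": "<runtime>",              # present only when stored in the resource metadata
            "type": "code",                      # "zip" when the source was uploaded in chunks
            "ref": "grain_source_<resource-id>",
        },
    },
}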
| 47.581301
| 251
| 0.629645
|
8754c0710e742b7cf20b782a0f75146a3591f1cf
| 24,063
|
py
|
Python
|
roles/eseries/library/netapp_e_volume.py
|
NetApp/eseries-stk
|
54e1f74c0f242651def25aee478cd40027b37f57
|
[
"BSD-3-Clause"
] | 2
|
2019-03-27T08:44:36.000Z
|
2020-10-22T21:15:30.000Z
|
roles/eseries/library/netapp_e_volume.py
|
NetApp/eseries-stk
|
54e1f74c0f242651def25aee478cd40027b37f57
|
[
"BSD-3-Clause"
] | 1
|
2020-04-14T20:24:41.000Z
|
2020-04-25T18:43:57.000Z
|
roles/eseries/library/netapp_e_volume.py
|
NetApp/eseries-stk
|
54e1f74c0f242651def25aee478cd40027b37f57
|
[
"BSD-3-Clause"
] | 4
|
2017-10-17T19:39:21.000Z
|
2019-03-12T13:30:14.000Z
|
#!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_volume
version_added: "2.2"
short_description: Manage storage volumes (standard and thin)
description:
- Create or remove volumes (standard and thin) for NetApp E/EF-series storage arrays.
extends_documentation_fragment:
- netapp.eseries
options:
state:
required: true
description:
- Whether the specified volume should exist or not.
choices: ['present', 'absent']
name:
required: true
description:
- The name of the volume to manage
storage_pool_name:
required: true
description:
- "Required only when requested state is 'present'. The name of the storage pool the volume should exist on."
size_unit:
description:
- The unit used to interpret the size parameter
choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
default: 'gb'
size:
required: true
description:
- "Required only when state = 'present'. The size of the volume in (size_unit)."
segment_size_kb:
description:
- The segment size of the new volume
    default: 128
thin_provision:
description:
- Whether the volume should be thin provisioned. Thin volumes can only be created on disk pools (raidDiskPool).
default: False
choices: ['yes','no','true','false']
thin_volume_repo_size:
description:
- Initial size of the thin volume repository volume (in size_unit)
required: True
thin_volume_max_repo_size:
description:
- Maximum size that the thin volume repository volume will automatically expand to
default: same as size (in size_unit)
ssd_cache_enabled:
description:
- Whether an existing SSD cache should be enabled on the volume (fails if no SSD cache defined)
default: None (ignores existing SSD cache setting)
choices: ['yes','no','true','false']
data_assurance_enabled:
description:
- If data assurance should be enabled for the volume
default: false
# TODO: doc thin volume parameters
author: Kevin Hulquest (@hulquest)
'''
EXAMPLES = '''
- name: No thin volume
netapp_e_volume:
ssid: "{{ ssid }}"
name: NewThinVolumeByAnsible
state: absent
log_path: /tmp/volume.log
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
when: check_volume
- name: No fat volume
netapp_e_volume:
ssid: "{{ ssid }}"
name: NewVolumeByAnsible
state: absent
log_path: /tmp/volume.log
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
when: check_volume
'''
RETURN = '''
---
msg:
description: State of volume
type: string
returned: always
sample: "Standard volume [workload_vol_1] has been created."
'''
import json
import logging
import time
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netapp import request, eseries_host_argument_spec
from ansible.module_utils.pycompat24 import get_exception
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
def ifilter(predicate, iterable):
# python 2, 3 generic filtering.
if predicate is None:
predicate = bool
for x in iterable:
if predicate(x):
yield x
class NetAppESeriesVolume(object):
def __init__(self):
self._size_unit_map = dict(
bytes=1,
b=1,
kb=1024,
mb=1024 ** 2,
gb=1024 ** 3,
tb=1024 ** 4,
pb=1024 ** 5,
eb=1024 ** 6,
zb=1024 ** 7,
yb=1024 ** 8
)
argument_spec = eseries_host_argument_spec()
argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
name=dict(required=True, type='str'),
storage_pool_name=dict(type='str'),
size_unit=dict(default='gb', choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb', 'pct'],
type='str'),
size=dict(type='int'),
segment_size_kb=dict(default=128, choices=[8, 16, 32, 64, 128, 256, 512], type='int'),
ssd_cache_enabled=dict(type='bool'), # no default, leave existing setting alone
data_assurance_enabled=dict(default=False, type='bool'),
thin_provision=dict(default=False, type='bool'),
thin_volume_repo_size=dict(type='int'),
thin_volume_max_repo_size=dict(type='int'),
# TODO: add cache, owning controller support, thin expansion policy, etc
log_path=dict(type='str'),
))
self.module = AnsibleModule(argument_spec=argument_spec,
required_if=[
('state', 'present', ['storage_pool_name', 'size']),
('thin_provision', 'true', ['thin_volume_repo_size'])
],
supports_check_mode=True)
p = self.module.params
log_path = p['log_path']
# logging setup
self._logger = logging.getLogger(self.__class__.__name__)
self.debug = self._logger.debug
if log_path:
logging.basicConfig(level=logging.DEBUG, filename=log_path)
self.state = p['state']
self.ssid = p['ssid']
self.name = p['name']
self.storage_pool_name = p['storage_pool_name']
self.size_unit = p['size_unit']
self.size = p['size']
        self.relative_size = p.get('relative_size')  # note: 'relative_size' is not declared in argument_spec above
self.segment_size_kb = p['segment_size_kb']
self.ssd_cache_enabled = p['ssd_cache_enabled']
self.data_assurance_enabled = p['data_assurance_enabled']
self.thin_provision = p['thin_provision']
self.thin_volume_repo_size = p['thin_volume_repo_size']
self.thin_volume_max_repo_size = p['thin_volume_max_repo_size']
if not self.thin_volume_max_repo_size:
self.thin_volume_max_repo_size = self.size
self.validate_certs = p['validate_certs']
try:
self.api_usr = p['api_username']
self.api_pwd = p['api_password']
self.api_url = p['api_url']
except KeyError:
self.module.fail_json(msg="You must pass in api_username "
"and api_password and api_url to the module.")
if self.size_unit == 'pct':
pool = self.get_storage_pool(self.storage_pool_name)
self.size = int(int(pool['totalRaidedSpace']) * (self.size / 100.0))
pool_data = pool['volumeGroupData'].get('diskPoolData')
# TODO(lorenp): Is there anything we need to do for volumeGroups?
if pool_data is not None:
self.size = self.size - self.size % int(pool_data['allocGranularity'])
self.size_unit = 'b'
def get_volume(self, volume_name):
self.debug('fetching volumes')
# fetch the list of volume objects and look for one with a matching name (we'll need to merge volumes and thin-volumes)
try:
(rc, volumes) = request(self.api_url + "/storage-systems/%s/volumes" % (self.ssid),
headers=dict(Accept="application/json"), url_username=self.api_usr,
url_password=self.api_pwd, validate_certs=self.validate_certs)
except Exception:
err = get_exception()
self.module.fail_json(
msg="Failed to obtain list of standard/thick volumes. Array Id [%s]. Error[%s]." % (self.ssid,
str(err)))
try:
self.debug('fetching thin-volumes')
(rc, thinvols) = request(self.api_url + "/storage-systems/%s/thin-volumes" % (self.ssid),
headers=dict(Accept="application/json"), url_username=self.api_usr,
url_password=self.api_pwd, validate_certs=self.validate_certs)
except Exception:
err = get_exception()
self.module.fail_json(
msg="Failed to obtain list of thin volumes. Array Id [%s]. Error[%s]." % (self.ssid, str(err)))
volumes.extend(thinvols)
self.debug("searching for volume '%s'" % volume_name)
volume_detail = next(ifilter(lambda a: a['name'] == volume_name, volumes), None)
if volume_detail:
self.debug('found')
else:
self.debug('not found')
return volume_detail
def get_storage_pool(self, storage_pool_name):
self.debug("fetching storage pools")
# map the storage pool name to its id
try:
(rc, resp) = request(self.api_url + "/storage-systems/%s/storage-pools" % (self.ssid),
headers=dict(Accept="application/json"), url_username=self.api_usr,
url_password=self.api_pwd, validate_certs=self.validate_certs)
except Exception:
err = get_exception()
self.module.fail_json(
msg="Failed to obtain list of storage pools. Array Id [%s]. Error[%s]." % (self.ssid, str(err)))
self.debug("searching for storage pool '%s'" % storage_pool_name)
pool_detail = next(ifilter(lambda a: a['name'] == storage_pool_name, resp), None)
if pool_detail:
self.debug('found')
else:
self.debug('not found')
return pool_detail
def create_volume(self, pool_id, name, size_unit, size, segment_size_kb, data_assurance_enabled):
volume_add_req = dict(
name=name,
poolId=pool_id,
sizeUnit=size_unit,
size=size,
segSize=segment_size_kb,
dataAssuranceEnabled=data_assurance_enabled,
)
self.debug("creating volume '%s'" % name)
try:
(rc, resp) = request(self.api_url + "/storage-systems/%s/volumes" % (self.ssid),
data=json.dumps(volume_add_req), headers=HEADERS, method='POST',
url_username=self.api_usr, url_password=self.api_pwd,
validate_certs=self.validate_certs,
timeout=120)
except Exception:
err = get_exception()
self.module.fail_json(
msg="Failed to create volume. Volume [%s]. Array Id [%s]. Error[%s]." % (self.name, self.ssid,
str(err)))
def create_thin_volume(self, pool_id, name, size_unit, size, thin_volume_repo_size,
thin_volume_max_repo_size, data_assurance_enabled):
thin_volume_add_req = dict(
name=name,
poolId=pool_id,
sizeUnit=size_unit,
virtualSize=size,
repositorySize=thin_volume_repo_size,
maximumRepositorySize=thin_volume_max_repo_size,
dataAssuranceEnabled=data_assurance_enabled,
)
self.debug("creating thin-volume '%s'" % name)
try:
(rc, resp) = request(self.api_url + "/storage-systems/%s/thin-volumes" % (self.ssid),
data=json.dumps(thin_volume_add_req), headers=HEADERS, method='POST',
url_username=self.api_usr, url_password=self.api_pwd,
validate_certs=self.validate_certs,
timeout=120)
except Exception:
err = get_exception()
self.module.fail_json(
msg="Failed to create thin volume. Volume [%s]. Array Id [%s]. Error[%s]." % (self.name,
self.ssid,
str(err)))
def delete_volume(self):
# delete the volume
self.debug("deleting volume '%s'" % self.volume_detail['name'])
try:
(rc, resp) = request(
self.api_url + "/storage-systems/%s/%s/%s" % (self.ssid, self.volume_resource_name,
self.volume_detail['id']),
method='DELETE', url_username=self.api_usr, url_password=self.api_pwd,
validate_certs=self.validate_certs, timeout=120)
except Exception:
err = get_exception()
self.module.fail_json(
msg="Failed to delete volume. Volume [%s]. Array Id [%s]. Error[%s]." % (self.name, self.ssid,
str(err)))
@property
def volume_resource_name(self):
if self.volume_detail['thinProvisioned']:
return 'thin-volumes'
else:
return 'volumes'
@property
def volume_properties_changed(self):
return self.volume_ssdcache_setting_changed # or with other props here when extended
# TODO: add support for r/w cache settings, owning controller, scan settings, expansion policy, growth alert threshold
@property
def volume_ssdcache_setting_changed(self):
# None means ignore existing setting
if self.ssd_cache_enabled is not None and self.ssd_cache_enabled != self.volume_detail['flashCached']:
self.debug("flash cache setting changed")
return True
def update_volume_properties(self):
update_volume_req = dict()
# conditionally add values so we ignore unspecified props
if self.volume_ssdcache_setting_changed:
update_volume_req['flashCache'] = self.ssd_cache_enabled
self.debug("updating volume properties...")
try:
(rc, resp) = request(
self.api_url + "/storage-systems/%s/%s/%s/" % (self.ssid, self.volume_resource_name,
self.volume_detail['id']),
data=json.dumps(update_volume_req), headers=HEADERS, method='POST',
url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs,
timeout=120)
except Exception:
err = get_exception()
self.module.fail_json(
msg="Failed to update volume properties. Volume [%s]. Array Id [%s]. Error[%s]." % (self.name,
self.ssid,
str(err)))
@property
def volume_needs_expansion(self):
current_size_bytes = int(self.volume_detail['capacity'])
requested_size_bytes = self.size * self._size_unit_map[self.size_unit]
# TODO: check requested/current repo volume size for thin-volumes as well
# TODO: do we need to build any kind of slop factor in here?
return requested_size_bytes > current_size_bytes
def expand_volume(self):
is_thin = self.volume_detail['thinProvisioned']
if is_thin:
# TODO: support manual repo expansion as well
self.debug('expanding thin volume')
thin_volume_expand_req = dict(
newVirtualSize=self.size,
sizeUnit=self.size_unit
)
try:
(rc, resp) = request(self.api_url + "/storage-systems/%s/thin-volumes/%s/expand" % (self.ssid,
self.volume_detail[
'id']),
data=json.dumps(thin_volume_expand_req), headers=HEADERS, method='POST',
url_username=self.api_usr, url_password=self.api_pwd,
validate_certs=self.validate_certs, timeout=120)
except Exception:
err = get_exception()
self.module.fail_json(
msg="Failed to expand thin volume. Volume [%s]. Array Id [%s]. Error[%s]." % (self.name,
self.ssid,
str(err)))
# TODO: check return code
else:
self.debug('expanding volume')
volume_expand_req = dict(
expansionSize=self.size,
sizeUnit=self.size_unit
)
try:
(rc, resp) = request(
self.api_url + "/storage-systems/%s/volumes/%s/expand" % (self.ssid,
self.volume_detail['id']),
data=json.dumps(volume_expand_req), headers=HEADERS, method='POST',
url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs,
timeout=120)
except Exception:
err = get_exception()
self.module.fail_json(
msg="Failed to expand volume. Volume [%s]. Array Id [%s]. Error[%s]." % (self.name,
self.ssid,
str(err)))
self.debug('polling for completion...')
while True:
try:
(rc, resp) = request(self.api_url + "/storage-systems/%s/volumes/%s/expand" % (self.ssid,
self.volume_detail[
'id']),
method='GET', url_username=self.api_usr, url_password=self.api_pwd,
validate_certs=self.validate_certs)
except Exception:
err = get_exception()
self.module.fail_json(
msg="Failed to get volume expansion progress. Volume [%s]. Array Id [%s]. Error[%s]." % (
self.name, self.ssid, str(err)))
action = resp['action']
percent_complete = resp['percentComplete']
self.debug('expand action %s, %s complete...' % (action, percent_complete))
if action == 'none':
self.debug('expand complete')
break
else:
time.sleep(5)
def apply(self):
changed = False
volume_exists = False
msg = None
self.volume_detail = self.get_volume(self.name)
if self.volume_detail:
volume_exists = True
if self.state == 'absent':
self.debug("CHANGED: volume exists, but requested state is 'absent'")
changed = True
elif self.state == 'present':
# check requested volume size, see if expansion is necessary
if self.volume_needs_expansion:
self.debug(
"CHANGED: requested volume size %s%s is larger than current size %sb" % (self.size,
self.size_unit,
self.volume_detail[
'capacity']))
changed = True
if self.volume_properties_changed:
self.debug("CHANGED: one or more volume properties have changed")
changed = True
else:
if self.state == 'present':
self.debug("CHANGED: volume does not exist, but requested state is 'present'")
changed = True
if changed:
if self.module.check_mode:
self.debug('skipping changes due to check mode')
else:
if self.state == 'present':
if not volume_exists:
pool_detail = self.get_storage_pool(self.storage_pool_name)
if not pool_detail:
self.module.fail_json(msg='Requested storage pool (%s) not found' % self.storage_pool_name)
if self.thin_provision and not pool_detail['diskPool']:
self.module.fail_json(
msg='Thin provisioned volumes can only be located on disk pools (not volume groups)')
pool_id = pool_detail['id']
if not self.thin_provision:
self.create_volume(pool_id, self.name, self.size_unit, self.size, self.segment_size_kb,
self.data_assurance_enabled)
msg = "Standard volume [%s] has been created." % (self.name)
else:
self.create_thin_volume(pool_id, self.name, self.size_unit, self.size,
self.thin_volume_repo_size, self.thin_volume_max_repo_size,
self.data_assurance_enabled)
msg = "Thin volume [%s] has been created." % (self.name)
else: # volume exists but differs, modify...
if self.volume_needs_expansion:
self.expand_volume()
msg = "Volume [%s] has been expanded." % (self.name)
# this stuff always needs to run on present (since props can't be set on creation)
if self.volume_properties_changed:
self.update_volume_properties()
msg = "Properties of volume [%s] has been updated." % (self.name)
elif self.state == 'absent':
self.delete_volume()
msg = "Volume [%s] has been deleted." % (self.name)
else:
self.debug("exiting with no changes")
if self.state == 'absent':
msg = "Volume [%s] did not exist." % (self.name)
else:
msg = "Volume [%s] already exists." % (self.name)
self.module.exit_json(msg=msg, changed=changed)
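# Illustrative sketch only (not part of the module): the volume_needs_expansion
# property above reduces to a unit conversion plus a strict ">" comparison. The
# unit map below is an assumption for illustration; the real module builds its
# own self._size_unit_map elsewhere (not shown in this file).
_EXAMPLE_SIZE_UNIT_MAP = {'bytes': 1, 'kb': 1024, 'mb': 1024 ** 2, 'gb': 1024 ** 3, 'tb': 1024 ** 4}
def _example_needs_expansion(requested_size, size_unit, current_capacity_bytes):
    '''Return True if the requested size exceeds the volume's current capacity.'''
    return requested_size * _EXAMPLE_SIZE_UNIT_MAP[size_unit] > int(current_capacity_bytes)
# e.g. _example_needs_expansion(10, 'gb', '5368709120') -> True (10 GiB > 5 GiB)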
def main():
v = NetAppESeriesVolume()
try:
v.apply()
except Exception:
e = get_exception()
v.debug("Exception in apply(): \n%s" % format_exc(e))
v.module.fail_json(msg="Module failed. Error [%s]." % (str(e)))
if __name__ == '__main__':
main()
| 42.816726 | 127 | 0.525371 |
aa0af72bea04f683f43f226b3365f8366d4f1cb3 | 22,573 | py | Python | flopy/mf6/modflow/mfgwtmwt.py | dbrakenhoff/flopy | 0d5953303694e023da8ee9e9453862bbdb7ffb66 | ["CC0-1.0", "BSD-3-Clause"] | null | null | null | flopy/mf6/modflow/mfgwtmwt.py | dbrakenhoff/flopy | 0d5953303694e023da8ee9e9453862bbdb7ffb66 | ["CC0-1.0", "BSD-3-Clause"] | null | null | null | flopy/mf6/modflow/mfgwtmwt.py | dbrakenhoff/flopy | 0d5953303694e023da8ee9e9453862bbdb7ffb66 | ["CC0-1.0", "BSD-3-Clause"] | null | null | null |
# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY
# mf6/utils/createpackages.py
from .. import mfpackage
from ..data.mfdatautil import ListTemplateGenerator
class ModflowGwtmwt(mfpackage.MFPackage):
"""
ModflowGwtmwt defines a mwt package within a gwt6 model.
Parameters
----------
model : MFModel
Model that this package is a part of. Package is automatically
added to model when it is initialized.
loading_package : bool
Do not set this parameter. It is intended for debugging and internal
processing purposes only.
flow_package_name : string
* flow_package_name (string) keyword to specify the name of the
corresponding flow package. If not specified, then the corresponding
flow package must have the same name as this advanced transport
package (the name associated with this package in the GWT name file).
auxiliary : [string]
* auxiliary (string) defines an array of one or more auxiliary variable
names. There is no limit on the number of auxiliary variables that
can be provided on this line; however, lists of information provided
in subsequent blocks must have a column of data for each auxiliary
variable name defined here. The number of auxiliary variables
detected on this line determines the value for naux. Comments cannot
be provided anywhere on this line as they will be interpreted as
auxiliary variable names. Auxiliary variables may not be used by the
package, but they will be available for use by other parts of the
program. The program will terminate with an error if auxiliary
variables are specified on more than one line in the options block.
flow_package_auxiliary_name : string
* flow_package_auxiliary_name (string) keyword to specify the name of
an auxiliary variable in the corresponding flow package. If
specified, then the simulated concentrations from this advanced
transport package will be copied into the auxiliary variable
specified with this name. Note that the flow package must have an
auxiliary variable with this name or the program will terminate with
an error. If the flows for this advanced transport package are read
          from a file, then this option will have no effect.
boundnames : boolean
* boundnames (boolean) keyword to indicate that boundary names may be
provided with the list of well cells.
print_input : boolean
* print_input (boolean) keyword to indicate that the list of well
information will be written to the listing file immediately after it
is read.
print_concentration : boolean
        * print_concentration (boolean) keyword to indicate that the list of
          well concentrations will be printed to the listing file for every
          stress period in which "HEAD PRINT" is specified in Output Control.
          If there is no Output Control option and PRINT_CONCENTRATION is
          specified, then concentrations are printed for the last time step of
          each stress period.
print_flows : boolean
* print_flows (boolean) keyword to indicate that the list of well flow
rates will be printed to the listing file for every stress period
time step in which "BUDGET PRINT" is specified in Output Control. If
there is no Output Control option and "PRINT_FLOWS" is specified,
then flow rates are printed for the last time step of each stress
period.
save_flows : boolean
* save_flows (boolean) keyword to indicate that well flow terms will be
written to the file specified with "BUDGET FILEOUT" in Output
Control.
concentration_filerecord : [concfile]
* concfile (string) name of the binary output file to write
concentration information.
budget_filerecord : [budgetfile]
* budgetfile (string) name of the binary output file to write budget
information.
timeseries : {varname:data} or timeseries data
* Contains data for the ts package. Data can be stored in a dictionary
containing data for the ts package with variable names as keys and
package data as values. Data just for the timeseries variable is also
acceptable. See ts package documentation for more information.
observations : {varname:data} or continuous data
* Contains data for the obs package. Data can be stored in a dictionary
containing data for the obs package with variable names as keys and
package data as values. Data just for the observations variable is
also acceptable. See obs package documentation for more information.
packagedata : [mawno, strt, aux, boundname]
* mawno (integer) integer value that defines the well number associated
with the specified PACKAGEDATA data on the line. MAWNO must be
greater than zero and less than or equal to NMAWWELLS. Well
information must be specified for every well or the program will
terminate with an error. The program will also terminate with an
error if information for a well is specified more than once. This
argument is an index variable, which means that it should be treated
as zero-based when working with FloPy and Python. Flopy will
automatically subtract one when loading index variables and add one
when writing index variables.
* strt (double) real value that defines the starting concentration for
the well.
* aux (double) represents the values of the auxiliary variables for
each well. The values of auxiliary variables must be present for each
well. The values must be specified in the order of the auxiliary
variables specified in the OPTIONS block. If the package supports
time series and the Options block includes a TIMESERIESFILE entry
(see the "Time-Variable Input" section), values can be obtained from
a time series by entering the time-series name in place of a numeric
value.
* boundname (string) name of the well cell. BOUNDNAME is an ASCII
character variable that can contain as many as 40 characters. If
BOUNDNAME contains spaces in it, then the entire name must be
enclosed within single quotes.
mwtperioddata : [mawno, mwtsetting]
* mawno (integer) integer value that defines the well number associated
with the specified PERIOD data on the line. MAWNO must be greater
than zero and less than or equal to NMAWWELLS. This argument is an
index variable, which means that it should be treated as zero-based
when working with FloPy and Python. Flopy will automatically subtract
one when loading index variables and add one when writing index
variables.
* mwtsetting (keystring) line of information that is parsed into a
keyword and values. Keyword values that can be used to start the
MWTSETTING string include: STATUS, CONCENTRATION, RAINFALL,
EVAPORATION, RUNOFF, and AUXILIARY. These settings are used to assign
          the concentration associated with the corresponding flow terms.
Concentrations cannot be specified for all flow terms. For example,
the Multi-Aquifer Well Package supports a "WITHDRAWAL" flow term. If
this withdrawal term is active, then water will be withdrawn from the
well at the calculated concentration of the well.
status : [string]
* status (string) keyword option to define well status. STATUS
can be ACTIVE, INACTIVE, or CONSTANT. By default, STATUS is
ACTIVE, which means that concentration will be calculated for
the well. If a well is inactive, then there will be no solute
mass fluxes into or out of the well and the inactive value
will be written for the well concentration. If a well is
constant, then the concentration for the well will be fixed
at the user specified value.
concentration : [string]
* concentration (string) real or character value that defines
the concentration for the well. The specified CONCENTRATION
is only applied if the well is a constant concentration well.
If the Options block includes a TIMESERIESFILE entry (see the
"Time-Variable Input" section), values can be obtained from a
time series by entering the time-series name in place of a
numeric value.
rate : [string]
* rate (string) real or character value that defines the
injection solute concentration :math:`(ML^{-3})` for the
well. If the Options block includes a TIMESERIESFILE entry
(see the "Time-Variable Input" section), values can be
obtained from a time series by entering the time-series name
in place of a numeric value.
auxiliaryrecord : [auxname, auxval]
* auxname (string) name for the auxiliary variable to be
assigned AUXVAL. AUXNAME must match one of the auxiliary
variable names defined in the OPTIONS block. If AUXNAME does
not match one of the auxiliary variable names defined in the
OPTIONS block the data are ignored.
* auxval (double) value for the auxiliary variable. If the
Options block includes a TIMESERIESFILE entry (see the "Time-
Variable Input" section), values can be obtained from a time
series by entering the time-series name in place of a numeric
value.
filename : String
File name for this package.
pname : String
Package name for this package.
parent_file : MFPackage
Parent package file that references this package. Only needed for
utility packages (mfutl*). For example, mfutllaktab package must have
a mfgwflak package parent_file.
"""
auxiliary = ListTemplateGenerator(("gwt6", "mwt", "options", "auxiliary"))
concentration_filerecord = ListTemplateGenerator(
("gwt6", "mwt", "options", "concentration_filerecord")
)
budget_filerecord = ListTemplateGenerator(
("gwt6", "mwt", "options", "budget_filerecord")
)
ts_filerecord = ListTemplateGenerator(
("gwt6", "mwt", "options", "ts_filerecord")
)
obs_filerecord = ListTemplateGenerator(
("gwt6", "mwt", "options", "obs_filerecord")
)
packagedata = ListTemplateGenerator(
("gwt6", "mwt", "packagedata", "packagedata")
)
mwtperioddata = ListTemplateGenerator(
("gwt6", "mwt", "period", "mwtperioddata")
)
package_abbr = "gwtmwt"
_package_type = "mwt"
dfn_file_name = "gwt-mwt.dfn"
dfn = [
[
"block options",
"name flow_package_name",
"type string",
"shape",
"reader urword",
"optional true",
],
[
"block options",
"name auxiliary",
"type string",
"shape (naux)",
"reader urword",
"optional true",
],
[
"block options",
"name flow_package_auxiliary_name",
"type string",
"shape",
"reader urword",
"optional true",
],
[
"block options",
"name boundnames",
"type keyword",
"shape",
"reader urword",
"optional true",
],
[
"block options",
"name print_input",
"type keyword",
"reader urword",
"optional true",
],
[
"block options",
"name print_concentration",
"type keyword",
"reader urword",
"optional true",
],
[
"block options",
"name print_flows",
"type keyword",
"reader urword",
"optional true",
],
[
"block options",
"name save_flows",
"type keyword",
"reader urword",
"optional true",
],
[
"block options",
"name concentration_filerecord",
"type record concentration fileout concfile",
"shape",
"reader urword",
"tagged true",
"optional true",
],
[
"block options",
"name concentration",
"type keyword",
"shape",
"in_record true",
"reader urword",
"tagged true",
"optional false",
],
[
"block options",
"name concfile",
"type string",
"preserve_case true",
"shape",
"in_record true",
"reader urword",
"tagged false",
"optional false",
],
[
"block options",
"name budget_filerecord",
"type record budget fileout budgetfile",
"shape",
"reader urword",
"tagged true",
"optional true",
],
[
"block options",
"name budget",
"type keyword",
"shape",
"in_record true",
"reader urword",
"tagged true",
"optional false",
],
[
"block options",
"name fileout",
"type keyword",
"shape",
"in_record true",
"reader urword",
"tagged true",
"optional false",
],
[
"block options",
"name budgetfile",
"type string",
"preserve_case true",
"shape",
"in_record true",
"reader urword",
"tagged false",
"optional false",
],
[
"block options",
"name ts_filerecord",
"type record ts6 filein ts6_filename",
"shape",
"reader urword",
"tagged true",
"optional true",
"construct_package ts",
"construct_data timeseries",
"parameter_name timeseries",
],
[
"block options",
"name ts6",
"type keyword",
"shape",
"in_record true",
"reader urword",
"tagged true",
"optional false",
],
[
"block options",
"name filein",
"type keyword",
"shape",
"in_record true",
"reader urword",
"tagged true",
"optional false",
],
[
"block options",
"name ts6_filename",
"type string",
"preserve_case true",
"in_record true",
"reader urword",
"optional false",
"tagged false",
],
[
"block options",
"name obs_filerecord",
"type record obs6 filein obs6_filename",
"shape",
"reader urword",
"tagged true",
"optional true",
"construct_package obs",
"construct_data continuous",
"parameter_name observations",
],
[
"block options",
"name obs6",
"type keyword",
"shape",
"in_record true",
"reader urword",
"tagged true",
"optional false",
],
[
"block options",
"name obs6_filename",
"type string",
"preserve_case true",
"in_record true",
"tagged false",
"reader urword",
"optional false",
],
[
"block packagedata",
"name packagedata",
"type recarray mawno strt aux boundname",
"shape (maxbound)",
"reader urword",
],
[
"block packagedata",
"name mawno",
"type integer",
"shape",
"tagged false",
"in_record true",
"reader urword",
"numeric_index true",
],
[
"block packagedata",
"name strt",
"type double precision",
"shape",
"tagged false",
"in_record true",
"reader urword",
],
[
"block packagedata",
"name aux",
"type double precision",
"in_record true",
"tagged false",
"shape (naux)",
"reader urword",
"time_series true",
"optional true",
],
[
"block packagedata",
"name boundname",
"type string",
"shape",
"tagged false",
"in_record true",
"reader urword",
"optional true",
],
[
"block period",
"name iper",
"type integer",
"block_variable True",
"in_record true",
"tagged false",
"shape",
"valid",
"reader urword",
"optional false",
],
[
"block period",
"name mwtperioddata",
"type recarray mawno mwtsetting",
"shape",
"reader urword",
],
[
"block period",
"name mawno",
"type integer",
"shape",
"tagged false",
"in_record true",
"reader urword",
"numeric_index true",
],
[
"block period",
"name mwtsetting",
"type keystring status concentration rate auxiliaryrecord",
"shape",
"tagged false",
"in_record true",
"reader urword",
],
[
"block period",
"name status",
"type string",
"shape",
"tagged true",
"in_record true",
"reader urword",
],
[
"block period",
"name concentration",
"type string",
"shape",
"tagged true",
"in_record true",
"time_series true",
"reader urword",
],
[
"block period",
"name rate",
"type string",
"shape",
"tagged true",
"in_record true",
"reader urword",
"time_series true",
],
[
"block period",
"name auxiliaryrecord",
"type record auxiliary auxname auxval",
"shape",
"tagged",
"in_record true",
"reader urword",
],
[
"block period",
"name auxiliary",
"type keyword",
"shape",
"in_record true",
"reader urword",
],
[
"block period",
"name auxname",
"type string",
"shape",
"tagged false",
"in_record true",
"reader urword",
],
[
"block period",
"name auxval",
"type double precision",
"shape",
"tagged false",
"in_record true",
"reader urword",
"time_series true",
],
]
def __init__(
self,
model,
loading_package=False,
flow_package_name=None,
auxiliary=None,
flow_package_auxiliary_name=None,
boundnames=None,
print_input=None,
print_concentration=None,
print_flows=None,
save_flows=None,
concentration_filerecord=None,
budget_filerecord=None,
timeseries=None,
observations=None,
packagedata=None,
mwtperioddata=None,
filename=None,
pname=None,
parent_file=None,
):
super(ModflowGwtmwt, self).__init__(
model, "mwt", filename, pname, loading_package, parent_file
)
# set up variables
self.flow_package_name = self.build_mfdata(
"flow_package_name", flow_package_name
)
self.auxiliary = self.build_mfdata("auxiliary", auxiliary)
self.flow_package_auxiliary_name = self.build_mfdata(
"flow_package_auxiliary_name", flow_package_auxiliary_name
)
self.boundnames = self.build_mfdata("boundnames", boundnames)
self.print_input = self.build_mfdata("print_input", print_input)
self.print_concentration = self.build_mfdata(
"print_concentration", print_concentration
)
self.print_flows = self.build_mfdata("print_flows", print_flows)
self.save_flows = self.build_mfdata("save_flows", save_flows)
self.concentration_filerecord = self.build_mfdata(
"concentration_filerecord", concentration_filerecord
)
self.budget_filerecord = self.build_mfdata(
"budget_filerecord", budget_filerecord
)
self._ts_filerecord = self.build_mfdata("ts_filerecord", None)
self._ts_package = self.build_child_package(
"ts", timeseries, "timeseries", self._ts_filerecord
)
self._obs_filerecord = self.build_mfdata("obs_filerecord", None)
self._obs_package = self.build_child_package(
"obs", observations, "continuous", self._obs_filerecord
)
self.packagedata = self.build_mfdata("packagedata", packagedata)
self.mwtperioddata = self.build_mfdata("mwtperioddata", mwtperioddata)
self._init_complete = True
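# Minimal usage sketch with assumptions: it presumes a GWT model object `gwt`
# already built with flopy.mf6 (simulation, discretization and the corresponding
# MWT flow package are not shown), and the well number, starting concentration
# and names below are illustrative only, not values from this file.
def _example_build_mwt(gwt):
    # One multi-aquifer well (index 0, zero-based), starting concentration 0.0,
    # labelled "well_1"; stress period 0 keeps its status ACTIVE.
    return ModflowGwtmwt(
        gwt,
        boundnames=True,
        print_concentration=True,
        packagedata=[(0, 0.0, "well_1")],
        mwtperioddata={0: [(0, "status", "active")]},
        pname="mwt_1",
    )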
| 36.883987 | 79 | 0.555752 |
6ae9a916c7114d348e1270f0b9bba07feb12b842 | 2,167 | py | Python | 22/sol.py | chaserobertson/advent2020 | 77da333b66b6cc152834888dfe022cd526de45cc | ["MIT"] | null | null | null | 22/sol.py | chaserobertson/advent2020 | 77da333b66b6cc152834888dfe022cd526de45cc | ["MIT"] | null | null | null | 22/sol.py | chaserobertson/advent2020 | 77da333b66b6cc152834888dfe022cd526de45cc | ["MIT"] | null | null | null |
import pathlib
import queue
in_file = pathlib.Path.cwd().joinpath('22', 'input.txt')
with open(in_file) as input:
lines = input.readlines()
class Player:
def __init__(self, id):
self.id = int(id)
self.deck = queue.SimpleQueue()
def __str__(self):
output = 'Player ' + str(self.id) + '\'s deck:'
if self.deck.empty():
return output
newQueue = queue.SimpleQueue()
while not self.deck.empty():
card = self.deck.get()
output += ' ' + str(card) + ','
newQueue.put(card)
self.deck = newQueue
return output[:-1]
def score(self):
score = 0
stack = queue.LifoQueue()
while not self.deck.empty():
stack.put(self.deck.get())
multiplier = 1
while not stack.empty():
score += multiplier * stack.get()
multiplier += 1
return score
def topCard(self):
return self.deck.get()
def insertBottomCard(self, card):
self.deck.put(int(card))
def insertBottomCards(self, cards):
for card in cards:
self.insertBottomCard(card)
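# Illustrative cross-check of the scoring rule used by Player.score(): the bottom
# card is worth its value times 1, the next one up times 2, and so on. The sample
# deck below is the worked example from the day 22 puzzle text (score 306).
def score_from_list(cards_top_to_bottom):
    n = len(cards_top_to_bottom)
    return sum(card * (n - i) for i, card in enumerate(cards_top_to_bottom))
assert score_from_list([3, 2, 10, 6, 8, 5, 9, 4, 7, 1]) == 306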
print('part 1')
player_id = 1
players = []
for line in lines:
if line == '\n':
continue
elif line[:6] == 'Player':
players.append(Player(player_id))
player_id += 1
else:
players[-1].insertBottomCard(line.strip())
print('Initial Hands: ')
for player in players:
print(player)
round = 1
while not sum([player.deck.empty() for player in players]):
print('-- Round %d --' % round)
for player in players:
print(player)
cards = [player.topCard() for player in players]
for i in range(0, len(cards)):
print('Player {0} plays: {1}'.format(i + 1, cards[i]))
winning_card = max(cards)
winner = cards.index(winning_card)
print('Player {0} wins the round!'.format(winner + 1))
cards.sort()
cards.reverse()
players[winner].insertBottomCards(cards)
round += 1
print()
print()
print('== Post-game results ==')
for player in players:
print(player)
print(player.score())
print('part 2')
| 24.077778 | 62 | 0.577757 |
1f3c0a8d66f05e05ac6113769f9290b209e434f3 | 5,017 | py | Python | docs/conf.py | radtomas/sufler | ff60fb110b5aad8ca0ea6d784148b596a3e5a0db | ["MIT"] | 3 | 2018-03-29T14:35:08.000Z | 2020-11-18T03:11:10.000Z | docs/conf.py | radtomas/sufler | ff60fb110b5aad8ca0ea6d784148b596a3e5a0db | ["MIT"] | 12 | 2018-03-22T10:47:11.000Z | 2021-03-25T22:08:41.000Z | docs/conf.py | radtomas/sufler | ff60fb110b5aad8ca0ea6d784148b596a3e5a0db | ["MIT"] | 1 | 2018-03-09T13:35:12.000Z | 2018-03-09T13:35:12.000Z |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
sys.path.insert(0, project_root)
# -- Project information -----------------------------------------------------
project = u'Sufler'
copyright = u'2018, LimeBrains'
author = u'Radoslaw Tomaszewski'
# The short X.Y version
version = u'0.0.1'
# The full version, including alpha/beta/rc tags
release = u'0.0.1'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'suflerdoc'
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'searchbox.html',
]
}
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'sufler.tex', u'sufler Documentation',
u'Radoslaw Tomaszewski', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'sufler', u'sufler Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'sufler', u'sufler Documentation',
author, 'sufler', 'One line description of project.',
'Miscellaneous'),
]
| 29.863095 | 79 | 0.651585 |
0fe0cb324a68728ec1877b6c3ab8e46ee6bb5ac2 | 46,312 | py | Python | gramex/cache.py | joshuamosesb/gramex | e416cb609698b5941a18b06743c853dee50e0500 | ["MIT"] | 1 | 2020-05-17T18:03:44.000Z | 2020-05-17T18:03:44.000Z | gramex/cache.py | joshuamosesb/gramex | e416cb609698b5941a18b06743c853dee50e0500 | ["MIT"] | null | null | null | gramex/cache.py | joshuamosesb/gramex | e416cb609698b5941a18b06743c853dee50e0500 | ["MIT"] | null | null | null |
'''Caching utilities'''
from __future__ import unicode_literals
import io
import os
import re
import six
import sys
import json
import time
import atexit
import inspect
import requests
import tempfile
import mimetypes
import subprocess # nosec
import pandas as pd
import tornado.template
from threading import Thread
from six.moves.queue import Queue
from orderedattrdict import AttrDict
from tornado.concurrent import Future
from tornado.ioloop import IOLoop, PeriodicCallback
from gramex.config import app_log, merge, used_kwargs, CustomJSONDecoder, CustomJSONEncoder
from six.moves.urllib_parse import urlparse
MILLISECOND = 0.001 # in seconds
_opener_defaults = dict(mode='r', buffering=-1, encoding='utf-8', errors='strict',
newline=None, closefd=True)
_markdown_defaults = dict(output_format='html5', extensions=[
'markdown.extensions.codehilite',
'markdown.extensions.extra',
'markdown.extensions.toc',
'markdown.extensions.meta',
'markdown.extensions.sane_lists',
'markdown.extensions.smarty',
])
# A set of temporary files to delete on program exit
_TEMP_FILES = set()
_ID_CACHE = set()
def _delete_temp_files():
for path in _TEMP_FILES:
if os.path.exists(path):
os.remove(path)
atexit.register(_delete_temp_files)
def hashfn(fn):
'''Returns a unique hash value for the function.'''
# id() returns a unique value for the lifetime of an object.
# To ensure that ID is not re-cycled, cache object, so it's never released.
_ID_CACHE.add(fn)
return id(fn)
def cache_key(*args):
'''Converts arguments into a string suitable for use as a cache key'''
return json.dumps(args, sort_keys=True, separators=(',', ':'))
def opener(callback, read=False, **open_kwargs):
'''
Converts any function that accepts a string or handle as its parameter into
a function that takes the first parameter from a file path.
Here are a few examples::
jsonload = opener(json.load)
jsonload('x.json') # opens x.json and runs json.load(handle)
gramex.cache.open('x.json', jsonload) # Loads x.json, cached
# read=True parameter passes the contents (not handle) to the function
template = opener(string.Template, read=True)
template('abc.txt').substitute(x=val)
gramex.cache.open('abc.txt', template).substitute(x=val)
# If read=True, callback may be None. The result of .read() is passed as-is
text = opener(None, read=True)
gramex.cache.open('abc.txt', text)
Keyword arguments applicable for ``io.open`` are passed to ``io.open``. These
default to ``io.open(mode='r', buffering=-1, encoding='utf-8',
errors='strict', newline=None, closefd=True)``. All other arguments and
keyword arguments are passed to the callback (e.g. to ``json.load``).
When reading binary files, pass ``mode='rb', encoding=None, errors=None``.
'''
merge(open_kwargs, _opener_defaults, 'setdefault')
if read:
# Pass contents to callback
def method(path, **kwargs):
open_args = {key: kwargs.pop(key, val) for key, val in open_kwargs.items()}
with io.open(path, **open_args) as handle:
result = handle.read()
return callback(result, **kwargs) if callable(callback) else result
else:
if not callable(callback):
            raise ValueError('opener callback %r is not a function' % callback)
# Pass handle to callback
def method(path, **kwargs):
open_args = {key: kwargs.pop(key, val) for key, val in open_kwargs.items()}
with io.open(path, **open_args) as handle:
return callback(handle, **kwargs)
return method
@opener
def _markdown(handle, **kwargs):
from markdown import markdown
return markdown(handle.read(), **{k: kwargs.pop(k, v) for k, v in _markdown_defaults.items()})
def _template(path, **kwargs):
root, name = os.path.split(path)
return tornado.template.Loader(root, **kwargs).load(name)
def stat(path):
'''
Returns a file status tuple - based on file last modified time and file size
'''
if os.path.exists(path):
stat = os.stat(path)
return (stat.st_mtime, stat.st_size)
return (None, None)
def hashed(val):
'''Return the hashed value of val. If not possible, return None'''
try:
hash(val)
return val
except TypeError:
try:
return json.dumps(val, sort_keys=True, separators=(',', ':'))
except Exception:
return None
# gramex.cache.open() stores its cache here.
# {(path, callback): {data: ..., stat: ...}}
_OPEN_CACHE = {}
_OPEN_CALLBACKS = dict(
bin=opener(None, read=True, mode='rb', encoding=None, errors=None),
txt=opener(None, read=True),
text=opener(None, read=True),
csv=pd.read_csv,
excel=pd.read_excel,
xls=pd.read_excel,
xlsx=pd.read_excel,
hdf=pd.read_hdf,
html=pd.read_html,
jsondata=pd.read_json,
sas=pd.read_sas,
stata=pd.read_stata,
table=pd.read_table,
parquet=pd.read_parquet,
feather=pd.read_feather,
md=_markdown,
markdown=_markdown,
tmpl=_template,
template=_template,
)
def open(path, callback=None, transform=None, rel=False, **kwargs):
'''
Reads a file, processes it via a callback, caches the result and returns it.
When called again, returns the cached result unless the file has updated.
By default, it determine the file type using the extension. For example::
open('data.yaml') # Loads a YAML file
open('data.csv') # Loads a CSV file
The 2nd parameter (callback) a predefined string that can be one of
- ``bin``: reads binary files using io.open
- ``text`` or ``txt``: reads text files using io.open
- ``yaml``: reads files using yaml.load via io.open
    - ``config``: reads files using :py:class:`gramex.config.PathConfig`.
Same as ``yaml``, but allows ``import:`` and variable substitution.
- ``json``: reads files using json.load via io.open
- ``jsondata``: reads files using pd.read_json
- ``template``: reads files using tornado.Template via io.open
- ``markdown`` or ``md``: reads files using markdown.markdown via io.open
    - ``csv``, ``excel``, ``xls``, ``xlsx``, ``hdf``, ``html``, ``sas``,
``stata``, ``table``, ``parquet``, ``feather``: reads using Pandas
- ``xml``, ``svg``, ``rss``, ``atom``: reads using lxml.etree
For example::
# Load data.yaml as YAML into an AttrDict
open('data.yaml', 'yaml')
# Load data.json as JSON into an AttrDict
open('data.json', 'json', object_pairs_hook=AttrDict)
# Load data.csv as CSV into a Pandas DataFrame
open('data.csv', 'csv', encoding='cp1252')
It can also be a function that accepts the filename and any other arguments::
# Load data using a custom callback
open('data.fmt', my_format_reader_function, arg='value')
This is called as ``my_format_reader_function('data.fmt', arg='value')`` and
cached. Future calls do not re-load and re-calculate this data.
    ``transform=`` is an optional function that processes the data returned by
the callback. For example::
# Returns the count of the CSV file, updating it only when changed
open('data.csv', 'csv', transform=lambda data: len(data))
        # After loading data.xlsx into a DataFrame, return the grouped result
        open('data.xlsx', 'xlsx', transform=lambda data: data.groupby('city')['sales'].sum())
If ``transform=`` is not a callable, it is ignored.
``rel=True`` opens the path relative to the caller function's file path. If
``D:/app/calc.py`` calls ``open('data.csv', 'csv', rel=True)``, the path
is replaced with ``D:/app/data.csv``.
Any other keyword arguments are passed directly to the callback. If the
callback is a predefined string and uses io.open, all argument applicable to
io.open are passed to io.open and the rest are passed to the callback.
'''
# Pass _reload_status = True for testing purposes. This returns a tuple:
# (result, reloaded) instead of just the result.
_reload_status = kwargs.pop('_reload_status', False)
reloaded = False
_cache = kwargs.pop('_cache', _OPEN_CACHE)
# Get the parent frame's filename. Compute path relative to that.
if rel:
stack = inspect.getouterframes(inspect.currentframe(), 2)
folder = os.path.dirname(os.path.abspath(stack[1][1]))
path = os.path.join(folder, path)
original_callback = callback
if callback is None:
callback = os.path.splitext(path)[-1][1:]
callback_is_str = isinstance(callback, six.string_types)
key = (
path,
original_callback if callback_is_str else id(callback),
hashfn(transform),
frozenset(((k, hashed(v)) for k, v in kwargs.items())),
)
cached = _cache.get(key, None)
fstat = stat(path)
if cached is None or fstat != cached.get('stat'):
reloaded = True
if callable(callback):
data = callback(path, **kwargs)
elif callback_is_str:
method = None
if callback in _OPEN_CALLBACKS:
method = _OPEN_CALLBACKS[callback]
elif callback in {'yml', 'yaml'}:
import yaml
method = opener(yaml.load)
elif callback in {'json'}:
import json
method = opener(json.load)
elif callback in {'config'}:
from gramex.config import PathConfig
method = PathConfig
elif callback in {'xml', 'svg', 'rss', 'atom'}:
from lxml import etree
method = etree.parse
if method is not None:
data = method(path, **kwargs)
elif original_callback is None:
raise TypeError('gramex.cache.open: path "%s" has unknown extension' % path)
else:
raise TypeError('gramex.cache.open(callback="%s") is not a known type' % callback)
else:
raise TypeError('gramex.cache.open(callback=) must be a function, not %r' % callback)
if callable(transform):
data = transform(data)
_cache[key] = {'data': data, 'stat': fstat}
result = _cache[key]['data']
return (result, reloaded) if _reload_status else result
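# Small usage sketch of the caching behaviour (assumes pandas and a local
# 'demo.csv' file exist; the _reload_status flag is the testing hook noted above).
def _open_cache_demo():
    df1, reloaded1 = open('demo.csv', 'csv', _reload_status=True)   # read from disk
    df2, reloaded2 = open('demo.csv', 'csv', _reload_status=True)   # served from cache
    return df1 is df2 and reloaded1 and not reloaded2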
def open_cache(cache):
'''
Use ``cache`` as the new cache for all open requests.
Copies keys from old cache, and deletes them from the old cache.
'''
global _OPEN_CACHE
# Copy keys from old cache to new cache. Delete from
keys = list(_OPEN_CACHE.keys())
for key in keys:
cache[key] = _OPEN_CACHE[key]
del _OPEN_CACHE[key]
_OPEN_CACHE = cache
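# Hedged sketch: bound the open() cache by swapping in an LRU mapping. Assumes
# the third-party cachetools package; any MutableMapping-like object would do.
def _bounded_cache_demo():
    from cachetools import LRUCache
    open_cache(LRUCache(maxsize=500))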
_SAVE_CALLBACKS = dict(
json='to_json',
csv='to_csv',
xlsx='to_excel',
hdf='to_hdf',
html='to_html',
stata='to_stata',
# Other configurations not supported
)
def save(data, url, callback=None, **kwargs):
'''
Saves a DataFrame into file at url. It does not cache.
``callback`` is almost the same as for :py:func:`gramex.cache.open`. It can
be ``json``, ``csv``, ``xlsx``, ``hdf``, ``html``, ``stata`` or
a function that accepts the filename and any other arguments.
Other keyword arguments are passed directly to the callback.
'''
if callback is None:
callback = os.path.splitext(url)[-1][1:]
if callable(callback):
return callback(data, url, **kwargs)
elif callback in _SAVE_CALLBACKS:
method = getattr(data, _SAVE_CALLBACKS[callback])
return method(url, **(used_kwargs(method, kwargs)[0]))
else:
raise TypeError('gramex.cache.save(callback="%s") is unknown' % callback)
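# Usage sketch (assumes pandas plus an Excel writer such as openpyxl is
# installed). The callback 'xlsx' maps to DataFrame.to_excel; keyword arguments
# the writer accepts (here index=False) are forwarded, the rest are dropped.
def _save_demo():
    df = pd.DataFrame({'city': ['Oslo', 'Pune'], 'sales': [10, 20]})
    save(df, 'sales.xlsx', index=False)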
# gramex.cache.query() stores its cache here
_QUERY_CACHE = {}
_STATUS_METHODS = {}
def _wheres(dbkey, tablekey, default_db, names, fn=None):
'''
Convert a table name list like ['sales', 'dept.sales']) to a WHERE clause
like ``(table="sales") OR (db="dept" AND table="sales")``.
TODO: escape the table names to avoid SQL injection attacks
'''
where = []
for name in names:
db, table = name.rsplit('.', 2) if '.' in name else (default_db, name)
if not fn:
where.append("({}='{}' AND {}='{}')".format(dbkey, db, tablekey, table))
else:
where.append("({}={}('{}') AND {}={}('{}'))".format(
dbkey, fn[0], db, tablekey, fn[1], table))
return ' OR '.join(where)
def _table_status(engine, tables):
'''
Returns the last updated date of a list of tables.
'''
# Cache the SQL query or file date check function beforehand.
# Every time method is called with a URL and table list, run cached query
dialect = engine.dialect.name
key = (engine.url, tuple(tables))
db = engine.url.database
if _STATUS_METHODS.get(key, None) is None:
if len(tables) == 0:
            raise ValueError('gramex.cache.query table list is empty: %r' % (tables, ))
for name in tables:
if not name or not isinstance(name, six.string_types):
                raise ValueError('gramex.cache.query invalid table list: %r' % (tables, ))
if dialect == 'mysql':
# https://dev.mysql.com/doc/refman/5.7/en/tables-table.html
# Works only on MySQL 5.7 and above
q = ('SELECT update_time FROM information_schema.tables WHERE ' +
_wheres('table_schema', 'table_name', db, tables))
elif dialect == 'mssql':
# https://goo.gl/b4aL9m
q = ('SELECT last_user_update FROM sys.dm_db_index_usage_stats WHERE ' +
_wheres('database_id', 'object_id', db, tables, fn=['DB_ID', 'OBJECT_ID']))
elif dialect == 'postgresql':
# https://www.postgresql.org/docs/9.6/static/monitoring-stats.html
q = ('SELECT n_tup_ins, n_tup_upd, n_tup_del FROM pg_stat_all_tables WHERE ' +
_wheres('schemaname', 'relname', 'public', tables))
elif dialect == 'sqlite':
if not db:
raise KeyError('gramex.cache.query does not support memory sqlite "%s"' % dialect)
q = db
else:
raise KeyError('gramex.cache.query cannot cache dialect "%s" yet' % dialect)
if dialect == 'sqlite':
_STATUS_METHODS[key] = lambda: stat(q)
else:
_STATUS_METHODS[key] = lambda: pd.read_sql(q, engine).to_json(orient='records')
return _STATUS_METHODS[key]()
def query(sql, engine, state=None, **kwargs):
'''
Read SQL query or database table into a DataFrame. Caches results unless
state has changed. It always re-runs the query unless state is specified.
    The state can be specified in 4 ways:
    1. A string. This must be a lightweight SQL query. If the result changes,
the original SQL query is re-run.
2. A function. This is called to determine the state of the database.
3. A list of tables. This list of ["db.table"] names specifies which tables
to watch for. This is currently experimental.
4. ``None``: the default. The query is always re-run and not cached.
'''
# Pass _reload_status = True for testing purposes. This returns a tuple:
# (result, reloaded) instead of just the result.
_reload_status = kwargs.pop('_reload_status', False)
reloaded = False
_cache = kwargs.pop('_cache', _QUERY_CACHE)
store_cache = True
key = (sql, engine.url)
current_status = _cache.get(key, {}).get('status', None)
if isinstance(state, (list, tuple)):
status = _table_status(engine, tuple(state))
elif isinstance(state, six.string_types):
status = pd.read_sql(state, engine).to_dict(orient='list')
elif callable(state):
status = state()
elif state is None:
# Create a new status every time, so that the query is always re-run
status = object()
store_cache = False
else:
        raise TypeError(
            'gramex.cache.query(state=) must be a table list, query or fn, not %r' % (state, ))
if status == current_status:
result = _cache[key]['data']
else:
app_log.debug('gramex.cache.query: %s. engine: %s. state: %s. kwargs: %s', sql, engine,
state, kwargs)
result = pd.read_sql(sql, engine, **kwargs)
if store_cache:
_cache[key] = {
'data': result,
'status': status,
}
reloaded = True
return (result, reloaded) if _reload_status else result
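# Hedged usage sketch (assumes SQLAlchemy and a reachable PostgreSQL database;
# the connection string and the 'sales' table are hypothetical). With
# state=['sales'] the query is re-run only when that table's status changes;
# with state=None it is re-run on every call.
def _query_demo():
    import sqlalchemy
    engine = sqlalchemy.create_engine('postgresql://user:pass@localhost/mydb')
    return query('SELECT city, SUM(sales) FROM sales GROUP BY city', engine, state=['sales'])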
# gramex.cache.reload_module() stores its cache here. {module_name: file_stat}
_MODULE_CACHE = {}
def reload_module(*modules):
'''
    Reloads one or more modules if they are outdated, i.e. only if the
    underlying source file has changed.
For example::
import mymodule # Load cached module
reload_module(mymodule) # Reload module if the source has changed
    This is most useful during template development. If your changes are in a
    Python module, add these lines to pick up new module changes when the
    template is re-run.
'''
for module in modules:
name = getattr(module, '__name__', None)
path = getattr(module, '__file__', None)
# sys.__file__ does not exist, but don't raise a warning. You can't reload it
if name in {'sys'}:
continue
if name is None or path is None or not os.path.exists(path):
app_log.warning('Path for module %s is %s: not found', name, path)
continue
# On Python 3, __file__ points to the .py file. In Python 2, it's the .pyc file
# https://www.python.org/dev/peps/pep-3147/#file
if path.lower().endswith('.pyc'):
path = path[:-1]
if not os.path.exists(path):
app_log.warning('Path for module %s is %s: not found', name, path)
continue
# The first time, don't reload it. Thereafter, if it's older or resized, reload it
fstat = stat(path)
if fstat != _MODULE_CACHE.get(name, fstat):
app_log.info('Reloading module %s', name)
six.moves.reload_module(module)
_MODULE_CACHE[name] = fstat
def urlfetch(path, info=False, **kwargs):
'''
- If path is a file path, return as is.
    - If path is a file path and info is true, return a dict with name (the file
      path), ext (extension) and content_type, with r and url set to None.
- If path is a URL, download the file, return the saved filename.
The filename extension is based on the URL's Content-Type HTTP header.
- If info is true, returns a dict with name (filename), r (request)
url, ext (extension), content_type.
- Any other keyword arguments are passed to requests.get.
- Automatically delete the files on exit of the application.
- This is a synchronous function, i.e. it waits until the file is downloaded.
'''
url = urlparse(path)
if url.scheme not in {'http', 'https'}: # path is a filepath
if info:
ext = os.path.splitext(path)[1]
content_type = mimetypes.guess_type(path, strict=True)[0]
return {'name': path, 'r': None, 'url': None, 'ext': ext, 'content_type': content_type}
else:
return path
r = requests.get(path, **kwargs)
if 'Content-Type' in r.headers:
content_type = r.headers['Content-Type'].split(';')[0]
ext = mimetypes.guess_extension(content_type, strict=False)
else:
ext = os.path.splitext(url.path)[1]
with tempfile.NamedTemporaryFile(suffix=ext, delete=False) as handle:
for chunk in r.iter_content(chunk_size=16384):
handle.write(chunk)
_TEMP_FILES.add(handle.name)
if info:
return {'name': handle.name, 'r': r, 'url': url, 'ext': ext, 'content_type': content_type}
else:
return handle.name
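# Illustrative sketch (the URL is hypothetical). An http(s) path is downloaded to
# a temporary file whose extension follows the Content-Type header and which is
# removed at process exit; a plain file path would be returned unchanged.
def _urlfetch_demo():
    meta = urlfetch('https://example.org/report.pdf', info=True)
    return meta['name'], meta['content_type']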
class Subprocess(object):
'''
tornado.process.Subprocess does not work on Windows.
https://github.com/tornadoweb/tornado/issues/1585
This is a threaded alternative based on
http://stackoverflow.com/a/4896288/100904
Run a program async and wait for it to execute. Then get its output::
stdout, stderr = yield Subprocess(['ls', '-la']).wait_for_exit()
Run a program async and send each line to the handler as it writes::
yield Subprocess(
['ls', '-la'], # Run 'ls -la'
buffer_size='line', # Buffer output line by line
stream_stdout=handler.write, # Send output to handler.write(line)
stream_stderr=handler.write, # Send errors to handler.write(line)
)
Run a program async and appends output into a list::
proc = Subprocess(
['ls', '-la'],
buffer_size='line',
stream_stdout='list_out', # Append output to self.list_out
stream_stderr='list_err', # Append errors to self.list_err
)
output = proc.list_out[-10:] # Return last 10 lines of output
yield proc.wait_for_exit() # Wait until application is done
Run a program async and appends output into a queue::
proc = Subprocess(
['ls', '-la'], # Run 'ls -la'
buffer_size='line', # Buffer output line by line
stream_stdout='queue_out', # Save output in proc.out queue
stream_stderr='queue_err', # Save errors in proc.err queue
)
output = proc.queue_out.get_nowait() # Returns first line of output
yield proc.wait_for_exit() # Wait until application is done
To write to multiple streams, pass a list::
proc = Subprocess(
args,
buffer_size='line',
stream_stdout=[handler.write, 'list_out', 'queue_out', my_callback],
stream_stderr=[handler.write, 'list_err', 'queue_err', my_callback],
**kwargs
)
yield proc.wait_for_exit()
To check the process return code, use ``.proc`` which has the ``Popen``
object::
if proc.proc.returncode:
raise Exception('Process failed with return code %d', proc.proc.returncode)
:arg list args: command line arguments passed as a list to Subprocess
:arg methodlist stream_stdout: optional list of write methods - called when stdout has data
:arg methodlist stream_stderr: optional list of write methods - called when stderr has data
:arg str_or_int buffer_size: 'line' to write line by line, any int for chunk size
:arg dict kwargs: additional kwargs passed to subprocess.Popen
stream_stdout and stream_stderr can be:
- a function that accept a byte string. Called as stdout/stderr are buffered
- OR a string starting with ``list_`` or ``queue_``. Appends buffered output
- OR a list of any of the above
- OR an empty list. In this case, ``.wait_for_exit()`` returns a tuple with
``stdout`` and ``stderr`` as a tuple of byte strings.
'''
def __init__(self, args, stream_stdout=[], stream_stderr=[], buffer_size=0, **kwargs):
self.args = args
# self.proc.stdout & self.proc.stderr are streams with process output
kwargs['stdout'] = kwargs['stderr'] = subprocess.PIPE
# On UNIX, close all file descriptors except 0, 1, 2 before child
# process is executed. I've no idea why. Copied from
# http://stackoverflow.com/a/4896288/100904
kwargs['close_fds'] = 'posix' in sys.builtin_module_names
self.proc = subprocess.Popen(args, **kwargs) # nosec
self.thread = {} # Has the running threads
self.future = {} # Stores the futures indicating stream close
self.loop = _get_current_ioloop()
# Buffering has 2 modes. buffer_size='line' reads and writes line by line
# buffer_size=<number> reads in byte chunks. Define the appropriate method
if hasattr(buffer_size, 'lower') and 'line' in buffer_size.lower():
def _write(stream, callbacks, future, retval):
'''Call callbacks with content from stream. On EOF mark future as done'''
while True:
content = stream.readline()
if len(content) > 0:
if isinstance(content, six.text_type):
content = content.encode('utf-8')
for callback in callbacks:
callback(content)
else:
stream.close()
break
while self.proc.poll() is None:
time.sleep(MILLISECOND)
self.loop.add_callback(future.set_result, retval())
else:
# If the buffer size is 0 or negative, use the default buffer size to read
if buffer_size <= 0:
buffer_size = io.DEFAULT_BUFFER_SIZE
def _write(stream, callbacks, future, retval):
'''Call callbacks with content from stream. On EOF mark future as done'''
while True:
content = stream.read(buffer_size)
size = len(content)
if size > 0:
if isinstance(content, six.text_type):
content = content.encode('utf-8')
for callback in callbacks:
# This may raise a ValueError: write to closed file.
# TODO: decide how to handle it.
callback(content)
if size < buffer_size:
stream.close()
break
while self.proc.poll() is None:
time.sleep(MILLISECOND)
self.loop.add_callback(future.set_result, retval())
callbacks_lookup = {'stdout': stream_stdout, 'stderr': stream_stderr}
for stream in ('stdout', 'stderr'):
callbacks = callbacks_lookup[stream]
# If stream_stdout or stream_stderr are not defined, construct a
# BytesIO and return its value when the stream is closed
if not callbacks:
ret_stream = io.BytesIO()
callbacks = [ret_stream.write]
retval = ret_stream.getvalue
else:
retval = lambda: b'' # noqa
# If stream_stdout or stream_stderr has 'out' or 'err', create these
# as queue attributes (self.out, self.err)
callbacks = list(callbacks) if isinstance(callbacks, list) else [callbacks]
for index, method in enumerate(callbacks):
if isinstance(method, six.string_types):
if method.startswith('list_'):
if hasattr(self, method):
callbacks[index] = getattr(self, method).append
else:
log = []
setattr(self, method, log)
callbacks[index] = log.append
elif method.startswith('queue_'):
if hasattr(self, method):
callbacks[index] = getattr(self, method).put
else:
log = Queue()
setattr(self, method, log)
callbacks[index] = log.put
else:
                        raise ValueError('Invalid stream_%s: %s' % (stream, method))
self.future[stream] = future = Future()
# Thread writes from self.proc.stdout / stderr to appropriate callbacks
self.thread[stream] = t = Thread(
target=_write,
args=(getattr(self.proc, stream), callbacks, future, retval))
t.daemon = True # Thread dies with the program
t.start()
def wait_for_exit(self):
'''
Returns futures for (stdout, stderr). To wait for the process to complete, use::
stdout, stderr = yield proc.wait_for_exit()
'''
return [self.future['stdout'], self.future['stderr']]
_daemons = {}
_regex_type = type(re.compile(''))
# Python 3 needs sys.stderr.buffer.write for writing binary strings
_stderr_write = sys.stderr.buffer.write if hasattr(sys.stderr, 'buffer') else sys.stderr.write
def daemon(args, restart=1, first_line=None, stream=True, timeout=5, buffer_size='line', **kwargs):
'''
This is the same as :py:class:`Subprocess`, but has a few additional checks.
1. If we have already called :py:class:`Subprocess` with the same arguments,
re-use the same instance.
2. Send the process STDOUT and STDERR to this application's STDERR. This
makes it easy to see what errors the application reports.
3. Supports retry attempts.
4. Checks if the first line of output is a matches a string / re -- ensuring
that the application started properly.
'''
arg_str = args if isinstance(args, six.string_types) else ' '.join(args)
try:
key = cache_key(arg_str, kwargs)
except (TypeError, ValueError):
app_log.error('daemon args must be JSON serializable')
raise
# Send the stdout and stderr to (a) stderr AND to (b) a local queue we read
queue = Queue(maxsize=10)
for channel in ('stream_stdout', 'stream_stderr'):
if channel not in kwargs:
kwargs[channel] = []
elif not isinstance(kwargs[channel], list):
kwargs[channel] = [kwargs[channel]]
if first_line:
kwargs[channel].append(queue.put)
if stream is True:
kwargs[channel].append(_stderr_write)
elif callable(stream):
kwargs[channel].append(stream)
# Buffer by line by default. This is required for the first_line check, not otherwise.
kwargs['buffer_size'] = buffer_size
# started is set if we actually call Subprocess as part of this function
started = False
# If process was never started, start it
if key not in _daemons:
started = _daemons[key] = Subprocess(args, **kwargs)
# Ensure that process is running. Restart if required
proc = _daemons[key]
restart = int(restart)
while proc.proc.returncode is not None and restart > 0:
restart -= 1
proc = started = _daemons[key] = Subprocess(args, **kwargs)
if proc.proc.returncode is not None:
raise RuntimeError('Error %d starting %s' % (proc.proc.returncode, arg_str))
if started:
app_log.info('Started: %s', arg_str)
future = Future()
# If process was started, wait until it has initialized. Else just return the proc
if first_line and started:
if isinstance(first_line, six.string_types):
def check(proc):
actual = queue.get(timeout=timeout).decode('utf-8')
if first_line not in actual:
raise AssertionError('%s: wrong first line: %s (no "%s")' %
(arg_str, actual, first_line))
elif isinstance(first_line, _regex_type):
def check(proc):
actual = queue.get(timeout=timeout).decode('utf-8')
if not first_line.search(actual):
raise AssertionError('%s: wrong first line: %s' % (arg_str, actual))
elif callable(first_line):
check = first_line
loop = _get_current_ioloop()
def checker(proc):
try:
check(proc)
except Exception as e:
loop.add_callback(future.set_exception, e)
else:
loop.add_callback(future.set_result, proc)
proc._check_thread = t = Thread(target=checker, args=(proc, ))
t.daemon = True # Thread dies with the program
t.start()
else:
future.set_result(proc)
return future
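# Hedged sketch of daemon() usage (the command and the first_line marker are
# only examples). daemon() returns a Future that resolves to the Subprocess once
# the first line of output matches, so it is normally yielded in a coroutine.
def _daemon_demo():
    import tornado.gen

    @tornado.gen.coroutine
    def start():
        proc = yield daemon(['python', '-m', 'http.server', '8001'],
                            restart=2, first_line='Serving HTTP', buffer_size='line')
        raise tornado.gen.Return(proc)

    return IOLoop.current().run_sync(start)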
def _get_current_ioloop():
'''
Return the current IOLoop. But if we're not already in an IOLoop, return an
object that mimics add_callback() by running the method immediately.
This allows daemon() to be run without Tornado / asyncio.
'''
loop = IOLoop.current(instance=False)
if loop is None:
loop = AttrDict(add_callback=lambda fn, *args, **kwargs: fn(*args, **kwargs))
return loop
def get_store(type, **kwargs):
if type == 'memory':
return KeyStore(**kwargs)
elif type == 'sqlite':
return SQLiteStore(**kwargs)
elif type == 'json':
return JSONStore(**kwargs)
elif type == 'redis':
return RedisStore(**kwargs)
elif type == 'hdf5':
return HDF5Store(**kwargs)
else:
raise NotImplementedError('Store type: %s not implemented' % type)
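# Usage sketch (assumes the sqlitedict package; the file and key names are
# illustrative). 'path' and 'table' are forwarded to SQLiteStore below, and
# flush=10 makes KeyStore persist the store every 10 seconds.
def _store_demo():
    store = get_store('sqlite', path='demo-store.db', table='store', flush=10)
    store.dump('user:1', {'name': 'alice'})
    store.flush()
    return store.load('user:1')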
class KeyStore(object):
'''
Base class for persistent dictionaries. (But KeyStore is not persistent.)
>>> store = KeyStore()
>>> value = store.load(key, None) # Load a value. It's like dict.get()
    >>> store.dump(key, value)  # Save a value. It's like store[key] = value, but doesn't flush
>>> store.flush() # Saves to disk
>>> store.close() # Close the store
You can initialize a KeyStore with a ``flush=`` parameter. The store is
flushed to disk via ``store.flush()`` every ``flush`` seconds.
If a ``purge=`` is provided, the data is purged of missing values every
``purge`` seconds. You can provide a custom ``purge_keys=`` function that
returns an iterator of keys to delete if any.
When the program exits, ``.close()`` is automatically called.
'''
def __init__(self, flush=None, purge=None, purge_keys=None, **kwargs):
'''Initialise the KeyStore at path'''
self.store = {}
if callable(purge_keys):
self.purge_keys = purge_keys
elif purge_keys is not None:
app_log.error(
'KeyStore: purge_keys=%r invalid. Must be function(dict)',
purge_keys)
# Periodically flush and purge buffers
if flush is not None:
PeriodicCallback(self.flush, callback_time=flush * 1000).start()
if purge is not None:
PeriodicCallback(self.purge, callback_time=purge * 1000).start()
# Call close() when Python gracefully exits
atexit.register(self.close)
def keys(self):
'''Return all keys in the store'''
return self.store.keys()
def load(self, key, default=None):
'''Same as store.get(), but called "load" to indicate persistence'''
key = self._escape(key)
return self.store.get(key, {} if default is None else default)
def dump(self, key, value):
'''Same as store[key] = value'''
key = self._escape(key)
self.store[key] = value
def _escape(self, key):
'''Converts key into a unicode string (interpreting byte-string keys as UTF-8)'''
if isinstance(key, six.binary_type):
return six.text_type(key, encoding='utf-8')
return six.text_type(key)
@staticmethod
def purge_keys(data):
return [key for key, val in data.items() if val is None]
def flush(self):
'''Write to disk'''
pass
def purge(self):
'''Delete empty keys and flush'''
for key in self.purge_keys(self.store):
try:
del self.store[key]
except KeyError:
# If the key was already removed from store, ignore
pass
self.flush()
def close(self):
'''Flush and close all open handles'''
raise NotImplementedError()
class RedisStore(KeyStore):
'''
A KeyStore that stores data in a Redis database. Typical usage::
>>> store = RedisStore('localhost:6379:1:password=x:...') # host:port:db:params
>>> value = store.load(key)
>>> store.dump(key, value)
The path in the constructor contains parameters separated by colon (:):
- `host`: the Redis server location (default: localhost)
- `port`: the Redis server port (default: 6379)
- `db`: the Redis server DB number (default: 0)
- zero or more parameters passed to StrictRedis (e.g. password=abc)
Values are encoded as JSON using gramex.config.CustomJSONEncoder (thus
handling datetime.) Keys are JSON encoded.
'''
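# Worked example of the path string (hypothetical host and password):
#   RedisStore('redis.example.com:6380:2:password=secret')
#   -> host='redis.example.com', port=6380, db=2, and StrictRedis(...,
#      password='secret', decode_responses=True, encoding='utf-8')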
def __init__(self, path=None, *args, **kwargs):
super(RedisStore, self).__init__(*args, **kwargs)
from redis import StrictRedis
host, port, db, redis_kwargs = 'localhost', 6379, 0, {}
if isinstance(path, six.string_types):
parts = path.split(':')
if len(parts):
host = parts.pop(0)
if len(parts):
port = int(parts.pop(0))
if len(parts):
db = int(parts.pop(0))
redis_kwargs = dict(part.split('=', 1) for part in parts)
redis_kwargs['decode_responses'] = True
redis_kwargs.setdefault('encoding', 'utf-8')
self.store = StrictRedis(host=host, port=port, db=db, **redis_kwargs)
def load(self, key, default=None):
result = self.store.get(key)
if result is None:
return default
try:
return json.loads(
result, object_pairs_hook=AttrDict, cls=CustomJSONDecoder)
except ValueError:
app_log.error('RedisStore("%s").load("%s") is not JSON ("%r...")',
self.store, key, result)
return default
def dump(self, key, value):
if value is None:
self.store.delete(key)
else:
value = json.dumps(
value,
ensure_ascii=True,
separators=(',', ':'),
cls=CustomJSONEncoder)
self.store.set(key, value)
def close(self):
pass
def purge(self):
app_log.debug('Purging %s', self.store)
# TODO: optimize item retrieval
items = {key: self.load(key, None) for key in self.store.keys()}
for key in self.purge_keys(items):
self.store.delete(key)
class SQLiteStore(KeyStore):
'''
A KeyStore that stores data in a SQLite file. Typical usage::
>>> store = SQLiteStore('file.db', table='store')
>>> value = store.load(key)
>>> store.dump(key, value)
Values are encoded as JSON using gramex.config.CustomJSONEncoder (thus
handling datetime.) Keys are JSON encoded.
'''
def __init__(self, path, table='store', *args, **kwargs):
super(SQLiteStore, self).__init__(*args, **kwargs)
self.path = _create_path(path)
from sqlitedict import SqliteDict
self.store = SqliteDict(
self.path, tablename=table, autocommit=True,
encode=lambda v: json.dumps(v, separators=(',', ':'), ensure_ascii=True,
cls=CustomJSONEncoder),
decode=lambda v: json.loads(v, object_pairs_hook=AttrDict, cls=CustomJSONDecoder),
)
def close(self):
self.store.close()
def flush(self):
super(SQLiteStore, self).flush()
self.store.commit()
def keys(self):
# Keys need to be escaped
return (self._escape(key) for key in self.store.keys())
def purge(self):
app_log.debug('Purging %s', self.path)
super(SQLiteStore, self).purge()
class HDF5Store(KeyStore):
'''
A KeyStore that stores data in a HDF5 file. Typical usage::
>>> store = HDF5Store('file.h5', flush=15)
>>> value = store.load(key)
>>> store.dump(key, value)
Internally, it uses HDF5 groups to store data. Values are encoded as JSON
using gramex.config.CustomJSONEncoder (thus handling datetime.) Keys are JSON
encoded, and '/' is escaped as well (since HDF5 groups treat / as subgroups.)
'''
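# Example of the '/' escaping described above: a key such as 'users/alice' is
# stored under the HDF5 group name 'users\talice' and unescaped back to
# 'users/alice' by keys().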
def __init__(self, path, *args, **kwargs):
super(HDF5Store, self).__init__(*args, **kwargs)
self.path = _create_path(path)
self.changed = False
import h5py
# h5py.File fails with OSError: Unable to create file (unable to open file: name =
# '.meta.h5', errno = 17, error message = 'File exists', flags = 15, o_flags = 502)
# TODO: identify why this happens and resolve it.
self.store = h5py.File(self.path, 'a')
def load(self, key, default=None):
# Keys cannot contain / in HDF5 store. Escape it
key = self._escape(key).replace('/', '\t')
result = self.store.get(key, None)
if result is None:
return default
try:
return json.loads(
result.value,
object_pairs_hook=AttrDict,
cls=CustomJSONDecoder)
except ValueError:
app_log.error('HDF5Store("%s").load("%s") is not JSON ("%r...")',
self.path, key, result.value)
return default
def dump(self, key, value):
key = self._escape(key)
if self.store.get(key) != value:
if key in self.store:
del self.store[key]
self.store[key] = json.dumps(
value,
ensure_ascii=True,
separators=(',', ':'),
cls=CustomJSONEncoder)
self.changed = True
def _escape(self, key):
'''
Converts key into a unicode string (interpreting byte-string keys as UTF-8).
HDF5 does not accept / in key names. Replace those with tabs.
'''
if isinstance(key, six.binary_type):
key = six.text_type(key, encoding='utf-8')
else:
key = six.text_type(key)
return key.replace('/', '\t')
def keys(self):
# Keys cannot contain / in HDF5 store. Unescape it
return (key.replace('\t', '/') for key in self.store.keys())
def flush(self):
super(HDF5Store, self).flush()
if self.changed:
app_log.debug('Flushing %s', self.path)
self.store.flush()
self.changed = False
def purge(self):
'''
Load all keys into self.store. Delete what's required. Save.
'''
self.flush()
changed = False
items = {
key: json.loads(
val.value, object_pairs_hook=AttrDict, cls=CustomJSONDecoder)
for key, val in self.store.items()
}
for key in self.purge_keys(items):
del self.store[key]
changed = True
if changed:
app_log.debug('Purging %s', self.path)
self.store.flush()
def close(self):
try:
self.store.close()
# h5py.h5f.get_obj_ids often raises a ValueError: Not a file id.
# This is presumably if the file handle has been closed. Log & ignore.
except ValueError:
app_log.debug('HDF5Store("%s").close() error ignored', self.path)
class JSONStore(KeyStore):
'''
A KeyStore that stores data in a JSON file. Typical usage::
>>> store = JSONStore('file.json', flush=15)
>>> value = store.load(key)
>>> store.dump(key, value)
This is less efficient than HDF5Store for large data, but is human-readable.
It also does not support concurrent access: only one JSONStore instance
is permitted per file.
'''
def __init__(self, path, *args, **kwargs):
super(JSONStore, self).__init__(*args, **kwargs)
self.path = _create_path(path)
self.store = self._read_json()
self.changed = False
self.update = {} # key-values added since flush
def _read_json(self):
try:
with io.open(self.path) as handle: # noqa: no encoding for json
return json.load(handle, cls=CustomJSONDecoder)
except (IOError, ValueError):
return {}
def _write_json(self, data):
json_value = json.dumps(
data,
ensure_ascii=True,
separators=(',', ':'),
cls=CustomJSONEncoder)
with io.open(self.path, 'w') as handle: # noqa: no encoding for json
handle.write(json_value)
def dump(self, key, value):
'''Same as store[key] = value'''
key = self._escape(key)
if self.store.get(key) != value:
self.store[key] = value
self.update[key] = value
self.changed = True
def flush(self):
super(JSONStore, self).flush()
if self.changed:
app_log.debug('Flushing %s', self.path)
store = self._read_json()
store.update(self.update)
self._write_json(store)
self.store = store
self.update = {}
self.changed = False
def purge(self):
'''
Load all keys into self.store. Delete what's required. Save.
'''
self.flush()
changed = False
for key in self.purge_keys(self.store):
del self.store[key]
changed = True
if changed:
app_log.debug('Purging %s', self.path)
self._write_json(self.store)
def close(self):
try:
self.flush()
# This has happened when the directory was deleted. Log & ignore.
except OSError:
app_log.error('Cannot flush %s', self.path)
def _create_path(path):
# Ensure that path directory exists
path = os.path.abspath(path)
folder = os.path.dirname(path)
if not os.path.exists(folder):
os.makedirs(folder)
return path
def sizeof(obj):
if isinstance(obj, dict):
return sys.getsizeof(obj) + sum(sizeof(k) + sizeof(v) for k, v in obj.items())
elif isinstance(obj, (set, list)):
return sys.getsizeof(obj) + sum(sizeof(v) for v in obj)
return sys.getsizeof(obj)
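def _demo_json_store():
    '''
    Illustrative sketch only (not part of the original API): shows the typical
    dump -> purge -> load cycle with a JSONStore backed by a temporary file.
    The temporary path is an assumption made for this demo.
    '''
    import tempfile
    path = os.path.join(tempfile.mkdtemp(), 'demo-store.json')
    store = get_store('json', path=path)
    store.dump('alpha', {'count': 1})
    store.dump('beta', {'count': 2})
    store.dump('beta', None)        # None values are dropped by the default purge_keys
    store.purge()                   # deletes 'beta' and writes the file to disk
    return store.load('alpha')      # -> {'count': 1}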
| 38.022989
| 99
| 0.599737
|
7514a074efe8f4de9516d192fd2ec88bb08bc708
| 144
|
py
|
Python
|
cms/plugins/moderation/tests/apps.py
|
cresset-group/cms
|
727b81e40dd1196e85c240e728a7824121163d4d
|
[
"BSD-3-Clause"
] | 13
|
2015-03-13T21:32:16.000Z
|
2020-08-07T08:09:02.000Z
|
cms/plugins/moderation/tests/apps.py
|
cresset-group/cms
|
727b81e40dd1196e85c240e728a7824121163d4d
|
[
"BSD-3-Clause"
] | 131
|
2015-04-04T11:27:14.000Z
|
2020-10-16T13:39:16.000Z
|
cms/plugins/moderation/tests/apps.py
|
cresset-group/cms
|
727b81e40dd1196e85c240e728a7824121163d4d
|
[
"BSD-3-Clause"
] | 16
|
2015-06-05T12:56:28.000Z
|
2021-01-06T15:15:53.000Z
|
from django.apps import AppConfig
class ModerationConfig(AppConfig):
name = 'cms.plugins.moderation.tests'
label = 'moderation_tests'
| 20.571429
| 41
| 0.756944
|
0efe7f8638a4486a7445ee13d86918b2b0fbf71e
| 4,086
|
py
|
Python
|
Samples-Http/OLD/Python/TTSSample.py
|
jiajzhan/Cognitive-Speech-TTS
|
ce10ef9cc24fe02d06e3078eabb79922aa9d0409
|
[
"MIT"
] | 385
|
2017-04-14T08:22:31.000Z
|
2022-03-29T05:21:14.000Z
|
Samples-Http/OLD/Python/TTSSample.py
|
jiajzhan/Cognitive-Speech-TTS
|
ce10ef9cc24fe02d06e3078eabb79922aa9d0409
|
[
"MIT"
] | 107
|
2017-04-12T03:08:08.000Z
|
2022-01-24T05:39:55.000Z
|
Samples-Http/OLD/Python/TTSSample.py
|
jiajzhan/Cognitive-Speech-TTS
|
ce10ef9cc24fe02d06e3078eabb79922aa9d0409
|
[
"MIT"
] | 373
|
2017-04-14T10:57:23.000Z
|
2022-03-29T02:58:10.000Z
|
'''
After you've set your subscription key, run this application from your working
directory with this command: python TTSSample.py
'''
import os, requests, time
from xml.etree import ElementTree
# This code is required for Python 2.7
try: input = raw_input
except NameError: pass
'''
If you prefer, you can hardcode your subscription key as a string and remove
the provided conditional statement. However, we do recommend using environment
variables to secure your subscription keys. The environment variable is
set to SPEECH_SERVICE_KEY in our sample.
For example:
subscription_key = "Your-Key-Goes-Here"
'''
if 'SPEECH_SERVICE_KEY' in os.environ:
subscription_key = os.environ['SPEECH_SERVICE_KEY']
else:
print('Environment variable for your subscription key is not set.')
exit()
class TextToSpeech(object):
def __init__(self, subscription_key):
self.subscription_key = subscription_key
self.tts = input("What would you like to convert to speech: ")
self.timestr = time.strftime("%Y%m%d-%H%M")
self.access_token = None
'''
The TTS endpoint requires an access token. This method exchanges your
subscription key for an access token that is valid for ten minutes.
'''
def get_token(self):
fetch_token_url = "https://westus.api.cognitive.microsoft.com/sts/v1.0/issueToken"
headers = {
'Ocp-Apim-Subscription-Key': self.subscription_key
}
response = requests.post(fetch_token_url, headers=headers)
self.access_token = str(response.text)
def save_audio(self):
base_url = 'https://westus.tts.speech.microsoft.com/'
path = 'cognitiveservices/v1'
constructed_url = base_url + path
headers = {
'Authorization': 'Bearer ' + self.access_token,
'Content-Type': 'application/ssml+xml',
'X-Microsoft-OutputFormat': 'riff-24khz-16bit-mono-pcm',
'User-Agent': 'YOUR_RESOURCE_NAME'
}
xml_body = ElementTree.Element('speak', version='1.0')
xml_body.set('{http://www.w3.org/XML/1998/namespace}lang', 'en-us')
voice = ElementTree.SubElement(xml_body, 'voice')
voice.set('{http://www.w3.org/XML/1998/namespace}lang', 'en-US')
voice.set('name', 'en-US-Guy24kRUS') # Short name for 'Microsoft Server Speech Text to Speech Voice (en-US, Guy24KRUS)'
voice.text = self.tts
body = ElementTree.tostring(xml_body)
response = requests.post(constructed_url, headers=headers, data=body)
'''
If a success response is returned, then the binary audio is written
to file in your working directory. It is prefaced by sample and
includes the date.
'''
if response.status_code == 200:
with open('sample-' + self.timestr + '.wav', 'wb') as audio:
audio.write(response.content)
print("\nStatus code: " + str(response.status_code) + "\nYour TTS is ready for playback.\n")
else:
print("\nStatus code: " + str(response.status_code) + "\nSomething went wrong. Check your subscription key and headers.\n")
print("Reason: " + str(response.reason) + "\n")
def get_voices_list(self):
base_url = 'https://westus.tts.speech.microsoft.com/'
path = 'cognitiveservices/voices/list'
constructed_url = base_url + path
headers = {
'Authorization': 'Bearer ' + self.access_token,
}
response = requests.get(constructed_url, headers=headers)
if response.status_code == 200:
print("\nAvailable voices: \n" + response.text)
else:
print("\nStatus code: " + str(response.status_code) + "\nSomething went wrong. Check your subscription key and headers.\n")
if __name__ == "__main__":
app = TextToSpeech(subscription_key)
app.get_token()
app.save_audio()
# Get a list of voices https://docs.microsoft.com/en-us/azure/cognitive-services/speech-service/rest-text-to-speech#get-a-list-of-voices
# app.get_voices_list()
| 41.693878
| 140
| 0.660548
|
af31336ec68661f5d7c71536310a131a4a97d714
| 26,725
|
py
|
Python
|
bcbio/qc/multiqc.py
|
WimSpee/bcbio-nextgen
|
7b20b9a2cb5e918af130d011961761c49f758e14
|
[
"MIT"
] | 1
|
2021-05-11T06:40:17.000Z
|
2021-05-11T06:40:17.000Z
|
bcbio/qc/multiqc.py
|
stl-23/bcbio-nextgen
|
1a241c0f0a7db4e754164015f4fb0090eaa6bee7
|
[
"MIT"
] | null | null | null |
bcbio/qc/multiqc.py
|
stl-23/bcbio-nextgen
|
1a241c0f0a7db4e754164015f4fb0090eaa6bee7
|
[
"MIT"
] | null | null | null |
"""High level summaries of samples and programs with MultiQC.
https://github.com/ewels/MultiQC
"""
import collections
import glob
import io
import json
import mimetypes
import os
import pandas as pd
import shutil
import numpy as np
from collections import OrderedDict
import pybedtools
import six
import toolz as tz
import yaml
from bcbio import utils
from bcbio.cwl import cwlutils
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
from bcbio.log import logger
from bcbio.provenance import do, programs
from bcbio.provenance import data as provenancedata
from bcbio.pipeline import datadict as dd
from bcbio.pipeline import config_utils
from bcbio.bam import ref
from bcbio.qc.qsignature import get_qsig_multiqc_files
from bcbio.structural import annotate
from bcbio.utils import walk_json
from bcbio.variation import bedutils
from bcbio.qc.variant import get_active_vcinfo
from bcbio.upload import get_all_upload_paths_from_sample
from bcbio.variation import coverage
from bcbio.chipseq import atac
def summary(*samples):
"""Summarize all quality metrics together"""
samples = list(utils.flatten(samples))
work_dir = dd.get_work_dir(samples[0])
multiqc = config_utils.get_program("multiqc", samples[0]["config"])
if not multiqc:
logger.debug("multiqc not found. Update bcbio_nextgen.py tools to fix this issue.")
out_dir = utils.safe_makedir(os.path.join(work_dir, "qc", "multiqc"))
out_data = os.path.join(out_dir, "multiqc_data")
out_file = os.path.join(out_dir, "multiqc_report.html")
file_list = os.path.join(out_dir, "list_files.txt")
work_samples = cwlutils.unpack_tarballs([utils.deepish_copy(x) for x in samples], samples[0])
work_samples = _summarize_inputs(work_samples, out_dir)
if not utils.file_exists(out_file):
with tx_tmpdir(samples[0], work_dir) as tx_out:
in_files = _get_input_files(work_samples, out_dir, tx_out)
in_files += _merge_metrics(work_samples, out_dir)
if _one_exists(in_files):
with utils.chdir(out_dir):
config_file = _create_config_file(out_dir, work_samples)
input_list_file = _create_list_file(in_files, file_list)
if dd.get_tmp_dir(samples[0]):
export_tmp = "export TMPDIR=%s && " % dd.get_tmp_dir(samples[0])
else:
export_tmp = ""
locale_export = utils.locale_export()
path_export = utils.local_path_export()
other_opts = config_utils.get_resources("multiqc", samples[0]["config"]).get("options", [])
other_opts = " ".join([str(x) for x in other_opts])
cmd = ("{path_export}{export_tmp}{locale_export} "
"{multiqc} -c {config_file} -f -l {input_list_file} {other_opts} -o {tx_out}")
do.run(cmd.format(**locals()), "Run multiqc")
if utils.file_exists(os.path.join(tx_out, "multiqc_report.html")):
shutil.move(os.path.join(tx_out, "multiqc_report.html"), out_file)
shutil.move(os.path.join(tx_out, "multiqc_data"), out_data)
samples = _group_by_sample_and_batch(samples)
if utils.file_exists(out_file) and samples:
data_files = set()
for i, data in enumerate(samples):
data_files.add(os.path.join(out_dir, "report", "metrics", dd.get_sample_name(data) + "_bcbio.txt"))
data_files.add(os.path.join(out_dir, "report", "metrics", "target_info.yaml"))
data_files.add(os.path.join(out_dir, "multiqc_config.yaml"))
[data_files.add(f) for f in glob.glob(os.path.join(out_dir, "multiqc_data", "*"))]
data_files = [f for f in data_files if f and utils.file_exists(f)]
if "summary" not in samples[0]:
samples[0]["summary"] = {}
samples[0]["summary"]["multiqc"] = {"base": out_file, "secondary": data_files}
data_json = os.path.join(out_dir, "multiqc_data", "multiqc_data.json")
data_json_final = _save_uploaded_data_json(samples, data_json, os.path.join(out_dir, "multiqc_data"))
if data_json_final:
samples[0]["summary"]["multiqc"]["secondary"].append(data_json_final)
# Prepare final file list and inputs for downstream usage
file_list_final = _save_uploaded_file_list(samples, file_list, out_dir)
if file_list_final:
samples[0]["summary"]["multiqc"]["secondary"].append(file_list_final)
if any([cwlutils.is_cwl_run(d) for d in samples]):
for indir in ["inputs", "report"]:
tarball = os.path.join(out_dir, "multiqc-%s.tar.gz" % (indir))
if not utils.file_exists(tarball):
with utils.chdir(out_dir):
cmd = ["tar", "-czvpf", tarball, indir]
do.run(cmd, "Compress multiqc inputs: %s" % indir)
samples[0]["summary"]["multiqc"]["secondary"].append(tarball)
if any([cwlutils.is_cwl_run(d) for d in samples]):
samples = _add_versions(samples)
return [[data] for data in samples]
def _add_versions(samples):
"""Add tool and data versions to the summary.
"""
samples[0]["versions"] = {"tools": programs.write_versions(samples[0]["dirs"], samples[0]["config"]),
"data": provenancedata.write_versions(samples[0]["dirs"], samples)}
return samples
def _summarize_inputs(samples, out_dir):
"""Summarize inputs for MultiQC reporting in display.
"""
logger.info("summarize target information")
if samples[0].get("analysis", "").lower() in ["variant", "variant2"]:
metrics_dir = utils.safe_makedir(os.path.join(out_dir, "report", "metrics"))
samples = _merge_target_information(samples, metrics_dir)
logger.info("summarize fastqc")
out_dir = utils.safe_makedir(os.path.join(out_dir, "report", "fastqc"))
with utils.chdir(out_dir):
_merge_fastqc(samples)
preseq_samples = [s for s in samples if tz.get_in(["config", "algorithm", "preseq"], s)]
if preseq_samples:
logger.info("summarize preseq")
out_dir = utils.safe_makedir(os.path.join(out_dir, "report", "preseq"))
with utils.chdir(out_dir):
_merge_preseq(preseq_samples)
return samples
def _save_uploaded_data_json(samples, data_json_work, out_dir):
""" Fixes all absolute work-rooted paths to relative final-rooted paths
"""
if not utils.file_exists(data_json_work):
return None
upload_path_mapping = dict()
for sample in samples:
upload_path_mapping.update(get_all_upload_paths_from_sample(sample))
if not upload_path_mapping:
return data_json_work
with io.open(data_json_work, encoding="utf-8") as f:
data = json.load(f, object_pairs_hook=OrderedDict)
upload_base = samples[0]["upload"]["dir"]
data = walk_json(data, lambda s: _work_path_to_rel_final_path(s, upload_path_mapping, upload_base))
data_json_final = os.path.join(out_dir, "multiqc_data_final.json")
with io.open(data_json_final, "w", encoding="utf-8") as f:
json.dump(data, f, indent=4)
return data_json_final
def _save_uploaded_file_list(samples, file_list_work, out_dir):
""" Fixes all absolute work-rooted paths to relative final-rooted paths
For CWL, prepare paths relative to output directory.
"""
if not utils.file_exists(file_list_work):
return None
if any([cwlutils.is_cwl_run(d) for d in samples]):
upload_paths = []
with open(file_list_work) as f:
for p in (l.strip() for l in f.readlines() if os.path.exists(l.strip())):
if p.startswith(out_dir):
upload_paths.append(p.replace(out_dir + "/", ""))
else:
upload_path_mapping = dict()
for sample in samples:
upload_path_mapping.update(get_all_upload_paths_from_sample(sample))
if not upload_path_mapping:
return None
with open(file_list_work) as f:
paths = [l.strip() for l in f.readlines() if os.path.exists(l.strip())]
upload_paths = [p for p in [
_work_path_to_rel_final_path(path, upload_path_mapping, samples[0]["upload"]["dir"])
for path in paths
] if p]
if not upload_paths:
return None
file_list_final = os.path.join(out_dir, "list_files_final.txt")
with open(file_list_final, "w") as f:
for path in upload_paths:
f.write(path + '\n')
return file_list_final
def _work_path_to_rel_final_path(path, upload_path_mapping, upload_base_dir):
""" Check if `path` is a work-rooted path, and convert to a relative final-rooted path
"""
if not path or not isinstance(path, str):
return path
upload_path = None
# First, check the mapping: if the path is there as a direct reference and
# it is a file, return it immediately (saves lots of iterations)
if upload_path_mapping.get(path) is not None and os.path.isfile(path):
upload_path = upload_path_mapping[path]
else:
# Not a file: check for elements in the mapping that contain
# it
paths_to_check = [key for key in upload_path_mapping
if path.startswith(key)]
if paths_to_check:
for work_path in paths_to_check:
if os.path.isdir(work_path):
final_path = upload_path_mapping[work_path]
upload_path = path.replace(work_path, final_path)
break
if upload_path is not None:
return os.path.relpath(upload_path, upload_base_dir)
else:
return None
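# Worked example (hypothetical paths; assumes the work directory exists on disk):
#   upload_path_mapping = {'/work/qc/s1': '/final/s1/qc'}, upload_base_dir = '/final'
#   '/work/qc/s1/multiqc_report.html' -> 's1/qc/multiqc_report.html'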
def _one_exists(input_files):
"""
at least one file must exist for multiqc to run properly
"""
for f in input_files:
if os.path.exists(f):
return True
return False
def _get_input_files(samples, base_dir, tx_out_dir):
"""Retrieve input files, keyed by sample and QC method name.
Stages files into the work directory to ensure correct names for
MultiQC sample assessment when running with CWL.
"""
in_files = collections.defaultdict(list)
for data in samples:
sum_qc = tz.get_in(["summary", "qc"], data, {})
if sum_qc in [None, "None"]:
sum_qc = {}
elif isinstance(sum_qc, six.string_types):
sum_qc = {dd.get_algorithm_qc(data)[0]: sum_qc}
elif not isinstance(sum_qc, dict):
raise ValueError("Unexpected summary qc: %s" % sum_qc)
for program, pfiles in sum_qc.items():
if isinstance(pfiles, dict):
pfiles = [pfiles["base"]] + pfiles.get("secondary", [])
# CWL: presents output files as single file plus associated secondary files
elif isinstance(pfiles, six.string_types):
if os.path.exists(pfiles):
pfiles = [os.path.join(basedir, f) for basedir, subdir, filenames in os.walk(os.path.dirname(pfiles)) for f in filenames]
else:
pfiles = []
in_files[(dd.get_sample_name(data), program)].extend(pfiles)
staged_files = []
for (sample, program), files in in_files.items():
cur_dir = utils.safe_makedir(os.path.join(base_dir, "inputs", sample, program))
for f in files:
if _check_multiqc_input(f) and _is_good_file_for_multiqc(f):
if _in_temp_directory(f) or any([cwlutils.is_cwl_run(d) for d in samples]):
staged_f = os.path.join(cur_dir, os.path.basename(f))
shutil.copy(f, staged_f)
staged_files.append(staged_f)
else:
staged_files.append(f)
staged_files.extend(get_qsig_multiqc_files(samples))
# Backwards compatible -- to migrate to explicit specifications in input YAML
if not any([cwlutils.is_cwl_run(d) for d in samples]):
staged_files += ["trimmed", "htseq-count/*summary"]
# Add in created target_info file
if os.path.isfile(os.path.join(base_dir, "report", "metrics", "target_info.yaml")):
staged_files += [os.path.join(base_dir, "report", "metrics", "target_info.yaml")]
return sorted(list(set(staged_files)))
def _in_temp_directory(f):
return any(x.startswith("tmp") for x in f.split("/"))
def _get_batches(data):
batches = dd.get_batch(data) or dd.get_sample_name(data)
if not isinstance(batches, (list, tuple)):
batches = [batches]
return batches
def _group_by_sample_and_batch(samples):
"""Group samples split by QC method back one per sample-batch.
"""
out = collections.defaultdict(list)
for data in samples:
out[(dd.get_sample_name(data), dd.get_align_bam(data), tuple(_get_batches(data)))].append(data)
return [xs[0] for xs in out.values()]
def _create_list_file(paths, out_file):
with open(out_file, "w") as f:
for path in paths:
f.write(path + '\n')
return out_file
def _create_config_file(out_dir, samples):
"""Provide configuration file for multiqc report."""
out_file = os.path.join(out_dir, "multiqc_config.yaml")
out = {"table_columns_visible": dict()}
extra_fn_clean_trim = []
extra_fn_clean_trim.extend(["coverage.mosdepth.region.dist", "coverage.mosdepth.global.dist"])
out["extra_fn_clean_trim"] = extra_fn_clean_trim
# Avoid duplicated bcbio columns with qualimap
if any(("qualimap" in dd.get_tools_on(d) or "qualimap_full" in dd.get_tools_on(d)) for d in samples):
# Hiding metrics duplicated by Qualimap
out["table_columns_visible"]["bcbio"] = {"Average_insert_size": False}
out["table_columns_visible"]["FastQC"] = {"percent_gc": False}
# Setting up thresholds for Qualimap depth cutoff calculations, based on sample avg depths
avg_depths = [tz.get_in(["summary", "metrics", "Avg_coverage"], s) for s in samples]
avg_depths = [x for x in avg_depths if x]
# Picking all thresholds up to the highest sample average depth
thresholds = [t for t in coverage.DEPTH_THRESHOLDS if not avg_depths or t <= max(avg_depths)]
# ...plus one more
if len(thresholds) < len(coverage.DEPTH_THRESHOLDS):
thresholds.append(coverage.DEPTH_THRESHOLDS[len(thresholds)])
# Showing only thresholds surrounding any of average depths
thresholds_hidden = []
for i, t in enumerate(thresholds):
if t > 20: # Not hiding anything below 20x
if any(thresholds[i-1] <= c < thresholds[i] for c in avg_depths if c and i-1 >= 0) or \
any(thresholds[i] <= c < thresholds[i+1] for c in avg_depths if c and i+1 < len(thresholds)):
pass
else:
thresholds_hidden.append(t)
# Hide coverage unless running full qualimap, since downsampled inputs are confusing
if not any(("qualimap_full" in dd.get_tools_on(d)) for d in samples):
thresholds_hidden = thresholds + thresholds_hidden
thresholds_hidden.sort()
thresholds = []
out['qualimap_config'] = {
'general_stats_coverage': [str(t) for t in thresholds],
'general_stats_coverage_hidden': [str(t) for t in thresholds_hidden]}
# Avoid confusing peddy outputs, sticking to ancestry and sex prediction
out["table_columns_visible"]["Peddy"] = {"family_id": False, "sex_het_ratio": False,
"error_sex_check": False}
# Setting the module order
module_order = []
module_order.extend([
"bcbio",
"samtools",
"goleft_indexcov",
"peddy"
])
out['bcftools'] = {'write_separate_table': True}
# if germline calling was performed:
if any("germline" in (get_active_vcinfo(s) or {}) or # tumor-only somatic with germline extraction
dd.get_phenotype(s) == "germline" or # or paired somatic with germline calling for normal
_has_bcftools_germline_stats(s) # CWL organized statistics
for s in samples):
# Split somatic and germline variant stats into separate multiqc submodules,
# with somatic going into General Stats, and germline going into a separate table:
module_order.extend([{
'bcftools': {
'name': 'Bcftools (somatic)',
'info': 'Bcftools stats for somatic variant calls only.',
'path_filters': ['*_bcftools_stats.txt'],
'custom_config': {'write_general_stats': True},
}},
{'bcftools': {
'name': 'Bcftools (germline)',
'info': 'Bcftools stats for germline variant calls only.',
'path_filters': ['*_bcftools_stats_germline.txt'],
'custom_config': {'write_general_stats': False},
}},
])
else:
module_order.append("bcftools")
module_order.extend([
"salmon",
"star",
"picard",
"qualimap",
"snpeff",
"fastqc",
"preseq",
"bismark"
])
out["module_order"] = module_order
preseq_samples = [s for s in samples if tz.get_in(["config", "algorithm", "preseq"], s)]
if preseq_samples:
out["preseq"] = _make_preseq_multiqc_config(preseq_samples)
with open(out_file, "w") as out_handle:
yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False)
return out_file
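# Rough shape of the generated multiqc_config.yaml (exact keys depend on the samples;
# the qualimap_config block only appears when Qualimap is among the enabled tools):
#   extra_fn_clean_trim: [coverage.mosdepth.region.dist, coverage.mosdepth.global.dist]
#   table_columns_visible: {Peddy: {...}}        # plus bcbio/FastQC entries when Qualimap runs
#   qualimap_config: {general_stats_coverage: [...], general_stats_coverage_hidden: [...]}
#   bcftools: {write_separate_table: true}
#   module_order: [bcbio, samtools, goleft_indexcov, peddy, bcftools, salmon, star, ...]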
def _has_bcftools_germline_stats(data):
"""Check for the presence of a germline stats file, CWL compatible.
"""
stats_file = tz.get_in(["summary", "qc"], data)
if isinstance(stats_file, dict):
stats_file = tz.get_in(["variants", "base"], stats_file)
if not stats_file:
stats_file = ""
return stats_file.find("bcftools_stats_germline") > 0
def _check_multiqc_input(path):
"""Check if file exists, and return empty if it doesn't"""
if utils.file_exists(path):
return path
# ## report and coverage
def _is_good_file_for_multiqc(fpath):
"""Returns False if the file is binary or image."""
# Use mimetypes to exclude binary files where possible
(ftype, encoding) = mimetypes.guess_type(fpath)
if encoding is not None:
return False
if ftype is not None and ftype.startswith('image'):
return False
return True
def _parse_disambiguate(disambiguatestatsfilename):
"""Parse disambiguation stats from given file.
"""
disambig_stats = [0, 0, 0]
with open(disambiguatestatsfilename, "r") as in_handle:
for i, line in enumerate(in_handle):
fields = line.strip().split("\t")
if i == 0:
assert fields == ['sample', 'unique species A pairs', 'unique species B pairs', 'ambiguous pairs']
else:
disambig_stats = [x + int(y) for x, y in zip(disambig_stats, fields[1:])]
return disambig_stats
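# Example stats file (tab-separated), matching the header asserted above:
#   sample  unique species A pairs  unique species B pairs  ambiguous pairs
#   S1      1200                    300                     45
# _parse_disambiguate() would return [1200, 300, 45] for this input.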
def _add_disambiguate(sample):
# check if disambiguation was run
if "disambiguate" in sample:
if utils.file_exists(sample["disambiguate"]["summary"]):
disambigStats = _parse_disambiguate(sample["disambiguate"]["summary"])
sample["summary"]["metrics"]["Disambiguated %s reads" % str(sample["genome_build"])] = disambigStats[0]
disambigGenome = (sample["config"]["algorithm"]["disambiguate"][0]
if isinstance(sample["config"]["algorithm"]["disambiguate"], (list, tuple))
else sample["config"]["algorithm"]["disambiguate"])
sample["summary"]["metrics"]["Disambiguated %s reads" % disambigGenome] = disambigStats[1]
sample["summary"]["metrics"]["Disambiguated ambiguous reads"] = disambigStats[2]
return sample
def _add_atac(sample):
atac_metrics = atac.calculate_encode_complexity_metrics(sample)
if not atac_metrics:
return sample
sample["summary"]["metrics"] = tz.merge(atac_metrics, sample["summary"]["metrics"])
return sample
def _fix_duplicated_rate(dt):
"""Get RNA duplicated rate if exists and replace by samtools metric"""
if "Duplication_Rate_of_Mapped" in dt:
dt["Duplicates_pct"] = 100.0 * dt["Duplication_Rate_of_Mapped"]
return dt
def _merge_metrics(samples, out_dir):
"""Merge metrics from multiple QC steps
"""
logger.info("summarize metrics")
out_dir = utils.safe_makedir(os.path.join(out_dir, "report", "metrics"))
sample_metrics = collections.defaultdict(dict)
for s in samples:
s = _add_disambiguate(s)
s = _add_atac(s)
m = tz.get_in(['summary', 'metrics'], s)
if isinstance(m, six.string_types):
m = json.loads(m)
if m:
for me in list(m.keys()):
if isinstance(m[me], list) or isinstance(m[me], dict) or isinstance(m[me], tuple):
m.pop(me, None)
sample_metrics[dd.get_sample_name(s)].update(m)
out = []
for sample_name, m in sample_metrics.items():
sample_file = os.path.join(out_dir, "%s_bcbio.txt" % sample_name)
with file_transaction(samples[0], sample_file) as tx_out_file:
dt = pd.DataFrame(m, index=['1'])
dt.columns = [k.replace(" ", "_").replace("(", "").replace(")", "") for k in dt.columns]
dt['sample'] = sample_name
if m.get('rRNA_rate'):
dt['rRNA_rate'] = m.get('rRNA_rate')
if m.get("RiP"):
dt['RiP_pct'] = "%.3f" % (int(m.get("RiP")) / float(m.get("Total_reads", 1)) * 100)
dt = _fix_duplicated_rate(dt)
dt.transpose().to_csv(tx_out_file, sep="\t", header=False)
out.append(sample_file)
return out
def _merge_fastqc(samples):
"""
merge all fastqc samples into one by module
"""
fastqc_list = collections.defaultdict(list)
seen = set()
for data in samples:
name = dd.get_sample_name(data)
if name in seen:
continue
seen.add(name)
fns = glob.glob(os.path.join(dd.get_work_dir(data), "qc", dd.get_sample_name(data), "fastqc") + "/*")
for fn in fns:
if fn.endswith("tsv"):
metric = os.path.basename(fn)
fastqc_list[metric].append([name, fn])
for metric in fastqc_list:
dt_by_sample = []
for fn in fastqc_list[metric]:
dt = pd.read_csv(fn[1], sep="\t")
dt['sample'] = fn[0]
dt_by_sample.append(dt)
dt = utils.rbind(dt_by_sample)
dt.to_csv(metric, sep="\t", index=False, mode='w')
return samples
def _merge_preseq(samples):
metrics = [utils.get_in(s, ("summary", "metrics")) for s in samples]
real_counts_file = os.path.abspath(os.path.join("preseq_real_counts.txt"))
with file_transaction(samples[0], real_counts_file) as tx_out_file:
with open(tx_out_file, "w") as f:
for s, m in zip(samples, metrics):
line = dd.get_sample_name(s) + "\t" + str(m["Preseq_read_count"])
if m.get("Preseq_unique_count") is not None:
line += "\t" + str(m["Preseq_unique_count"])
line += "\n"
f.write(line)
samples[0]["summary"]["qc"]["preseq"]["secondary"] = [real_counts_file]
def _make_preseq_multiqc_config(samples):
metrics = [utils.get_in(s, ("summary", "metrics")) for s in samples]
out = {"read_length": float(np.median([m["Preseq_read_length"] for m in metrics]))}
genome_sizes = list(set(m["Preseq_genome_size"] for m in metrics))
if len(genome_sizes) == 1:
out["genome_size"] = genome_sizes[0]
return out
def _merge_target_information(samples, metrics_dir):
out_file = os.path.abspath(os.path.join(metrics_dir, "target_info.yaml"))
if utils.file_exists(out_file):
return samples
genomes = set(dd.get_genome_build(data) for data in samples)
coverage_beds = set(dd.get_coverage(data) for data in samples)
original_variant_regions = set(dd.get_variant_regions_orig(data) for data in samples)
data = samples[0]
info = {}
# Reporting in MultiQC only if the genome is the same across all samples
if len(genomes) == 1:
info["genome_info"] = {
"name": dd.get_genome_build(data),
"size": sum([c.size for c in ref.file_contigs(dd.get_ref_file(data), data["config"])]),
}
# Reporting in MultiQC only if the target is the same across all samples
vcr_orig = None
if len(original_variant_regions) == 1 and list(original_variant_regions)[0] is not None:
vcr_orig = list(original_variant_regions)[0]
vcr_clean = bedutils.clean_file(vcr_orig, data)
info["variants_regions_info"] = {
"bed": vcr_orig,
"size": sum(len(x) for x in pybedtools.BedTool(dd.get_variant_regions_merged(data))),
"regions": pybedtools.BedTool(vcr_clean).count(),
}
gene_num = annotate.count_genes(vcr_clean, data)
if gene_num is not None:
info["variants_regions_info"]["genes"] = gene_num
else:
info["variants_regions_info"] = {
"bed": "callable regions",
}
# Reporting in MultiQC only if the target is the same across samples
if len(coverage_beds) == 1:
cov_bed = list(coverage_beds)[0]
if cov_bed not in [None, "None"]:
if vcr_orig and vcr_orig == cov_bed:
info["coverage_bed_info"] = info["variants_regions_info"]
else:
clean_bed = bedutils.clean_file(cov_bed, data, prefix="cov-", simple=True)
info["coverage_bed_info"] = {
"bed": cov_bed,
"size": pybedtools.BedTool(cov_bed).total_coverage(),
"regions": pybedtools.BedTool(clean_bed).count(),
}
gene_num = annotate.count_genes(clean_bed, data)
if gene_num is not None:
info["coverage_bed_info"]["genes"] = gene_num
else:
info["coverage_bed_info"] = info["variants_regions_info"]
coverage_intervals = set(data["config"]["algorithm"]["coverage_interval"] for data in samples)
if len(coverage_intervals) == 1:
info["coverage_interval"] = list(coverage_intervals)[0]
if info:
with open(out_file, "w") as out_handle:
yaml.safe_dump(info, out_handle)
return samples
| 43.455285
| 141
| 0.630196
|
a6ac6220683f282b6f8d45f487300a99bfc59863
| 300
|
py
|
Python
|
jobboardscraper/search/views.py
|
dillonko/jobboardscraper
|
21d662c3bad2e054c3b43f648e5b276339ee1d32
|
[
"BSD-3-Clause"
] | null | null | null |
jobboardscraper/search/views.py
|
dillonko/jobboardscraper
|
21d662c3bad2e054c3b43f648e5b276339ee1d32
|
[
"BSD-3-Clause"
] | 5
|
2020-12-09T20:45:28.000Z
|
2021-12-14T10:38:50.000Z
|
jobboardscraper/search/views.py
|
dillonko/jobboardscraper
|
21d662c3bad2e054c3b43f648e5b276339ee1d32
|
[
"BSD-3-Clause"
] | null | null | null |
from django.conf import settings
from haystack.generic_views import SearchView
from pure_pagination.mixins import PaginationMixin
from .forms import MySearchForm
class MySearchView(PaginationMixin, SearchView):
form_class = MySearchForm
paginate_by = getattr(settings, 'PAGINATE_BY', 10)
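# Note (assumption): defining PAGINATE_BY in the Django settings module, e.g.
# PAGINATE_BY = 25, overrides the default page size of 10 used above.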
| 25
| 54
| 0.816667
|
0c85938847af34d61d3a2dcb6319d1b6b498133c
| 653
|
py
|
Python
|
converters.py
|
ThadeuJose/Python-WEBPtoPNGConverter
|
64e73138d53846e9068f97e48d65aca7dfb77f6c
|
[
"MIT"
] | null | null | null |
converters.py
|
ThadeuJose/Python-WEBPtoPNGConverter
|
64e73138d53846e9068f97e48d65aca7dfb77f6c
|
[
"MIT"
] | null | null | null |
converters.py
|
ThadeuJose/Python-WEBPtoPNGConverter
|
64e73138d53846e9068f97e48d65aca7dfb77f6c
|
[
"MIT"
] | null | null | null |
from PIL import Image
from files import EXTENSION, INPUT_EXTENSION, OUTPUT_EXTENSION, create_path, get_all_files, get_filename
def convert_webp_to_png(inputpath, outputpath, file_extension):
MODE = "RGB"
im = Image.open(inputpath).convert(MODE)
im.save(outputpath, file_extension)
def convert_folder_webp_to_png(inputfolder, outputfolder):
for file in get_all_files(inputfolder):
filename = get_filename(file)
convert_webp_to_png(create_path(inputfolder, filename, INPUT_EXTENSION),
create_path(outputfolder,filename, OUTPUT_EXTENSION),
EXTENSION)
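# Illustrative usage (hypothetical paths; EXTENSION/INPUT_EXTENSION/OUTPUT_EXTENSION
# come from files.py and are assumed here to be 'PNG', '.webp' and '.png'):
#   convert_webp_to_png('in/picture.webp', 'out/picture.png', 'PNG')
#   convert_folder_webp_to_png('webp_images', 'png_images')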
| 40.8125
| 104
| 0.705972
|
8ddd94b371dc3a525bbc004a88e2a0ae77445778
| 6,807
|
py
|
Python
|
tournament/models.py
|
siddeshlc8/Software-Engineering-Project
|
f1b5c1ef029d1126b6446b53755b9c91b095d4a1
|
[
"MIT"
] | 4
|
2021-01-11T09:10:49.000Z
|
2022-03-20T09:39:41.000Z
|
tournament/models.py
|
siddeshlc8/Software_Engineering_Project
|
f1b5c1ef029d1126b6446b53755b9c91b095d4a1
|
[
"MIT"
] | 6
|
2020-06-05T22:33:29.000Z
|
2022-01-13T01:33:17.000Z
|
tournament/models.py
|
siddeshlc8/Software_Engineering_Project
|
f1b5c1ef029d1126b6446b53755b9c91b095d4a1
|
[
"MIT"
] | 1
|
2018-09-10T07:44:56.000Z
|
2018-09-10T07:44:56.000Z
|
from django.db import models
from organizer.models import Organizer
from player.models import Player
# Create your models here.
class Tournament(models.Model):
name = models.CharField(max_length=20, unique=True)
image = models.ImageField(blank=True, upload_to='tournaments', default='tournaments/no.png')
place = models.TextField()
start_date = models.DateField()
end_date = models.DateField(default=None)
organizer = models.ForeignKey(Organizer, on_delete=models.CASCADE, null=True, blank=True)
tournament_status = models.IntegerField(default=0)
tournament_schedule = models.IntegerField(default=0)
def __str__(self):
return self.name
class Team(models.Model):
name = models.CharField(max_length=20, unique=True)
owner = models.CharField(max_length=20)
logo = models.ImageField()
tournament = models.ForeignKey(Tournament, on_delete=models.CASCADE, null=True, blank=True)
players = models.ManyToManyField(Player)
def __str__(self):
return self.name
class Match(models.Model):
name = models.CharField(max_length=10, blank=True, null=True)
tournament = models.ForeignKey(Tournament, on_delete=models.CASCADE, null=True, blank=True)
team_1 = models.ForeignKey('Team', related_name='team_1', on_delete=models.DO_NOTHING, null=True, blank=True)
team_2 = models.ForeignKey('Team', related_name='team_2', on_delete=models.DO_NOTHING, null=True, blank=True)
overs = models.IntegerField(default=0)
match_status = models.IntegerField(default=0)
toss_winner = models.ForeignKey('Team', related_name='toss_winner', on_delete=models.DO_NOTHING,
blank=True, null=True)
toss_winner_choice = models.CharField(max_length=10, default='Select')
match_winner = models.ForeignKey('Team', related_name='match_winner', on_delete=models.DO_NOTHING, blank=True, null=True)
def __str__(self):
return self.name
class FirstInnings(models.Model):
match = models.ForeignKey(Match, on_delete=models.CASCADE, blank=True, null=True)
completed = models.BooleanField(default=False)
batting_team = models.ForeignKey(Team, related_name='batting_team1', on_delete=models.DO_NOTHING,
blank=True, null=True)
bowling_team = models.ForeignKey(Team, related_name='bowling_team1', on_delete=models.DO_NOTHING,
blank=True, null=True)
current_over = models.IntegerField(default=0)
openers_selected = models.BooleanField(default=False)
striker = models.ForeignKey(Player, related_name='striker1', on_delete=models.DO_NOTHING,
blank=True, null=True)
non_striker = models.ForeignKey(Player, related_name='non_strike1',
on_delete=models.DO_NOTHING, blank=True, null=True)
previous_bowler = models.ForeignKey(Player, related_name='previous_bowler1',
on_delete=models.DO_NOTHING, blank=True, null=True)
current_bowler = models.ForeignKey(Player, related_name='current_bowler1',
on_delete=models.DO_NOTHING, blank=True, null=True)
def __str__(self):
return self.match.name
class SecondInnings(models.Model):
match = models.ForeignKey(Match, on_delete=models.CASCADE, blank=True, null=True)
completed = models.BooleanField(default=False)
batting_team = models.ForeignKey('Team', related_name='batting_team2', on_delete=models.DO_NOTHING,
blank=True, null=True)
bowling_team = models.ForeignKey('Team', related_name='bowling_team2', on_delete=models.DO_NOTHING,
blank=True, null=True)
current_over = models.IntegerField(default=0)
openers_selected = models.BooleanField(default=False)
striker = models.ForeignKey(Player, related_name='striker2', on_delete=models.DO_NOTHING,
blank=True, null=True)
non_striker = models.ForeignKey(Player, related_name='non_striker2',
on_delete=models.DO_NOTHING, blank=True, null=True)
previous_bowler = models.ForeignKey(Player, related_name='previous_bowler2',
on_delete=models.DO_NOTHING, blank=True, null=True)
current_bowler = models.ForeignKey(Player, related_name='current_bowler2',
on_delete=models.DO_NOTHING, blank=True, null=True)
def __str__(self):
return self.match.name
class MatchAdditional(models.Model):
match = models.ForeignKey(Match, on_delete=models.CASCADE, blank=True, null=True)
current_innings = models.CharField(max_length=10, blank=True, null=True)
toss_stored = models.BooleanField(default=False)
def __str__(self):
return self.match.name
class Score(models.Model):
match = models.ForeignKey(Match, on_delete=models.CASCADE, null=True, blank=True)
innings = models.CharField(max_length=11)
batting_team = models.ForeignKey('Team', related_name='batting_team', on_delete=models.DO_NOTHING,
null=True, blank=True)
bowling_team = models.ForeignKey('Team', related_name='bowling_team', on_delete=models.DO_NOTHING,
null=True, blank=True)
ball_number = models.IntegerField(null=True, blank=True)
over_number = models.IntegerField(null=True, blank=True)
bowler = models.ForeignKey('player.Player', related_name='bowler', null=True, on_delete=models.DO_NOTHING)
batsman = models.ForeignKey('player.Player', related_name='batsman', null=True,
on_delete=models.DO_NOTHING)
description = models.CharField(max_length=500, null=True, blank=True)
wicket = models.BooleanField(default=False)
six = models.BooleanField(default=False)
four = models.BooleanField(default=False)
is_highlight = models.BooleanField(default=False)
highlight = models.CharField(max_length=20, null=True, blank=True)
out_batsman = models.ForeignKey('player.Player', related_name='out_batsman', null=True, blank=True,
on_delete=models.DO_NOTHING)
def __str__(self):
return self.match.name + ' - ' + self.innings + ' - ' + str(self.over_number) + '.' + str(self.ball_number)
class ScoreCard(models.Model):
winner = models.ForeignKey('Team', related_name='winner', on_delete=models.DO_NOTHING, blank=True,
null=True)
team_1_score = models.BigIntegerField(default=0)
team_2_score = models.BigIntegerField(default=0)
team_2_wickets = models.IntegerField(default=0)
team_1_wickets = models.IntegerField(default=0)
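# Illustrative object creation (hypothetical values, e.g. in a Django shell;
# some required fields such as Team.logo are omitted for brevity):
#   t = Tournament.objects.create(name='City Cup', place='Bengaluru',
#                                 start_date='2021-01-01', end_date='2021-01-10')
#   a = Team.objects.create(name='Lions', owner='ACME', tournament=t)
#   b = Team.objects.create(name='Tigers', owner='Globex', tournament=t)
#   m = Match.objects.create(name='Final', tournament=t, team_1=a, team_2=b, overs=20)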
| 46.944828
| 125
| 0.682827
|
053f054478f468cf9631ab6be03e0c50f52c3e72
| 4,313
|
py
|
Python
|
src/test/test_bip38.py
|
alessandro-saglimbeni/libwally-core
|
ae84c26519b6c513332c19bc183dc0b584f4bf33
|
[
"MIT"
] | 1
|
2021-06-06T18:11:23.000Z
|
2021-06-06T18:11:23.000Z
|
src/test/test_bip38.py
|
alessandro-saglimbeni/libwally-core
|
ae84c26519b6c513332c19bc183dc0b584f4bf33
|
[
"MIT"
] | 1
|
2020-12-31T11:25:31.000Z
|
2021-01-22T00:24:37.000Z
|
src/test/test_bip38.py
|
alessandro-saglimbeni/libwally-core
|
ae84c26519b6c513332c19bc183dc0b584f4bf33
|
[
"MIT"
] | 2
|
2020-08-24T07:39:11.000Z
|
2020-09-30T22:36:40.000Z
|
import unittest
from util import *
K_MAIN, K_TEST, K_COMP, K_EC, K_CHECK, K_RAW, K_ORDER = 0, 7, 256, 512, 1024, 2048, 4096
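# The vectors below add these flag constants together: K_COMP marks a compressed
# key, combinations that include K_RAW are routed through the raw-byte helpers
# below, and K_CHECK (exercised with an empty passphrase in test_bip38) appears
# to request a checksum-only decode.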
# BIP38 Vectors from
# https://github.com/bitcoin/bips/blob/master/bip-0038.mediawiki
cases = [
[ 'CBF4B9F70470856BB4F40F80B87EDB90865997FFEE6DF315AB166D713AF433A5',
'TestingOneTwoThree',
K_MAIN,
'6PRVWUbkzzsbcVac2qwfssoUJAN1Xhrg6bNk8J7Nzm5H7kxEbn2Nh2ZoGg' ],
[ '09C2686880095B1A4C249EE3AC4EEA8A014F11E6F986D0B5025AC1F39AFBD9AE',
'Satoshi',
K_MAIN,
'6PRNFFkZc2NZ6dJqFfhRoFNMR9Lnyj7dYGrzdgXXVMXcxoKTePPX1dWByq' ],
[ '64EEAB5F9BE2A01A8365A579511EB3373C87C40DA6D2A25F05BDA68FE077B66E',
unhexlify('cf9300f0909080f09f92a9'),
K_MAIN,
'6PRW5o9FLp4gJDDVqJQKJFTpMvdsSGJxMYHtHaQBF3ooa8mwD69bapcDQn' ],
[ 'CBF4B9F70470856BB4F40F80B87EDB90865997FFEE6DF315AB166D713AF433A5',
'TestingOneTwoThree',
K_MAIN + K_COMP,
'6PYNKZ1EAgYgmQfmNVamxyXVWHzK5s6DGhwP4J5o44cvXdoY7sRzhtpUeo' ],
[ '09C2686880095B1A4C249EE3AC4EEA8A014F11E6F986D0B5025AC1F39AFBD9AE',
'Satoshi',
K_MAIN + K_COMP,
'6PYLtMnXvfG3oJde97zRyLYFZCYizPU5T3LwgdYJz1fRhh16bU7u6PPmY7' ],
# Raw vectors
[ '09C2686880095B1A4C249EE3AC4EEA8A014F11E6F986D0B5025AC1F39AFBD9AE',
'Satoshi',
K_MAIN + K_COMP + K_RAW,
'0142E00B76EA60B62F66F0AF93D8B5380652AF51D1A3902EE00726CCEB70CA636B5B57CE6D3E2F' ],
[ '3CBC4D1E5C5248F81338596C0B1EE025FBE6C112633C357D66D2CE0BE541EA18',
'jon',
K_MAIN + K_COMP + K_RAW + K_ORDER,
'0142E09F8EE6E3A2FFCB13A99AA976AEDA5A2002ED3DF97FCB9957CD863357B55AA2072D3EB2F9' ],
]
class BIP38Tests(unittest.TestCase):
def from_priv(self, priv_key, passwd, flags):
priv, p_len = make_cbuffer(priv_key)
if flags > K_RAW:
out_buf, out_len = make_cbuffer('00' * 39)
ret = bip38_raw_from_private_key(priv, p_len, passwd, len(passwd),
flags, out_buf, out_len)
return ret, h(out_buf).upper()
else:
return bip38_from_private_key(priv, p_len, passwd, len(passwd), flags)
def to_priv(self, bip38, passwd, flags):
priv, priv_len = make_cbuffer('00' * 32)
bip38 = utf8(bip38)
if flags > K_RAW:
raw, raw_len = make_cbuffer(bip38)
ret = bip38_raw_to_private_key(raw, raw_len, passwd, len(passwd),
flags, priv, priv_len)
else:
ret = bip38_to_private_key(bip38, passwd, len(passwd), flags,
priv, priv_len)
return ret, priv
def test_bip38(self):
for case in cases:
priv_key, passwd, flags, expected = case
passwd = utf8(passwd) if type(passwd) is not bytes else passwd
ret, bip38 = self.from_priv(priv_key, passwd, flags)
self.assertEqual(ret, WALLY_OK)
bip38 = bip38.decode('utf-8') if type(bip38) is bytes else bip38
self.assertEqual(bip38, expected)
ret, new_priv_key = self.to_priv(bip38, passwd, flags)
self.assertEqual(ret, WALLY_OK)
self.assertEqual(h(new_priv_key).upper(), utf8(priv_key))
ret, new_priv_key = self.to_priv(bip38, '', flags + K_CHECK)
self.assertEqual(ret, WALLY_OK)
def test_bip38_invalid(self):
priv_key = 'CBF4B9F70470856BB4F40F80B87EDB90865997FFEE6DF315AB166D713AF433A5'
passwd = utf8('TestingInvalidFlags')
K_RES1 = 0x10 # BIP38_FLAG_RESERVED1
for flags, expected in [(0, WALLY_OK),
(K_RES1, WALLY_EINVAL),
(K_RAW, WALLY_OK),
(K_RAW+K_RES1, WALLY_EINVAL)]:
ret, _ = self.from_priv(priv_key, passwd, K_MAIN + flags)
self.assertEqual(ret, expected)
def test_bip38_flags(self):
priv_key = "6PYTh1Jgj3caimSrFjsfR5wJ8zUgWNDiPoNVZapSy8BwkF4NaKa1R32CaN"
ret, flags = bip38_get_flags(utf8(priv_key))
self.assertEqual(ret, WALLY_OK)
flags = int(flags)
self.assertEqual(flags & 256, 256) # BIP38_COMPRESSED
self.assertEqual(flags & 512, 0) # BIP38_KEY_EC_MULT
if __name__ == '__main__':
unittest.main()
| 41.07619
| 89
| 0.649664
|
3df49cf86a9b6d391723ddb2223f38f56c31d8d8
| 10,585
|
py
|
Python
|
test/ext/test_indexable.py
|
rissikess/sqlalchemy-ceodbc
|
6f81f3f7a3c6e39843d478e11e010923a3bf7672
|
[
"MIT"
] | null | null | null |
test/ext/test_indexable.py
|
rissikess/sqlalchemy-ceodbc
|
6f81f3f7a3c6e39843d478e11e010923a3bf7672
|
[
"MIT"
] | null | null | null |
test/ext/test_indexable.py
|
rissikess/sqlalchemy-ceodbc
|
6f81f3f7a3c6e39843d478e11e010923a3bf7672
|
[
"MIT"
] | null | null | null |
from sqlalchemy.testing import assert_raises
import sqlalchemy as sa
from sqlalchemy import testing
from sqlalchemy import Integer, Text
from sqlalchemy.sql.sqltypes import ARRAY, JSON
from sqlalchemy.testing.schema import Column
from sqlalchemy.orm import Session
from sqlalchemy.testing import fixtures
from sqlalchemy.ext.indexable import index_property
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.testing import eq_, ne_, is_, in_, not_in_
from sqlalchemy import inspect
class IndexPropertyTest(fixtures.TestBase):
def test_array(self):
Base = declarative_base()
class A(Base):
__tablename__ = 'a'
id = Column('id', Integer, primary_key=True)
array = Column('_array', ARRAY(Integer),
default=[])
first = index_property('array', 0)
tenth = index_property('array', 9)
a = A(array=[1, 2, 3])
eq_(a.first, 1)
assert_raises(AttributeError, lambda: a.tenth)
a.first = 100
eq_(a.first, 100)
eq_(a.array, [100, 2, 3])
del a.first
eq_(a.first, 2)
a2 = A(first=5)
eq_(a2.first, 5)
eq_(a2.array, [5])
def test_array_longinit(self):
Base = declarative_base()
class A(Base):
__tablename__ = 'a'
id = Column('id', Integer, primary_key=True)
array = Column('_array', ARRAY(Integer),
default=[])
first = index_property('array', 0)
fifth = index_property('array', 4)
a1 = A(fifth=10)
a2 = A(first=5)
eq_(a1.array, [None, None, None, None, 10])
eq_(a2.array, [5])
assert_raises(IndexError, setattr, a2, "fifth", 10)
def test_json(self):
Base = declarative_base()
class J(Base):
__tablename__ = 'j'
id = Column('id', Integer, primary_key=True)
json = Column('_json', JSON, default={})
field = index_property('json', 'field')
j = J(json={'a': 1, 'b': 2})
assert_raises(AttributeError, lambda: j.field)
j.field = 'test'
eq_(j.field, 'test')
eq_(j.json, {'a': 1, 'b': 2, 'field': 'test'})
j2 = J(field='test')
eq_(j2.json, {"field": "test"})
eq_(j2.field, "test")
def test_value_is_none_attributeerror(self):
Base = declarative_base()
class A(Base):
__tablename__ = 'a'
id = Column('id', Integer, primary_key=True)
array = Column('_array', ARRAY(Integer))
first = index_property('array', 1)
a = A()
assert_raises(AttributeError, getattr, a, "first")
assert_raises(AttributeError, delattr, a, "first")
def test_get_attribute_error(self):
Base = declarative_base()
class A(Base):
__tablename__ = 'a'
id = Column('id', Integer, primary_key=True)
array = Column('_array', ARRAY(Integer))
first = index_property('array', 1)
a = A(array=[])
assert_raises(AttributeError, lambda: a.first)
def test_set_immutable(self):
Base = declarative_base()
class A(Base):
__tablename__ = 'a'
id = Column(Integer, primary_key=True)
array = Column(ARRAY(Integer))
first = index_property('array', 1, mutable=False)
a = A()
def set_():
a.first = 10
assert_raises(AttributeError, set_)
def test_set_mutable_dict(self):
Base = declarative_base()
class J(Base):
__tablename__ = 'j'
id = Column(Integer, primary_key=True)
json = Column(JSON, default={})
field = index_property('json', 'field')
j = J()
j.field = 10
j.json = {}
assert_raises(AttributeError, lambda: j.field)
assert_raises(AttributeError, delattr, j, "field")
j.field = 10
eq_(j.field, 10)
def test_get_default_value(self):
Base = declarative_base()
class J(Base):
__tablename__ = 'j'
id = Column(Integer, primary_key=True)
json = Column(JSON, default={})
default = index_property('json', 'field', default='default')
none = index_property('json', 'field', default=None)
j = J()
assert j.json is None
assert j.default == 'default'
assert j.none is None
j.json = {}
assert j.default == 'default'
assert j.none is None
j.default = None
assert j.default is None
assert j.none is None
j.none = 10
assert j.default == 10
assert j.none == 10
class IndexPropertyArrayTest(fixtures.DeclarativeMappedTest):
__requires__ = ('array_type',)
__backend__ = True
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class Array(fixtures.ComparableEntity, Base):
__tablename__ = "array"
id = Column(sa.Integer, primary_key=True,
test_needs_autoincrement=True)
array = Column(ARRAY(Integer), default=[])
array0 = Column(ARRAY(Integer, zero_indexes=True), default=[])
first = index_property('array', 0)
first0 = index_property('array0', 0, onebased=False)
def test_query(self):
Array = self.classes.Array
s = Session(testing.db)
s.add_all([
Array(),
Array(array=[1, 2, 3], array0=[1, 2, 3]),
Array(array=[4, 5, 6], array0=[4, 5, 6])])
s.commit()
a1 = s.query(Array).filter(Array.array == [1, 2, 3]).one()
a2 = s.query(Array).filter(Array.first == 1).one()
eq_(a1.id, a2.id)
a3 = s.query(Array).filter(Array.first == 4).one()
ne_(a1.id, a3.id)
a4 = s.query(Array).filter(Array.first0 == 1).one()
eq_(a1.id, a4.id)
a5 = s.query(Array).filter(Array.first0 == 4).one()
ne_(a1.id, a5.id)
def test_mutable(self):
Array = self.classes.Array
s = Session(testing.db)
a = Array(array=[1, 2, 3])
s.add(a)
s.commit()
a.first = 42
eq_(a.first, 42)
s.commit()
eq_(a.first, 42)
del a.first
eq_(a.first, 2)
s.commit()
eq_(a.first, 2)
def test_modified(self):
from sqlalchemy import inspect
Array = self.classes.Array
s = Session(testing.db)
a = Array(array=[1, 2, 3])
s.add(a)
s.commit()
i = inspect(a)
is_(i.modified, False)
in_('array', i.unmodified)
a.first = 10
is_(i.modified, True)
not_in_('array', i.unmodified)
class IndexPropertyJsonTest(fixtures.DeclarativeMappedTest):
# TODO: remove reliance on "astext" for these tests
__requires__ = ('json_type',)
__only_on__ = 'postgresql'
__backend__ = True
@classmethod
def setup_classes(cls):
from sqlalchemy.dialects.postgresql import JSON
Base = cls.DeclarativeBasic
class json_property(index_property):
def __init__(self, attr_name, index, cast_type):
super(json_property, self).__init__(attr_name, index)
self.cast_type = cast_type
def expr(self, model):
expr = super(json_property, self).expr(model)
return expr.astext.cast(self.cast_type)
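# json_property extends index_property so queries compare typed values:
# expr() takes the JSON element, renders it as text via .astext and casts it
# to the given SQL type (see Json.int_field / Json.text_field below).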
class Json(fixtures.ComparableEntity, Base):
__tablename__ = "json"
id = Column(sa.Integer, primary_key=True,
test_needs_autoincrement=True)
json = Column(JSON, default={})
field = index_property('json', 'field')
json_field = index_property('json', 'field')
int_field = json_property('json', 'field', Integer)
text_field = json_property('json', 'field', Text)
other = index_property('json', 'other')
subfield = json_property('other', 'field', Text)
def test_query(self):
Json = self.classes.Json
s = Session(testing.db)
s.add_all([
Json(),
Json(json={'field': 10}),
Json(json={'field': 20})])
s.commit()
a1 = s.query(Json).filter(Json.json['field'].astext.cast(Integer) == 10)\
.one()
a2 = s.query(Json).filter(Json.field.astext == '10').one()
eq_(a1.id, a2.id)
a3 = s.query(Json).filter(Json.field.astext == '20').one()
ne_(a1.id, a3.id)
a4 = s.query(Json).filter(Json.json_field.astext == '10').one()
eq_(a2.id, a4.id)
a5 = s.query(Json).filter(Json.int_field == 10).one()
eq_(a2.id, a5.id)
a6 = s.query(Json).filter(Json.text_field == '10').one()
eq_(a2.id, a6.id)
def test_mutable(self):
Json = self.classes.Json
s = Session(testing.db)
j = Json(json={})
s.add(j)
s.commit()
j.other = 42
eq_(j.other, 42)
s.commit()
eq_(j.other, 42)
def test_modified(self):
Json = self.classes.Json
s = Session(testing.db)
j = Json(json={})
s.add(j)
s.commit()
i = inspect(j)
is_(i.modified, False)
in_('json', i.unmodified)
j.other = 42
is_(i.modified, True)
not_in_('json', i.unmodified)
def test_cast_type(self):
Json = self.classes.Json
s = Session(testing.db)
j = Json(json={'field': 10})
s.add(j)
s.commit()
jq = s.query(Json).filter(Json.int_field == 10).one()
eq_(j.id, jq.id)
jq = s.query(Json).filter(Json.text_field == '10').one()
eq_(j.id, jq.id)
jq = s.query(Json).filter(Json.json_field.astext == '10').one()
eq_(j.id, jq.id)
jq = s.query(Json).filter(Json.text_field == 'wrong').first()
is_(jq, None)
j.json = {'field': True}
s.commit()
jq = s.query(Json).filter(Json.text_field == 'true').one()
eq_(j.id, jq.id)
def test_multi_dimension(self):
Json = self.classes.Json
s = Session(testing.db)
j = Json(json={'other': {'field': 'multi'}})
s.add(j)
s.commit()
eq_(j.other, {'field': 'multi'})
eq_(j.subfield, 'multi')
jq = s.query(Json).filter(Json.subfield == 'multi').first()
eq_(j.id, jq.id)
| 28.226667
| 81
| 0.547567
|
5d5acc1d9b48118cc45983b71b7e4007f7e0decd
| 466
|
py
|
Python
|
fastaProcessing/fastaReplaceDeg.py
|
linlifeng/llbio
|
962f6abda8e8a576cb0bf4d73d3a5c4ef9b1a04b
|
[
"MIT"
] | null | null | null |
fastaProcessing/fastaReplaceDeg.py
|
linlifeng/llbio
|
962f6abda8e8a576cb0bf4d73d3a5c4ef9b1a04b
|
[
"MIT"
] | null | null | null |
fastaProcessing/fastaReplaceDeg.py
|
linlifeng/llbio
|
962f6abda8e8a576cb0bf4d73d3a5c4ef9b1a04b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
'''
Usage: fastaReplaceDeg.py <fasta file> <replacement character>

Replaces every base that is not A, T, C or G (case-insensitive) with the
given replacement character and prints the cleaned fasta records to stdout.
'''
from sys import argv,exit
from Bio import SeqIO
try:
fname = argv[1]
repl = argv[2]
except IndexError:
exit(__doc__)
f = open(fname, 'r')
records = SeqIO.parse(f,'fasta')
for r in records:
seq = r.seq
newSeq = ''
    for l in seq:
        # keep canonical bases; replace degenerate/ambiguous characters
        if l.lower() not in ['a', 't', 'c', 'g']:
            newSeq += repl
        else:
            newSeq += l
    print(">%s\n%s\n" % (r.description, newSeq))
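# A minimal invocation sketch; the file names and replacement character are
# illustrative placeholders, not part of the script:
#
#     python fastaReplaceDeg.py input.fasta N > cleaned.fasta
#
# This writes a copy of input.fasta in which every non-A/T/C/G character has
# been replaced by "N".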
| 15.533333
| 46
| 0.530043
|
34e24e51fc99a3e2607974e174f316b0da43e947
| 8,957
|
py
|
Python
|
homeassistant/generated/zeroconf.py
|
davyike/core
|
13cc7583ed5c7de43c56b43db8fdc9879a853666
|
[
"Apache-2.0"
] | 3
|
2019-10-02T04:40:26.000Z
|
2020-02-16T13:19:08.000Z
|
homeassistant/generated/zeroconf.py
|
davyike/core
|
13cc7583ed5c7de43c56b43db8fdc9879a853666
|
[
"Apache-2.0"
] | 18
|
2021-11-03T06:21:27.000Z
|
2022-03-31T06:20:57.000Z
|
homeassistant/generated/zeroconf.py
|
davyike/core
|
13cc7583ed5c7de43c56b43db8fdc9879a853666
|
[
"Apache-2.0"
] | null | null | null |
"""Automatically generated by hassfest.
To update, run python3 -m script.hassfest
"""
# fmt: off
ZEROCONF = {
"_Volumio._tcp.local.": [
{
"domain": "volumio"
}
],
"_airplay._tcp.local.": [
{
"domain": "apple_tv",
"properties": {
"model": "appletv*"
}
},
{
"domain": "apple_tv",
"properties": {
"model": "audioaccessory*"
}
},
{
"domain": "apple_tv",
"properties": {
"am": "airport*"
}
},
{
"domain": "samsungtv",
"properties": {
"manufacturer": "samsung*"
}
}
],
"_airport._tcp.local.": [
{
"domain": "apple_tv"
}
],
"_api._udp.local.": [
{
"domain": "guardian"
}
],
"_appletv-v2._tcp.local.": [
{
"domain": "apple_tv"
}
],
"_axis-video._tcp.local.": [
{
"domain": "axis",
"properties": {
"macaddress": "00408c*"
}
},
{
"domain": "axis",
"properties": {
"macaddress": "accc8e*"
}
},
{
"domain": "axis",
"properties": {
"macaddress": "b8a44f*"
}
},
{
"domain": "doorbird",
"properties": {
"macaddress": "1ccae3*"
}
}
],
"_bond._tcp.local.": [
{
"domain": "bond"
}
],
"_companion-link._tcp.local.": [
{
"domain": "apple_tv"
}
],
"_daap._tcp.local.": [
{
"domain": "forked_daapd"
}
],
"_dkapi._tcp.local.": [
{
"domain": "daikin"
}
],
"_dvl-deviceapi._tcp.local.": [
{
"domain": "devolo_home_control"
},
{
"domain": "devolo_home_network"
}
],
"_easylink._tcp.local.": [
{
"domain": "modern_forms",
"name": "wac*"
}
],
"_elg._tcp.local.": [
{
"domain": "elgato"
}
],
"_enphase-envoy._tcp.local.": [
{
"domain": "enphase_envoy"
}
],
"_esphomelib._tcp.local.": [
{
"domain": "esphome"
},
{
"domain": "zha",
"name": "tube*"
}
],
"_fbx-api._tcp.local.": [
{
"domain": "freebox"
}
],
"_googlecast._tcp.local.": [
{
"domain": "cast"
}
],
"_hap._tcp.local.": [
{
"domain": "homekit_controller"
},
{
"domain": "zwave_me",
"name": "*z.wave-me*"
}
],
"_homekit._tcp.local.": [
{
"domain": "homekit"
}
],
"_hscp._tcp.local.": [
{
"domain": "apple_tv"
}
],
"_http._tcp.local.": [
{
"domain": "bosch_shc",
"name": "bosch shc*"
},
{
"domain": "nam",
"name": "nam-*"
},
{
"domain": "nam",
"properties": {
"manufacturer": "nettigo"
}
},
{
"domain": "pure_energie",
"name": "smartbridge*"
},
{
"domain": "rachio",
"name": "rachio*"
},
{
"domain": "rainmachine",
"name": "rainmachine*"
},
{
"domain": "shelly",
"name": "shelly*"
}
],
"_hue._tcp.local.": [
{
"domain": "hue"
}
],
"_hwenergy._tcp.local.": [
{
"domain": "homewizard"
}
],
"_ipp._tcp.local.": [
{
"domain": "ipp"
}
],
"_ipps._tcp.local.": [
{
"domain": "ipp"
}
],
"_kizbox._tcp.local.": [
{
"domain": "overkiz",
"name": "gateway*"
},
{
"domain": "somfy",
"name": "gateway*"
}
],
"_leap._tcp.local.": [
{
"domain": "lutron_caseta"
}
],
"_lookin._tcp.local.": [
{
"domain": "lookin"
}
],
"_mediaremotetv._tcp.local.": [
{
"domain": "apple_tv"
}
],
"_miio._udp.local.": [
{
"domain": "xiaomi_aqara"
},
{
"domain": "xiaomi_miio"
},
{
"domain": "yeelight",
"name": "yeelink-*"
}
],
"_nanoleafapi._tcp.local.": [
{
"domain": "nanoleaf"
}
],
"_nanoleafms._tcp.local.": [
{
"domain": "nanoleaf"
}
],
"_nut._tcp.local.": [
{
"domain": "nut"
}
],
"_octoprint._tcp.local.": [
{
"domain": "octoprint"
}
],
"_plexmediasvr._tcp.local.": [
{
"domain": "plex"
}
],
"_plugwise._tcp.local.": [
{
"domain": "plugwise"
}
],
"_powerview._tcp.local.": [
{
"domain": "hunterdouglas_powerview"
}
],
"_printer._tcp.local.": [
{
"domain": "brother",
"name": "brother*"
}
],
"_raop._tcp.local.": [
{
"domain": "apple_tv",
"properties": {
"am": "appletv*"
}
},
{
"domain": "apple_tv",
"properties": {
"am": "audioaccessory*"
}
},
{
"domain": "apple_tv",
"properties": {
"am": "airport*"
}
}
],
"_sideplay._tcp.local.": [
{
"domain": "ecobee",
"properties": {
"mdl": "eb-*"
}
},
{
"domain": "ecobee",
"properties": {
"mdl": "ecobee*"
}
}
],
"_sleep-proxy._udp.local.": [
{
"domain": "apple_tv"
}
],
"_sonos._tcp.local.": [
{
"domain": "sonos"
}
],
"_spotify-connect._tcp.local.": [
{
"domain": "spotify"
}
],
"_ssh._tcp.local.": [
{
"domain": "smappee",
"name": "smappee1*"
},
{
"domain": "smappee",
"name": "smappee2*"
},
{
"domain": "smappee",
"name": "smappee50*"
}
],
"_system-bridge._udp.local.": [
{
"domain": "system_bridge"
}
],
"_touch-able._tcp.local.": [
{
"domain": "apple_tv"
}
],
"_viziocast._tcp.local.": [
{
"domain": "vizio"
}
],
"_wled._tcp.local.": [
{
"domain": "wled"
}
],
"_xbmc-jsonrpc-h._tcp.local.": [
{
"domain": "kodi"
}
],
"_zwave-js-server._tcp.local.": [
{
"domain": "zwave_js"
}
]
}
HOMEKIT = {
"3810X": "roku",
"4660X": "roku",
"7820X": "roku",
"819LMB": "myq",
"AC02": "tado",
"Abode": "abode",
"BSB002": "hue",
"C105X": "roku",
"C135X": "roku",
"EB-*": "ecobee",
"Healty Home Coach": "netatmo",
"Iota": "abode",
"LIFX A19": "lifx",
"LIFX BR30": "lifx",
"LIFX Beam": "lifx",
"LIFX Candle": "lifx",
"LIFX Clean": "lifx",
"LIFX Color": "lifx",
"LIFX DLCOL": "lifx",
"LIFX DLWW": "lifx",
"LIFX Dlight": "lifx",
"LIFX Downlight": "lifx",
"LIFX Filament": "lifx",
"LIFX GU10": "lifx",
"LIFX Lightstrip": "lifx",
"LIFX Mini": "lifx",
"LIFX Nightvision": "lifx",
"LIFX Pls": "lifx",
"LIFX Plus": "lifx",
"LIFX Tile": "lifx",
"LIFX White": "lifx",
"LIFX Z": "lifx",
"MYQ": "myq",
"NL29": "nanoleaf",
"NL42": "nanoleaf",
"NL47": "nanoleaf",
"NL48": "nanoleaf",
"NL52": "nanoleaf",
"NL59": "nanoleaf",
"Netatmo Relay": "netatmo",
"PowerView": "hunterdouglas_powerview",
"Presence": "netatmo",
"Rachio": "rachio",
"SPK5": "rainmachine",
"Sensibo": "sensibo",
"Smart Bridge": "lutron_caseta",
"Socket": "wemo",
"TRADFRI": "tradfri",
"Touch HD": "rainmachine",
"Welcome": "netatmo",
"Wemo": "wemo",
"YL*": "yeelight",
"ecobee*": "ecobee",
"iSmartGate": "gogogate2",
"iZone": "izone",
"tado": "tado"
}
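# A minimal sketch of how the HOMEKIT table above could be consulted. The
# glob-style matching via fnmatchcase and the lookup_homekit_domain() helper
# are illustrative assumptions, not Home Assistant's actual discovery code.
from fnmatch import fnmatchcase


def lookup_homekit_domain(model):
    """Return the integration domain matching a HomeKit model string, if any."""
    for pattern, domain in HOMEKIT.items():
        # Entries such as "EB-*" or "YL*" act as glob patterns; plain entries
        # like "BSB002" only match exactly.
        if fnmatchcase(model, pattern):
            return domain
    return None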
| 20.08296
| 47
| 0.356146
|
f79962dd478d66de69057f56d81005f401c027d4
| 9,860
|
py
|
Python
|
main.py
|
TharukRenuja/Telegraph-Uploader
|
7a01d32742177bf9b3c896f448c6ca915a6953a3
|
[
"MIT"
] | null | null | null |
main.py
|
TharukRenuja/Telegraph-Uploader
|
7a01d32742177bf9b3c896f448c6ca915a6953a3
|
[
"MIT"
] | null | null | null |
main.py
|
TharukRenuja/Telegraph-Uploader
|
7a01d32742177bf9b3c896f448c6ca915a6953a3
|
[
"MIT"
] | null | null | null |
# Made with python3
# (C) @FayasNoushad
# Copyright permission under MIT License
# All rights reserved by FayasNoushad
# License -> https://github.com/FayasNoushad/Telegraph-Uploader-Bot-V2/blob/main/LICENSE
import os
import time
import math
import json
import string
import random
import traceback
import asyncio
import datetime
import aiofiles
from random import choice
from pyrogram import Client, filters
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
from pyrogram.errors import FloodWait, InputUserDeactivated, UserIsBlocked, PeerIdInvalid, UserNotParticipant, UserBannedInChannel
from pyrogram.errors.exceptions.bad_request_400 import PeerIdInvalid
from telegraph import upload_file
from database import Database
UPDATE_CHANNEL = os.environ.get("UPDATE_CHANNEL", "")
BOT_OWNER = int(os.environ["BOT_OWNER"])
DATABASE_URL = os.environ["DATABASE_URL"]
db = Database(DATABASE_URL, "FnTelegraphBot")
Bot = Client(
"Telegraph Uploader Bot",
bot_token = os.environ["BOT_TOKEN"],
api_id = int(os.environ["API_ID"]),
api_hash = os.environ["API_HASH"],
)
START_TEXT = """**Hello {} 😌
I am a small bot that uploads media or files to telegra.ph links.**
>> `I can convert a photo or video under 5MB into a telegra.ph link.`
Powered by @SLBotsOfficial👑"""
HELP_TEXT = """**Hey, Follow these steps:**
➠ Just send me a media file under 5MB
➠ Then I will download it
➠ I will then upload it and reply with a telegra.ph link
**Available Commands**
/start - Checking Bot Online
/help - For more help
/about - For more about me
/status - For bot updates
Powered by @SLBotsOfficial👑"""
ABOUT_TEXT = """--**About Me**-- 😎
🤖 **Name :** [Telegraph Uploader](https://telegram.me/{})
👨💻 **Creator :** [Fayas](https://github.com/TharukRenuja)
📢 **Channel :** [Fayas Noushad](https://telegram.me/SLBotsOfficial)
📝 **Language :** [Python3](https://python.org)
🧰 **Framework :** [Pyrogram](https://pyrogram.org)
📡 **Server :** [Heroku](https://heroku.com)"""
FORCE_SUBSCRIBE_TEXT = "<code>Sorry dear, you must join my updates channel before using me 😌😉....</code>"
START_BUTTONS = InlineKeyboardMarkup(
[[
InlineKeyboardButton('⚙ Help', callback_data='help'),
InlineKeyboardButton('About 🔰', callback_data='about'),
InlineKeyboardButton('Close ✖️', callback_data='close')
]]
)
HELP_BUTTONS = InlineKeyboardMarkup(
[[
InlineKeyboardButton('🏘 Home', callback_data='home'),
InlineKeyboardButton('About 🔰', callback_data='about'),
InlineKeyboardButton('Close ✖️', callback_data='close')
]]
)
ABOUT_BUTTONS = InlineKeyboardMarkup(
[[
InlineKeyboardButton('🏘 Home', callback_data='home'),
InlineKeyboardButton('Help ⚙', callback_data='help'),
InlineKeyboardButton('Close ✖️', callback_data='close')
]]
)
async def send_msg(user_id, message):
try:
await message.copy(chat_id=user_id)
return 200, None
except FloodWait as e:
await asyncio.sleep(e.x)
        return await send_msg(user_id, message)
except InputUserDeactivated:
return 400, f"{user_id} : deactivated\n"
except UserIsBlocked:
return 400, f"{user_id} : user is blocked\n"
except PeerIdInvalid:
return 400, f"{user_id} : user id invalid\n"
except Exception as e:
return 500, f"{user_id} : {traceback.format_exc()}\n"
@Bot.on_callback_query()
async def cb_handler(bot, update):
if update.data == "home":
await update.message.edit_text(
text=START_TEXT.format(update.from_user.mention),
reply_markup=START_BUTTONS,
disable_web_page_preview=True
)
elif update.data == "help":
await update.message.edit_text(
text=HELP_TEXT,
reply_markup=HELP_BUTTONS,
disable_web_page_preview=True
)
elif update.data == "about":
await update.message.edit_text(
text=ABOUT_TEXT.format((await bot.get_me()).username),
reply_markup=ABOUT_BUTTONS,
disable_web_page_preview=True
)
else:
await update.message.delete()
@Bot.on_message(filters.private & filters.command(["start"]))
async def start(bot, update):
if not await db.is_user_exist(update.from_user.id):
await db.add_user(update.from_user.id)
await update.reply_text(
text=START_TEXT.format(update.from_user.mention),
disable_web_page_preview=True,
reply_markup=START_BUTTONS
)
@Bot.on_message(filters.private & filters.command(["help"]))
async def help(bot, update):
if not await db.is_user_exist(update.from_user.id):
await db.add_user(update.from_user.id)
await update.reply_text(
text=HELP_TEXT,
disable_web_page_preview=True,
reply_markup=HELP_BUTTONS
)
@Bot.on_message(filters.private & filters.command(["about"]))
async def about(bot, update):
if not await db.is_user_exist(update.from_user.id):
await db.add_user(update.from_user.id)
await update.reply_text(
text=ABOUT_TEXT.format((await bot.get_me()).username),
disable_web_page_preview=True,
reply_markup=ABOUT_BUTTONS
)
@Bot.on_message(filters.media & filters.private)
async def telegraph_upload(bot, update):
if not await db.is_user_exist(update.from_user.id):
await db.add_user(update.from_user.id)
if UPDATE_CHANNEL:
try:
user = await bot.get_chat_member(UPDATE_CHANNEL, update.chat.id)
if user.status == "kicked":
await update.reply_text(text="You are banned!")
return
except UserNotParticipant:
await update.reply_text(
text=FORCE_SUBSCRIBE_TEXT,
reply_markup=InlineKeyboardMarkup(
[[InlineKeyboardButton(text="⚙ Join Updates Channel ⚙", url=f"https://telegram.me/{UPDATE_CHANNEL}")]]
)
)
return
except Exception as error:
print(error)
            await update.reply_text(text="Something went wrong. Contact the <a href='https://telegram.me/TharukRenuja'>developer</a>.", disable_web_page_preview=True)
return
medianame = "./DOWNLOADS/" + "FayasNoushad/FnTelegraphBot"
text = await update.reply_text(
text="<code>Downloading to My Server ...</code>",
disable_web_page_preview=True
)
await bot.download_media(
message=update,
file_name=medianame
)
await text.edit_text(
text="<code>Downloading Completed. Now I am Uploading to telegra.ph Link ...</code>",
disable_web_page_preview=True
)
try:
response = upload_file(medianame)
except Exception as error:
print(error)
await text.edit_text(
text=f"Error :- {error}",
disable_web_page_preview=True
)
return
try:
os.remove(medianame)
except Exception as error:
print(error)
return
await text.edit_text(
text=f"<b>Link :-</b> <code>https://telegra.ph{response[0]}</code>\n\n<b>Join :-</b> @SLBotsOfficial",
disable_web_page_preview=True,
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton(text="Open Link", url=f"https://telegra.ph{response[0]}"),
InlineKeyboardButton(text="Share Link", url=f"https://telegram.me/share/url?url=https://telegra.ph{response[0]}")
],
[InlineKeyboardButton(text="⚙ Join Updates Channel ⚙", url="https://telegram.me/SLBotsOfficial")]
]
)
)
@Bot.on_message(filters.private & filters.command("broadcast") & filters.user(BOT_OWNER) & filters.reply)
async def broadcast(bot, update):
broadcast_ids = {}
all_users = await db.get_all_users()
broadcast_msg = update.reply_to_message
while True:
broadcast_id = ''.join([random.choice(string.ascii_letters) for i in range(3)])
if not broadcast_ids.get(broadcast_id):
break
out = await update.reply_text(text=f"Broadcast Started! You will be notified with log file when all the users are notified.")
start_time = time.time()
total_users = await db.total_users_count()
done = 0
failed = 0
success = 0
broadcast_ids[broadcast_id] = dict(total = total_users, current = done, failed = failed, success = success)
async with aiofiles.open('broadcast.txt', 'w') as broadcast_log_file:
async for user in all_users:
sts, msg = await send_msg(user_id = int(user['id']), message = broadcast_msg)
if msg is not None:
await broadcast_log_file.write(msg)
if sts == 200:
success += 1
else:
failed += 1
if sts == 400:
await db.delete_user(user['id'])
done += 1
if broadcast_ids.get(broadcast_id) is None:
break
else:
broadcast_ids[broadcast_id].update(dict(current = done, failed = failed, success = success))
if broadcast_ids.get(broadcast_id):
broadcast_ids.pop(broadcast_id)
completed_in = datetime.timedelta(seconds=int(time.time()-start_time))
await asyncio.sleep(3)
await out.delete()
if failed == 0:
await update.reply_text(text=f"broadcast completed in `{completed_in}`\n\nTotal users {total_users}.\nTotal done {done}, {success} success and {failed} failed.", quote=True)
else:
await update.reply_document(document='broadcast.txt', caption=f"broadcast completed in `{completed_in}`\n\nTotal users {total_users}.\nTotal done {done}, {success} success and {failed} failed.")
os.remove('broadcast.txt')
@Bot.on_message(filters.private & filters.command("status"), group=5)
async def status(bot, update):
total_users = await db.total_users_count()
text = "**Bot Status**\n"
text += f"\n**Total Users:** `{total_users}`"
await update.reply_text(
text=text,
quote=True,
disable_web_page_preview=True
)
Bot.run()
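# Configuration sketch: the bot reads its settings from environment variables.
# BOT_TOKEN, API_ID, API_HASH, DATABASE_URL and BOT_OWNER are required (see the
# os.environ lookups near the top of the file); UPDATE_CHANNEL is optional.
# The values below are placeholders, not real credentials.
#
#     export BOT_TOKEN="123456:abcdef"
#     export API_ID="12345"
#     export API_HASH="0123456789abcdef"
#     export DATABASE_URL="<your database connection string>"
#     export BOT_OWNER="123456789"
#     export UPDATE_CHANNEL="SLBotsOfficial"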
| 33.310811
| 199
| 0.66998
|
5c18acc59e8fdfa76448dce04c040546686b3f1f
| 13,060
|
py
|
Python
|
wildfly/client.py
|
bheesham/wildfly-py
|
b5a3da0c7843e22b0c76b2b12fc088f0a2c9bd2d
|
[
"Apache-2.0"
] | 1
|
2021-06-08T18:44:59.000Z
|
2021-06-08T18:44:59.000Z
|
wildfly/client.py
|
zsuzhengdu/wildfly-py
|
b5a3da0c7843e22b0c76b2b12fc088f0a2c9bd2d
|
[
"Apache-2.0"
] | null | null | null |
wildfly/client.py
|
zsuzhengdu/wildfly-py
|
b5a3da0c7843e22b0c76b2b12fc088f0a2c9bd2d
|
[
"Apache-2.0"
] | 1
|
2021-06-08T18:42:35.000Z
|
2021-06-08T18:42:35.000Z
|
# python binding for wildlfy management http/json api
import logging
import json
import requests
from . import util
from . import api
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Define some commonly used keys of the management API response.
KEY_OUTCOME = "outcome"
KEY_RESULT = "result"
class Client(requests.Session,
api.HostApiMixin,
api.ServerApiMixin,
api.DeploymentApiMixin):
DEFAULT_MANAGEMENT_PORT = '9990'
DEFAULT_MANAGEMENT_USER = 'admin'
DEFAULT_MANAGEMENT_PWD = 'admin'
DEFAULT_TIMEOUT = 5000
def __init__(
self,
host,
port=DEFAULT_MANAGEMENT_PORT,
username=DEFAULT_MANAGEMENT_USER,
password=DEFAULT_MANAGEMENT_PWD,
timeout=DEFAULT_TIMEOUT):
super(Client, self).__init__()
self.username = username
self.password = password
self.host = host
self.port = port
self.timeout = timeout
self.endpoint = 'http://{}:{}/management'.format(self.host, self.port)
def _post(self, request):
logger.debug('Request: {}'.format(request))
headers = {'content-type': 'application/json'}
response = requests.post(
self.endpoint,
headers=headers,
auth=requests.auth.HTTPDigestAuth(
self.username,
self.password),
data=json.dumps(request))
if response.status_code in [200, 204]:
logger.debug(
'Response Status Code: {}: {}'.format(
response.status_code,
response.reason))
elif response.status_code == 500:
logger.debug(
'Response Status Code: {}: {}'.format(
response.status_code,
response.json()))
else:
response.raise_for_status()
logger.debug('Response: {}'.format(response.json()))
return response
def execute(self, operation, parameters={}, address=[]):
""" Execute operation on resource. """
request = {'address': address, 'operation': operation}
request.update(parameters)
return self._post(request)
def add(self, address, parameters=None):
""" Creates a new management resource. """
response = self.execute('add', parameters, address)
return response
def remove(self, address):
""" Removes an existing resource. """
response = self.execute('remove', address=address)
return response
def read_resource(
self,
address=[],
recursive=False,
recursive_depth=10,
runtime=False,
include_defaults=True,
attributes_only=False):
""" Reads a resource's attribute values along with either
basic or complete information about any child resources. """
response = self.execute('read-resource',
{'recursive': recursive,
'recursive_depth': recursive_depth,
'runtime': runtime,
'include_defaults': include_defaults,
'attributes_only': attributes_only},
address)
return response
def read_attribute(self, name, address=[], include_defaults=True):
""" Read attribute of resource. """
response = self.execute('read-attribute',
{'name': name,
'include-defaults': include_defaults},
address)
return response.json()['result'] if util.is_success(response) else None
def write_attribute(self, name, value, address=[]):
""" Write value of attribute of resource. """
response = self.execute('write-attribute',
{'name': name, 'value': value},
address)
return response
def unset_attribute(self, name, address=[]):
""" Sets the value of an individual attribute to the undefined
value. """
response = self.execute('unset-attribute',
{'name': name},
address)
return response.json()['result'] if util.is_success(response) else None
def read_children_names(self, child_type, address=[]):
""" Returns a list of the names of all child resources of a
given type. """
response = self.execute('read-children-names',
{'child-type': child_type},
address)
return response.json()['result'] if util.is_success(response) else None
def read_children_resources(self, child_type, address=[], runtime=False):
"""
Returns a list of the resources of all child resources of a
given type.
"""
response = self.execute('read-children-resources',
{'child-type': child_type,
'include-runtime': runtime},
address)
logger.debug(response.json())
return response.json()['result'] if util.is_success(response) else None
def read_operation_names(self, address=[]):
""" Returns a list of the names of all the operations the resource
supports. """
response = self.execute('read-operation-names',
address)
return response.json()['result'] if util.is_success(response) else None
def read_operation_description(self, name, address=[]):
""" Returns the description of an operation, along with details of
its parameter types and its return value.
"""
response = self.execute('read-operation-description',
{'name': name},
address)
return response.json()['result'] if util.is_success(response) else None
def read_children_types(self, address=[]):
""" Returns a list of the types of child resources the resource
supports. """
response = self.execute('read-children-types',
address)
return response.json()['result'] if util.is_success(response) else None
def version(self):
""" Prints version of WildFly. """
result = self.read_attribute('release-version')
return result
def get_raw_server_groups_info(self):
"""
Run the equivalent WildFly CLI command /server-group=*:read-resource
:return a dictionary representing the response
"""
address = [{'server-group': '*'}]
resp = self.read_resource(address)
return resp.json()
def get_server_name(self, host):
"""
        Return a single server name for the given WildFly host.
Runs the equivalent WF CLI command: ls /host=<host>
:param host A Wildfly host as specified by the `ls /hosts` CLI command
:return A string representing a WF Server name for the given WF host.
"""
logger.debug("Retrieving server name for host '{host}'."
.format(host=host))
address = [{'host': host}, {'server': '*'}]
result = self.read_resource(address).json()
logger.debug("Retrieved server name for wf host {host}. Response is "
"{resp}".format(host=host, resp=result))
status = result.get(KEY_OUTCOME, None)
if status != 'success':
            raise Exception("Something bad happened when trying to get servers "
"of WF host {host}".format(host=host))
logger.debug("Command is successful, processing response now...")
server = None
if KEY_RESULT in result and result[KEY_RESULT]:
for list_item in result[KEY_RESULT]:
address = list_item.get('address', [])
for item in address:
if 'server' in item:
server = item['server']
return server
def get_server_group(self, host, server_name):
"""
Gets the server group associated with a WF host and WF server. Runs
the equivalent WF CLI command:
/host=<host>/server=<server>:read_attribute(name=server-group,include-defaults=true)
:param host The WildFly host
:param server_name The WildFly server
:return The value of the server-group
"""
logger.debug("Retrieving server-group for host '{host}' and server "
"'{server}'...".format(host=host,
server=server_name))
address = [{'host': host}, {'server': server_name}]
val = self.read_attribute('server-group', address, include_defaults=True)
logger.debug("WildFly host {host} and server {server} has a server-group"
" of {sg}".format(host=host,
server=server_name,
sg=val))
return val
def get_hostname(self, host, svr_name):
"""
        Returns the fully qualified host name associated with a given WF host
and WF server. Runs the equivalent WF CLI command:
/host=<host>/server=<server>/core-service=server-environment:read_attribute(name=qualified-host-name,include-defaults=true)
:param host The WildFly host
:param svr_name The WildFly server
:return The associated qualified host name
"""
logger.debug("Retrieving qualified hostname for host '{host}' and "
"server '{server}'...".format(host=host,
server=svr_name))
address = [{'host': host},
{'server': svr_name},
{'core-service': 'server-environment'}]
val = self.read_attribute('qualified-host-name',
address,
include_defaults=True)
logger.debug("WildFly host {wfhost} and server {server} has a hostname "
"of {host}".format(wfhost=host,
server=svr_name,
host=val))
return val
def get_server_group_host(self, server_group):
"""
        Returns the hostnames on which a server group is running
        :param server_group The server group
        :return A list of hostnames associated with the server group
"""
if "_hostname_map" not in self.__dict__ or \
self._hostname_map is None or \
len(self._hostname_map) == 0:
# Initialize the hostname map if self._hostname_map is not defined
# in the class object, or is None, or is an empty dictionary
self._hostname_map = self._get_hostname_map()
logger.info("Getting server group {sg} hostname.".format(
sg=server_group))
hostnames = [hostname
for wf_host in self._hostname_map
for wf_server in self._hostname_map[wf_host]
for sg, hostname in self._hostname_map[wf_host][wf_server].items()
if server_group == sg]
logger.info("Server Group {sg} is running on hostnames "
"{hostnames}".format(sg=server_group,
hostnames=", ".join(hostnames)))
return hostnames
def get_application_hostnames(self, application, ext=None):
"""
Get the hostname where an application is deployed
:param application The application
:param ext The application extension e.g. war, jar
:return A list of host names where the application is running
"""
# Let's find the server groups that the apps is deployed against,
# because an app can be deployed to more than one server group
if ext:
application = ".".join([application, ext])
logger.info("Getting hostnames where application {app} is running."
.format(app=application))
# Get a list of deployed all
sg_info = self.get_deployed_apps()
server_groups = [sg
for sg in sg_info
for app in sg_info[sg]
if application in app]
logger.info("Application {app} is deployed in server-groups: "
"{sg}".format(app=application,
sg=", ".join(server_groups)))
hosts_list = set([host
for sg in server_groups
for host in self.get_server_group_host(sg)])
logger.info("Application {app} is deployed on host names: "
"{hosts}".format(app=application,
hosts=", ".join(hosts_list)))
return hosts_list
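# A hedged usage sketch: the host name and credentials below are placeholder
# assumptions, and the calls simply exercise methods defined above against a
# running WildFly management endpoint.
if __name__ == '__main__':
    client = Client('localhost',
                    port='9990',
                    username='admin',
                    password='admin')
    # Release version via read_attribute('release-version').
    print(client.version())
    # Names of the configured server groups.
    print(client.read_children_names('server-group'))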
| 39.696049
| 135
| 0.552374
|
37c9702802e994d72ec588bcb654f21f5a644b98
| 1,232
|
py
|
Python
|
pyxpiral/tests/pyxpiral_test.py
|
elcodedocle/pyxpiral
|
cc2b104b386c3f6d49e28c21e27eaa9996e1938c
|
[
"MIT"
] | null | null | null |
pyxpiral/tests/pyxpiral_test.py
|
elcodedocle/pyxpiral
|
cc2b104b386c3f6d49e28c21e27eaa9996e1938c
|
[
"MIT"
] | null | null | null |
pyxpiral/tests/pyxpiral_test.py
|
elcodedocle/pyxpiral
|
cc2b104b386c3f6d49e28c21e27eaa9996e1938c
|
[
"MIT"
] | null | null | null |
"""
Unitary tests for pyxpiral.py.
:author: elcodedocle
:contact: gael.abadin@gmail.com
"""
# pylint:disable=C0103,C0111,W0212,W0611
import logging
import unittest
from .. import __main__ as pyxpiral
class TestPyxpiral(unittest.TestCase):
"""
Unitary tests for Pyxpiral.
"""
@classmethod
def setUpClass(cls):
'''
Global setUp.
'''
logging.basicConfig(level=logging.INFO)
def setUp(self):
'''
Test setUp.
'''
self.ppl = pyxpiral.Pyxpiral()
self.message = \
"Never go full electro (AKA Keep calm and read bits cycling in squared spirals)."
def test_encode(self):
self.ppl.encode(self.message)
def test_decode(self):
image = self.ppl.encode(self.message, upscale=1)
self.assertEqual(self.ppl.decode(image, downscale=1), self.message)
def test_encode_fractal(self):
images = self.ppl.encode_fractal(self.message, upscale=1)
self.assertEqual(self.ppl.decode(images[0], downscale=1), self.message)
def tearDown(self):
'''
Test tearDown.
'''
@classmethod
def tearDownClass(cls):
'''
Global tearDown.
'''
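# The suite above runs under the standard unittest runner; assuming the
# repository layout, something like:
#
#     python -m unittest pyxpiral.tests.pyxpiral_test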
| 20.881356
| 93
| 0.609578
|
b24738a8fb8ba74cef0887b993642cfca33fecf4
| 23,068
|
py
|
Python
|
exotic_ld/ld_computation.py
|
hrwakeford/ExoTiC-LD
|
5fdd86bcaded787e2e0d9d9c1ca54fa1ceda421a
|
[
"MIT"
] | null | null | null |
exotic_ld/ld_computation.py
|
hrwakeford/ExoTiC-LD
|
5fdd86bcaded787e2e0d9d9c1ca54fa1ceda421a
|
[
"MIT"
] | null | null | null |
exotic_ld/ld_computation.py
|
hrwakeford/ExoTiC-LD
|
5fdd86bcaded787e2e0d9d9c1ca54fa1ceda421a
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
import pandas as pd
from scipy.io import readsav
from astropy.modeling.fitting import LevMarLSQFitter
from scipy.interpolate import interp1d, splev, splrep
from exotic_ld.ld_laws import quadratic_limb_darkening, \
nonlinear_limb_darkening
class StellarLimbDarkening(object):
"""
Stellar limb darkening class.
Compute the limb darkening coefficients for either 1D or 3D
stellar models. Limb darkening coefficients are available for
linear, quadratic, 3-parameter, and 4-parameter laws.
Parameters
----------
M_H : float
Stellar metallicity [dex].
Teff : float
Stellar effective temperature [kelvin].
logg : float
Stellar log(g) [dex].
ld_model : string, '1D' or '3D'
Use the 1D or 3D stellar models. Default '1D'.
ld_data_path : string
Path to ExoTiC-LD_data directory downloaded from Zenodo. These
data include the stellar models and instrument throughputs. See
the docs for further details.
Methods
-------
compute_linear_ld_coeffs()
compute_quadratic_ld_coeffs()
compute_3_parameter_non_linear_ld_coeffs()
compute_4_parameter_non_linear_ld_coeffs()
Examples
--------
>>> from exotic_ld import StellarLimbDarkening
>>> sld = StellarLimbDarkening(
M_H=0.1, Teff=6045, logg=4.2, ld_model='1D',
ld_data_path='path/to/ExoTiC-LD_data')
>>> c1, c2 = sld.compute_quadratic_ld_coeffs(
wavelength_range=np.array([20000., 30000.]),
mode='JWST_NIRSpec_prism')
"""
def __init__(self, M_H, Teff, logg, ld_model='1D', ld_data_path=''):
self.M_H_input = M_H
self.M_H_matched = None
self.Teff_input = Teff
self.Teff_matched = None
self.logg_input = logg
self.logg_matched = None
self.ld_model = ld_model
self.ld_data_path = ld_data_path
self._stellar_wavelengths = None
self._stellar_fluxes = None
self._mu = None
if ld_model == '1D':
# 1d data structures.
self._match_1d_stellar_model()
elif ld_model == '3D':
# 3d data structures.
self._match_3d_stellar_model()
else:
raise ValueError('ld_model must be either `1D` or `3D`.')
def __repr__(self):
return 'Stellar limb darkening: {} models.'.format(self.ld_model)
def _match_1d_stellar_model(self):
""" Find closest matching 1d stellar model. """
# 1d stellar models directory.
stellar_data_path = os.path.join(self.ld_data_path, 'Kurucz')
stellar_data_index = os.path.join(stellar_data_path, 'kuruczlist.sav')
# Define parameter grids available.
M_H_grid = np.array(
[-0.1, -0.2, -0.3, -0.5, -1.0, -1.5, -2.0, -2.5, -3.0,
-3.5, -4.0, -4.5, -5.0, 0.0, 0.1, 0.2, 0.3, 0.5, 1.0])
Teff_grid = np.array(
[3500, 3750, 4000, 4250, 4500, 4750, 5000,
5250, 5500, 5750, 6000, 6250, 6500])
logg_grid = np.array([4.0, 4.5, 5.0])
# Define corresponding model load positions.
M_H_grid_load = np.array(
[0, 1, 2, 3, 5, 7, 8, 9, 10, 11, 12,
13, 14, 17, 20, 21, 22, 23, 24])
Teff_logg_grid_load = np.array(
[[8, 19, 30, 41, 52, 63, 74, 85, 96, 107, 118, 129, 138],
[9, 20, 31, 42, 53, 64, 75, 86, 97, 108, 119, 129, 139],
[10, 21, 32, 43, 54, 65, 76, 87, 98, 109, 120, 130, 140]])
# Select metallicity, Teff, and logg of stellar model.
matched_M_H_idx = (abs(self.M_H_input - M_H_grid)).argmin()
self.M_H_matched = M_H_grid[matched_M_H_idx]
matched_M_H_load = M_H_grid_load[matched_M_H_idx]
matched_Teff_idx = (abs(self.Teff_input - Teff_grid)).argmin()
self.Teff_matched = Teff_grid[matched_Teff_idx]
matched_logg_idx = (abs(self.logg_input - logg_grid)).argmin()
self.logg_matched = logg_grid[matched_logg_idx]
idl_sf_list = readsav(stellar_data_index)
stellar_model_name = bytes.decode(idl_sf_list['li'][matched_M_H_load])
load_number = Teff_logg_grid_load[matched_logg_idx][matched_Teff_idx]
# Read in the stellar model data.
n_header_rows = 3
n_freq_intervals = 1221
line_skip_data = (load_number + 1) * n_header_rows \
+ load_number * n_freq_intervals
try:
stellar_data = pd.read_fwf(
os.path.join(stellar_data_path, stellar_model_name),
widths=[9, 10, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6],
header=None, skiprows=line_skip_data, nrows=n_freq_intervals)
except FileNotFoundError as err:
raise FileNotFoundError(
'File {}, corresponding to M_H={}, Teff={}, and logg={} '
'does not exist in the stellar models. \n Please try a '
'different combination of stellar parameters.'.format(
stellar_model_name, self.M_H_matched, self.Teff_matched,
self.logg_matched))
# Unpack the data.
self._stellar_wavelengths = stellar_data[0].values * 10.
self._stellar_fluxes = stellar_data.values.T[1:18]
self._stellar_fluxes[0] /= self._stellar_wavelengths**2
self._stellar_fluxes[1:17] *= self._stellar_fluxes[0] / 100000.
self._mu = np.array(
[1.000, .900, .800, .700, .600, .500, .400, .300, .250,
.200, .150, .125, .100, .075, .050, .025, .010])
def _match_3d_stellar_model(self):
""" Find closest matching 3d stellar model. """
# 3d stellar models directory.
stellar_data_path = os.path.join(self.ld_data_path, '3DGrid')
# Define parameter grids available.
M_H_grid = np.array([-3.0, -2.0, -1.0, 0.0])
Teff_grid = np.array([4000, 4500, 5000, 5500, 5777, 6000, 6500, 7000])
logg_grid = np.array([[1.5, 2.0, 2.5],
[2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0],
[2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0],
[3.0, 3.5, 4.0, 4.5, 5.0],
[4.4],
[3.5, 4.0, 4.5],
[4.0, 4.5],
[4.5]], dtype=object)
# Define corresponding model load positions.
M_H_grid_load = np.array(['30', '20', '10', '00'])
Teff_grid_load = np.array(['40', '45', '50', '55', '5777',
'60', '65', '70'])
logg_grid_load = np.array([['15', '20', '25'],
['20', '25', '30', '35', '40', '45', '50'],
['20', '25', '30', '35', '40', '45', '50'],
['30', '35', '40', '45', '50'],
['44'],
['35', '40', '45'],
['40', '45'],
['45']], dtype=object)
# Select metallicity, Teff, and logg of stellar model.
matched_M_H_idx = (abs(self.M_H_input - M_H_grid)).argmin()
self.M_H_matched = M_H_grid[matched_M_H_idx]
matched_Teff_idx = (abs(self.Teff_input - Teff_grid)).argmin()
self.Teff_matched = Teff_grid[matched_Teff_idx]
matched_logg_idx = (abs(
self.logg_input - np.array(logg_grid[matched_Teff_idx]))).argmin()
self.logg_matched = logg_grid[matched_Teff_idx][matched_logg_idx]
load_file = 'mmu_t' + Teff_grid_load[matched_Teff_idx] \
+ 'g' + logg_grid_load[matched_Teff_idx][matched_logg_idx] \
+ 'm' + M_H_grid_load[matched_M_H_idx] + 'v05.flx'
# Read in the stellar model data.
try:
sav = readsav(os.path.join(stellar_data_path, load_file))
except FileNotFoundError as err:
raise FileNotFoundError(
'File {}, corresponding to M_H={}, Teff={}, and logg={} '
'does not exist in the stellar models. \n Please try a '
'different combination of stellar parameters.'.format(
load_file, self.M_H_matched, self.Teff_matched,
self.logg_matched))
# Unpack the data.
self._stellar_wavelengths = sav['mmd'].lam[0]
self._stellar_fluxes = np.array(sav['mmd'].flx.tolist())
self._mu = sav['mmd'].mu
def compute_linear_ld_coeffs(self, wavelength_range, mode,
custom_wavelengths=None,
custom_throughput=None):
"""
Compute the linear limb-darkening coefficients.
Parameters
----------
wavelength_range : array_like, (start, end)
Wavelength range over which to compute the limb-darkening
coefficients. Wavelengths must be given in angstroms and
the values must fall within the supported range of the
corresponding instrument mode.
mode : string
Instrument mode which defines the throughput.
Modes supported for Hubble:
'HST_STIS_G430L', 'HST_STIS_G750L', 'HST_WFC3_G280p1',
'HST_WFC3_G280n1', 'HST_WFC3_G102', 'HST_WFC3_G141'.
Modes supported for JWST:
'JWST_NIRSpec_Prism', 'JWST_NIRSpec_G395H',
'JWST_NIRSpec_G395M', 'JWST_NIRSpec_G235H',
'JWST_NIRSpec_G235M', 'JWST_NIRSpec_G140H-f100',
'JWST_NIRSpec_G140M-f100', 'JWST_NIRSpec_G140H-f070',
'JWST_NIRSpec_G140M-f070', 'JWST_NIRISS_SOSSo1',
'JWST_NIRISS_SOSSo2', 'JWST_NIRCam_F322W2',
'JWST_NIRCam_F444', 'JWST_MIRI_LRS'.
Modes for photometry:
'Spitzer_IRAC_Ch1', 'Spitzer_IRAC_Ch2', 'TESS'.
Alternatively, use 'custom' mode. In this case the custom
wavelength and custom throughput must also be specified.
custom_wavelengths : array_like, optional
Wavelengths corresponding to custom_throughput [angstroms].
custom_throughput : array_like, optional
Throughputs corresponding to custom_wavelengths.
Returns
-------
(c1, ) : tuple
Limb-darkening coefficients for the linear law.
"""
# Compute the stellar limb-darkening.
mu, intensity = self._limb_dark_fit(wavelength_range, mode,
custom_wavelengths,
custom_throughput)
# Fit linear limb-darkening law.
fitter = LevMarLSQFitter()
linear = nonlinear_limb_darkening()
linear.c0.fixed = True
linear.c2.fixed = True
linear.c3.fixed = True
linear = fitter(linear, mu, intensity)
return (linear.c1.value, )
def compute_quadratic_ld_coeffs(self, wavelength_range, mode,
custom_wavelengths=None,
custom_throughput=None):
"""
Compute the quadratic limb-darkening coefficients.
Parameters
----------
wavelength_range : array_like, (start, end)
Wavelength range over which to compute the limb-darkening
coefficients. Wavelengths must be given in angstroms and
the values must fall within the supported range of the
corresponding instrument mode.
mode : string
Instrument mode which defines the throughput.
Modes supported for Hubble:
'HST_STIS_G430L', 'HST_STIS_G750L', 'HST_WFC3_G280p1',
'HST_WFC3_G280n1', 'HST_WFC3_G102', 'HST_WFC3_G141'.
Modes supported for JWST:
'JWST_NIRSpec_Prism', 'JWST_NIRSpec_G395H',
'JWST_NIRSpec_G395M', 'JWST_NIRSpec_G235H',
'JWST_NIRSpec_G235M', 'JWST_NIRSpec_G140H-f100',
'JWST_NIRSpec_G140M-f100', 'JWST_NIRSpec_G140H-f070',
'JWST_NIRSpec_G140M-f070', 'JWST_NIRISS_SOSSo1',
'JWST_NIRISS_SOSSo2', 'JWST_NIRCam_F322W2',
'JWST_NIRCam_F444', 'JWST_MIRI_LRS'.
Modes for photometry:
'Spitzer_IRAC_Ch1', 'Spitzer_IRAC_Ch2', 'TESS'.
Alternatively, use 'custom' mode. In this case the custom
wavelength and custom throughput must also be specified.
custom_wavelengths : array_like, optional
Wavelengths corresponding to custom_throughput [angstroms].
custom_throughput : array_like, optional
Throughputs corresponding to custom_wavelengths.
Returns
-------
(c1, c2) : tuple
Limb-darkening coefficients for the quadratic law.
"""
# Compute the stellar limb-darkening.
mu, intensity = self._limb_dark_fit(wavelength_range, mode,
custom_wavelengths,
custom_throughput)
        # Fit quadratic limb-darkening law.
fitter = LevMarLSQFitter()
quadratic = quadratic_limb_darkening()
quadratic = fitter(quadratic, mu, intensity)
return quadratic.parameters
def compute_3_parameter_non_linear_ld_coeffs(self, wavelength_range, mode,
custom_wavelengths=None,
custom_throughput=None):
"""
Compute the three-parameter non-linear limb-darkening coefficients.
Parameters
----------
wavelength_range : array_like, (start, end)
Wavelength range over which to compute the limb-darkening
coefficients. Wavelengths must be given in angstroms and
the values must fall within the supported range of the
corresponding instrument mode.
mode : string
Instrument mode which defines the throughput.
Modes supported for Hubble:
'HST_STIS_G430L', 'HST_STIS_G750L', 'HST_WFC3_G280p1',
'HST_WFC3_G280n1', 'HST_WFC3_G102', 'HST_WFC3_G141'.
Modes supported for JWST:
'JWST_NIRSpec_Prism', 'JWST_NIRSpec_G395H',
'JWST_NIRSpec_G395M', 'JWST_NIRSpec_G235H',
'JWST_NIRSpec_G235M', 'JWST_NIRSpec_G140H-f100',
'JWST_NIRSpec_G140M-f100', 'JWST_NIRSpec_G140H-f070',
'JWST_NIRSpec_G140M-f070', 'JWST_NIRISS_SOSSo1',
'JWST_NIRISS_SOSSo2', 'JWST_NIRCam_F322W2',
'JWST_NIRCam_F444', 'JWST_MIRI_LRS'.
Modes for photometry:
'Spitzer_IRAC_Ch1', 'Spitzer_IRAC_Ch2', 'TESS'.
Alternatively, use 'custom' mode. In this case the custom
wavelength and custom throughput must also be specified.
custom_wavelengths : array_like, optional
Wavelengths corresponding to custom_throughput [angstroms].
custom_throughput : array_like, optional
Throughputs corresponding to custom_wavelengths.
Returns
-------
(c1, c2, c3) : tuple
Limb-darkening coefficients for the three-parameter
non-linear law.
"""
# Compute the stellar limb-darkening.
mu, intensity = self._limb_dark_fit(wavelength_range, mode,
custom_wavelengths,
custom_throughput)
        # Fit three-parameter non-linear limb-darkening law.
fitter = LevMarLSQFitter()
corot_3_param = nonlinear_limb_darkening()
corot_3_param.c0.fixed = True
corot_3_param = fitter(corot_3_param, mu, intensity)
return corot_3_param.parameters[1:]
def compute_4_parameter_non_linear_ld_coeffs(self, wavelength_range, mode,
custom_wavelengths=None,
custom_throughput=None):
"""
Compute the four-parameter non-linear limb-darkening coefficients.
Parameters
----------
wavelength_range : array_like, (start, end)
Wavelength range over which to compute the limb-darkening
coefficients. Wavelengths must be given in angstroms and
the values must fall within the supported range of the
corresponding instrument mode.
mode : string
Instrument mode which defines the throughput.
Modes supported for Hubble:
'HST_STIS_G430L', 'HST_STIS_G750L', 'HST_WFC3_G280p1',
'HST_WFC3_G280n1', 'HST_WFC3_G102', 'HST_WFC3_G141'.
Modes supported for JWST:
'JWST_NIRSpec_Prism', 'JWST_NIRSpec_G395H',
'JWST_NIRSpec_G395M', 'JWST_NIRSpec_G235H',
'JWST_NIRSpec_G235M', 'JWST_NIRSpec_G140H-f100',
'JWST_NIRSpec_G140M-f100', 'JWST_NIRSpec_G140H-f070',
'JWST_NIRSpec_G140M-f070', 'JWST_NIRISS_SOSSo1',
'JWST_NIRISS_SOSSo2', 'JWST_NIRCam_F322W2',
'JWST_NIRCam_F444', 'JWST_MIRI_LRS'.
Modes for photometry:
'Spitzer_IRAC_Ch1', 'Spitzer_IRAC_Ch2', 'TESS'.
Alternatively, use 'custom' mode. In this case the custom
wavelength and custom throughput must also be specified.
custom_wavelengths : array_like, optional
Wavelengths corresponding to custom_throughput [angstroms].
custom_throughput : array_like, optional
Throughputs corresponding to custom_wavelengths.
Returns
-------
(c1, c2, c3, c4) : tuple
Limb-darkening coefficients for the four-parameter
non-linear law.
"""
# Compute the stellar limb-darkening.
mu, intensity = self._limb_dark_fit(wavelength_range, mode,
custom_wavelengths,
custom_throughput)
        # Fit four-parameter non-linear limb-darkening law.
fitter = LevMarLSQFitter()
corot_4_param = nonlinear_limb_darkening()
corot_4_param = fitter(corot_4_param, mu, intensity)
return corot_4_param.parameters
def _limb_dark_fit(self, wavelength_range, mode, custom_wavelengths,
custom_throughput):
""" Compute stellar limb-darkening coefficients. """
if mode == 'custom':
# Custom throughput provided.
sen_wavelengths = custom_wavelengths
sen_throughputs = custom_throughput
else:
# Read in mode specific throughput.
sen_wavelengths, sen_throughputs = \
self._read_throughput_data(mode)
# Pad arrays.
sen_wavelengths = self._pad_array(
sen_wavelengths,
[sen_wavelengths[0] - 2., sen_wavelengths[0] - 1.],
[sen_wavelengths[-1] + 1., sen_wavelengths[-1] + 2.])
sen_throughputs = self._pad_array(
sen_throughputs, [0., 0.], [0., 0.])
bin_wavelengths = self._pad_array(
wavelength_range,
[wavelength_range[0] - 2., wavelength_range[0] - 1.],
[wavelength_range[-1] + 1., wavelength_range[-1] + 2.])
# Interpolate throughput onto stellar model wavelengths.
interpolator = interp1d(sen_wavelengths, sen_throughputs,
bounds_error=False, fill_value=0)
sen_interp = interpolator(self._stellar_wavelengths)
# Interpolate bin mask onto stellar model wavelengths.
bin_mask = np.zeros(bin_wavelengths.shape[0])
bin_mask[2:-2] = 1.
interpolator = interp1d(bin_wavelengths, bin_mask,
bounds_error=False, fill_value=0)
bin_mask_interp = interpolator(self._stellar_wavelengths)
if np.all(bin_mask_interp == 0):
# High resolution, mask interpolated to nothing.
# Select nearest point in stellar wavelength grid.
mid_bin_wavelengths = np.mean(bin_wavelengths)
nearest_stellar_wavelength_idx = (
abs(mid_bin_wavelengths - self._stellar_wavelengths)).argmin()
bin_mask_interp[nearest_stellar_wavelength_idx] = 1.
# Integrate per mu over spectra computing synthetic photometric points.
phot = np.zeros(self._stellar_fluxes.shape[0])
f = self._stellar_wavelengths * sen_interp * bin_mask_interp
tot = self._int_tabulated(self._stellar_wavelengths, f)
if tot == 0.:
raise ValueError(
'Input wavelength range {}-{} does not overlap with instrument '
'mode {} with range {}-{}.'.format(
wavelength_range[0], wavelength_range[-1], mode,
sen_wavelengths[0], sen_wavelengths[-1]))
for i in range(self._mu.shape[0]):
f_cal = self._stellar_fluxes[i, :]
phot[i] = self._int_tabulated(
self._stellar_wavelengths, f * f_cal, sort=True) / tot
if self.ld_model == '1D':
yall = phot / phot[0]
elif self.ld_model == '3D':
yall = phot / phot[10]
return self._mu[1:], yall[1:]
def _read_throughput_data(self, mode):
""" Read in throughput data. """
sensitivity_file = os.path.join(
self.ld_data_path,
'Sensitivity_files/{}_throughput.csv'.format(mode))
sensitivity_data = pd.read_csv(sensitivity_file)
sensitivity_wavelengths = sensitivity_data['wave'].values
sensitivity_throughputs = sensitivity_data['tp'].values
return sensitivity_wavelengths, sensitivity_throughputs
def _pad_array(self, array, values_start, values_end):
""" Pad array with values. """
array = np.concatenate(
(np.array(values_start), array, np.array(values_end)))
return array
def _int_tabulated(self, X, F, sort=False):
Xsegments = len(X) - 1
        # Sort vectors into ascending order unless the caller marks them as sorted.
if not sort:
ii = np.argsort(X)
X = X[ii]
F = F[ii]
while (Xsegments % 4) != 0:
Xsegments = Xsegments + 1
Xmin = np.min(X)
Xmax = np.max(X)
# Uniform step size.
h = (Xmax + 0.0 - Xmin) / Xsegments
# Compute the interpolates at Xgrid.
# x values of interpolates >> Xgrid = h * FINDGEN(Xsegments + 1L)+Xmin
z = splev(h * np.arange(Xsegments + 1) + Xmin, splrep(X, F))
# Compute the integral using the 5-point Newton-Cotes formula.
ii = (np.arange((len(z) - 1) / 4, dtype=int) + 1) * 4
return np.sum(2.0 * h * (7.0 * (z[ii - 4] + z[ii])
+ 32.0 * (z[ii - 3] + z[ii - 1])
+ 12.0 * z[ii - 2]) / 45.0)
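# A hedged usage sketch mirroring the class docstring; the stellar parameters,
# data path, wavelength range and instrument mode are illustrative placeholders.
#
#     sld = StellarLimbDarkening(
#         M_H=0.1, Teff=6045, logg=4.2, ld_model='1D',
#         ld_data_path='path/to/ExoTiC-LD_data')
#     c1, c2, c3, c4 = sld.compute_4_parameter_non_linear_ld_coeffs(
#         wavelength_range=np.array([20000., 30000.]),
#         mode='JWST_NIRSpec_prism')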
| 43.037313
| 80
| 0.577597
|
97c27953832563cfd06be77b30fe4c7052ef111e
| 4,549
|
py
|
Python
|
molteniron/tests/testDoClean.py
|
openstack/molteniron
|
348138603465cd4bfecfa8664435851225af29ef
|
[
"Apache-2.0"
] | 8
|
2016-11-20T08:00:59.000Z
|
2019-01-28T22:04:40.000Z
|
molteniron/tests/testDoClean.py
|
openstack/molteniron
|
348138603465cd4bfecfa8664435851225af29ef
|
[
"Apache-2.0"
] | null | null | null |
molteniron/tests/testDoClean.py
|
openstack/molteniron
|
348138603465cd4bfecfa8664435851225af29ef
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""
Tests the MoltenIron doClean command.
"""
# Copyright (c) 2016 IBM Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable-msg=C0103
import argparse
import os
import sys
from pkg_resources import resource_filename
import yaml
from molteniron import moltenirond
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Molteniron CLI tool")
parser.add_argument("-c",
"--conf-dir",
action="store",
type=str,
dest="conf_dir",
help="The directory where configuration is stored")
args = parser.parse_args(sys.argv[1:])
if args.conf_dir:
if not os.path.isdir(args.conf_dir):
msg = "Error: %s is not a valid directory" % (args.conf_dir, )
print(msg, file=sys.stderr)
sys.exit(1)
YAML_CONF = os.path.realpath("%s/conf.yaml" % (args.conf_dir, ))
else:
YAML_CONF = resource_filename("molteniron", "conf.yaml")
with open(YAML_CONF, "r") as fobj:
conf = yaml.load(fobj, Loader=yaml.SafeLoader)
request1 = {
"name": "pkvmci816",
"ipmi_ip": "10.228.219.134",
"status": "ready",
"provisioned": "",
"timestamp": "",
"allocation_pool": "10.228.112.10,10.228.112.11"
}
node1 = {
"ipmi_user": "user",
"ipmi_password": "e05cc5f061426e34",
"port_hwaddr": "f8:de:29:33:a4:ed",
"cpu_arch": "ppc64el",
"cpus": 20,
"ram_mb": 51000,
"disk_gb": 500
}
request2 = {
"name": "pkvmci818",
"ipmi_ip": "10.228.219.133",
"status": "ready",
"provisioned": "",
"timestamp": "",
"allocation_pool": "10.228.112.8,10.228.112.9"
}
node2 = {
"ipmi_user": "user",
"ipmi_password": "0614d63b6635ea3d",
"port_hwaddr": "4c:c5:da:28:2c:2d",
"cpu_arch": "ppc64el",
"cpus": 20,
"ram_mb": 51000,
"disk_gb": 500
}
request3 = {
"name": "pkvmci851",
"ipmi_ip": "10.228.118.129",
"status": "used",
"provisioned": "7a72eccd-3153-4d08-9848-c6d3b1f18f9f",
"timestamp": "1460489832",
"allocation_pool": "10.228.112.12,10.228.112.13"
}
node3 = {
"ipmi_user": "user",
"ipmi_password": "928b056134e4d770",
"port_hwaddr": "53:76:c6:09:50:64",
"cpu_arch": "ppc64el",
"cpus": 20,
"ram_mb": 51000,
"disk_gb": 500
}
request4 = {
"name": "pkvmci853",
"ipmi_ip": "10.228.118.133",
"status": "used",
"provisioned": "6b8823ef-3e14-4811-98b9-32e27397540d",
"timestamp": "1460491566",
"allocation_pool": "10.228.112.14,10.228.112.15"
}
node4 = {
"ipmi_user": "user",
"ipmi_password": "33f448a4fc176492",
"port_hwaddr": "85:e0:73:e9:fc:ca",
"cpu_arch": "ppc64el",
"cpus": 20,
"ram_mb": 51000,
"disk_gb": 500
}
# 8<-----8<-----8<-----8<-----8<-----8<-----8<-----8<-----8<-----8<-----
database = moltenirond.DataBase(conf, moltenirond.TYPE_SQLITE_MEMORY)
ret = database.addBMNode(request1, node1)
print(ret)
assert ret == {'status': 200}
ret = database.addBMNode(request2, node2)
print(ret)
assert ret == {'status': 200}
ret = database.addBMNode(request3, node3)
print(ret)
assert ret == {'status': 200}
ret = database.addBMNode(request4, node4)
print(ret)
assert ret == {'status': 200}
ret = database.doClean(1)
print(ret)
assert ret == {'status': 400, 'message': 'The node at 1 has status ready'}
ret = database.doClean(2)
print(ret)
assert ret == {'status': 400, 'message': 'The node at 2 has status ready'}
ret = database.doClean(3)
print(ret)
assert ret == {'status': 200}
ret = database.doClean(4)
print(ret)
assert ret == {'status': 200}
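# Invocation sketch (the configuration directory is a placeholder): the script
# reads conf.yaml either from the packaged default location or from a directory
# passed via -c/--conf-dir, e.g.
#
#     python testDoClean.py -c /etc/molteniron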
| 28.974522
| 78
| 0.568037
|
efa3c386858cc8c37ec9479756f312faa0b14244
| 2,049
|
py
|
Python
|
sparse_data/exp_framework/random_forest.py
|
gunpowder78/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | 1
|
2022-03-13T21:48:52.000Z
|
2022-03-13T21:48:52.000Z
|
sparse_data/exp_framework/random_forest.py
|
gunpowder78/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | null | null | null |
sparse_data/exp_framework/random_forest.py
|
gunpowder78/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | 1
|
2022-03-30T07:20:29.000Z
|
2022-03-30T07:20:29.000Z
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train and evaluate random forest classifiers.
Train and evaluate random forest classification models using scikit-learn.
Perform parameter tuning with grid search.
"""
from sklearn import ensemble
from sparse_data.exp_framework.utils import generic_pipeline
def pipeline(x_train,
y_train,
x_test,
y_test,
param_dict=None,
problem='classification'):
"""Trains and evaluates a random forest classifier.
Args:
x_train: np.array or scipy.sparse.*matrix array of features of training data
y_train: np.array 1-D array of class labels of training data
x_test: np.array or scipy.sparse.*matrix array of features of test data
y_test: np.array 1-D array of class labels of the test data
param_dict: {string: ?} dictionary of parameters and their values
problem: string type of learning problem; values = 'classification',
'regression'
Returns:
model: sklearn.ensemble.RandomForestClassifier
trained random forest model
metrics: {str: float}
dictionary of metric scores
"""
assert problem in ['classification', 'regression']
if param_dict is None:
param_dict = {}
if problem == 'regression':
model = ensemble.RandomForestRegressor(**param_dict)
else:
model = ensemble.RandomForestClassifier(**param_dict)
return generic_pipeline(
model, x_train, y_train, x_test, y_test, problem=problem)
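# A hedged usage sketch; the input arrays are assumed to be prepared elsewhere,
# the n_estimators value is illustrative, and `metrics` is whatever
# generic_pipeline returns.
#
#     model, metrics = pipeline(x_train, y_train, x_test, y_test,
#                               param_dict={'n_estimators': 100},
#                               problem='classification')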
| 33.048387
| 80
| 0.725232
|
c950a29e711dec81dd7c9eae775b8480d1417be4
| 2,439
|
py
|
Python
|
app/crud/user.py
|
shiniao/soul-api
|
1438281c2dce237d735f7309c2ddb606c8d01e1e
|
[
"Apache-2.0"
] | 1
|
2021-02-27T09:05:40.000Z
|
2021-02-27T09:05:40.000Z
|
app/crud/user.py
|
shiniao/soulapi
|
1438281c2dce237d735f7309c2ddb606c8d01e1e
|
[
"Apache-2.0"
] | null | null | null |
app/crud/user.py
|
shiniao/soulapi
|
1438281c2dce237d735f7309c2ddb606c8d01e1e
|
[
"Apache-2.0"
] | null | null | null |
from typing import Optional, Union, Dict, Any
from sqlalchemy.orm import Session
from .base import CRUDBase
from ..models.user import User
from ..schemas.user import UserCreate, UserUpdate
from ..utils import get_hashed_password, verify_password
class CRUDUser(CRUDBase[User, UserCreate, UserUpdate]):
"""crud for user"""
def get_by_email(self, db: Session, *, email: str) -> Optional[User]:
return db.query(User).filter(User.email == email).first()
def create(self, db: Session, *, obj: UserCreate, **kwargs) -> User:
"""create normal user"""
db_user = User(
email=obj.email,
hashed_password=get_hashed_password(obj.password),
full_name=obj.full_name,
created_at=obj.created_at,
updated_at=obj.updated_at,
)
if kwargs.get("is_superuser"):
db_user.is_superuser = True
if kwargs.get("is_confirm"):
db_user.is_confirm = True
db.add(db_user)
db.commit()
db.refresh(db_user)
return db_user
def create_superuser(self, db: Session, *, obj: UserCreate) -> User:
"""create super user"""
db_superuser = User(
email=obj.email,
hashed_password=get_hashed_password(obj.password),
full_name=obj.full_name,
is_superuser=True,
is_confirm=True,
created_at=obj.created_at,
updated_at=obj.updated_at,
)
db.add(db_superuser)
db.commit()
db.refresh(db_superuser)
return db_superuser
def update(
        self, db: Session, *, db_obj: User, obj: Union[UserUpdate, Dict[str, Any]]
) -> User:
if isinstance(obj, dict):
update_data = obj
else:
update_data = obj.dict(exclude_unset=True)
if update_data.get("password"):
hashed_password = get_hashed_password(update_data["password"])
del update_data["password"]
update_data["hashed_password"] = hashed_password
        return super().update(db, db_obj=db_obj, obj=update_data)
def authenticate(self, db: Session, *, email: str, password: str) -> Optional[User]:
user = self.get_by_email(db, email=email)
if not user:
return None
if not verify_password(password, user.hashed_password):
return None
return user
user = CRUDUser(User)
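# A hedged usage sketch; `db` is assumed to be an open SQLAlchemy Session and
# the credentials are placeholders.
#
#     authed = user.authenticate(db, email="someone@example.com", password="secret")
#     # returns the User on success, None for an unknown e-mail or bad password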
| 31.269231
| 88
| 0.612136
|
b804a71e19438be27a6781c6296e947462d84d4c
| 2,173
|
py
|
Python
|
scipy/sparse/csgraph/tests/test_spanning_tree.py
|
seberg/scipy
|
d8081cdd40ed8cbebd5905c0ad6c323c57d5da6e
|
[
"BSD-3-Clause"
] | 1
|
2018-10-04T15:34:14.000Z
|
2018-10-04T15:34:14.000Z
|
scipy/sparse/csgraph/tests/test_spanning_tree.py
|
seberg/scipy
|
d8081cdd40ed8cbebd5905c0ad6c323c57d5da6e
|
[
"BSD-3-Clause"
] | null | null | null |
scipy/sparse/csgraph/tests/test_spanning_tree.py
|
seberg/scipy
|
d8081cdd40ed8cbebd5905c0ad6c323c57d5da6e
|
[
"BSD-3-Clause"
] | null | null | null |
"""Test the minimum spanning tree function"""
import numpy as np
from numpy.testing import assert_
import numpy.testing as npt
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree
def test_minimum_spanning_tree():
# Create a graph with two connected components.
graph = [[0,1,0,0,0],
[1,0,0,0,0],
[0,0,0,8,5],
[0,0,8,0,1],
[0,0,5,1,0]]
graph = np.asarray(graph)
# Create the expected spanning tree.
expected = [[0,1,0,0,0],
[0,0,0,0,0],
[0,0,0,0,5],
[0,0,0,0,1],
[0,0,0,0,0]]
expected = np.asarray(expected)
# Ensure minimum spanning tree code gives this expected output.
csgraph = csr_matrix(graph)
mintree = minimum_spanning_tree(csgraph)
npt.assert_array_equal(mintree.todense(), expected,
'Incorrect spanning tree found.')
# Ensure that the original graph was not modified.
npt.assert_array_equal(csgraph.todense(), graph,
'Original graph was modified.')
# Now let the algorithm modify the csgraph in place.
mintree = minimum_spanning_tree(csgraph, overwrite=True)
npt.assert_array_equal(mintree.todense(), expected,
'Graph was not properly modified to contain MST.')
np.random.seed(1234)
for N in (5, 10, 15, 20):
# Create a random graph.
graph = 3 + np.random.random((N, N))
csgraph = csr_matrix(graph)
# The spanning tree has at most N - 1 edges.
mintree = minimum_spanning_tree(csgraph)
assert_(mintree.nnz < N)
# Set the sub diagonal to 1 to create a known spanning tree.
idx = np.arange(N-1)
graph[idx,idx+1] = 1
csgraph = csr_matrix(graph)
mintree = minimum_spanning_tree(csgraph)
# We expect to see this pattern in the spanning tree and otherwise
# have this zero.
expected = np.zeros((N, N))
expected[idx, idx+1] = 1
npt.assert_array_equal(mintree.todense(), expected,
'Incorrect spanning tree found.')
| 33.953125
| 74
| 0.601933
|
b1e7cf54220e257e24712c4f5743a39afd050ea4
| 8,933
|
py
|
Python
|
prody/__init__.py
|
kaynakb/ProDy
|
4366ad28142f51ff8a84f8a0f4ce659c0b949d55
|
[
"MIT"
] | null | null | null |
prody/__init__.py
|
kaynakb/ProDy
|
4366ad28142f51ff8a84f8a0f4ce659c0b949d55
|
[
"MIT"
] | null | null | null |
prody/__init__.py
|
kaynakb/ProDy
|
4366ad28142f51ff8a84f8a0f4ce659c0b949d55
|
[
"MIT"
] | null | null | null |
"""ProDy is a package for Protein Dynamics, Sequence, and Structure Analysis"""
__version__ = '2.0'
__release__ = __version__ # + '-dev' # comment out '-dev' before a release
import sys
import warnings
if sys.version_info[:2] < (2, 7):
sys.stderr.write('Python 2.6 and older is not supported\n')
sys.exit()
if sys.version_info[0] == 3:
if sys.version_info[1] < 4:
sys.stderr.write('Python 3.4 and older is not supported\n')
sys.exit()
try:
import numpy as np
except ImportError:
raise ImportError('Numpy is a required package for ProDy')
else:
if tuple(map(int, np.__version__.split('.')[:2])) < (1, 10):
raise ImportError('Numpy v1.10 or later is required for ProDy')
try:
import scipy
except ImportError:
raise ImportError('Scipy is a required package for ProDy')
DEPRECATION_WARNINGS = False
def deprecate(dep, alt, ver=None, sl=3):
"""Issue a deprecation warning for *dep* and recommend using *alt*."""
if ver is None:
ver = list(__version__.split('.')[:2])
ver[1] = str(int(ver[1]) + 1)
ver = '.'.join(ver)
warnings.warn('`{0:s}` is deprecated for removal in v{1:s}, use `{2:s}`.'
.format(dep, ver, alt), DeprecationWarning, stacklevel=sl)
def turnonDeprecationWarnings(action='always'):
"""Turn on deprecation warnings for the current session. By default
(``action='always'``), deprecation warnings will be printed every time
a function is called to help identification of multiple occurrence
of deprecated function and method names. When ``action='default'``
is passed, warning will be issued at the first call of a function.
The latter behavior will automatically kick in when v0.9 is released.
Until v0.9 is released, restarting the session will turn of warnings.
This function must be called as ``prody.turnonDeprecationWarnings``."""
global DEPRECATION_WARNINGS
DEPRECATION_WARNINGS = True
warnings.filterwarnings(action, category=DeprecationWarning)
_PY3K = PY3K = sys.version_info[0] > 2
PY2K = not PY3K
__all__ = ['checkUpdates', 'confProDy', 'startLogfile', 'closeLogfile', 'plog']
from . import utilities
from .utilities import *
from .utilities import LOGGER, PackageSettings
from .utilities import getPackagePath, joinRepr, tabulate
__all__.extend(utilities.__all__)
__all__.append('utilities')
SETTINGS = PackageSettings('prody', logger=LOGGER)
SETTINGS.load()
from . import kdtree
from .kdtree import *
__all__.extend(kdtree.__all__)
__all__.append('kdtree')
from . import atomic
from .atomic import *
__all__.extend(atomic.__all__)
__all__.append('atomic')
from .atomic import SELECT
from . import proteins
from .proteins import *
__all__.extend(proteins.__all__)
__all__.append('proteins')
from . import compounds
from .compounds import *
__all__.extend(compounds.__all__)
__all__.append('compounds')
from . import measure
from .measure import *
__all__.extend(measure.__all__)
__all__.append('measure')
from . import database
from .database import *
__all__.extend(database.__all__)
__all__.append('database')
from . import sequence
from .sequence import *
__all__.extend(sequence.__all__)
__all__.append('sequence')
from . import dynamics
from .dynamics import *
__all__.extend(dynamics.__all__)
__all__.append('dynamics')
from . import ensemble
from .ensemble import *
__all__.extend(ensemble.__all__)
__all__.append('ensemble')
from . import trajectory
from .trajectory import *
__all__.extend(trajectory.__all__)
__all__.append('trajectory')
from . import chromatin
from .chromatin import *
__all__.extend(chromatin.__all__)
__all__.append('chromatin')
from . import domain_decomposition
from .domain_decomposition import *
__all__.extend(domain_decomposition.__all__)
__all__.append('domain_decomposition')
#from . import comd
#from .comd import *
#__all__.extend(comd.__all__)
#__all__.append('comd')
import prody
__all__.append('prody')
# default, acceptable values, setter
CONFIGURATION = {
'backup': (False, None, None),
'backup_ext': ('.BAK', None, None),
'auto_show': (False, None, None),
'ligand_xml_save': (False, None, None),
'typo_warnings': (True, None, None),
'check_updates': (0, None, None),
'auto_secondary': (False, None, None),
'auto_bonds': (False, None, None),
'selection_warning': (True, None, None),
'verbosity': ('debug', list(utilities.LOGGING_LEVELS),
LOGGER._setverbosity),
'pdb_mirror_path': ('', None, proteins.pathPDBMirror),
'local_pdb_folder': ('', None, proteins.pathPDBFolder),
}
def confProDy(*args, **kwargs):
"""Configure ProDy."""
if args:
values = []
for option in args:
try:
values.append(SETTINGS[option])
except KeyError:
raise KeyError('{0:s} is not a valid configuration option'
.format(repr(option)))
if len(values) == 1:
return values[0]
else:
return values
for option, value in kwargs.items():
try:
default, acceptable, setter = CONFIGURATION[option]
except KeyError:
raise KeyError('{0:s} is not a valid configuration option'
.format(repr(option)))
else:
try:
value = type(default)(value)
except ValueError:
raise TypeError('{0:s} must be a {1:s}'
.format(option, type(default).__name__))
if acceptable is not None and value not in acceptable:
raise ValueError('{0:s} must be one of {1:s}'.format(option,
joinRepr(acceptable, sort=True,
last=', or ')))
SETTINGS[option] = value
LOGGER.info('ProDy is configured: {0:s}={1:s}'
.format(option, repr(value)))
SETTINGS.save()
if setter is not None:
setter(value)
_keys = list(CONFIGURATION)
_keys.sort()
_vals = []
for _key in _keys:
default, acceptable, setter = CONFIGURATION[_key]
    # `func_name` exists only on Python 2 functions; use `__name__` instead and
    # fall back to an empty cross-reference when the setter is None or private.
    seealso = ''
    try:
        if not setter.__name__.startswith('_'):
            seealso = ' See also :func:`.' + setter.__name__ + '`.'
    except AttributeError:
        pass
if acceptable is None:
_vals.append(repr(default) + seealso)
else:
_vals.append(repr(default) + ' (' +
joinRepr(acceptable, sort=True, last=', or ') + ')' +
seealso)
if _key not in SETTINGS:
SETTINGS[_key] = default
LOGGER._setverbosity(confProDy('verbosity'))
confProDy.__doc__ += '\n\n' + tabulate(['Option'] + _keys,
['Default (acceptable values)'] + _vals
) + """
Usage example::
confProDy('backup')
confProDy('backup', 'backup_ext')
confProDy(backup=True, backup_ext='.bak')
confProDy(backup_ext='.BAK')"""
def plog(*text):
"""Log *text* using ProDy logger at log level info. Multiple arguments
are accepted. Each argument will be converted to string and joined using
a white space as delimiter."""
LOGGER.info(' '.join([str(s) for s in text]))
def startLogfile(filename, **kwargs):
LOGGER.start(filename, **kwargs)
startLogfile.__doc__ = LOGGER.start.__doc__
def closeLogfile(filename):
"""Close logfile with *filename*."""
LOGGER.close(filename)
def checkUpdates():
"""Check PyPI to see if there is a newer ProDy version available. Setting
ProDy configuration parameter *check_updates* to a positive integer will
make ProDy automatically check updates, e.g.::
confProDy(check_updates=7) # check at most once a week
confProDy(check_updates=0) # do not auto check updates
confProDy(check_updates=-1) # check at the start of every session"""
pypi_url = 'https://pypi.python.org/pypi'
if PY3K:
import xmlrpc.client
pypi = xmlrpc.client.Server(pypi_url)
else:
import xmlrpclib
pypi = xmlrpclib.Server(pypi_url)
releases = pypi.package_releases('ProDy')
if releases[0] == __version__:
LOGGER.info('You are using the latest ProDy release (v{0:s}).'
.format(__version__))
else:
LOGGER.info('ProDy v{0:s} is available, you are using {1:s}.'
.format(releases[0], __version__))
if SETTINGS['check_updates']:
import time
SETTINGS['last_check'] = time.time()
SETTINGS.save()
if SETTINGS['check_updates']:
if SETTINGS.get('last_check') is None:
SETTINGS['last_check'] = 0
import time
if ((time.time() - SETTINGS.get('last_check')) / 3600 / 24 >
SETTINGS['check_updates']):
LOGGER.info('Checking PyPI for ProDy updates:')
checkUpdates()
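# --- Illustrative usage sketch (not part of the original module). ---
# Exercises a few of the configuration/logging helpers defined above; the
# option names come from CONFIGURATION, everything else is example data.
if __name__ == '__main__':
    plog('ProDy', __release__, 'imported')
    print(confProDy('verbosity'))             # query one option
    print(confProDy('backup', 'backup_ext'))  # query several options at once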
| 30.077441
| 79
| 0.643121
|
ad3812f4983c2412314f0f3e88e910b35591f22a
| 9,996
|
py
|
Python
|
tensorflow_probability/python/distributions/geometric.py
|
mjul/tensorflow-probability
|
c733f06bccceb983f3e9db8e6e3c98b3bd4d23c9
|
[
"Apache-2.0"
] | 1
|
2021-06-16T20:06:04.000Z
|
2021-06-16T20:06:04.000Z
|
tensorflow_probability/python/distributions/geometric.py
|
mjul/tensorflow-probability
|
c733f06bccceb983f3e9db8e6e3c98b3bd4d23c9
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_probability/python/distributions/geometric.py
|
mjul/tensorflow-probability
|
c733f06bccceb983f3e9db8e6e3c98b3bd4d23c9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The Geometric distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import math as tfp_math
from tensorflow_probability.python.distributions import distribution
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import distribution_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import reparameterization
from tensorflow_probability.python.internal import tensor_util
class Geometric(distribution.Distribution):
"""Geometric distribution.
  The Geometric distribution is parameterized by p, the probability of a
  positive event. It represents the probability that, in k + 1 Bernoulli
  trials, the first k trials fail before the first success is seen.
The pmf of this distribution is:
#### Mathematical Details
```none
pmf(k; p) = (1 - p)**k * p
```
where:
* `p` is the success probability, `0 < p <= 1`, and,
* `k` is a non-negative integer.
"""
def __init__(self,
logits=None,
probs=None,
validate_args=False,
allow_nan_stats=True,
name='Geometric'):
"""Construct Geometric distributions.
Args:
logits: Floating-point `Tensor` with shape `[B1, ..., Bb]` where `b >= 0`
indicates the number of batch dimensions. Each entry represents logits
for the probability of success for independent Geometric distributions
and must be in the range `(-inf, inf]`. Only one of `logits` or `probs`
should be specified.
probs: Positive floating-point `Tensor` with shape `[B1, ..., Bb]`
where `b >= 0` indicates the number of batch dimensions. Each entry
represents the probability of success for independent Geometric
distributions and must be in the range `(0, 1]`. Only one of `logits`
or `probs` should be specified.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
if (probs is None) == (logits is None):
raise ValueError('Must pass probs or logits, but not both.')
with tf.name_scope(name) as name:
dtype = dtype_util.common_dtype([logits, probs], dtype_hint=tf.float32)
self._probs = tensor_util.convert_nonref_to_tensor(
probs, dtype=dtype, name='probs')
self._logits = tensor_util.convert_nonref_to_tensor(
logits, dtype=dtype, name='logits')
super(Geometric, self).__init__(
dtype=dtype,
reparameterization_type=reparameterization.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
name=name)
@classmethod
def _params_event_ndims(cls):
return dict(logits=0, probs=0)
@property
def logits(self):
"""Input argument `logits`."""
return self._logits
@property
def probs(self):
"""Input argument `probs`."""
return self._probs
def _batch_shape_tensor(self):
x = self._probs if self._logits is None else self._logits
return tf.shape(x)
def _batch_shape(self):
x = self._probs if self._logits is None else self._logits
return x.shape
def _event_shape_tensor(self):
return tf.constant([], dtype=tf.int32)
def _event_shape(self):
return tf.TensorShape([])
def _sample_n(self, n, seed=None):
# Uniform variates must be sampled from the open-interval `(0, 1)` rather
# than `[0, 1)`. To do so, we use
# `np.finfo(dtype_util.as_numpy_dtype(self.dtype)).tiny`
# because it is the smallest, positive, 'normal' number. A 'normal' number
# is such that the mantissa has an implicit leading 1. Normal, positive
# numbers x, y have the reasonable property that, `x + y >= max(x, y)`. In
# this case, a subnormal number (i.e., np.nextafter) can cause us to sample
# 0.
probs = self._probs_parameter_no_checks()
sampled = tf.random.uniform(
tf.concat([[n], tf.shape(probs)], 0),
minval=np.finfo(dtype_util.as_numpy_dtype(self.dtype)).tiny,
maxval=1.,
seed=seed,
dtype=self.dtype)
return tf.floor(tf.math.log(sampled) / tf.math.log1p(-probs))
def _log_survival_function(self, x):
probs = self._probs_parameter_no_checks()
if not self.validate_args:
# Whether or not x is integer-form, the following is well-defined.
# However, scipy takes the floor, so we do too.
x = tf.floor(x)
return tf.where(
x < 0.,
dtype_util.as_numpy_dtype(x.dtype)(-np.inf),
(1. + x) * tf.math.log1p(-probs))
def _log_cdf(self, x):
probs = self._probs_parameter_no_checks()
if not self.validate_args:
# Whether or not x is integer-form, the following is well-defined.
# However, scipy takes the floor, so we do too.
x = tf.floor(x)
return tf.where(
x < 0.,
dtype_util.as_numpy_dtype(x.dtype)(-np.inf),
tfp_math.log1mexp((1. + x) * tf.math.log1p(-probs)))
def _log_prob(self, x):
probs = self._probs_parameter_no_checks()
if not self.validate_args:
# For consistency with cdf, we take the floor.
x = tf.floor(x)
return tf.math.xlog1py(x, -probs) + tf.math.log(probs)
def _entropy(self):
logits, probs = self._logits_and_probs_no_checks()
if not self.validate_args:
assertions = []
else:
assertions = [assert_util.assert_less(
probs, dtype_util.as_numpy_dtype(self.dtype)(1.),
message='Entropy is undefined when logits = inf or probs = 1.')]
with tf.control_dependencies(assertions):
# Claim: entropy(p) = softplus(s)/p - s
# where s=logits and p=probs.
#
# Proof:
#
# entropy(p)
# := -[(1-p)log(1-p) + plog(p)]/p
# = -[log(1-p) + plog(p/(1-p))]/p
# = -[-softplus(s) + ps]/p
# = softplus(s)/p - s
#
# since,
# log[1-sigmoid(s)]
# = log[1/(1+exp(s)]
# = -log[1+exp(s)]
# = -softplus(s)
#
# using the fact that,
# 1-sigmoid(s) = sigmoid(-s) = 1/(1+exp(s))
return tf.math.softplus(logits) / probs - logits
def _mean(self):
return tf.exp(-self._logits_parameter_no_checks())
def _variance(self):
logits, probs = self._logits_and_probs_no_checks()
return tf.exp(-logits) / probs
def _mode(self):
return tf.zeros(self.batch_shape_tensor(), dtype=self.dtype)
def logits_parameter(self, name=None):
"""Logits computed from non-`None` input arg (`probs` or `logits`)."""
with self._name_and_control_scope(name or 'logits_parameter'):
if self._logits is None:
return tf.math.log(self._probs) - tf.math.log1p(-self._probs)
return tf.identity(self._logits)
def probs_parameter(self, name=None):
"""Probs computed from non-`None` input arg (`probs` or `logits`)."""
with self._name_and_control_scope(name or 'probs_parameter'):
if self._logits is None:
return tf.identity(self._probs)
return tf.math.sigmoid(self._logits)
def _logits_parameter_no_checks(self):
if self._logits is None:
probs = tf.convert_to_tensor(self._probs)
return tf.math.log(probs) - tf.math.log1p(-probs)
return tf.identity(self._logits)
def _probs_parameter_no_checks(self):
if self._logits is None:
return tf.identity(self._probs)
return tf.math.sigmoid(self._logits)
def _logits_and_probs_no_checks(self):
if self._logits is None:
probs = tf.convert_to_tensor(self._probs)
logits = tf.math.log(probs) - tf.math.log1p(-probs)
else:
logits = tf.convert_to_tensor(self._logits)
probs = tf.math.sigmoid(logits)
return logits, probs
def _default_event_space_bijector(self):
return
def _sample_control_dependencies(self, x):
assertions = []
if not self.validate_args:
return assertions
assertions.extend(distribution_util.assert_nonnegative_integer_form(x))
return assertions
def _parameter_control_dependencies(self, is_init):
if not self.validate_args:
return []
assertions = []
if self._probs is not None:
if is_init != tensor_util.is_ref(self._probs):
probs = tf.convert_to_tensor(self._probs)
assertions.append(assert_util.assert_positive(
probs, message='Argument `probs` must be positive.'))
assertions.append(assert_util.assert_less_equal(
probs, dtype_util.as_numpy_dtype(self.dtype)(1.),
message='Argument `probs` must be less than or equal to 1.'))
return assertions
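# --- Illustrative usage sketch (not part of the original module). ---
# Constructs a small batch of the Geometric distributions defined above and
# evaluates a few of the methods implemented in this file (TF2 eager assumed).
if __name__ == '__main__':
  dist = Geometric(probs=[0.2, 0.5])
  print(dist.sample(3, seed=42))              # shape [3, 2]
  print(dist.log_prob([[0., 1.], [2., 3.]]))  # elementwise log pmf
  print(dist.mean())                          # (1 - p) / p per component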
| 36.349091
| 79
| 0.668768
|
52e26a67d055781ad3182f8164e731cf47b66856
| 18,323
|
py
|
Python
|
dense_correspondence/network/dense_correspondence_network.py
|
masato-ka/pytorch-dense-correspondence
|
89a5f87fd773b210e93ebcfeb945c95e7417d0e9
|
[
"BSD-3-Clause"
] | null | null | null |
dense_correspondence/network/dense_correspondence_network.py
|
masato-ka/pytorch-dense-correspondence
|
89a5f87fd773b210e93ebcfeb945c95e7417d0e9
|
[
"BSD-3-Clause"
] | null | null | null |
dense_correspondence/network/dense_correspondence_network.py
|
masato-ka/pytorch-dense-correspondence
|
89a5f87fd773b210e93ebcfeb945c95e7417d0e9
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
import sys, os
import numpy as np
import warnings
import logging
import dense_correspondence_manipulation.utils.utils as utils
utils.add_dense_correspondence_to_python_path()
from PIL import Image
import torch
import torch.nn as nn
from torchvision import transforms
import pytorch_segmentation_detection.models.resnet_dilated as resnet_dilated
from dense_correspondence.dataset.spartan_dataset_masked import SpartanDataset
class DenseCorrespondenceNetwork(nn.Module):
IMAGE_TO_TENSOR = valid_transform = transforms.Compose([transforms.ToTensor(), ])
def __init__(self, fcn, descriptor_dimension, image_width=640,
image_height=480, normalize=False):
"""
:param fcn:
:type fcn:
:param descriptor_dimension:
:type descriptor_dimension:
:param image_width:
:type image_width:
:param image_height:
:type image_height:
:param normalize: If True normalizes the feature vectors to lie on unit ball
:type normalize:
"""
super(DenseCorrespondenceNetwork, self).__init__()
self._fcn = fcn
self._descriptor_dimension = descriptor_dimension
self._image_width = image_width
self._image_height = image_height
# this defaults to the identity transform
self._image_mean = np.zeros(3)
self._image_std_dev = np.ones(3)
# defaults to no image normalization, assume it is done by dataset loader instead
self.config = dict()
self._descriptor_image_stats = None
self._normalize = normalize
self._constructed_from_model_folder = False
@property
def fcn(self):
return self._fcn
@property
def config(self):
return self._config
@config.setter
def config(self, value):
self._config = value
@property
def descriptor_dimension(self):
return self._descriptor_dimension
@property
def image_shape(self):
return [self._image_height, self._image_width]
@property
def image_mean(self):
return self._image_mean
@image_mean.setter
def image_mean(self, value):
"""
Sets the image mean used in normalizing the images before
being passed through the network
:param value: list of floats
:type value:
:return:
:rtype:
"""
self._image_mean = value
self.config['image_mean'] = value
self._update_normalize_tensor_transform()
@property
def image_std_dev(self):
return self._image_std_dev
@image_std_dev.setter
def image_std_dev(self, value):
"""
Sets the image std dev used in normalizing the images before
being passed through the network
:param value: list of floats
:type value:
:return:
:rtype:
"""
self._image_std_dev = value
self.config['image_std_dev'] = value
self._update_normalize_tensor_transform()
@property
def image_to_tensor(self):
return self._image_to_tensor
@image_to_tensor.setter
def image_to_tensor(self, value):
self._image_to_tensor = value
@property
def normalize_tensor_transform(self):
return self._normalize_tensor_transform
@property
def path_to_network_params_folder(self):
if not 'path_to_network_params_folder' in self.config:
raise ValueError("DenseCorrespondenceNetwork: Config doesn't have a `path_to_network_params_folder`"
"entry")
return self.config['path_to_network_params_folder']
@property
def descriptor_image_stats(self):
"""
Returns the descriptor normalization parameters, if possible.
If they have not yet been loaded then it loads them
:return:
:rtype:
"""
# if it isn't already set, then attempt to load it
if self._descriptor_image_stats is None:
path_to_params = utils.convert_to_absolute_path(self.path_to_network_params_folder)
descriptor_stats_file = os.path.join(path_to_params, "descriptor_statistics.yaml")
self._descriptor_image_stats = utils.getDictFromYamlFilename(descriptor_stats_file)
return self._descriptor_image_stats
@property
def constructed_from_model_folder(self):
"""
Returns True if this model was constructed from
:return:
:rtype:
"""
return self._constructed_from_model_folder
@constructed_from_model_folder.setter
def constructed_from_model_folder(self, value):
self._constructed_from_model_folder = value
@property
def unique_identifier(self):
"""
Return the unique identifier for this network, if it has one.
If no identifier.yaml found (or we don't even have a model params folder)
then return None
:return:
:rtype:
"""
try:
path_to_network_params_folder = self.path_to_network_params_folder
except ValueError:
return None
identifier_file = os.path.join(path_to_network_params_folder, 'identifier.yaml')
if not os.path.exists(identifier_file):
return None
if not self.constructed_from_model_folder:
return None
d = utils.getDictFromYamlFilename(identifier_file)
unique_identifier = d['id'] + "+" + self.config['model_param_filename_tail']
return unique_identifier
def _update_normalize_tensor_transform(self):
"""
Updates the image to tensor transform using the current image mean and
std dev
:return: None
:rtype:
"""
self._normalize_tensor_transform = transforms.Normalize(self.image_mean, self.image_std_dev)
def forward_on_img(self, img, cuda=True):
"""
Runs the network forward on an image
:param img: img is an image as a numpy array in opencv format [0,255]
:return:
"""
img_tensor = DenseCorrespondenceNetwork.IMAGE_TO_TENSOR(img)
if cuda:
img_tensor.cuda()
return self.forward(img_tensor)
def forward_on_img_tensor(self, img):
"""
Deprecated, use `forward` instead
Runs the network forward on an img_tensor
:param img: (C x H X W) in range [0.0, 1.0]
:return:
"""
warnings.warn("use forward method instead", DeprecationWarning)
img = img.unsqueeze(0)
img = torch.tensor(img, device=torch.device("cuda"))
res = self.fcn(img)
res = res.squeeze(0)
res = res.permute(1, 2, 0)
res = res.data.cpu().numpy().squeeze()
return res
def forward(self, img_tensor):
"""
Simple forward pass on the network.
Does NOT normalize the image
D = descriptor dimension
N = batch size
:param img_tensor: input tensor img.shape = [N, D, H , W] where
N is the batch size
:type img_tensor: torch.Variable or torch.Tensor
:return: torch.Variable with shape [N, D, H, W],
:rtype:
"""
res = self.fcn(img_tensor)
if self._normalize:
#print "normalizing descriptor norm"
            norm = torch.norm(res, 2, 1, keepdim=True)  # [N,1,H,W]
res = res/norm
return res
def forward_single_image_tensor(self, img_tensor):
"""
Simple forward pass on the network.
Assumes the image has already been normalized (i.e. subtract mean, divide by std dev)
Color channel should be RGB
:param img_tensor: torch.FloatTensor with shape [3,H,W]
:type img_tensor:
:return: torch.FloatTensor with shape [H, W, D]
:rtype:
"""
assert len(img_tensor.shape) == 3
# transform to shape [1,3,H,W]
img_tensor = img_tensor.unsqueeze(0)
# make sure it's on the GPU
img_tensor = torch.tensor(img_tensor, device=torch.device("cuda"))
res = self.forward(img_tensor) # shape [1,D,H,W]
# print "res.shape 1", res.shape
res = res.squeeze(0) # shape [D,H,W]
# print "res.shape 2", res.shape
res = res.permute(1,2,0) # shape [H,W,D]
# print "res.shape 3", res.shape
return res
def process_network_output(self, image_pred, N):
"""
Processes the network output into a new shape
:param image_pred: output of the network img.shape = [N,descriptor_dim, H , W]
:type image_pred: torch.Tensor
:param N: batch size
:type N: int
:return: same as input, new shape is [N, W*H, descriptor_dim]
:rtype:
"""
W = self._image_width
H = self._image_height
image_pred = image_pred.view(N, self.descriptor_dimension, W * H)
image_pred = image_pred.permute(0, 2, 1)
return image_pred
def clip_pixel_to_image_size_and_round(self, uv):
"""
Clips pixel to image coordinates and converts to int
:param uv:
:type uv:
:return:
:rtype:
"""
u = min(int(round(uv[0])), self._image_width - 1)
v = min(int(round(uv[1])), self._image_height - 1)
return [u, v]
def load_training_dataset(self):
"""
Loads the dataset that this was trained on
:return: a dataset object, loaded with the config as set in the dataset.yaml
:rtype: SpartanDataset
"""
network_params_folder = self.path_to_network_params_folder
network_params_folder = utils.convert_to_absolute_path(network_params_folder)
dataset_config_file = os.path.join(network_params_folder, 'dataset.yaml')
config = utils.getDictFromYamlFilename(dataset_config_file)
return SpartanDataset(config_expanded=config)
@staticmethod
def get_unet(config):
"""
        Returns a Unet nn.module that satisfies the fcn properties stated in the get_fcn() docstring
"""
dc_source_dir = utils.getDenseCorrespondenceSourceDir()
sys.path.append(os.path.join(dc_source_dir, 'external/unet-pytorch'))
from unet_model import UNet
model = UNet(num_classes=config["descriptor_dimension"]).cuda()
return model
@staticmethod
def get_fcn(config):
"""
Returns a pytorch nn.module that satisfies these properties:
1. autodiffs
2. has forward() overloaded
3. can accept a ~Nx3xHxW (should double check)
4. outputs a ~NxDxHxW (should double check)
:param config: Dict with dcn configuration parameters
"""
if config["backbone"]["model_class"] == "Resnet":
resnet_model = config["backbone"]["resnet_name"]
fcn = getattr(resnet_dilated, resnet_model)(num_classes=config['descriptor_dimension'])
elif config["backbone"]["model_class"] == "Unet":
fcn = DenseCorrespondenceNetwork.get_unet(config)
else:
raise ValueError("Can't build backbone network. I don't know this backbone model class!")
return fcn
@staticmethod
def from_config(config, load_stored_params=True, model_param_file=None):
"""
Load a network from a configuration
:param config: Dict specifying details of the network architecture
:param load_stored_params: whether or not to load stored params, if so there should be
a "path_to_network" entry in the config
:type load_stored_params: bool
e.g.
path_to_network: /home/manuelli/code/dense_correspondence/recipes/trained_models/10_drill_long_3d
parameter_file: dense_resnet_34_8s_03505.pth
descriptor_dimensionality: 3
image_width: 640
image_height: 480
:return: DenseCorrespondenceNetwork
:rtype:
"""
if "backbone" not in config:
# default to CoRL 2018 backbone!
config["backbone"] = dict()
config["backbone"]["model_class"] = "Resnet"
config["backbone"]["resnet_name"] = "Resnet34_8s"
fcn = DenseCorrespondenceNetwork.get_fcn(config)
if 'normalize' in config:
normalize = config['normalize']
else:
normalize = False
dcn = DenseCorrespondenceNetwork(fcn, config['descriptor_dimension'],
image_width=config['image_width'],
image_height=config['image_height'],
normalize=normalize)
if load_stored_params:
assert model_param_file is not None
config['model_param_file'] = model_param_file # should be an absolute path
try:
dcn.load_state_dict(torch.load(model_param_file))
except:
logging.info("loading params with the new style failed, falling back to dcn.fcn.load_state_dict")
dcn.fcn.load_state_dict(torch.load(model_param_file))
dcn.cuda()
dcn.train()
dcn.config = config
return dcn
@staticmethod
def from_model_folder(model_folder, load_stored_params=True, model_param_file=None,
iteration=None):
"""
Loads a DenseCorrespondenceNetwork from a model folder
        :param model_folder: the path to the folder where the model is stored. This directory contains
files like
- 003500.pth
- training.yaml
:type model_folder:
        :return: a DenseCorrespondenceNetwork object
:rtype:
"""
from_model_folder = False
model_folder = utils.convert_to_absolute_path(model_folder)
if model_param_file is None:
model_param_file, _, _ = utils.get_model_param_file_from_directory(model_folder, iteration=iteration)
from_model_folder = True
model_param_file = utils.convert_to_absolute_path(model_param_file)
training_config_filename = os.path.join(model_folder, "training.yaml")
training_config = utils.getDictFromYamlFilename(training_config_filename)
config = training_config["dense_correspondence_network"]
config["path_to_network_params_folder"] = model_folder
config["model_param_filename_tail"] = os.path.split(model_param_file)[1]
dcn = DenseCorrespondenceNetwork.from_config(config,
load_stored_params=load_stored_params,
model_param_file=model_param_file)
# whether or not network was constructed from model folder
dcn.constructed_from_model_folder = from_model_folder
dcn.model_folder = model_folder
return dcn
@staticmethod
def find_best_match(pixel_a, res_a, res_b, debug=False):
"""
Compute the correspondences between the pixel_a location in image_a
and image_b
:param pixel_a: vector of (u,v) pixel coordinates
:param res_a: array of dense descriptors res_a.shape = [H,W,D]
:param res_b: array of dense descriptors
:return: (best_match_uv, best_match_diff, norm_diffs)
best_match_idx is again in (u,v) = (right, down) coordinates
"""
descriptor_at_pixel = res_a[pixel_a[1], pixel_a[0]]
height, width, _ = res_a.shape
if debug:
print("height: ", height)
print("width: ", width)
print("res_b.shape: ", res_b.shape)
# non-vectorized version
# norm_diffs = np.zeros([height, width])
# for i in xrange(0, height):
# for j in xrange(0, width):
# norm_diffs[i,j] = np.linalg.norm(res_b[i,j] - descriptor_at_pixel)**2
norm_diffs = np.sqrt(np.sum(np.square(res_b - descriptor_at_pixel), axis=2))
best_match_flattened_idx = np.argmin(norm_diffs)
best_match_xy = np.unravel_index(best_match_flattened_idx, norm_diffs.shape)
best_match_diff = norm_diffs[best_match_xy]
best_match_uv = (best_match_xy[1], best_match_xy[0])
return best_match_uv, best_match_diff, norm_diffs
@staticmethod
def find_best_match_for_descriptor(descriptor, res):
"""
Compute the correspondences between the given descriptor and the descriptor image
res
:param descriptor:
:type descriptor:
:param res: array of dense descriptors res = [H,W,D]
:type res: numpy array with shape [H,W,D]
:return: (best_match_uv, best_match_diff, norm_diffs)
best_match_idx is again in (u,v) = (right, down) coordinates
:rtype:
"""
height, width, _ = res.shape
norm_diffs = np.sqrt(np.sum(np.square(res - descriptor), axis=2))
best_match_flattened_idx = np.argmin(norm_diffs)
best_match_xy = np.unravel_index(best_match_flattened_idx, norm_diffs.shape)
best_match_diff = norm_diffs[best_match_xy]
best_match_uv = (best_match_xy[1], best_match_xy[0])
return best_match_uv, best_match_diff, norm_diffs
def evaluate_descriptor_at_keypoints(self, res, keypoint_list):
"""
:param res: result of evaluating the network
:type res: torch.FloatTensor [D,W,H]
:param img:
:type img: img_tensor
:param kp: list of cv2.KeyPoint
:type kp:
:return: numpy.ndarray (N,D) N = num keypoints, D = descriptor dimension
This is the same format as sift.compute from OpenCV
:rtype:
"""
raise NotImplementedError("This function is currently broken")
N = len(keypoint_list)
D = self.descriptor_dimension
des = np.zeros([N,D])
for idx, kp in enumerate(keypoint_list):
uv = self.clip_pixel_to_image_size_and_round([kp.pt[0], kp.pt[1]])
des[idx,:] = res[uv[1], uv[0], :]
# cast to float32, need this in order to use cv2.BFMatcher() with bf.knnMatch
des = np.array(des, dtype=np.float32)
return des
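# --- Illustrative usage sketch (not part of the original module). ---
# Exercises the purely-numpy static matcher defined above with random data;
# no GPU, trained weights, or dataset are needed for this code path.
if __name__ == "__main__":
    H, W, D = 48, 64, 3
    res = np.random.rand(H, W, D)
    descriptor = res[10, 20]  # descriptor taken from a known pixel
    best_uv, best_diff, norm_diffs = \
        DenseCorrespondenceNetwork.find_best_match_for_descriptor(descriptor, res)
    print("best match (u, v):", best_uv)          # expected: (20, 10)
    print("best match diff:", best_diff)          # expected: 0.0
    print("norm_diffs shape:", norm_diffs.shape)  # (48, 64)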
| 31.537005
| 113
| 0.630301
|
bf7fc5d90c685232a6be1ce5d33788d73b14cf8b
| 4,574
|
py
|
Python
|
src/func.py
|
kevinmarquesp/terminal-shoppingList
|
138eda191f9f037db74bc63bd046355be52963b1
|
[
"MIT"
] | null | null | null |
src/func.py
|
kevinmarquesp/terminal-shoppingList
|
138eda191f9f037db74bc63bd046355be52963b1
|
[
"MIT"
] | null | null | null |
src/func.py
|
kevinmarquesp/terminal-shoppingList
|
138eda191f9f037db74bc63bd046355be52963b1
|
[
"MIT"
] | null | null | null |
from os import system, name, remove
from datetime import date
def intToDecimal(n):
return f'R$ {n/100}'.replace( '.', ',')
# NOTE: this first tableWrite() is shadowed by the fuller definition below;
# the "nothing added yet" message belongs to the empty-list branch.
def tableWrite( array=list(), total=dict()):
    system( 'clear' if name!='nt' else 'cls')
    if len(array)==0:
        print( '\n\033[32m Nada adicionado ainda... \033[m\n'.center(50))
    else:
        print()
    print('-'*50)
def myInput(msg):
user = input(msg)
result = list()
    # This block replaces the spaces inside a quoted string with "_"; change this later...
if '"' in user:
string = user[
user.find('"')+1:
user.rfind('"')
]
user = user.replace( f'"{string}"', string.replace(' ', '_'))
for i in user.split():
j = i
if '_' in j:
j = j.replace( '_', ' ')
elif ',' in j:
j = j.replace( ',', '.')
try:
result.append(float(j))
except:
result.append(j)
return result
def tableWrite(arr):
system('clear')
if len(arr):
print( '\n' +'\033[32mLISTA DE COMPRAS\033[m'.center(68) +'\n')
print(
'\033[4m'+
'IN.',
f'{"Nome":<30}',
f'{"Preço":<10}',
'Qt.',
f'{"Total":<10}'
+'\033[m'
)
total = {
'products': len(arr),
'items': 0,
'price': 0
}
for k,v in enumerate(arr):
print(
f'{k:^3}',
f'{v["name"]:<30}',
f'{intToDecimal(v["price"]):<10}',
f'{int(v["amount"]):<3}',
'\033[32m' +intToDecimal(v["total"]) +'\033[m'
)
total["items"] += int(v["amount"])
total["price"] += v["total"]
print()
for k,v1 in total.items():
            v2 = intToDecimal(v1) if k=='price' else v1  # intToDecimal already adds the 'R$ ' prefix
print( f'\033[47;30m {k} \033[42m {v2} \033[m', end=' ')
print()
else:
print( '\n' +'NADA ADICIONADO AINDA'.center(60))
print('\n' +'-'*60)
try:
return total
except:
return None
def saveDetails(arr, total):
with open('index.html', 'w') as index:
index.write('''<html lang="pt-br">
<head>
<meta charset="UTF-8">
<style>
* { font-family: Arial; padding: 0;
margin: 0; font-size: .9rem; }
main { padding: 35px 20px; }
h1 { font-weight: normal; }
h1 span { font-size: 1.5rem; }
table { text-align: left; margin-top: 1.5rem; }
table, table * { border-collapse: collapse; }
table tr td, table tr th { padding: 5px 20px; }
#final tr th { text-align: center; }
.head { background-color: #112d79; color: white; }
.gray { background-color: #eeeeee; }
.blue { color: #112d79; }
.bold { font-weight: bold; }
</style>
</head>
<body>
<main>
\t''')
index.write(f'<h1> <span class="bold blue">Planilha registrada em:</span> <span>{date.today()}</span> </h1>')
index.write('''
<table id="list">
<tr class="head">
<th> Nome </th>
<th> Preço </th>
<th> Quantidade </th>
<th> Total </th>
</tr>''')
for k,v in enumerate(arr):
if k%2==0:
color = 'normal'
else:
color = 'gray'
index.write( f'<tr class="{color}"> <td> {v["name"]} </td> <td> {intToDecimal(v["price"])} </td> <td> {int(v["amount"])} </td> <td> {intToDecimal(v["total"])} </td> </tr>')
index.write( f'''</table>
<table id="final">
<tr class="head">
<th colspan="2"> Dados finais </th>
</tr>
<tr class="normal">
<td class="bold blue"> Produtos </td>
<td> {total["products"]} </td>
</tr>
<tr class="gray">
<td class="bold blue"> Itens </td>
<td> {total["items"]} </td>
</tr>
<tr class="normal">
<td class="bold blue"> Preço </td>
<td> {intToDecimal( total["price"])} </td>
</tr>
</table>
</main>
</body>
</html>
''')
# add Arroz 24,84 3
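# --- Illustrative usage sketch (not part of the original module). ---
# Builds the item dictionaries that the second tableWrite() above expects;
# prices are stored in centavos, which is why intToDecimal divides by 100.
# The product names and prices below are just example data.
if __name__ == '__main__':
    items = [
        {'name': 'Arroz', 'price': 2484, 'amount': 3, 'total': 3 * 2484},
        {'name': 'Feijao', 'price': 899, 'amount': 2, 'total': 2 * 899},
    ]
    totals = tableWrite(items)            # prints the table (and clears the screen)
    print(intToDecimal(totals['price']))  # -> 'R$ 92,5'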
| 27.721212
| 184
| 0.415173
|
41780dc5c030660347320bf6b7e61914a7414c5d
| 592
|
py
|
Python
|
Day01-15/code/Day07/findmax.py
|
bdfd/Python_Zero2Hero_DS
|
9dafe90b8112fdc3d07e1aa02e41ed3f019f733c
|
[
"MIT"
] | 3
|
2022-01-15T19:06:19.000Z
|
2022-01-18T16:47:27.000Z
|
Day01-15/code/Day07/findmax.py
|
bdfd/4.5_Data-Science-Python-Zero2Hero-
|
9dafe90b8112fdc3d07e1aa02e41ed3f019f733c
|
[
"MIT"
] | null | null | null |
Day01-15/code/Day07/findmax.py
|
bdfd/4.5_Data-Science-Python-Zero2Hero-
|
9dafe90b8112fdc3d07e1aa02e41ed3f019f733c
|
[
"MIT"
] | 1
|
2022-02-28T23:37:59.000Z
|
2022-02-28T23:37:59.000Z
|
"""
Find the largest and smallest elements in a list
Version: 0.1
Author: BDFD
Date: 2018-03-06
"""
def main():
fruits = ['grape', 'apple', 'strawberry', 'waxberry', 'pitaya']
    # The built-in max and min functions could find the largest and smallest elements directly
# print(max(fruits))
# print(min(fruits))
max_value = min_value = fruits[0]
for index in range(1, len(fruits)):
if fruits[index] > max_value:
max_value = fruits[index]
elif fruits[index] < min_value:
min_value = fruits[index]
print('Max:', max_value)
print('Min:', min_value)
if __name__ == '__main__':
main()
# Think about it: if the largest element appears twice, how would you find the second largest?
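# --- Illustrative sketch for the exercise above (not part of the original file). ---
# One way to find the second-largest *distinct* value in a single pass,
# which still works when the maximum value appears more than once.
def second_largest(items):
    largest = second = None
    for item in items:
        if largest is None or item > largest:
            largest, second = item, largest
        elif item != largest and (second is None or item > second):
            second = item
    return second
# second_largest(['grape', 'apple', 'strawberry', 'waxberry', 'pitaya']) -> 'strawberry'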
| 21.142857
| 67
| 0.614865
|
f19ef97a0bed5b13ab219e5ef779979b33f0b9e5
| 529
|
py
|
Python
|
test/start_all_pwm.py
|
adafruit/adafruit-beaglebone-io-python
|
7361d9a997bce4ab79c83c4ef30442e8dd13f363
|
[
"MIT"
] | 305
|
2015-01-03T07:03:01.000Z
|
2022-03-13T07:35:45.000Z
|
test/start_all_pwm.py
|
silver2row/adafruit-beaglebone-io-python
|
7284fb18a960ed65351ac2f5128d516372ca74a3
|
[
"MIT"
] | 280
|
2015-01-05T16:13:10.000Z
|
2022-03-19T19:21:15.000Z
|
test/start_all_pwm.py
|
silver2row/adafruit-beaglebone-io-python
|
7284fb18a960ed65351ac2f5128d516372ca74a3
|
[
"MIT"
] | 179
|
2015-01-01T03:05:13.000Z
|
2022-03-25T18:08:34.000Z
|
import Adafruit_BBIO.PWM as PWM
pins = [
"P9_16", #PWM1B
"P9_21", #PWM0B
"P9_22", #PWM0A
"P9_28", #ECAP2
"P9_29", #PWM0B
"P9_31", #PWM0A
"P9_42", #ECAP0
"P8_13", #PWM2B
"P8_19", #PWM2A
"P8_34", #PWM1B
"P8_36", #PWM1A
"P8_45", #PWM2A
"P8_46" #PWM2B
]
# /sys/devices/platform/ocp/48300000.epwmss/48300100.ecap/pwm/pwmchip0/pwm-0:0/duty_cycle
# /sys/devices/platform/ocp/48304000.epwmss/48304100.ecap/pwm/pwmchip5/pwm-5:0/duty_cycle
for pin in pins:
print(pin)
PWM.start(pin, 50, 2000, 1)
PWM.stop(pin)
PWM.cleanup()
| 18.892857
| 89
| 0.689981
|
9597976a1b48cedb45e636e6ec1355c12251efb7
| 1,400
|
py
|
Python
|
sandbox/TrainDataMaker/Tournament/evaluator.py
|
YuMurata/UserPreferencePredictor
|
bb7c725147b65a9a0c0a40b2f3cafc105db5bc89
|
[
"MIT"
] | null | null | null |
sandbox/TrainDataMaker/Tournament/evaluator.py
|
YuMurata/UserPreferencePredictor
|
bb7c725147b65a9a0c0a40b2f3cafc105db5bc89
|
[
"MIT"
] | null | null | null |
sandbox/TrainDataMaker/Tournament/evaluator.py
|
YuMurata/UserPreferencePredictor
|
bb7c725147b65a9a0c0a40b2f3cafc105db5bc89
|
[
"MIT"
] | null | null | null |
import UserPreferencePredictor.TrainDataMaker.Tournament as Tournament
import logging
import random
class NumberPlayer(Tournament.Player):
def __init__(self, param):
super().__init__(param)
def decode(self):
return self.param
def _param_diff(param, target_param):
return 1/(abs(param-target_param)+1)
if __name__ == "__main__":
random.seed(0)
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
param_list = list(range(10))
player_list = [NumberPlayer(param) for param in param_list]
game = Tournament.TournamentGame(player_list, handler=handler)
while not game.is_complete:
left, right = game.new_match()
if left.param > 5 and right.param > 5:
winner = Tournament.GameWin.BOTH_LOSE
elif left.param < 3 and right.param < 3:
winner = Tournament.GameWin.BOTH_WIN
elif left.param < right.param:
winner = Tournament.GameWin.LEFT
elif left.param > right.param:
winner = Tournament.GameWin.RIGHT
game.compete(winner)
print('param| score')
for player in game.player_list:
print(f'{player.param:>5}| {player.score:<.3f}')
evaluator = Tournament.Evaluator(game.player_list, _param_diff)
print('param| evaluate')
for param in param_list:
print(f'{param:>5}| {evaluator.evaluate(param):<.3f}')
| 26.923077
| 70
| 0.665714
|
ffedcb8260288efc6cb8503b08d4c6eb617bd398
| 4,664
|
py
|
Python
|
homeassistant/components/climate/vera.py
|
Norien/Home-Assistant
|
0aa8933df675aa3ea93126c0b1eb7c9a77208331
|
[
"Apache-2.0"
] | 2
|
2017-02-25T00:27:06.000Z
|
2017-02-25T03:09:30.000Z
|
homeassistant/components/climate/vera.py
|
Norien/Home-Assistant
|
0aa8933df675aa3ea93126c0b1eb7c9a77208331
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/climate/vera.py
|
Norien/Home-Assistant
|
0aa8933df675aa3ea93126c0b1eb7c9a77208331
|
[
"Apache-2.0"
] | null | null | null |
"""
Support for Vera thermostats.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/climate.vera/
"""
import logging
from homeassistant.util import convert
from homeassistant.components.climate import ClimateDevice, ENTITY_ID_FORMAT
from homeassistant.const import (
TEMP_FAHRENHEIT,
TEMP_CELSIUS,
ATTR_TEMPERATURE)
from homeassistant.components.vera import (
VERA_CONTROLLER, VERA_DEVICES, VeraDevice)
DEPENDENCIES = ['vera']
_LOGGER = logging.getLogger(__name__)
OPERATION_LIST = ["Heat", "Cool", "Auto Changeover", "Off"]
FAN_OPERATION_LIST = ["On", "Auto", "Cycle"]
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
"""Find and return Vera thermostats."""
add_devices_callback(
VeraThermostat(device, VERA_CONTROLLER) for
device in VERA_DEVICES['climate'])
class VeraThermostat(VeraDevice, ClimateDevice):
"""Representation of a Vera Thermostat."""
def __init__(self, vera_device, controller):
"""Initialize the Vera device."""
VeraDevice.__init__(self, vera_device, controller)
self.entity_id = ENTITY_ID_FORMAT.format(self.vera_id)
@property
def current_operation(self):
"""Return current operation ie. heat, cool, idle."""
mode = self.vera_device.get_hvac_mode()
if mode == "HeatOn":
return OPERATION_LIST[0] # heat
elif mode == "CoolOn":
return OPERATION_LIST[1] # cool
elif mode == "AutoChangeOver":
return OPERATION_LIST[2] # auto
elif mode == "Off":
return OPERATION_LIST[3] # off
return "Off"
@property
def operation_list(self):
"""List of available operation modes."""
return OPERATION_LIST
@property
def current_fan_mode(self):
"""Return the fan setting."""
mode = self.vera_device.get_fan_mode()
if mode == "ContinuousOn":
return FAN_OPERATION_LIST[0] # on
elif mode == "Auto":
return FAN_OPERATION_LIST[1] # auto
elif mode == "PeriodicOn":
return FAN_OPERATION_LIST[2] # cycle
return "Auto"
@property
def fan_list(self):
"""List of available fan modes."""
return FAN_OPERATION_LIST
def set_fan_mode(self, mode):
"""Set new target temperature."""
if mode == FAN_OPERATION_LIST[0]:
self.vera_device.fan_on()
elif mode == FAN_OPERATION_LIST[1]:
self.vera_device.fan_auto()
elif mode == FAN_OPERATION_LIST[2]:
return self.vera_device.fan_cycle()
@property
def current_power_mwh(self):
"""Current power usage in mWh."""
power = self.vera_device.power
if power:
return convert(power, float, 0.0) * 1000
def update(self):
"""Called by the vera device callback to update state."""
self._state = self.vera_device.get_hvac_mode()
@property
def temperature_unit(self):
"""Return the unit of measurement."""
vera_temp_units = (
self.vera_device.vera_controller.temperature_units)
if vera_temp_units == 'F':
return TEMP_FAHRENHEIT
return TEMP_CELSIUS
@property
def current_temperature(self):
"""Return the current temperature."""
return self.vera_device.get_current_temperature()
@property
def operation(self):
"""Return current operation ie. heat, cool, idle."""
return self.vera_device.get_hvac_state()
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self.vera_device.get_current_goal_temperature()
def set_temperature(self, **kwargs):
"""Set new target temperatures."""
if kwargs.get(ATTR_TEMPERATURE) is not None:
self.vera_device.set_temperature(kwargs.get(ATTR_TEMPERATURE))
def set_operation_mode(self, operation_mode):
"""Set HVAC mode (auto, cool, heat, off)."""
if operation_mode == OPERATION_LIST[3]: # off
self.vera_device.turn_off()
elif operation_mode == OPERATION_LIST[2]: # auto
self.vera_device.turn_auto_on()
elif operation_mode == OPERATION_LIST[1]: # cool
self.vera_device.turn_cool_on()
elif operation_mode == OPERATION_LIST[0]: # heat
self.vera_device.turn_heat_on()
def turn_fan_on(self):
"""Turn fan on."""
self.vera_device.fan_on()
def turn_fan_off(self):
"""Turn fan off."""
self.vera_device.fan_auto()
| 31.727891
| 76
| 0.642367
|