Schema (column: type, observed range; ⌀ marks columns that contain null values):
hexsha: string (length 40)
size: int64 (3 to 1.03M)
ext: string (10 classes)
lang: string (1 class)
max_stars_repo_path: string (length 3 to 972)
max_stars_repo_name: string (length 6 to 130)
max_stars_repo_head_hexsha: string (length 40 to 78)
max_stars_repo_licenses: list (length 1 to 10)
max_stars_count: int64 (1 to 191k) ⌀
max_stars_repo_stars_event_min_datetime: string (length 24) ⌀
max_stars_repo_stars_event_max_datetime: string (length 24) ⌀
max_issues_repo_path: string (length 3 to 972)
max_issues_repo_name: string (length 6 to 130)
max_issues_repo_head_hexsha: string (length 40 to 78)
max_issues_repo_licenses: list (length 1 to 10)
max_issues_count: int64 (1 to 116k) ⌀
max_issues_repo_issues_event_min_datetime: string (length 24) ⌀
max_issues_repo_issues_event_max_datetime: string (length 24) ⌀
max_forks_repo_path: string (length 3 to 972)
max_forks_repo_name: string (length 6 to 130)
max_forks_repo_head_hexsha: string (length 40 to 78)
max_forks_repo_licenses: list (length 1 to 10)
max_forks_count: int64 (1 to 105k) ⌀
max_forks_repo_forks_event_min_datetime: string (length 24) ⌀
max_forks_repo_forks_event_max_datetime: string (length 24) ⌀
content: string (length 3 to 1.03M)
avg_line_length: float64 (1.13 to 941k)
max_line_length: int64 (2 to 941k)
alphanum_fraction: float64 (0 to 1)
hexsha 17c121d0899135ab18f101d5687dfc9db718713e | size 258 | ext py | lang Python
max_stars:  path 2021/examples-in-class-2021-10-08/stars_forloop.py | repo ati-ozgur/course-python | head 38237d120043c07230658b56dc3aeb01c3364933 | licenses ["Apache-2.0"] | count 1 | 2021-02-04T16:59:11.000Z / 2021-02-04T16:59:11.000Z
max_issues: path 2021/examples-in-class-2021-10-08/stars_forloop.py | repo ati-ozgur/course-python | head 38237d120043c07230658b56dc3aeb01c3364933 | licenses ["Apache-2.0"] | count null | null / null
max_forks:  path 2021/examples-in-class-2021-10-08/stars_forloop.py | repo ati-ozgur/course-python | head 38237d120043c07230658b56dc3aeb01c3364933 | licenses ["Apache-2.0"] | count 1 | 2019-10-30T14:37:48.000Z / 2019-10-30T14:37:48.000Z
content:
str_N = input("Please enter number: ")
N = int(str_N)
#N = 5
for index in range(0,N):
space_count = (N-index)
star_count = (index * 2)+1
spaces = (" " * space_count)
stars = ("*" * star_count )
line = spaces + stars
print(line )
avg_line_length 18.428571 | max_line_length 38 | alphanum_fraction 0.562016
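The file above prints a left-padded triangle of stars: each row drops one leading space and adds two stars. A minimal trace for N = 3 (a hypothetical input, not part of the dataset row) makes the space/star arithmetic concrete.

```python
# Trace of stars_forloop.py above for an assumed input of N = 3.
N = 3
for index in range(0, N):
    space_count = N - index        # 3, 2, 1
    star_count = index * 2 + 1     # 1, 3, 5
    print(" " * space_count + "*" * star_count)
# prints: "   *", "  ***", " *****"
```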
hexsha 71f10676a9f48fa2686e2f79b8240ae7da09c665 | size 1,655 | ext py | lang Python
max_stars:  path dcp_py/profile/__init__.py | repo sraaphorst/Daily-Coding-Problem | head acfcf83a66099f3e4b69e2447600b8208cd9ab1b | licenses ["MIT"] | count null | null / null
max_issues: path dcp_py/profile/__init__.py | repo sraaphorst/Daily-Coding-Problem | head acfcf83a66099f3e4b69e2447600b8208cd9ab1b | licenses ["MIT"] | count null | null / null
max_forks:  path dcp_py/profile/__init__.py | repo sraaphorst/Daily-Coding-Problem | head acfcf83a66099f3e4b69e2447600b8208cd9ab1b | licenses ["MIT"] | count null | null / null
content:
# From https://stackoverflow.com/a/53301648/3180151
import time
import os
import psutil
import inspect
def elapsed_since(start):
# return time.strftime("%H:%M:%S", time.gmtime(time.time() - start))
elapsed = time.time() - start
if elapsed < 1:
return str(round(elapsed * 1000, 2)) + "ms"
if elapsed < 60:
return str(round(elapsed, 2)) + "s"
if elapsed < 3600:
return str(round(elapsed / 60, 2)) + "min"
else:
return str(round(elapsed / 3600, 2)) + "hrs"
def get_process_memory():
process = psutil.Process(os.getpid())
mi = process.memory_info()
return mi.rss, mi.vms
def format_bytes(bytes):
if abs(bytes) < 1000:
return str(bytes) + "B"
elif abs(bytes) < 1e6:
return str(round(bytes / 1e3, 2)) + "kB"
elif abs(bytes) < 1e9:
return str(round(bytes / 1e6, 2)) + "MB"
else:
return str(round(bytes / 1e9, 2)) + "GB"
def profile(func, *args, **kwargs):
def wrapper(*args, **kwargs):
rss_before, vms_before = get_process_memory()
start = time.time()
result = func(*args, **kwargs)
elapsed_time = elapsed_since(start)
rss_after, vms_after = get_process_memory()
print("Profiling: {:>20} RSS: {:>8} | VMS: {:>8} | time: {:>8}"
.format("<" + func.__name__ + ">",
format_bytes(rss_after - rss_before),
format_bytes(vms_after - vms_before),
elapsed_time))
return result
if inspect.isfunction(func):
return wrapper
elif inspect.ismethod(func):
return wrapper(*args, **kwargs)
avg_line_length 29.553571 | max_line_length 72 | alphanum_fraction 0.575831
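The `profile` function above is meant to be used as a decorator; it prints the RSS/VMS memory deltas and elapsed time of one call. A minimal usage sketch follows, where `build_list` and the printed figures are illustrative assumptions (the real numbers depend on the machine), and `psutil` must be installed for the decorator itself to work.

```python
# Hypothetical use of the profile decorator defined in the file above.
@profile
def build_list(n):                      # made-up example workload
    return [i * i for i in range(n)]

squares = build_list(1_000_000)
# prints something like:
# Profiling:         <build_list> RSS:  38.52MB | VMS:  38.61MB | time:  64.21ms
```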
hexsha eb01a51f3e425152288180a08a51ab2f1f668888 | size 1,771 | ext py | lang Python
max_stars:  path support/ComputeTableSizes.py | repo abolz/Json1 | head 704cc4f4f4b5cad302afdc8193fc72d0c5a4f1b6 | licenses ["MIT"] | count 4 | 2019-01-21T03:52:03.000Z / 2019-02-11T02:46:18.000Z
max_issues: path support/ComputeTableSizes.py | repo abolz/Json1 | head 704cc4f4f4b5cad302afdc8193fc72d0c5a4f1b6 | licenses ["MIT"] | count null | null / null
max_forks:  path support/ComputeTableSizes.py | repo abolz/Json1 | head 704cc4f4f4b5cad302afdc8193fc72d0c5a4f1b6 | licenses ["MIT"] | count 2 | 2018-07-06T21:37:18.000Z / 2021-12-08T01:29:11.000Z
content:
# Translated from:
# https://github.com/ulfjack/ryu/blob/c9c3fb19791c44fbe35701ad3b8ca4dc0977eb08/src/main/java/info/adams/ryu/analysis/ComputeTableSizes.java
# Copyright 2018 Ulf Adams
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def FloorLog10Pow2(e):
return (e * 315653) >> 20
def FloorLog10Pow5(e):
return (e * 732923) >> 20
def ComputeTableSizes(exponentBits, mantissaBits):
bias = 2**(exponentBits - 1) - 1
minE2 = 1 - bias - mantissaBits - 2
maxE2 = (2**exponentBits - 2) - bias - mantissaBits - 2
posE2maxQ = max(0, FloorLog10Pow2(maxE2) - 1)
negE2maxQ = max(0, FloorLog10Pow5(-minE2) - 1)
maxNegExp = posE2maxQ
maxPosExp = -minE2 - negE2maxQ
return [maxNegExp, maxPosExp]
#-------------------------------------------------------------------------------
#
#-------------------------------------------------------------------------------
# float16: [0, 9]
b = ComputeTableSizes(5, 10)
print(b)
# float32: [29, 47]
b = ComputeTableSizes(8, 23)
print(b)
# float64: [290, 325]
b = ComputeTableSizes(11, 52)
print(b)
# float80: [4911, 4953]
b = ComputeTableSizes(15, 63)
print(b)
# float128: [4896, 4967]
b = ComputeTableSizes(15, 112)
print(b)
# float256: [78840, 78986]
b = ComputeTableSizes(19, 236)
print(b)
avg_line_length 27.246154 | max_line_length 139 | alphanum_fraction 0.643704
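The two `FloorLog10*` helpers above compute floor(log10(2^e)) and floor(log10(5^e)) with an integer multiply-and-shift: 315653 / 2^20 is close to log10(2) and 732923 / 2^20 is close to log10(5). A small sanity check, added here for illustration and not part of the repository, compares them with `math.log10` over a modest exponent range.

```python
# Editor-added sanity check of the shift-based floor-log10 helpers above.
import math

def FloorLog10Pow2(e):
    return (e * 315653) >> 20      # 315653 / 2**20 ~ log10(2)

def FloorLog10Pow5(e):
    return (e * 732923) >> 20      # 732923 / 2**20 ~ log10(5)

for e in range(1, 300):
    assert FloorLog10Pow2(e) == math.floor(e * math.log10(2.0))
    assert FloorLog10Pow5(e) == math.floor(e * math.log10(5.0))
print("shift-based approximations agree for 1 <= e < 300")
```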
hexsha 0f41cb8acabb173f4ee15757a6e47780e0492e3e | size 1,603 | ext py | lang Python
max_stars:  path src/provisioning.py | repo sbarbett/ssp-sdk-python | head 03677bcd32f268f84b1e490a7d6c687ee33762e9 | licenses ["Apache-2.0"] | count null | null / null
max_issues: path src/provisioning.py | repo sbarbett/ssp-sdk-python | head 03677bcd32f268f84b1e490a7d6c687ee33762e9 | licenses ["Apache-2.0"] | count null | null / null
max_forks:  path src/provisioning.py | repo sbarbett/ssp-sdk-python | head 03677bcd32f268f84b1e490a7d6c687ee33762e9 | licenses ["Apache-2.0"] | count null | null / null
content:
# Copyright 2017 NeuStar, Inc.All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .networks import Networks
from .vips import Vips
from .subnets import Subnets
from .categories import Categories
from .blacklists import Blacklists
# from .whitelists import Whitelists
# from .policies import Policies
# from .messages import Messages
class Provisioning:
def __init__(self, connection):
self.connection = connection
self.base_uri = "/rdns/provisioning/v1"
def networks(self):
"""Create a Network object."""
return Networks(self.connection, self.base_uri)
def vips(self):
return Vips(self.connection, self.base_uri)
def subnets(self):
return Subnets(self.connection, self.base_uri)
def categories(self):
return Categories(self.connection, self.base_uri)
def blacklists(self):
return Blacklists(self.connection, self.base_uri)
# def whitelists(self):
# return Whitelists(self.connection, self.base_uri)
# def policies(self):
# return Policies(self.connection, self.base_uri)
# def messages(self):
# return Messages(self.connection, self.base_uri)
avg_line_length 30.245283 | max_line_length 74 | alphanum_fraction 0.763568
hexsha 2b0645f4bb16d4421b81b21e768a9b5f4a7a5e05 | size 1,292 | ext py | lang Python
max_stars:  path loadcfs.py | repo bwright8/csproject | head d9b758f57cadde8c1803dc4e3e32cb9166b21c99 | licenses ["OLDAP-2.4", "OLDAP-2.8"] | count null | null / null
max_issues: path loadcfs.py | repo bwright8/csproject | head d9b758f57cadde8c1803dc4e3e32cb9166b21c99 | licenses ["OLDAP-2.4", "OLDAP-2.8"] | count null | null / null
max_forks:  path loadcfs.py | repo bwright8/csproject | head d9b758f57cadde8c1803dc4e3e32cb9166b21c99 | licenses ["OLDAP-2.4", "OLDAP-2.8"] | count null | null / null
content:
import numpy
from sklearn.svm import LinearSVC
from sklearn.naive_bayes import GaussianNB
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score
from learnermodv2 import *
from joblib import dump,load
gnb = GaussianNB()
nb_classifier = gnb.fit(Xp,y)
wsvcs = []
def lsvc():
for i in range(10):
wsvcs.append(numpy.loadtxt("wsv"+str(i)+".csv"))
lsvc()
wlrcs = []
def llrc():
for i in range(10):
wlrcs.append(numpy.loadtxt("lr"+str(i)+".csv"))
llrc()
def digit_selector(datapoint,digitclassifiers):
nmax = -10000
kmax = 0
for i in range(10):
p = datapoint@digitclassifiers[i]
if(p > nmax):
nmax = p
kmax = i
return(kmax)
wrongsv = 0
wronglr = 0
wrongnb = 0
CAP = n_train
#CAP = 5000
clf = MLPClassifier(max_iter = 2000,solver = 'lbfgs',alpha = .00001,hidden_layer_sizes = (546,210,126),random_state = 1)
clf.fit(Xp[0:CAP,:],y[0:CAP])
"""
for i in range(n_test):
if(digit_selector(Xpt[i],wlrcs) != y_test[i]):
wronglr += 1
if(digit_selector(Xpt[i],wsvcs) != y_test[i]):
wrongsv += 1
if(nb_classifier.predict(Xpt[i])!= y_test[i]):
wrongnb += 1
"""
yp = clf.predict(Xpt)
dump(clf,'nn.joblib')
print(accuracy_score(y_test,yp))
avg_line_length 21.533333 | max_line_length 120 | alphanum_fraction 0.641641
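In the script above, `digit_selector` picks the digit whose linear one-vs-rest scorer gives the highest dot product with the input vector. The following self-contained demo uses synthetic random weights instead of the `wsv*.csv` / `lr*.csv` weight files the script actually loads, purely to illustrate the argmax rule.

```python
# Synthetic illustration of the digit_selector argmax rule used above.
import numpy

def digit_selector(datapoint, digitclassifiers):
    nmax = -10000
    kmax = 0
    for i in range(10):
        p = datapoint @ digitclassifiers[i]   # linear score for digit i
        if p > nmax:
            nmax = p
            kmax = i
    return kmax

rng = numpy.random.default_rng(0)
weights = [rng.standard_normal(784) for _ in range(10)]   # stand-in weight vectors
x = rng.standard_normal(784)                              # stand-in feature vector
print(digit_selector(x, weights))                         # index of highest-scoring digit
```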
hexsha 6f9a5e08966b85b456bbd2e3ba865ddabc92b424 | size 499 | ext py | lang Python
max_stars:  path flake8_codes/wemake_python_styleguide/violations/keep_till_first_div.py | repo flake8-codes/flake8-codes.github.io | head 7af252692e05b8448e121d2734259271bb76f247 | licenses ["MIT"] | count 2 | 2021-12-10T20:57:06.000Z / 2022-03-04T20:16:17.000Z
max_issues: path flake8_codes/wemake_python_styleguide/violations/keep_till_first_div.py | repo octadocs/flake8.codes | head 7af252692e05b8448e121d2734259271bb76f247 | licenses ["MIT"] | count 11 | 2021-09-11T13:45:32.000Z / 2021-09-24T01:12:48.000Z
max_forks:  path flake8_codes/wemake_python_styleguide/violations/keep_till_first_div.py | repo octadocs/flake8-codes | head 7af252692e05b8448e121d2734259271bb76f247 | licenses ["MIT"] | count null | null / null
content:
from itertools import takewhile
def keep_till_first_div(text: str) -> str:
"""
Keep text content till the first div element.
This is useful for WPS violation modules docstrings, where first goes a
piece of useful text and then Sphinx autodoc tags which are converted to
<div>'s by pandoc.
"""
lines = text.strip().split('\n')
filtered_lines = takewhile(
lambda line: not line.startswith('<div'),
lines,
)
return '\n'.join(filtered_lines)
avg_line_length 24.95 | max_line_length 76 | alphanum_fraction 0.663327
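A short call, added here with a made-up docstring, shows what `keep_till_first_div` keeps and what it drops.

```python
# Usage example for keep_till_first_div (function restated from the file above;
# the sample text is invented).
from itertools import takewhile

def keep_till_first_div(text: str) -> str:
    lines = text.strip().split('\n')
    return '\n'.join(takewhile(lambda line: not line.startswith('<div'), lines))

doc = """Forbid wildcard imports.
They pollute the namespace.
<div class="admonition">Sphinx autodoc output converted by pandoc</div>"""

print(keep_till_first_div(doc))
# Forbid wildcard imports.
# They pollute the namespace.
```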
hexsha 88e1b44cf37ea4aab7010c734730a1d5d7b5ceb3 | size 1,666 | ext py | lang Python
max_stars:  path aispace/layers/activations.py | repo SmileGoat/AiSpace | head 35fc120667e4263c99b300815e0bf018f5064a40 | licenses ["Apache-2.0"] | count 32 | 2020-01-16T07:59:03.000Z / 2022-03-31T09:24:00.000Z
max_issues: path aispace/layers/activations.py | repo SmileGoat/AiSpace | head 35fc120667e4263c99b300815e0bf018f5064a40 | licenses ["Apache-2.0"] | count 9 | 2020-06-05T03:27:06.000Z / 2022-03-12T01:00:17.000Z
max_forks:  path aispace/layers/activations.py | repo SmileGoat/AiSpace | head 35fc120667e4263c99b300815e0bf018f5064a40 | licenses ["Apache-2.0"] | count 3 | 2020-06-09T02:22:50.000Z / 2021-07-19T06:07:07.000Z
content:
# -*- coding: utf-8 -*-
# @Time : 2019-11-05 14:04
# @Author : yingyuankai
# @Email : yingyuankai@aliyun.com
# @File : activations.py
import math
import numpy as np
import tensorflow as tf
__all__ = [
"gelu",
"gelu_new",
"swish",
"ACT2FN"
]
def gelu(x):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
:param input:
:return:
"""
# cdf = 0.5 * (1.0 + tf.math.erf(x / tf.math.sqrt(2.0)))
cdf = 0.5 * (1.0 + tf.tanh(
(math.sqrt(2 / math.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf
def gelu_new(x):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
x: float Tensor to perform activation.
Returns:
`x` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tf.tanh(
(np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf
def swish(x):
return x * tf.sigmoid(x)
ACT2FN = {
"gelu": tf.keras.layers.Activation(gelu),
"swish": tf.keras.layers.Activation(swish),
"gelu_new": tf.keras.layers.Activation(gelu_new),
"elu": tf.keras.activations.elu,
"hard_sigmoid": tf.keras.activations.hard_sigmoid,
"linear": tf.keras.activations.linear,
"relu": tf.keras.activations.relu,
"selu": tf.keras.activations.selu,
"sigmoid": tf.keras.activations.sigmoid,
"softmax": tf.keras.activations.softmax,
"softplus": tf.keras.activations.softplus,
"softsign": tf.keras.activations.softsign,
"tanh": tf.keras.activations.tanh
}
avg_line_length 25.242424 | max_line_length 66 | alphanum_fraction 0.614046
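Both `gelu` variants above use the tanh approximation of the Gaussian CDF; the commented-out line shows the exact erf form. A small editor-added comparison (TensorFlow required, printed values approximate) illustrates how close the two are.

```python
# Editor-added comparison of the tanh-approximated GELU above with the erf form.
import math
import tensorflow as tf

def gelu(x):
    cdf = 0.5 * (1.0 + tf.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * tf.pow(x, 3))))
    return x * cdf

x = tf.constant([-2.0, -0.5, 0.0, 0.5, 2.0])
exact = x * 0.5 * (1.0 + tf.math.erf(x / tf.math.sqrt(2.0)))
print(gelu(x).numpy())    # approximately [-0.0454, -0.1543, 0.0, 0.3457, 1.9546]
print(exact.numpy())      # nearly identical values
```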
hexsha 3865ad65465bafc2067f580a060cc8b51d63b75e | size 2,298 | ext py | lang Python
max_stars:  path tests/test_offchain_jws.py | repo firstdag/client-sdk-python | head fbdbc155fbe304ca22f2123b70fc6c988f57e81e | licenses ["Apache-2.0"] | count null | null / null
max_issues: path tests/test_offchain_jws.py | repo firstdag/client-sdk-python | head fbdbc155fbe304ca22f2123b70fc6c988f57e81e | licenses ["Apache-2.0"] | count null | null / null
max_forks:  path tests/test_offchain_jws.py | repo firstdag/client-sdk-python | head fbdbc155fbe304ca22f2123b70fc6c988f57e81e | licenses ["Apache-2.0"] | count null | null / null
content:
# Copyright (c) The Diem Core Contributors
# SPDX-License-Identifier: Apache-2.0
from diem import offchain, LocalAccount
from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PublicKey
import cryptography, pytest
def test_serialize_deserialize():
account = LocalAccount.generate()
response = offchain.CommandResponseObject(
status=offchain.CommandResponseStatus.success,
cid="3185027f05746f5526683a38fdb5de98",
)
ret = offchain.jws.serialize(response, account.private_key.sign)
resp = offchain.jws.deserialize(
ret,
offchain.CommandResponseObject,
account.private_key.public_key().verify,
)
assert resp == response
def test_deserialize_error_if_not_3_parts():
with pytest.raises(ValueError):
offchain.jws.deserialize(
b".".join([b"header", b"payload"]),
offchain.CommandResponseObject,
lambda: None,
)
def test_deserialize_error_for_mismatched_protected_header():
with pytest.raises(ValueError):
offchain.jws.deserialize(
b".".join([b"header", b"payload", b"sig"]),
offchain.CommandResponseObject,
lambda: None,
)
def test_deserialize_error_for_invalid_signature():
account = LocalAccount.generate()
response = offchain.CommandResponseObject(
status=offchain.CommandResponseStatus.success,
cid="3185027f05746f5526683a38fdb5de98",
)
data = offchain.jws.serialize(response, account.private_key.sign)
account2 = LocalAccount.generate()
with pytest.raises(cryptography.exceptions.InvalidSignature):
offchain.jws.deserialize(
data,
offchain.CommandResponseObject,
account2.private_key.public_key().verify,
)
def test_deserialize_example_jws():
example = "eyJhbGciOiJFZERTQSJ9.U2FtcGxlIHNpZ25lZCBwYXlsb2FkLg.dZvbycl2Jkl3H7NmQzL6P0_lDEW42s9FrZ8z-hXkLqYyxNq8yOlDjlP9wh3wyop5MU2sIOYvay-laBmpdW6OBQ"
public_key = "bd47e3e7afb94debbd82e10ab7d410a885b589db49138628562ac2ec85726129"
body, sig, msg = offchain.jws.deserialize_string(example.encode("utf-8"))
assert body == "Sample signed payload."
key = Ed25519PublicKey.from_public_bytes(bytes.fromhex(public_key))
key.verify(sig, msg)
avg_line_length 33.794118 | max_line_length 154 | alphanum_fraction 0.721062
hexsha 0f511cadf9687ba0b8bc8273998e0ccb4095253f | size 1,160 | ext py | lang Python
max_stars:  path setup.py | repo trojsten/submit | head 1f2da17422b0fc89daa996dcc55866fc52b54d63 | licenses ["BSD-3-Clause"] | count 1 | 2016-12-15T17:56:31.000Z / 2016-12-15T17:56:31.000Z
max_issues: path setup.py | repo trojsten/submit | head 1f2da17422b0fc89daa996dcc55866fc52b54d63 | licenses ["BSD-3-Clause"] | count 23 | 2017-01-09T10:57:04.000Z / 2021-06-10T19:29:33.000Z
max_forks:  path setup.py | repo maaario/submit | head 1f2da17422b0fc89daa996dcc55866fc52b54d63 | licenses ["BSD-3-Clause"] | count null | null / null
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='django-trojsten-submit',
packages=[
'submit',
],
version='0.1.0',
description='Django app for storing submits and reviews, used in Trojsten seminary web apps',
author='Mário Lipovský',
author_email='mario.lipovsky@trojsten.sk',
url='https://github.com/trojsten/submit',
include_package_data=True,
install_requires=[
'Django>=1.9',
'django-bootstrap-form>=3.2',
'django-sendfile>=0.3.10',
'Unidecode>=0.4.19',
'djangorestframework>=3.5.4',
],
keywords = ['submit', 'review'],
classifiers = [
'Development Status :: 3 - Alpha',
'Framework :: Django',
'Framework :: Django :: 1.9',
'Intended Audience :: Developers',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
],
license='BSD',
)
avg_line_length 29 | max_line_length 97 | alphanum_fraction 0.589655
hexsha 392d0a120782bbc8ae5de9ae89a0d03e598b7a10 | size 12,400 | ext py | lang Python
max_stars:  path otrkeydecode/otrkeydecode.py | repo omza/otrkeydecode | head 236f7e7a5a5854d578c4e339d447e49a15e95543 | licenses ["MIT"] | count 1 | 2018-10-28T20:46:57.000Z / 2018-10-28T20:46:57.000Z
max_issues: path otrkeydecode/otrkeydecode.py | repo mstroehle/otrkeydecode | head 236f7e7a5a5854d578c4e339d447e49a15e95543 | licenses ["MIT"] | count null | null / null
max_forks:  path otrkeydecode/otrkeydecode.py | repo mstroehle/otrkeydecode | head 236f7e7a5a5854d578c4e339d447e49a15e95543 | licenses ["MIT"] | count 1 | 2018-10-28T20:46:28.000Z / 2018-10-28T20:46:28.000Z
content:
import subprocess
import ftplib
from datetime import datetime, timedelta
import os, fnmatch
from sys import stdout, stderr
import logging
import logging.handlers
import signal
import urllib.request
import configparser
import re
""" helper """
def safe_cast(val, to_type, default=None):
try:
result = None
if val is None:
result = default
else:
if to_type is bool:
result = str(val).lower() in ("yes", "true", "t", "1")
else:
result = to_type(val)
return result
except (ValueError, TypeError):
return default
stopsignal = False
def handler_stop_signals(signum, frame):
global stopsignal
stopsignal = True
signal.signal(signal.SIGINT, handler_stop_signals)
signal.signal(signal.SIGTERM, handler_stop_signals)
""" Logging Configuration """
log = logging.getLogger('otrkeydecode')
def config_logger(log, loglevel):
formatter = logging.Formatter('%(asctime)s | %(name)s:%(lineno)d | %(funcName)s | %(levelname)s | %(message)s')
consolehandler = logging.StreamHandler(stdout)
consolehandler.setFormatter(formatter)
consolehandler.setLevel(loglevel)
logfilename = '/usr/log/otrkeydecoder.log'
filehandler = logging.handlers.RotatingFileHandler(logfilename, 10240, 5)
filehandler.setFormatter(formatter)
filehandler.setLevel(loglevel)
log.setLevel(loglevel)
log.addHandler(consolehandler)
log.addHandler(filehandler)
""" Main configuration """
def config_module():
config = {}
config['source_path'] = '/usr/otrkey/'
config['loglevel'] = safe_cast(os.environ.get('LOG_LEVEL'), str, 'INFO')
config['otrdecoder_executable'] = 'otrdecoder'
config['otr_user'] = safe_cast(os.environ.get('OTR_USER'), str, 'x@y.z')
config['otr_pass'] = safe_cast(os.environ.get('OTR_PASS'), str, 'supersecret')
config['waitseconds'] = safe_cast(os.environ.get('DECODE_INTERVAL'),int, 3600)
config['use_subfolders'] = safe_cast(os.environ.get('USE_SUBFOLDERS'), bool, False)
config['use_cutlists'] = safe_cast(os.environ.get('USE_CUTLIST'), bool, False)
config['temp_path'] = '/tmp/'
config['ftp_user'] = safe_cast(os.environ.get('FTP_USER'), str, 'x@y.z')
config['ftp_pass'] = safe_cast(os.environ.get('FTP_PASS'), str, 'supersecret')
config['ftp_server'] = safe_cast(os.environ.get('FTP_SERVER'), str, 'ftp.something.com')
config['ftp_port'] = safe_cast(os.environ.get('FTP_PORT'), int, 21)
config['ftp_path'] = safe_cast(os.environ.get('FTP_PATH'), str, '/')
return config
""" class otrkey logic """
class otrkey():
""" class to handle otrkey files """
def get_cutlist(self):
log.debug('retrieve cutlist for {} file!'.format(self.source_file))
try:
cutlist_file = None
""" Is cutlist already in tmp folder ? """
pattern = str(self.video_file).split('_TVOON_')[0] + '*.cutlist'
match = fnmatch.filter(os.listdir(self.temp_path), pattern)
for file in match:
cutlist_file = os.path.join(self.temp_path, file)
log.info('Already existing cutlist: {}'.format(cutlist_file))
break
if cutlist_file is None:
""" download list of cutlists into string """
url = 'http://www.onlinetvrecorder.com/getcutlistini.php?filename=' + self.source_file
response = urllib.request.urlopen(url)
content = str(response.read().decode('utf-8', 'ignore'))
""" parse list of cutlists """
cutlists = configparser.ConfigParser(strict=False, allow_no_value=True)
cutlists.read_string(content)
""" download the first cutlist to file in /tmp """
if cutlists.has_option('FILE1','filename'):
curlist_url = cutlists.get('FILE1','filename')
cutlist_file = os.path.join(self.temp_path, os.path.basename(curlist_url))
urllib.request.urlretrieve(curlist_url, cutlist_file)
""" success """
log.info('downloaded cutlist to {}...'.format(cutlist_file))
else:
log.info('no cutlist for {} file!'.format(self.source_file))
return cutlist_file
except:
log.exception('Exception Traceback:')
return None
def cwd_subfolder(self, ftp):
""" change ftp folder to an subfolder if exists """
log.debug('change ftp folder to an subfolder if exists...'.format(self.source_file))
try:
""" retrieve directories in ftp folder """
items = []
ftp.retrlines('LIST', items.append )
items = map( str.split, items )
dirlist = [ item.pop() for item in items if item[0][0] == 'd' ]
fileparts = self.source_file.split('_')
if ('_' + fileparts[0] in dirlist):
ftp.cwd('_' + fileparts[0])
return True
if (fileparts[0] in dirlist):
ftp.cwd(fileparts[0])
return True
if self.source_file[0] in ['0', '1', '2', '3','4','5','6','7','8','9']:
subdir = '_1-9'
elif self.source_file[0].upper() in ['I', 'J']:
subdir = '_I-J'
elif self.source_file[0].upper() in ['N', 'O']:
subdir = '_N-O'
elif self.source_file[0].upper() in ['P', 'Q']:
subdir = '_P-Q'
elif self.source_file[0].upper() in ['U', 'V', 'W', 'X', 'Y', 'Z']:
subdir = '_U-Z'
else:
subdir = '_' + self.source_file[0].upper()
if (subdir not in dirlist):
ftp.mkd(subdir)
log.debug("folder does not exitst, ftp.mkd: " + self.video_subfolder)
ftp.cwd(subdir)
return True
except:
log.exception('Exception Traceback:')
return False
def decode(self):
""" decode file ------------------------------------------------------------"""
if not self.decoded:
log.debug('try to decode {} with cutlist {!s}'.format(self.source_fullpath, self.cutlist_fullpath))
try:
if os.path.exists(self.video_temp_fullpath):
log.info('Already decoded in former session: {!s}.'.format(self.video_temp_fullpath))
self.decoded = True
else:
call = self.otrdecoder_executable + ' -i ' + self.source_fullpath + ' -o ' + self.temp_path + ' -e ' + self.otr_user + ' -p ' + self.otr_pass + ' -f'
if self.use_cutlists:
self.cutlist_fullpath = self.get_cutlist()
if not self.cutlist_fullpath is None:
call = call + ' -C ' + self.cutlist_fullpath
log.debug('decode call: {} !'.format(call))
process = subprocess.Popen(call, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
process.wait()
""" decoding successful ? """
if process.returncode != 0:
log.error('decoding failed with code {!s} and output {!s}'.format(process.returncode, process.stderr.read()))
else:
log.info('Decoding succesfull with returncode {!s}.'.format(process.returncode))
self.decoded = True
except:
log.exception('Exception Traceback:')
def move(self):
""" move decoded videofile to ftp destination """
if not self.moved and self.decoded:
log.debug('try to move {} to ftp.//{}'.format(self.video_temp_fullpath, self.ftp_server))
try:
""" login to ftp server """
ftp = ftplib.FTP()
if self.loglevel == 'DEBUG':
ftp.set_debuglevel = 2
else:
ftp.set_debuglevel = 1
ftp.connect(self.ftp_server, self.ftp_port)
ftp.login(user=self.ftp_user, passwd=self.ftp_pass)
""" check fpt_path exist ? """
ftp.cwd(self.ftp_path)
""" make subfolder if not exists """
if self.cwd_subfolder(ftp):
""" move file """
ftp.storbinary('STOR ' + self.video_file, open(self.video_temp_fullpath, 'rb'))
self.moved = True
log.info('{} successfully moved to ftp {}'.format(self.video_file, self.ftp_server))
""" logout ftp session """
ftp.quit()
except ftplib.all_errors as e:
log.error('Error in ftp session ({!s}:{!s}) = {!s}'.format(self.ftp_server, self.ftp_port, e))
def __init__(self, otrkey_file, data):
""" parse data dictionary into instance var """
for key, value in data.items():
if (not key in vars(self)):
setattr(self, key, value)
""" initiate instance members """
self.source_file = otrkey_file
self.source_fullpath = os.path.join(self.source_path, self.source_file)
self.cutlist_fullpath = None
self.video_file = os.path.splitext(os.path.basename(self.source_file))[0]
self.video_temp_fullpath = os.path.join(self.temp_path, self.video_file)
self.decoded = False
self.moved = False
log.info('operate {}'.format(self.video_file))
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
""" clean up files """
if self.moved:
log.debug('try cleanup {}'.format(self.source_file))
try:
pattern = str(self.video_file).split('_TVOON_')[0] + '*.cutlist'
match = fnmatch.filter(os.listdir(self.temp_path), pattern)
for file in match:
os.remove(os.path.join(self.temp_path, file))
if not self.video_temp_fullpath is None:
if os.path.exists(self.video_temp_fullpath):
os.remove(self.video_temp_fullpath)
if not self.source_fullpath is None:
if os.path.exists(self.source_fullpath):
os.remove(self.source_fullpath)
log.info('cleanup successful for {}'.format(self.source_file))
except:
log.exception('exception on {!s}'.format(__name__))
""" Main """
def main():
""" configuration """
config = config_module()
config_logger(log, config['loglevel'])
nextrun = datetime.utcnow()
log.info('otrkey decoder start main....')
""" log configuration in debug mode """
if config['loglevel'] == 'DEBUG':
for key, value in config.items():
log.debug('otrkeydecoder configuration: {} = {!s}'.format(key, value))
""" run until stopsignal """
while not stopsignal:
if (datetime.utcnow() >= nextrun):
""" loop all *.otrkey files in sourcefolder/volume """
log.info('run {!s}'.format(__name__))
for file in os.listdir(config['source_path']):
if file.endswith(".otrkey"):
log.info('try...{!s}'.format(file))
with otrkey(file, config) as otrkey_file:
otrkey_file.decode()
otrkey_file.move()
nextrun = datetime.utcnow() + timedelta(seconds=config['waitseconds'])
log.info('next runtime in {!s} seconds at {!s}'.format(config['waitseconds'], nextrun))
""" goodby """
log.info('otrkey decoder main terminated. Goodby!')
""" run main if not imported """
if __name__ == '__main__':
main()
avg_line_length 36.470588 | max_line_length 169 | alphanum_fraction 0.539758
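Almost every configuration value in the file above goes through `safe_cast`, which maps missing or unparsable environment variables to a default and treats "yes"/"true"/"t"/"1" as boolean true. A few illustrative calls follow; the helper is restated here in condensed form so the snippet runs on its own.

```python
# Condensed restatement of safe_cast from the file above, plus example calls.
def safe_cast(val, to_type, default=None):
    try:
        if val is None:
            return default
        if to_type is bool:
            return str(val).lower() in ("yes", "true", "t", "1")
        return to_type(val)
    except (ValueError, TypeError):
        return default

print(safe_cast("3600", int, 3600))    # 3600
print(safe_cast("True", bool, False))  # True
print(safe_cast(None, str, "x@y.z"))   # 'x@y.z'  (unset env var -> default)
print(safe_cast("abc", int, 42))       # 42       (unparsable value -> default)
```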
hexsha 1dd31c50d9e2c83448fd6c432382999961fc350f | size 11,900 | ext py | lang Python
max_stars:  path src/the_tale/the_tale/game/heroes/tests/test_requests.py | repo al-arz/the-tale | head 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5 | licenses ["BSD-3-Clause"] | count null | null / null
max_issues: path src/the_tale/the_tale/game/heroes/tests/test_requests.py | repo al-arz/the-tale | head 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5 | licenses ["BSD-3-Clause"] | count null | null / null
max_forks:  path src/the_tale/the_tale/game/heroes/tests/test_requests.py | repo al-arz/the-tale | head 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5 | licenses ["BSD-3-Clause"] | count null | null / null
content:
import smart_imports
smart_imports.all()
class HeroRequestsTestBase(utils_testcase.TestCase):
def setUp(self):
super(HeroRequestsTestBase, self).setUp()
game_logic.create_test_map()
self.account = self.accounts_factory.create_account()
self.storage = game_logic_storage.LogicStorage()
self.storage.load_account_data(self.account)
self.hero = self.storage.accounts_to_heroes[self.account.id]
self.request_login(self.account.email)
class HeroIndexRequestsTests(HeroRequestsTestBase):
def test_index(self):
chronicle_tt_services.chronicle.cmd_debug_clear_service()
response = self.request_html(utils_urls.url('game:heroes:'))
self.assertRedirects(response, '/', status_code=302, target_status_code=200)
class MyHeroRequestsTests(HeroRequestsTestBase):
def test_unloginned(self):
self.request_logout()
request_url = utils_urls.url('game:heroes:my-hero')
self.check_redirect(request_url, accounts_logic.login_page_url(request_url))
def test_redirect(self):
self.check_redirect(utils_urls.url('game:heroes:my-hero'), utils_urls.url('game:heroes:show', self.hero.id))
class HeroPageRequestsTests(HeroRequestsTestBase):
def setUp(self):
super(HeroPageRequestsTests, self).setUp()
def test_wrong_hero_id(self):
self.check_html_ok(self.request_html(utils_urls.url('game:heroes:show', 'dsdsd')), texts=[('heroes.hero.wrong_format', 1)])
def test_own_hero_page(self):
self.check_html_ok(self.request_html(utils_urls.url('game:heroes:show', self.hero.id)),
texts=(('pgf-health-percents', 2),
('pgf-experience-percents', 2),
('pgf-physic-power value', 1),
('pgf-magic-power value', 1),
('pgf-money', 1),
('"pgf-health"', 2),
('pgf-max-health', 2),
('pgf-choose-ability-button', 2),
('pgf-free-destiny-points', 7),
('pgf-no-destiny-points', 2),
('pgf-settings-container', 2),
('pgf-settings-tab-button', 2),
('pgf-moderation-container', 0),
'pgf-no-folclor'))
def test_other_hero_page(self):
texts = (('pgf-health-percents', 2),
('pgf-experience-percents', 0),
('pgf-physic-power value', 1),
('pgf-magic-power value', 1),
('pgf-money', 1),
('"pgf-health"', 2),
('pgf-max-health', 2),
('pgf-choose-ability-button', 0),
('pgf-no-destiny-points', 0),
('pgf-free-destiny-points', 1),
('pgf-settings-container', 0),
('pgf-settings-tab-button', 1),
('pgf-moderation-container', 0),
'pgf-no-folclor')
self.request_logout()
self.check_html_ok(self.request_html(utils_urls.url('game:heroes:show', self.hero.id)), texts=texts)
account_2 = self.accounts_factory.create_account()
self.request_login(account_2.email)
self.check_html_ok(self.request_html(utils_urls.url('game:heroes:show', self.hero.id)), texts=texts)
def test_folclor(self):
blogs_helpers.prepair_forum()
blogs_helpers.create_post_for_meta_object(self.accounts_factory.create_account(), 'folclor-1-caption', 'folclor-1-text',
meta_relations.Hero.create_from_object(self.hero))
blogs_helpers.create_post_for_meta_object(self.accounts_factory.create_account(), 'folclor-2-caption', 'folclor-2-text',
meta_relations.Hero.create_from_object(self.hero))
blogs_helpers.create_post_for_meta_object(self.accounts_factory.create_account(), 'folclor-3-caption', 'folclor-3-text',
meta_relations.Hero.create_from_object(self.hero))
self.check_html_ok(self.request_html(utils_urls.url('game:heroes:show', self.hero.id)), texts=(('pgf-no-folclor', 0), 'folclor-1-caption', 'folclor-2-caption', 'folclor-3-caption'))
def test_moderation_tab(self):
account_2 = self.accounts_factory.create_account()
self.request_login(account_2.email)
group = utils_permissions.sync_group('accounts moderators group', ['accounts.moderate_account'])
group.user_set.add(account_2._model)
self.check_html_ok(self.request_html(utils_urls.url('game:heroes:show', self.hero.id)), texts=['pgf-moderation-container'])
class ChangeHeroRequestsTests(HeroRequestsTestBase):
def test_hero_page(self):
self.check_html_ok(self.request_html(utils_urls.url('game:heroes:show', self.hero.id)), texts=[jinja2.escape(self.hero.name)])
def get_post_data(self,
name='новое имя',
gender=game_relations.GENDER.MALE,
race=game_relations.RACE.DWARF,
description='some description'):
data = {'gender': gender,
'race': race,
'description': description}
data.update(linguistics_helpers.get_word_post_data(game_names.generator().get_test_name(name=name), prefix='name'))
return data
def test_chane_hero_ownership(self):
account_2 = self.accounts_factory.create_account()
self.request_logout()
self.request_login(account_2.email)
self.check_ajax_error(self.client.post(utils_urls.url('game:heroes:change-hero', self.hero.id), self.get_post_data()),
'heroes.not_owner')
def test_change_hero_form_errors(self):
self.check_ajax_error(self.client.post(utils_urls.url('game:heroes:change-hero', self.hero.id), {}),
'heroes.change_name.form_errors')
def test_change_hero(self):
self.assertEqual(PostponedTask.objects.all().count(), 0)
response = self.client.post(utils_urls.url('game:heroes:change-hero', self.hero.id), self.get_post_data())
self.assertEqual(PostponedTask.objects.all().count(), 1)
task = PostponedTaskPrototype._db_get_object(0)
self.check_ajax_processing(response, task.status_url)
self.assertEqual(task.internal_logic.name, game_names.generator().get_test_name(name='новое имя', properties=[utg_relations.NUMBER.SINGULAR]))
self.assertEqual(task.internal_logic.gender, game_relations.GENDER.MALE)
self.assertEqual(task.internal_logic.race, game_relations.RACE.DWARF)
self.assertEqual(logic.get_hero_description(self.hero.id), 'some description')
def test_change_hero__ban_forum(self):
self.account.ban_forum(1)
self.account.save()
logic.set_hero_description(hero_id=self.hero.id, text='old description')
with self.check_not_changed(PostponedTask.objects.all().count):
response = self.client.post(utils_urls.url('game:heroes:change-hero', self.hero.id), self.get_post_data())
self.check_ajax_error(response, 'common.ban_forum')
with self.check_delta(PostponedTask.objects.all().count, 1):
response = self.client.post(utils_urls.url('game:heroes:change-hero', self.hero.id),
self.get_post_data(description='old description'))
task = PostponedTaskPrototype._db_get_object(0)
self.check_ajax_processing(response, task.status_url)
self.assertEqual(task.internal_logic.name,
game_names.generator().get_test_name(name='новое имя', properties=[utg_relations.NUMBER.SINGULAR]))
self.assertEqual(task.internal_logic.gender, game_relations.GENDER.MALE)
self.assertEqual(task.internal_logic.race, game_relations.RACE.DWARF)
self.assertEqual(logic.get_hero_description(self.hero.id), 'old description')
class ResetNameRequestsTests(HeroRequestsTestBase):
def setUp(self):
super(ResetNameRequestsTests, self).setUp()
self.account_2 = self.accounts_factory.create_account()
group = utils_permissions.sync_group('accounts moderators group', ['accounts.moderate_account'])
group.user_set.add(self.account_2._model)
self.request_login(self.account_2.email)
def test_chane_hero_moderation(self):
self.request_logout()
self.request_login(self.account.email)
self.check_ajax_error(self.client.post(utils_urls.url('game:heroes:reset-name', self.hero.id)), 'heroes.moderator_rights_required')
def test_change_hero(self):
self.hero.set_utg_name(game_names.generator().get_test_name('x'))
logic.save_hero(self.hero)
self.assertEqual(PostponedTask.objects.all().count(), 0)
response = self.client.post(utils_urls.url('game:heroes:reset-name', self.hero.id))
self.assertEqual(PostponedTask.objects.all().count(), 1)
task = PostponedTaskPrototype._db_get_object(0)
self.check_ajax_processing(response, task.status_url)
self.assertNotEqual(task.internal_logic.name, self.hero.name)
self.assertEqual(task.internal_logic.gender, self.hero.gender)
self.assertEqual(task.internal_logic.race, self.hero.race)
class ResetDescriptionRequestsTests(HeroRequestsTestBase):
def setUp(self):
super().setUp()
self.account_2 = self.accounts_factory.create_account()
group = utils_permissions.sync_group('accounts moderators group', ['accounts.moderate_account'])
group.user_set.add(self.account_2._model)
self.request_login(self.account_2.email)
def test_chane_hero_moderation(self):
self.request_logout()
self.request_login(self.account.email)
logic.set_hero_description(hero_id=self.hero.id, text='test-description')
self.check_ajax_error(self.client.post(utils_urls.url('game:heroes:reset-description', self.hero.id)),
'heroes.moderator_rights_required')
self.assertEqual(logic.get_hero_description(hero_id=self.hero.id), 'test-description')
def test_change_hero(self):
logic.set_hero_description(hero_id=self.hero.id, text='test-description')
self.check_ajax_ok(self.client.post(utils_urls.url('game:heroes:reset-description', self.hero.id)))
self.assertEqual(logic.get_hero_description(hero_id=self.hero.id), '')
class ForceSaveRequestsTests(HeroRequestsTestBase):
def setUp(self):
super(ForceSaveRequestsTests, self).setUp()
self.account_2 = self.accounts_factory.create_account()
group = utils_permissions.sync_group('accounts moderators group', ['accounts.moderate_account'])
group.user_set.add(self.account_2._model)
self.request_login(self.account_2.email)
def test_no_moderation_rights(self):
self.request_logout()
self.request_login(self.account.email)
with mock.patch('the_tale.game.workers.supervisor.Worker.cmd_force_save') as cmd_force_save:
self.check_ajax_error(self.client.post(utils_urls.url('game:heroes:force-save', self.hero.id)), 'heroes.moderator_rights_required')
self.assertEqual(cmd_force_save.call_args_list, [])
def test_force_save(self):
with mock.patch('the_tale.game.workers.supervisor.Worker.cmd_force_save') as cmd_force_save:
self.check_ajax_ok(self.client.post(utils_urls.url('game:heroes:force-save', self.hero.id)))
self.assertEqual(cmd_force_save.call_args_list, [mock.call(account_id=self.hero.account_id)])
avg_line_length 43.589744 | max_line_length 189 | alphanum_fraction 0.655798
hexsha f48ce1439d4087548a28d137d609190dba08449b | size 5,467 | ext py | lang Python
max_stars:  path buildscripts/BuildRoot/Configs.py | repo YuanYuLin/iopcbuilder | head 19537c9d651a7cd42432b9f6f6654c1ddeb0dbf0 | licenses ["Apache-2.0"] | count null | null / null
max_issues: path buildscripts/BuildRoot/Configs.py | repo YuanYuLin/iopcbuilder | head 19537c9d651a7cd42432b9f6f6654c1ddeb0dbf0 | licenses ["Apache-2.0"] | count null | null / null
max_forks:  path buildscripts/BuildRoot/Configs.py | repo YuanYuLin/iopcbuilder | head 19537c9d651a7cd42432b9f6f6654c1ddeb0dbf0 | licenses ["Apache-2.0"] | count null | null / null
content:
import os
import ConfigParser
from Singleton import Singleton
class Configs(Singleton):
flags = {
"IS_DEFAULT_ROOT_OVERRIDE" :True,
}
dicts = {
"BUILDROOT_TOP" :"",
"BUILDSCRIPT" :"",
"PROJECT_TOP" :"",
"OUTPUT_TOP" :"",
"OUTPUT_LINK" :"",
"PROJECT_CONFIG" :"",
"DEFAULT_CONFIG_BUILDROOT" :"",
"DEFAULT_CONFIG_BUSYBOX" :"",
"DEFAULT_CONFIG_LINUX" :"",
"DEFAULT_ROOTFS" :{},
"ROOTFS_SKELETON" :"",
"PROJECT_NAME" :"",
"DOWNLOAD_TOP" :"",
"OUTPUT_NAME" :"",
"METHOD_TABLE" :{},
}
'''
Begin
'''
def get_buildroot_top(self):
return self.dicts["BUILDROOT_TOP"]
def get_buildscript(self):
return self.dicts["BUILDSCRIPT"]
def get_project_top(self):
return self.dicts["PROJECT_TOP"]
def get_output_top(self):
return self.dicts["OUTPUT_TOP"]
def get_output_link(self):
return self.dicts["OUTPUT_LINK"]
def get_project_config(self):
return self.dicts["PROJECT_CONFIG"]
def get_project_name(self):
return self.dicts["PROJECT_NAME"]
def get_download_top(self):
return self.dicts["DOWNLOAD_TOP"]
def get_default_config_buildroot(self):
return self.dicts["DEFAULT_CONFIG_BUILDROOT"]
def get_default_config_busybox(self):
return self.dicts["DEFAULT_CONFIG_BUSYBOX"]
def get_default_config_linux(self):
return self.dicts["DEFAULT_CONFIG_LINUX"]
def get_default_rootfs(self):
return self.dicts["DEFAULT_ROOTFS"]
def get_rootfs_skeleton(self):
return self.dicts["ROOTFS_SKELETON"]
def is_default_rootfs_override(self):
return self.flags["IS_DEFAULT_ROOT_OVERRIDE"]
def get_method_table(self):
return self.dicts["METHOD_TABLE"]
def get_output_name(self):
return self.dicts["OUTPUT_NAME"]
'''
End
'''
def __default_rootfs_init(self, iniparser):
rootfs_list = {}
try:
''' Section DEFAULT_ROOTFS_OVERRIDE '''
rootfs_list = iniparser.items("DEFAULT_ROOTFS_OVERRIDE")
self.flags["IS_DEFAULT_ROOT_OVERRIDE"] = True
except ConfigParser.Error, e:
self.flags["IS_DEFAULT_ROOT_OVERRIDE"] = False
try:
''' Section DEFAULT_ROOTFS '''
rootfs_list = iniparser.items("DEFAULT_ROOTFS_OVERLAY")
self.flags["IS_DEFAULT_ROOT_OVERRIDE"] = False
except ConfigParser.Error, e:
self.flags["IS_DEFAULT_ROOT_OVERRIDE"] = True
return rootfs_list
def _constructor(self, buildscript, buildroot_top, project_config, output_name):
buildroot_folder = os.path.abspath(buildroot_top + os.sep + "..") + os.sep
self.dicts["OUTPUT_NAME"] = output_name
self.dicts["BUILDSCRIPT"] = os.path.abspath(buildscript)
self.dicts["BUILDROOT_TOP"] = os.path.abspath(buildroot_top)
self.dicts["PROJECT_CONFIG"] = os.path.abspath(project_config)
self.dicts["PROJECT_TOP"] = os.path.abspath(os.path.dirname(project_config))
self.dicts["OUTPUT_TOP"] = os.path.abspath(buildroot_top + os.sep + output_name)
self.dicts["OUTPUT_LINK"] = os.path.abspath(buildroot_folder + os.sep + ".." + os.sep + output_name)
try:
project_top = self.dicts["PROJECT_TOP"] + os.sep
iniparser = ConfigParser.ConfigParser()
iniparser.read(project_config)
''' Section PROJECT_INFO '''
self.dicts["PROJECT_NAME"] = iniparser.get("PROJECT_INFO", "PROJECT_NAME")
self.dicts["DOWNLOAD_TOP"] = buildroot_folder + iniparser.get("PROJECT_INFO", "DOWNLOAD_TOP")
''' Section DEFAULT_CONFIGS '''
self.dicts["DEFAULT_CONFIG_BUILDROOT"] = project_top + iniparser.get("DEFAULT_CONFIGS", "DEFAULT_CONFIG_BUILDROOT")
self.dicts["DEFAULT_CONFIG_BUSYBOX"] = project_top + iniparser.get("DEFAULT_CONFIGS", "DEFAULT_CONFIG_BUSYBOX")
self.dicts["DEFAULT_CONFIG_LINUX"] = project_top + iniparser.get("DEFAULT_CONFIGS", "DEFAULT_CONFIG_LINUX")
''' Set DEFAULT_ROOTFS'''
rootfs_list = {}
for root in self.__default_rootfs_init(iniparser):
name = root[0].upper()
value = root[1]
rootfs_list[name] = project_top + value
self.dicts["DEFAULT_ROOTFS"] = rootfs_list
self.dicts["ROOTFS_SKELETON"] = self.dicts["OUTPUT_TOP"] + os.sep + "rootfs_skeleton"
''' Section METHOD_TABLE '''
self.dicts["METHOD_TABLE"] = iniparser.items("METHOD_TABLE")
supported_method_in_project = {}
for method in self.dicts["METHOD_TABLE"]:
name = method[0].upper()
value = method[1]
if value == 'YES':
supported_method_in_project[name] = value
self.dicts["METHOD_TABLE"] = supported_method_in_project
except ConfigParser.Error, e:
print e
def __init__(self, buildscript, buildroot_top, project_config, output_name):
self._constructor(buildscript, buildroot_top, project_config, output_name)
def get_configs(self):
return self.dicts
def get_config(self, key):
return self.dicts[key]
def get_config_keys(self):
return self.dicts.keys()
avg_line_length 33.746914 | max_line_length 127 | alphanum_fraction 0.622279
hexsha 9d63951b2fe40b69472b2f6ad33c116fbcc2d8a4 | size 36 | ext py | lang Python
max_stars:  path PyYouTube/__init__.py | repo Soebb/PyYouTube | head 125b9f63b677eeece9d95790505a3818ed26c94d | licenses ["MIT"] | count 1 | 2021-08-09T11:44:34.000Z / 2021-08-09T11:44:34.000Z
max_issues: path PyYouTube/__init__.py | repo Soebb/PyYouTube | head 125b9f63b677eeece9d95790505a3818ed26c94d | licenses ["MIT"] | count null | null / null
max_forks:  path PyYouTube/__init__.py | repo Soebb/PyYouTube | head 125b9f63b677eeece9d95790505a3818ed26c94d | licenses ["MIT"] | count null | null / null
content:
from .pyyoutube import Data ,Search
avg_line_length 18 | max_line_length 35 | alphanum_fraction 0.805556
hexsha 1388c2d3b9334e24c2029538e15c1d2acca04585 | size 114 | ext py | lang Python
max_stars:  path variables.py | repo P33ry/mudpi-core | head 28cc009b368c230f6062382085db5a1aee65b430 | licenses ["MIT"] | count null | null / null
max_issues: path variables.py | repo P33ry/mudpi-core | head 28cc009b368c230f6062382085db5a1aee65b430 | licenses ["MIT"] | count 1 | 2020-11-10T12:53:04.000Z / 2020-11-10T12:53:04.000Z
max_forks:  path variables.py | repo P33ry/mudpi-core | head 28cc009b368c230f6062382085db5a1aee65b430 | licenses ["MIT"] | count 1 | 2020-11-06T07:28:15.000Z / 2020-11-06T07:28:15.000Z
content:
PREVIOUS_LINE="\x1b[1F"
RED_BACK="\x1b[41;37m"
GREEN_BACK="\x1b[42;30m"
YELLOW_BACK="\x1b[43;30m"
RESET="\x1b[0m"
avg_line_length 19 | max_line_length 25 | alphanum_fraction 0.710526
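The constants above are raw ANSI/VT100 escape sequences: `PREVIOUS_LINE` moves the cursor to the start of the previous line, and the `*_BACK` strings select background/foreground colour pairs. A tiny editor-added demo (any VT100-compatible terminal) shows the intended use.

```python
# Demo of the ANSI colour constants defined above (terminal must support VT100).
RED_BACK = "\x1b[41;37m"     # white text on red background
GREEN_BACK = "\x1b[42;30m"   # black text on green background
RESET = "\x1b[0m"            # back to default colours

print(GREEN_BACK + " OK " + RESET + " sensor online")
print(RED_BACK + " FAIL " + RESET + " sensor offline")
```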
hexsha cb773caf56a1b53ea22a696af8fa31ea476cf630 | size 13,444 | ext py | lang Python
max_stars:  path rl_games/algos_torch/models.py | repo icleen/rl_games | head b9a54548eead17387c396ccfcb84eb98b65be372 | licenses ["MIT"] | count null | null / null
max_issues: path rl_games/algos_torch/models.py | repo icleen/rl_games | head b9a54548eead17387c396ccfcb84eb98b65be372 | licenses ["MIT"] | count null | null / null
max_forks:  path rl_games/algos_torch/models.py | repo icleen/rl_games | head b9a54548eead17387c396ccfcb84eb98b65be372 | licenses ["MIT"] | count null | null / null
content:
import rl_games.algos_torch.layers
import numpy as np
import torch.nn as nn
import torch
import torch.nn.functional as F
import rl_games.common.divergence as divergence
from rl_games.algos_torch.torch_ext import CategoricalMasked
from torch.distributions import Categorical
from rl_games.algos_torch.sac_helper import SquashedNormal
from rl_games.algos_torch.running_mean_std import RunningMeanStd, RunningMeanStdObs
class BaseModel():
def __init__(self, model_class):
self.model_class = model_class
def is_rnn(self):
return False
def is_separate_critic(self):
return False
def build(self, config):
obs_shape = config['input_shape']
normalize_value = config.get('normalize_value', False)
normalize_input = config.get('normalize_input', False)
value_size = config.get('value_size', 1)
return self.Network(self.network_builder.build(self.model_class, **config), obs_shape=obs_shape,
normalize_value=normalize_value, normalize_input=normalize_input, value_size=value_size)
class BaseModelNetwork(nn.Module):
def __init__(self, obs_shape, normalize_value, normalize_input, value_size):
nn.Module.__init__(self)
self.obs_shape = obs_shape
self.normalize_value = normalize_value
self.normalize_input = normalize_input
self.value_size = value_size
if normalize_value:
self.value_mean_std = RunningMeanStd((self.value_size,))
if normalize_input:
if isinstance(obs_shape, dict):
self.running_mean_std = RunningMeanStdObs(obs_shape)
else:
self.running_mean_std = RunningMeanStd(obs_shape)
def norm_obs(self, observation):
return self.running_mean_std(observation) if self.normalize_input else observation
def unnorm_value(self, value):
return self.value_mean_std(value, unnorm=True) if self.normalize_value else value
class ModelA2C(BaseModel):
def __init__(self, network):
BaseModel.__init__(self, 'a2c')
self.network_builder = network
class Network(BaseModelNetwork):
def __init__(self, a2c_network, **kwargs):
BaseModelNetwork.__init__(self,**kwargs)
self.a2c_network = a2c_network
def is_rnn(self):
return self.a2c_network.is_rnn()
def get_default_rnn_state(self):
return self.a2c_network.get_default_rnn_state()
def kl(self, p_dict, q_dict):
p = p_dict['logits']
q = q_dict['logits']
return divergence.d_kl_discrete(p, q)
def forward(self, input_dict):
is_train = input_dict.get('is_train', True)
action_masks = input_dict.get('action_masks', None)
prev_actions = input_dict.get('prev_actions', None)
input_dict['obs'] = self.norm_obs(input_dict['obs'])
logits, value, states = self.a2c_network(input_dict)
if is_train:
categorical = CategoricalMasked(logits=logits, masks=action_masks)
prev_neglogp = -categorical.log_prob(prev_actions)
entropy = categorical.entropy()
result = {
'prev_neglogp' : torch.squeeze(prev_neglogp),
'logits' : categorical.logits,
'values' : value,
'entropy' : entropy,
'rnn_states' : states
}
return result
else:
categorical = CategoricalMasked(logits=logits, masks=action_masks)
selected_action = categorical.sample().long()
neglogp = -categorical.log_prob(selected_action)
result = {
'neglogpacs' : torch.squeeze(neglogp),
'values' : self.unnorm_value(value),
'actions' : selected_action,
'logits' : categorical.logits,
'rnn_states' : states
}
return result
class ModelA2CMultiDiscrete(BaseModel):
def __init__(self, network):
BaseModel.__init__(self, 'a2c')
self.network_builder = network
class Network(BaseModelNetwork):
def __init__(self, a2c_network, **kwargs):
BaseModelNetwork.__init__(self, **kwargs)
self.a2c_network = a2c_network
def is_rnn(self):
return self.a2c_network.is_rnn()
def get_default_rnn_state(self):
return self.a2c_network.get_default_rnn_state()
def kl(self, p_dict, q_dict):
p = p_dict['logits']
q = q_dict['logits']
return divergence.d_kl_discrete_list(p, q)
def forward(self, input_dict):
is_train = input_dict.get('is_train', True)
action_masks = input_dict.get('action_masks', None)
prev_actions = input_dict.get('prev_actions', None)
input_dict['obs'] = self.norm_obs(input_dict['obs'])
logits, value, states = self.a2c_network(input_dict)
if is_train:
if action_masks is None:
categorical = [Categorical(logits=logit) for logit in logits]
else:
categorical = [CategoricalMasked(logits=logit, masks=mask) for logit, mask in zip(logits, action_masks)]
prev_actions = torch.split(prev_actions, 1, dim=-1)
prev_neglogp = [-c.log_prob(a.squeeze()) for c,a in zip(categorical, prev_actions)]
prev_neglogp = torch.stack(prev_neglogp, dim=-1).sum(dim=-1)
entropy = [c.entropy() for c in categorical]
entropy = torch.stack(entropy, dim=-1).sum(dim=-1)
result = {
'prev_neglogp' : torch.squeeze(prev_neglogp),
'logits' : [c.logits for c in categorical],
'values' : value,
'entropy' : torch.squeeze(entropy),
'rnn_states' : states
}
return result
else:
if action_masks is None:
categorical = [Categorical(logits=logit) for logit in logits]
else:
categorical = [CategoricalMasked(logits=logit, masks=mask) for logit, mask in zip(logits, action_masks)]
selected_action = [c.sample().long() for c in categorical]
neglogp = [-c.log_prob(a.squeeze()) for c,a in zip(categorical, selected_action)]
selected_action = torch.stack(selected_action, dim=-1)
neglogp = torch.stack(neglogp, dim=-1).sum(dim=-1)
result = {
'neglogpacs' : torch.squeeze(neglogp),
'values' : self.unnorm_value(value),
'actions' : selected_action,
'logits' : [c.logits for c in categorical],
'rnn_states' : states
}
return result
class ModelA2CContinuous(BaseModel):
def __init__(self, network):
BaseModel.__init__(self, 'a2c')
self.network_builder = network
class Network(BaseModelNetwork):
def __init__(self, a2c_network, **kwargs):
BaseModelNetwork.__init__(self, **kwargs)
self.a2c_network = a2c_network
def is_rnn(self):
return self.a2c_network.is_rnn()
def get_default_rnn_state(self):
return self.a2c_network.get_default_rnn_state()
def kl(self, p_dict, q_dict):
p = p_dict['mu'], p_dict['sigma']
q = q_dict['mu'], q_dict['sigma']
return divergence.d_kl_normal(p, q)
def forward(self, input_dict):
is_train = input_dict.get('is_train', True)
prev_actions = input_dict.get('prev_actions', None)
input_dict['obs'] = self.norm_obs(input_dict['obs'])
mu, sigma, value, states = self.a2c_network(input_dict)
distr = torch.distributions.Normal(mu, sigma)
if is_train:
entropy = distr.entropy().sum(dim=-1)
prev_neglogp = -distr.log_prob(prev_actions).sum(dim=-1)
result = {
'prev_neglogp' : torch.squeeze(prev_neglogp),
'value' : value,
'entropy' : entropy,
'rnn_states' : states,
'mus' : mu,
'sigmas' : sigma
}
return result
else:
selected_action = distr.sample().squeeze()
neglogp = -distr.log_prob(selected_action).sum(dim=-1)
result = {
'neglogpacs' : torch.squeeze(neglogp),
'values' : self.unnorm_value(value),
'actions' : selected_action,
'entropy' : entropy,
'rnn_states' : states,
'mus' : mu,
'sigmas' : sigma
}
return result
class ModelA2CContinuousLogStd(BaseModel):
def __init__(self, network):
BaseModel.__init__(self, 'a2c')
self.network_builder = network
class Network(BaseModelNetwork):
def __init__(self, a2c_network, **kwargs):
BaseModelNetwork.__init__(self, **kwargs)
self.a2c_network = a2c_network
def is_rnn(self):
return self.a2c_network.is_rnn()
def get_default_rnn_state(self):
return self.a2c_network.get_default_rnn_state()
def freeze_actor(self):
return self.a2c_network.freeze_actor()
def unfreeze_actor(self):
return self.a2c_network.unfreeze_actor()
def forward(self, input_dict):
is_train = input_dict.get('is_train', True)
prev_actions = input_dict.get('prev_actions', None)
input_dict['obs'] = self.norm_obs(input_dict['obs'])
mu, logstd, value, states = self.a2c_network(input_dict)
sigma = torch.exp(logstd) # subtract 5 or 10 to reduce the value without zeroing out in other places
distr = torch.distributions.Normal(mu, sigma)
if is_train:
entropy = distr.entropy().sum(dim=-1)
prev_neglogp = self.neglogp(prev_actions, mu, sigma, logstd)
result = {
'prev_neglogp' : torch.squeeze(prev_neglogp),
'values' : value,
'entropy' : entropy,
'rnn_states' : states,
'mus' : mu,
'sigmas' : sigma
}
return result
else:
selected_action = distr.sample()
neglogp = self.neglogp(selected_action, mu, sigma, logstd)
result = {
'neglogpacs' : torch.squeeze(neglogp),
'values' : self.unnorm_value(value),
'actions' : selected_action,
'rnn_states' : states,
'mus' : mu,
'sigmas' : sigma
}
return result
def neglogp(self, x, mean, std, logstd):
return 0.5 * (((x - mean) / std)**2).sum(dim=-1) \
+ 0.5 * np.log(2.0 * np.pi) * x.size()[-1] \
+ logstd.sum(dim=-1)
class ModelCentralValue(BaseModel):
def __init__(self, network):
BaseModel.__init__(self, 'a2c')
self.network_builder = network
class Network(BaseModelNetwork):
def __init__(self, a2c_network, **kwargs):
BaseModelNetwork.__init__(self, **kwargs)
self.a2c_network = a2c_network
def is_rnn(self):
return self.a2c_network.is_rnn()
def get_default_rnn_state(self):
return self.a2c_network.get_default_rnn_state()
def kl(self, p_dict, q_dict):
return None # or throw exception?
def forward(self, input_dict):
is_train = input_dict.get('is_train', True)
prev_actions = input_dict.get('prev_actions', None)
input_dict['obs'] = self.norm_obs(input_dict['obs'])
value, states = self.a2c_network(input_dict)
if not is_train:
value = self.unnorm_value(value)
result = {
'values': value,
'rnn_states': states
}
return result
class ModelSACContinuous(BaseModel):
def __init__(self, network):
BaseModel.__init__(self, 'sac')
self.network_builder = network
class Network(BaseModelNetwork):
def __init__(self, sac_network,**kwargs):
BaseModelNetwork.__init__(self,**kwargs)
self.sac_network = sac_network
def critic(self, obs, action):
return self.sac_network.critic(obs, action)
def critic_target(self, obs, action):
return self.sac_network.critic_target(obs, action)
def actor(self, obs):
return self.sac_network.actor(obs)
def is_rnn(self):
return False
def forward(self, input_dict):
is_train = input_dict.pop('is_train', True)
mu, sigma = self.sac_network(input_dict)
dist = SquashedNormal(mu, sigma)
return dist
avg_line_length 38.632184 | max_line_length 124 | alphanum_fraction 0.570738
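`ModelA2CContinuousLogStd.neglogp` in the file above is the closed-form negative log-density of a diagonal Gaussian: 0.5 * sum(((x - mu)/sigma)^2) + 0.5 * d * ln(2*pi) + sum(logstd). A short editor-added check (PyTorch required, not part of the repository) confirms it matches `torch.distributions.Normal`.

```python
# Editor-added check: the closed-form neglogp used above equals the negative
# log-probability of an independent Normal distribution.
import numpy as np
import torch

def neglogp(x, mean, std, logstd):
    return 0.5 * (((x - mean) / std) ** 2).sum(dim=-1) \
        + 0.5 * np.log(2.0 * np.pi) * x.size()[-1] \
        + logstd.sum(dim=-1)

torch.manual_seed(0)
mu = torch.randn(4, 6)
logstd = 0.1 * torch.randn(4, 6)
sigma = torch.exp(logstd)
actions = torch.distributions.Normal(mu, sigma).sample()

reference = -torch.distributions.Normal(mu, sigma).log_prob(actions).sum(dim=-1)
print(torch.allclose(neglogp(actions, mu, sigma, logstd), reference, atol=1e-5))  # True
```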
hexsha d74d0d7684292e26585f680a6efa79402f16d952 | size 2,100 | ext py | lang Python
max_stars:  path potado/tests/unit_test_default_schedule_builder.py | repo pikesley/potado | head ce3f908f764d798253709053abec08100ef240b7 | licenses ["MIT"] | count null | null / null
max_issues: path potado/tests/unit_test_default_schedule_builder.py | repo pikesley/potado | head ce3f908f764d798253709053abec08100ef240b7 | licenses ["MIT"] | count null | null / null
max_forks:  path potado/tests/unit_test_default_schedule_builder.py | repo pikesley/potado | head ce3f908f764d798253709053abec08100ef240b7 | licenses ["MIT"] | count null | null / null
content:
import json
import logging
from mock import MagicMock, mock_open, patch
from lib.client import TadoClient
from lib.default_schedule_builder import DefaultScheduleBuilder
class TestDefaultScheduleBuilder:
"""Test the DefaultScheduleBuilder."""
def setup_method(self):
"""Do some initialisation."""
fixture = json.loads(open("tests/fixtures/api-data/zones.json").read())
TadoClient.zones = MagicMock(return_value=fixture)
def test_data(self):
"""Test it has the right data."""
logging.Logger.info = MagicMock()
fixture_data = open("tests/fixtures/conf/credentials.yaml").read()
with patch("builtins.open", mock_open(read_data=fixture_data)):
builder = DefaultScheduleBuilder()
assert builder.zone_names == ["Heating", "Bedroom", "Kitchen"]
assert builder.zones[1] == {
"zone": "Bedroom",
"schedule": [
{"days": "all", "periods": [{"start": "07:00", "end": "23:00"}]}
],
}
def test_writing(self):
"""Test it writes good yaml."""
logging.Logger.info = MagicMock()
fixture_data = open("tests/fixtures/conf/credentials.yaml").read()
with patch("builtins.open", mock_open(read_data=fixture_data)):
builder = DefaultScheduleBuilder()
builder.yamlise("/tmp/schedule.yaml")
output = open("/tmp/schedule.yaml").read()
assert output.split("\n") == [
"- zone: Heating",
" schedule:",
" - days: all",
" periods:",
" - start: '07:00'",
" end: '23:00'",
"- zone: Bedroom",
" schedule:",
" - days: all",
" periods:",
" - start: '07:00'",
" end: '23:00'",
"- zone: Kitchen",
" schedule:",
" - days: all",
" periods:",
" - start: '07:00'",
" end: '23:00'",
"",
"",
]
avg_line_length 32.8125 | max_line_length 84 | alphanum_fraction 0.509524
hexsha 1e3747e351073704f390308b4aa8b1ef4d41e227 | size 7,228 | ext py | lang Python
max_stars:  path app/main/routes.py | repo BolajiOlajide/BucketListAPI | head 8b6eb06f4f6c3345e58c7218827b6309b55905b8 | licenses ["MIT"] | count 3 | 2018-05-07T11:39:16.000Z / 2019-04-26T13:32:39.000Z
max_issues: path app/main/routes.py | repo BolajiOlajide/BucketListAPI | head 8b6eb06f4f6c3345e58c7218827b6309b55905b8 | licenses ["MIT"] | count null | null / null
max_forks:  path app/main/routes.py | repo BolajiOlajide/BucketListAPI | head 8b6eb06f4f6c3345e58c7218827b6309b55905b8 | licenses ["MIT"] | count null | null / null
content:
"""
This script contains the routes for the different API methods.
This handles the overall routing of the application.
"""
from flask import g, jsonify, request
from flask_cors import cross_origin
from . import main
from app import db, errors
from app.auth.routes import auth
from app.decorators import json, paginate
from app.models import BucketList, Items
import sqlalchemy
@main.route('/bucketlists/', methods=['GET', 'OPTIONS'])
@cross_origin()
@auth.login_required
@paginate()
def get_bucketlists():
"""
List all the created BucketLists.
Displays a json of all the created BucketLists and the various items
associated with them.
"""
if request.args.get('q'):
return BucketList.query.filter_by(created_by=g.user.user_id).filter(
BucketList.name.contains(request.args.get('q')))
else:
return BucketList.query.filter_by(created_by=g.user.user_id)
@main.route('/bucketlists/<int:list_id>', methods=['GET'])
@auth.login_required
@json
def get_bucketlist(list_id):
"""
Get single bucket list.
Return a json of all the information as regards a particular BucketList.
"""
bucketlist = BucketList.query.filter_by(bucketlist_id=list_id).first()
if not bucketlist or bucketlist.created_by != g.user.user_id:
return errors.not_found("The BucketList with the id: {0} doesn't"
" exist.".format(list_id))
return bucketlist, 200
@main.route('/bucketlists/', methods=['POST'], strict_slashes=False)
@auth.login_required
@json
def create_bucketlist():
"""
Create a new BucketList.
    Adds a new bucketlist and returns it for the user to view.
"""
if not request.json or 'name' not in request.json:
return errors.bad_request("Only JSON object is accepted. Please "
"confirm that the key 'name' exists.")
try:
bucketlist = BucketList(
name=request.json.get('name'),
created_by=g.user.user_id
)
bucketlist.save()
except (sqlalchemy.exc.IntegrityError, sqlalchemy.exc.InvalidRequestError):
db.session().rollback()
return errors.bad_request(
"A BucketList with the name {0} exists.".format(
request.json.get('name')))
return bucketlist, 201
@main.route('/bucketlists/<int:list_id>', methods=['PUT'])
@auth.login_required
@json
def update_bucketlist(list_id):
"""
Update BucketList.
Update a BucketList name.
"""
if not request.json:
return errors.bad_request("Invalid Input. Only JSON input is allowed.")
elif 'name' not in request.json:
return errors.bad_request("The key 'name' not in the JSON")
else:
bucketlist = BucketList.query.filter_by(bucketlist_id=list_id).first()
if not bucketlist or bucketlist.created_by != g.user.user_id:
return errors.not_found("The BucketList with the id: {0} doesn't"
" exist.".format(list_id))
else:
bucketlist.name = request.json.get('name')
bucketlist.save()
return bucketlist, 200
@main.route('/bucketlists/<int:list_id>', methods=['DELETE'])
@auth.login_required
def delete_bucketlist(list_id):
"""
Delete a BucketList.
Deletes a BucketList and all items associated with it.
"""
bucketlist = BucketList.query.filter_by(bucketlist_id=list_id).first()
if not bucketlist or bucketlist.created_by != g.user.user_id:
return errors.not_found("The BucketList with the id: {0} doesn't"
" exist.".format(list_id))
else:
bucketlist.delete()
return jsonify({'Delete': True}), 200
@main.route(
'/bucketlists/<int:list_id>/items/', methods=['POST'], strict_slashes=False
)
@auth.login_required
@json
def add_bucketlist_item(list_id):
"""
Add new item.
This function adds a new item to a BucketList. It gets the name and done
    keys from the supplied JSON and saves the item to the database.
"""
bucketlist = BucketList.query.filter_by(bucketlist_id=list_id).first()
if not bucketlist or bucketlist.created_by != g.user.user_id:
return errors.not_found("The BucketList with the id: {0} doesn't"
" exist.".format(list_id))
    if not request.json or ('name' not in request.json and
                            'done' not in request.json):
        return errors.bad_request("Only JSON object is accepted. Please confirm"
                                  " that the key 'name' or 'done' exists.")
item = Items().from_json(request.json)
item.bucketlist_id = bucketlist.bucketlist_id
item.save()
return bucketlist, 201
@main.route(
'/bucketlists/<int:list_id>/items/<int:item_id>', methods=['PUT']
)
@auth.login_required
@json
def update_bucketlist_item(list_id, item_id):
"""
Update item.
This function updates an item in a BucketList.
"""
bucketlist = BucketList.query.filter_by(bucketlist_id=list_id).first()
if not bucketlist or bucketlist.created_by != g.user.user_id:
return errors.not_found("The BucketList with the id: {0} doesn't"
" exist.".format(list_id))
item = Items.query.get(item_id)
if not item or (item.bucketlist_id != bucketlist.bucketlist_id):
return errors.not_found("The item with the ID: {0} doesn't exist"
.format(item_id))
if not request.json:
return errors.bad_request("Invalid Input. Only JSON input is allowed.")
    elif 'name' not in request.json and 'done' not in request.json:
return errors.bad_request(
"The key 'name' or 'done' cannot be found in the JSON provided.")
else:
item.name = request.json.get('name')
item.done = request.json.get('done')
item.save()
return bucketlist, 200
@main.route(
'/bucketlists/<int:list_id>/items/<int:item_id>', methods=['DELETE']
)
@auth.login_required
@json
def delete_bucketlist_item(list_id, item_id):
"""
Delete an item.
This function deletes an item from a BucketList.
"""
bucketlist = BucketList.query.filter_by(bucketlist_id=list_id).first()
item = Items.query.filter_by(item_id=item_id).first()
if not bucketlist or bucketlist.created_by != g.user.user_id:
return errors.not_found("The BucketList with the id: {0} doesn't"
" exist.".format(list_id))
elif not item or (item.bucketlist_id != bucketlist.bucketlist_id):
return errors.not_found("The item with the ID: {0} doesn't exist"
.format(item_id))
else:
item.delete()
return bucketlist, 200
@main.route('/login', methods=["POST"], strict_slashes=False)
def login2():
"""
Secure redundant route.
Protect the route: /login from being accessed by the end user.
"""
return errors.not_found("The page doesn't exist")
@main.route('/register', methods=["POST"], strict_slashes=False)
def register_user2():
"""
Secure redundant route.
Protect the route: /register from being accessed by the end user.
"""
return errors.not_found("The Page doesn't exist")
| 31.982301
| 79
| 0.649696
|
c205a02f89e3b987fa9a055c0b6a84bdf7e49b9a
| 140
|
py
|
Python
|
sum_alternative.py
|
mlastovski/kse-sum
|
1094a1dd352d291d7549ef37b3356e65c77eacd7
|
[
"MIT"
] | null | null | null |
sum_alternative.py
|
mlastovski/kse-sum
|
1094a1dd352d291d7549ef37b3356e65c77eacd7
|
[
"MIT"
] | null | null | null |
sum_alternative.py
|
mlastovski/kse-sum
|
1094a1dd352d291d7549ef37b3356e65c77eacd7
|
[
"MIT"
] | null | null | null |
# final
def sum_of_args(args):
result = 0
for i in args:
result += i
return result
print(sum_of_args(args=[1,2,3,4]))
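For comparison, Python's built-in sum covers the same case, so the loop above could be replaced with a one-liner:
# equivalent result using the built-in sum
print(sum([1, 2, 3, 4]))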
| 14
| 34
| 0.592857
|
2572f92bff70d6df27dbfcc1a8f361e602f653c4
| 89
|
py
|
Python
|
examples/apps/django_multi_apps/landing/apps.py
|
datalayer-contrib/holoviz-panel
|
c97b57e8eaff4b5f542add41f496395da2483b23
|
[
"BSD-3-Clause"
] | 1,130
|
2019-11-23T09:53:37.000Z
|
2022-03-31T11:30:07.000Z
|
examples/apps/django_multi_apps/landing/apps.py
|
datalayer-contrib/holoviz-panel
|
c97b57e8eaff4b5f542add41f496395da2483b23
|
[
"BSD-3-Clause"
] | 2,265
|
2019-11-20T17:09:09.000Z
|
2022-03-31T22:09:38.000Z
|
examples/apps/django_multi_apps/landing/apps.py
|
datalayer-contrib/holoviz-panel
|
c97b57e8eaff4b5f542add41f496395da2483b23
|
[
"BSD-3-Clause"
] | 215
|
2019-11-26T11:49:04.000Z
|
2022-03-30T10:23:11.000Z
|
from django.apps import AppConfig
class LandingConfig(AppConfig):
name = 'landing'
| 14.833333
| 33
| 0.752809
|
46518735760cabd69b7950d794fe35472f513a5b
| 1,013
|
py
|
Python
|
Defense/SimpleTopology.py
|
rprabhuh/SDNDDoS
|
91a23c0b817999bce5b07359681c5b2dddf89b9d
|
[
"MIT"
] | 3
|
2016-10-11T06:46:32.000Z
|
2021-02-07T10:28:41.000Z
|
Defense/SimpleTopology.py
|
rprabhuh/SDNDDoS
|
91a23c0b817999bce5b07359681c5b2dddf89b9d
|
[
"MIT"
] | null | null | null |
Defense/SimpleTopology.py
|
rprabhuh/SDNDDoS
|
91a23c0b817999bce5b07359681c5b2dddf89b9d
|
[
"MIT"
] | 5
|
2015-09-03T10:38:15.000Z
|
2020-08-26T13:12:31.000Z
|
#!/usr/bin/env python
from mininet.net import Mininet
from mininet.topo import Topo
from mininet.cli import CLI
from subprocess import call
net = Mininet()
#cli=CLI()
#Creating nodes in the network
c0=net.addController()
h0=net.addHost('h0')
s1=net.addSwitch('s1')
h1=net.addHost('h1')
h2=net.addHost('h2')
#Creating bidirectional links between nodes in the network
net.addLink(h0,s1)
net.addLink(h1,s1)
net.addLink(h2,s1)
net.start()
#Addition of the flow rule
print("Flow Rule Added")
call( 'ovs-ofctl add-flow s1 priority=10,action=normal', shell=True )
net.pingAll()
#To stop the flow from host 0 with ip 10.0.0.1
print("Stop the flow from host 0 with ip 10.0.0.1")
call( 'ovs-ofctl add-flow s1 priority=11,dl_type=0x0800,nw_src=10.0.0.1,action=drop', shell=True )
net.pingAll()
#To restore the flow for host 0 after quarantine
print("Restore communication with the host 0")
call( 'ovs-ofctl --strict del-flows s1 priority=11,dl_type=0x0800,nw_src=10.0.0.1', shell=True )
net.pingAll()
CLI(net)
net.stop()
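A small helper sketch (not in the original script) that generalises the hard-coded quarantine/release commands above to any source IP; the switch name and rule priority mirror the values used in the script.
from subprocess import call
def quarantine(ip, switch='s1', priority=11):
    # Drop all IPv4 traffic originating from `ip` on `switch`
    call('ovs-ofctl add-flow {0} priority={1},dl_type=0x0800,nw_src={2},action=drop'
         .format(switch, priority, ip), shell=True)
def release(ip, switch='s1', priority=11):
    # Remove the matching drop rule so traffic from `ip` is forwarded again
    call('ovs-ofctl --strict del-flows {0} priority={1},dl_type=0x0800,nw_src={2}'
         .format(switch, priority, ip), shell=True)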
| 26.657895
| 99
| 0.737414
|
eb137d33b757c4b6b62c29fa9043ac0ad03c2aab
| 19,670
|
py
|
Python
|
src/robotide/editor/settingeditors.py
|
veryl-technologies/t24-tests-ide
|
16cd803895916a785c0e1fec3f71f9388c21edc9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/robotide/editor/settingeditors.py
|
veryl-technologies/t24-tests-ide
|
16cd803895916a785c0e1fec3f71f9388c21edc9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/robotide/editor/settingeditors.py
|
veryl-technologies/t24-tests-ide
|
16cd803895916a785c0e1fec3f71f9388c21edc9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright 2008-2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import wx
from robotide import context
from robotide.controller.commands import (UpdateVariable, UpdateDocumentation,
SetValues, AddLibrary, AddResource, AddVariablesFileImport,
ClearSetting)
from robotide.editor.listeditor import ListEditorBase
from robotide.publish.messages import RideImportSetting, RideOpenVariableDialog, RideExecuteSpecXmlImport, RideSaving
from robotide.utils import overrides
from robotide.widgets import ButtonWithHandler, Label, HtmlWindow, PopupMenu, PopupMenuItems
from robotide.publish import PUBLISHER
from robotide import utils
from .formatters import ListToStringFormatter
from .gridcolorizer import ColorizationSettings
from .editordialogs import (EditorDialog, DocumentationDialog, MetadataDialog,
ScalarVariableDialog, ListVariableDialog, LibraryDialog,
ResourceDialog, VariablesDialog)
from .listeditor import ListEditor
from .popupwindow import HtmlPopupWindow, HtmlDialog
from .tags import TagsDisplay
class SettingEditor(wx.Panel, utils.RideEventHandler):
def __init__(self, parent, controller, plugin, tree):
wx.Panel.__init__(self, parent)
self._controller = controller
self.plugin = plugin
self._datafile = controller.datafile
self._create_controls()
self._tree = tree
self._editing = False
self.plugin.subscribe(self.update_value, RideImportSetting)
def _create_controls(self):
sizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.Add((5,0))
sizer.Add(Label(self, label=self._controller.label,
size=(context.SETTING_LABEL_WIDTH,
context.SETTING_ROW_HEIGTH)))
self._value_display = self._create_value_display()
self.update_value()
self._tooltip = self._get_tooltip()
sizer.Add(self._value_display, 1, wx.EXPAND)
self._add_edit(sizer)
sizer.Add(ButtonWithHandler(self, 'Clear'))
sizer.Layout()
self.SetSizer(sizer)
def _add_edit(self, sizer):
sizer.Add(ButtonWithHandler(self, 'Edit'), flag=wx.LEFT|wx.RIGHT, border=5)
def _create_value_display(self):
display = self._value_display_control()
display.Bind(wx.EVT_ENTER_WINDOW, self.OnEnterWindow)
display.Bind(wx.EVT_LEAVE_WINDOW, self.OnLeaveWindow)
display.Bind(wx.EVT_MOTION, self.OnDisplayMotion)
return display
def _value_display_control(self):
ctrl = SettingValueDisplay(self)
ctrl.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
ctrl.Bind(wx.EVT_KEY_DOWN, self.OnKey)
return ctrl
def _get_tooltip(self):
return HtmlPopupWindow(self, (500, 350))
def OnKey(self, event):
self._tooltip.hide()
event.Skip()
def OnDisplayMotion(self, event):
self._tooltip.hide()
def refresh(self, controller):
self._controller = controller
self.update_value()
def refresh_datafile(self, item, event):
self._tree.refresh_datafile(item, event)
def OnEdit(self, event=None):
self._hide_tooltip()
self._editing = True
        dlg = self._create_editor_dialog()
if dlg.ShowModal() == wx.ID_OK:
self._set_value(dlg.get_value(), dlg.get_comment())
self._update_and_notify()
dlg.Destroy()
self._editing = False
    def _create_editor_dialog(self):
dlg_class = EditorDialog(self._controller)
return dlg_class(self._datafile, self._controller, self.plugin)
def _set_value(self, value_list, comment):
self._controller.execute(SetValues(value_list, comment))
def _hide_tooltip(self):
self._stop_popup_timer()
self._tooltip.hide()
def _stop_popup_timer(self):
if hasattr(self, 'popup_timer'):
self.popup_timer.Stop()
def OnEnterWindow(self, event):
if self._mainframe_has_focus():
self.popup_timer = wx.CallLater(500, self.OnPopupTimer)
def _mainframe_has_focus(self):
return wx.GetTopLevelParent(self.FindFocus()) == \
wx.GetTopLevelParent(self)
def OnLeaveWindow(self, event):
self._stop_popup_timer()
def OnPopupTimer(self, event):
if self.Parent.tooltip_allowed(self._tooltip):
details, title = self._get_details_for_tooltip()
if details:
self._tooltip.set_content(details, title)
self._tooltip.show_at(self._tooltip_position())
def _get_details_for_tooltip(self):
kw = self._controller.keyword_name
return self.plugin.get_keyword_details(kw), kw
def _tooltip_position(self):
ms = wx.GetMouseState()
# ensure that the popup gets focus immediately
return ms.x-3, ms.y-3
def OnLeftUp(self, event):
if event.ControlDown() or event.CmdDown():
self._navigate_to_user_keyword()
else:
if self._has_selected_area() and not self._editing:
wx.CallAfter(self.OnEdit, event)
event.Skip()
def _has_selected_area(self):
selection = self._value_display.GetSelection()
if selection is None:
return False
return selection[0] == selection[1]
def _navigate_to_user_keyword(self):
uk = self.plugin.get_user_keyword(self._controller.keyword_name)
if uk:
self._tree.select_user_keyword_node(uk)
def _update_and_notify(self):
self.update_value()
def OnClear(self, event):
self._controller.execute(ClearSetting())
self._update_and_notify()
def update_value(self, event=None):
if self._controller is None:
return
if self._controller.is_set:
self._value_display.set_value(self._controller, self.plugin)
else:
self._value_display.clear()
def get_selected_datafile_controller(self):
return self._controller.datafile_controller
def close(self):
self._controller = None
self.plugin.unsubscribe(self.update_value, RideImportSetting)
def highlight(self, text):
return self._value_display.highlight(text)
def clear_highlight(self):
return self._value_display.clear_highlight()
def contains(self, text):
return self._value_display.contains(text)
class SettingValueDisplay(wx.TextCtrl):
def __init__(self, parent):
wx.TextCtrl.__init__(self, parent, size=(-1, context.SETTING_ROW_HEIGTH),
style=wx.TE_RICH|wx.TE_MULTILINE)
self.SetEditable(False)
self._colour_provider = ColorizationSettings(parent.plugin.global_settings)
self._empty_values()
def _empty_values(self):
self._value = None
self._is_user_keyword = False
def set_value(self, controller, plugin):
self._value = controller.display_value
self._keyword_name = controller.keyword_name
self._is_user_keyword = plugin.is_user_keyword(self._keyword_name)
self.SetValue(self._value)
self._colorize_data()
def _colorize_data(self, match=None):
self._colorize_background(match)
self._colorize_possible_user_keyword()
def _colorize_background(self, match=None):
self.SetBackgroundColour(self._get_background_colour(match))
def _get_background_colour(self, match=None):
if self._value is None:
return 'light grey'
if match is not None and self.contains(match):
return self._colour_provider.get_highlight_color()
return 'white'
def _colorize_possible_user_keyword(self):
if not self._is_user_keyword:
return
font = self.GetFont()
font.SetUnderlined(True)
self.SetStyle(0, len(self._keyword_name),
wx.TextAttr('blue', self._get_background_colour(), font))
def clear(self):
self.Clear()
self._empty_values()
self._colorize_background()
def contains(self, text):
if self._value is None:
return False
return [item for item in self._value.split(' | ') if utils.highlight_matcher(text, item)] != []
def highlight(self, text):
self._colorize_data(match=text)
def clear_highlight(self):
self._colorize_data()
class DocumentationEditor(SettingEditor):
def _value_display_control(self):
ctrl = HtmlWindow(self, (-1, 100))
ctrl.Bind(wx.EVT_LEFT_DOWN, self.OnEdit)
return ctrl
def update_value(self, event=None):
if self._controller:
self._value_display.SetPage(self._controller.visible_value)
def _get_tooltip(self):
return HtmlPopupWindow(self, (500, 350), detachable=False)
def _get_details_for_tooltip(self):
return self._controller.visible_value, None
    def _create_editor_dialog(self):
return DocumentationDialog(self._datafile,
self._controller.editable_value)
def _set_value(self, value_list, comment):
if value_list:
self._controller.execute(UpdateDocumentation(value_list[0]))
def contains(self, text):
return False
def highlight(self, text):
pass
def clear_highlight(self):
pass
class TagsEditor(SettingEditor):
def __init__(self, parent, controller, plugin, tree):
SettingEditor.__init__(self, parent, controller, plugin, tree)
self.plugin.subscribe(self._saving, RideSaving)
self.Bind(wx.EVT_SIZE, self.OnSize)
def _saving(self, message):
self._tags_display.saving()
def OnSize(self, event):
self.SetSizeHints(-1, max(self._tags_display.get_height(), 25))
event.Skip()
def _value_display_control(self):
self._tags_display = TagsDisplay(self, self._controller)
self._tags_display.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
self._tags_display.Bind(wx.EVT_KEY_DOWN, self.OnKey)
return self._tags_display
def contains(self, text):
return False
def highlight(self, text):
pass
def clear_highlight(self):
pass
def close(self):
self._tags_display.close()
self.plugin.unsubscribe(self._saving, RideSaving)
SettingEditor.close(self)
class _AbstractListEditor(ListEditor):
def __init__(self, parent, tree, controller):
ListEditor.__init__(self, parent, self._titles, controller)
self._datafile = controller.datafile
self._tree = tree
def get_selected_datafile_controller(self):
return self._controller.datafile_controller
def refresh_datafile(self, item, event):
self._tree.refresh_datafile(item, event)
def update_data(self):
ListEditor.update_data(self)
def update_value(self):
pass
def close(self):
pass
def highlight(self, text, expand=False):
pass
class VariablesListEditor(_AbstractListEditor):
_titles = ['Variable', 'Value', 'Comment']
_buttons = ['Add Scalar', 'Add List']
def __init__(self, parent, tree, controller):
PUBLISHER.subscribe(self._update_vars, 'ride.variable.added', key=self)
PUBLISHER.subscribe(self._update_vars, 'ride.variable.updated', key=self)
PUBLISHER.subscribe(self._update_vars, 'ride.variable.removed', key=self)
PUBLISHER.subscribe(self._open_variable_dialog, RideOpenVariableDialog)
_AbstractListEditor.__init__(self, parent, tree, controller)
def _update_vars(self, event):
ListEditor.update_data(self)
def get_column_values(self, item):
return [item.name, item.value if isinstance(item.value, basestring)
else ' | '.join(item.value), ListToStringFormatter(item.comment).value]
def OnMoveUp(self, event):
_AbstractListEditor.OnMoveUp(self, event)
self._list.SetFocus()
def OnMoveDown(self, event):
_AbstractListEditor.OnMoveDown(self, event)
self._list.SetFocus()
def OnAddScalar(self, event):
dlg = ScalarVariableDialog(self._controller)
if dlg.ShowModal() == wx.ID_OK:
ctrl = self._controller.add_variable(*dlg.get_value())
ctrl.set_comment(dlg.get_comment())
self.update_data()
dlg.Destroy()
def OnAddList(self, event):
dlg = ListVariableDialog(self._controller, plugin=self.Parent.plugin)
if dlg.ShowModal() == wx.ID_OK:
ctrl = self._controller.add_variable(*dlg.get_value())
ctrl.set_comment(dlg.get_comment())
self.update_data()
dlg.Destroy()
def OnEdit(self, event):
var = self._controller[self._selection]
self._open_var_dialog(var)
def _open_variable_dialog(self, message):
self._open_var_dialog(message.controller)
def _open_var_dialog(self, var):
if var.name.startswith('${'):
dlg = ScalarVariableDialog(self._controller, item=var)
else:
dlg = ListVariableDialog(self._controller, item=var,
plugin=self.Parent.plugin)
if dlg.ShowModal() == wx.ID_OK:
name, value = dlg.get_value()
var.execute(UpdateVariable(name, value, dlg.get_comment()))
self.update_data()
dlg.Destroy()
def close(self):
PUBLISHER.unsubscribe_all(key=self)
class ImportSettingListEditor(_AbstractListEditor):
_titles = ['Import', 'Name / Path', 'Arguments', 'Comment']
_buttons = ['Library', 'Resource', 'Variables', 'Import Failed Help']
def __init__(self, parent, tree, controller):
self._import_failed_shown = False
_AbstractListEditor.__init__(self, parent, tree, controller)
@overrides(ListEditorBase)
def _create_buttons(self):
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(Label(self, label='Add Import', size=wx.Size(120, 20), style=wx.ALIGN_CENTER))
for label in self._buttons:
sizer.Add(ButtonWithHandler(self, label, width=120), 0, wx.ALL, 1)
return sizer
def OnLeftClick(self, event):
if not self.is_selected:
return
if wx.GetMouseState().ControlDown() or wx.GetMouseState().CmdDown():
self.navigate_to_tree()
def navigate_to_tree(self):
setting = self._get_setting()
if self.has_link_target(setting):
self._tree.select_node_by_data(setting.get_imported_controller())
def has_link_target(self, controller):
return controller.is_resource and controller.get_imported_controller()
@overrides(ListEditorBase)
def has_error(self, controller):
return controller.has_error()
@overrides(ListEditorBase)
def OnRightClick(self, event):
PopupMenu(self, PopupMenuItems(self, self._create_item_menu()))
def _create_item_menu(self):
menu = self._menu
item = self._controller[self._selection]
if item.has_error() and item.type == 'Library':
menu = menu[:] + ['Import Library Spec XML']
return menu
def OnImportLibrarySpecXml(self, event):
RideExecuteSpecXmlImport().publish()
def OnEdit(self, event):
setting = self._get_setting()
self._show_import_editor_dialog(EditorDialog(setting),
lambda v, c: setting.execute(SetValues(v, c)),
setting,
on_empty=self._delete_selected)
def OnLibrary(self, event):
self._show_import_editor_dialog(LibraryDialog,
lambda v, c: self._controller.execute(AddLibrary(v, c)))
def OnResource(self, event):
self._show_import_editor_dialog(ResourceDialog,
lambda v, c: self._controller.execute(AddResource(v, c)))
def OnVariables(self, event):
self._show_import_editor_dialog(VariablesDialog,
lambda v, c: self._controller.execute(AddVariablesFileImport(v, c)))
def OnImportFailedHelp(self, event):
if self._import_failed_shown:
return
dialog = HtmlDialog('Import failure handling', '''
<br>Possible corrections and notes:<br>
<ul>
<li>Import failure is shown with red color.</li>
<li>See Tools / View RIDE Log for detailed information about the failure.</li>
<li>If the import contains a variable that RIDE has not initialized, consider adding the variable
to variable table with a default value.</li>
<li>For library import failure: Consider importing library spec XML (Tools / Import Library Spec XML or by
adding the XML file with the correct name to PYTHONPATH) to enable keyword completion
for example for Java libraries.
Library spec XML can be created using libdoc tool from Robot Framework.
For more information see <a href="https://github.com/robotframework/RIDE/wiki/Keyword-Completion#wiki-using-library-specs">wiki</a>.
</li>
</ul>''')
dialog.Bind(wx.EVT_CLOSE, self._import_failed_help_closed)
dialog.Show()
self._import_failed_shown = True
def _import_failed_help_closed(self, event):
self._import_failed_shown = False
event.Skip()
def _get_setting(self):
return self._controller[self._selection]
def _show_import_editor_dialog(self, dialog, creator_or_setter, item=None, on_empty=None):
dlg = dialog(self._controller, item=item)
if dlg.ShowModal() == wx.ID_OK:
value = dlg.get_value()
if not self._empty_name(value):
creator_or_setter(value, dlg.get_comment())
elif on_empty:
on_empty()
self.update_data()
dlg.Destroy()
def _empty_name(self, value):
return not value[0]
def get_column_values(self, item):
return [item.type, item.name, item.display_value, ListToStringFormatter(item.comment).value]
class MetadataListEditor(_AbstractListEditor):
_titles = ['Metadata', 'Value', 'Comment']
_buttons = ['Add Metadata']
_sortable = False
def OnEdit(self, event):
meta = self._controller[self._selection]
dlg = MetadataDialog(self._controller.datafile, item=meta)
if dlg.ShowModal() == wx.ID_OK:
meta.set_value(*dlg.get_value())
meta.set_comment(dlg.get_comment())
self.update_data()
dlg.Destroy()
def OnAddMetadata(self, event):
dlg = MetadataDialog(self._controller.datafile)
if dlg.ShowModal() == wx.ID_OK:
ctrl = self._controller.add_metadata(*dlg.get_value())
ctrl.set_comment(dlg.get_comment())
self.update_data()
dlg.Destroy()
def get_column_values(self, item):
return [item.name, utils.html_escape(item.value), ListToStringFormatter(item.comment).value]
| 35.250896
| 144
| 0.658465
|
bd27c53fcd7d56c3121c00b512ae6c0e4e9abb9c
| 49,881
|
py
|
Python
|
src/awkward/_v2/contents/unionarray.py
|
douglasdavis/awkward-1.0
|
f00775803a5568efb0a8e2dae3b1a4f23228fa40
|
[
"BSD-3-Clause"
] | null | null | null |
src/awkward/_v2/contents/unionarray.py
|
douglasdavis/awkward-1.0
|
f00775803a5568efb0a8e2dae3b1a4f23228fa40
|
[
"BSD-3-Clause"
] | null | null | null |
src/awkward/_v2/contents/unionarray.py
|
douglasdavis/awkward-1.0
|
f00775803a5568efb0a8e2dae3b1a4f23228fa40
|
[
"BSD-3-Clause"
] | null | null | null |
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
# pylint: disable=consider-using-enumerate
import copy
import ctypes
from collections.abc import Iterable
import awkward as ak
from awkward._v2.index import Index
from awkward._v2.index import Index8
from awkward._v2.index import Index64
from awkward._v2.contents.content import Content
from awkward._v2.forms.unionform import UnionForm
from awkward._v2.forms.form import _parameters_equal
np = ak.nplike.NumpyMetadata.instance()
numpy = ak.nplike.Numpy.instance()
class UnionArray(Content):
is_UnionType = True
def __init__(
self, tags, index, contents, identifier=None, parameters=None, nplike=None
):
if not (isinstance(tags, Index) and tags.dtype == np.dtype(np.int8)):
raise ak._v2._util.error(
TypeError(
"{} 'tags' must be an Index with dtype=int8, not {}".format(
type(self).__name__, repr(tags)
)
)
)
        if not (isinstance(index, Index) and index.dtype in (
            np.dtype(np.int32),
            np.dtype(np.uint32),
            np.dtype(np.int64),
        )):
raise ak._v2._util.error(
TypeError(
"{} 'index' must be an Index with dtype in (int32, uint32, int64), "
"not {}".format(type(self).__name__, repr(index))
)
)
if not isinstance(contents, Iterable):
raise ak._v2._util.error(
TypeError(
"{} 'contents' must be iterable, not {}".format(
type(self).__name__, repr(contents)
)
)
)
if not isinstance(contents, list):
contents = list(contents)
for content in contents:
if not isinstance(content, Content):
raise ak._v2._util.error(
TypeError(
"{} all 'contents' must be Content subclasses, not {}".format(
type(self).__name__, repr(content)
)
)
)
if (
tags.nplike.known_shape
and index.nplike.known_shape
and tags.length > index.length
):
raise ak._v2._util.error(
ValueError(
"{} len(tags) ({}) must be <= len(index) ({})".format(
type(self).__name__, tags.length, index.length
)
)
)
if nplike is None:
for content in contents:
if nplike is None:
nplike = content.nplike
break
elif nplike is not content.nplike:
raise ak._v2._util.error(
TypeError(
"{} 'contents' must use the same array library (nplike): {} vs {}".format(
type(self).__name__,
type(nplike).__name__,
type(content.nplike).__name__,
)
)
)
if nplike is None:
nplike = tags.nplike
self._tags = tags
self._index = index
self._contents = contents
self._init(identifier, parameters, nplike)
@property
def tags(self):
return self._tags
@property
def index(self):
return self._index
def content(self, index):
return self._contents[index]
@property
def contents(self):
return self._contents
Form = UnionForm
def _form_with_key(self, getkey):
form_key = getkey(self)
return self.Form(
self._tags.form,
self._index.form,
[x._form_with_key(getkey) for x in self._contents],
has_identifier=self._identifier is not None,
parameters=self._parameters,
form_key=form_key,
)
def _to_buffers(self, form, getkey, container, nplike):
assert isinstance(form, self.Form)
key1 = getkey(self, form, "tags")
key2 = getkey(self, form, "index")
container[key1] = ak._v2._util.little_endian(self._tags.raw(nplike))
container[key2] = ak._v2._util.little_endian(self._index.raw(nplike))
for i, content in enumerate(self._contents):
content._to_buffers(form.content(i), getkey, container, nplike)
@property
def typetracer(self):
tt = ak._v2._typetracer.TypeTracer.instance()
return UnionArray(
ak._v2.index.Index(self._tags.raw(tt)),
ak._v2.index.Index(self._index.raw(tt)),
[x.typetracer for x in self._contents],
self._typetracer_identifier(),
self._parameters,
tt,
)
@property
def length(self):
return self._tags.length
def _forget_length(self):
return UnionArray(
self._tags.forget_length(),
self._index,
self._contents,
self._identifier,
self._parameters,
self._nplike,
)
def __repr__(self):
return self._repr("", "", "")
def _repr(self, indent, pre, post):
out = [indent, pre, "<UnionArray len="]
out.append(repr(str(self.length)))
out.append(">")
out.extend(self._repr_extra(indent + " "))
out.append("\n")
out.append(self._tags._repr(indent + " ", "<tags>", "</tags>\n"))
out.append(self._index._repr(indent + " ", "<index>", "</index>\n"))
for i, x in enumerate(self._contents):
out.append(f"{indent} <content index={repr(str(i))}>\n")
out.append(x._repr(indent + " ", "", "\n"))
out.append(f"{indent} </content>\n")
out.append(indent + "</UnionArray>")
out.append(post)
return "".join(out)
def merge_parameters(self, parameters):
return UnionArray(
self._tags,
self._index,
self._contents,
self._identifier,
ak._v2._util.merge_parameters(self._parameters, parameters),
self._nplike,
)
def _getitem_nothing(self):
return self._getitem_range(slice(0, 0))
def _getitem_at(self, where):
if not self._nplike.known_data:
return ak._v2._typetracer.OneOf(
[x._getitem_at(where) for x in self._contents]
)
if where < 0:
where += self.length
if self._nplike.known_shape and not 0 <= where < self.length:
raise ak._v2._util.indexerror(self, where)
tag, index = self._tags[where], self._index[where]
return self._contents[tag]._getitem_at(index)
def _getitem_range(self, where):
if not self._nplike.known_shape:
return self
start, stop, step = where.indices(self.length)
assert step == 1
return UnionArray(
self._tags[start:stop],
self._index[start:stop],
self._contents,
self._range_identifier(start, stop),
self._parameters,
self._nplike,
)
def _getitem_field(self, where, only_fields=()):
return UnionArray(
self._tags,
self._index,
[x._getitem_field(where, only_fields) for x in self._contents],
self._field_identifier(where),
None,
self._nplike,
).simplify_uniontype()
def _getitem_fields(self, where, only_fields=()):
return UnionArray(
self._tags,
self._index,
[x._getitem_fields(where, only_fields) for x in self._contents],
self._fields_identifier(where),
None,
self._nplike,
).simplify_uniontype()
def _carry(self, carry, allow_lazy):
assert isinstance(carry, ak._v2.index.Index)
try:
nexttags = self._tags[carry.data]
nextindex = self._index[: self._tags.length][carry.data]
except IndexError as err:
raise ak._v2._util.indexerror(self, carry.data, str(err))
return UnionArray(
nexttags,
nextindex,
self._contents,
self._carry_identifier(carry),
self._parameters,
self._nplike,
)
def project(self, index):
lentags = self._tags.length
assert not self._index.length < lentags
lenout = ak._v2.index.Index64.empty(1, self._nplike)
tmpcarry = ak._v2.index.Index64.empty(lentags, self._nplike)
assert (
lenout.nplike is self._nplike
and tmpcarry.nplike is self._nplike
and self._tags.nplike is self._nplike
and self._index.nplike is self._nplike
)
self._handle_error(
self._nplike[
"awkward_UnionArray_project",
lenout.dtype.type,
tmpcarry.dtype.type,
self._tags.dtype.type,
self._index.dtype.type,
](
lenout.data,
tmpcarry.data,
self._tags.data,
self._index.data,
lentags,
index,
)
)
nextcarry = ak._v2.index.Index64(tmpcarry.data[: lenout[0]], self._nplike)
return self._contents[index]._carry(nextcarry, False)
@staticmethod
def regular_index(tags, IndexClass=Index64, nplike=None):
if nplike is None:
nplike = ak.nplike.of(tags)
lentags = tags.length
size = ak._v2.index.Index64.empty(1, nplike)
assert size.nplike is nplike and tags.nplike is nplike
Content._selfless_handle_error(
nplike[
"awkward_UnionArray_regular_index_getsize",
size.dtype.type,
tags.dtype.type,
](
size.data,
tags.data,
lentags,
)
)
current = IndexClass.empty(size[0], nplike)
outindex = IndexClass.empty(lentags, nplike)
assert (
outindex.nplike is nplike
and current.nplike is nplike
and tags.nplike is nplike
)
Content._selfless_handle_error(
nplike[
"awkward_UnionArray_regular_index",
outindex.dtype.type,
current.dtype.type,
tags.dtype.type,
](
outindex.data,
current.data,
size[0],
tags.data,
lentags,
)
)
return outindex
def _regular_index(self, tags):
return self.regular_index(
tags, IndexClass=type(self._index), nplike=self._nplike
)
@staticmethod
def nested_tags_index(
offsets, counts, TagsClass=Index8, IndexClass=Index64, nplike=None
):
if nplike is None:
nplike = ak.nplike.of(offsets, counts)
f_offsets = ak._v2.index.Index64(copy.deepcopy(offsets.data))
contentlen = f_offsets[f_offsets.length - 1]
tags = TagsClass.empty(contentlen, nplike)
index = IndexClass.empty(contentlen, nplike)
for tag, count in enumerate(counts):
assert (
tags.nplike is nplike
and index.nplike is nplike
and f_offsets.nplike is nplike
and count.nplike is nplike
)
Content._selfless_handle_error(
nplike[
"awkward_UnionArray_nestedfill_tags_index",
tags.dtype.type,
index.dtype.type,
f_offsets.dtype.type,
count.dtype.type,
](
tags.data,
index.data,
f_offsets.data,
tag,
count.data,
f_offsets.length - 1,
)
)
return (tags, index)
def _nested_tags_index(self, offsets, counts):
return self.nested_tags_index(
offsets,
counts,
TagsClass=type(self._tags),
IndexClass=type(self._index),
nplike=self._nplike,
)
def _getitem_next_jagged_generic(self, slicestarts, slicestops, slicecontent, tail):
simplified = self.simplify_uniontype()
if isinstance(simplified, ak._v2.contents.UnionArray):
raise ak._v2._util.indexerror(
self,
ak._v2.contents.ListArray(
slicestarts, slicestops, slicecontent, None, None, self._nplike
),
"cannot apply jagged slices to irreducible union arrays",
)
return simplified._getitem_next_jagged(
slicestarts, slicestops, slicecontent, tail
)
def _getitem_next_jagged(self, slicestarts, slicestops, slicecontent, tail):
return self._getitem_next_jagged_generic(
slicestarts, slicestops, slicecontent, tail
)
def _getitem_next(self, head, tail, advanced):
if head == ():
return self
elif isinstance(
head, (int, slice, ak._v2.index.Index64, ak._v2.contents.ListOffsetArray)
):
outcontents = []
for i in range(len(self._contents)):
projection = self.project(i)
outcontents.append(projection._getitem_next(head, tail, advanced))
outindex = self._regular_index(self._tags)
out = UnionArray(
self._tags,
outindex,
outcontents,
self._identifier,
self._parameters,
self._nplike,
)
return out.simplify_uniontype()
elif ak._util.isstr(head):
return self._getitem_next_field(head, tail, advanced)
elif isinstance(head, list):
return self._getitem_next_fields(head, tail, advanced)
elif head is np.newaxis:
return self._getitem_next_newaxis(tail, advanced)
elif head is Ellipsis:
return self._getitem_next_ellipsis(tail, advanced)
elif isinstance(head, ak._v2.contents.IndexedOptionArray):
return self._getitem_next_missing(head, tail, advanced)
else:
raise ak._v2._util.error(AssertionError(repr(head)))
def simplify_uniontype(self, merge=True, mergebool=False):
if self._nplike.known_shape and self._index.length < self._tags.length:
raise ak._v2._util.error(
ValueError("invalid UnionArray: len(index) < len(tags)")
)
length = self._tags.length
tags = ak._v2.index.Index8.empty(length, self._nplike)
index = ak._v2.index.Index64.empty(length, self._nplike)
contents = []
for i, self_cont in enumerate(self._contents):
if isinstance(self_cont, UnionArray):
innertags = self_cont._tags
innerindex = self_cont._index
innercontents = self_cont._contents
for j, inner_cont in enumerate(innercontents):
unmerged = True
for k in range(len(contents)):
if merge and contents[k].mergeable(inner_cont, mergebool):
assert (
tags.nplike is self._nplike
and index.nplike is self._nplike
and self._tags.nplike is self._nplike
and self._index.nplike is self._nplike
and innertags.nplike is self._nplike
and innerindex.nplike is self._nplike
)
self._handle_error(
self._nplike[
"awkward_UnionArray_simplify",
tags.dtype.type,
index.dtype.type,
self._tags.dtype.type,
self._index.dtype.type,
innertags.dtype.type,
innerindex.dtype.type,
](
tags.data,
index.data,
self._tags.data,
self._index.data,
innertags.data,
innerindex.data,
k,
j,
i,
length,
contents[k].length,
)
)
contents[k] = contents[k].merge(inner_cont)
unmerged = False
break
if unmerged:
assert (
tags.nplike is self._nplike
and index.nplike is self._nplike
and self._tags.nplike is self._nplike
and self._index.nplike is self._nplike
and innertags.nplike is self._nplike
and innerindex.nplike is self._nplike
)
self._handle_error(
self._nplike[
"awkward_UnionArray_simplify",
tags.dtype.type,
index.dtype.type,
self._tags.dtype.type,
self._index.dtype.type,
innertags.dtype.type,
innerindex.dtype.type,
](
tags.data,
index.data,
self._tags.data,
self._index.data,
innertags.data,
innerindex.data,
len(contents),
j,
i,
length,
0,
)
)
contents.append(inner_cont)
else:
unmerged = True
for k in range(len(contents)):
if contents[k] is self_cont:
assert (
tags.nplike is self._nplike
and index.nplike is self._nplike
and self._tags.nplike is self._nplike
and self._index.nplike is self._nplike
)
self._handle_error(
self._nplike[
"awkward_UnionArray_simplify_one",
tags.dtype.type,
index.dtype.type,
self._tags.dtype.type,
self._index.dtype.type,
](
tags.data,
index.data,
self._tags.data,
self._index.data,
k,
i,
length,
0,
)
)
unmerged = False
break
elif merge and contents[k].mergeable(self_cont, mergebool):
assert (
tags.nplike is self._nplike
and index.nplike is self._nplike
and self._tags.nplike is self._nplike
and self._index.nplike is self._nplike
)
self._handle_error(
self._nplike[
"awkward_UnionArray_simplify_one",
tags.dtype.type,
index.dtype.type,
self._tags.dtype.type,
self._index.dtype.type,
](
tags.data,
index.data,
self._tags.data,
self._index.data,
k,
i,
length,
contents[k].length,
)
)
contents[k] = contents[k].merge(self_cont)
unmerged = False
break
if unmerged:
assert (
tags.nplike is self._nplike
and index.nplike is self._nplike
and self._tags.nplike is self._nplike
and self._index.nplike is self._nplike
)
self._handle_error(
self._nplike[
"awkward_UnionArray_simplify_one",
tags.dtype.type,
index.dtype.type,
self._tags.dtype.type,
self._index.dtype.type,
](
tags.data,
index.data,
self._tags.data,
self._index.data,
len(contents),
i,
length,
0,
)
)
contents.append(self_cont)
if len(contents) > 2**7:
raise ak._v2._util.error(
NotImplementedError(
"FIXME: handle UnionArray with more than 127 contents"
)
)
if len(contents) == 1:
return contents[0]._carry(index, True)
else:
return UnionArray(
tags, index, contents, self._identifier, self._parameters, self._nplike
)
def num(self, axis, depth=0):
posaxis = self.axis_wrap_if_negative(axis)
if posaxis == depth:
out = self.length
if ak._v2._util.isint(out):
return np.int64(out)
else:
return out
else:
contents = []
for content in self._contents:
contents.append(content.num(posaxis, depth))
out = UnionArray(
self._tags, self._index, contents, None, self._parameters, self._nplike
)
return out.simplify_uniontype(True, False)
def _offsets_and_flattened(self, axis, depth):
posaxis = self.axis_wrap_if_negative(axis)
if posaxis == depth:
raise ak._v2._util.error(np.AxisError("axis=0 not allowed for flatten"))
else:
has_offsets = False
offsetsraws = self._nplike.empty(len(self._contents), dtype=np.intp)
contents = []
for i in range(len(self._contents)):
offsets, flattened = self._contents[i]._offsets_and_flattened(
posaxis, depth
)
offsetsraws[i] = offsets.ptr
contents.append(flattened)
has_offsets = offsets.length != 0
if has_offsets:
total_length = ak._v2.index.Index64.empty(1, self._nplike)
assert (
total_length.nplike is self._nplike
and self._tags.nplike is self._nplike
and self._index.nplike is self._nplike
)
self._handle_error(
self._nplike[
"awkward_UnionArray_flatten_length",
total_length.dtype.type,
self._tags.dtype.type,
self._index.dtype.type,
np.int64,
](
total_length.data,
self._tags.data,
self._index.data,
self._tags.length,
offsetsraws.ctypes.data_as(
ctypes.POINTER(ctypes.POINTER(ctypes.c_int64))
),
)
)
totags = ak._v2.index.Index8.empty(total_length[0], self._nplike)
toindex = ak._v2.index.Index64.empty(total_length[0], self._nplike)
tooffsets = ak._v2.index.Index64.empty(
self._tags.length + 1, self._nplike
)
assert (
totags.nplike is self._nplike
and toindex.nplike is self._nplike
and tooffsets.nplike is self._nplike
and self._tags.nplike is self._nplike
and self._index.nplike is self._nplike
)
self._handle_error(
self._nplike[
"awkward_UnionArray_flatten_combine",
totags.dtype.type,
toindex.dtype.type,
tooffsets.dtype.type,
self._tags.dtype.type,
self._index.dtype.type,
np.int64,
](
totags.data,
toindex.data,
tooffsets.data,
self._tags.data,
self._index.data,
self._tags.length,
offsetsraws.ctypes.data_as(
ctypes.POINTER(ctypes.POINTER(ctypes.c_int64))
),
)
)
return (
tooffsets,
UnionArray(
totags, toindex, contents, None, self._parameters, self._nplike
),
)
else:
offsets = ak._v2.index.Index64.zeros(0, self._nplike, dtype=np.int64)
return (
offsets,
UnionArray(
self._tags,
self._index,
contents,
None,
self._parameters,
self._nplike,
),
)
def mergeable(self, other, mergebool):
if not _parameters_equal(self._parameters, other._parameters):
return False
return True
def merging_strategy(self, others):
if len(others) == 0:
raise ak._v2._util.error(
ValueError(
"to merge this array with 'others', at least one other must be provided"
)
)
head = [self]
tail = []
for i in range(len(others)):
head.append(others[i])
if any(
isinstance(x.nplike, ak._v2._typetracer.TypeTracer) for x in head + tail
):
head = [
x
if isinstance(x.nplike, ak._v2._typetracer.TypeTracer)
else x.typetracer
for x in head
]
tail = [
x
if isinstance(x.nplike, ak._v2._typetracer.TypeTracer)
else x.typetracer
for x in tail
]
return (head, tail)
def _reverse_merge(self, other):
theirlength = other.length
mylength = self.length
tags = ak._v2.index.Index8.empty(theirlength + mylength, self._nplike)
index = ak._v2.index.Index64.empty(theirlength + mylength, self._nplike)
contents = [other]
contents.extend(self.contents)
assert tags.nplike is self._nplike
self._handle_error(
self._nplike["awkward_UnionArray_filltags_const", tags.dtype.type](
tags.data,
0,
theirlength,
0,
)
)
assert index.nplike is self._nplike
self._handle_error(
self._nplike["awkward_UnionArray_fillindex_count", index.dtype.type](
index.data,
0,
theirlength,
)
)
assert tags.nplike is self._nplike and self.tags.nplike is self._nplike
self._handle_error(
self._nplike[
"awkward_UnionArray_filltags",
tags.dtype.type,
self.tags.dtype.type,
](
tags.data,
theirlength,
self.tags.data,
mylength,
1,
)
)
assert index.nplike is self._nplike and self.index.nplike is self._nplike
self._handle_error(
self._nplike[
"awkward_UnionArray_fillindex",
index.dtype.type,
self.index.dtype.type,
](
index.data,
theirlength,
self.index.data,
mylength,
)
)
if len(contents) > 2**7:
raise ak._v2._util.error(
AssertionError("FIXME: handle UnionArray with more than 127 contents")
)
parameters = ak._v2._util.merge_parameters(self._parameters, other._parameters)
return ak._v2.contents.unionarray.UnionArray(
tags, index, contents, None, parameters, self._nplike
)
def mergemany(self, others):
if len(others) == 0:
return self
head, tail = self._merging_strategy(others)
total_length = 0
for array in head:
total_length += array.length
nexttags = ak._v2.index.Index8.empty(total_length, self._nplike)
nextindex = ak._v2.index.Index64.empty(total_length, self._nplike)
nextcontents = []
length_so_far = 0
parameters = self._parameters
for array in head:
parameters = ak._v2._util.merge_parameters(
self._parameters, array._parameters, True
)
if isinstance(array, ak._v2.contents.unionarray.UnionArray):
union_tags = ak._v2.index.Index(array.tags)
union_index = ak._v2.index.Index(array.index)
union_contents = array.contents
assert (
nexttags.nplike is self._nplike
and union_tags.nplike is self._nplike
)
self._handle_error(
self._nplike[
"awkward_UnionArray_filltags",
nexttags.dtype.type,
union_tags.dtype.type,
](
nexttags.data,
length_so_far,
union_tags.data,
array.length,
len(nextcontents),
)
)
assert (
nextindex.nplike is self._nplike
and union_index.nplike is self._nplike
)
self._handle_error(
self._nplike[
"awkward_UnionArray_fillindex",
nextindex.dtype.type,
union_index.dtype.type,
](
nextindex.data,
length_so_far,
union_index.data,
array.length,
)
)
length_so_far += array.length
nextcontents.extend(union_contents)
elif isinstance(array, ak._v2.contents.emptyarray.EmptyArray):
pass
else:
assert nexttags.nplike is self._nplike
self._handle_error(
self._nplike[
"awkward_UnionArray_filltags_const",
nexttags.dtype.type,
](
nexttags.data,
length_so_far,
array.length,
len(nextcontents),
)
)
assert nextindex.nplike is self._nplike
self._handle_error(
self._nplike[
"awkward_UnionArray_fillindex_count", nextindex.dtype.type
](nextindex.data, length_so_far, array.length)
)
length_so_far += array.length
nextcontents.append(array)
if len(nextcontents) > 127:
raise ak._v2._util.error(
ValueError("FIXME: handle UnionArray with more than 127 contents")
)
next = ak._v2.contents.unionarray.UnionArray(
nexttags, nextindex, nextcontents, None, parameters, self._nplike
)
# Given UnionArray's merging_strategy, tail is always empty, but just to be formal...
if len(tail) == 0:
return next
reversed = tail[0]._reverse_merge(next)
if len(tail) == 1:
return reversed
else:
return reversed.mergemany(tail[1:])
def fillna(self, value):
contents = []
for content in self._contents:
contents.append(content.fillna(value))
out = UnionArray(
self._tags,
self._index,
contents,
self._identifier,
self._parameters,
self._nplike,
)
return out.simplify_uniontype(True, False)
def _localindex(self, axis, depth):
posaxis = self.axis_wrap_if_negative(axis)
if posaxis == depth:
return self._localindex_axis0()
else:
contents = []
for content in self._contents:
contents.append(content._localindex(posaxis, depth))
return UnionArray(
self._tags,
self._index,
contents,
self._identifier,
self._parameters,
self._nplike,
)
def _combinations(self, n, replacement, recordlookup, parameters, axis, depth):
posaxis = self.axis_wrap_if_negative(axis)
if posaxis == depth:
return self._combinations_axis0(n, replacement, recordlookup, parameters)
else:
contents = []
for content in self._contents:
contents.append(
content._combinations(
n, replacement, recordlookup, parameters, posaxis, depth
)
)
            return ak._v2.contents.unionarray.UnionArray(
self._tags,
self._index,
contents,
self._identifier,
self._parameters,
self._nplike,
)
def numbers_to_type(self, name):
contents = []
for x in self._contents:
contents.append(x.numbers_to_type(name))
return ak._v2.contents.unionarray.UnionArray(
self._tags,
self._index,
contents,
self._identifier,
self._parameters,
self._nplike,
)
def _is_unique(self, negaxis, starts, parents, outlength):
simplified = self.simplify_uniontype(True, True)
if isinstance(simplified, ak._v2.contents.UnionArray):
raise ak._v2._util.error(
ValueError("cannot check if an irreducible UnionArray is unique")
)
return simplified._is_unique(negaxis, starts, parents, outlength)
def _unique(self, negaxis, starts, parents, outlength):
simplified = self.simplify_uniontype(True, True)
if isinstance(simplified, ak._v2.contents.UnionArray):
raise ak._v2._util.error(
ValueError("cannot make a unique irreducible UnionArray")
)
return simplified._unique(negaxis, starts, parents, outlength)
def _argsort_next(
self,
negaxis,
starts,
shifts,
parents,
outlength,
ascending,
stable,
kind,
order,
):
simplified = self.simplify_uniontype(mergebool=True)
if simplified.length == 0:
return ak._v2.contents.NumpyArray(
self._nplike.empty(0, np.int64), None, None, self._nplike
)
if isinstance(simplified, ak._v2.contents.UnionArray):
raise ak._v2._util.error(
ValueError("cannot argsort an irreducible UnionArray")
)
return simplified._argsort_next(
negaxis, starts, shifts, parents, outlength, ascending, stable, kind, order
)
def _sort_next(
self, negaxis, starts, parents, outlength, ascending, stable, kind, order
):
if self.length == 0:
return self
simplified = self.simplify_uniontype(mergebool=True)
if simplified.length == 0:
return simplified
if isinstance(simplified, ak._v2.contents.UnionArray):
raise ak._v2._util.error(
ValueError("cannot sort an irreducible UnionArray")
)
return simplified._sort_next(
negaxis, starts, parents, outlength, ascending, stable, kind, order
)
def _reduce_next(
self,
reducer,
negaxis,
starts,
shifts,
parents,
outlength,
mask,
keepdims,
):
simplified = self.simplify_uniontype(mergebool=True)
if isinstance(simplified, UnionArray):
raise ak._v2._util.error(
ValueError(
f"cannot call ak.{reducer.name} on an irreducible UnionArray"
)
)
return simplified._reduce_next(
reducer,
negaxis,
starts,
shifts,
parents,
outlength,
mask,
keepdims,
)
def _validityerror(self, path):
for i in range(len(self.contents)):
if isinstance(self.contents[i], ak._v2.contents.unionarray.UnionArray):
return "{} contains {}, the operation that made it might have forgotten to call 'simplify_uniontype'".format(
type(self), type(self.contents[i])
)
if self._nplike.known_shape and self.index.length < self.tags.length:
return f'at {path} ("{type(self)}"): len(index) < len(tags)'
lencontents = self._nplike.empty(len(self.contents), dtype=np.int64)
if self._nplike.known_shape:
for i in range(len(self.contents)):
lencontents[i] = self.contents[i].length
error = self._nplike[
"awkward_UnionArray_validity",
self.tags.dtype.type,
self.index.dtype.type,
np.int64,
](
self.tags.data,
self.index.data,
self.tags.length,
len(self.contents),
lencontents,
)
if error.str is not None:
if error.filename is None:
filename = ""
else:
filename = " (in compiled code: " + error.filename.decode(
errors="surrogateescape"
).lstrip("\n").lstrip("(")
message = error.str.decode(errors="surrogateescape")
return 'at {} ("{}"): {} at i={}{}'.format(
path, type(self), message, error.id, filename
)
for i in range(len(self.contents)):
sub = self.contents[i].validityerror(path + f".content({i})")
if sub != "":
return sub
return ""
def _nbytes_part(self):
result = self.tags._nbytes_part() + self.index._nbytes_part()
for content in self.contents:
result = result + content._nbytes_part()
if self.identifier is not None:
result = result + self.identifier._nbytes_part()
return result
def _rpad(self, target, axis, depth, clip):
posaxis = self.axis_wrap_if_negative(axis)
if posaxis == depth:
return self.rpad_axis0(target, clip)
else:
contents = []
for content in self._contents:
contents.append(content._rpad(target, posaxis, depth, clip))
out = ak._v2.contents.unionarray.UnionArray(
self.tags,
self.index,
contents,
self._identifier,
self._parameters,
self._nplike,
)
return out.simplify_uniontype(True, False)
def _to_arrow(self, pyarrow, mask_node, validbytes, length, options):
nptags = self._tags.raw(numpy)
npindex = self._index.raw(numpy)
copied_index = False
values = []
for tag, content in enumerate(self._contents):
selected_tags = nptags == tag
this_index = npindex[selected_tags]
# Arrow unions can't have masks; propagate validbytes down to the content.
if validbytes is not None:
# If this_index is a filtered permutation, we can just filter-permute
                # the mask to have the same order as the content.
if numpy.unique(this_index).shape[0] == this_index.shape[0]:
this_validbytes = numpy.zeros(this_index.shape[0], dtype=np.int8)
this_validbytes[this_index] = validbytes[selected_tags]
# If this_index is not a filtered permutation, then we can't modify
# the mask to fit the content. The same element in the content array
# will appear multiple times in the union array, and it *might* be
# presented as valid in some union array elements and masked in others.
# The validbytes that recurses down to the next level can't have an
# element that is both 0 (masked) and 1 (valid).
else:
this_validbytes = validbytes[selected_tags]
content = content[this_index]
if not copied_index:
copied_index = True
npindex = numpy.array(npindex, copy=True)
npindex[selected_tags] = numpy.arange(
this_index.shape[0], dtype=npindex.dtype
)
else:
this_validbytes = None
values.append(
content._to_arrow(
pyarrow, mask_node, this_validbytes, this_index.shape[0], options
)
)
types = pyarrow.union(
[
pyarrow.field(str(i), values[i].type).with_nullable(
mask_node is not None or self._contents[i].is_OptionType
)
for i in range(len(values))
],
"dense",
list(range(len(values))),
)
if not issubclass(npindex.dtype.type, np.int32):
npindex = npindex.astype(np.int32)
return pyarrow.Array.from_buffers(
ak._v2._connect.pyarrow.to_awkwardarrow_type(
types,
options["extensionarray"],
options["record_is_scalar"],
None,
self,
),
nptags.shape[0],
[
None,
ak._v2._connect.pyarrow.to_length(nptags, length),
ak._v2._connect.pyarrow.to_length(npindex, length),
],
children=values,
)
def _to_numpy(self, allow_missing):
contents = [
ak._v2.operations.convert.to_numpy(
self.project(i), allow_missing=allow_missing
)
for i in range(len(self.contents))
]
if any(isinstance(x, self._nplike.ma.MaskedArray) for x in contents):
try:
out = self._nplike.ma.concatenate(contents)
except Exception:
raise ak._v2._util.error(
ValueError(f"cannot convert {self} into numpy.ma.MaskedArray")
)
else:
try:
out = numpy.concatenate(contents)
except Exception:
raise ak._v2._util.error(
ValueError(f"cannot convert {self} into np.ndarray")
)
tags = numpy.asarray(self.tags)
for tag, content in enumerate(contents):
mask = tags == tag
out[mask] = content
return out
def _completely_flatten(self, nplike, options):
out = []
for i in range(len(self._contents)):
index = self._index[self._tags.data == i]
out.extend(
self._contents[i]
._carry(index, False)
._completely_flatten(nplike, options)
)
return out
def _recursively_apply(
self, action, depth, depth_context, lateral_context, options
):
if options["return_array"]:
def continuation():
return UnionArray(
self._tags,
self._index,
[
content._recursively_apply(
action,
depth,
copy.copy(depth_context),
lateral_context,
options,
)
for content in self._contents
],
self._identifier,
self._parameters if options["keep_parameters"] else None,
self._nplike,
)
else:
def continuation():
for content in self._contents:
content._recursively_apply(
action,
depth,
copy.copy(depth_context),
lateral_context,
options,
)
result = action(
self,
depth=depth,
depth_context=depth_context,
lateral_context=lateral_context,
continuation=continuation,
options=options,
)
if isinstance(result, Content):
return result
elif result is None:
return continuation()
else:
raise ak._v2._util.error(AssertionError(result))
def packed(self):
tags = self._tags.raw(self._nplike)
original_index = index = self._index.raw(self._nplike)[: tags.shape[0]]
contents = list(self._contents)
for tag in range(len(self._contents)):
is_tag = tags == tag
num_tag = self._nplike.count_nonzero(is_tag)
if len(contents[tag]) > num_tag:
if original_index is index:
index = index.copy()
index[is_tag] = self._nplike.arange(num_tag, dtype=index.dtype)
contents[tag] = self.project(tag)
contents[tag] = contents[tag].packed()
return UnionArray(
ak._v2.index.Index8(tags),
ak._v2.index.Index(index),
contents,
self._identifier,
self._parameters,
self._nplike,
)
def _to_list(self, behavior, json_conversions):
out = self._to_list_custom(behavior, json_conversions)
if out is not None:
return out
tags = self._tags.raw(numpy)
index = self._index.raw(numpy)
contents = [x._to_list(behavior, json_conversions) for x in self._contents]
out = [None] * tags.shape[0]
for i, tag in enumerate(tags):
out[i] = contents[tag][index[i]]
return out
def _to_nplike(self, nplike):
index = self._index._to_nplike(nplike)
contents = [content._to_nplike(nplike) for content in self._contents]
return UnionArray(
self._tags,
index,
contents,
identifier=self.identifier,
parameters=self.parameters,
nplike=nplike,
)
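A minimal construction sketch (not part of the module) using the tags/index/contents layout that __init__ validates above; the NumpyArray contents and the simplify_uniontype() call are chosen only for illustration.
import numpy as np
import awkward as ak
# tags say which content each element comes from; index says where inside that content
tags = ak._v2.index.Index8(np.array([0, 1, 0, 1], dtype=np.int8))
index = ak._v2.index.Index64(np.array([0, 0, 1, 1], dtype=np.int64))
contents = [
    ak._v2.contents.NumpyArray(np.array([1.1, 2.2])),
    ak._v2.contents.NumpyArray(np.array([10, 20], dtype=np.int64)),
]
union = ak._v2.contents.UnionArray(tags, index, contents)
print(union[0], union[1])          # dispatches through tags/index: 1.1 then 10
print(union.simplify_uniontype())  # the two mergeable numeric contents collapse into one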
| 35.078059
| 125
| 0.481626
|
707708cf8f374a438f02680c23297eba45b65509
| 8,317
|
py
|
Python
|
tests/base.py
|
sklump/marshmallow
|
8af34a94322bf4fff71eaff0f29d5e1655fc9c6e
|
[
"MIT"
] | null | null | null |
tests/base.py
|
sklump/marshmallow
|
8af34a94322bf4fff71eaff0f29d5e1655fc9c6e
|
[
"MIT"
] | 19
|
2021-06-11T06:21:43.000Z
|
2022-01-12T23:15:30.000Z
|
tests/base.py
|
sklump/marshmallow
|
8af34a94322bf4fff71eaff0f29d5e1655fc9c6e
|
[
"MIT"
] | null | null | null |
"""Test utilities and fixtures."""
import datetime as dt
import uuid
import simplejson
import pytz
from marshmallow import Schema, fields, post_load, validate, missing
from marshmallow.exceptions import ValidationError
central = pytz.timezone("US/Central")
ALL_FIELDS = [
fields.String,
fields.Integer,
fields.Boolean,
fields.Float,
fields.Number,
fields.DateTime,
fields.Time,
fields.Date,
fields.TimeDelta,
fields.Dict,
fields.Url,
fields.Email,
fields.UUID,
fields.Decimal,
fields.IP,
fields.IPv4,
fields.IPv6,
fields.IPInterface,
fields.IPv4Interface,
fields.IPv6Interface,
]
##### Custom asserts #####
def assert_date_equal(d1, d2):
assert d1.year == d2.year
assert d1.month == d2.month
assert d1.day == d2.day
def assert_time_equal(t1, t2):
assert t1.hour == t2.hour
assert t1.minute == t2.minute
assert t1.second == t2.second
assert t1.microsecond == t2.microsecond
##### Models #####
class User:
SPECIES = "Homo sapiens"
def __init__(
self,
name,
age=0,
id_=None,
homepage=None,
email=None,
registered=True,
time_registered=None,
birthdate=None,
birthtime=None,
balance=100,
sex="male",
employer=None,
various_data=None,
):
self.name = name
self.age = age
# A naive datetime
self.created = dt.datetime(2013, 11, 10, 14, 20, 58)
# A TZ-aware datetime
self.updated = central.localize(
dt.datetime(2013, 11, 10, 14, 20, 58), is_dst=False
)
self.id = id_
self.homepage = homepage
self.email = email
self.balance = balance
self.registered = True
self.hair_colors = ["black", "brown", "blond", "redhead"]
self.sex_choices = ("male", "female")
self.finger_count = 10
self.uid = uuid.uuid1()
self.time_registered = time_registered or dt.time(1, 23, 45, 6789)
self.birthdate = birthdate or dt.date(2013, 1, 23)
self.birthtime = birthtime or dt.time(0, 1, 2, 3333)
self.activation_date = dt.date(2013, 12, 11)
self.sex = sex
self.employer = employer
self.relatives = []
self.various_data = various_data or {
"pets": ["cat", "dog"],
"address": "1600 Pennsylvania Ave\n" "Washington, DC 20006",
}
@property
def since_created(self):
return dt.datetime(2013, 11, 24) - self.created
def __repr__(self):
return "<User {}>".format(self.name)
class Blog:
def __init__(self, title, user, collaborators=None, categories=None, id_=None):
self.title = title
self.user = user
self.collaborators = collaborators or [] # List/tuple of users
self.categories = categories
self.id = id_
def __contains__(self, item):
return item.name in [each.name for each in self.collaborators]
class DummyModel:
def __init__(self, foo):
self.foo = foo
def __eq__(self, other):
return self.foo == other.foo
def __str__(self):
return "bar {}".format(self.foo)
##### Schemas #####
class Uppercased(fields.Field):
"""Custom field formatting example."""
def _serialize(self, value, attr, obj):
if value:
return value.upper()
def get_lowername(obj):
if obj is None:
return missing
if isinstance(obj, dict):
return obj.get("name").lower()
else:
return obj.name.lower()
class UserSchema(Schema):
name = fields.String()
age = fields.Float() # type: fields.Field
created = fields.DateTime()
created_formatted = fields.DateTime(
format="%Y-%m-%d", attribute="created", dump_only=True
)
created_iso = fields.DateTime(format="iso", attribute="created", dump_only=True)
updated = fields.DateTime()
species = fields.String(attribute="SPECIES")
id = fields.String(default="no-id")
uppername = Uppercased(attribute="name", dump_only=True)
homepage = fields.Url()
email = fields.Email()
balance = fields.Decimal()
is_old = fields.Method("get_is_old") # type: fields.Field
lowername = fields.Function(get_lowername)
registered = fields.Boolean()
hair_colors = fields.List(fields.Raw)
sex_choices = fields.List(fields.Raw)
finger_count = fields.Integer()
uid = fields.UUID()
time_registered = fields.Time()
birthdate = fields.Date()
birthtime = fields.Time()
activation_date = fields.Date()
since_created = fields.TimeDelta()
sex = fields.Str(validate=validate.OneOf(["male", "female"]))
various_data = fields.Dict()
class Meta:
render_module = simplejson
def get_is_old(self, obj):
if obj is None:
return missing
if isinstance(obj, dict):
age = obj.get("age")
else:
age = obj.age
try:
return age > 80
except TypeError as te:
raise ValidationError(str(te)) from te
@post_load
def make_user(self, data, **kwargs):
return User(**data)
class UserMetaSchema(Schema):
"""The equivalent of the UserSchema, using the ``fields`` option."""
uppername = Uppercased(attribute="name", dump_only=True)
balance = fields.Decimal()
is_old = fields.Method("get_is_old")
lowername = fields.Function(get_lowername)
species = fields.String(attribute="SPECIES")
homepage = fields.Url()
email = fields.Email()
various_data = fields.Dict()
def get_is_old(self, obj):
if obj is None:
return missing
if isinstance(obj, dict):
age = obj.get("age")
else:
age = obj.age
try:
return age > 80
except TypeError as te:
raise ValidationError(str(te)) from te
class Meta:
fields = (
"name",
"age",
"created",
"updated",
"id",
"homepage",
"uppername",
"email",
"balance",
"is_old",
"lowername",
"species",
"registered",
"hair_colors",
"sex_choices",
"finger_count",
"uid",
"time_registered",
"birthdate",
"birthtime",
"since_created",
"various_data",
)
class UserExcludeSchema(UserSchema):
class Meta:
exclude = ("created", "updated")
class UserAdditionalSchema(Schema):
lowername = fields.Function(lambda obj: obj.name.lower())
class Meta:
additional = ("name", "age", "created", "email")
class UserIntSchema(UserSchema):
age = fields.Integer()
class UserFloatStringSchema(UserSchema):
age = fields.Float(as_string=True)
class ExtendedUserSchema(UserSchema):
is_old = fields.Boolean()
class UserRelativeUrlSchema(UserSchema):
homepage = fields.Url(relative=True)
class BlogSchema(Schema):
title = fields.String()
user = fields.Nested(UserSchema)
collaborators = fields.List(fields.Nested(UserSchema()))
categories = fields.List(fields.String)
id = fields.String()
class BlogUserMetaSchema(Schema):
user = fields.Nested(UserMetaSchema())
collaborators = fields.List(fields.Nested(UserMetaSchema()))
class BlogSchemaMeta(Schema):
"""Same as BlogSerializer but using ``fields`` options."""
user = fields.Nested(UserSchema)
collaborators = fields.List(fields.Nested(UserSchema()))
class Meta:
fields = ("title", "user", "collaborators", "categories", "id")
class BlogOnlySchema(Schema):
title = fields.String()
user = fields.Nested(UserSchema)
collaborators = fields.List(fields.Nested(UserSchema(only=("id",))))
class BlogSchemaExclude(BlogSchema):
user = fields.Nested(UserSchema, exclude=("uppername", "species"))
class BlogSchemaOnlyExclude(BlogSchema):
user = fields.Nested(UserSchema, only=("name",), exclude=("name", "species"))
class mockjson: # noqa
@staticmethod
def dumps(val):
return b"{'foo': 42}"
@staticmethod
def loads(val):
return {"foo": 42}
| 25.20303
| 84
| 0.604906
|
bfe0b9e1fcf14757fb6b70de330dd0de46b2cced
| 2,839
|
py
|
Python
|
antilupa/forms.py
|
voyager-kin/antilupa
|
dd5b16541b5178b085df8899a848859adc978da9
|
[
"MIT"
] | null | null | null |
antilupa/forms.py
|
voyager-kin/antilupa
|
dd5b16541b5178b085df8899a848859adc978da9
|
[
"MIT"
] | null | null | null |
antilupa/forms.py
|
voyager-kin/antilupa
|
dd5b16541b5178b085df8899a848859adc978da9
|
[
"MIT"
] | null | null | null |
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, HiddenField, BooleanField
from wtforms.validators import Length, EqualTo, Email, DataRequired, ValidationError
from antilupa.models import User, Person, TrackRecordView
from flask_wtf.file import FileField, FileRequired
from werkzeug.utils import secure_filename
class RegisterForm(FlaskForm):
def validate_username(self, username_to_check):
user = User.query.filter_by(username=username_to_check.data).first()
if user:
raise ValidationError('Username already exists! Please try a different username')
def validate_email_address(self, email_address_to_check):
email_address = User.query.filter_by(email_address=email_address_to_check.data).first()
if email_address:
raise ValidationError('Email Address already exists! Please try a different email address')
username = StringField(label='User Name:', validators=[Length(min=2, max=30), DataRequired()])
email_address = StringField(label='Email Address:', validators=[Email(), DataRequired()])
password1 = PasswordField(label='Password:', validators=[Length(min=6), DataRequired()])
password2 = PasswordField(label='Confirm Password:', validators=[EqualTo('password1'), DataRequired()])
submit = SubmitField(label='Create Account')
class PersonSearchForm(FlaskForm):
name = StringField(label='Person to track:', validators=[Length(min=2, max=100), DataRequired()])
submit = SubmitField(label='Track')
class PersonForm(FlaskForm):
name = StringField(label='Person to track:', validators=[Length(min=2, max=100), DataRequired()])
submit = SubmitField(label='Track')
class RecordForm(FlaskForm):
name = StringField(label='Topic or person :', validators=[Length(min=2, max=100), DataRequired()])
photo = FileField(label='Photo :') # IMAGE
person_id = StringField(label='Topic or person :', validators=[Length(min=0, max=100)])
user_id = StringField(label='User id :', validators=[Length(min=0, max=100)])
title = StringField(label='Record :', validators=[Length(min=2, max=200), DataRequired()])
date = StringField(label='Date :', validators=[Length(min=2, max=100), DataRequired()])
url = StringField(label='URL :', validators=[Length(min=2, max=100), DataRequired()])
tag = StringField(label='Tag - maximum of 5 tags separated by comma :', validators=[Length(min=0, max=100)])
submit = SubmitField(label='Add Record')
class LoginForm(FlaskForm):
username = StringField(label='User Name:', validators=[DataRequired()])
password = PasswordField(label='Password:', validators=[DataRequired()])
submit = SubmitField(label='Sign in')
| 52.574074
| 113
| 0.717506
|
abd0f4023fe510e1be546fb87d4aefc0c2981d78
| 11,296
|
py
|
Python
|
Tests/GUI/DMachineSetup/test_SBar.py
|
Eomys/Pyleecan
|
4d7f0cbabf0311006963e7a2f435db2ecd901118
|
[
"Apache-2.0"
] | 4
|
2017-11-27T10:14:34.000Z
|
2018-09-20T11:30:32.000Z
|
Tests/GUI/DMachineSetup/test_SBar.py
|
Eomys/Pyleecan
|
4d7f0cbabf0311006963e7a2f435db2ecd901118
|
[
"Apache-2.0"
] | null | null | null |
Tests/GUI/DMachineSetup/test_SBar.py
|
Eomys/Pyleecan
|
4d7f0cbabf0311006963e7a2f435db2ecd901118
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys
from random import uniform
from PySide2 import QtWidgets
from PySide2.QtTest import QTest
from pyleecan.Classes.CondType21 import CondType21
from pyleecan.Classes.CondType22 import CondType22
from pyleecan.Classes.LamSquirrelCage import LamSquirrelCage
from pyleecan.Classes.MachineSCIM import MachineSCIM
from pyleecan.Classes.Material import Material
from pyleecan.Classes.SlotW22 import SlotW22
from Tests.GUI import gui_option # Set unit as [m]
from pyleecan.GUI.Dialog.DMatLib.DMatLib import LIB_KEY, MACH_KEY
from pyleecan.GUI.Dialog.DMachineSetup.SBar.PCondType21.PCondType21 import PCondType21
from pyleecan.GUI.Dialog.DMachineSetup.SBar.PCondType22.PCondType22 import PCondType22
from pyleecan.GUI.Dialog.DMachineSetup.SBar.SBar import SBar
import pytest
class TestSBar(object):
"""Test that the widget SBar behave like it should"""
@classmethod
def setup_class(cls):
"""Start the app for the test"""
print("\nStart Test TestSBar")
if not QtWidgets.QApplication.instance():
cls.app = QtWidgets.QApplication(sys.argv)
else:
cls.app = QtWidgets.QApplication.instance()
@classmethod
def teardown_class(cls):
"""Exit the app after the test"""
cls.app.quit()
def setup_method(self):
"""Run at the begining of every test to setup the gui"""
test_obj = MachineSCIM()
test_obj.rotor = LamSquirrelCage(Hscr=0.11, Lscr=0.12)
test_obj.rotor.slot = SlotW22(H0=0.001, H2=0.01, W0=0.1, W2=0.2)
test_obj.rotor.winding.Lewout = 0.13
test_obj.rotor.ring_mat.name = "test3"
test_obj.rotor.winding.conductor = CondType21(Hbar=0.014, Wbar=0.015)
test_obj.rotor.winding.conductor.cond_mat.name = "test1"
material_dict = {LIB_KEY: list(), MACH_KEY: list()}
material_dict[LIB_KEY] = [
Material(name="test1"),
Material(name="test2"),
Material(name="test3"),
]
material_dict[LIB_KEY][0].elec.rho = 0.31
material_dict[LIB_KEY][1].elec.rho = 0.32
material_dict[LIB_KEY][2].elec.rho = 0.33
self.widget = SBar(
machine=test_obj, material_dict=material_dict, is_stator=False
)
self.test_obj = test_obj
self.material_dict = material_dict
def test_init(self):
"""Check that the Widget spinbox initialise to the lamination value"""
assert self.widget.lf_Hscr.value() == 0.11
assert self.widget.lf_Lscr.value() == 0.12
assert self.widget.lf_Lewout.value() == 0.13
assert self.widget.w_mat_scr.c_mat_type.currentIndex() == 2
assert self.widget.w_mat_scr.c_mat_type.currentText() == "test3"
assert type(self.widget.w_bar) is PCondType21
assert self.widget.c_bar_type.count() == 2
assert self.widget.c_bar_type.currentIndex() == 0
assert self.widget.c_bar_type.currentText() == "Rectangular bar"
assert self.widget.w_bar.lf_Hbar.value() == 0.014
assert self.widget.w_bar.lf_Wbar.value() == 0.015
assert self.widget.w_bar.w_mat.c_mat_type.currentIndex() == 0
assert self.widget.w_bar.w_mat.c_mat_type.currentText() == "test1"
# Check output txt
assert self.widget.w_bar.w_out.out_Sbar.text() == "Sbar: 0.00021 [m²]"
assert self.widget.w_bar.w_out.out_Sslot.text() == "Sslot: 0.002088 [m²]"
assert self.widget.w_bar.w_out.out_ratio.text() == "Sbar / Sslot: 10.06 [%]"
self.test_obj.rotor = LamSquirrelCage(Hscr=0.21, Lscr=0.22)
self.test_obj.rotor.slot = SlotW22(H0=0.001, H2=0.01, W0=0.1, W2=0.2)
self.test_obj.rotor.winding.Lewout = 0.23
self.test_obj.rotor.ring_mat.name = "test2"
self.test_obj.rotor.winding.conductor = None
self.widget = SBar(
machine=self.test_obj,
material_dict=self.material_dict,
is_stator=False,
)
assert self.widget.c_bar_type.currentIndex() == 0
def test_init_Cond22(self):
self.test_obj.rotor = LamSquirrelCage(Hscr=0.21, Lscr=0.22)
self.test_obj.rotor.slot = SlotW22(H0=0.001, H2=0.01, W0=0.1, W2=0.2)
self.test_obj.rotor.winding.Lewout = 0.23
self.test_obj.rotor.ring_mat.name = "test2"
self.test_obj.rotor.winding.conductor = CondType22()
self.test_obj.rotor.winding.conductor.cond_mat.name = "test3"
self.widget = SBar(
machine=self.test_obj,
material_dict=self.material_dict,
is_stator=False,
)
assert self.widget.lf_Hscr.value() == 0.21
assert self.widget.lf_Lscr.value() == 0.22
assert self.widget.lf_Lewout.value() == 0.23
assert self.widget.w_mat_scr.c_mat_type.currentIndex() == 1
assert self.widget.w_mat_scr.c_mat_type.currentText() == "test2"
assert type(self.widget.w_bar) is PCondType22
assert self.widget.c_bar_type.currentIndex() == 1
assert self.widget.c_bar_type.currentText() == "Die cast bar"
assert self.widget.w_bar.w_mat.c_mat_type.currentIndex() == 2
assert self.widget.w_bar.w_mat.c_mat_type.currentText() == "test3"
# Check output txt
assert self.widget.w_bar.w_out.out_Sbar.text() == "Sbar: 0.002088 [m²]"
assert self.widget.w_bar.w_out.out_Sslot.text() == "Sslot: 0.002088 [m²]"
assert self.widget.w_bar.w_out.out_ratio.text() == "Sbar / Sslot: 100 [%]"
def test_set_Hscr(self):
"""Check that the Widget allow to update Hscr"""
# Clear the field before writing the new value
self.widget.lf_Hscr.clear()
value = round(uniform(0, 1), 4)
QTest.keyClicks(self.widget.lf_Hscr, str(value))
self.widget.lf_Hscr.editingFinished.emit() # To trigger the slot
assert self.test_obj.rotor.Hscr == value
def test_set_Lscr(self):
"""Check that the Widget allow to update Lscr"""
# Clear the field before writing the new value
self.widget.lf_Lscr.clear()
value = round(uniform(0, 1), 4)
QTest.keyClicks(self.widget.lf_Lscr, str(value))
self.widget.lf_Lscr.editingFinished.emit() # To trigger the slot
assert self.test_obj.rotor.Lscr == value
def test_set_Hbar(self):
"""Check that the Widget allow to update Hbar"""
# Clear the field before writing the new value
self.widget.w_bar.lf_Hbar.clear()
value = round(uniform(0, 1), 4)
QTest.keyClicks(self.widget.w_bar.lf_Hbar, str(value))
self.widget.w_bar.lf_Hbar.editingFinished.emit() # To trigger the slot
assert self.test_obj.rotor.winding.conductor.Hbar == value
def test_set_Wbar(self):
"""Check that the Widget allow to update Wbar"""
# Clear the field before writing the new value
self.widget.w_bar.lf_Wbar.clear()
value = round(uniform(0, 1), 4)
QTest.keyClicks(self.widget.w_bar.lf_Wbar, str(value))
self.widget.w_bar.lf_Wbar.editingFinished.emit() # To trigger the slot
assert self.test_obj.rotor.winding.conductor.Wbar == value
def test_set_Lewout(self):
"""Check that the Widget allow to update Lewout"""
# Clear the field before writing the new value
self.widget.lf_Lewout.clear()
value = round(uniform(0, 1), 4)
QTest.keyClicks(self.widget.lf_Lewout, str(value))
self.widget.lf_Lewout.editingFinished.emit() # To trigger the slot
assert self.test_obj.rotor.winding.Lewout == value
def test_set_material(self):
"""Check that the combobox update the material"""
self.widget.w_mat_scr.c_mat_type.setCurrentIndex(0)
assert self.test_obj.rotor.ring_mat.name == "test1"
assert self.test_obj.rotor.ring_mat.elec.rho == 0.31
self.widget.w_mat_scr.c_mat_type.setCurrentIndex(1)
assert self.test_obj.rotor.ring_mat.name == "test2"
assert self.test_obj.rotor.ring_mat.elec.rho == 0.32
self.widget.w_mat_scr.c_mat_type.setCurrentIndex(2)
assert self.test_obj.rotor.ring_mat.name == "test3"
assert self.test_obj.rotor.ring_mat.elec.rho == 0.33
def test_set_cond_type(self):
"""Check that you can change the conductor type"""
        # Reminder: update this test when a new conductor type is added
assert self.widget.c_bar_type.count() == 2
# Check init position
assert type(self.widget.w_bar) is PCondType21
assert type(self.test_obj.rotor.winding.conductor) is CondType21
self.widget.c_bar_type.setCurrentIndex(1)
assert type(self.widget.w_bar) is PCondType22
assert type(self.test_obj.rotor.winding.conductor) is CondType22
self.widget.c_bar_type.setCurrentIndex(0)
assert type(self.widget.w_bar) is PCondType21
assert type(self.test_obj.rotor.winding.conductor) is CondType21
def test_init_PCondType21(self):
"""Check that the init is setting a conductor if None"""
self.test_obj.rotor = LamSquirrelCage(Hscr=0.21, Lscr=0.22)
self.test_obj.rotor.slot = SlotW22(H0=0.001, H2=0.01, W0=0.1, W2=0.2)
self.test_obj.rotor.winding.Lewout = 0.23
self.test_obj.rotor.ring_mat.name = "test2"
self.test_obj.rotor.winding.conductor = None
self.widget = PCondType21(
machine=self.test_obj, material_dict=self.material_dict
)
assert type(self.widget.machine.rotor.winding.conductor) is CondType21
def test_init_PCondType22(self):
"""Check that the init is setting a conductor if None"""
self.test_obj.rotor = LamSquirrelCage(Hscr=0.21, Lscr=0.22)
self.test_obj.rotor.slot = SlotW22(H0=0.001, H2=0.01, W0=0.1, W2=0.2)
self.test_obj.rotor.winding.Lewout = 0.23
self.test_obj.rotor.ring_mat.name = "test2"
self.test_obj.rotor.winding.conductor = None
self.widget = PCondType22(
machine=self.test_obj, material_dict=self.material_dict
)
assert type(self.widget.machine.rotor.winding.conductor) is CondType22
def test_check(self):
"""Check that the check method return errors"""
lam = LamSquirrelCage(Hscr=0.21, Lscr=0.22)
lam.slot = SlotW22(H0=0.001, H2=0.01, W0=0.1, W2=0.2)
lam.winding.Lewout = None
lam.ring_mat.name = "test2"
lam.winding.conductor = None
assert self.widget.check(lam) == "You must set Lewout !"
lam = LamSquirrelCage(Hscr=None, Lscr=0.22)
assert self.widget.check(lam) == "You must set Hscr !"
lam = LamSquirrelCage(Hscr=0.21, Lscr=None)
assert self.widget.check(lam) == "You must set Lscr !"
lam = LamSquirrelCage(Hscr=0.21, Lscr=0.22)
lam.slot = SlotW22(H0=0.001, H2=0.01, W0=0.1, W2=0.2)
lam.winding.Lewout = 0.23
lam.ring_mat.name = "test2"
lam.winding.conductor = CondType21(Hbar=None, Wbar=0.015)
assert self.widget.check(lam) == "You must set Hbar !"
lam.winding.conductor = CondType21(Hbar=0.014, Wbar=None)
assert self.widget.check(lam) == "You must set Wbar !"
if __name__ == "__main__":
a = TestSBar()
a.setup_class()
a.setup_method()
a.test_init()
a.teardown_class()
print("Done")
| 42.466165
| 86
| 0.65926
|
ea94430b6a444a9fbf590cc3841d06e1028d94b4
| 4,200
|
py
|
Python
|
simfempy/solvers/cfd.py
|
anairabeze/simfempy
|
144362956263cb9b81f4bade15664d9cc640f93a
|
[
"MIT"
] | null | null | null |
simfempy/solvers/cfd.py
|
anairabeze/simfempy
|
144362956263cb9b81f4bade15664d9cc640f93a
|
[
"MIT"
] | null | null | null |
simfempy/solvers/cfd.py
|
anairabeze/simfempy
|
144362956263cb9b81f4bade15664d9cc640f93a
|
[
"MIT"
] | null | null | null |
import numpy as np
import pyamg
import scipy.sparse.linalg as splinalg
import scipy.sparse as sparse
from simfempy import tools
#=================================================================#
class VelcoitySolver():
def __init__(self, A, **kwargs):
self.maxiter = kwargs.pop('maxiter', 1)
self.nsmooth = kwargs.pop('nsmooth', 1)
self.smoother = kwargs.pop('smoother', 'schwarz')
smooth = ('energy', {'krylov': 'fgmres'})
smoother = (self.smoother, {'sweep': 'symmetric', 'iterations': self.nsmooth})
pyamgargs = {'B': pyamg.solver_configuration(A, verb=False)['B'], 'smooth': smooth, 'presmoother':smoother, 'postsmoother':smoother}
pyamgargs['symmetry'] = 'nonsymmetric'
pyamgargs['coarse_solver'] = 'splu'
self.solver = pyamg.smoothed_aggregation_solver(A, **pyamgargs)
def solve(self, b):
return self.solver.solve(b, maxiter=self.maxiter, tol=1e-16)
#=================================================================#
class PressureSolverDiagonal():
def __init__(self, mesh, mu):
self.BP = sparse.diags(1/mesh.dV*mu, offsets=(0), shape=(mesh.ncells, mesh.ncells))
def solve(self, b):
return self.BP.dot(b)
#=================================================================#
class PressureSolverSchur():
def __init__(self, mesh, ncomp, A, B, AP, **kwargs):
self.A, self.B, self.AP = A, B, AP
self.maxiter = kwargs.pop('maxiter',3)
ncells, nfaces = mesh.ncells, mesh.nfaces
self.solver = splinalg.LinearOperator(shape=(ncells,ncells), matvec=self.matvec)
self.counter = tools.iterationcounter.IterationCounter(name="schur", disp=1)
Ainv = sparse.diags(1/A.diagonal(), offsets=(0), shape=(nfaces*ncomp, nfaces*ncomp))
# self.spilu = splinalg.spilu(B*Ainv*B.T)
# self.M = splinalg.LinearOperator(shape=(ncells,ncells), matvec=self.spilu.solve)
self.M = sparse.diags( 1/(B*Ainv*B.T).diagonal(), offsets=(0), shape=(ncells, ncells) )
self.M = None
def matvec(self, x):
v = self.B.T.dot(x)
v2 = self.AP.solve(v)
return self.B.dot(v2)
def solve(self, b):
u, info = splinalg.lgmres(self.solver, b, x0=None, M=self.M, maxiter=self.maxiter, atol=1e-12, tol=1e-10)
# u, info = splinalg.bicgstab(self.solver, b, x0=None, M=None, maxiter=20, atol=1e-12, tol=1e-10)
# u, info = splinalg.gcrotmk(self.solver, b, x0=None, M=None, maxiter=self.maxiter, atol=1e-12, tol=1e-10)
# self.counter.niter=0
# u, info = splinalg.lgmres(self.solver, b, x0=None, M=None, maxiter=3, atol=1e-12, tol=1e-10, callback=self.counter)
# print(f"{info=}")
# u, info = pyamg.krylov.bicgstab(self.solver, b, maxiter=3, callback=self.counter, tol=1e-10)
# if info: raise ValueError(f"no convergence {info=}")
return u
#=================================================================#
class SystemSolver():
def __init__(self, n, matvec, matvecprec, **kwargs):
self.method = kwargs.pop('method','gmres')
self.atol = kwargs.pop('atol',1e-14)
self.rtol = kwargs.pop('rtol',1e-10)
self.disp = kwargs.pop('disp',0)
self.counter = tools.iterationcounter.IterationCounter(name=self.method, disp=self.disp)
self.Amult = splinalg.LinearOperator(shape=(n, n), matvec=matvec)
self.M = splinalg.LinearOperator(shape=(n, n), matvec=matvecprec)
def solve(self, b, x0):
if self.method=='lgmres':
u, info = splinalg.lgmres(self.Amult, b, x0=x0, M=self.M, callback=self.counter, atol=self.atol, tol=self.rtol, inner_m=10, outer_k=4)
elif self.method=='gmres':
u, info = splinalg.gmres(self.Amult, b, x0=x0, M=self.M, callback=self.counter, atol=self.atol, tol=self.rtol)
elif self.method=='gcrotmk':
u, info = splinalg.gcrotmk(self.Amult, b, x0=x0, M=self.M, callback=self.counter, atol=self.atol, tol=self.rtol, m=10, truncate='smallest')
else:
raise ValueError(f"unknown {self.method=}")
if info: raise ValueError("no convergence info={}".format(info))
return u, self.counter.niter
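# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): driving SystemSolver
# with a small 1D Laplacian and a Jacobi preconditioner.  Toy data only; it
# assumes the imports of this module (scipy, simfempy.tools) resolve.
if __name__ == "__main__":
    n = 50
    A = (
        sparse.diags([2.0] * n)
        + sparse.diags([-1.0] * (n - 1), 1)
        + sparse.diags([-1.0] * (n - 1), -1)
    ).tocsr()
    dinv = 1.0 / A.diagonal()

    solver = SystemSolver(n, matvec=lambda u: A.dot(u), matvecprec=lambda r: dinv * r)
    b = np.ones(n)
    u, niter = solver.solve(b, x0=np.zeros(n))
    print("gmres iterations:", niter, "residual:", np.linalg.norm(A.dot(u) - b))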
| 53.846154
| 151
| 0.59619
|
777ba6b9ec30b08be17c67d0df3e31ae20b847a3
| 3,833
|
py
|
Python
|
preprocess/bigramsVector.py
|
jrabinow/JJBoost
|
29d316668c7667f7ed9146716539f7c4eca2a499
|
[
"BSD-3-Clause"
] | null | null | null |
preprocess/bigramsVector.py
|
jrabinow/JJBoost
|
29d316668c7667f7ed9146716539f7c4eca2a499
|
[
"BSD-3-Clause"
] | null | null | null |
preprocess/bigramsVector.py
|
jrabinow/JJBoost
|
29d316668c7667f7ed9146716539f7c4eca2a499
|
[
"BSD-3-Clause"
] | null | null | null |
import re, os, csv, string
from ast import literal_eval
import nltk, pprint
from nltk.corpus import stopwords
from nltk import word_tokenize
from nltk.tokenize import RegexpTokenizer
from nltk.util import bigrams,trigrams
from nltk import FreqDist
def removeStopwords(features):
stop = stopwords.words('english')
features = [i for i in features if i not in stop]
return features
def extract(featureList, dir, fileout,n):
tokenizer = RegexpTokenizer(r'\w+')
docPos = {}
docNeg = {}
docFeatures = {}
sentiment = "pos"
for file in os.listdir(dir+sentiment):
if file.endswith(".txt"):
features = {}
sentiment = "pos"
fp = open(dir+sentiment+"/"+file, 'rb')
doc = fp.read()
tokens = [b for b in bigrams(removeStopwords(tokenizer.tokenize(doc)))]
for word in featureList:
if word in tokens:
features[word] = 1.0
else:
features[word] = 0.0
docPos[file] = ""
docFeatures[file] = features
sentiment = "neg"
for file in os.listdir(dir+sentiment):
if file.endswith(".txt"):
features = {}
sentiment = "neg"
fp = open(dir+sentiment+"/"+file, 'rb')
doc = fp.read()
tokens = [b for b in bigrams(removeStopwords(tokenizer.tokenize(doc)))]
for word in featureList:
if word in tokens:
features[word] = 1.0
else:
features[word] = 0.0
docNeg[file] = ""
docFeatures[file] = features
f = FreqDist(featureList)
featureList = [x for (x,f) in f.items()[:n]]
allData = []
for doc in docFeatures.keys():
data = []
count = 1
if doc in docNeg.keys():
val =['-1']
if doc in docPos.keys():
val =['1']
for key in featureList:
data.append("%s:%s" %(count, docFeatures[doc][key]))
count +=1
val.extend(data)
allData.append(" ".join(val))
# for doc in docFeaturesPos.keys():
# data =['+1']
# for key in featureList:
# data.append("%s:%s" %(count, docFeaturesPos[doc][key]))
# count +=1
# count = 1
# allData.append(" ".join(data))
fVectorWriter = csv.writer(open(dir+fileout+".txt", 'wb'))
for d in allData:
print d
fVectorWriter.writerow([d])
#
def extractFeatures(dir):
    """Collect the tokens of every review (positive and negative) in one flat list."""
    tokenizer = RegexpTokenizer(r'\w+')
    tokens = []
    for sentiment in ("pos", "neg"):
        for file in os.listdir(dir+sentiment):
            if file.endswith(".txt"):
                fp = open(dir+sentiment+"/"+file, 'r')
                doc = fp.read()
                #tokens.extend(word_tokenize(doc))
                tokens.extend(tokenizer.tokenize(doc))
                fp.close()
    return tokens
dir = "/home/jch550/dev/JJboost/data/txt_sentoken/"
# print "extracting feautres..."
# featuresRaw = extractFeatures(dir)
# # print "cleaning features..."
# featuresClean = removeStopwords(featuresRaw)
# featuresBigrams = bigrams(featuresClean)
# # print "writing to file..."
# fListWriter = csv.writer(open(dir+"featureBigramsList.txt", 'w'))
# for f in featuresBigrams:
# fListWriter.writerow([f])
features = open(dir+"featureBigramsList.txt", 'rb')
featuresList = features.read().split("\r\n")
featuresList = [b for b in featuresBigrams]
print "extracting features from documents..."
extract(featuresList, dir, "docs_train_bigrams", 500)
print "DONE."
| 31.418033
| 83
| 0.563788
|
885622b8808a681e658af477190b55ea717c73f3
| 2,720
|
py
|
Python
|
Z - Tool Box/x2john/bitshares2john.py
|
dfirpaul/Active-Directory-Exploitation-Cheat-Sheet-1
|
1dcf54522e9d20711ff1114550dc2893ed3e9ed0
|
[
"MIT"
] | 1,290
|
2020-05-28T21:24:43.000Z
|
2022-03-31T16:38:43.000Z
|
Z - Tool Box/x2john/bitshares2john.py
|
asim06/Active-Directory-Exploitation-Cheat-Sheet
|
708f57c83aa99d80df22f4e50f21479a709fb359
|
[
"MIT"
] | 1
|
2020-07-03T21:14:52.000Z
|
2020-07-03T21:14:52.000Z
|
Z - Tool Box/x2john/bitshares2john.py
|
asim06/Active-Directory-Exploitation-Cheat-Sheet
|
708f57c83aa99d80df22f4e50f21479a709fb359
|
[
"MIT"
] | 280
|
2020-05-29T17:28:38.000Z
|
2022-03-31T13:54:15.000Z
|
#!/usr/bin/env python
# Script to extract "hashes" from BitShares databases.
#
# Tested with BitShares.Setup.2.0.180115.exe on Windows 7 SP1.
#
# Location for databases -> %APPDATA%\BitShares2-light\databases\file__0\{1,2...}
#
# "Local Wallet" on https://wallet.bitshares.org for Google Chrome ->
# ~/.config/google-chrome/Default/databases/https_wallet.bitshares.org_0
#
# Metadata extraction:
#
# $ sqlite3 Databases.db
# sqlite> select * from Databases;
# 1|file__0|__sysdb__|System Database|4194304
# 2|file__0|graphene_db_4018d7|graphene_db_4018d7|4194304
# 3|file__0|graphene_v2_4018d7_default|graphene_v2_4018d7_default|4194304
# 4|file__0|graphene_v2_4018d7_openwall|graphene_v2_4018d7_openwall|4194304
#
# Hash extraction:
#
# $ sqlite3 file__0/4
# sqlite> select * from wallet;
# 3-openwall|{"public_name":"openwall", ..., "encryption_key":"ec4...", ...}
#
# This software is Copyright (c) 2017, Dhiru Kholia <dhiru at openwall.com> and
# it is hereby released to the general public under the following terms:
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted.
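#
# Example run (hypothetical paths, output truncated):
#
#   $ python bitshares2john.py file__0/4
#   3-openwall:$BitShares$0*ec4...
#
#   $ python bitshares2john.py wallet-backup.bin
#   wallet-backup.bin:$BitShares$1*<hex dump of the backup file>
#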
import os
import sys
import json
import sqlite3
import binascii
PY3 = sys.version_info[0] == 3
if not PY3:
reload(sys)
sys.setdefaultencoding('utf8')
def process_leveldb(path):
# Standard LevelDB (via plyvel library) doesn't work for BitShares .ldb files due to usage of custom "key_compare" comparator.
data = open(path, "rb").read()
    idx = data.find(b'checksum')  # find() returns -1 when absent, matching the check below
if idx < 0:
return False
start = idx + len("checksum") + 3
print("%s:$dynamic_84$%s" % (os.path.basename(path), data[start:start+64*2]))
return True
def process_backup_file(filename):
    data = binascii.hexlify(open(filename, "rb").read()).decode("ascii")
sys.stdout.write("%s:$BitShares$1*%s\n" % (os.path.basename(filename), data))
def process_file(filename):
if process_leveldb(filename):
return
try:
db = sqlite3.connect(filename)
cursor = db.cursor()
rows = cursor.execute("SELECT key, value from wallet")
except:
process_backup_file(filename)
return
for row in rows:
name, value = row
data = json.loads(value)
if "encryption_key" not in data:
continue
encryption_key = data["encryption_key"]
sys.stdout.write("%s:$BitShares$0*%s\n" % (name, encryption_key))
if __name__ == "__main__":
if len(sys.argv) < 2:
sys.stderr.write("Usage: %s [BitShares SQLite file(s) / Backup Wallet .bin file(s) / ~/BitShares/wallets/<wallet-name>/*.ldb file(s)]\n" % sys.argv[0])
sys.exit(-1)
for i in range(1, len(sys.argv)):
process_file(sys.argv[i])
| 30.909091
| 159
| 0.684926
|
0f8ba67dd807d2d6fb54cd4c22ff44204b4ad0e3
| 74,574
|
py
|
Python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_03_01/operations/_network_watchers_operations.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | 8
|
2021-01-13T23:44:08.000Z
|
2021-03-17T10:13:36.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_03_01/operations/_network_watchers_operations.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | null | null | null |
sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_03_01/operations/_network_watchers_operations.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class NetworkWatchersOperations(object):
"""NetworkWatchersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2017_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def create_or_update(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "models.NetworkWatcher"
**kwargs # type: Any
):
# type: (...) -> "models.NetworkWatcher"
"""Creates or updates a network watcher in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param parameters: Parameters that define the network watcher resource.
:type parameters: ~azure.mgmt.network.v2017_03_01.models.NetworkWatcher
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkWatcher, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2017_03_01.models.NetworkWatcher
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkWatcher"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'NetworkWatcher')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('NetworkWatcher', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('NetworkWatcher', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'} # type: ignore
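    # Illustrative usage sketch (not part of the generated code): callers are
    # expected to reach this operation group through a service client rather
    # than instantiating it directly.  Resource names below are placeholders
    # and the dict body assumes the serializer accepts plain dicts:
    #
    #   from azure.identity import DefaultAzureCredential
    #   from azure.mgmt.network import NetworkManagementClient
    #
    #   client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
    #   watcher = client.network_watchers.create_or_update(
    #       "my-resource-group", "my-network-watcher", {"location": "westus"}
    #   )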
def get(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.NetworkWatcher"
"""Gets the specified network watcher by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkWatcher, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2017_03_01.models.NetworkWatcher
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkWatcher"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkWatcher', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified network watcher resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'} # type: ignore
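    # Illustrative LRO usage sketch (not part of the generated code): the
    # ``begin_*`` methods return an LROPoller, so a caller typically blocks on
    # ``result()``.  Names below are placeholders:
    #
    #   poller = client.network_watchers.begin_delete("my-resource-group", "my-network-watcher")
    #   poller.result()  # waits for the long-running delete to complete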
def list(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.NetworkWatcherListResult"]
"""Gets all network watchers by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkWatcherListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2017_03_01.models.NetworkWatcherListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkWatcherListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkWatcherListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers'} # type: ignore
def list_all(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["models.NetworkWatcherListResult"]
"""Gets all network watchers by subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkWatcherListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2017_03_01.models.NetworkWatcherListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkWatcherListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkWatcherListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkWatchers'} # type: ignore
def get_topology(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "models.TopologyParameters"
**kwargs # type: Any
):
# type: (...) -> "models.Topology"
"""Gets the current network topology by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param parameters: Parameters that define the representation of topology.
:type parameters: ~azure.mgmt.network.v2017_03_01.models.TopologyParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Topology, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2017_03_01.models.Topology
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.Topology"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self.get_topology.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TopologyParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Topology', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_topology.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/topology'} # type: ignore
def _verify_ip_flow_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "models.VerificationIPFlowParameters"
**kwargs # type: Any
):
# type: (...) -> "models.VerificationIPFlowResult"
cls = kwargs.pop('cls', None) # type: ClsType["models.VerificationIPFlowResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._verify_ip_flow_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'VerificationIPFlowParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VerificationIPFlowResult', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('VerificationIPFlowResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_verify_ip_flow_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/ipFlowVerify'} # type: ignore
def begin_verify_ip_flow(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "models.VerificationIPFlowParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.VerificationIPFlowResult"]
"""Verify IP flow from the specified VM to a location given the currently configured NSG rules.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param parameters: Parameters that define the IP flow to be verified.
:type parameters: ~azure.mgmt.network.v2017_03_01.models.VerificationIPFlowParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VerificationIPFlowResult or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2017_03_01.models.VerificationIPFlowResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.VerificationIPFlowResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._verify_ip_flow_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VerificationIPFlowResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_verify_ip_flow.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/ipFlowVerify'} # type: ignore
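    # Illustrative usage sketch, not part of the generated client: the begin_*
    # methods return an LROPoller, and .result() blocks until the service
    # finishes, returning the deserialized model. The operations-group name and
    # the VerificationIPFlowParameters field names below are assumptions to be
    # checked against this package's models:
    #
    #   poller = client.network_watchers.begin_verify_ip_flow(
    #       "my-rg", "my-watcher",
    #       models.VerificationIPFlowParameters(
    #           target_resource_id=vm_id, direction="Outbound", protocol="TCP",
    #           local_port="80", remote_port="443",
    #           local_ip_address="10.0.0.4", remote_ip_address="13.107.21.200"))
    #   result = poller.result()  # VerificationIPFlowResult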
def _get_next_hop_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "models.NextHopParameters"
**kwargs # type: Any
):
# type: (...) -> "models.NextHopResult"
cls = kwargs.pop('cls', None) # type: ClsType["models.NextHopResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._get_next_hop_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'NextHopParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('NextHopResult', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('NextHopResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_next_hop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/nextHop'} # type: ignore
def begin_get_next_hop(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "models.NextHopParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.NextHopResult"]
"""Gets the next hop from the specified VM.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param parameters: Parameters that define the source and destination endpoint.
:type parameters: ~azure.mgmt.network.v2017_03_01.models.NextHopParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either NextHopResult or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2017_03_01.models.NextHopResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.NextHopResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._get_next_hop_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('NextHopResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_next_hop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/nextHop'} # type: ignore
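    # Illustrative only: NextHopParameters is assumed to carry the target VM
    # resource id plus source and destination IP addresses, and the poller
    # resolves to a NextHopResult describing the next hop type and address:
    #
    #   hop = client.network_watchers.begin_get_next_hop(
    #       "my-rg", "my-watcher",
    #       models.NextHopParameters(target_resource_id=vm_id,
    #                                source_ip_address="10.0.0.4",
    #                                destination_ip_address="10.0.1.5")).result()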
def _get_vm_security_rules_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "models.SecurityGroupViewParameters"
**kwargs # type: Any
):
# type: (...) -> "models.SecurityGroupViewResult"
cls = kwargs.pop('cls', None) # type: ClsType["models.SecurityGroupViewResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._get_vm_security_rules_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'SecurityGroupViewParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('SecurityGroupViewResult', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('SecurityGroupViewResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_vm_security_rules_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/securityGroupView'} # type: ignore
def begin_get_vm_security_rules(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "models.SecurityGroupViewParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.SecurityGroupViewResult"]
"""Gets the configured and effective security group rules on the specified VM.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param parameters: Parameters that define the VM to check security groups for.
:type parameters: ~azure.mgmt.network.v2017_03_01.models.SecurityGroupViewParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either SecurityGroupViewResult or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2017_03_01.models.SecurityGroupViewResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.SecurityGroupViewResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._get_vm_security_rules_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('SecurityGroupViewResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_vm_security_rules.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/securityGroupView'} # type: ignore
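    # Illustrative only: SecurityGroupViewParameters is assumed to take just the
    # target VM resource id; the poller resolves to a SecurityGroupViewResult
    # listing configured and effective NSG rules per network interface:
    #
    #   view = client.network_watchers.begin_get_vm_security_rules(
    #       "my-rg", "my-watcher",
    #       models.SecurityGroupViewParameters(target_resource_id=vm_id)).result()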
def _get_troubleshooting_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "models.TroubleshootingParameters"
**kwargs # type: Any
):
# type: (...) -> "models.TroubleshootingResult"
cls = kwargs.pop('cls', None) # type: ClsType["models.TroubleshootingResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._get_troubleshooting_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TroubleshootingParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('TroubleshootingResult', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('TroubleshootingResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_troubleshooting_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/troubleshoot'} # type: ignore
def begin_get_troubleshooting(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "models.TroubleshootingParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.TroubleshootingResult"]
"""Initiate troubleshooting on a specified resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param parameters: Parameters that define the resource to troubleshoot.
:type parameters: ~azure.mgmt.network.v2017_03_01.models.TroubleshootingParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either TroubleshootingResult or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2017_03_01.models.TroubleshootingResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.TroubleshootingResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._get_troubleshooting_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('TroubleshootingResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_troubleshooting.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/troubleshoot'} # type: ignore
def _get_troubleshooting_result_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "models.QueryTroubleshootingParameters"
**kwargs # type: Any
):
# type: (...) -> "models.TroubleshootingResult"
cls = kwargs.pop('cls', None) # type: ClsType["models.TroubleshootingResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._get_troubleshooting_result_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'QueryTroubleshootingParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('TroubleshootingResult', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('TroubleshootingResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_troubleshooting_result_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/queryTroubleshootResult'} # type: ignore
def begin_get_troubleshooting_result(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "models.QueryTroubleshootingParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.TroubleshootingResult"]
"""Get the last completed troubleshooting result on a specified resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param parameters: Parameters that define the resource to query the troubleshooting result.
:type parameters: ~azure.mgmt.network.v2017_03_01.models.QueryTroubleshootingParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either TroubleshootingResult or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2017_03_01.models.TroubleshootingResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.TroubleshootingResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._get_troubleshooting_result_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('TroubleshootingResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_troubleshooting_result.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/queryTroubleshootResult'} # type: ignore
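    # Note (illustrative): begin_get_troubleshooting above starts a new
    # troubleshooting run, while begin_get_troubleshooting_result only queries
    # the last completed run for a resource. QueryTroubleshootingParameters is
    # assumed to take just the target resource id:
    #
    #   last = client.network_watchers.begin_get_troubleshooting_result(
    #       "my-rg", "my-watcher",
    #       models.QueryTroubleshootingParameters(target_resource_id=gateway_id)).result()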
def _set_flow_log_configuration_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "models.FlowLogInformation"
**kwargs # type: Any
):
# type: (...) -> "models.FlowLogInformation"
cls = kwargs.pop('cls', None) # type: ClsType["models.FlowLogInformation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._set_flow_log_configuration_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'FlowLogInformation')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('FlowLogInformation', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('FlowLogInformation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_set_flow_log_configuration_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/configureFlowLog'} # type: ignore
def begin_set_flow_log_configuration(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "models.FlowLogInformation"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.FlowLogInformation"]
"""Configures flow log on a specified resource.
:param resource_group_name: The name of the network watcher resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param parameters: Parameters that define the configuration of flow log.
:type parameters: ~azure.mgmt.network.v2017_03_01.models.FlowLogInformation
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either FlowLogInformation or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2017_03_01.models.FlowLogInformation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.FlowLogInformation"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._set_flow_log_configuration_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('FlowLogInformation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_set_flow_log_configuration.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/configureFlowLog'} # type: ignore
def _get_flow_log_status_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "models.FlowLogStatusParameters"
**kwargs # type: Any
):
# type: (...) -> "models.FlowLogInformation"
cls = kwargs.pop('cls', None) # type: ClsType["models.FlowLogInformation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._get_flow_log_status_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'FlowLogStatusParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('FlowLogInformation', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('FlowLogInformation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_flow_log_status_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/queryFlowLogStatus'} # type: ignore
def begin_get_flow_log_status(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "models.FlowLogStatusParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.FlowLogInformation"]
"""Queries status of flow log on a specified resource.
:param resource_group_name: The name of the network watcher resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param parameters: Parameters that define a resource to query flow log status.
:type parameters: ~azure.mgmt.network.v2017_03_01.models.FlowLogStatusParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either FlowLogInformation or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2017_03_01.models.FlowLogInformation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.FlowLogInformation"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._get_flow_log_status_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('FlowLogInformation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_flow_log_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/queryFlowLogStatus'} # type: ignore
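    # Illustrative pairing: begin_set_flow_log_configuration writes NSG flow-log
    # settings and begin_get_flow_log_status reads them back; both resolve to
    # FlowLogInformation. FlowLogStatusParameters is assumed to take just the
    # target NSG resource id:
    #
    #   status = client.network_watchers.begin_get_flow_log_status(
    #       "my-rg", "my-watcher",
    #       models.FlowLogStatusParameters(target_resource_id=nsg_id)).result()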
def _check_connectivity_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "models.ConnectivityParameters"
**kwargs # type: Any
):
# type: (...) -> "models.ConnectivityInformation"
cls = kwargs.pop('cls', None) # type: ClsType["models.ConnectivityInformation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._check_connectivity_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ConnectivityParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ConnectivityInformation', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('ConnectivityInformation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_check_connectivity_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectivityCheck'} # type: ignore
def begin_check_connectivity(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "models.ConnectivityParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.ConnectivityInformation"]
"""Verifies the possibility of establishing a direct TCP connection from a virtual machine to a
given endpoint including another VM or an arbitrary remote server.
:param resource_group_name: The name of the network watcher resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param parameters: Parameters that determine how the connectivity check will be performed.
:type parameters: ~azure.mgmt.network.v2017_03_01.models.ConnectivityParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ConnectivityInformation or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2017_03_01.models.ConnectivityInformation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.ConnectivityInformation"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._check_connectivity_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ConnectivityInformation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_check_connectivity.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectivityCheck'} # type: ignore
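    # Illustrative only: ConnectivityParameters is assumed to wrap a source
    # (VM resource id, optional port) and a destination (resource id or address
    # plus port); the poller resolves to ConnectivityInformation with per-hop
    # results and aggregate latency:
    #
    #   info = client.network_watchers.begin_check_connectivity(
    #       "my-rg", "my-watcher",
    #       models.ConnectivityParameters(
    #           source=models.ConnectivitySource(resource_id=vm_id),
    #           destination=models.ConnectivityDestination(address="example.com", port=443))).result()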
| 50.421907
| 233
| 0.671105
|
6be59f6a79c8bdf61ed4e998f30407ec3b45cea0
| 4,323
|
py
|
Python
|
exp-x/data_discovery.py
|
rohitmusti/SQuAD-Context-Merging
|
d055a1565b87399b1d611385097495431f5e250a
|
[
"MIT"
] | null | null | null |
exp-x/data_discovery.py
|
rohitmusti/SQuAD-Context-Merging
|
d055a1565b87399b1d611385097495431f5e250a
|
[
"MIT"
] | null | null | null |
exp-x/data_discovery.py
|
rohitmusti/SQuAD-Context-Merging
|
d055a1565b87399b1d611385097495431f5e250a
|
[
"MIT"
] | null | null | null |
import ujson as json
from toolkit import fancyprint
import config
def orig_data_discovery(filename):
    """
    Walk the original SQuAD-style train JSON and print the type and keys of
    every nesting level (data -> paragraphs -> qas -> answers).
    """
tab = " -> "
with open(filename, "r") as fh:
source = json.load(fh)
print("the type of source:", type(source))
print("the keys of source:",source.keys())
print("the type of version:",type(source["version"]))
print("the type of data:",type(source["data"]))
for article in source["data"]:
print(tab,"the type of each element of data:",type(article))
print(tab,"the keys of each element of data:",article.keys())
print(tab,"the type of title:",type(article["title"]))
print(tab,"the type of paragraphs:",type(article["paragraphs"]))
for para in article["paragraphs"]:
print(tab*2,"the type of each element of paragraphs:",type(para))
print(tab*2,"the keys of each element of paragraphs:",para.keys())
print(tab*2,"the type of context:",type(para["context"]))
print(tab*2,"the type of qas:",type(para["qas"]))
for qa in para["qas"]:
print(tab*3,"the type of each element of qas:",type(qa))
print(tab*3,"the keys of each element of qas:",qa.keys())
print(tab*3,"the type of id:",type(qa["id"]))
print(tab*3,"the type of is_impossible:",type(qa["is_impossible"]))
print(tab*3,"the type of question:",type(qa["question"]))
print(tab*3,"the type of answers:",type(qa["answers"]))
for answer in qa["answers"]:
print(tab*4,"the type of each element of answers:",type(answer))
print(tab*4,"the keys of each element of answer:",answer.keys())
print(tab*4,"the type of text:",type(answer["text"]))
print(tab*4,"the type of answer_start:",type(answer["answer_start"]))
return None
def exp2_data_discovery(filename):
    """
    Walk the experiment-2 JSON and print the type and keys of every nesting
    level (data -> topic -> qas -> answers).
    """
tab = " -> "
with open(filename, "r") as fh:
source = json.load(fh)
print("the type of source:", type(source))
print("the keys of source:",source.keys())
print("the type of experiment:",type(source["experiment"]))
print("the type of version:",type(source["version"]))
print("the type of data:",type(source["data"]))
for topic in source["data"]:
print(tab,"the type of each element of data:",type(topic))
print(tab,"the keys of each element of data:",topic.keys())
print(tab,"the type of title:",type(topic["title"]))
print(tab,"the type of topic_context:",type(topic["topic_context"]))
print(tab,"the type of qas:",type(topic["qas"]))
for qas in topic["qas"]:
print(tab*2,"the type of each element in qas", type(qas))
print(tab*2,"the keys of each element in qas", qas.keys())
print(tab*2,"the type of id:",type(qas["id"]))
print(tab*2,"the type of is_impossible:",type(qas["is_impossible"]))
print(tab*2,"the type of question:",type(qas["question"]))
print(tab*2,"the type of answers:",type(qas["answers"]))
for answer in qas["answers"]:
print(tab*3,"the type of each element in answers", type(answer))
print(tab*3,"the keys of each element in answers", answer.keys())
print(tab*3,"the type of text:",type(answer["text"]))
print(tab*3,"the type of answer_start:",type(answer["answer_start"]))
return 0
if __name__ == "__main__":
choice = int(input("select 0 for original or the number corresponding to your experiment: "))
data = config.data()
if choice == 0:
fancyprint(in_str="Original Data")
orig_data_discovery(filename=data.train_data_orig)
print()
elif choice == 2:
fancyprint(in_str="Experiment 2 Data")
exp2_data_discovery(filename=data.train_data_exp2)
print()
else:
print("Not implemented yet")
| 49.125
| 97
| 0.563498
|
a83561d049c7108fd12faca126f2115f0f6d6e5f
| 5,028
|
py
|
Python
|
libs/python/qumranica/models/reset_forgotten_user_password_request_dto.py
|
Scripta-Qumranica-Electronica/SQE_API_Connectors
|
aaa9b9eb8709d4257c32ea57321a179c6b1e041a
|
[
"MIT"
] | null | null | null |
libs/python/qumranica/models/reset_forgotten_user_password_request_dto.py
|
Scripta-Qumranica-Electronica/SQE_API_Connectors
|
aaa9b9eb8709d4257c32ea57321a179c6b1e041a
|
[
"MIT"
] | null | null | null |
libs/python/qumranica/models/reset_forgotten_user_password_request_dto.py
|
Scripta-Qumranica-Electronica/SQE_API_Connectors
|
aaa9b9eb8709d4257c32ea57321a179c6b1e041a
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
SQE API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from qumranica.configuration import Configuration
class ResetForgottenUserPasswordRequestDTO(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'password': 'str',
'token': 'str'
}
attribute_map = {
'password': 'password',
'token': 'token'
}
def __init__(self, password=None, token=None, local_vars_configuration=None): # noqa: E501
"""ResetForgottenUserPasswordRequestDTO - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._password = None
self._token = None
self.discriminator = None
self.password = password
self.token = token
@property
def password(self):
"""Gets the password of this ResetForgottenUserPasswordRequestDTO. # noqa: E501
:return: The password of this ResetForgottenUserPasswordRequestDTO. # noqa: E501
:rtype: str
"""
return self._password
@password.setter
def password(self, password):
"""Sets the password of this ResetForgottenUserPasswordRequestDTO.
:param password: The password of this ResetForgottenUserPasswordRequestDTO. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and password is None: # noqa: E501
raise ValueError("Invalid value for `password`, must not be `None`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
password is not None and len(password) > 1024):
raise ValueError("Invalid value for `password`, length must be less than or equal to `1024`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
password is not None and len(password) < 4):
raise ValueError("Invalid value for `password`, length must be greater than or equal to `4`") # noqa: E501
self._password = password
@property
def token(self):
"""Gets the token of this ResetForgottenUserPasswordRequestDTO. # noqa: E501
:return: The token of this ResetForgottenUserPasswordRequestDTO. # noqa: E501
:rtype: str
"""
return self._token
@token.setter
def token(self, token):
"""Sets the token of this ResetForgottenUserPasswordRequestDTO.
:param token: The token of this ResetForgottenUserPasswordRequestDTO. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and token is None: # noqa: E501
raise ValueError("Invalid value for `token`, must not be `None`") # noqa: E501
self._token = token
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ResetForgottenUserPasswordRequestDTO):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ResetForgottenUserPasswordRequestDTO):
return True
return self.to_dict() != other.to_dict()
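# Usage sketch (not part of the generated model): the setters enforce the
# client-side validation shown above, so a password shorter than 4 characters
# or a missing token raises ValueError unless validation is disabled on the
# Configuration:
#
#   dto = ResetForgottenUserPasswordRequestDTO(password="s3cret", token="abc123")
#   dto.to_dict()  # {'password': 's3cret', 'token': 'abc123'}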
| 32.43871
| 124
| 0.61436
|
c3d3f28e8f43d68b81d350af8a8ec5ae2bd22c61
| 1,931
|
py
|
Python
|
store/migrations/0004_delivery_order.py
|
Timoh97/Istock-invent
|
33943dc8f0cb291477b3d60af28eed17779ae28d
|
[
"MIT"
] | 1
|
2021-10-18T09:27:03.000Z
|
2021-10-18T09:27:03.000Z
|
store/migrations/0004_delivery_order.py
|
Timoh97/Istock-invent
|
33943dc8f0cb291477b3d60af28eed17779ae28d
|
[
"MIT"
] | 1
|
2021-08-04T20:11:28.000Z
|
2021-08-04T20:11:28.000Z
|
store/migrations/0004_delivery_order.py
|
dentonya/python-django-sales-inventory-project
|
d0fcdf81136908a022e0f4eeca94fc0357473635
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.0.8 on 2020-07-22 03:31
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('store', '0003_drop_product_season'),
]
operations = [
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('design', models.CharField(max_length=50)),
('color', models.CharField(max_length=50)),
('status', models.CharField(choices=[('decline', 'Decline'), ('approved', 'Approved'), ('processing', 'Processing'), ('complete', 'Complete'), ('bulk', 'Bulk')], max_length=10)),
('created_date', models.DateField(auto_now_add=True)),
('buyer', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='store.Buyer')),
('drop', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='store.Drop')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='store.Product')),
('season', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='store.Season')),
('supplier', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='store.Supplier')),
],
),
migrations.CreateModel(
name='Delivery',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('courier_name', models.CharField(max_length=120)),
('created_date', models.DateField(auto_now_add=True)),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='store.Order')),
],
),
]
| 49.512821
| 194
| 0.606939
|
baa384733c9e63e87ec77291f414800d32d2131b
| 2,947
|
py
|
Python
|
Praktikum/Modul6/DemoQListWidget.py
|
raddox7/19104027_TeoriGUI
|
8f62b71e339c6244c9b27259f402408426df0f85
|
[
"MIT"
] | null | null | null |
Praktikum/Modul6/DemoQListWidget.py
|
raddox7/19104027_TeoriGUI
|
8f62b71e339c6244c9b27259f402408426df0f85
|
[
"MIT"
] | null | null | null |
Praktikum/Modul6/DemoQListWidget.py
|
raddox7/19104027_TeoriGUI
|
8f62b71e339c6244c9b27259f402408426df0f85
|
[
"MIT"
] | null | null | null |
import sys
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
class MainForm(QWidget):
def __init__(self):
super().__init__()
self.setupUi()
def setupUi(self):
self.resize(400, 300)
self.move(300, 300)
self.setWindowTitle('Demo QListWidget')
        self.label = QLabel('&New element')
self.itemEdit = QLineEdit()
self.label.setBuddy(self.itemEdit)
        self.addItemButton = QPushButton('Add')
hbox1 = QHBoxLayout()
hbox1.addWidget(self.itemEdit)
hbox1.addWidget(self.addItemButton)
hbox1.addStretch()
vbox1 = QVBoxLayout()
vbox1.addWidget(self.label)
vbox1.addLayout(hbox1)
self.list1 = QListWidget()
self.moveRightButton = QPushButton('>')
self.moveRightAllButton = QPushButton('>>')
self.moveLeftButton = QPushButton('<')
self.moveLeftAllButton = QPushButton('<<')
vbox2 = QVBoxLayout()
vbox2.addWidget(self.moveRightButton)
vbox2.addWidget(self.moveRightAllButton)
vbox2.addWidget(self.moveLeftButton)
vbox2.addWidget(self.moveLeftAllButton)
vbox2.addStretch()
self.list2 = QListWidget()
hbox2 = QHBoxLayout()
hbox2.addWidget(self.list1)
hbox2.addLayout(vbox2)
hbox2.addWidget(self.list2)
layout = QVBoxLayout()
layout.addLayout(vbox1)
layout.addLayout(hbox2)
self.setLayout(layout)
self.addItemButton.clicked.connect(self.addItemButtonClick)
self.moveRightButton.clicked.connect(self.moveRightButtonClick)
self.moveRightAllButton.clicked.connect(self.moveRightAllButtonClick)
self.moveLeftButton.clicked.connect(self.moveLeftButtonClick)
self.moveLeftAllButton.clicked.connect(self.moveLeftAllButtonClick)
def addItemButtonClick(self):
if len(self.itemEdit.text()) == 0: return
item = self.itemEdit.text()
self.list1.addItem(item)
self.itemEdit.clear()
self.itemEdit.setFocus()
def moveRightButtonClick(self):
if self.list1.currentRow() < 0: return
self.list2.addItem(self.list1.currentItem().text())
self.list1.takeItem(self.list1.currentRow())
def moveRightAllButtonClick(self):
for index in range(self.list1.count()):
self.list2.addItem(self.list1.item(index).text())
self.list1.clear()
def moveLeftButtonClick(self):
if self.list2.currentRow() < 0: return
self.list1.addItem(self.list2.currentItem().text())
self.list2.takeItem(self.list2.currentRow())
def moveLeftAllButtonClick(self):
for index in range(self.list2.count()):
self.list1.addItem(self.list2.item(index).text())
self.list2.clear()
if __name__ == '__main__':
a = QApplication(sys.argv)
form = MainForm()
form.show()
a.exec_()
| 34.267442
| 77
| 0.650831
|
4ba0897f1d29f2b9070f306e25b6c814dc1e9478
| 2,510
|
py
|
Python
|
pymagnitude/third_party/allennlp/nn/decoding/__init__.py
|
tpeng/magnitude
|
aec98628b5547773ca8c4114ec6d1ad51e21b230
|
[
"MIT"
] | 1,520
|
2018-03-01T13:37:49.000Z
|
2022-03-25T11:40:20.000Z
|
pymagnitude/third_party/allennlp/nn/decoding/__init__.py
|
tpeng/magnitude
|
aec98628b5547773ca8c4114ec6d1ad51e21b230
|
[
"MIT"
] | 87
|
2018-03-03T15:12:50.000Z
|
2022-02-21T15:24:12.000Z
|
pymagnitude/third_party/allennlp/nn/decoding/__init__.py
|
tpeng/magnitude
|
aec98628b5547773ca8c4114ec6d1ad51e21b230
|
[
"MIT"
] | 121
|
2018-03-03T08:40:53.000Z
|
2022-03-16T05:19:38.000Z
|
u"""
This module contains code for transition-based decoding. "Transition-based decoding" is where you
start in some state, iteratively transition between states, and have some kind of supervision
signal that tells you which end states, or which transition sequences, are "good".
If you want to do decoding for a vocabulary-based model, where the allowable outputs are the same
at every timestep of decoding, this code is not what you are looking for, and it will be quite
inefficient compared to other things you could do.
The key abstractions in this code are the following:
- ``DecoderState`` represents the current state of decoding, containing a list of all of the
actions taken so far, and a current score for the state. It also has methods around
determining whether the state is "finished" and for combining states for batched computation.
- ``DecoderStep`` is a ``torch.nn.Module`` that models the transition function between states.
Its main method is ``take_step``, which generates a ranked list of next states given a
current state.
- ``DecoderTrainer`` is an algorithm for training the transition function with some kind of
supervision signal. There are many options for training algorithms and supervision signals;
this is an abstract class that is generic over the type of the supervision signal.
The module also has some classes to help represent the ``DecoderState``, including ``RnnState``,
which you can use to keep track of a decoder RNN's internal state, ``GrammarState``, which
keeps track of what actions are allowed at each timestep of decoding, if your outputs are
production rules from a grammar, and ``ChecklistState``, which keeps track of coverage information if
you are training a coverage-based parser.
There is also a generic ``BeamSearch`` class for finding the ``k`` highest-scoring transition
sequences given a trained ``DecoderStep`` and an initial ``DecoderState``.
"""
from __future__ import absolute_import
from allennlp.nn.decoding.beam_search import BeamSearch
from allennlp.nn.decoding.checklist_state import ChecklistState
from allennlp.nn.decoding.constrained_beam_search import ConstrainedBeamSearch
from allennlp.nn.decoding.decoder_state import DecoderState
from allennlp.nn.decoding.decoder_step import DecoderStep
from allennlp.nn.decoding.decoder_trainers.decoder_trainer import DecoderTrainer
from allennlp.nn.decoding.grammar_state import GrammarState
from allennlp.nn.decoding.rnn_state import RnnState
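# Conceptual sketch only; the exact signature is an assumption to verify
# against this allennlp version. A trained DecoderStep and an initial
# DecoderState are typically combined through BeamSearch to recover the k
# highest-scoring transition sequences, roughly:
#
#   finished_states = BeamSearch(beam_size=5).search(
#       num_steps=max_decoding_steps,
#       initial_state=initial_state,
#       decoder_step=decoder_step,
#       keep_final_unfinished_states=False)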
| 61.219512
| 99
| 0.794422
|
3513b7f401ab6a04d9a0775eb372fa040bbf1a68
| 2,594
|
py
|
Python
|
python/example_code/kda/kda-python-datagenerator-tworecordtypes.py
|
onehitcombo/aws-doc-sdk-examples
|
03e2e0c5dee75c5decbbb99e849c51417521fd82
|
[
"Apache-2.0"
] | 3
|
2021-01-19T20:23:17.000Z
|
2021-01-19T21:38:59.000Z
|
python/example_code/kda/kda-python-datagenerator-tworecordtypes.py
|
onehitcombo/aws-doc-sdk-examples
|
03e2e0c5dee75c5decbbb99e849c51417521fd82
|
[
"Apache-2.0"
] | null | null | null |
python/example_code/kda/kda-python-datagenerator-tworecordtypes.py
|
onehitcombo/aws-doc-sdk-examples
|
03e2e0c5dee75c5decbbb99e849c51417521fd82
|
[
"Apache-2.0"
] | 2
|
2019-12-27T13:58:00.000Z
|
2020-05-21T18:35:40.000Z
|
# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
# snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
# snippet-sourcedescription:[kda-python-datagenerator-tworecordtypes.py demonstrates how to generate sample data for the Transforming Multiple Data Types example.]
# snippet-service:[kinesisanalytics]
# snippet-keyword:[Python]
# snippet-keyword:[Amazon Kinesis Data Analytics]
# snippet-keyword:[AWS SDK for Python (Boto3)]
# snippet-keyword:[Code Sample]
# snippet-keyword:[kinesis.put_record]
# snippet-sourcetype:[full-example]
# snippet-sourcedate:[2019-01-29]
# snippet-sourceauthor:[fletpatr (AWS)]
# snippet-start:[kinesisanalytics.python.datagenerator.tworecordtypes]
import json
import boto3
import random
kinesis = boto3.client('kinesis')
def getOrderData(orderId, ticker):
data = {}
data['RecordType'] = "Order"
data['Oid'] = orderId
data['Oticker'] = ticker
data['Oprice'] = random.randint(500, 10000)
data['Otype'] = "Sell"
return data
def getTradeData(orderId, tradeId, ticker, tradePrice):
data = {}
data['RecordType'] = "Trade"
data['Tid'] = tradeId
data['Toid'] = orderId
data['Tticker'] = ticker
data['Tprice'] = tradePrice
return data
x = 1
while True:
#rnd = random.random()
rnd = random.randint(1,3)
if rnd == 1:
ticker = "AAAA"
elif rnd == 2:
ticker = "BBBB"
else:
ticker = "CCCC"
data = json.dumps(getOrderData(x, ticker))
kinesis.put_record(StreamName="OrdersAndTradesStream", Data=data, PartitionKey="partitionkey")
print(data)
tId = 1
for y in range (0, random.randint(0,6)):
tradeId = tId
tradePrice = random.randint(0, 3000)
        data2 = json.dumps(getTradeData(x, tradeId, ticker, tradePrice))
kinesis.put_record(StreamName="OrdersAndTradesStream", Data=data2, PartitionKey="partitionkey")
print(data2)
tId+=1
x+=1
# snippet-end:[kinesisanalytics.python.datagenerator.tworecordtypes]
| 33.688312
| 164
| 0.676947
|
451a52fab063bd53f2f63b525efb50b393242786
| 583
|
py
|
Python
|
Bio-StrongHold/src/k_Mer_Composition.py
|
crf1111/Bio-Informatics-Learning
|
2ccc02d7a23584c12aee44c5620160cdcaf70bd4
|
[
"MIT"
] | 1
|
2018-10-10T19:03:52.000Z
|
2018-10-10T19:03:52.000Z
|
Bio-StrongHold/src/k_Mer_Composition.py
|
crf1111/Bio-Informatics-Learning
|
2ccc02d7a23584c12aee44c5620160cdcaf70bd4
|
[
"MIT"
] | null | null | null |
Bio-StrongHold/src/k_Mer_Composition.py
|
crf1111/Bio-Informatics-Learning
|
2ccc02d7a23584c12aee44c5620160cdcaf70bd4
|
[
"MIT"
] | null | null | null |
from Bio import SeqIO
from itertools import product
import re
import os
import sys
def main(*args, **kwargs):
fpath = os.path.join(os.getcwd(), args[-2])
dna = str(SeqIO.read(fpath,'fasta').seq)
perms = product(['A','C','G','T'], repeat=4)
kmers = []
for perm in perms:
kmers.append(''.join(perm))
opath = os.path.join(os.getcwd(),args[-1])
fo = open(opath, 'w')
for kmer in kmers:
txt = '%s\t' % len(re.findall(''.join(['(?=', kmer, ')']), dna))
fo.write(txt)
fo.close()
if __name__ == '__main__':
main(*sys.argv)
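# Aside (illustrative addition, not part of the original script): the
# ''.join(['(?=', kmer, ')']) pattern above builds a lookahead such as '(?=AAAA)',
# which is what makes the counts include *overlapping* k-mer occurrences.
def _count_overlapping(kmer, seq):
    """Count overlapping matches, e.g. _count_overlapping('AA', 'AAAA') == 3,
    while len(re.findall('AA', 'AAAA')) == 2 because findall is non-overlapping."""
    return len(re.findall('(?=%s)' % kmer, seq))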
| 23.32
| 72
| 0.567753
|
e108a137fc00a623c89de368da7623601671df16
| 4,229
|
py
|
Python
|
src/exabgp/reactor/api/__init__.py
|
pierky/exabgp
|
34be537ae5906c0830b31da1152ae63108ccf911
|
[
"BSD-3-Clause"
] | 1,560
|
2015-01-01T08:53:05.000Z
|
2022-03-29T20:22:43.000Z
|
src/exabgp/reactor/api/__init__.py
|
pierky/exabgp
|
34be537ae5906c0830b31da1152ae63108ccf911
|
[
"BSD-3-Clause"
] | 818
|
2015-01-01T17:38:40.000Z
|
2022-03-30T07:29:24.000Z
|
src/exabgp/reactor/api/__init__.py
|
pierky/exabgp
|
34be537ae5906c0830b31da1152ae63108ccf911
|
[
"BSD-3-Clause"
] | 439
|
2015-01-06T21:20:41.000Z
|
2022-03-19T23:24:25.000Z
|
# encoding: utf-8
"""
decoder/__init__.py
Created by Thomas Mangin on 2009-08-25.
Copyright (c) 2009-2017 Exa Networks. All rights reserved.
License: 3-clause BSD. (See the COPYRIGHT file)
"""
from exabgp.configuration.core.format import formated
from exabgp.configuration.operational.parser import operational
from exabgp.protocol.family import AFI
from exabgp.protocol.family import SAFI
from exabgp.protocol.family import Family
from exabgp.bgp.message.refresh import RouteRefresh
from exabgp.logger import log
from exabgp.reactor.api.command import Command
from exabgp.configuration.configuration import Configuration
# ======================================================================= Parser
#
class API(Command):
def __init__(self, reactor):
self.reactor = reactor
self.configuration = Configuration([])
def log_message(self, message, level='INFO'):
log.info(message, 'api', level)
def log_failure(self, message, level='ERR'):
error = str(self.configuration.tokeniser.error)
report = '%s\nreason: %s' % (message, error) if error else message
log.error(report, 'api', level)
def text(self, reactor, service, command):
for registered in self.functions:
if registered == command or command.endswith(' ' + registered) or registered + ' ' in command:
return self.callback['text'][registered](self, reactor, service, command)
reactor.processes.answer_error(service)
log.warning('command from process not understood : %s' % command, 'api')
return False
def api_route(self, command):
action, line = command.split(' ', 1)
self.configuration.static.clear()
if not self.configuration.partial('static', line, action):
return []
if self.configuration.scope.location():
return []
self.configuration.scope.to_context()
changes = self.configuration.scope.pop_routes()
return changes
def api_flow(self, command):
action, flow, line = command.split(' ', 2)
self.configuration.flow.clear()
if not self.configuration.partial('flow', line):
return []
if self.configuration.scope.location():
return []
self.configuration.scope.to_context()
changes = self.configuration.scope.pop_routes()
return changes
def api_vpls(self, command):
action, line = command.split(' ', 1)
self.configuration.l2vpn.clear()
if not self.configuration.partial('l2vpn', line):
return []
self.configuration.scope.to_context()
changes = self.configuration.scope.pop('l2vpn')
return changes
def api_attributes(self, command, peers):
action, line = command.split(' ', 1)
self.configuration.static.clear()
if not self.configuration.partial('static', line):
return []
self.configuration.scope.to_context()
changes = self.configuration.scope.pop_routes()
return changes
def api_refresh(self, command):
tokens = formated(command).split(' ')[2:]
if len(tokens) != 2:
return False
afi = AFI.value(tokens.pop(0))
safi = SAFI.value(tokens.pop(0))
if afi is None or safi is None:
return False
return [RouteRefresh(afi, safi)]
def api_eor(self, command):
tokens = formated(command).split(' ')[2:]
number = len(tokens)
if not number:
return Family(1, 1)
if number != 2:
return False
afi = AFI.fromString(tokens[0])
if afi == AFI.undefined:
return False
safi = SAFI.fromString(tokens[1])
if safi == SAFI.undefined:
return False
return Family(afi, safi)
def api_operational(self, command):
tokens = formated(command).split(' ')
op = tokens[1].lower()
what = tokens[2].lower()
if op != 'operational':
return False
self.configuration.tokeniser.iterate.replenish(tokens[3:])
# None or a class
return operational(what, self.configuration.tokeniser.iterate)
| 30.207143
| 106
| 0.618822
|
80de812fba7f78f0d4947860c78ddc5abaad5ef7
| 4,207
|
py
|
Python
|
comments/api.py
|
suutari-ai/respa
|
a944b1c13f855eaf5f883687b5fd025ece7c8176
|
[
"MIT"
] | null | null | null |
comments/api.py
|
suutari-ai/respa
|
a944b1c13f855eaf5f883687b5fd025ece7c8176
|
[
"MIT"
] | 10
|
2018-11-21T14:37:17.000Z
|
2021-02-02T09:19:59.000Z
|
comments/api.py
|
suutari-ai/respa
|
a944b1c13f855eaf5f883687b5fd025ece7c8176
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
import django_filters
from rest_framework import exceptions, mixins, serializers, viewsets
from resources.api.base import register_view
from .models import Comment, COMMENTABLE_MODELS, get_commentable_content_types
class CommentUserSerializer(serializers.ModelSerializer):
display_name = serializers.ReadOnlyField(source='get_display_name')
class Meta:
model = get_user_model()
fields = ('display_name',)
class CommentSerializer(serializers.ModelSerializer):
target_type = serializers.CharField(required=True, write_only=True) # populated in to_representation()
target_id = serializers.IntegerField(source='object_id')
created_by = CommentUserSerializer(read_only=True)
class Meta:
model = Comment
fields = ('id', 'created_at', 'created_by', 'target_type', 'target_id', 'text')
def create(self, validated_data):
model = COMMENTABLE_MODELS.get(validated_data.pop('target_type'))
content_type = ContentType.objects.get_for_model(model)
validated_data['content_type'] = content_type
return super().create(validated_data)
def validate(self, validated_data):
target_type = validated_data.get('target_type')
if target_type not in COMMENTABLE_MODELS.keys():
raise exceptions.ValidationError({'target_type': [_('Illegal type.')]})
target_id = validated_data.get('object_id')
target_model = COMMENTABLE_MODELS.get(target_type)
try:
target_object = target_model.objects.get(id=target_id)
except target_model.DoesNotExist:
error_message = serializers.PrimaryKeyRelatedField.default_error_messages['does_not_exist']
raise exceptions.ValidationError(
{'target_id': [error_message.format(pk_value=target_id)]}
)
if not Comment.can_user_comment_object(self.context['request'].user, target_object):
raise exceptions.ValidationError(_('You cannot comment this object.'))
return validated_data
def to_representation(self, instance):
data = super().to_representation(instance)
target_model = instance.content_type.model_class()
        # when used with the comment viewset it shouldn't be possible to get StopIteration here
        # because models other than the commentable ones are excluded in the viewset
data['target_type'] = next(api_name for api_name, model in COMMENTABLE_MODELS.items() if model == target_model)
return data
class CommentFilter(django_filters.rest_framework.FilterSet):
class Meta:
model = Comment
fields = ('target_type', 'target_id')
target_type = django_filters.CharFilter(method='filter_target_type')
target_id = django_filters.CharFilter(name='object_id')
def filter_target_type(self, queryset, name, value):
try:
model = next(model for api_name, model in COMMENTABLE_MODELS.items() if api_name == value)
except StopIteration:
return queryset.none()
content_type = ContentType.objects.get_for_model(model)
return queryset.filter(content_type=content_type)
class CommentViewSet(mixins.CreateModelMixin, mixins.RetrieveModelMixin, mixins.ListModelMixin,
viewsets.GenericViewSet):
queryset = Comment.objects.select_related('created_by').prefetch_related('content_type')
serializer_class = CommentSerializer
filter_backends = (django_filters.rest_framework.DjangoFilterBackend,)
filter_class = CommentFilter
def get_queryset(self):
user = self.request.user
queryset = super().get_queryset()
return queryset.filter(content_type__in=get_commentable_content_types()).can_view(user)
def perform_create(self, serializer):
obj = serializer.save(created_by=self.request.user, created_at=timezone.now())
obj.send_created_notification(self.request)
return obj
register_view(CommentViewSet, 'comment')
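# Illustrative aside (not part of the original module): a POST body accepted by
# CommentSerializer above has roughly this shape. 'reservation' is only a guess
# at one of COMMENTABLE_MODELS' keys; the whole dict is hypothetical.
_EXAMPLE_COMMENT_PAYLOAD = {
    'target_type': 'reservation',  # must be a key of COMMENTABLE_MODELS
    'target_id': 42,               # primary key of the object being commented on
    'text': 'The projector in this room seems to be broken.',
}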
| 40.84466
| 119
| 0.727121
|
3e5d740adb13f29fba57c8501023f8ad903c7aec
| 851
|
py
|
Python
|
qmldataset/configurations/config_1q_XY_N1X_N6Z.py
|
rajibchakravorty/QDataSet
|
8eb21b8c7dad5654358021dd73b93ab90443f6d0
|
[
"MIT"
] | null | null | null |
qmldataset/configurations/config_1q_XY_N1X_N6Z.py
|
rajibchakravorty/QDataSet
|
8eb21b8c7dad5654358021dd73b93ab90443f6d0
|
[
"MIT"
] | null | null | null |
qmldataset/configurations/config_1q_XY_N1X_N6Z.py
|
rajibchakravorty/QDataSet
|
8eb21b8c7dad5654358021dd73b93ab90443f6d0
|
[
"MIT"
] | null | null | null |
# pylint: disable=invalid-name
"""
Configuration for experiment 1q_XY_N1X_N6Z - 1-qubit, Control on X and Y Axes, Type 1 Noise
on X Axis, Type 6 Noise on Z Axis
"""
from numpy import array
from ..utilities.constants import pauli_operators
dimension = 2
evolution_time = 1
num_time_steps = 1024
omega = 12
dynamic_operators = [0.5*pauli_operators[1],
0.5*pauli_operators[2]]
static_operators = [0.5*pauli_operators[3]*omega]
noise_operators = [0.5*pauli_operators[1],
0.5*pauli_operators[3]]
initial_states = [
array([[0.5, 0.5], [0.5, 0.5]]), array([[0.5, -0.5], [-0.5, 0.5]]),
array([[0.5, -0.5j], [0.5j, 0.5]]), array([[0.5, 0.5j], [-0.5j, 0.5]]),
array([[1, 0], [0, 0]]), array([[0, 0], [0, 1]])
]
measurement_operators = pauli_operators[1:]
num_pulses = 5
noise_profile = ['Type 1', 'Type 6']
| 32.730769
| 92
| 0.629847
|
227f4320d0dd769705f397b52362aef7bb7bfba5
| 2,851
|
py
|
Python
|
kernel.py
|
laMia482/tensorflow-example
|
9f598baca835585769ba17325bd3c719d37ec7dc
|
[
"Apache-2.0"
] | null | null | null |
kernel.py
|
laMia482/tensorflow-example
|
9f598baca835585769ba17325bd3c719d37ec7dc
|
[
"Apache-2.0"
] | null | null | null |
kernel.py
|
laMia482/tensorflow-example
|
9f598baca835585769ba17325bd3c719d37ec7dc
|
[
"Apache-2.0"
] | null | null | null |
import os
import tensorflow as tf
from config import cfg, puts_debug, puts_info
import data_reader as dtrd
import network
import utility
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
run_net = network.build(cfg.network)
xs = tf.placeholder(tf.float32, [cfg.batch_size, cfg.image_width, cfg.image_height, cfg.image_channel])
ys = tf.placeholder(tf.float32, [cfg.batch_size, cfg.max_predictions, 5])
learning_rate = tf.placeholder(tf.float32, None)
outputs_op, _ = run_net(inputs = xs, max_predictions = cfg.max_predictions, num_classes = cfg.num_classes, is_train = cfg.is_train)
loss_op = utility.calc_loss(logits = ys, predictions = outputs_op)
train_op = tf.train.GradientDescentOptimizer(learning_rate = learning_rate).minimize(100 * loss_op)
accuracy_op = utility.calc_accuracy(logits = ys, predictions = outputs_op)
def train():
'''kernel train
'''
with tf.Session(config = config) as sess:
saver = tf.train.Saver()
global_step = 0
iter= 1
current_learning_rate = cfg.init_learning_rate
init_variable = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
sess.run(init_variable)
data = dtrd.Data()
data.load(data_filename = cfg.train_dataset)
puts_debug('data size: {}'.format(data.size()))
while global_step < data.size() * cfg.epoch:
batch_x, _, _, batch_y = data.decode_and_fetch(batch_size = cfg.batch_size)
_, loss_val = sess.run([train_op, loss_op], feed_dict = {xs: batch_x, ys: batch_y, learning_rate: current_learning_rate})
puts_info('iter: {}, loss: {}'.format(iter, loss_val))
global_step += cfg.batch_size
if iter % cfg.test_iter == 0:
batch_x, _, _, batch_y = data.decode_and_fetch(batch_size = cfg.batch_size)
accuracy_val, loss_val = sess.run([accuracy_op, loss_op], feed_dict = {xs: batch_x, ys: batch_y, learning_rate: current_learning_rate})
puts_info('accuracy: {:.4f}, loss: {:.4f}'.format(accuracy_val, loss_val))
if iter % cfg.save_iter == 0:
saver.save(sess, os.path.join(cfg.save_path, 'model.ckpt-' + str(global_step)))
puts_info('iter: {}, model has been saved under {}/model.ckpt-{}'.format(iter, cfg.save_path, global_step))
iter += 1
batch_x, _, _, batch_y = data.decode_and_fetch(batch_size = cfg.batch_size)
accuracy_val, loss_val, outputs_val = sess.run([accuracy_op, loss_op, outputs_op], feed_dict = {xs: batch_x, ys: batch_y, learning_rate: current_learning_rate})
puts_info('final >> val: \n{}, accuracy: {:.4f}, loss: {:.4f}'.format(outputs_val, accuracy_val, loss_val))
saver.save(sess, os.path.join(cfg.save_path, 'model.ckpt'))
puts_info('final model has been saved under {}'.format(os.path.join(cfg.save_path, 'model.ckpt')))
def eval():
'''kernel eval
'''
return
| 45.253968
| 164
| 0.705016
|
27a320d1579b0c7c12dca4451947de04cefb5e96
| 2,412
|
py
|
Python
|
functions2.py
|
MiroGasparek/python_intro
|
8b4aec8e54f16e33fef506631de0b9ee2e512dea
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
functions2.py
|
MiroGasparek/python_intro
|
8b4aec8e54f16e33fef506631de0b9ee2e512dea
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
functions2.py
|
MiroGasparek/python_intro
|
8b4aec8e54f16e33fef506631de0b9ee2e512dea
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
# 04 February 2018 Functions
# Define function using def()
def ratio(x,y):
"""The ratio of 'x' to 'y'."""
return x/y
# Define function that always returns the same thing
def IReturnOne():
"""This returns 1"""
return 1
print(ratio(4,2))
print(IReturnOne())
# Function without return argument
def think_too_much():
"""Express Caesar's skepticism about Cassius """
print("Not too much...")
think_too_much()
# The function that returns nothing
return_val = think_too_much()
print()
print(return_val)
def complement_base(base, material ='DNA'):
"""Returns the Watson-Crick complement of a base."""
if base == 'A' or base == 'a':
if material == 'DNA':
return 'T'
elif material == 'RNA':
return 'U'
elif base == 'T' or base == 't' or base == 'U' or base == 'u':
return 'A'
elif base == 'G' or base == 'g':
return 'C'
else:
return 'G'
def reverse_complement(seq,material = 'DNA'):
""" Compute reverse complement of a sequence."""
# Initialize reverse complement
rev_seq = ''
# Loop through and populate list with reverse complement
for base in reversed(seq):
rev_seq += complement_base(base,material)
return rev_seq
reversed_sequence = reverse_complement('GCATTGCA')
print(reversed_sequence)
# Define function displaying template strand above its reverse complement
def display_complements(seq):
""" Print sequence above its reverse complement."""
# Compute the reverse complement
rev_comp = reverse_complement(seq)
# Print template
print(seq)
# Print "base pairs"
for base in seq:
print('|',end = '')
# Print final newline character after base pairs
print()
# Print reverse complement
for base in reversed(rev_comp):
print(base, end='')
# Print final newline character
print()
#####
seq = 'GCAGTTGCA'
print(seq)
print()
display_complements(seq)
print(reverse_complement('GCAGTTGCA',material='RNA'))
# Function checks if triangle is right-angled
def is_almost_right(a,b,c):
""" Checks if triangle is right-angled."""
# Use sorted(), which gives a sorted list
a,b,c = sorted([a,b,c])
# Check to see if it is almost a right triangle
if abs(a**2+b**2-c**2) < 1e-12:
return True
else:
return False
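# Illustrative check of the helper above (an addition, not in the original script):
# a 3-4-5 triangle is right-angled, a 2-3-4 triangle is not.
print(is_almost_right(3, 4, 5))
print(is_almost_right(2, 3, 4))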
| 25.389474
| 73
| 0.626451
|
8cb463eccb2b5237ed834a77f8fe161225bec847
| 1,455
|
py
|
Python
|
rcomp.py
|
flaithbheartaigh/scons-for-symbian
|
2037cbfe4e341964f571d08e9368f146b10dce7a
|
[
"MIT"
] | null | null | null |
rcomp.py
|
flaithbheartaigh/scons-for-symbian
|
2037cbfe4e341964f571d08e9368f146b10dce7a
|
[
"MIT"
] | null | null | null |
rcomp.py
|
flaithbheartaigh/scons-for-symbian
|
2037cbfe4e341964f571d08e9368f146b10dce7a
|
[
"MIT"
] | null | null | null |
"""RComp utility"""
__author__ = "Jussi Toivola"
__license__ = "MIT License"
import cpp
import os
import sys
#: RComp command path
RCOMP = os.environ["EPOCROOT"] + os.path.join( "epoc32", "tools", "rcomp" )
if sys.platform == "linux2":
RCOMP = "wine " + RCOMP + ".exe"
def RComp( env, rsc, rsg, rss, options, includes, fileinc, defines, extra_depends = None ):
"""Utility for creating Command for Symbian resource compiler"""
# Preprocess the resource file first
rpp = ".".join( os.path.basename( rss ).split( "." )[: - 1] + ["rpp"] )
rpp = os.path.abspath(os.path.join( os.path.dirname( rsg ), rpp ))
import relpath
rpp_build = cpp.Preprocess( env, rpp, rss, includes, fileinc, defines + ["_UNICODE" ] )
rss = relpath.relpath( os.path.abspath( "." ), os.path.abspath( rss ) )
    # FIXME: For some strange reason, using rcomp when creating the bootup resource fails
    # if invoked the 'normal' way (colorizer.py must mess it up somehow)
def build(target, source, env):
cmd = RCOMP + ' -u %s -o\"%s\" -h\"%s\" -s\"%s\" -i\"%s\" ' % \
( options, rsc, rsg, rpp, rss )
os.system(cmd)
if extra_depends is not None:
for dep in extra_depends:
env.Depends( rpp_build, dep)
resource_build = env.Command( [rsc, rsg], [rpp, rss], build )
env.Depends(resource_build, rpp)
return resource_build
| 39.324324
| 92
| 0.597938
|
11ccfcfd4567efb3a86c49542a2fe992c8116a31
| 6,931
|
py
|
Python
|
mixmo/learners/abstract_learner.py
|
JiarunLiu/mixmo-pytorch
|
a9ad674122d9b6512094b8292280a4045bb5a400
|
[
"Apache-2.0"
] | 72
|
2021-03-26T12:34:52.000Z
|
2022-03-27T06:39:57.000Z
|
mixmo/learners/abstract_learner.py
|
JiarunLiu/mixmo-pytorch
|
a9ad674122d9b6512094b8292280a4045bb5a400
|
[
"Apache-2.0"
] | 7
|
2021-08-06T02:13:54.000Z
|
2022-02-08T01:20:32.000Z
|
mixmo/learners/abstract_learner.py
|
JiarunLiu/mixmo-pytorch
|
a9ad674122d9b6512094b8292280a4045bb5a400
|
[
"Apache-2.0"
] | 15
|
2021-04-10T17:34:45.000Z
|
2022-03-02T11:49:34.000Z
|
"""
Base Learner wrapper definitions for logging, training and evaluating models
"""
import torch
from collections import OrderedDict
from torch.utils.tensorboard import SummaryWriter
from mixmo.utils import misc, logger, config
from mixmo.learners import model_wrapper
LOGGER = logger.get_logger(__name__, level="INFO")
class AbstractLearner:
"""
Base learner class that groups models, optimizers and loggers
Performs the entire model building, training and evaluating process
"""
def __init__(self, config_args, dloader, device):
self.config_args = config_args
self.device = device
self.dloader = dloader
self._tb_logger = None
self._create_model_wrapper()
self._best_acc = 0
self._best_epoch = 0
def _create_model_wrapper(self):
"""
Initialize the model along with other elements through a ModelWrapper
"""
self.model_wrapper = model_wrapper.ModelWrapper(
config=self.config_args["model_wrapper"],
config_args=self.config_args,
device=self.device
)
self.model_wrapper.to_eval_mode()
self.model_wrapper.print_summary(
pixels_size=self.dloader.properties("pixels_size")
)
@property
def tb_logger(self):
"""
Get (or initialize) the Tensorboard SummaryWriter
"""
if self._tb_logger is None:
self._tb_logger = SummaryWriter(log_dir=self.config_args["training"]["output_folder"])
return self._tb_logger
def save_tb(self, logs_dict, epoch):
"""
        Write stats from logs_dict at epoch to the Tensorboard summary writer
"""
for tag in logs_dict:
self.tb_logger.add_scalar(tag, logs_dict[tag]["value"], epoch)
if "test/diversity_accuracy_mean" not in logs_dict:
self.tb_logger.add_scalar(
"test/diversity_accuracy_mean",
logs_dict["test/accuracy"]["value"], epoch
)
def load_checkpoint(self, checkpoint, include_optimizer=True, return_epoch=False):
"""
Load checkpoint (and optimizer if included) to the wrapped model
"""
checkpoint = torch.load(checkpoint, map_location=self.device)
self.model_wrapper.network.load_state_dict(checkpoint[self.model_wrapper.name + "_state_dict"], strict=True)
if include_optimizer:
if self.model_wrapper.optimizer is not None:
self.model_wrapper.optimizer.load_state_dict(
checkpoint[self.model_wrapper.name + "_optimizer_state_dict"])
else:
assert self.model_wrapper.name + "_optimizer_state_dict" not in checkpoint
if return_epoch:
return checkpoint["epoch"]
def save_checkpoint(self, epoch, save_path=None):
"""
Save model (and optimizer) state dict
"""
# get save_path
if epoch is not None:
dict_to_save = {"epoch": epoch}
if save_path is None:
save_path = misc.get_model_path(
self.config_args["training"]["output_folder"], epoch=epoch
)
        else:
            assert save_path is not None
            # no epoch given: save the state dict without an "epoch" entry
            dict_to_save = {}
# update dict to save
dict_to_save[self.model_wrapper.name + "_state_dict"] = (
self.model_wrapper.network.state_dict()
if isinstance(self.model_wrapper.network, torch.nn.DataParallel)
else self.model_wrapper.network.state_dict())
if self.model_wrapper.optimizer is not None:
dict_to_save[self.model_wrapper.name + "_optimizer_state_dict"] = self.model_wrapper.optimizer.state_dict()
# final save
torch.save(dict_to_save, save_path)
def train_loop(self, epoch):
raise NotImplementedError
def train(self, epoch):
"""
Train for one epoch
"""
self.model_wrapper.to_train_mode(epoch=epoch)
# Train over the entire epoch
self.train_loop(epoch)
# Eval on epoch end
logs_dict = OrderedDict(
{
"epoch": {"value": epoch, "string": f"{epoch}"},
}
)
scores = self.model_wrapper.get_dict_to_scores(split="train")
for s in scores:
logs_dict[s] = scores[s]
## Val scores
if self.dloader.val_loader is not None:
val_scores = self.evaluate(
inference_loader=self.dloader.val_loader,
split="val")
for val_score in val_scores:
logs_dict[val_score] = val_scores[val_score]
## Test scores
test_scores = self.evaluate(
inference_loader=self.dloader.test_loader,
split="test")
for test_score in test_scores:
logs_dict[test_score] = test_scores[test_score]
## Print metrics
misc.print_dict(logs_dict)
## Check if best epoch
is_best_epoch = False
ens_acc = float(logs_dict["test/accuracy"]["value"])
if ens_acc >= self._best_acc:
self._best_acc = ens_acc
self._best_epoch = epoch
is_best_epoch = True
## Save the model checkpoint
## and not config.cfg.DEBUG
if is_best_epoch:
logs_dict["general/checkpoint_saved"] = {"value": 1.0, "string": "1.0"}
save_epoch = True
else:
logs_dict["general/checkpoint_saved"] = {"value": 0.0, "string": "0.0"}
save_epoch = (epoch % config.cfg.SAVE_EVERY_X_EPOCH == 0)
if save_epoch:
self.save_checkpoint(epoch)
LOGGER.warning(f"Epoch: {epoch} was saved")
## CSV logging
short_logs_dict = OrderedDict(
{k: v for k, v in logs_dict.items()
if any([regex in k for regex in [
"test/accuracy",
"train/accuracy",
"epoch",
"checkpoint_saved"
]])
})
misc.csv_writter(
path=misc.get_logs_path(self.config_args["training"]["output_folder"]),
dic=short_logs_dict
)
# Tensorboard logging
if not config.cfg.DEBUG:
self.save_tb(logs_dict, epoch=epoch)
# Perform end of step procedure like scheduler update
self.model_wrapper.scheduler.step()
def evaluate_loop(self, dloader, verbose, **kwargs):
raise NotImplementedError
def evaluate(self, inference_loader, split="test"):
"""
Perform an evaluation of the model
"""
# Restart stats
self.model_wrapper.to_eval_mode()
# Evaluation over the dataset properly speaking
self.evaluate_loop(inference_loader)
# Gather scores
scores = self.model_wrapper.get_dict_to_scores(split=split)
return scores
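# Illustrative sketch only (not part of this module): a concrete learner fills in
# the two hooks left abstract above. The class below is hypothetical; its bodies
# are schematic, and `train_loader` is an assumed attribute of the dataloader
# (only `val_loader` and `test_loader` appear elsewhere in this file).
class _ExampleLearner(AbstractLearner):
    def train_loop(self, epoch):
        for _batch in self.dloader.train_loader:  # assumed attribute
            pass  # forward pass, loss and optimizer step via self.model_wrapper

    def evaluate_loop(self, dloader, verbose=False, **kwargs):
        for _batch in dloader:
            pass  # accumulate evaluation metrics inside self.model_wrapper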
| 33.483092
| 119
| 0.605685
|
4c45283e17465f1742cfd83cffc17d5046dc13ea
| 578
|
py
|
Python
|
venv/Lib/site-packages/pygame/examples/macosx/aliens_app_example/setup.py
|
ZenithEmber/COMP120-Assignment-1-contract
|
bf0ff4f84730038192818c6c65f12123242b1135
|
[
"MIT"
] | 46
|
2019-03-01T02:19:18.000Z
|
2021-12-18T12:37:02.000Z
|
venv/Lib/site-packages/pygame/examples/macosx/aliens_app_example/setup.py
|
ZenithEmber/COMP120-Assignment-1-contract
|
bf0ff4f84730038192818c6c65f12123242b1135
|
[
"MIT"
] | 371
|
2020-03-04T21:51:56.000Z
|
2022-03-31T20:59:11.000Z
|
venv/Lib/site-packages/pygame/examples/macosx/aliens_app_example/setup.py
|
ZenithEmber/COMP120-Assignment-1-contract
|
bf0ff4f84730038192818c6c65f12123242b1135
|
[
"MIT"
] | 67
|
2018-10-29T09:50:49.000Z
|
2022-01-06T07:35:56.000Z
|
"""
Script for building the example.
Usage:
python setup.py py2app
"""
from distutils.core import setup
import py2app
NAME = 'aliens'
VERSION = '0.1'
plist = dict(
CFBundleIconFile=NAME,
CFBundleName=NAME,
CFBundleShortVersionString=VERSION,
CFBundleGetInfoString=' '.join([NAME, VERSION]),
CFBundleExecutable=NAME,
CFBundleIdentifier='org.pygame.examples.aliens',
)
setup(
data_files=['English.lproj', '../../data'],
app=[
#dict(script="aliens_bootstrap.py", plist=plist),
dict(script="aliens.py", plist=plist),
],
)
| 19.931034
| 57
| 0.66955
|
dfa394019ff14a42cc18796160332a9594ccf544
| 4,421
|
py
|
Python
|
bin/Python27/Lib/site-packages/scipy/special/tests/test_lambertw.py
|
lefevre-fraser/openmeta-mms
|
08f3115e76498df1f8d70641d71f5c52cab4ce5f
|
[
"MIT"
] | null | null | null |
bin/Python27/Lib/site-packages/scipy/special/tests/test_lambertw.py
|
lefevre-fraser/openmeta-mms
|
08f3115e76498df1f8d70641d71f5c52cab4ce5f
|
[
"MIT"
] | null | null | null |
bin/Python27/Lib/site-packages/scipy/special/tests/test_lambertw.py
|
lefevre-fraser/openmeta-mms
|
08f3115e76498df1f8d70641d71f5c52cab4ce5f
|
[
"MIT"
] | 1
|
2020-08-08T12:44:48.000Z
|
2020-08-08T12:44:48.000Z
|
#
# Tests for the lambertw function,
# Adapted from the MPMath tests [1] by Yosef Meller, mellerf@netvision.net.il
# Distributed under the same license as SciPy itself.
#
# [1] mpmath source code, Subversion revision 992
# http://code.google.com/p/mpmath/source/browse/trunk/mpmath/tests/test_functions2.py?spec=svn994&r=992
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_, assert_equal, assert_array_almost_equal
from scipy.special import lambertw
from numpy import nan, inf, pi, e, isnan, log, r_, array, complex_
from scipy.special._testutils import FuncData
def test_values():
assert_(isnan(lambertw(nan)))
assert_equal(lambertw(inf,1).real, inf)
assert_equal(lambertw(inf,1).imag, 2*pi)
assert_equal(lambertw(-inf,1).real, inf)
assert_equal(lambertw(-inf,1).imag, 3*pi)
assert_equal(lambertw(1.), lambertw(1., 0))
data = [
(0,0, 0),
(0+0j,0, 0),
(inf,0, inf),
(0,-1, -inf),
(0,1, -inf),
(0,3, -inf),
(e,0, 1),
(1,0, 0.567143290409783873),
(-pi/2,0, 1j*pi/2),
(-log(2)/2,0, -log(2)),
(0.25,0, 0.203888354702240164),
(-0.25,0, -0.357402956181388903),
(-1./10000,0, -0.000100010001500266719),
(-0.25,-1, -2.15329236411034965),
(0.25,-1, -3.00899800997004620-4.07652978899159763j),
(-0.25,-1, -2.15329236411034965),
(0.25,1, -3.00899800997004620+4.07652978899159763j),
(-0.25,1, -3.48973228422959210+7.41405453009603664j),
(-4,0, 0.67881197132094523+1.91195078174339937j),
(-4,1, -0.66743107129800988+7.76827456802783084j),
(-4,-1, 0.67881197132094523-1.91195078174339937j),
(1000,0, 5.24960285240159623),
(1000,1, 4.91492239981054535+5.44652615979447070j),
(1000,-1, 4.91492239981054535-5.44652615979447070j),
(1000,5, 3.5010625305312892+29.9614548941181328j),
(3+4j,0, 1.281561806123775878+0.533095222020971071j),
(-0.4+0.4j,0, -0.10396515323290657+0.61899273315171632j),
(3+4j,1, -0.11691092896595324+5.61888039871282334j),
(3+4j,-1, 0.25856740686699742-3.85211668616143559j),
(-0.5,-1, -0.794023632344689368-0.770111750510379110j),
(-1./10000,1, -11.82350837248724344+6.80546081842002101j),
(-1./10000,-1, -11.6671145325663544),
(-1./10000,-2, -11.82350837248724344-6.80546081842002101j),
(-1./100000,4, -14.9186890769540539+26.1856750178782046j),
(-1./100000,5, -15.0931437726379218666+32.5525721210262290086j),
((2+1j)/10,0, 0.173704503762911669+0.071781336752835511j),
((2+1j)/10,1, -3.21746028349820063+4.56175438896292539j),
((2+1j)/10,-1, -3.03781405002993088-3.53946629633505737j),
((2+1j)/10,4, -4.6878509692773249+23.8313630697683291j),
(-(2+1j)/10,0, -0.226933772515757933-0.164986470020154580j),
(-(2+1j)/10,1, -2.43569517046110001+0.76974067544756289j),
(-(2+1j)/10,-1, -3.54858738151989450-6.91627921869943589j),
(-(2+1j)/10,4, -4.5500846928118151+20.6672982215434637j),
(pi,0, 1.073658194796149172092178407024821347547745350410314531),
# Former bug in generated branch,
(-0.5+0.002j,0, -0.78917138132659918344 + 0.76743539379990327749j),
(-0.5-0.002j,0, -0.78917138132659918344 - 0.76743539379990327749j),
(-0.448+0.4j,0, -0.11855133765652382241 + 0.66570534313583423116j),
(-0.448-0.4j,0, -0.11855133765652382241 - 0.66570534313583423116j),
]
data = array(data, dtype=complex_)
def w(x, y):
return lambertw(x, y.real.astype(int))
olderr = np.seterr(all='ignore')
try:
FuncData(w, data, (0,1), 2, rtol=1e-10, atol=1e-13).check()
finally:
np.seterr(**olderr)
def test_ufunc():
assert_array_almost_equal(
lambertw(r_[0., e, 1.]), r_[0., 1., 0.567143290409783873])
def test_lambertw_ufunc_loop_selection():
# see https://github.com/scipy/scipy/issues/4895
dt = np.dtype(np.complex128)
assert_equal(lambertw(0, 0, 0).dtype, dt)
assert_equal(lambertw([0], 0, 0).dtype, dt)
assert_equal(lambertw(0, [0], 0).dtype, dt)
assert_equal(lambertw(0, 0, [0]).dtype, dt)
assert_equal(lambertw([0], [0], [0]).dtype, dt)
| 42.509615
| 108
| 0.633567
|
954f01838e4a748b34b956caea9c42a021d60d5f
| 6,395
|
py
|
Python
|
research/object_detection/builders/box_predictor_builder.py
|
luk1684tw/models
|
7e7776e1ce0db64cdf22d6de9f1a9848e5a71b2c
|
[
"Apache-2.0"
] | 1
|
2018-09-05T09:34:50.000Z
|
2018-09-05T09:34:50.000Z
|
research/object_detection/builders/box_predictor_builder.py
|
luk1684tw/models
|
7e7776e1ce0db64cdf22d6de9f1a9848e5a71b2c
|
[
"Apache-2.0"
] | null | null | null |
research/object_detection/builders/box_predictor_builder.py
|
luk1684tw/models
|
7e7776e1ce0db64cdf22d6de9f1a9848e5a71b2c
|
[
"Apache-2.0"
] | 1
|
2020-09-15T02:44:52.000Z
|
2020-09-15T02:44:52.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Function to build box predictor from configuration."""
from object_detection.core import box_predictor
from object_detection.protos import box_predictor_pb2
def build(argscope_fn, box_predictor_config, is_training, num_classes):
"""Builds box predictor based on the configuration.
Builds box predictor based on the configuration. See box_predictor.proto for
configurable options. Also, see box_predictor.py for more details.
Args:
argscope_fn: A function that takes the following inputs:
* hyperparams_pb2.Hyperparams proto
* a boolean indicating if the model is in training mode.
and returns a tf slim argscope for Conv and FC hyperparameters.
box_predictor_config: box_predictor_pb2.BoxPredictor proto containing
configuration.
    is_training: Whether the model is in training mode.
num_classes: Number of classes to predict.
Returns:
box_predictor: box_predictor.BoxPredictor object.
Raises:
ValueError: On unknown box predictor.
"""
if not isinstance(box_predictor_config, box_predictor_pb2.BoxPredictor):
raise ValueError('box_predictor_config not of type '
'box_predictor_pb2.BoxPredictor.')
box_predictor_oneof = box_predictor_config.WhichOneof('box_predictor_oneof')
if box_predictor_oneof == 'convolutional_box_predictor':
conv_box_predictor = box_predictor_config.convolutional_box_predictor
conv_hyperparams_fn = argscope_fn(conv_box_predictor.conv_hyperparams,
is_training)
box_predictor_object = box_predictor.ConvolutionalBoxPredictor(
is_training=is_training,
num_classes=num_classes,
conv_hyperparams_fn=conv_hyperparams_fn,
min_depth=conv_box_predictor.min_depth,
max_depth=conv_box_predictor.max_depth,
num_layers_before_predictor=(conv_box_predictor.
num_layers_before_predictor),
use_dropout=conv_box_predictor.use_dropout,
dropout_keep_prob=conv_box_predictor.dropout_keep_probability,
kernel_size=conv_box_predictor.kernel_size,
box_code_size=conv_box_predictor.box_code_size,
apply_sigmoid_to_scores=conv_box_predictor.apply_sigmoid_to_scores,
class_prediction_bias_init=(conv_box_predictor.
class_prediction_bias_init),
use_depthwise=conv_box_predictor.use_depthwise
)
return box_predictor_object
if box_predictor_oneof == 'weight_shared_convolutional_box_predictor':
conv_box_predictor = (box_predictor_config.
weight_shared_convolutional_box_predictor)
conv_hyperparams_fn = argscope_fn(conv_box_predictor.conv_hyperparams,
is_training)
box_predictor_object = box_predictor.WeightSharedConvolutionalBoxPredictor(
is_training=is_training,
num_classes=num_classes,
conv_hyperparams_fn=conv_hyperparams_fn,
depth=conv_box_predictor.depth,
num_layers_before_predictor=(conv_box_predictor.
num_layers_before_predictor),
kernel_size=conv_box_predictor.kernel_size,
box_code_size=conv_box_predictor.box_code_size,
class_prediction_bias_init=conv_box_predictor.class_prediction_bias_init
)
return box_predictor_object
if box_predictor_oneof == 'mask_rcnn_box_predictor':
mask_rcnn_box_predictor = box_predictor_config.mask_rcnn_box_predictor
fc_hyperparams_fn = argscope_fn(mask_rcnn_box_predictor.fc_hyperparams,
is_training)
conv_hyperparams_fn = None
if mask_rcnn_box_predictor.HasField('conv_hyperparams'):
conv_hyperparams_fn = argscope_fn(
mask_rcnn_box_predictor.conv_hyperparams, is_training)
box_predictor_object = box_predictor.MaskRCNNBoxPredictor(
is_training=is_training,
num_classes=num_classes,
fc_hyperparams_fn=fc_hyperparams_fn,
use_dropout=mask_rcnn_box_predictor.use_dropout,
dropout_keep_prob=mask_rcnn_box_predictor.dropout_keep_probability,
box_code_size=mask_rcnn_box_predictor.box_code_size,
conv_hyperparams_fn=conv_hyperparams_fn,
predict_instance_masks=mask_rcnn_box_predictor.predict_instance_masks,
mask_height=mask_rcnn_box_predictor.mask_height,
mask_width=mask_rcnn_box_predictor.mask_width,
mask_prediction_num_conv_layers=(
mask_rcnn_box_predictor.mask_prediction_num_conv_layers),
mask_prediction_conv_depth=(
mask_rcnn_box_predictor.mask_prediction_conv_depth),
masks_are_class_agnostic=(
mask_rcnn_box_predictor.masks_are_class_agnostic),
predict_keypoints=mask_rcnn_box_predictor.predict_keypoints)
return box_predictor_object
if box_predictor_oneof == 'rfcn_box_predictor':
rfcn_box_predictor = box_predictor_config.rfcn_box_predictor
conv_hyperparams_fn = argscope_fn(rfcn_box_predictor.conv_hyperparams,
is_training)
box_predictor_object = box_predictor.RfcnBoxPredictor(
is_training=is_training,
num_classes=num_classes,
conv_hyperparams_fn=conv_hyperparams_fn,
crop_size=[rfcn_box_predictor.crop_height,
rfcn_box_predictor.crop_width],
num_spatial_bins=[rfcn_box_predictor.num_spatial_bins_height,
rfcn_box_predictor.num_spatial_bins_width],
depth=rfcn_box_predictor.depth,
box_code_size=rfcn_box_predictor.box_code_size)
return box_predictor_object
raise ValueError('Unknown box predictor: {}'.format(box_predictor_oneof))
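# Illustrative sketch (not part of this module) of how `build` above is typically
# driven. Treat the hyperparams_builder import and the truncated proto text as
# assumptions; a real config must fully populate conv_hyperparams.
#
# from google.protobuf import text_format
# from object_detection.builders import hyperparams_builder
#
# config = text_format.Parse(
#     'convolutional_box_predictor { conv_hyperparams { ... } }',
#     box_predictor_pb2.BoxPredictor())
# predictor = build(hyperparams_builder.build, config,
#                   is_training=True, num_classes=90)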
| 47.37037
| 80
| 0.736826
|
1ecaf86c62846d26d00b89c00e102ef00f877a82
| 4,638
|
py
|
Python
|
tests/data/raster_source/test_geotiff_source.py
|
Yochengliu/raster-vision
|
f5badc387df86ce02d84e0e274a08026dbf65bd6
|
[
"Apache-2.0"
] | 1
|
2019-12-10T13:37:39.000Z
|
2019-12-10T13:37:39.000Z
|
tests/data/raster_source/test_geotiff_source.py
|
Yochengliu/raster-vision
|
f5badc387df86ce02d84e0e274a08026dbf65bd6
|
[
"Apache-2.0"
] | null | null | null |
tests/data/raster_source/test_geotiff_source.py
|
Yochengliu/raster-vision
|
f5badc387df86ce02d84e0e274a08026dbf65bd6
|
[
"Apache-2.0"
] | null | null | null |
import unittest
import os
import numpy as np
import rasterio
import rastervision as rv
from rastervision.core import Box
from rastervision.utils.misc import save_img
from rastervision.data.raster_source.rasterio_source import load_window
from rastervision.rv_config import RVConfig
from tests import data_file_path
class TestGeoTiffSource(unittest.TestCase):
def test_load_window(self):
with RVConfig.get_tmp_dir() as temp_dir:
# make geotiff filled with ones and zeros with nodata == 1
image_path = os.path.join(temp_dir, 'temp.tif')
height = 100
width = 100
nb_channels = 3
with rasterio.open(
image_path,
'w',
driver='GTiff',
height=height,
width=width,
count=nb_channels,
dtype=np.uint8,
nodata=1) as image_dataset:
im = np.random.randint(
0, 2, (height, width, nb_channels)).astype(np.uint8)
for channel in range(nb_channels):
image_dataset.write(im[:, :, channel], channel + 1)
# Should be all zeros after converting nodata values to zero.
window = Box.make_square(0, 0, 100).rasterio_format()
with rasterio.open(image_path) as image_dataset:
chip = load_window(image_dataset, window=window)
np.testing.assert_equal(chip, np.zeros(chip.shape))
def test_get_dtype(self):
img_path = data_file_path('small-rgb-tile.tif')
with RVConfig.get_tmp_dir() as tmp_dir:
source = rv.data.GeoTiffSourceConfig(uris=[img_path]) \
.create_source(tmp_dir)
self.assertEqual(source.get_dtype(), np.uint8)
def test_gets_raw_chip(self):
img_path = data_file_path('small-rgb-tile.tif')
channel_order = [0, 1]
source = rv.data.GeoTiffSourceConfig(uris=[img_path],
channel_order=channel_order) \
.create_source(tmp_dir=None)
out_chip = source.get_raw_image_array()
self.assertEqual(out_chip.shape[2], 3)
def test_gets_raw_chip_from_proto(self):
img_path = data_file_path('small-rgb-tile.tif')
channel_order = [0, 1]
msg = rv.data.GeoTiffSourceConfig(uris=[img_path],
channel_order=channel_order) \
.to_proto()
source = rv.RasterSourceConfig.from_proto(msg) \
.create_source(tmp_dir=None)
out_chip = source.get_raw_image_array()
self.assertEqual(out_chip.shape[2], 3)
def test_uses_channel_order(self):
with RVConfig.get_tmp_dir() as tmp_dir:
img_path = os.path.join(tmp_dir, 'img.tif')
chip = np.ones((2, 2, 4)).astype(np.uint8)
chip[:, :, :] *= np.array([0, 1, 2, 3]).astype(np.uint8)
save_img(chip, img_path)
channel_order = [0, 1, 2]
source = rv.RasterSourceConfig.builder(rv.GEOTIFF_SOURCE) \
.with_uri(img_path) \
.with_channel_order(channel_order) \
.build() \
.create_source(tmp_dir=tmp_dir)
out_chip = source.get_image_array()
expected_out_chip = np.ones((2, 2, 3)).astype(np.uint8)
expected_out_chip[:, :, :] *= np.array([0, 1, 2]).astype(np.uint8)
np.testing.assert_equal(out_chip, expected_out_chip)
def test_with_stats_transformer(self):
config = rv.RasterSourceConfig.builder(rv.GEOTIFF_SOURCE) \
.with_uri('dummy') \
.with_stats_transformer() \
.build()
self.assertEqual(len(config.transformers), 1)
self.assertIsInstance(config.transformers[0],
rv.data.StatsTransformerConfig)
def test_missing_config_uri(self):
with self.assertRaises(rv.ConfigError):
rv.data.RasterSourceConfig.builder(rv.GEOTIFF_SOURCE).build()
def test_no_missing_config(self):
try:
rv.data.RasterSourceConfig.builder(
rv.GEOTIFF_SOURCE).with_uri('').build()
except rv.ConfigError:
self.fail('ConfigError raised unexpectedly')
if __name__ == '__main__':
unittest.main()
| 38.65
| 78
| 0.562743
|
f8efb3751d677abb84c8956b3496a75d2b6b8cbb
| 3,283
|
py
|
Python
|
ytree/utilities/io.py
|
jwise77/ytree
|
8bd905bb0995383c1285aeba586d41859f494a9b
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
ytree/utilities/io.py
|
jwise77/ytree
|
8bd905bb0995383c1285aeba586d41859f494a9b
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
ytree/utilities/io.py
|
jwise77/ytree
|
8bd905bb0995383c1285aeba586d41859f494a9b
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
"""
io utilities
"""
#-----------------------------------------------------------------------------
# Copyright (c) ytree development team. All rights reserved.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import numpy as np
from unyt import \
unyt_array, \
unyt_quantity
from yt.funcs import \
get_pbar
from ytree.utilities.logger import \
fake_pbar
def parse_h5_attr(f, attr):
"""A Python3-safe function for getting hdf5 attributes.
If an attribute is supposed to be a string, this will return it as such.
This was taken from yt.
"""
val = f.attrs.get(attr, None)
if isinstance(val, bytes):
return val.decode('utf8')
else:
return val
def _hdf5_yt_attr(fh, attr, unit_registry=None):
"""
Read an hdf5 attribute. If there exists another attribute
named <attr>_units, use that to assign units and return
as either a unyt_array or unyt_quantity.
"""
val = fh.attrs[attr]
units = ""
ufield = "%s_units" % attr
if ufield in fh.attrs:
units = fh.attrs[ufield]
if isinstance(units, bytes):
units = units.decode("utf")
if units == "dimensionless":
units = ""
if units != "":
if isinstance(val, np.ndarray):
val = unyt_array(val, units, registry=unit_registry)
else:
val = unyt_quantity(val, units, registry=unit_registry)
return val
def _hdf5_yt_array_lite(fh, field):
"""
Read an hdf5 dataset. If that dataset has a "units" attribute,
return that as well, but do not cast as a unyt_array.
"""
units = ""
if "units" in fh[field].attrs:
units = fh[field].attrs["units"]
if units == "dimensionless": units = ""
return (fh[field][()], units)
def f_text_block(f, block_size=4096, file_size=None, sep="\n",
pbar_string=None):
"""
Read lines from a file faster than f.readlines().
"""
start = f.tell()
if file_size is None:
f.seek(0, 2)
file_size = f.tell() - start
f.seek(start)
nblocks = np.ceil(float(file_size) /
block_size).astype(np.int64)
read_size = file_size + start
lbuff = ""
if pbar_string is None:
pbar = fake_pbar()
else:
pbar = get_pbar(pbar_string, file_size)
for ib in range(nblocks):
offset = f.tell()
my_block = min(block_size, read_size-offset)
if my_block <= 0: break
buff = f.read(my_block)
linl = -1
for ih in range(buff.count(sep)):
inl = buff.find(sep, linl+1)
if inl < 0:
lbuff += buff[linl+1:]
continue
else:
line = lbuff + buff[linl+1:inl]
loc = offset - len(lbuff) + linl + 1
lbuff = ""
linl = inl
pbar.update(loc+len(line)-start+1)
yield line, loc
lbuff += buff[linl+1:]
if lbuff:
loc = f.tell() - len(lbuff)
pbar.update(loc+len(lbuff)-start+1)
yield lbuff, loc
pbar.finish()
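# Illustrative usage sketch (not part of this module): f_text_block yields
# (line, byte_offset) pairs, so a caller can parse each line and remember where
# it started in the file. The helper below is hypothetical, only for illustration.
def _example_line_offsets(filename):
    offsets = []
    with open(filename, "r") as f:
        for line, loc in f_text_block(f):
            if not line.startswith("#"):
                offsets.append(loc)
    return offsets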
| 28.547826
| 78
| 0.549497
|
729d4b95271e286ad7390f17a26e336f350cf659
| 248
|
py
|
Python
|
setup.py
|
SrinivasKummari/data-science-with-python
|
0f824c437325c1cae5793e96625c19976fb17149
|
[
"MIT"
] | null | null | null |
setup.py
|
SrinivasKummari/data-science-with-python
|
0f824c437325c1cae5793e96625c19976fb17149
|
[
"MIT"
] | null | null | null |
setup.py
|
SrinivasKummari/data-science-with-python
|
0f824c437325c1cae5793e96625c19976fb17149
|
[
"MIT"
] | null | null | null |
from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.1.0',
description='Building a predictive model for titanic disaster recovery',
author='Srinivas Kummari',
license='MIT',
)
| 22.545455
| 76
| 0.697581
|
54de2456e147dfea2d131a33d2c797d3c6a98c8b
| 3,006
|
py
|
Python
|
scripts/config_generator/win_readline.py
|
elainehoml/Savu
|
e4772704606f71d6803d832084e10faa585e7358
|
[
"Apache-2.0"
] | 39
|
2015-03-30T14:03:42.000Z
|
2022-03-16T16:50:33.000Z
|
scripts/config_generator/win_readline.py
|
elainehoml/Savu
|
e4772704606f71d6803d832084e10faa585e7358
|
[
"Apache-2.0"
] | 670
|
2015-02-11T11:08:09.000Z
|
2022-03-21T09:27:57.000Z
|
scripts/config_generator/win_readline.py
|
elainehoml/Savu
|
e4772704606f71d6803d832084e10faa585e7358
|
[
"Apache-2.0"
] | 54
|
2015-02-13T14:09:52.000Z
|
2022-01-24T13:57:09.000Z
|
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: win_readline
:platform: Unix
:synopsis:
.. moduleauthor:: Nicola Wadeson <scientificsoftware@diamond.ac.uk>
"""
from pyreadline.rlmain import Readline
__all__ = [ 'parse_and_bind',
'get_line_buffer',
'insert_text',
'clear_history',
'read_init_file',
'read_history_file',
'write_history_file',
'get_current_history_length',
'get_history_length',
'get_history_item',
'set_history_length',
'set_startup_hook',
'set_pre_input_hook',
'set_completer',
'get_completer',
'get_begidx',
'get_endidx',
'set_completer_delims',
'get_completer_delims',
'add_history',
'callback_handler_install',
'callback_handler_remove',
'callback_read_char',] #Some other objects are added below
# create a Readline object to contain the state
rl = Readline()
if rl.disable_readline:
def dummy(completer=""):
pass
for funk in __all__:
globals()[funk] = dummy
else:
def GetOutputFile():
'''Return the console object used by readline so that it can be used for printing in color.'''
return rl.console
__all__.append("GetOutputFile")
import pyreadline.console as console
# make these available so this looks like the python readline module
read_init_file = rl.read_init_file
parse_and_bind = rl.parse_and_bind
clear_history = rl.clear_history
add_history = rl.add_history
insert_text = rl.insert_text
write_history_file = rl.write_history_file
read_history_file = rl.read_history_file
get_completer_delims = rl.get_completer_delims
get_current_history_length = rl.get_current_history_length
get_history_length = rl.get_history_length
get_history_item = rl.get_history_item
get_line_buffer = rl.get_line_buffer
set_completer = rl.set_completer
get_completer = rl.get_completer
get_begidx = rl.get_begidx
get_endidx = rl.get_endidx
set_completer_delims = rl.set_completer_delims
set_history_length = rl.set_history_length
set_pre_input_hook = rl.set_pre_input_hook
set_startup_hook = rl.set_startup_hook
callback_handler_install=rl.callback_handler_install
callback_handler_remove=rl.callback_handler_remove
callback_read_char=rl.callback_read_char
console.install_readline(rl.readline)
__all__.append("rl")
| 31.642105
| 102
| 0.696274
|
5c7173b50be9a73e5b8c316df89f8172e57e6b63
| 22,071
|
py
|
Python
|
tests/integration/modules/test_cp.py
|
nizD/salt
|
bbe135d62d8d8b4e4a7d0362097e1b3a3b092bed
|
[
"Apache-2.0"
] | 1
|
2020-12-28T09:48:52.000Z
|
2020-12-28T09:48:52.000Z
|
tests/integration/modules/test_cp.py
|
nizD/salt
|
bbe135d62d8d8b4e4a7d0362097e1b3a3b092bed
|
[
"Apache-2.0"
] | 2
|
2021-04-30T21:36:41.000Z
|
2021-12-13T20:50:09.000Z
|
tests/integration/modules/test_cp.py
|
nizD/salt
|
bbe135d62d8d8b4e4a7d0362097e1b3a3b092bed
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import sys
import uuid
import hashlib
import logging
import psutil
import shutil
import signal
import tempfile
import textwrap
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.helpers import (
get_unused_localhost_port,
skip_if_not_root,
with_tempfile)
from tests.support.unit import skipIf
import tests.support.paths as paths
# Import 3rd party libs
import salt.ext.six as six
# Import salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
log = logging.getLogger(__name__)
SSL3_SUPPORT = sys.version_info >= (2, 7, 9)
class CPModuleTest(ModuleCase):
'''
Validate the cp module
'''
def run_function(self, *args, **kwargs):
'''
Ensure that results are decoded
TODO: maybe move this behavior to ModuleCase itself?
'''
return salt.utils.data.decode(
super(CPModuleTest, self).run_function(*args, **kwargs)
)
@with_tempfile()
def test_get_file(self, tgt):
'''
cp.get_file
'''
self.run_function(
'cp.get_file',
[
'salt://grail/scene33',
tgt,
])
with salt.utils.files.fopen(tgt, 'r') as scene:
data = salt.utils.stringutils.to_unicode(scene.read())
self.assertIn('KNIGHT: They\'re nervous, sire.', data)
self.assertNotIn('bacon', data)
def test_get_file_to_dir(self):
'''
cp.get_file
'''
tgt = os.path.join(paths.TMP, '')
self.run_function(
'cp.get_file',
[
'salt://grail/scene33',
tgt,
])
with salt.utils.files.fopen(tgt + 'scene33', 'r') as scene:
data = salt.utils.stringutils.to_unicode(scene.read())
self.assertIn('KNIGHT: They\'re nervous, sire.', data)
self.assertNotIn('bacon', data)
@with_tempfile()
@skipIf(salt.utils.platform.is_windows() and six.PY3, 'This test hangs on Windows on Py3')
def test_get_file_templated_paths(self, tgt):
'''
cp.get_file
'''
self.run_function(
'cp.get_file',
[
'salt://{{grains.test_grain}}',
tgt.replace('cheese', '{{grains.test_grain}}')
],
template='jinja'
)
with salt.utils.files.fopen(tgt, 'r') as cheese:
data = salt.utils.stringutils.to_unicode(cheese.read())
self.assertIn('Gromit', data)
self.assertNotIn('bacon', data)
@with_tempfile()
def test_get_file_gzipped(self, tgt):
'''
cp.get_file
'''
src = os.path.join(paths.FILES, 'file', 'base', 'file.big')
with salt.utils.files.fopen(src, 'rb') as fp_:
hash_str = hashlib.md5(fp_.read()).hexdigest()
self.run_function(
'cp.get_file',
[
'salt://file.big',
tgt,
],
gzip=5
)
with salt.utils.files.fopen(tgt, 'rb') as scene:
data = scene.read()
self.assertEqual(hash_str, hashlib.md5(data).hexdigest())
data = salt.utils.stringutils.to_unicode(data)
self.assertIn('KNIGHT: They\'re nervous, sire.', data)
self.assertNotIn('bacon', data)
def test_get_file_makedirs(self):
'''
cp.get_file
'''
tgt = os.path.join(paths.TMP, 'make', 'dirs', 'scene33')
self.run_function(
'cp.get_file',
[
'salt://grail/scene33',
tgt,
],
makedirs=True
)
self.addCleanup(shutil.rmtree, os.path.join(paths.TMP, 'make'), ignore_errors=True)
with salt.utils.files.fopen(tgt, 'r') as scene:
data = salt.utils.stringutils.to_unicode(scene.read())
self.assertIn('KNIGHT: They\'re nervous, sire.', data)
self.assertNotIn('bacon', data)
@with_tempfile()
def test_get_template(self, tgt):
'''
cp.get_template
'''
self.run_function(
'cp.get_template',
['salt://grail/scene33', tgt],
spam='bacon')
with salt.utils.files.fopen(tgt, 'r') as scene:
data = salt.utils.stringutils.to_unicode(scene.read())
self.assertIn('bacon', data)
self.assertNotIn('spam', data)
def test_get_dir(self):
'''
cp.get_dir
'''
tgt = os.path.join(paths.TMP, 'many')
self.run_function(
'cp.get_dir',
[
'salt://grail',
tgt
])
self.assertIn('grail', os.listdir(tgt))
self.assertIn('36', os.listdir(os.path.join(tgt, 'grail')))
self.assertIn('empty', os.listdir(os.path.join(tgt, 'grail')))
self.assertIn('scene', os.listdir(os.path.join(tgt, 'grail', '36')))
def test_get_dir_templated_paths(self):
'''
cp.get_dir
'''
tgt = os.path.join(paths.TMP, 'many')
self.run_function(
'cp.get_dir',
[
'salt://{{grains.script}}',
tgt.replace('many', '{{grains.alot}}')
]
)
self.assertIn('grail', os.listdir(tgt))
self.assertIn('36', os.listdir(os.path.join(tgt, 'grail')))
self.assertIn('empty', os.listdir(os.path.join(tgt, 'grail')))
self.assertIn('scene', os.listdir(os.path.join(tgt, 'grail', '36')))
# cp.get_url tests
@with_tempfile()
def test_get_url(self, tgt):
'''
cp.get_url with salt:// source given
'''
self.run_function(
'cp.get_url',
[
'salt://grail/scene33',
tgt,
])
with salt.utils.files.fopen(tgt, 'r') as scene:
data = salt.utils.stringutils.to_unicode(scene.read())
self.assertIn('KNIGHT: They\'re nervous, sire.', data)
self.assertNotIn('bacon', data)
def test_get_url_makedirs(self):
'''
cp.get_url
'''
tgt = os.path.join(paths.TMP, 'make', 'dirs', 'scene33')
self.run_function(
'cp.get_url',
[
'salt://grail/scene33',
tgt,
],
makedirs=True
)
self.addCleanup(shutil.rmtree, os.path.join(paths.TMP, 'make'), ignore_errors=True)
with salt.utils.files.fopen(tgt, 'r') as scene:
data = salt.utils.stringutils.to_unicode(scene.read())
self.assertIn('KNIGHT: They\'re nervous, sire.', data)
self.assertNotIn('bacon', data)
def test_get_url_dest_empty(self):
'''
cp.get_url with salt:// source given and destination omitted.
'''
ret = self.run_function(
'cp.get_url',
[
'salt://grail/scene33',
])
with salt.utils.files.fopen(ret, 'r') as scene:
data = salt.utils.stringutils.to_unicode(scene.read())
self.assertIn('KNIGHT: They\'re nervous, sire.', data)
self.assertNotIn('bacon', data)
def test_get_url_no_dest(self):
'''
cp.get_url with salt:// source given and destination set as None
'''
tgt = None
ret = self.run_function(
'cp.get_url',
[
'salt://grail/scene33',
tgt,
])
self.assertIn('KNIGHT: They\'re nervous, sire.', ret)
def test_get_url_nonexistent_source(self):
'''
cp.get_url with nonexistent salt:// source given
'''
tgt = None
ret = self.run_function(
'cp.get_url',
[
'salt://grail/nonexistent_scene',
tgt,
])
self.assertEqual(ret, False)
def test_get_url_to_dir(self):
'''
cp.get_url with salt:// source
'''
tgt = os.path.join(paths.TMP, '')
self.run_function(
'cp.get_url',
[
'salt://grail/scene33',
tgt,
])
with salt.utils.files.fopen(tgt + 'scene33', 'r') as scene:
data = salt.utils.stringutils.to_unicode(scene.read())
self.assertIn('KNIGHT: They\'re nervous, sire.', data)
self.assertNotIn('bacon', data)
@skipIf(not SSL3_SUPPORT, 'Requires python with SSL3 support')
@skipIf(salt.utils.platform.is_darwin() and six.PY2, 'This test hangs on OS X on Py2')
@with_tempfile()
def test_get_url_https(self, tgt):
'''
cp.get_url with https:// source given
'''
self.run_function(
'cp.get_url',
[
'https://repo.saltstack.com/index.html',
tgt,
])
with salt.utils.files.fopen(tgt, 'r') as instructions:
data = salt.utils.stringutils.to_unicode(instructions.read())
self.assertIn('Bootstrap', data)
self.assertIn('Debian', data)
self.assertIn('Windows', data)
self.assertNotIn('AYBABTU', data)
@skipIf(not SSL3_SUPPORT, 'Requires python with SSL3 support')
@skipIf(salt.utils.platform.is_darwin() and six.PY2, 'This test hangs on OS X on Py2')
def test_get_url_https_dest_empty(self):
'''
cp.get_url with https:// source given and destination omitted.
'''
ret = self.run_function(
'cp.get_url',
[
'https://repo.saltstack.com/index.html',
])
with salt.utils.files.fopen(ret, 'r') as instructions:
data = salt.utils.stringutils.to_unicode(instructions.read())
self.assertIn('Bootstrap', data)
self.assertIn('Debian', data)
self.assertIn('Windows', data)
self.assertNotIn('AYBABTU', data)
@skipIf(not SSL3_SUPPORT, 'Requires python with SSL3 support')
@skipIf(salt.utils.platform.is_darwin() and six.PY2, 'This test hangs on OS X on Py2')
def test_get_url_https_no_dest(self):
'''
cp.get_url with https:// source given and destination set as None
'''
tgt = None
ret = self.run_function(
'cp.get_url',
[
'https://repo.saltstack.com/index.html',
tgt,
])
self.assertIn('Bootstrap', ret)
self.assertIn('Debian', ret)
self.assertIn('Windows', ret)
self.assertNotIn('AYBABTU', ret)
def test_get_url_file(self):
'''
cp.get_url with file:// source given
'''
tgt = ''
src = os.path.join('file://', paths.FILES, 'file', 'base', 'file.big')
ret = self.run_function(
'cp.get_url',
[
src,
tgt,
])
with salt.utils.files.fopen(ret, 'r') as scene:
data = salt.utils.stringutils.to_unicode(scene.read())
self.assertIn('KNIGHT: They\'re nervous, sire.', data)
self.assertNotIn('bacon', data)
def test_get_url_file_no_dest(self):
'''
cp.get_url with file:// source given and destination set as None
'''
tgt = None
src = os.path.join('file://', paths.FILES, 'file', 'base', 'file.big')
ret = self.run_function(
'cp.get_url',
[
src,
tgt,
])
self.assertIn('KNIGHT: They\'re nervous, sire.', ret)
self.assertNotIn('bacon', ret)
# cp.get_file_str tests
def test_get_file_str_salt(self):
'''
cp.get_file_str with salt:// source given
'''
src = 'salt://grail/scene33'
ret = self.run_function(
'cp.get_file_str',
[
src,
])
self.assertIn('KNIGHT: They\'re nervous, sire.', ret)
def test_get_file_str_nonexistent_source(self):
'''
cp.get_file_str with nonexistent salt:// source given
'''
src = 'salt://grail/nonexistent_scene'
ret = self.run_function(
'cp.get_file_str',
[
src,
])
self.assertEqual(ret, False)
@skipIf(not SSL3_SUPPORT, 'Requires python with SSL3 support')
@skipIf(salt.utils.platform.is_darwin() and six.PY2, 'This test hangs on OS X on Py2')
def test_get_file_str_https(self):
'''
cp.get_file_str with https:// source given
'''
src = 'https://repo.saltstack.com/index.html'
ret = self.run_function(
'cp.get_file_str',
[
src,
])
self.assertIn('Bootstrap', ret)
self.assertIn('Debian', ret)
self.assertIn('Windows', ret)
self.assertNotIn('AYBABTU', ret)
def test_get_file_str_local(self):
'''
cp.get_file_str with file:// source given
'''
src = os.path.join('file://', paths.FILES, 'file', 'base', 'file.big')
ret = self.run_function(
'cp.get_file_str',
[
src,
])
self.assertIn('KNIGHT: They\'re nervous, sire.', ret)
self.assertNotIn('bacon', ret)
# caching tests
def test_cache_file(self):
'''
cp.cache_file
'''
ret = self.run_function(
'cp.cache_file',
[
'salt://grail/scene33',
])
with salt.utils.files.fopen(ret, 'r') as scene:
data = salt.utils.stringutils.to_unicode(scene.read())
self.assertIn('KNIGHT: They\'re nervous, sire.', data)
self.assertNotIn('bacon', data)
def test_cache_files(self):
'''
cp.cache_files
'''
ret = self.run_function(
'cp.cache_files',
[
['salt://grail/scene33', 'salt://grail/36/scene'],
])
for path in ret:
with salt.utils.files.fopen(path, 'r') as scene:
data = salt.utils.stringutils.to_unicode(scene.read())
self.assertIn('ARTHUR:', data)
self.assertNotIn('bacon', data)
@with_tempfile()
def test_cache_master(self, tgt):
'''
cp.cache_master
'''
ret = self.run_function(
'cp.cache_master',
[tgt],
)
for path in ret:
self.assertTrue(os.path.exists(path))
def test_cache_local_file(self):
'''
cp.cache_local_file
'''
src = os.path.join(paths.TMP, 'random')
with salt.utils.files.fopen(src, 'w+') as fn_:
fn_.write(salt.utils.stringutils.to_str('foo'))
ret = self.run_function(
'cp.cache_local_file',
[src])
with salt.utils.files.fopen(ret, 'r') as cp_:
self.assertEqual(
salt.utils.stringutils.to_unicode(cp_.read()),
'foo'
)
@skipIf(not salt.utils.path.which('nginx'), 'nginx not installed')
@skip_if_not_root
def test_cache_remote_file(self):
'''
cp.cache_file
'''
nginx_port = get_unused_localhost_port()
url_prefix = 'http://localhost:{0}/'.format(nginx_port)
temp_dir = tempfile.mkdtemp(dir=paths.TMP)
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
nginx_root_dir = os.path.join(temp_dir, 'root')
nginx_conf_dir = os.path.join(temp_dir, 'conf')
nginx_conf = os.path.join(nginx_conf_dir, 'nginx.conf')
nginx_pidfile = os.path.join(nginx_conf_dir, 'nginx.pid')
file_contents = 'Hello world!'
for dirname in (nginx_root_dir, nginx_conf_dir):
os.mkdir(dirname)
# Write the temp file
with salt.utils.files.fopen(os.path.join(nginx_root_dir, 'actual_file'), 'w') as fp_:
fp_.write(salt.utils.stringutils.to_str(file_contents))
# Write the nginx config
with salt.utils.files.fopen(nginx_conf, 'w') as fp_:
fp_.write(textwrap.dedent(salt.utils.stringutils.to_str(
'''\
user root;
worker_processes 1;
error_log {nginx_conf_dir}/server_error.log;
pid {nginx_pidfile};
events {{
worker_connections 1024;
}}
http {{
include /etc/nginx/mime.types;
default_type application/octet-stream;
access_log {nginx_conf_dir}/access.log;
error_log {nginx_conf_dir}/error.log;
server {{
listen {nginx_port} default_server;
server_name cachefile.local;
root {nginx_root_dir};
location ~ ^/301$ {{
return 301 /actual_file;
}}
location ~ ^/302$ {{
return 302 /actual_file;
}}
}}
}}'''.format(**locals())
)))
self.run_function(
'cmd.run',
[['nginx', '-c', nginx_conf]],
python_shell=False
)
with salt.utils.files.fopen(nginx_pidfile) as fp_:
nginx_pid = int(fp_.read().strip())
nginx_proc = psutil.Process(pid=nginx_pid)
self.addCleanup(nginx_proc.send_signal, signal.SIGQUIT)
for code in ('', '301', '302'):
url = url_prefix + (code or 'actual_file')
log.debug('attempting to cache %s', url)
ret = self.run_function('cp.cache_file', [url])
self.assertTrue(ret)
with salt.utils.files.fopen(ret) as fp_:
cached_contents = salt.utils.stringutils.to_unicode(fp_.read())
self.assertEqual(cached_contents, file_contents)
def test_list_states(self):
'''
cp.list_states
'''
ret = self.run_function(
'cp.list_states',
)
self.assertIn('core', ret)
self.assertIn('top', ret)
def test_list_minion(self):
'''
cp.list_minion
'''
self.run_function(
'cp.cache_file',
[
'salt://grail/scene33',
])
ret = self.run_function('cp.list_minion')
found = False
search = 'grail/scene33'
if salt.utils.platform.is_windows():
search = r'grail\scene33'
for path in ret:
if search in path:
found = True
break
self.assertTrue(found)
def test_is_cached(self):
'''
cp.is_cached
'''
self.run_function(
'cp.cache_file',
[
'salt://grail/scene33',
])
ret1 = self.run_function(
'cp.is_cached',
[
'salt://grail/scene33',
])
self.assertTrue(ret1)
ret2 = self.run_function(
'cp.is_cached',
[
'salt://fasldkgj/poicxzbn',
])
self.assertFalse(ret2)
def test_hash_file(self):
'''
cp.hash_file
'''
sha256_hash = self.run_function(
'cp.hash_file',
[
'salt://grail/scene33',
])
path = self.run_function(
'cp.cache_file',
[
'salt://grail/scene33',
])
with salt.utils.files.fopen(path, 'rb') as fn_:
data = fn_.read()
self.assertEqual(
sha256_hash['hsum'], hashlib.sha256(data).hexdigest())
@with_tempfile()
def test_get_file_from_env_predefined(self, tgt):
'''
cp.get_file
'''
tgt = os.path.join(paths.TMP, 'cheese')
try:
self.run_function('cp.get_file', ['salt://cheese', tgt])
with salt.utils.files.fopen(tgt, 'r') as cheese:
data = salt.utils.stringutils.to_unicode(cheese.read())
self.assertIn('Gromit', data)
self.assertNotIn('Comte', data)
finally:
os.unlink(tgt)
@with_tempfile()
def test_get_file_from_env_in_url(self, tgt):
tgt = os.path.join(paths.TMP, 'cheese')
try:
self.run_function('cp.get_file', ['salt://cheese?saltenv=prod', tgt])
with salt.utils.files.fopen(tgt, 'r') as cheese:
data = salt.utils.stringutils.to_unicode(cheese.read())
self.assertIn('Gromit', data)
self.assertIn('Comte', data)
finally:
os.unlink(tgt)
def test_push(self):
log_to_xfer = os.path.join(paths.TMP, uuid.uuid4().hex)
open(log_to_xfer, 'w').close() # pylint: disable=resource-leakage
try:
self.run_function('cp.push', [log_to_xfer])
tgt_cache_file = os.path.join(
paths.TMP,
'master-minion-root',
'cache',
'minions',
'minion',
'files',
paths.TMP,
log_to_xfer)
self.assertTrue(os.path.isfile(tgt_cache_file), 'File was not cached on the master')
finally:
os.unlink(tgt_cache_file)
def test_envs(self):
self.assertEqual(sorted(self.run_function('cp.envs')), sorted(['base', 'prod']))
| 32.126638
| 96
| 0.517919
|
872449cef7e302dd693363602cd754f2633802d5
| 1,588
|
py
|
Python
|
241_different_ways_to_add_parentheses.py
|
gengwg/leetcode
|
0af5256ec98149ef5863f3bba78ed1e749650f6e
|
[
"Apache-2.0"
] | 2
|
2018-04-24T19:17:40.000Z
|
2018-04-24T19:33:52.000Z
|
241_different_ways_to_add_parentheses.py
|
gengwg/leetcode
|
0af5256ec98149ef5863f3bba78ed1e749650f6e
|
[
"Apache-2.0"
] | null | null | null |
241_different_ways_to_add_parentheses.py
|
gengwg/leetcode
|
0af5256ec98149ef5863f3bba78ed1e749650f6e
|
[
"Apache-2.0"
] | 3
|
2020-06-17T05:48:52.000Z
|
2021-01-02T06:08:25.000Z
|
# 241. Different Ways to Add Parentheses
#
# Given a string of numbers and operators, return all possible results
# from computing all the different possible ways to group numbers and operators.
# The valid operators are +, - and *.
#
# Example 1
# Input: "2-1-1".
#
# ((2-1)-1) = 0
# (2-(1-1)) = 2
# Output: [0, 2]
#
#
# Example 2
# Input: "2*3-4*5"
#
# (2*(3-(4*5))) = -34
# ((2*3)-(4*5)) = -14
# ((2*(3-4))*5) = -10
# (2*((3-4)*5)) = -10
# (((2*3)-4)*5) = 10
# Output: [-34, -14, -10, -10, 10]
#
# http://www.tangjikai.com/algorithms/leetcode-241-different-ways-to-add-parentheses
# Divide and Conquer.
# For each operator, recursively compute the results of the sub-expressions before and after it, then combine them.
class Solution(object):
def diffWaysToCompute(self, input):
"""
:type input: str
:rtype: List[int]
"""
ans = []
for i, c in enumerate(input):
if c in '+-*':
left = self.diffWaysToCompute(input[:i])
right = self.diffWaysToCompute(input[i+1:])
for m in left:
for n in right:
if c == '+':
ans.append(m + n)
elif c == '*':
ans.append(m * n)
else:
# elif c == '-':
ans.append(m - n)
        # no operator found: input is a plain number
if not ans:
ans.append(int(input))
return ans
if __name__ == '__main__':
print Solution().diffWaysToCompute("2-1-1")
print Solution().diffWaysToCompute("2")
| 27.859649
| 84
| 0.488665
|
2b1bfc10c06b49d8d5675ad7b066e7a8c0fb24a1
| 513
|
py
|
Python
|
config/python.py
|
veltzer/pyblueprint
|
dda4d50755fd10a72a7ee076d698b7a5b39bf438
|
[
"MIT"
] | null | null | null |
config/python.py
|
veltzer/pyblueprint
|
dda4d50755fd10a72a7ee076d698b7a5b39bf438
|
[
"MIT"
] | null | null | null |
config/python.py
|
veltzer/pyblueprint
|
dda4d50755fd10a72a7ee076d698b7a5b39bf438
|
[
"MIT"
] | null | null | null |
import config.project
package_name = config.project.project_name
console_scripts = [
]
setup_requires = [
]
test_requires = [
'pytest',
'pytest-cov',
'pylint',
'flake8',
'pymakehelper',
]
install_requires = [
'svgwrite',
]
dev_requires = [
'pypitools',
'pydmt',
'pyclassifiers',
'pylint',
]
python_requires = ">=3.7"
extras_require = {
}
test_os = "[ubuntu-18.04, ubuntu-20.04]"
test_python = "[3.7, 3.8, 3.9]"
test_container = "[ 'ubuntu:18.04', 'ubuntu:20.04' ]"
| 13.864865
| 53
| 0.614035
|
791a0ab2f5e97cf2af9aba9f5b099033352fbf85
| 15,600
|
py
|
Python
|
paragridded/giga_tools.py
|
Mesharou/paragridded
|
d9dd6b7c7e7ba30c4e702717039afa85ef753003
|
[
"MIT"
] | 6
|
2020-11-15T14:42:48.000Z
|
2022-03-18T08:38:15.000Z
|
paragridded/giga_tools.py
|
Mesharou/paragridded
|
d9dd6b7c7e7ba30c4e702717039afa85ef753003
|
[
"MIT"
] | null | null | null |
paragridded/giga_tools.py
|
Mesharou/paragridded
|
d9dd6b7c7e7ba30c4e702717039afa85ef753003
|
[
"MIT"
] | 2
|
2022-02-21T17:29:00.000Z
|
2022-03-18T08:38:16.000Z
|
""" GIGATL experiment specifications
dimpart contains
- "netcdfdimnames": how to map netCDF to CROCO dimensions
- "domainpartition": which CROCO dimensions are tiled
"""
import os
import sys
import glob
from shapely.geometry.polygon import Polygon
import pickle
from pretty import BB
import pretty
import tempfile
try:
from mpi4py import MPI
is_mpi = True
except ImportError:
print("MPI not found, no problem")
is_mpi = False
if is_mpi:
comm = MPI.COMM_WORLD
rank = MPI.COMM_WORLD.Get_rank()
else:
rank = 0
def barrier():
if is_mpi:
MPI.COMM_WORLD.Barrier()
else:
pass
def abort():
if is_mpi:
MPI.COMM_WORLD.Abort()
else:
sys.exit()
# path to where giga_tools.py sits
dirmodule = os.path.dirname(pretty.__file__)
sep = os.path.sep
# path to pickle GIGATL data files
dirdata = sep.join(dirmodule.split(sep)[:-1] + ["data"])
subdomains = range(1, 14)
partition = (100, 100)
nsigma = 100
dimpart = {"netcdfdimnames":
{"sigma": ("sigma_rho", "sigma_w"),
"eta": ("eta_rho", "eta_v"),
"xi": ("xi_rho", "xi_u")},
"domainpartition": ("eta", "xi")}
domain = os.popen("hostname -d").read()
if "tgcc" in domain:
hostname = "irene"
elif "univ-brest" in domain:
hostname = os.popen("hostname").read()[:-1]
else:
raise ValueError("Could not find the Gigatl data")
if rank == 0:
print(f"hostname: {hostname}")
print(f"paragridded is located in : {dirmodule}")
print(f"data are located in : {dirdata}")
barrier()
def create_hisdir():
subd = 1
dirname = dirhis.format(subd=subd)
parent = os.path.abspath(dirname+"/..")
content = glob.glob(parent+"/??")
ok = True
for subd in subdomains:
dirname = dirhis.format(subd=subd)
if dirname in content:
if os.path.isdir(dirname):
command = f"fusermount -u {dirname}"
try:
os.system(command)
except:
pass
else:
ok = False
else:
os.makedirs(dirname)
if not ok:
print(f"{parent} needs to be cleaned")
try:
os.rename(f"{parent}", f"{dirtrash}/")
except:
abort()
for subd in subdomains:
dirname = dirhis.format(subd=subd)
os.makedirs(dirname)
print(f"{parent} has been cleaned", flush=True)
else:
print(f"{parent} is sound")
def create_directory(dirname, attempt=0):
if not os.path.isdir(dirname):
parent = os.path.abspath(dirname+"/..")
content = glob.glob(parent+"/*")
if not dirname in content:
os.makedirs(dirname)
if attempt == 2:
print(f"{dirname}: moved to trash")
else:
if attempt == 1:
print("{dirname}: fusermount didn't work")
command = f"mv -f {parent} {dirtrash}"
os.system(command)
create_directory(dirname, attempt=2)
if attempt == 2:
print(f"{dirname}: really serious problem")
abort()
else:
print(f"{dirname}: problem with the fuse system")
#print("*** try to fix it with fuserumount")
command = f"fusermount -u {dirname}"
os.system(command)
create_directory(dirname, attempt=1)
if hostname == "irene":
dirgridtar = "/ccc/store/cont003/gch0401/gch0401/GIGATL1_1h_tides/GRD/{subd:02}"
dirgigaref = "/ccc/store/cont003/gch0401/gch0401/GIGATL1_1h_tides/HIS_1h/{subd:02}"
dirgiga = "/ccc/store/cont003/gch0401/groullet/GIGATL1_1h_tides/HIS_1h/{subd:02}"
# fix_filename_on_store
# dirsurf = "/ccc/store/cont003/gch0401/gch0401/GIGATL1_1h_tides/SURF"
dirmounted_root = "/ccc/work/cont003/gch0401/groullet/gigatl"
dirmounted = f"{dirmounted_root}/R_{rank:04}"
dirgrid = "/ccc/scratch/cont003/gen12051/gulaj/GIGATL1/GRD3"
dirhis = dirmounted+"/HIS/{subd:02}"
#dirtrash = f"/ccc/scratch/cont003/gen12051/groullet/trash"
# for d in [dirhis]:
# for subd in subdomains:
# dirname = d.format(subd=subd)
# create_directory(dirname)
# or use directly
# dirgrid = "/ccc/scratch/cont003/ra4735/gulaj/GIGATL1/INIT_N100_100_100/GRD3"
# hisindex = [0, 6, 12, 18] | [24, 30, 36, 42] | [48, 54, 60, 66]
# | [72, 78, 84, 90] | [96, 102, 108, 114]
# e.g. "2008-09-19" contains [96, 102, 108, 114]
hisindex = 36
# hisdate = "2008-03-14" .. "2008-11-18" (included)
# 250 days as of Nov 17th 2020
hisdate = "2008-09-26"
targridtemplate = "gigatl1_grd_masked.{subd:02}.tar"
tarhistemplate = "gigatl1_his_1h.{hisdate}.{subd:02}.tar"
for subd in subdomains:
assert os.path.isdir(dirgigaref.format(subd=subd))
else:
dirgrid = "/net/omega/local/tmp/1/gula/GIGATL1/GIGATL1_1h_tides/GRD"
dirsurf = "/net/omega/local/tmp/1/gula/GIGATL1/GIGATL1_1h_tides/SURF/gigatl1_surf.2008-05-23"
dirgigaref = "/net/omega/local/tmp/1/gula/GIGATL1/GIGATL1_1h_tides/HIS_1h"
dirgiga = "/net/omega/local/tmp/1/gula/GIGATL1/GIGATL1_1h_tides/HIS_1h"
dirmounted_root = "/net/omega/local/tmp/1/roullet/gigatl"
dirmounted = f"{dirmounted_root}/R_{rank:04}"
dirhis = dirmounted+"/HIS/{subd:02}"
#dirtrash = "/net/omega/local/tmp/1/roullet/trash"
hisindex = 72
hisdate = "2008-09-23"
tarhistemplate = "gigatl1_his_1h.{hisdate}.{subd:02}.tar"
# for d in [dirhis]:
# for subd in subdomains:
# dirname = d.format(subd=subd)
# create_directory(dirname)
hour = 14
sqlitesdir = f"{dirmounted_root}/sqlites"
def check():
""" check that all paths are properly defined"""
checked = True
print(f" - history tar files will be mounted on: {dirmounted_root}")
print(f" - ratarmount executable is in : {ratarmount}")
def setup_directories():
if rank == 0:
if not os.path.isdir(sqlitesdir):
os.makedirs(sqlitesdir)
# if not os.path.isdir(dirtrash):
# os.makedirs(dirtrash)
# else:
# command = f"rm -Rf {dirtrash}/*"
# os.system(command)
if not os.path.isdir(dirmounted):
os.makedirs(dirmounted)
barrier()
create_hisdir()
def get_subd(tile):
if tile in subdmap:
return subdmap[tile]
else:
return -1
def grdfiles(tile):
#print(f" read grid {tile}")
subd = get_subd(tile)
directory = dirgrid.format(subd=subd)
filename = f"{directory}/gigatl1_grd_masked.{tile:04}.nc"
if not os.path.isfile(filename):
mount(subd, grid=True)
return filename
def surffiles(tile):
return f"{dirsurf}/gigatl1_surf.{tile:04}.nc"
def hisfiles(tile):
subd = get_subd(tile)
if subd > 0:
directory = dirhis.format(subd=subd)
files = sorted(glob.glob(f"{directory}/gigatl1_his.*.{tile:04}.nc"))
_dateindex = [int(f.split(".")[-3]) for f in files]
_hisindex = _dateindex[int(hour)//6]
filename = f"{directory}/gigatl1_his.{_hisindex:06}.{tile:04}.nc"
if not os.path.isfile(filename):
mount(subd)
return filename
else:
return ""
def get_subdmap(directory):
"""Reconstruct how netCDF files are stored in fused directory
directory == dirgrid | dirhis """
_subdmap = {}
for subd in subdomains:
fs = glob.glob(directory.format(subd=subd)+"/*.nc")
tiles = [int(f.split(".")[-2]) for f in fs]
for t in tiles:
_subdmap[t] = subd
return _subdmap
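# A minimal illustration (return values are hypothetical and depend on which
# archives are currently mounted): get_subdmap(dirhis) yields a dict mapping
# tile number -> subdomain index, e.g. {4021: 3, 4022: 3, 5210: 7, ...}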
def set_ratarmount():
mount = "ratarmount"
options = "" # "-c -gs 160000"
ratarmount = os.popen(f"which {mount}").read()
if len(ratarmount) > 0:
# remove the trailing "\n"
ratarmount = ratarmount[:-1]
print(f"found ratarmount in : {ratarmount}")
else:
if rank == 0:
print("")
print(BB("warning").center(20, "*"))
print(f"{mount} is not installed or cannot be found")
print("you can set it manually with")
print("giga.ratarmount = /path/to/bin/ratarmount")
return ratarmount
def mount_tar(source, tarfile, destdir):
"""
source: str, directory of the tar files
    tarfile: str, name of the tar file inside `source`
    destdir: str, mount point where the tar archive is fuse-mounted
"""
srcfile = f"{source}/{tarfile}"
#print(f"mount {srcfile} on {destdir}")
    assert os.path.isfile(srcfile), f"{srcfile} does not exist"
sqlitefile = get_sqlitefilename(srcfile)
home = os.path.expanduser("~")
ratardirsqlite = f"{home}/.ratarmount"
if os.path.isfile(f"{ratardirsqlite}/{sqlitefile}"):
# nothing to do
pass
else:
if os.path.isfile(f"{sqlitesdir}/{sqlitefile}"):
command = f"cp {sqlitesdir}/{sqlitefile} {ratardirsqlite}/"
os.system(command)
assert len(ratarmount) > 0, BB("You forgot to set the ratarmount path")
command = f"{ratarmount} {srcfile} {destdir}"
os.system(command)
if os.path.isfile(f"{sqlitesdir}/{sqlitefile}"):
# nothing to do
pass
else:
command = f"cp {ratardirsqlite}/{sqlitefile} {sqlitesdir}/"
os.system(command)
# delete sqlitefile on ratardirsqlite
# os.remove(f"{ratardirsqlite}/{sqlitefile}")
def mount(subd, grid=False, overwrite=True):
"""Mount tar file `subd`"""
if grid:
destdir = dirgrid.format(subd=subd)
srcdir = dirgridtar.format(subd=subd)
tarfile = targridtemplate.format(subd=subd)
else:
destdir = dirhis.format(subd=subd)
srcdir = dirgigaref.format(subd=subd)
tarfile = tarhistemplate.format(hisdate=hisdate, subd=subd)
tomount = True
if os.path.exists(destdir):
#print(f"{destdir} already exists")
if len(os.listdir(destdir)) == 0:
# folder is empty
pass
elif overwrite:
# folder is not empty but we want to overwrite it
# first let's unmount it
command = f"fusermount -u {destdir}"
try:
os.system(command)
except:
pass
#
assert len(os.listdir(f"{destdir}")) == 0
else:
tomount = False
else:
print(f"*** makedir {destdir}")
# os.makedirs(destdir)
if tomount:
mount_tar(srcdir, tarfile, destdir)
if not(grid):
write_toc(destdir, subd, hisdate)
def write_toc(destdir, subd, _hisdate):
with open(f"{destdir}/../hisdate_{subd:02}.txt", mode="w") as fid:
fid.write(_hisdate)
def read_toc(destdir, subd):
with open(f"{destdir}/../hisdate_{subd:02}.txt", mode="r") as fid:
return fid.read()
def mount_all(grid=False):
for subd in subdomains:
mount(subd, grid=grid)
def mount_stats(grid=False):
""" Print statistics on mounted tar files"""
print("-"*40)
print(BB("statistics on mounted tar files"))
print(f"mounting point: {dirmounted}")
for subd in subdomains:
if grid:
destdir = dirgrid.format(subd=subd)
else:
destdir = dirhis.format(subd=subd)
if os.path.exists(destdir):
filelist = os.listdir(f"{destdir}")
nbfiles = len(filelist)
if nbfiles > 0:
tiles = set([int(f.split(".")[-2]) for f in filelist])
nbtiles = len(tiles)
tile = list(tiles)[0]
fs = [f for f in filelist if f"{tile:04}.nc" in f]
if grid:
msg = f" - {subd:02} : {nbtiles:03} tiles"
else:
_hisdate = read_toc(destdir, subd)
# dateindex = sorted([int(f.split(".")[-3]) for f in fs])
# msg = f" - {subd:02} : {nbtiles:03} tiles x {dateindex} dateindex"
bbhisdate = BB(_hisdate)
msg = f" - {subd:02} : {bbhisdate} with {nbtiles:03} tiles"
else:
msg = f" - {subd:02} : empty"
else:
warning = BB("destroyed")
msg = f" - {subd:02} : {warning}"
print(msg)
def umount_all(grid=False):
for subd in subdomains:
umount(subd, grid=grid)
def umount(subd, grid=False):
""" Unmount `subd` tar archive folder
The command to unmount a fuse folder is fusermount -u"""
if grid:
destdir = dirgrid.format(subd=subd)
else:
destdir = dirhis.format(subd=subd)
if os.path.isdir(destdir) and len(os.listdir(f"{destdir}")) != 0:
command = f"fusermount -u {destdir}"
os.system(command)
#command = f"rmdir {destdir}"
# os.system(command)
else:
pass
#print(f"{destdir} is already umounted")
def get_sqlitefilename(tarfile):
sqlitefile = "_".join(tarfile.split("/"))+".index.sqlite"
return sqlitefile
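# Illustration of the naming scheme above (hypothetical path):
#   get_sqlitefilename("/path/to/archive.tar")
#   -> "_path_to_archive.tar.index.sqlite"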
def LLTP2domain(lowerleft, topright):
"""Convert the two pairs of (lower, left), (top, right) in (lat, lon)
into the four pairs of (lat, lon) of the corners """
xa, ya = lowerleft
xb, yb = topright
domain = [(xa, ya), (xa, yb), (xb, yb), (xb, ya)]
return domain
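# Worked example (hypothetical coordinates):
#   LLTP2domain((-60.0, 10.0), (-50.0, 20.0))
#   -> [(-60.0, 10.0), (-60.0, 20.0), (-50.0, 20.0), (-50.0, 10.0)]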
def find_tiles_inside(domain, corners):
"""Determine which tiles are inside `domain`
The function uses `corners` the list of corners for each tile
"""
p = Polygon(domain)
tileslist = []
for tile, c in corners.items():
q = Polygon(c)
if p.overlaps(q) or p.contains(q):
tileslist += [tile]
return tileslist
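# Usage sketch (hypothetical coordinates; `corners` is the dict loaded from
# data/giga_corners.pkl further below):
#   domain = LLTP2domain((-60.0, 10.0), (-50.0, 20.0))
#   tiles = find_tiles_inside(domain, corners)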
def get_dates():
"""
    Scan dirgigaref for *.tar files
"""
subd = 1
pattern = f"{dirgigaref}/*.{subd:02}.tar".format(subd=subd)
files = glob.glob(pattern)
_dates_tar = [f.split("/")[-1].split(".")[-3] for f in files]
#print(f"------SCAN GIGATL HIS databas --------")
# print("files:")
# print(files)
# print(glob.glob(dirgiga.format(subd=1)+"/*"))
return sorted(_dates_tar)
def get_hisindexes_in_histar(hisdate):
# select one subd
subd = 1
# select one tile in this subd
tile = [t for t, s in subdmap.items() if s == subd][0]
tarfile = "/".join([dirgiga, tarhistemplate])
tarfile = tarfile.format(hisdate=hisdate, subd=subd)
files = os.popen(f"tar tvf {tarfile} *{tile:04}.nc").readlines()
hisindexes = [int(f.split(".")[-3]) for f in files]
return hisindexes
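# Sketch of the expected output, following the hisindex note above
# (the "2008-09-19" archive holds indexes [96, 102, 108, 114]):
#   get_hisindexes_in_histar("2008-09-19")  # -> [96, 102, 108, 114]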
if hostname == "irene":
hisdates = get_dates()
else:
hisdates = ["2008-09-26"]
# try:
# umount_all()
# except:
# MPI.COMM_WORLD.Abort()
# corners and submap are stored in pickle files
with open(f"{dirdata}/giga_corners.pkl", "rb") as f:
corners = pickle.load(f)
assert len(corners) == 6582, "something is wrong with data/giga_corners.pkl"
with open(f"{dirdata}/giga_subdmap.pkl", "rb") as f:
subdmap = pickle.load(f)
assert len(subdmap) == 6582, "something is wrong with data/giga_subdmap.pkl"
with open(f"{dirdata}/gigaspecs.pkl", "rb") as f:
corners = pickle.load(f)
missing = pickle.load(f)
subdmap = pickle.load(f)
if False:
dirs = glob.glob(dirmounted+"/R*/HIS/??")
for d in dirs:
try:
command = f"fusermount -u {d}"
os.system(command)
except:
command = f"rm -Rf {d}"
os.system(command)
ratarmount = set_ratarmount()
barrier()
| 27.956989
| 97
| 0.587821
|
a0b7c1a805386bf7934c159cbfaf2c3e82c69450
| 305
|
py
|
Python
|
plugins/idaskins/__init__.py
|
fengjixuchui/IDASkins
|
c73272226ec44caf7c903cdb5bd33b84c277cd42
|
[
"MIT"
] | 934
|
2015-05-09T15:08:58.000Z
|
2022-03-25T11:35:55.000Z
|
plugins/idaskins/__init__.py
|
fengjixuchui/IDASkins
|
c73272226ec44caf7c903cdb5bd33b84c277cd42
|
[
"MIT"
] | 68
|
2015-06-12T16:10:13.000Z
|
2020-11-09T22:03:52.000Z
|
plugins/idaskins/__init__.py
|
fengjixuchui/IDASkins
|
c73272226ec44caf7c903cdb5bd33b84c277cd42
|
[
"MIT"
] | 173
|
2015-05-25T18:22:24.000Z
|
2022-03-25T11:35:57.000Z
|
from __future__ import absolute_import, print_function, division
import os
VERSION = 'v2.1.0'
PLUGIN_DIR = os.path.dirname(os.path.realpath(__file__))
IDA_DIR = os.path.abspath(os.path.join(PLUGIN_DIR, '..', '..'))
UI_DIR = os.path.join(PLUGIN_DIR, 'ui')
THEMES_DIR = os.path.join(PLUGIN_DIR, 'themes')
| 30.5
| 64
| 0.737705
|
2e2c7ca03ffb08e12554b1d575c7d8f2c22097e4
| 2,569
|
py
|
Python
|
local_dm_control_suite/demos/mocap_demo.py
|
huy-ha/dreamer-pytorch
|
98561a5fe4ee5323b955f5fc79bbebf483f08d58
|
[
"MIT"
] | 2
|
2021-10-31T05:12:19.000Z
|
2021-12-16T11:56:12.000Z
|
local_dm_control_suite/demos/mocap_demo.py
|
huy-ha/dreamer-pytorch
|
98561a5fe4ee5323b955f5fc79bbebf483f08d58
|
[
"MIT"
] | null | null | null |
local_dm_control_suite/demos/mocap_demo.py
|
huy-ha/dreamer-pytorch
|
98561a5fe4ee5323b955f5fc79bbebf483f08d58
|
[
"MIT"
] | null | null | null |
# Copyright 2017 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Demonstration of amc parsing for CMU mocap database.
To run the demo, supply a path to a `.amc` file:
python mocap_demo --filename='path/to/mocap.amc'
CMU motion capture clips are available at mocap.cs.cmu.edu
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
# Internal dependencies.
from absl import app
from absl import flags
from local_dm_control_suite import humanoid_CMU
from dm_control.suite.utils import parse_amc
import matplotlib.pyplot as plt
import numpy as np
FLAGS = flags.FLAGS
flags.DEFINE_string('filename', None, 'amc file to be converted.')
flags.DEFINE_integer('max_num_frames', 90,
'Maximum number of frames for plotting/playback')
def main(unused_argv):
env = humanoid_CMU.stand()
# Parse and convert specified clip.
converted = parse_amc.convert(FLAGS.filename,
env.physics, env.control_timestep())
max_frame = min(FLAGS.max_num_frames, converted.qpos.shape[1] - 1)
width = 480
height = 480
video = np.zeros((max_frame, height, 2 * width, 3), dtype=np.uint8)
for i in range(max_frame):
p_i = converted.qpos[:, i]
with env.physics.reset_context():
env.physics.data.qpos[:] = p_i
video[i] = np.hstack([env.physics.render(height, width, camera_id=0),
env.physics.render(height, width, camera_id=1)])
tic = time.time()
for i in range(max_frame):
if i == 0:
img = plt.imshow(video[i])
else:
img.set_data(video[i])
toc = time.time()
clock_dt = toc - tic
tic = time.time()
# Real-time playback not always possible as clock_dt > .03
plt.pause(max(0.01, 0.03 - clock_dt)) # Need min display time > 0.0.
plt.draw()
plt.waitforbuttonpress()
if __name__ == '__main__':
flags.mark_flag_as_required('filename')
app.run(main)
| 30.223529
| 78
| 0.681199
|
f9b656b57231051dd572b2676ad3941d6fa1d53f
| 1,215
|
py
|
Python
|
read.py
|
vahidtwo/uniproject
|
5ee1e847cfde4670db06f16e07af36b3034b4d9b
|
[
"MIT"
] | 1
|
2020-05-16T16:14:55.000Z
|
2020-05-16T16:14:55.000Z
|
read.py
|
vahidtwo/uniproject
|
5ee1e847cfde4670db06f16e07af36b3034b4d9b
|
[
"MIT"
] | null | null | null |
read.py
|
vahidtwo/uniproject
|
5ee1e847cfde4670db06f16e07af36b3034b4d9b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import socket
import RPi.GPIO as GPIO
import SimpleMFRC522
import time
reader = SimpleMFRC522.SimpleMFRC522()
while(1):
try:
id, text = reader.read()
print(1,id)
print(1,text)
text=text.replace(" ","")
if ((text==("vahid")or text.__contains__("ali"))and (id==(124031544076) or id==973252895679)):
try:
print(111)
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect(('192.168.88.213',16662))
print("connect")
s.send("rfidid:"+str(id)+",rfidtext:"+str(text))
s.close()
time.sleep(1)
except Exception as e:
print("socket not co",e)
else:
try:
print(00)
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect(('192.168.88.213',16662))
print("connect")
s.send("rfidid:"+str(id)+",rfidtext:"+str(text)+",allert")
s.close()
time.sleep(1)
except Exception as e:
print("socket not co",e)
except:
print("cant read")
| 32.837838
| 102
| 0.495473
|
40be95eed34d301c9d24650b472f6f6cf6639a71
| 1,092
|
py
|
Python
|
2019/tests/day2/test_day2_part2.py
|
niklind/advent-of-code
|
c5736e5ec9f830f4e80b962874d28360e3735674
|
[
"MIT"
] | null | null | null |
2019/tests/day2/test_day2_part2.py
|
niklind/advent-of-code
|
c5736e5ec9f830f4e80b962874d28360e3735674
|
[
"MIT"
] | null | null | null |
2019/tests/day2/test_day2_part2.py
|
niklind/advent-of-code
|
c5736e5ec9f830f4e80b962874d28360e3735674
|
[
"MIT"
] | null | null | null |
from typing import List
import day2.part2
import pytest
@pytest.mark.parametrize("program, expected",
[(
[1, 0, 0, 3, 1, 1, 2, 3, 1, 3, 4, 3, 1, 5, 0, 3, 2, 1, 13, 19, 1, 10, 19, 23, 2, 9, 23, 27, 1,
6, 27, 31, 1, 10, 31, 35, 1, 35, 10, 39, 1, 9, 39, 43, 1, 6, 43, 47, 1, 10, 47, 51, 1, 6, 51,
55, 2, 13, 55, 59, 1, 6, 59, 63, 1, 10, 63, 67, 2, 67, 9, 71, 1, 71, 5, 75, 1, 13, 75, 79, 2,
79, 13, 83, 1, 83, 9, 87, 2, 10, 87, 91, 2, 91, 6, 95, 2, 13, 95, 99, 1, 10, 99, 103, 2, 9,
103, 107, 1, 107, 5, 111, 2, 9, 111, 115, 1, 5, 115, 119, 1, 9, 119, 123, 2, 123, 6, 127, 1,
5, 127, 131, 1, 10, 131, 135, 1, 135, 6, 139, 1, 139, 5, 143, 1, 143, 9, 147, 1, 5, 147, 151,
1, 151, 13, 155, 1, 5, 155, 159, 1, 2, 159, 163, 1, 163, 6, 0, 99, 2, 0, 14, 0],
19690720)])
def test_part2(program, expected):
assert expected == day2.part2.int_code(program)[0]
| 57.473684
| 120
| 0.414835
|
568c763d94c8cdf30349584205445785c5864cc6
| 5,195
|
py
|
Python
|
analysis/utils/utils.py
|
Bwei112233/BuzzBlogBenchmark
|
3de0e8143e5035246792adea7fa3254ca7bcedb2
|
[
"Apache-2.0"
] | null | null | null |
analysis/utils/utils.py
|
Bwei112233/BuzzBlogBenchmark
|
3de0e8143e5035246792adea7fa3254ca7bcedb2
|
[
"Apache-2.0"
] | null | null | null |
analysis/utils/utils.py
|
Bwei112233/BuzzBlogBenchmark
|
3de0e8143e5035246792adea7fa3254ca7bcedb2
|
[
"Apache-2.0"
] | null | null | null |
import gzip
import os
import pandas as pd
import re
import sys
import tarfile
sys.path.append(os.path.abspath(os.path.join("..")))
from parsers.loadgen_parser import LoadgenParser
def get_node_names(experiment_dirname):
return [dirname
for dirname in os.listdir(os.path.join(os.path.dirname(__file__), "..", "data", experiment_dirname, "logs"))
if not dirname.startswith('.')]
def get_rpc_logfiles(experiment_dirname):
tarball_patterns = [
r"^apigateway.*\.tar\.gz$",
r"^.+_service.*\.tar\.gz$",
]
for node_name in get_node_names(experiment_dirname):
for tarball_name in os.listdir(os.path.join(os.path.dirname(__file__), "..", "data", experiment_dirname,
"logs", node_name)):
if sum([1 if re.match(tarball_pattern, tarball_name) else 0 for tarball_pattern in tarball_patterns]):
tarball_path = os.path.join(os.path.dirname(__file__), "..", "data", experiment_dirname, "logs",
node_name, tarball_name)
with tarfile.open(tarball_path, "r:gz") as tar:
for filename in tar.getnames():
if filename.endswith("calls.log"):
with tar.extractfile(filename) as logfile:
yield logfile
def get_query_logfiles(experiment_dirname):
tarball_patterns = [
r"^.+_service.*\.tar\.gz$",
]
for node_name in get_node_names(experiment_dirname):
for tarball_name in os.listdir(os.path.join(os.path.dirname(__file__), "..", "data", experiment_dirname,
"logs", node_name)):
if sum([1 if re.match(tarball_pattern, tarball_name) else 0 for tarball_pattern in tarball_patterns]):
tarball_path = os.path.join(os.path.dirname(__file__), "..", "data", experiment_dirname, "logs",
node_name, tarball_name)
with tarfile.open(tarball_path, "r:gz") as tar:
for filename in tar.getnames():
if filename.endswith("queries.log"):
with tar.extractfile(filename) as logfile:
yield logfile
def get_loadgen_logfiles(experiment_dirname):
for node_name in get_node_names(experiment_dirname):
for tarball_name in os.listdir(os.path.join(os.path.dirname(__file__), "..", "data", experiment_dirname,
"logs", node_name)):
if re.match(r"^loadgen.*\.tar\.gz$", tarball_name):
tarball_path = os.path.join(os.path.dirname(__file__), "..", "data", experiment_dirname, "logs",
node_name, tarball_name)
with tarfile.open(tarball_path, "r:gz") as tar:
for filename in tar.getnames():
if filename.endswith("loadgen.log"):
with tar.extractfile(filename) as logfile:
yield logfile
def get_collectl_cpu_logfiles(experiment_dirname):
for node_name in get_node_names(experiment_dirname):
tarball_path = os.path.join(os.path.dirname(__file__), "..", "data", experiment_dirname, "logs", node_name,
"collectl.tar.gz")
with tarfile.open(tarball_path, "r:gz") as tar:
for filename in tar.getnames():
if filename.endswith(".cpu.gz"):
with gzip.open(tar.extractfile(filename), "rt") as logfile:
yield (node_name, logfile)
def get_collectl_mem_logfiles(experiment_dirname):
for node_name in get_node_names(experiment_dirname):
tarball_path = os.path.join(os.path.dirname(__file__), "..", "data", experiment_dirname, "logs", node_name,
"collectl.tar.gz")
with tarfile.open(tarball_path, "r:gz") as tar:
for filename in tar.getnames():
if filename.endswith(".numa.gz"):
with gzip.open(tar.extractfile(filename), "rt") as logfile:
yield (node_name, logfile)
def get_collectl_dsk_logfiles(experiment_dirname):
for node_name in get_node_names(experiment_dirname):
tarball_path = os.path.join(os.path.dirname(__file__), "..", "data", experiment_dirname, "logs", node_name,
"collectl.tar.gz")
with tarfile.open(tarball_path, "r:gz") as tar:
for filename in tar.getnames():
if filename.endswith(".dsk.gz"):
with gzip.open(tar.extractfile(filename), "rt") as logfile:
yield (node_name, logfile)
def get_experiment_start_time(experiment_dirname):
requests = pd.concat([
pd.DataFrame.from_dict(LoadgenParser(logfile).parse())
for logfile in get_loadgen_logfiles(experiment_dirname)
], ignore_index=True)
return requests["timestamp"].values.min()
def get_experiment_end_time(experiment_dirname):
requests = pd.concat([
pd.DataFrame.from_dict(LoadgenParser(logfile).parse())
for logfile in get_loadgen_logfiles(experiment_dirname)
], ignore_index=True)
return requests["timestamp"].values.max()
| 45.570175
| 120
| 0.610395
|
0571888a4c33b6da397480f2bbdc01bccdc355c4
| 1,509
|
py
|
Python
|
dataset_specific/prostate/expts/prostate_bai.py
|
suhitaghosh10/UATS
|
fe295ca2e16e1b7404398b3b62e404778900d958
|
[
"MIT"
] | 11
|
2021-04-12T03:40:35.000Z
|
2022-02-02T13:47:13.000Z
|
dataset_specific/prostate/expts/prostate_bai.py
|
suhitaghosh10/UATS
|
fe295ca2e16e1b7404398b3b62e404778900d958
|
[
"MIT"
] | 1
|
2021-04-11T14:40:11.000Z
|
2021-04-11T14:40:11.000Z
|
dataset_specific/prostate/expts/prostate_bai.py
|
suhitaghosh10/UATS
|
fe295ca2e16e1b7404398b3b62e404778900d958
|
[
"MIT"
] | 1
|
2021-01-08T10:49:36.000Z
|
2021-01-08T10:49:36.000Z
|
import argparse
import os
import tensorflow as tf
from dataset_specific.prostate.model.bai import weighted_model
from train.semi_supervised.bai import train
from utility.config import get_metadata
from utility.constants import *
from utility.utils import cleanup
## Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('-g', '--gpu_num', type=str, default='0', help='GPU Number')
parser.add_argument('-f', '--fold_num', type=int, default=1, help='Fold Number')
parser.add_argument('-e', '--ens_folder_name', type=str, help='ensemble folder name')
parser.add_argument('-d', '--ds', type=str, default=PROSTATE_DATASET_NAME, help='dataset name')
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
try:
# fold_num = args.fold_num
# perc = args.perc
# temp_path = args.temp_path
# gpu_num = args.gpu_num
gpu_num = '0'
fold_num = 1
perc = 1.0
temp_path = 'sadv477'
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = gpu_num
metadata = get_metadata(args.ds)
# Build Model
wm = weighted_model()
train(None, None,
dataset_name=args.ds,
ens_folder_name=temp_path,
labelled_perc=perc,
fold_num=fold_num,
model_type=wm,
early_stop=False
)
finally:
if os.path.exists(metadata[m_root_temp_path] + temp_path):
cleanup(metadata[m_root_temp_path] + temp_path)
print('clean up done!')
| 30.18
| 95
| 0.695162
|
398d095d2f651c6c99822eda441f2ffe322b7c22
| 77
|
py
|
Python
|
Python/Tests/TestData/Grammar/FromFuture25.py
|
techkey/PTVS
|
8355e67eedd8e915ca49bd38a2f36172696fd903
|
[
"Apache-2.0"
] | 695
|
2019-05-06T23:49:37.000Z
|
2022-03-30T01:56:00.000Z
|
Python/Tests/TestData/Grammar/FromFuture25.py
|
techkey/PTVS
|
8355e67eedd8e915ca49bd38a2f36172696fd903
|
[
"Apache-2.0"
] | 1,672
|
2019-05-06T21:09:38.000Z
|
2022-03-31T23:16:04.000Z
|
Python/Tests/TestData/Grammar/FromFuture25.py
|
techkey/PTVS
|
8355e67eedd8e915ca49bd38a2f36172696fd903
|
[
"Apache-2.0"
] | 186
|
2019-05-13T03:17:37.000Z
|
2022-03-31T16:24:05.000Z
|
from __future__ import with_statement
from __future__ import absolute_import
| 25.666667
| 38
| 0.896104
|
ec4b41234acf82b72e06a3ae391b859ac979a741
| 38,676
|
py
|
Python
|
test/functional/feature_segwit.py
|
phlsolo316/vidcoin
|
d6eec232378c329ebc2a31e7d21acf58cf62368d
|
[
"MIT"
] | null | null | null |
test/functional/feature_segwit.py
|
phlsolo316/vidcoin
|
d6eec232378c329ebc2a31e7d21acf58cf62368d
|
[
"MIT"
] | null | null | null |
test/functional/feature_segwit.py
|
phlsolo316/vidcoin
|
d6eec232378c329ebc2a31e7d21acf58cf62368d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2016-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the SegWit changeover logic."""
from decimal import Decimal
from io import BytesIO
from test_framework.address import (
key_to_p2pkh,
program_to_witness,
script_to_p2sh,
script_to_p2sh_p2wsh,
script_to_p2wsh,
)
from test_framework.blocktools import witness_script, send_to_witness
from test_framework.messages import COIN, COutPoint, CTransaction, CTxIn, CTxOut, FromHex, sha256, ToHex
from test_framework.script import CScript, OP_HASH160, OP_CHECKSIG, OP_0, hash160, OP_EQUAL, OP_DUP, OP_EQUALVERIFY, OP_1, OP_2, OP_CHECKMULTISIG, OP_TRUE, OP_DROP
from test_framework.test_framework import VIDCoinTestFramework
from test_framework.util import (
assert_equal,
assert_is_hex_string,
assert_raises_rpc_error,
hex_str_to_bytes,
try_rpc,
)
NODE_0 = 0
NODE_2 = 2
P2WPKH = 0
P2WSH = 1
def getutxo(txid):
utxo = {}
utxo["vout"] = 0
utxo["txid"] = txid
return utxo
def find_spendable_utxo(node, min_value):
for utxo in node.listunspent(query_options={'minimumAmount': min_value}):
if utxo['spendable']:
return utxo
raise AssertionError("Unspent output equal or higher than %s not found" % min_value)
txs_mined = {} # txindex from txid to blockhash
class SegWitTest(VIDCoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
# This test tests SegWit both pre and post-activation, so use the normal BIP9 activation.
self.extra_args = [
[
"-acceptnonstdtxn=1",
"-rpcserialversion=0",
"-segwitheight=432",
"-addresstype=legacy",
],
[
"-acceptnonstdtxn=1",
"-rpcserialversion=1",
"-segwitheight=432",
"-addresstype=legacy",
],
[
"-acceptnonstdtxn=1",
"-segwitheight=432",
"-addresstype=legacy",
],
]
self.rpc_timeout = 120
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
super().setup_network()
self.connect_nodes(0, 2)
self.sync_all()
def success_mine(self, node, txid, sign, redeem_script=""):
send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script)
block = node.generate(1)
assert_equal(len(node.getblock(block[0])["tx"]), 2)
self.sync_blocks()
def skip_mine(self, node, txid, sign, redeem_script=""):
send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script)
block = node.generate(1)
assert_equal(len(node.getblock(block[0])["tx"]), 1)
self.sync_blocks()
def fail_accept(self, node, error_msg, txid, sign, redeem_script=""):
assert_raises_rpc_error(-26, error_msg, send_to_witness, use_p2wsh=1, node=node, utxo=getutxo(txid), pubkey=self.pubkey[0], encode_p2sh=False, amount=Decimal("49.998"), sign=sign, insert_redeem_script=redeem_script)
def run_test(self):
self.nodes[0].generate(161) # block 161
self.log.info("Verify sigops are counted in GBT with pre-BIP141 rules before the fork")
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
tmpl = self.nodes[0].getblocktemplate({'rules': ['segwit']})
assert tmpl['sizelimit'] == 1000000
assert 'weightlimit' not in tmpl
assert tmpl['sigoplimit'] == 20000
assert tmpl['transactions'][0]['hash'] == txid
assert tmpl['transactions'][0]['sigops'] == 2
assert '!segwit' not in tmpl['rules']
self.nodes[0].generate(1) # block 162
balance_presetup = self.nodes[0].getbalance()
self.pubkey = []
p2sh_ids = [] # p2sh_ids[NODE][TYPE] is an array of txids that spend to P2WPKH (TYPE=0) or P2WSH (TYPE=1) scripts to an address for NODE embedded in p2sh
wit_ids = [] # wit_ids[NODE][TYPE] is an array of txids that spend to P2WPKH (TYPE=0) or P2WSH (TYPE=1) scripts to an address for NODE via bare witness
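        # e.g. p2sh_ids[NODE_2][P2WSH] will end up holding the five txids that pay node 2 via P2SH(P2WSH)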
for i in range(3):
newaddress = self.nodes[i].getnewaddress()
self.pubkey.append(self.nodes[i].getaddressinfo(newaddress)["pubkey"])
multiscript = CScript([OP_1, hex_str_to_bytes(self.pubkey[-1]), OP_1, OP_CHECKMULTISIG])
p2sh_ms_addr = self.nodes[i].addmultisigaddress(1, [self.pubkey[-1]], '', 'p2sh-segwit')['address']
bip173_ms_addr = self.nodes[i].addmultisigaddress(1, [self.pubkey[-1]], '', 'bech32')['address']
assert_equal(p2sh_ms_addr, script_to_p2sh_p2wsh(multiscript))
assert_equal(bip173_ms_addr, script_to_p2wsh(multiscript))
p2sh_ids.append([])
wit_ids.append([])
for _ in range(2):
p2sh_ids[i].append([])
wit_ids[i].append([])
for _ in range(5):
for n in range(3):
for v in range(2):
wit_ids[n][v].append(send_to_witness(v, self.nodes[0], find_spendable_utxo(self.nodes[0], 50), self.pubkey[n], False, Decimal("49.999")))
p2sh_ids[n][v].append(send_to_witness(v, self.nodes[0], find_spendable_utxo(self.nodes[0], 50), self.pubkey[n], True, Decimal("49.999")))
self.nodes[0].generate(1) # block 163
self.sync_blocks()
# Make sure all nodes recognize the transactions as theirs
assert_equal(self.nodes[0].getbalance(), balance_presetup - 60 * 50 + 20 * Decimal("49.999") + 50)
assert_equal(self.nodes[1].getbalance(), 20 * Decimal("49.999"))
assert_equal(self.nodes[2].getbalance(), 20 * Decimal("49.999"))
self.nodes[0].generate(260) # block 423
self.sync_blocks()
self.log.info("Verify witness txs are skipped for mining before the fork")
self.skip_mine(self.nodes[2], wit_ids[NODE_2][P2WPKH][0], True) # block 424
self.skip_mine(self.nodes[2], wit_ids[NODE_2][P2WSH][0], True) # block 425
self.skip_mine(self.nodes[2], p2sh_ids[NODE_2][P2WPKH][0], True) # block 426
self.skip_mine(self.nodes[2], p2sh_ids[NODE_2][P2WSH][0], True) # block 427
self.log.info("Verify unsigned p2sh witness txs without a redeem script are invalid")
self.fail_accept(self.nodes[2], "mandatory-script-verify-flag-failed (Operation not valid with the current stack size)", p2sh_ids[NODE_2][P2WPKH][1], sign=False)
self.fail_accept(self.nodes[2], "mandatory-script-verify-flag-failed (Operation not valid with the current stack size)", p2sh_ids[NODE_2][P2WSH][1], sign=False)
self.nodes[2].generate(4) # blocks 428-431
self.log.info("Verify previous witness txs skipped for mining can now be mined")
assert_equal(len(self.nodes[2].getrawmempool()), 4)
blockhash = self.nodes[2].generate(1)[0] # block 432 (first block with new rules; 432 = 144 * 3)
self.sync_blocks()
assert_equal(len(self.nodes[2].getrawmempool()), 0)
segwit_tx_list = self.nodes[2].getblock(blockhash)["tx"]
assert_equal(len(segwit_tx_list), 5)
self.log.info("Verify default node can't accept txs with missing witness")
# unsigned, no scriptsig
self.fail_accept(self.nodes[0], "non-mandatory-script-verify-flag (Witness program hash mismatch)", wit_ids[NODE_0][P2WPKH][0], sign=False)
self.fail_accept(self.nodes[0], "non-mandatory-script-verify-flag (Witness program was passed an empty witness)", wit_ids[NODE_0][P2WSH][0], sign=False)
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag-failed (Operation not valid with the current stack size)", p2sh_ids[NODE_0][P2WPKH][0], sign=False)
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag-failed (Operation not valid with the current stack size)", p2sh_ids[NODE_0][P2WSH][0], sign=False)
# unsigned with redeem script
self.fail_accept(self.nodes[0], "non-mandatory-script-verify-flag (Witness program hash mismatch)", p2sh_ids[NODE_0][P2WPKH][0], sign=False, redeem_script=witness_script(False, self.pubkey[0]))
self.fail_accept(self.nodes[0], "non-mandatory-script-verify-flag (Witness program was passed an empty witness)", p2sh_ids[NODE_0][P2WSH][0], sign=False, redeem_script=witness_script(True, self.pubkey[0]))
self.log.info("Verify block and transaction serialization rpcs return differing serializations depending on rpc serialization flag")
assert self.nodes[2].getblock(blockhash, False) != self.nodes[0].getblock(blockhash, False)
assert self.nodes[1].getblock(blockhash, False) == self.nodes[2].getblock(blockhash, False)
for tx_id in segwit_tx_list:
tx = FromHex(CTransaction(), self.nodes[2].gettransaction(tx_id)["hex"])
assert self.nodes[2].getrawtransaction(tx_id, False, blockhash) != self.nodes[0].getrawtransaction(tx_id, False, blockhash)
assert self.nodes[1].getrawtransaction(tx_id, False, blockhash) == self.nodes[2].getrawtransaction(tx_id, False, blockhash)
assert self.nodes[0].getrawtransaction(tx_id, False, blockhash) != self.nodes[2].gettransaction(tx_id)["hex"]
assert self.nodes[1].getrawtransaction(tx_id, False, blockhash) == self.nodes[2].gettransaction(tx_id)["hex"]
assert self.nodes[0].getrawtransaction(tx_id, False, blockhash) == tx.serialize_without_witness().hex()
# Coinbase contains the witness commitment nonce, check that RPC shows us
coinbase_txid = self.nodes[2].getblock(blockhash)['tx'][0]
coinbase_tx = self.nodes[2].gettransaction(txid=coinbase_txid, verbose=True)
witnesses = coinbase_tx["decoded"]["vin"][0]["txinwitness"]
assert_equal(len(witnesses), 1)
assert_is_hex_string(witnesses[0])
assert_equal(witnesses[0], '00'*32)
self.log.info("Verify witness txs without witness data are invalid after the fork")
self.fail_accept(self.nodes[2], 'non-mandatory-script-verify-flag (Witness program hash mismatch)', wit_ids[NODE_2][P2WPKH][2], sign=False)
self.fail_accept(self.nodes[2], 'non-mandatory-script-verify-flag (Witness program was passed an empty witness)', wit_ids[NODE_2][P2WSH][2], sign=False)
self.fail_accept(self.nodes[2], 'non-mandatory-script-verify-flag (Witness program hash mismatch)', p2sh_ids[NODE_2][P2WPKH][2], sign=False, redeem_script=witness_script(False, self.pubkey[2]))
self.fail_accept(self.nodes[2], 'non-mandatory-script-verify-flag (Witness program was passed an empty witness)', p2sh_ids[NODE_2][P2WSH][2], sign=False, redeem_script=witness_script(True, self.pubkey[2]))
self.log.info("Verify default node can now use witness txs")
self.success_mine(self.nodes[0], wit_ids[NODE_0][P2WPKH][0], True) # block 432
self.success_mine(self.nodes[0], wit_ids[NODE_0][P2WSH][0], True) # block 433
self.success_mine(self.nodes[0], p2sh_ids[NODE_0][P2WPKH][0], True) # block 434
self.success_mine(self.nodes[0], p2sh_ids[NODE_0][P2WSH][0], True) # block 435
self.log.info("Verify sigops are counted in GBT with BIP141 rules after the fork")
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
tmpl = self.nodes[0].getblocktemplate({'rules': ['segwit']})
assert tmpl['sizelimit'] >= 3999577 # actual maximum size is lower due to minimum mandatory non-witness data
assert tmpl['weightlimit'] == 4000000
assert tmpl['sigoplimit'] == 80000
assert tmpl['transactions'][0]['txid'] == txid
assert tmpl['transactions'][0]['sigops'] == 8
assert '!segwit' in tmpl['rules']
self.nodes[0].generate(1) # Mine a block to clear the gbt cache
self.log.info("Non-segwit miners are able to use GBT response after activation.")
# Create a 3-tx chain: tx1 (non-segwit input, paying to a segwit output) ->
# tx2 (segwit input, paying to a non-segwit output) ->
# tx3 (non-segwit input, paying to a non-segwit output).
# tx1 is allowed to appear in the block, but no others.
txid1 = send_to_witness(1, self.nodes[0], find_spendable_utxo(self.nodes[0], 50), self.pubkey[0], False, Decimal("49.996"))
hex_tx = self.nodes[0].gettransaction(txid)['hex']
tx = FromHex(CTransaction(), hex_tx)
assert tx.wit.is_null() # This should not be a segwit input
assert txid1 in self.nodes[0].getrawmempool()
tx1_hex = self.nodes[0].gettransaction(txid1)['hex']
tx1 = FromHex(CTransaction(), tx1_hex)
# Check that wtxid is properly reported in mempool entry (txid1)
assert_equal(int(self.nodes[0].getmempoolentry(txid1)["wtxid"], 16), tx1.calc_sha256(True))
# Check that weight and vsize are properly reported in mempool entry (txid1)
assert_equal(self.nodes[0].getmempoolentry(txid1)["vsize"], (self.nodes[0].getmempoolentry(txid1)["weight"] + 3) // 4)
assert_equal(self.nodes[0].getmempoolentry(txid1)["weight"], len(tx1.serialize_without_witness())*3 + len(tx1.serialize_with_witness()))
# Now create tx2, which will spend from txid1.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(int(txid1, 16), 0), b''))
tx.vout.append(CTxOut(int(49.99 * COIN), CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx2_hex = self.nodes[0].signrawtransactionwithwallet(ToHex(tx))['hex']
txid2 = self.nodes[0].sendrawtransaction(tx2_hex)
tx = FromHex(CTransaction(), tx2_hex)
assert not tx.wit.is_null()
# Check that wtxid is properly reported in mempool entry (txid2)
assert_equal(int(self.nodes[0].getmempoolentry(txid2)["wtxid"], 16), tx.calc_sha256(True))
# Check that weight and vsize are properly reported in mempool entry (txid2)
assert_equal(self.nodes[0].getmempoolentry(txid2)["vsize"], (self.nodes[0].getmempoolentry(txid2)["weight"] + 3) // 4)
assert_equal(self.nodes[0].getmempoolentry(txid2)["weight"], len(tx.serialize_without_witness())*3 + len(tx.serialize_with_witness()))
# Now create tx3, which will spend from txid2
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(int(txid2, 16), 0), b""))
tx.vout.append(CTxOut(int(49.95 * COIN), CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))) # Huge fee
tx.calc_sha256()
txid3 = self.nodes[0].sendrawtransaction(hexstring=ToHex(tx), maxfeerate=0)
assert tx.wit.is_null()
assert txid3 in self.nodes[0].getrawmempool()
# Check that getblocktemplate includes all transactions.
template = self.nodes[0].getblocktemplate({"rules": ["segwit"]})
template_txids = [t['txid'] for t in template['transactions']]
assert txid1 in template_txids
assert txid2 in template_txids
assert txid3 in template_txids
# Check that wtxid is properly reported in mempool entry (txid3)
assert_equal(int(self.nodes[0].getmempoolentry(txid3)["wtxid"], 16), tx.calc_sha256(True))
# Check that weight and vsize are properly reported in mempool entry (txid3)
assert_equal(self.nodes[0].getmempoolentry(txid3)["vsize"], (self.nodes[0].getmempoolentry(txid3)["weight"] + 3) // 4)
assert_equal(self.nodes[0].getmempoolentry(txid3)["weight"], len(tx.serialize_without_witness())*3 + len(tx.serialize_with_witness()))
# Mine a block to clear the gbt cache again.
self.nodes[0].generate(1)
self.log.info("Verify behaviour of importaddress and listunspent")
# Some public keys to be used later
pubkeys = [
"0363D44AABD0F1699138239DF2F042C3282C0671CC7A76826A55C8203D90E39242", # cPiM8Ub4heR9NBYmgVzJQiUH1if44GSBGiqaeJySuL2BKxubvgwb
"02D3E626B3E616FC8662B489C123349FECBFC611E778E5BE739B257EAE4721E5BF", # cPpAdHaD6VoYbW78kveN2bsvb45Q7G5PhaPApVUGwvF8VQ9brD97
"04A47F2CBCEFFA7B9BCDA184E7D5668D3DA6F9079AD41E422FA5FD7B2D458F2538A62F5BD8EC85C2477F39650BD391EA6250207065B2A81DA8B009FC891E898F0E", # 91zqCU5B9sdWxzMt1ca3VzbtVm2YM6Hi5Rxn4UDtxEaN9C9nzXV
"02A47F2CBCEFFA7B9BCDA184E7D5668D3DA6F9079AD41E422FA5FD7B2D458F2538", # cPQFjcVRpAUBG8BA9hzr2yEzHwKoMgLkJZBBtK9vJnvGJgMjzTbd
"036722F784214129FEB9E8129D626324F3F6716555B603FFE8300BBCB882151228", # cQGtcm34xiLjB1v7bkRa4V3aAc9tS2UTuBZ1UnZGeSeNy627fN66
"0266A8396EE936BF6D99D17920DB21C6C7B1AB14C639D5CD72B300297E416FD2EC", # cTW5mR5M45vHxXkeChZdtSPozrFwFgmEvTNnanCW6wrqwaCZ1X7K
"0450A38BD7F0AC212FEBA77354A9B036A32E0F7C81FC4E0C5ADCA7C549C4505D2522458C2D9AE3CEFD684E039194B72C8A10F9CB9D4764AB26FCC2718D421D3B84", # 92h2XPssjBpsJN5CqSP7v9a7cf2kgDunBC6PDFwJHMACM1rrVBJ
]
# Import a compressed key and an uncompressed key, generate some multisig addresses
self.nodes[0].importprivkey("92e6XLo5jVAVwrQKPNTs93oQco8f8sDNBcpv73Dsrs397fQtFQn")
uncompressed_spendable_address = ["mvozP4UwyGD2mGZU4D2eMvMLPB9WkMmMQu"]
self.nodes[0].importprivkey("cNC8eQ5dg3mFAVePDX4ddmPYpPbw41r9bm2jd1nLJT77e6RrzTRR")
compressed_spendable_address = ["mmWQubrDomqpgSYekvsU7HWEVjLFHAakLe"]
assert not self.nodes[0].getaddressinfo(uncompressed_spendable_address[0])['iscompressed']
assert self.nodes[0].getaddressinfo(compressed_spendable_address[0])['iscompressed']
self.nodes[0].importpubkey(pubkeys[0])
compressed_solvable_address = [key_to_p2pkh(pubkeys[0])]
self.nodes[0].importpubkey(pubkeys[1])
compressed_solvable_address.append(key_to_p2pkh(pubkeys[1]))
self.nodes[0].importpubkey(pubkeys[2])
uncompressed_solvable_address = [key_to_p2pkh(pubkeys[2])]
spendable_anytime = [] # These outputs should be seen anytime after importprivkey and addmultisigaddress
spendable_after_importaddress = [] # These outputs should be seen after importaddress
solvable_after_importaddress = [] # These outputs should be seen after importaddress but not spendable
unsolvable_after_importaddress = [] # These outputs should be unsolvable after importaddress
solvable_anytime = [] # These outputs should be solvable after importpubkey
unseen_anytime = [] # These outputs should never be seen
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], compressed_spendable_address[0]])['address'])
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], uncompressed_spendable_address[0]])['address'])
compressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_spendable_address[0]])['address'])
uncompressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], uncompressed_solvable_address[0]])['address'])
compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_solvable_address[0]])['address'])
compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_solvable_address[0], compressed_solvable_address[1]])['address'])
# Test multisig_without_privkey
# We have 2 public keys without private keys, use addmultisigaddress to add to wallet.
# Money sent to P2SH of multisig of this should only be seen after importaddress with the BASE58 P2SH address.
multisig_without_privkey_address = self.nodes[0].addmultisigaddress(2, [pubkeys[3], pubkeys[4]])['address']
script = CScript([OP_2, hex_str_to_bytes(pubkeys[3]), hex_str_to_bytes(pubkeys[4]), OP_2, OP_CHECKMULTISIG])
solvable_after_importaddress.append(CScript([OP_HASH160, hash160(script), OP_EQUAL]))
for i in compressed_spendable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# p2sh multisig with compressed keys should always be spendable
spendable_anytime.extend([p2sh])
# bare multisig can be watched and signed, but is not treated as ours
solvable_after_importaddress.extend([bare])
# P2WSH and P2SH(P2WSH) multisig with compressed keys are spendable after direct importaddress
spendable_after_importaddress.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH and P2PK with compressed keys should always be spendable
spendable_anytime.extend([p2pkh, p2pk])
# P2SH_P2PK, P2SH_P2PKH with compressed keys are spendable after direct importaddress
spendable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
# P2WPKH and P2SH_P2WPKH with compressed keys should always be spendable
spendable_anytime.extend([p2wpkh, p2sh_p2wpkh])
for i in uncompressed_spendable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# p2sh multisig with uncompressed keys should always be spendable
spendable_anytime.extend([p2sh])
# bare multisig can be watched and signed, but is not treated as ours
solvable_after_importaddress.extend([bare])
# P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
unseen_anytime.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH and P2PK with uncompressed keys should always be spendable
spendable_anytime.extend([p2pkh, p2pk])
# P2SH_P2PK and P2SH_P2PKH are spendable after direct importaddress
spendable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh])
# Witness output types with uncompressed keys are never seen
unseen_anytime.extend([p2wpkh, p2sh_p2wpkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
for i in compressed_solvable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
# Multisig without private is not seen after addmultisigaddress, but seen after importaddress
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
solvable_after_importaddress.extend([bare, p2sh, p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH, P2PK, P2WPKH and P2SH_P2WPKH with compressed keys should always be seen
solvable_anytime.extend([p2pkh, p2pk, p2wpkh, p2sh_p2wpkh])
# P2SH_P2PK, P2SH_P2PKH with compressed keys are seen after direct importaddress
solvable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
for i in uncompressed_solvable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# Base uncompressed multisig without private is not seen after addmultisigaddress, but seen after importaddress
solvable_after_importaddress.extend([bare, p2sh])
# P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
unseen_anytime.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH and P2PK with uncompressed keys should always be seen
solvable_anytime.extend([p2pkh, p2pk])
# P2SH_P2PK, P2SH_P2PKH with uncompressed keys are seen after direct importaddress
solvable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh])
# Witness output types with uncompressed keys are never seen
unseen_anytime.extend([p2wpkh, p2sh_p2wpkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
op1 = CScript([OP_1])
op0 = CScript([OP_0])
# 2N7MGY19ti4KDMSzRfPAssP6Pxyuxoi6jLe is the P2SH(P2PKH) version of mjoE3sSrb8ByYEvgnC3Aox86u1CHnfJA4V
unsolvable_address_key = hex_str_to_bytes("02341AEC7587A51CDE5279E0630A531AEA2615A9F80B17E8D9376327BAEAA59E3D")
unsolvablep2pkh = CScript([OP_DUP, OP_HASH160, hash160(unsolvable_address_key), OP_EQUALVERIFY, OP_CHECKSIG])
unsolvablep2wshp2pkh = CScript([OP_0, sha256(unsolvablep2pkh)])
p2shop0 = CScript([OP_HASH160, hash160(op0), OP_EQUAL])
p2wshop1 = CScript([OP_0, sha256(op1)])
unsolvable_after_importaddress.append(unsolvablep2pkh)
unsolvable_after_importaddress.append(unsolvablep2wshp2pkh)
unsolvable_after_importaddress.append(op1) # OP_1 will be imported as script
unsolvable_after_importaddress.append(p2wshop1)
unseen_anytime.append(op0) # OP_0 will be imported as P2SH address with no script provided
unsolvable_after_importaddress.append(p2shop0)
spendable_txid = []
solvable_txid = []
spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime, 1))
self.mine_and_test_listunspent(spendable_after_importaddress + solvable_after_importaddress + unseen_anytime + unsolvable_after_importaddress, 0)
importlist = []
for i in compressed_spendable_address + uncompressed_spendable_address + compressed_solvable_address + uncompressed_solvable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
bare = hex_str_to_bytes(v['hex'])
importlist.append(bare.hex())
importlist.append(CScript([OP_0, sha256(bare)]).hex())
else:
pubkey = hex_str_to_bytes(v['pubkey'])
p2pk = CScript([pubkey, OP_CHECKSIG])
p2pkh = CScript([OP_DUP, OP_HASH160, hash160(pubkey), OP_EQUALVERIFY, OP_CHECKSIG])
importlist.append(p2pk.hex())
importlist.append(p2pkh.hex())
importlist.append(CScript([OP_0, hash160(pubkey)]).hex())
importlist.append(CScript([OP_0, sha256(p2pk)]).hex())
importlist.append(CScript([OP_0, sha256(p2pkh)]).hex())
importlist.append(unsolvablep2pkh.hex())
importlist.append(unsolvablep2wshp2pkh.hex())
importlist.append(op1.hex())
importlist.append(p2wshop1.hex())
for i in importlist:
# import all generated addresses. The wallet already has the private keys for some of these, so catch JSON RPC
# exceptions and continue.
try_rpc(-4, "The wallet already contains the private key for this address or script", self.nodes[0].importaddress, i, "", False, True)
self.nodes[0].importaddress(script_to_p2sh(op0)) # import OP_0 as address only
self.nodes[0].importaddress(multisig_without_privkey_address) # Test multisig_without_privkey
spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime + spendable_after_importaddress, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime + solvable_after_importaddress, 1))
self.mine_and_test_listunspent(unsolvable_after_importaddress, 1)
self.mine_and_test_listunspent(unseen_anytime, 0)
spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime + spendable_after_importaddress, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime + solvable_after_importaddress, 1))
self.mine_and_test_listunspent(unsolvable_after_importaddress, 1)
self.mine_and_test_listunspent(unseen_anytime, 0)
# Repeat some tests. This time we don't add witness scripts with importaddress
# Import a compressed key and an uncompressed key, generate some multisig addresses
self.nodes[0].importprivkey("927pw6RW8ZekycnXqBQ2JS5nPyo1yRfGNN8oq74HeddWSpafDJH")
uncompressed_spendable_address = ["mguN2vNSCEUh6rJaXoAVwY3YZwZvEmf5xi"]
self.nodes[0].importprivkey("cMcrXaaUC48ZKpcyydfFo8PxHAjpsYLhdsp6nmtB3E2ER9UUHWnw")
compressed_spendable_address = ["n1UNmpmbVUJ9ytXYXiurmGPQ3TRrXqPWKL"]
self.nodes[0].importpubkey(pubkeys[5])
compressed_solvable_address = [key_to_p2pkh(pubkeys[5])]
self.nodes[0].importpubkey(pubkeys[6])
uncompressed_solvable_address = [key_to_p2pkh(pubkeys[6])]
unseen_anytime = [] # These outputs should never be seen
solvable_anytime = [] # These outputs should be solvable after importpubkey
unseen_anytime = [] # These outputs should never be seen
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], compressed_spendable_address[0]])['address'])
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], uncompressed_spendable_address[0]])['address'])
compressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_spendable_address[0]])['address'])
uncompressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_solvable_address[0], uncompressed_solvable_address[0]])['address'])
compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_solvable_address[0]])['address'])
premature_witaddress = []
for i in compressed_spendable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
premature_witaddress.append(script_to_p2sh(p2wsh))
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# P2WPKH, P2SH_P2WPKH are always spendable
spendable_anytime.extend([p2wpkh, p2sh_p2wpkh])
for i in uncompressed_spendable_address + uncompressed_solvable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
unseen_anytime.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# P2WPKH, P2SH_P2WPKH with uncompressed keys are never seen
unseen_anytime.extend([p2wpkh, p2sh_p2wpkh])
for i in compressed_solvable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
premature_witaddress.append(script_to_p2sh(p2wsh))
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# P2WPKH and P2SH_P2WPKH with compressed keys are always solvable
solvable_anytime.extend([p2wpkh, p2sh_p2wpkh])
self.mine_and_test_listunspent(spendable_anytime, 2)
self.mine_and_test_listunspent(solvable_anytime, 1)
self.mine_and_test_listunspent(unseen_anytime, 0)
# Check that createrawtransaction/decoderawtransaction with non-v0 Bech32 works
v1_addr = program_to_witness(1, [3, 5])
v1_tx = self.nodes[0].createrawtransaction([getutxo(spendable_txid[0])], {v1_addr: 1})
v1_decoded = self.nodes[1].decoderawtransaction(v1_tx)
assert_equal(v1_decoded['vout'][0]['scriptPubKey']['addresses'][0], v1_addr)
assert_equal(v1_decoded['vout'][0]['scriptPubKey']['hex'], "51020305")
# Check that spendable outputs are really spendable
self.create_and_mine_tx_from_txids(spendable_txid)
# import all the private keys so solvable addresses become spendable
self.nodes[0].importprivkey("cPiM8Ub4heR9NBYmgVzJQiUH1if44GSBGiqaeJySuL2BKxubvgwb")
self.nodes[0].importprivkey("cPpAdHaD6VoYbW78kveN2bsvb45Q7G5PhaPApVUGwvF8VQ9brD97")
self.nodes[0].importprivkey("91zqCU5B9sdWxzMt1ca3VzbtVm2YM6Hi5Rxn4UDtxEaN9C9nzXV")
self.nodes[0].importprivkey("cPQFjcVRpAUBG8BA9hzr2yEzHwKoMgLkJZBBtK9vJnvGJgMjzTbd")
self.nodes[0].importprivkey("cQGtcm34xiLjB1v7bkRa4V3aAc9tS2UTuBZ1UnZGeSeNy627fN66")
self.nodes[0].importprivkey("cTW5mR5M45vHxXkeChZdtSPozrFwFgmEvTNnanCW6wrqwaCZ1X7K")
self.create_and_mine_tx_from_txids(solvable_txid)
# Test that importing native P2WPKH/P2WSH scripts works
for use_p2wsh in [False, True]:
if use_p2wsh:
scriptPubKey = "00203a59f3f56b713fdcf5d1a57357f02c44342cbf306ffe0c4741046837bf90561a"
transaction = "01000000000100e1f505000000002200203a59f3f56b713fdcf5d1a57357f02c44342cbf306ffe0c4741046837bf90561a00000000"
else:
scriptPubKey = "a9142f8c469c2f0084c48e11f998ffbe7efa7549f26d87"
transaction = "01000000000100e1f5050000000017a9142f8c469c2f0084c48e11f998ffbe7efa7549f26d8700000000"
self.nodes[1].importaddress(scriptPubKey, "", False)
rawtxfund = self.nodes[1].fundrawtransaction(transaction)['hex']
rawtxfund = self.nodes[1].signrawtransactionwithwallet(rawtxfund)["hex"]
txid = self.nodes[1].sendrawtransaction(rawtxfund)
assert_equal(self.nodes[1].gettransaction(txid, True)["txid"], txid)
assert_equal(self.nodes[1].listtransactions("*", 1, 0, True)[0]["txid"], txid)
# Assert it is properly saved
self.restart_node(1)
assert_equal(self.nodes[1].gettransaction(txid, True)["txid"], txid)
assert_equal(self.nodes[1].listtransactions("*", 1, 0, True)[0]["txid"], txid)
def mine_and_test_listunspent(self, script_list, ismine):
utxo = find_spendable_utxo(self.nodes[0], 50)
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(int('0x' + utxo['txid'], 0), utxo['vout'])))
for i in script_list:
tx.vout.append(CTxOut(10000000, i))
tx.rehash()
signresults = self.nodes[0].signrawtransactionwithwallet(tx.serialize_without_witness().hex())['hex']
txid = self.nodes[0].sendrawtransaction(hexstring=signresults, maxfeerate=0)
txs_mined[txid] = self.nodes[0].generate(1)[0]
self.sync_blocks()
watchcount = 0
spendcount = 0
for i in self.nodes[0].listunspent():
if (i['txid'] == txid):
watchcount += 1
if i['spendable']:
spendcount += 1
if (ismine == 2):
assert_equal(spendcount, len(script_list))
elif (ismine == 1):
assert_equal(watchcount, len(script_list))
assert_equal(spendcount, 0)
else:
assert_equal(watchcount, 0)
return txid
def p2sh_address_to_script(self, v):
bare = CScript(hex_str_to_bytes(v['hex']))
p2sh = CScript(hex_str_to_bytes(v['scriptPubKey']))
p2wsh = CScript([OP_0, sha256(bare)])
p2sh_p2wsh = CScript([OP_HASH160, hash160(p2wsh), OP_EQUAL])
return([bare, p2sh, p2wsh, p2sh_p2wsh])
def p2pkh_address_to_script(self, v):
pubkey = hex_str_to_bytes(v['pubkey'])
p2wpkh = CScript([OP_0, hash160(pubkey)])
p2sh_p2wpkh = CScript([OP_HASH160, hash160(p2wpkh), OP_EQUAL])
p2pk = CScript([pubkey, OP_CHECKSIG])
p2pkh = CScript(hex_str_to_bytes(v['scriptPubKey']))
p2sh_p2pk = CScript([OP_HASH160, hash160(p2pk), OP_EQUAL])
p2sh_p2pkh = CScript([OP_HASH160, hash160(p2pkh), OP_EQUAL])
p2wsh_p2pk = CScript([OP_0, sha256(p2pk)])
p2wsh_p2pkh = CScript([OP_0, sha256(p2pkh)])
p2sh_p2wsh_p2pk = CScript([OP_HASH160, hash160(p2wsh_p2pk), OP_EQUAL])
p2sh_p2wsh_p2pkh = CScript([OP_HASH160, hash160(p2wsh_p2pkh), OP_EQUAL])
return [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh]
def create_and_mine_tx_from_txids(self, txids, success=True):
tx = CTransaction()
for i in txids:
txtmp = CTransaction()
txraw = self.nodes[0].getrawtransaction(i, 0, txs_mined[i])
f = BytesIO(hex_str_to_bytes(txraw))
txtmp.deserialize(f)
for j in range(len(txtmp.vout)):
tx.vin.append(CTxIn(COutPoint(int('0x' + i, 0), j)))
tx.vout.append(CTxOut(0, CScript()))
tx.rehash()
signresults = self.nodes[0].signrawtransactionwithwallet(tx.serialize_without_witness().hex())['hex']
self.nodes[0].sendrawtransaction(hexstring=signresults, maxfeerate=0)
self.nodes[0].generate(1)
self.sync_blocks()
if __name__ == '__main__':
SegWitTest().main()
| 61.390476
| 223
| 0.682827
|
fa01cb5467f7626906d3c7a040f063a82e40b43f
| 12,296
|
py
|
Python
|
cinder/api/v3/attachments.py
|
Boye-Z/cinder
|
2a959e6645379842880373dd9aad4d5ff3b6fd02
|
[
"Apache-2.0"
] | null | null | null |
cinder/api/v3/attachments.py
|
Boye-Z/cinder
|
2a959e6645379842880373dd9aad4d5ff3b6fd02
|
[
"Apache-2.0"
] | null | null | null |
cinder/api/v3/attachments.py
|
Boye-Z/cinder
|
2a959e6645379842880373dd9aad4d5ff3b6fd02
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The volumes attachments API."""
from http import HTTPStatus
from oslo_log import log as logging
import webob
from cinder.api import api_utils
from cinder.api import common
from cinder.api import microversions as mv
from cinder.api.openstack import wsgi
from cinder.api.schemas import attachments as attachment
from cinder.api.v3.views import attachments as attachment_views
from cinder.api import validation
from cinder import context as cinder_context
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.objects import fields
from cinder.policies import attachments as attachment_policy
from cinder.volume import api as volume_api
from cinder.volume import volume_utils
LOG = logging.getLogger(__name__)
class AttachmentsController(wsgi.Controller):
"""The Attachments API controller for the OpenStack API."""
_view_builder_class = attachment_views.ViewBuilder
allowed_filters = {'volume_id', 'status', 'instance_id', 'attach_status'}
def __init__(self, ext_mgr=None):
"""Initialize controller class."""
self.volume_api = volume_api.API()
self.ext_mgr = ext_mgr
super(AttachmentsController, self).__init__()
@wsgi.Controller.api_version(mv.NEW_ATTACH)
def show(self, req, id):
"""Return data about the given attachment."""
context = req.environ['cinder.context']
attachment = objects.VolumeAttachment.get_by_id(context, id)
volume = objects.Volume.get_by_id(cinder_context.get_admin_context(),
attachment.volume_id)
if volume.admin_metadata and 'format' in volume.admin_metadata:
attachment.connection_info['format'] = (
volume.admin_metadata['format'])
return attachment_views.ViewBuilder.detail(attachment)
@wsgi.Controller.api_version(mv.NEW_ATTACH)
def index(self, req):
"""Return a summary list of attachments."""
attachments = self._items(req)
return attachment_views.ViewBuilder.list(attachments)
@wsgi.Controller.api_version(mv.NEW_ATTACH)
def detail(self, req):
"""Return a detailed list of attachments."""
attachments = self._items(req)
return attachment_views.ViewBuilder.list(attachments, detail=True)
@common.process_general_filtering('attachment')
def _process_attachment_filtering(self, context=None, filters=None,
req_version=None):
api_utils.remove_invalid_filter_options(context, filters,
self.allowed_filters)
def _items(self, req):
"""Return a list of attachments, transformed through view builder."""
context = req.environ['cinder.context']
req_version = req.api_version_request
# Pop out non search_opts and create local variables
search_opts = req.GET.copy()
sort_keys, sort_dirs = common.get_sort_params(search_opts)
marker, limit, offset = common.get_pagination_params(search_opts)
self._process_attachment_filtering(context=context,
filters=search_opts,
req_version=req_version)
if search_opts.get('instance_id', None):
search_opts['instance_uuid'] = search_opts.pop('instance_id', None)
if context.is_admin and 'all_tenants' in search_opts:
del search_opts['all_tenants']
return objects.VolumeAttachmentList.get_all(
context, search_opts=search_opts, marker=marker, limit=limit,
offset=offset, sort_keys=sort_keys, sort_direction=sort_dirs)
else:
return objects.VolumeAttachmentList.get_all_by_project(
context, context.project_id, search_opts=search_opts,
marker=marker, limit=limit, offset=offset, sort_keys=sort_keys,
sort_direction=sort_dirs)
@wsgi.Controller.api_version(mv.NEW_ATTACH)
@wsgi.response(HTTPStatus.OK)
@validation.schema(attachment.create)
def create(self, req, body):
"""Create an attachment.
This method can be used to create an empty attachment (reserve) or to
create and initialize a volume attachment based on the provided input
parameters.
If the caller does not yet have the connector information but needs to
reserve an attachment for the volume (i.e. Nova boot from volume), the
create can be called with just the volume-uuid and the server
identifier. This will reserve an attachment, mark the volume as
reserved and prevent any new attachment_create calls from being made
until the attachment is updated (completed).
The alternative is that the connection can be reserved and initialized
all at once with a single call if the caller has all of the required
information (connector data) at the time of the call.
NOTE: In Nova terms server == instance; the server_id parameter
referenced below is the UUID of the instance. For non-Nova consumers
this can be a server UUID or some other arbitrary unique identifier.
Expected format of the input parameter 'body':
.. code-block:: json
{
"attachment":
{
"volume_uuid": "volume-uuid",
"instance_uuid": "nova-server-uuid",
"connector": "null|<connector-object>"
}
}
Example connector:
.. code-block:: json
{
"connector":
{
"initiator": "iqn.1993-08.org.debian:01:cad181614cec",
"ip": "192.168.1.20",
"platform": "x86_64",
"host": "tempest-1",
"os_type": "linux2",
"multipath": false,
"mountpoint": "/dev/vdb",
"mode": "null|rw|ro"
}
}
NOTE: all that's required for a reserve is a volume_uuid and an
instance_uuid; a minimal example body is sketched just below this docstring.
returns: A summary view of the attachment object
"""
context = req.environ['cinder.context']
instance_uuid = body['attachment']['instance_uuid']
volume_uuid = body['attachment']['volume_uuid']
volume_ref = objects.Volume.get_by_id(
context,
volume_uuid)
args = {'connector': body['attachment'].get('connector', None)}
if req.api_version_request.matches(mv.ATTACHMENT_CREATE_MODE_ARG):
# We check for attach_mode here and default to `null`
# if nothing's provided. This seems odd to not just
# set `rw`, BUT we want to keep compatibility with
# setting the mode via the connector for now, so we
# use `null` as an identifier to distinguish that case
args['attach_mode'] = body['attachment'].get('mode', 'null')
err_msg = None
try:
attachment_ref = (
self.volume_api.attachment_create(context,
volume_ref,
instance_uuid,
**args))
except (exception.NotAuthorized,
exception.InvalidVolume):
raise
except exception.CinderException as ex:
err_msg = _(
"Unable to create attachment for volume (%s).") % ex.msg
LOG.exception(err_msg)
except Exception:
err_msg = _("Unable to create attachment for volume.")
LOG.exception(err_msg)
finally:
if err_msg:
raise webob.exc.HTTPInternalServerError(explanation=err_msg)
return attachment_views.ViewBuilder.detail(attachment_ref)
@wsgi.Controller.api_version(mv.NEW_ATTACH)
@validation.schema(attachment.update)
def update(self, req, id, body):
"""Update an attachment record.
Update a reserved attachment record with connector information and set
up the appropriate connection_info from the driver.
Expected format of the input parameter 'body':
.. code:: json
{
"attachment":
{
"connector":
{
"initiator": "iqn.1993-08.org.debian:01:cad181614cec",
"ip": "192.168.1.20",
"platform": "x86_64",
"host": "tempest-1",
"os_type": "linux2",
"multipath": false,
"mountpoint": "/dev/vdb",
"mode": "None|rw|ro"
}
}
}
"""
context = req.environ['cinder.context']
attachment_ref = (
objects.VolumeAttachment.get_by_id(context, id))
connector = body['attachment']['connector']
err_msg = None
try:
attachment_ref = (
self.volume_api.attachment_update(context,
attachment_ref,
connector))
except exception.NotAuthorized:
raise
except exception.CinderException as ex:
err_msg = (
_("Unable to update attachment.(%s).") % ex.msg)
LOG.exception(err_msg)
except Exception:
err_msg = _("Unable to update the attachment.")
LOG.exception(err_msg)
finally:
if err_msg:
raise webob.exc.HTTPInternalServerError(explanation=err_msg)
# TODO(jdg): Test this out some more, do we want to return an object
# or a dict?
return attachment_views.ViewBuilder.detail(attachment_ref)
@wsgi.Controller.api_version(mv.NEW_ATTACH)
def delete(self, req, id):
"""Delete an attachment.
Disconnects/deletes the specified attachment and returns a list of any
known shared attachment IDs for the affected backend device.
returns: A summary list of any attachments sharing this connection
"""
context = req.environ['cinder.context']
attachment = objects.VolumeAttachment.get_by_id(context, id)
attachments = self.volume_api.attachment_delete(context, attachment)
return attachment_views.ViewBuilder.list(attachments)
@wsgi.response(HTTPStatus.NO_CONTENT)
@wsgi.Controller.api_version(mv.NEW_ATTACH_COMPLETION)
@wsgi.action('os-complete')
def complete(self, req, id, body):
"""Mark a volume attachment process as completed (in-use)."""
context = req.environ['cinder.context']
attachment_ref = (
objects.VolumeAttachment.get_by_id(context, id))
volume_ref = objects.Volume.get_by_id(
context,
attachment_ref.volume_id)
context.authorize(attachment_policy.COMPLETE_POLICY,
target_obj=attachment_ref)
attachment_ref.update(
{'attach_status': fields.VolumeAttachStatus.ATTACHED})
attachment_ref.save()
volume_ref.update({'status': 'in-use', 'attach_status': 'attached'})
volume_ref.save()
volume_utils.notify_about_volume_usage(context, volume_ref,
"attach.end")
def create_resource(ext_mgr):
"""Create the wsgi resource for this controller."""
return wsgi.Resource(AttachmentsController(ext_mgr))
| 40.447368
| 79
| 0.609792
|
d8f4a7c6295f33a186f253e5ff5005a6722d3be9
| 1,134
|
py
|
Python
|
tests/storage/cases/test_KT1MB5WKFpyCo8tf4icYFeQQCbib2cjbqEez.py
|
juztin/pytezos-1
|
7e608ff599d934bdcf129e47db43dbdb8fef9027
|
[
"MIT"
] | 1
|
2020-08-11T02:31:24.000Z
|
2020-08-11T02:31:24.000Z
|
tests/storage/cases/test_KT1MB5WKFpyCo8tf4icYFeQQCbib2cjbqEez.py
|
juztin/pytezos-1
|
7e608ff599d934bdcf129e47db43dbdb8fef9027
|
[
"MIT"
] | 1
|
2020-12-30T16:44:56.000Z
|
2020-12-30T16:44:56.000Z
|
tests/storage/cases/test_KT1MB5WKFpyCo8tf4icYFeQQCbib2cjbqEez.py
|
juztin/pytezos-1
|
7e608ff599d934bdcf129e47db43dbdb8fef9027
|
[
"MIT"
] | 1
|
2022-03-20T19:01:00.000Z
|
2022-03-20T19:01:00.000Z
|
from unittest import TestCase
from tests import get_data
from pytezos.michelson.converter import build_schema, decode_micheline, encode_micheline, micheline_to_michelson
class StorageTestKT1MB5WKFpyCo8tf4icYFeQQCbib2cjbqEez(TestCase):
@classmethod
def setUpClass(cls):
cls.maxDiff = None
cls.contract = get_data('storage/carthagenet/KT1MB5WKFpyCo8tf4icYFeQQCbib2cjbqEez.json')
def test_storage_encoding_KT1MB5WKFpyCo8tf4icYFeQQCbib2cjbqEez(self):
type_expr = self.contract['script']['code'][1]
val_expr = self.contract['script']['storage']
schema = build_schema(type_expr)
decoded = decode_micheline(val_expr, type_expr, schema)
actual = encode_micheline(decoded, schema)
self.assertEqual(val_expr, actual)
def test_storage_schema_KT1MB5WKFpyCo8tf4icYFeQQCbib2cjbqEez(self):
_ = build_schema(self.contract['script']['code'][0])
def test_storage_format_KT1MB5WKFpyCo8tf4icYFeQQCbib2cjbqEez(self):
_ = micheline_to_michelson(self.contract['script']['code'])
_ = micheline_to_michelson(self.contract['script']['storage'])
| 40.5
| 112
| 0.749559
|
a914271faf1598f53029bbf25208f3188038a45e
| 4,185
|
py
|
Python
|
scripts/tensorflow/samples/mnist_0817.py
|
hidakanoko/misc
|
5e1ca971882fd85528e9a6edbf9d4cc31ca54d3a
|
[
"Apache-2.0"
] | null | null | null |
scripts/tensorflow/samples/mnist_0817.py
|
hidakanoko/misc
|
5e1ca971882fd85528e9a6edbf9d4cc31ca54d3a
|
[
"Apache-2.0"
] | null | null | null |
scripts/tensorflow/samples/mnist_0817.py
|
hidakanoko/misc
|
5e1ca971882fd85528e9a6edbf9d4cc31ca54d3a
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Reference material
# https://deepinsider.jp/tutor/introtensorflow/buildcnn
# Seed the random number generator with a fixed value so every run gives the same result
tf.set_random_seed(1)
# Load the MNIST dataset
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
n = 0
# Convolutional layer
def convolutional_layer(layer, size, strides, padding, prob):
global n
with tf.variable_scope('layer' + str(n), reuse=False):
w = tf.get_variable(
'w',
shape=size,
initializer=tf.glorot_normal_initializer())
b = tf.get_variable(
'b',
shape=[size[3]],
initializer=tf.zeros_initializer())
n += 1
return tf.nn.relu(
tf.nn.conv2d(
tf.nn.dropout(layer, prob), w,
strides=strides, padding=padding) + b)
# Pooling layer
def pooling_layer(layer, size, strides, padding, prob):
return tf.nn.max_pool(
tf.nn.dropout(layer, prob), ksize=size,
strides=strides, padding=padding)
# Fully connected layer
def fully_connected_layer(layer, units, prob, hidden):
global n
with tf.variable_scope('layer' + str(n), reuse=False):
w = tf.get_variable(
'w',
shape=[layer.get_shape()[1], units],
initializer=tf.glorot_normal_initializer())
b = tf.get_variable(
'b',
shape=[units],
initializer=tf.zeros_initializer())
n += 1
f = tf.matmul(tf.nn.dropout(layer, prob), w) + b
if hidden:
return tf.nn.relu(f)
return f
# Placeholders holding the keep probabilities (the fraction of units NOT dropped out)
prob_input = tf.placeholder(tf.float32)
prob_common = tf.placeholder(tf.float32)
prob_output = tf.placeholder(tf.float32)
# Placeholder holding the input data
x = tf.placeholder(tf.float32)
# Placeholder holding the labels
y_ = tf.placeholder(tf.float32)
# Reshape to 4-D before passing to the convolution and pooling ops
layer_in = tf.reshape(x, [-1, 28, 28, 1])
# Definition of the convolutional and pooling layers
kernel_count1 = 10
kernel_count2 = kernel_count1 * 2
layer_c1 = convolutional_layer(
layer_in,
[4, 4, 1, kernel_count1],
[1, 2, 2, 1],
'VALID',
prob_input)
layer_c2 = convolutional_layer(
layer_c1,
[3, 3, kernel_count1, kernel_count2],
[1, 2, 2, 1],
'VALID',
prob_common)
layer_c3 = pooling_layer(
layer_c2,
[1, 2, 2, 1],
[1, 2, 2, 1],
'VALID',
prob_common)
# Reshape to 2-D before passing to the fully connected layers
layer_f0 = tf.reshape(
layer_c3, [-1, layer_c3.shape[1] * layer_c3.shape[2] * layer_c3.shape[3]])
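# Shape check (added for clarity, assuming the 28x28x1 MNIST input above and VALID padding):
#   layer_c1: ceil((28 - 4 + 1) / 2) = 13 -> 13x13x10
#   layer_c2: ceil((13 - 3 + 1) / 2) = 6  ->  6x6x20
#   layer_c3: ceil((6 - 2 + 1) / 2)  = 3  ->  3x3x20
# so layer_f0 ends up with 3 * 3 * 20 = 180 features per example.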
# Definition of the fully connected layers
layer_f1 = fully_connected_layer(layer_f0, 40, prob_common, True)
layer_out = fully_connected_layer(layer_f1, 10, prob_output, False)
# Loss function, training algorithm, and evaluation
loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=layer_out)
optimizer = tf.train.AdamOptimizer().minimize(loss)
pred = tf.argmax(layer_out, 1)
accuracy = tf.reduce_sum(
tf.cast(tf.equal(pred, tf.argmax(y_, 1)), tf.float32))
# Keep probabilities to feed to the placeholders
train_prob = {prob_input: 1.0, prob_common: 1.0, prob_output: 0.9}
eval_prob = {prob_input: 1.0, prob_common: 1.0, prob_output: 1.0}
# Input data and labels to feed to the placeholders
train_data = {x: mnist.train.images, y_: mnist.train.labels}
validation_data = {x: mnist.validation.images, y_: mnist.validation.labels}
test_data = {x: mnist.test.images, y_: mnist.test.labels}
# Run the training
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(5001):
# Train on mini-batches of the training data
batch = mnist.train.next_batch(32)
inout = {x: batch[0], y_: batch[1]}
if i % 500 == 0:
# Evaluate on the validation data
train_accuracy = accuracy.eval(
feed_dict={**validation_data, **eval_prob})
print('step {:d}, accuracy {:d} ({:.2f})'.format(
i, int(train_accuracy), train_accuracy / 5000))
# Training step
optimizer.run(feed_dict={**inout, **train_prob})
# Evaluate on the test data
test_accuracy = accuracy.eval(feed_dict={**test_data, **eval_prob})
print('test accuracy {:d} ({:.2f})'.format(
int(test_accuracy), test_accuracy / 10000))
| 29.680851
| 79
| 0.630585
|
c650543ea2c7967847fe751c9b6ceef56a322279
| 2,790
|
py
|
Python
|
Python/Machine Learning/Code Basics/ML Algorithm/6 Dummy Variable & One Hot Encoding.py
|
omkarsutar1255/Python-Data
|
169d0c54b23d9dd5a7f1aea41ab385121c3b3c63
|
[
"CC-BY-3.0"
] | null | null | null |
Python/Machine Learning/Code Basics/ML Algorithm/6 Dummy Variable & One Hot Encoding.py
|
omkarsutar1255/Python-Data
|
169d0c54b23d9dd5a7f1aea41ab385121c3b3c63
|
[
"CC-BY-3.0"
] | null | null | null |
Python/Machine Learning/Code Basics/ML Algorithm/6 Dummy Variable & One Hot Encoding.py
|
omkarsutar1255/Python-Data
|
169d0c54b23d9dd5a7f1aea41ab385121c3b3c63
|
[
"CC-BY-3.0"
] | null | null | null |
# todo: Categorical Variables and One Hot Encoding
# todo: Importing Libraries
import pandas as pd
# todo: Load DataSet
df = pd.read_csv("homeprices.csv")
print(df)
# todo: Using pandas to create dummy variables
dummies = pd.get_dummies(df.town)
print(dummies)
# todo: concatenate dummies with data
merged = pd.concat([df, dummies], axis='columns')
print(merged)
# todo: dropping original dummy column
final = merged.drop(['town'], axis='columns')
print(final)
# todo: Dummy Variable Trap
# When one variable can be derived from the others, the variables are said to be multicollinear. Here,
# if you know the values of california and georgia, you can easily infer the value for the new jersey state,
# i.e. california=0 and georgia=0. Therefore these state variables are multicollinear. In this
# situation linear regression won't work as expected, hence you need to drop one column.
# NOTE: the sklearn library takes care of the dummy variable trap, so even if you don't drop one of the
# state columns it is going to work. However, we should make a habit of handling the dummy variable
# trap ourselves, just in case the library you are using does not handle it for you.
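# Aside (not part of the original tutorial): pandas can drop the first dummy column for you
# via drop_first=True, which is an equivalent way to avoid the dummy variable trap:
dummies_no_trap = pd.get_dummies(df.town, drop_first=True)
print(dummies_no_trap)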
# todo: Drop one of the dummy variable columns to avoid the dummy variable trap
final = final.drop(['west windsor'], axis='columns')
print(final)
# todo: Splitting Features and Label DataSet
X = final.drop('price', axis='columns')
y = final.price
# todo: Creating model and Prediction and score
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(X, y)
model.predict(X) # 2600 sqr ft home in new jersey
model.score(X, y)
model.predict([[3400, 0, 0]]) # 3400 sqr ft home in west windsor
model.predict([[2800, 0, 1]]) # 2800 sqr ft home in robbinsville
# todo: Using SkLearn OneHotEncoder
# todo: First step is to use label encoder to convert town names into numbers
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
dfle = df
dfle.town = le.fit_transform(dfle.town)
print(dfle) # transform categorical values into 0, 1, 2, 3...
# todo: Splitting into Features and Label
X = dfle[['town', 'area']].values
print(X)
y = dfle.price.values
print(y)
# todo: Now use one hot encoder to create dummy variables for each of the town
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
ct = ColumnTransformer([('town', OneHotEncoder(), [0])], remainder='passthrough')
X = ct.fit_transform(X) # converting 0, 1, 2, 3,.. into one hot encoding
print(X)
X = X[:, 1:] # dropping first column of one hot encoding
print(X)
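# Aside (not part of the original tutorial): recent scikit-learn versions can drop the first
# category for you, e.g. OneHotEncoder(drop='first'), instead of slicing X manually as above.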
model.fit(X, y) # training model
model.predict([[0, 1, 3400]]) # 3400 sqr ft home in west windsor
model.predict([[1, 0, 2800]]) # 2800 sqr ft home in robbinsville
| 37.702703
| 105
| 0.736918
|
fa62c72dde8a8482f39d701530382e5cd9a0cf49
| 11,946
|
py
|
Python
|
spyder/widgets/tests/test_editorsplitter.py
|
maraigue/spyder
|
ea9797eb24854eca085aa90a5d7ae682ceef1f7a
|
[
"MIT"
] | 1
|
2019-10-19T20:56:20.000Z
|
2019-10-19T20:56:20.000Z
|
spyder/widgets/tests/test_editorsplitter.py
|
Mazmatig/spyder
|
60680b21327cd886ab6c2b1c26ee658bfe3c99b7
|
[
"MIT"
] | null | null | null |
spyder/widgets/tests/test_editorsplitter.py
|
Mazmatig/spyder
|
60680b21327cd886ab6c2b1c26ee658bfe3c99b7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
#
"""
Tests for EditorSplitter class in editor.py
"""
# Standard library imports
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock # Python 2
# Third party imports
import pytest
from qtpy.QtCore import Qt
# Local imports
from spyder.widgets.editor import EditorStack, EditorSplitter
# ---- Qt Test Fixtures
@pytest.fixture
def base_editor_bot(qtbot):
editor_stack = EditorStack(None, [])
editor_stack.set_introspector(Mock())
editor_stack.set_find_widget(Mock())
editor_stack.set_io_actions(Mock(), Mock(), Mock(), Mock())
return editor_stack, qtbot
@pytest.fixture
def editor_splitter_bot(qtbot):
"""Create editor splitter."""
es = editor_splitter = EditorSplitter(None, Mock(), [], first=True)
qtbot.addWidget(es)
es.show()
return es
@pytest.fixture
def editor_splitter_layout_bot(editor_splitter_bot):
"""Create editor splitter for testing layouts."""
es = editor_splitter_bot
# Allow the split() to duplicate editor stacks.
def clone(editorstack):
editorstack.close_action.setEnabled(False)
editorstack.set_introspector(Mock())
editorstack.set_find_widget(Mock())
editorstack.set_io_actions(Mock(), Mock(), Mock(), Mock())
editorstack.new('foo.py', 'utf-8', 'a = 1\nprint(a)\n\nx = 2')
editorstack.new('layout_test.py', 'utf-8', 'print(spam)')
with open(__file__) as f:
text = f.read()
editorstack.new(__file__, 'utf-8', text)
es.plugin.clone_editorstack.side_effect = clone
# Setup editor info for this EditorStack.
clone(es.editorstack)
return es
# ---- Tests
def test_init(editor_splitter_bot):
""""Test __init__."""
es = editor_splitter_bot
assert es.orientation() == Qt.Horizontal
assert es.testAttribute(Qt.WA_DeleteOnClose)
assert not es.childrenCollapsible()
assert not es.toolbar_list
assert not es.menu_list
assert es.register_editorstack_cb == es.plugin.register_editorstack
assert es.unregister_editorstack_cb == es.plugin.unregister_editorstack
# No menu actions in parameter call.
assert not es.menu_actions
# EditorStack adds its own menu actions to the existing actions.
assert es.editorstack.menu_actions != []
assert isinstance(es.editorstack, EditorStack)
es.plugin.register_editorstack.assert_called_with(es.editorstack)
es.plugin.unregister_editorstack.assert_not_called()
es.plugin.clone_editorstack.assert_not_called()
assert es.count() == 1
assert es.widget(0) == es.editorstack
def test_close(qtbot):
"""Test the inteface for closing the editor splitters."""
# Split the main editorsplitter once, then split the second editorsplitter
# twice.
es = editor_splitter_bot(qtbot)
es.split()
esw1 = es.widget(1)
esw1.editorstack.set_closable(True)
assert es.count() == 2
assert esw1.count() == 1
esw1.split()
esw1w1 = esw1.widget(1)
esw1w1.editorstack.set_closable(True)
assert es.count() == 2
assert esw1.count() == 2
assert esw1w1.count() == 1
esw1.split()
esw1w2 = esw1.widget(2)
esw1w2.editorstack.set_closable(True)
assert es.count() == 2
assert esw1.count() == 3
assert esw1w1.count() == esw1w2.count() == 1
# Assert that all the editorsplitters are visible.
assert es.isVisible()
assert esw1.isVisible()
assert esw1w1.isVisible()
assert esw1w2.isVisible()
# Close the editorstack of the editorsplitter esw1 and assert that it is
# not destroyed because it still contains the editorsplitters esw1w1 and
# esw1w2.
with qtbot.waitSignal(esw1.editorstack.destroyed, timeout=1000):
esw1.editorstack.close_split()
assert es.count() == 2
assert esw1.count() == 2
assert esw1.editorstack is None
assert es.isVisible()
assert esw1.isVisible()
assert esw1w1.isVisible()
assert esw1w2.isVisible()
# Close the editorstack of the editorsplitter esw1w1, assert it is
# correctly destroyed afterwards on the Qt side and that it is correctly
# removed from the editorsplitter esw1.
with qtbot.waitSignal(esw1w1.destroyed, timeout=1000):
esw1w1.editorstack.close_split()
with pytest.raises(RuntimeError):
esw1w1.count()
assert es.count() == 2
assert esw1.count() == 1
assert es.isVisible()
assert esw1.isVisible()
assert esw1w2.isVisible()
# Close the editorstack of the editorsplitter esw1w2 and assert that
# editorsplitters esw1w2 AND esw1 are correctly destroyed afterward on
# the Qt side.
with qtbot.waitSignal(esw1.destroyed, timeout=1000):
esw1w2.editorstack.close_split()
with pytest.raises(RuntimeError):
esw1.count()
with pytest.raises(RuntimeError):
esw1w2.count()
assert es.isVisible()
assert es.count() == 1
# Test that the editorstack of the main editorsplitter es cannot be closed.
es.editorstack.close_split()
assert es.isVisible()
assert es.count() == 1
def test_split(editor_splitter_layout_bot):
"""Test split() that adds new splitters to this instance."""
es = editor_splitter_layout_bot
# Split main panel with default split.
es.split() # Call directly.
assert es.orientation() == Qt.Vertical
assert not es.editorstack.horsplit_action.isEnabled()
assert es.editorstack.versplit_action.isEnabled()
assert es.count() == 2
assert isinstance(es.widget(1), EditorSplitter)
# Each splitter gets its own editor stack as the first widget.
assert es.widget(1).count() == 1
assert es.widget(1).editorstack == es.widget(1).widget(0)
es.widget(1).plugin.clone_editorstack.assert_called_with(
editorstack=es.widget(1).editorstack)
# Create a horizontal split on original widget.
es.editorstack.sig_split_horizontally.emit() # Call from signal.
assert es.orientation() == Qt.Horizontal
assert es.editorstack.horsplit_action.isEnabled()
assert not es.editorstack.versplit_action.isEnabled()
assert es.count() == 3
assert isinstance(es.widget(2), EditorSplitter)
# Two splits have been created and each contains one EditorStack.
assert es.widget(1).count() == 1
assert es.widget(2).count() == 1
# Test splitting one of the children.
es1 = es.widget(1)
es1.editorstack.sig_split_vertically.emit()
assert es.orientation() == Qt.Horizontal # Main split didn't change.
assert es1.orientation() == Qt.Vertical # Child splitter.
assert not es1.editorstack.horsplit_action.isEnabled()
assert es1.editorstack.versplit_action.isEnabled()
assert es1.count() == 2
assert isinstance(es1.widget(0), EditorStack)
assert isinstance(es1.widget(1), EditorSplitter)
assert not es1.widget(1).isHidden()
def test_iter_editorstacks(editor_splitter_bot):
"""Test iter_editorstacks."""
es = editor_splitter_bot
es_iter = es.iter_editorstacks
# Check base splitter.
assert es_iter() == [(es.editorstack, es.orientation())]
# Split once.
es.split(Qt.Vertical)
esw1 = es.widget(1)
assert es_iter() == [(es.editorstack, es.orientation()),
(esw1.editorstack, esw1.orientation())]
# Second splitter on base isn't iterated.
es.split(Qt.Horizontal)
assert es_iter() == [(es.editorstack, es.orientation()),
(esw1.editorstack, esw1.orientation())]
# Split a child.
esw1.split(Qt.Vertical)
esw1w1 = es.widget(1).widget(1)
assert es_iter() == [(es.editorstack, es.orientation()),
(esw1.editorstack, esw1.orientation()),
(esw1w1.editorstack, esw1w1.orientation())]
def test_get_layout_settings(editor_splitter_bot, qtbot, mocker):
"""Test get_layout_settings()."""
es = editor_splitter_bot
# Initial settings from setup.
setting = es.get_layout_settings()
assert setting['splitsettings'] == [(False, None, [])]
# Add some editors to patch output of iter_editorstacks.
stack1 = base_editor_bot(qtbot)[0]
stack1.new('foo.py', 'utf-8', 'a = 1\nprint(a)\n\nx = 2')
stack1.new('layout_test.py', 'utf-8', 'spam egg\n')
stack2 = base_editor_bot(qtbot)[0]
stack2.new('test.py', 'utf-8', 'test text')
mocker.patch.object(EditorSplitter, "iter_editorstacks")
EditorSplitter.iter_editorstacks.return_value = (
[(stack1, Qt.Vertical), (stack2, Qt.Horizontal)])
setting = es.get_layout_settings()
assert setting['hexstate']
assert setting['sizes'] == es.sizes()
assert setting['splitsettings'] == [(False, 'foo.py', [5, 3]),
(False, 'test.py', [2])]
def test_set_layout_settings_dont_goto(editor_splitter_layout_bot):
"""Test set_layout_settings()."""
es = editor_splitter_layout_bot
linecount = es.editorstack.data[2].editor.get_cursor_line_number()
# New layout to restore.
state = '000000ff000000010000000200000231000001ff00ffffffff010000000200'
sizes = [561, 511]
splitsettings = [(False, 'layout_test.py', [2, 1, 52]),
(False, 'foo.py', [3, 2, 125]),
(False, __file__, [1, 1, 1])]
new_settings = {'hexstate': state,
'sizes': sizes,
'splitsettings': splitsettings}
# Current widget doesn't have saved settings applied.
get_settings = es.get_layout_settings()
assert es.count() == 1
assert get_settings['hexstate'] != state
assert get_settings['splitsettings'] != splitsettings
# Invalid settings value.
assert es.set_layout_settings({'spam': 'test'}) is None
# Restore layout with dont_goto set.
es.set_layout_settings(new_settings, dont_goto=True)
get_settings = es.get_layout_settings()
# Check that the panels were restored.
assert es.count() == 2 # One EditorStack and one EditorSplitter.
assert es.widget(1).count() == 2 # One EditorStack and one EditorSplitter.
assert es.widget(1).widget(1).count() == 1 # One EditorStack.
assert get_settings['hexstate'] == state
# All the lines for each tab and split are at the last line number.
assert get_settings['splitsettings'] == [(False, 'foo.py', [5, 2, linecount]),
(False, 'foo.py', [5, 2, linecount]),
(False, 'foo.py', [5, 2, linecount])]
def test_set_layout_settings_goto(editor_splitter_layout_bot):
"""Test set_layout_settings()."""
es = editor_splitter_layout_bot
# New layout to restore.
state = '000000ff000000010000000200000231000001ff00ffffffff010000000200'
sizes = [561, 511]
splitsettings = [(False, 'layout_test.py', [2, 1, 52]),
(False, 'foo.py', [3, 2, 125]),
(False, __file__, [1, 1, 1])]
new_settings = {'hexstate': state,
'sizes': sizes,
'splitsettings': splitsettings}
# Restore layout without dont_goto, meaning it should position to the lines.
es.set_layout_settings(new_settings, dont_goto=None)
get_settings = es.get_layout_settings()
# Even though the original splitsettings had different file names
# selected, the current tab isn't restored in set_layout_settings().
# However, this shows that the current line was positioned for each tab
# and each split.
assert get_settings['splitsettings'] == [(False, 'foo.py', [2, 1, 52]),
(False, 'foo.py', [3, 2, 125]),
(False, 'foo.py', [1, 1, 1])]
if __name__ == "__main__":
import os.path as osp
pytest.main(['-x', osp.basename(__file__), '-v', '-rw'])
| 35.032258
| 82
| 0.662397
|
ffcce6db40d793f7b49bd70be78ea121b04cbe7f
| 348
|
py
|
Python
|
q2_metadata/tests/__init__.py
|
FranckLejzerowicz/q2-metadata
|
55f303a3239a83b341f07d4d63c403e30fca72c8
|
[
"BSD-3-Clause"
] | null | null | null |
q2_metadata/tests/__init__.py
|
FranckLejzerowicz/q2-metadata
|
55f303a3239a83b341f07d4d63c403e30fca72c8
|
[
"BSD-3-Clause"
] | null | null | null |
q2_metadata/tests/__init__.py
|
FranckLejzerowicz/q2-metadata
|
55f303a3239a83b341f07d4d63c403e30fca72c8
|
[
"BSD-3-Clause"
] | null | null | null |
# ----------------------------------------------------------------------------
# Copyright (c) 2017-2019, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
| 49.714286
| 78
| 0.416667
|
51cc76a2a5fa891db1c3d67171ac8bff67f024eb
| 1,206
|
py
|
Python
|
setup.py
|
kmohrf/python-yeelight
|
483019c074556b4c3d2f665398f0fc308afd6274
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
setup.py
|
kmohrf/python-yeelight
|
483019c074556b4c3d2f665398f0fc308afd6274
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
setup.py
|
kmohrf/python-yeelight
|
483019c074556b4c3d2f665398f0fc308afd6274
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
#!/usr/bin/env python
import sys
from setuptools import setup
assert sys.version >= "2.7", "Requires Python v2.7 or above."
with open("yeelight/version.py") as f:
exec(f.read())
classifiers = [
"License :: OSI Approved :: BSD License",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Software Development :: Libraries :: Python Modules",
]
setup(
name="yeelight",
version=__version__, # type: ignore
author="Stavros Korokithakis",
author_email="hi@stavros.io",
url="https://gitlab.com/stavros/python-yeelight/",
description="A Python library for controlling YeeLight RGB bulbs.",
long_description=open("README.rst").read(),
license="BSD",
classifiers=classifiers,
packages=["yeelight"],
install_requires=["enum-compat", "future", 'pypiwin32;platform_system=="Windows"'],
test_suite="yeelight.tests",
tests_require=[],
)
| 30.15
| 87
| 0.657546
|
b325c618c4b457a537328c9efcc54294b1fc876c
| 7,408
|
py
|
Python
|
satchmo/apps/tax/modules/us_sst/management/commands/sst_import_boundry.py
|
djangoplicity/satchmo
|
75b672dffb64fed3e55c253d51a0ce73f0747e05
|
[
"BSD-3-Clause"
] | null | null | null |
satchmo/apps/tax/modules/us_sst/management/commands/sst_import_boundry.py
|
djangoplicity/satchmo
|
75b672dffb64fed3e55c253d51a0ce73f0747e05
|
[
"BSD-3-Clause"
] | null | null | null |
satchmo/apps/tax/modules/us_sst/management/commands/sst_import_boundry.py
|
djangoplicity/satchmo
|
75b672dffb64fed3e55c253d51a0ce73f0747e05
|
[
"BSD-3-Clause"
] | null | null | null |
from django.core.management.base import BaseCommand, CommandError
import os
from datetime import date
# We don't actually need it, but otherwise livesettings.functions chokes.
import tax.config
from tax.modules.us_sst.models import TaxRate, TaxBoundry
from decimal import Decimal
def ash_split(arg, qty):
"""Unfortunately, states don't alwys publish the full SST fields in the
boundry files like they are required to. It's a shame really. So this function
will force a string to split to 'qty' fields, adding None values as needed to
get there.
"""
l = arg.split(',')
if len(l) < qty:
l.extend([None for x in xrange(qty-len(l))])
return l
CSV_MAP = (
'recordType', 'startDate', 'endDate',
'lowAddress', 'highAddress', 'oddEven',
'streetPreDirection', 'streetName', 'streetSuffix', 'streetPostDirection',
'addressSecondaryAbbr', 'addressSecondaryLow', 'addressSecondaryHigh', 'addressSecondaryOddEven',
'cityName', 'zipCode', 'plus4',
'zipCodeLow', 'zipExtensionLow', 'zipCodeHigh', 'zipExtensionHigh',
'serCode',
'fipsStateCode', 'fipsStateIndicator', 'fipsCountyCode', 'fipsPlaceCode', 'fipsPlaceType',
'long', 'lat',
'special_1_source', 'special_1_code', 'special_1_type',
'special_2_source', 'special_2_code', 'special_2_type',
'special_3_source', 'special_3_code', 'special_3_type',
'special_4_source', 'special_4_code', 'special_4_type',
'special_5_source', 'special_5_code', 'special_5_type',
'special_6_source', 'special_6_code', 'special_6_type',
'special_7_source', 'special_7_code', 'special_7_type',
'special_8_source', 'special_8_code', 'special_8_type',
'special_9_source', 'special_9_code', 'special_9_type',
'special_10_source', 'special_10_code', 'special_10_type',
'special_11_source', 'special_11_code', 'special_11_type',
'special_12_source', 'special_12_code', 'special_12_type',
'special_13_source', 'special_13_code', 'special_13_type',
'special_14_source', 'special_14_code', 'special_14_type',
'special_15_source', 'special_15_code', 'special_15_type',
'special_16_source', 'special_16_code', 'special_16_type',
'special_17_source', 'special_17_code', 'special_17_type',
'special_18_source', 'special_18_code', 'special_18_type',
'special_19_source', 'special_19_code', 'special_19_type',
'special_20_source', 'special_20_code', 'special_20_type',
)
# Some fields we're not using.
DELETE_FIELDS = (
'long', 'lat',
'special_1_source',
'special_2_source',
'special_3_source',
'special_4_source',
'special_5_source',
'special_6_source',
'special_7_source',
'special_8_source',
'special_9_source',
'special_10_source',
'special_11_source',
'special_12_source',
'special_13_source',
'special_14_source',
'special_15_source',
'special_16_source',
'special_17_source',
'special_18_source',
'special_19_source',
'special_20_source',
)
class Command(BaseCommand):
'''Manage command to import one of the CSV files from the SST website.
To update: Simple re-run on the newer CSV file.
Any unchanged entries will be left alone, and any changed ones will get
their end dates set properly and the new rows inserted. You will need to do
this quartly or as-needed by your tax jurisdictions.'''
help = "Imports a CSV boundary file from the SST website."
args = 'file'
def handle(self, *args, **options):
new = 0
updated = 0
unchanged = 0
total = 0
if not args:
raise CommandError("No file specified")
file = args[0]
if not os.path.isfile(file):
raise RuntimeError("File: %s is not a normal file or doesn't exist." % file)
file = open(file)
print "Processing: ",
for line in file:
line = line.strip()
#Z,20080701,99991231,,,,,,,,,,,,,,,00073,,00073,,EXTRA
fields = ash_split(line, len(CSV_MAP))
# Turn it all into a dict so we can search for a duplicate,
# Then remove the keys we don't care about or use.
d = dict([(x, fields.pop(0)) for x in CSV_MAP])
for v in DELETE_FIELDS:
del(d[v])
d['recordType'] = d['recordType'].upper()
d['startDate'] = date(int(d['startDate'][0:4]), int(d['startDate'][4:6]), int(d['startDate'][6:9]))
d['endDate'] = date(int(d['endDate'][0:4]), int(d['endDate'][4:6]), int(d['endDate'][6:9]))
# Empty strings are nulls.
for k in d.keys():
if d[k] == '':
d[k] = None
if d['recordType'] == 'A':
# For now, skip these, as they barely work.
# Zip+4 is the best way always. These are a bad idea in general.
continue
d['lowAddress'] = int(d['lowAddress'])
d['highAddress'] = int(d['highAddress'])
d['oddEven'] = d['oddEven'].upper()
d['addressSecondaryOddEven'] = d['addressSecondaryOddEven'].upper()
d['zipCode'] = int(d['zipCode'])
d['plus4'] = int(d['plus4'])
elif d['recordType'] == '4':
d['zipCodeLow'] = int(d['zipCodeLow'])
d['zipExtensionLow'] = int(d['zipExtensionLow'])
d['zipCodeHigh'] = int(d['zipCodeHigh'])
d['zipExtensionHigh'] = int(d['zipExtensionHigh'])
elif d['recordType'] == 'Z':
d['zipCodeLow'] = int(d['zipCodeLow'])
d['zipCodeHigh'] = int(d['zipCodeHigh'])
end = d['endDate']
del(d['endDate'])
try:
tb = TaxBoundry.objects.get(**d)
# Over time, end dates can change. A new row with a new start
# date will also appear. This way, loading a new file correctly
# updates the map. (I hope.)
if tb.endDate != end:
tb.endDate = end
tb.save()
total += 1
updated += 1
else:
total += 1
unchanged += 1
except TaxBoundry.DoesNotExist:
# Put the end back, and save it.
d['endDate'] = end
try:
TaxBoundry(**d).save()
total += 1
new += 1
except:
print "Error loading the following row:"
for k in CSV_MAP:
if k in d:
print "%s: '%s'" % (k, d[k])
raise
if total % 100 == 0:
print "%s," % total,
# Now, handle mapping boundries to rates.
#extra = SER,state_providing,state_taxed,County,Place,Class,Long,Lat, (ST/VD,Special Code,Special Type,) x 20
# IF SER, then the tax module should report all sales taxes by that SER code.
# Otherwise, report it by each applicable tax.
# Total tax is still the same in both cases. Just the state wants it reported differently.
print ""
print "Done: New: %d. End date changed: %d. Unchanged: %d" % (new, updated, unchanged)
| 40.928177
| 121
| 0.579239
|
86e437e6098f980a03657771c4674c47a9b8111d
| 1,048
|
py
|
Python
|
docs/cornell CS class/lesson 22.Subclasses &Inheritance/demos/powpoint.py
|
LizzieDeng/kalman_fliter_analysis
|
50e728f32c496c3fcbb8ca3ee00857b999b88d99
|
[
"MIT"
] | null | null | null |
docs/cornell CS class/lesson 22.Subclasses &Inheritance/demos/powpoint.py
|
LizzieDeng/kalman_fliter_analysis
|
50e728f32c496c3fcbb8ca3ee00857b999b88d99
|
[
"MIT"
] | null | null | null |
docs/cornell CS class/lesson 22.Subclasses &Inheritance/demos/powpoint.py
|
LizzieDeng/kalman_fliter_analysis
|
50e728f32c496c3fcbb8ca3ee00857b999b88d99
|
[
"MIT"
] | null | null | null |
"""
A module of class stubs to show off subclasses
This module does not do anything. It is simply mean to
accompany the lesson video introducing subclassing.
Author: Walker M. White
Date: October 6, 2020
"""
class SlideContent(object):
"""Class representing content on a slide"""
def __init__(self,x,y,w,h):
"""Initialize the slide content"""
pass
def draw_frame(self):
"""Draw the frame (handles use to resize)"""
pass
def select(self):
"""Show the frame when object clicked"""
pass
class TextBox(SlideContent):
"""Class representing text on a slide"""
def __init__(self,x,y,text):
"""Initialize the text box"""
pass
def draw(self):
"""Draw the text contents (not the frame)"""
pass
class Image(SlideContent):
"""Class representing an image on a slide"""
def __init__(self,x,y,image_file):
"""Initialize the image"""
pass
def draw(self):
"""Draw the image (not the frame)"""
pass
| 21.833333
| 54
| 0.614504
|
d94cb8df7672bff26d91c608104db55018b88120
| 54,790
|
py
|
Python
|
launchdarkly_api/api/environments_api.py
|
launchdarkly/api-client-python
|
b72bd94fb65ac57bd95df5767aebcdaff50e5cb6
|
[
"Apache-2.0"
] | 6
|
2020-02-06T20:17:25.000Z
|
2021-12-28T20:13:34.000Z
|
launchdarkly_api/api/environments_api.py
|
launchdarkly/api-client-python
|
b72bd94fb65ac57bd95df5767aebcdaff50e5cb6
|
[
"Apache-2.0"
] | 7
|
2019-02-18T21:51:47.000Z
|
2021-09-03T17:49:33.000Z
|
launchdarkly_api/api/environments_api.py
|
launchdarkly/api-client-python
|
b72bd94fb65ac57bd95df5767aebcdaff50e5cb6
|
[
"Apache-2.0"
] | 6
|
2019-08-02T16:10:31.000Z
|
2021-05-23T17:47:03.000Z
|
# -*- coding: utf-8 -*-
"""
LaunchDarkly REST API
# Overview ## Authentication All REST API resources are authenticated with either [personal or service access tokens](https://docs.launchdarkly.com/home/account-security/api-access-tokens), or session cookies. Other authentication mechanisms are not supported. You can manage personal access tokens on your [Account settings](https://app.launchdarkly.com/settings/tokens) page. LaunchDarkly also has SDK keys, mobile keys, and client-side IDs that are used by our server-side SDKs, mobile SDKs, and client-side SDKs, respectively. **These keys cannot be used to access our REST API**. These keys are environment-specific, and can only perform read-only operations (fetching feature flag settings). | Auth mechanism | Allowed resources | Use cases | | ----------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------- | -------------------------------------------------- | | [Personal access tokens](https://docs.launchdarkly.com/home/account-security/api-access-tokens) | Can be customized on a per-token basis | Building scripts, custom integrations, data export | | SDK keys | Can only access read-only SDK-specific resources and the firehose, restricted to a single environment | Server-side SDKs, Firehose API | | Mobile keys | Can only access read-only mobile SDK-specific resources, restricted to a single environment | Mobile SDKs | | Client-side ID | Single environment, only flags marked available to client-side | Client-side JavaScript | > #### Keep your access tokens and SDK keys private > > Access tokens should _never_ be exposed in untrusted contexts. Never put an access token in client-side JavaScript, or embed it in a mobile application. LaunchDarkly has special mobile keys that you can embed in mobile apps. If you accidentally expose an access token or SDK key, you can reset it from your [Account Settings](https://app.launchdarkly.com/settings#/tokens) page. > > The client-side ID is safe to embed in untrusted contexts. It's designed for use in client-side JavaScript. ### Via request header The preferred way to authenticate with the API is by adding an `Authorization` header containing your access token to your requests. The value of the `Authorization` header must be your access token. Manage personal access tokens from the [Account Settings](https://app.launchdarkly.com/settings/tokens) page. ### Via session cookie For testing purposes, you can make API calls directly from your web browser. If you're logged in to the application, the API will use your existing session to authenticate calls. If you have a [role](https://docs.launchdarkly.com/home/team/built-in-roles) other than Admin, or have a [custom role](https://docs.launchdarkly.com/home/team/custom-roles) defined, you may not have permission to perform some API calls. You will receive a `401` response code in that case. > ### Modifying the Origin header causes an error > > LaunchDarkly validates that the Origin header for any API request authenticated by a session cookie matches the expected Origin header. The expected Origin header is `https://app.launchdarkly.com`. > > If the Origin header does not match what's expected, LaunchDarkly returns an error. This error can prevent the LaunchDarkly app from working correctly. > > Any browser extension that intentionally changes the Origin header can cause this problem. 
For example, the `Allow-Control-Allow-Origin: *` Chrome extension changes the Origin header to `http://evil.com` and causes the app to fail. > > To prevent this error, do not modify your Origin header. > > LaunchDarkly does not require origin matching when authenticating with an access token, so this issue does not affect normal API usage. ## Representations All resources expect and return JSON response bodies. Error responses will also send a JSON body. Read [Errors](#section/Errors) for a more detailed description of the error format used by the API. In practice this means that you always get a response with a `Content-Type` header set to `application/json`. In addition, request bodies for `PUT`, `POST`, `REPORT` and `PATCH` requests must be encoded as JSON with a `Content-Type` header set to `application/json`. ### Summary and detailed representations When you fetch a list of resources, the response includes only the most important attributes of each resource. This is a _summary representation_ of the resource. When you fetch an individual resource (for example, a single feature flag), you receive a _detailed representation_ containing all of the attributes of the resource. The best way to find a detailed representation is to follow links. Every summary representation includes a link to its detailed representation. ### Links and addressability The best way to navigate the API is by following links. These are attributes in representations that link to other resources. The API always uses the same format for links: - Links to other resources within the API are encapsulated in a `_links` object. - If the resource has a corresponding link to HTML content on the site, it is stored in a special `_site` link. Each link has two attributes: an href (the URL) and a type (the content type). For example, a feature resource might return the following: ```json { \"_links\": { \"parent\": { \"href\": \"/api/features\", \"type\": \"application/json\" }, \"self\": { \"href\": \"/api/features/sort.order\", \"type\": \"application/json\" } }, \"_site\": { \"href\": \"/features/sort.order\", \"type\": \"text/html\" } } ``` From this, you can navigate to the parent collection of features by following the `parent` link, or navigate to the site page for the feature by following the `_site` link. Collections are always represented as a JSON object with an `items` attribute containing an array of representations. Like all other representations, collections have `_links` defined at the top level. Paginated collections include `first`, `last`, `next`, and `prev` links containing a URL with the respective set of elements in the collection. ## Updates Resources that accept partial updates use the `PATCH` verb, and support the [JSON Patch](https://datatracker.ietf.org/doc/html/rfc6902) format. Some resources also support the [JSON Merge Patch](https://datatracker.ietf.org/doc/html/rfc7386) format. In addition, some resources support optional comments that can be submitted with updates. Comments appear in outgoing webhooks, the audit log, and other integrations. ### Updates via JSON Patch [JSON Patch](https://datatracker.ietf.org/doc/html/rfc6902) is a way to specify the modifications to perform on a resource. For example, in this feature flag representation: ```json { \"name\": \"New recommendations engine\", \"key\": \"engine.enable\", \"description\": \"This is the description\", ... 
} ``` You can change the feature flag's description with the following patch document: ```json [{ \"op\": \"replace\", \"path\": \"/description\", \"value\": \"This is the new description\" }] ``` JSON Patch documents are always arrays. You can specify multiple modifications to perform in a single request. You can also test that certain preconditions are met before applying the patch: ```json [ { \"op\": \"test\", \"path\": \"/version\", \"value\": 10 }, { \"op\": \"replace\", \"path\": \"/description\", \"value\": \"The new description\" } ] ``` The above patch request tests whether the feature flag's `version` is `10`, and if so, changes the feature flag's description. Attributes that aren't editable, like a resource's `_links`, have names that start with an underscore. ### Updates via JSON Merge Patch The API also supports the [JSON Merge Patch](https://datatracker.ietf.org/doc/html/rfc7386) format, as well as the [Update feature flag](/tag/Feature-flags#operation/patchFeatureFlag) resource. JSON Merge Patch is less expressive than JSON Patch but in many cases, it is simpler to construct a merge patch document. For example, you can change a feature flag's description with the following merge patch document: ```json { \"description\": \"New flag description\" } ``` ### Updates with comments You can submit optional comments with `PATCH` changes. The [Update feature flag](/tag/Feature-flags#operation/patchFeatureFlag) resource supports comments. To submit a comment along with a JSON Patch document, use the following format: ```json { \"comment\": \"This is a comment string\", \"patch\": [{ \"op\": \"replace\", \"path\": \"/description\", \"value\": \"The new description\" }] } ``` To submit a comment along with a JSON Merge Patch document, use the following format: ```json { \"comment\": \"This is a comment string\", \"merge\": { \"description\": \"New flag description\" } } ``` ### Updates via semantic patches The API also supports the Semantic patch format. A semantic `PATCH` is a way to specify the modifications to perform on a resource as a set of executable instructions. JSON Patch uses paths and a limited set of operations to describe how to transform the current state of the resource into a new state. Semantic patch allows you to be explicit about intent using precise, custom instructions. In many cases, semantic patch instructions can also be defined independently of the current state of the resource. This can be useful when defining a change that may be applied at a future date. For example, in this feature flag configuration in environment Production: ```json { \"name\": \"Alternate sort order\", \"kind\": \"boolean\", \"key\": \"sort.order\", ... \"environments\": { \"production\": { \"on\": true, \"archived\": false, \"salt\": \"c29ydC5vcmRlcg==\", \"sel\": \"8de1085cb7354b0ab41c0e778376dfd3\", \"lastModified\": 1469131558260, \"version\": 81, \"targets\": [ { \"values\": [ \"Gerhard.Little@yahoo.com\" ], \"variation\": 0 }, { \"values\": [ \"1461797806429-33-861961230\", \"438580d8-02ee-418d-9eec-0085cab2bdf0\" ], \"variation\": 1 } ], \"rules\": [], \"fallthrough\": { \"variation\": 0 }, \"offVariation\": 1, \"prerequisites\": [], \"_site\": { \"href\": \"/default/production/features/sort.order\", \"type\": \"text/html\" } } } } ``` You can add a date you want a user to be removed from the feature flag's user targets. 
For example, “remove user 1461797806429-33-861961230 from the user target for variation 0 on the Alternate sort order flag in the production environment on Wed Jul 08 2020 at 15:27:41 pm”. This is done using the following: ```json { \"comment\": \"update expiring user targets\", \"instructions\": [ { \"kind\": \"removeExpireUserTargetDate\", \"userKey\": \"userKey\", \"variationId\": \"978d53f9-7fe3-4a63-992d-97bcb4535dc8\" }, { \"kind\": \"updateExpireUserTargetDate\", \"userKey\": \"userKey2\", \"variationId\": \"978d53f9-7fe3-4a63-992d-97bcb4535dc8\", \"value\": 1587582000000 }, { \"kind\": \"addExpireUserTargetDate\", \"userKey\": \"userKey3\", \"variationId\": \"978d53f9-7fe3-4a63-992d-97bcb4535dc8\", \"value\": 1594247266386 } ] } ``` Here is another example. In this feature flag configuration: ```json { \"name\": \"New recommendations engine\", \"key\": \"engine.enable\", \"environments\": { \"test\": { \"on\": true } } } ``` You can change the feature flag's description with the following patch document as a set of executable instructions. For example, “add user X to targets for variation Y and remove user A from targets for variation B for test flag”: ```json { \"comment\": \"\", \"instructions\": [ { \"kind\": \"removeUserTargets\", \"values\": [\"438580d8-02ee-418d-9eec-0085cab2bdf0\"], \"variationId\": \"852cb784-54ff-46b9-8c35-5498d2e4f270\" }, { \"kind\": \"addUserTargets\", \"values\": [\"438580d8-02ee-418d-9eec-0085cab2bdf0\"], \"variationId\": \"1bb18465-33b6-49aa-a3bd-eeb6650b33ad\" } ] } ``` > ### Supported semantic patch API endpoints > > - [Update feature flag](/tag/Feature-flags#operation/patchFeatureFlag) > - [Update expiring user targets on feature flag](/tag/Feature-flags#operation/patchExpiringUserTargets) > - [Update expiring user target for flags](/tag/User-settings#operation/patchExpiringFlagsForUser) > - [Update expiring user targets on segment](/tag/Segments#operation/patchExpiringUserTargetsForSegment) ## Errors The API always returns errors in a common format. Here's an example: ```json { \"code\": \"invalid_request\", \"message\": \"A feature with that key already exists\", \"id\": \"30ce6058-87da-11e4-b116-123b93f75cba\" } ``` The general class of error is indicated by the `code`. The `message` is a human-readable explanation of what went wrong. The `id` is a unique identifier. Use it when you're working with LaunchDarkly support to debug a problem with a specific API call. ### HTTP Status - Error Response Codes | Code | Definition | Desc. | Possible Solution | | ---- | ----------------- | ------------------------------------------------------------------------------------------- | ---------------------------------------------------------------- | | 400 | Bad Request | A request that fails may return this HTTP response code. | Ensure JSON syntax in request body is correct. | | 401 | Unauthorized | User doesn't have permission to an API call. | Ensure your SDK key is good. | | 403 | Forbidden | User does not have permission for operation. | Ensure that the user or access token has proper permissions set. | | 409 | Conflict | The API request could not be completed because it conflicted with a concurrent API request. | Retry your request. | | 429 | Too many requests | See [Rate limiting](/#section/Rate-limiting). | Wait and try again later. | ## CORS The LaunchDarkly API supports Cross Origin Resource Sharing (CORS) for AJAX requests from any origin. If an `Origin` header is given in a request, it will be echoed as an explicitly allowed origin. 
Otherwise, a wildcard is returned: `Access-Control-Allow-Origin: *`. For more information on CORS, see the [CORS W3C Recommendation](http://www.w3.org/TR/cors). Example CORS headers might look like: ```http Access-Control-Allow-Headers: Accept, Content-Type, Content-Length, Accept-Encoding, Authorization Access-Control-Allow-Methods: OPTIONS, GET, DELETE, PATCH Access-Control-Allow-Origin: * Access-Control-Max-Age: 300 ``` You can make authenticated CORS calls just as you would make same-origin calls, using either [token or session-based authentication](#section/Authentication). If you’re using session auth, you should set the `withCredentials` property for your `xhr` request to `true`. You should never expose your access tokens to untrusted users. ## Rate limiting We use several rate limiting strategies to ensure the availability of our APIs. Rate-limited calls to our APIs will return a `429` status code. Calls to our APIs will include headers indicating the current rate limit status. The specific headers returned depend on the API route being called. The limits differ based on the route, authentication mechanism, and other factors. Routes that are not rate limited may not contain any of the headers described below. > ### Rate limiting and SDKs > > LaunchDarkly SDKs are never rate limited and do not use the API endpoints defined here. LaunchDarkly uses a different set of approaches, including streaming/server-sent events and a global CDN, to ensure availability to the routes used by LaunchDarkly SDKs. > > The client-side ID is safe to embed in untrusted contexts. It's designed for use in client-side JavaScript. ### Global rate limits Authenticated requests are subject to a global limit. This is the maximum number of calls that can be made to the API per ten seconds. All personal access tokens on the account share this limit, so exceeding the limit with one access token will impact other tokens. Calls that are subject to global rate limits will return the headers below: | Header name | Description | | ------------------------------ | -------------------------------------------------------------------------------- | | `X-Ratelimit-Global-Remaining` | The maximum number of requests the account is permitted to make per ten seconds. | | `X-Ratelimit-Reset` | The time at which the current rate limit window resets in epoch milliseconds. | We do not publicly document the specific number of calls that can be made globally. This limit may change, and we encourage clients to program against the specification, relying on the two headers defined above, rather than hardcoding to the current limit. ### Route-level rate limits Some authenticated routes have custom rate limits. These also reset every ten seconds. Any access tokens hitting the same route share this limit, so exceeding the limit with one access token may impact other tokens. Calls that are subject to route-level rate limits will return the headers below: | Header name | Description | | ----------------------------- | ----------------------------------------------------------------------------------------------------- | | `X-Ratelimit-Route-Remaining` | The maximum number of requests to the current route the account is permitted to make per ten seconds. | | `X-Ratelimit-Reset` | The time at which the current rate limit window resets in epoch milliseconds. | A _route_ represents a specific URL pattern and verb. 
For example, the [Delete environment](/tag/Environments#operation/deleteEnvironment) endpoint is considered a single route, and each call to delete an environment counts against your route-level rate limit for that route. We do not publicly document the specific number of calls that can be made to each endpoint per ten seconds. These limits may change, and we encourage clients to program against the specification, relying on the two headers defined above, rather than hardcoding to the current limits. ### IP-based rate limiting We also employ IP-based rate limiting on some API routes. If you hit an IP-based rate limit, your API response will include a `Retry-After` header indicating how long to wait before re-trying the call. Clients must wait at least `Retry-After` seconds before making additional calls to our API, and should employ jitter and backoff strategies to avoid triggering rate limits again. ## OpenAPI (Swagger) We have a [complete OpenAPI (Swagger) specification](https://app.launchdarkly.com/api/v2/openapi.json) for our API. You can use this specification to generate client libraries to interact with our REST API in your language of choice. This specification is supported by several API-based tools such as Postman and Insomnia. In many cases, you can directly import our specification to ease use in navigating the APIs in the tooling. ## Client libraries We auto-generate multiple client libraries based on our OpenAPI specification. To learn more, visit [GitHub](https://github.com/search?q=topic%3Alaunchdarkly-api+org%3Alaunchdarkly&type=Repositories). ## Method Overriding Some firewalls and HTTP clients restrict the use of verbs other than `GET` and `POST`. In those environments, our API endpoints that use `PUT`, `PATCH`, and `DELETE` verbs will be inaccessible. To avoid this issue, our API supports the `X-HTTP-Method-Override` header, allowing clients to \"tunnel\" `PUT`, `PATCH`, and `DELETE` requests via a `POST` request. For example, if you wish to call one of our `PATCH` resources via a `POST` request, you can include `X-HTTP-Method-Override:PATCH` as a header. ## Beta resources We sometimes release new API resources in **beta** status before we release them with general availability. Resources that are in beta are still undergoing testing and development. They may change without notice, including becoming backwards incompatible. We try to promote resources into general availability as quickly as possible. This happens after sufficient testing and when we're satisfied that we no longer need to make backwards-incompatible changes. We mark beta resources with a \"Beta\" callout in our documentation, pictured below: > ### This feature is in beta > > To use this feature, pass in a header including the `LD-API-Version` key with value set to `beta`. Use this header with each call. To learn more, read [Beta resources](/#section/Beta-resources). ### Using beta resources To use a beta resource, you must include a header in the request. If you call a beta resource without this header, you'll receive a `403` response. Use this header: ``` LD-API-Version: beta ``` ## Versioning We try hard to keep our REST API backwards compatible, but we occasionally have to make backwards-incompatible changes in the process of shipping new features. These breaking changes can cause unexpected behavior if you don't prepare for them accordingly. Updates to our REST API include support for the latest features in LaunchDarkly. 
We also release a new version of our REST API every time we make a breaking change. We provide simultaneous support for multiple API versions so you can migrate from your current API version to a new version at your own pace. ### Setting the API version per request You can set the API version on a specific request by sending an `LD-API-Version` header, as shown in the example below: ``` LD-API-Version: 20191212 ``` The header value is the version number of the API version you'd like to request. The number for each version corresponds to the date the version was released. In the example above the version `20191212` corresponds to December 12, 2019. ### Setting the API version per access token When creating an access token, you must specify a specific version of the API to use. This ensures that integrations using this token cannot be broken by version changes. Tokens created before versioning was released have their version set to `20160426` (the version of the API that existed before versioning) so that they continue working the same way they did before versioning. If you would like to upgrade your integration to use a new API version, you can explicitly set the header described above. > ### Best practice: Set the header for every client or integration > > We recommend that you set the API version header explicitly in any client or integration you build. > > Only rely on the access token API version during manual testing. # noqa: E501
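    For illustration, here is a minimal sketch of a direct call to the environment resource defined in this module, using the third-party `requests` package. The access token value and the project and environment keys are placeholders, not values from this client:

    ```python
    import requests

    headers = {
        'Authorization': 'EXAMPLE_TOKEN',      # personal access token
        'LD-API-Version': '20191212',          # pin the API version per request
    }
    resp = requests.get(
        'https://app.launchdarkly.com/api/v2/projects/my-project/environments/production',
        headers=headers,
    )
    resp.raise_for_status()
    print(resp.json())
    ```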
The version of the OpenAPI document: 2.0
Contact: support@launchdarkly.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from launchdarkly_api.api_client import ApiClient, Endpoint as _Endpoint
from launchdarkly_api.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from launchdarkly_api.model.environment import Environment
from launchdarkly_api.model.environment_post import EnvironmentPost
from launchdarkly_api.model.forbidden_error_rep import ForbiddenErrorRep
from launchdarkly_api.model.invalid_request_error_rep import InvalidRequestErrorRep
from launchdarkly_api.model.json_patch import JSONPatch
from launchdarkly_api.model.not_found_error_rep import NotFoundErrorRep
from launchdarkly_api.model.rate_limited_error_rep import RateLimitedErrorRep
from launchdarkly_api.model.status_conflict_error_rep import StatusConflictErrorRep
from launchdarkly_api.model.unauthorized_error_rep import UnauthorizedErrorRep
class EnvironmentsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
self.delete_environment_endpoint = _Endpoint(
settings={
'response_type': None,
'auth': [
'ApiKey'
],
'endpoint_path': '/api/v2/projects/{projectKey}/environments/{environmentKey}',
'operation_id': 'delete_environment',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'project_key',
'environment_key',
],
'required': [
'project_key',
'environment_key',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_key':
(str,),
'environment_key':
(str,),
},
'attribute_map': {
'project_key': 'projectKey',
'environment_key': 'environmentKey',
},
'location_map': {
'project_key': 'path',
'environment_key': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_environment_endpoint = _Endpoint(
settings={
'response_type': (Environment,),
'auth': [
'ApiKey'
],
'endpoint_path': '/api/v2/projects/{projectKey}/environments/{environmentKey}',
'operation_id': 'get_environment',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_key',
'environment_key',
],
'required': [
'project_key',
'environment_key',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_key':
(str,),
'environment_key':
(str,),
},
'attribute_map': {
'project_key': 'projectKey',
'environment_key': 'environmentKey',
},
'location_map': {
'project_key': 'path',
'environment_key': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.patch_environment_endpoint = _Endpoint(
settings={
'response_type': (Environment,),
'auth': [
'ApiKey'
],
'endpoint_path': '/api/v2/projects/{projectKey}/environments/{environmentKey}',
'operation_id': 'patch_environment',
'http_method': 'PATCH',
'servers': None,
},
params_map={
'all': [
'project_key',
'environment_key',
'json_patch',
],
'required': [
'project_key',
'environment_key',
'json_patch',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_key':
(str,),
'environment_key':
(str,),
'json_patch':
(JSONPatch,),
},
'attribute_map': {
'project_key': 'projectKey',
'environment_key': 'environmentKey',
},
'location_map': {
'project_key': 'path',
'environment_key': 'path',
'json_patch': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
self.post_environment_endpoint = _Endpoint(
settings={
'response_type': (Environment,),
'auth': [
'ApiKey'
],
'endpoint_path': '/api/v2/projects/{projectKey}/environments',
'operation_id': 'post_environment',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'project_key',
'environment_post',
],
'required': [
'project_key',
'environment_post',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_key':
(str,),
'environment_post':
(EnvironmentPost,),
},
'attribute_map': {
'project_key': 'projectKey',
},
'location_map': {
'project_key': 'path',
'environment_post': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
self.reset_environment_mobile_key_endpoint = _Endpoint(
settings={
'response_type': (Environment,),
'auth': [
'ApiKey'
],
'endpoint_path': '/api/v2/projects/{projectKey}/environments/{envKey}/mobileKey',
'operation_id': 'reset_environment_mobile_key',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'project_key',
'env_key',
],
'required': [
'project_key',
'env_key',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_key':
(str,),
'env_key':
(str,),
},
'attribute_map': {
'project_key': 'projectKey',
'env_key': 'envKey',
},
'location_map': {
'project_key': 'path',
'env_key': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.reset_environment_sdk_key_endpoint = _Endpoint(
settings={
'response_type': (Environment,),
'auth': [
'ApiKey'
],
'endpoint_path': '/api/v2/projects/{projectKey}/environments/{envKey}/apiKey',
'operation_id': 'reset_environment_sdk_key',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'project_key',
'env_key',
'expiry',
],
'required': [
'project_key',
'env_key',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_key':
(str,),
'env_key':
(str,),
'expiry':
(int,),
},
'attribute_map': {
'project_key': 'projectKey',
'env_key': 'envKey',
'expiry': 'expiry',
},
'location_map': {
'project_key': 'path',
'env_key': 'path',
'expiry': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
def delete_environment(
self,
project_key,
environment_key,
**kwargs
):
"""Delete environment # noqa: E501
        Delete an environment by key. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_environment(project_key, environment_key, async_req=True)
>>> result = thread.get()
Args:
project_key (str): The project key
environment_key (str): The environment key
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_key'] = \
project_key
kwargs['environment_key'] = \
environment_key
return self.delete_environment_endpoint.call_with_http_info(**kwargs)
def get_environment(
self,
project_key,
environment_key,
**kwargs
):
"""Get environment # noqa: E501
> ### Approval settings > > The `approvalSettings` key is only returned when the Flag Approvals feature is enabled. Get an environment given a project and key. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_environment(project_key, environment_key, async_req=True)
>>> result = thread.get()
Args:
project_key (str): The project key
environment_key (str): The environment key
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Environment
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_key'] = \
project_key
kwargs['environment_key'] = \
environment_key
return self.get_environment_endpoint.call_with_http_info(**kwargs)
def patch_environment(
self,
project_key,
environment_key,
json_patch,
**kwargs
):
"""Update environment # noqa: E501
        > ### Approval settings > > The `approvalSettings` key is only returned when the Flag Approvals feature is enabled. > > Only the `canReviewOwnRequest`, `canApplyDeclinedChanges`, `minNumApprovals`, `required` and `requiredApprovalTags` fields are editable. > > If you try to patch the environment by setting both `required` and `requiredApprovalTags`, it fails and an error appears. Users can specify either required approvals for all flags in an environment or those with specific tags, but not both. Only customers on an Enterprise plan can require approval for flag updates by either mechanism. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_environment(project_key, environment_key, json_patch, async_req=True)
>>> result = thread.get()
Args:
project_key (str): The project key
environment_key (str): The environment key
json_patch (JSONPatch):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Environment
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_key'] = \
project_key
kwargs['environment_key'] = \
environment_key
kwargs['json_patch'] = \
json_patch
return self.patch_environment_endpoint.call_with_http_info(**kwargs)
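    # Illustrative sketch only (not generated code): the project/environment keys and the
    # patched value are placeholders, and the exact patch-model constructors may differ
    # between client versions, so treat this as a hedged example rather than canonical usage.
    #
    #   patch = JSONPatch([
    #       {'op': 'replace', 'path': '/requireComments', 'value': True},
    #   ])
    #   updated_env = environments_api.patch_environment('my-project', 'production', patch)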
def post_environment(
self,
project_key,
environment_post,
**kwargs
):
"""Create environment # noqa: E501
> ### Approval settings > > The `approvalSettings` key is only returned when the Flag Approvals feature is enabled. > > You cannot update approval settings when creating new environments. Update approval settings with the PATCH Environment API. Create a new environment in a specified project with a given name, key, swatch color, and default TTL. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_environment(project_key, environment_post, async_req=True)
>>> result = thread.get()
Args:
project_key (str): The project key
environment_post (EnvironmentPost):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Environment
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_key'] = \
project_key
kwargs['environment_post'] = \
environment_post
return self.post_environment_endpoint.call_with_http_info(**kwargs)
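    # Illustrative sketch only (not generated code): the field names follow the
    # EnvironmentPost model imported above, and the name/key/color values are placeholders.
    #
    #   body = EnvironmentPost(name='Staging', key='staging', color='F5A623')
    #   created_env = environments_api.post_environment('my-project', body)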
def reset_environment_mobile_key(
self,
project_key,
env_key,
**kwargs
):
"""Reset environment mobile SDK key # noqa: E501
Reset an environment's mobile key. The optional expiry for the old key is deprecated for this endpoint, so the old key will always expire immediately. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.reset_environment_mobile_key(project_key, env_key, async_req=True)
>>> result = thread.get()
Args:
project_key (str): The project key
env_key (str): The environment key
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Environment
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_key'] = \
project_key
kwargs['env_key'] = \
env_key
return self.reset_environment_mobile_key_endpoint.call_with_http_info(**kwargs)
def reset_environment_sdk_key(
self,
project_key,
env_key,
**kwargs
):
"""Reset environment SDK key # noqa: E501
Reset an environment's SDK key with an optional expiry time for the old key. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.reset_environment_sdk_key(project_key, env_key, async_req=True)
>>> result = thread.get()
Args:
project_key (str): The project key
env_key (str): The environment key
Keyword Args:
            expiry (int): The time at which you want the old SDK key to expire, in UNIX milliseconds. By default, the key expires immediately. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Environment
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_key'] = \
project_key
kwargs['env_key'] = \
env_key
return self.reset_environment_sdk_key_endpoint.call_with_http_info(**kwargs)
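# Illustrative end-to-end sketch only (not generated code). The configuration attribute
# names follow the usual openapi-generator Python conventions and the token is a placeholder:
#
#   import launchdarkly_api
#   configuration = launchdarkly_api.Configuration()
#   configuration.api_key['ApiKey'] = 'EXAMPLE_TOKEN'
#   with launchdarkly_api.ApiClient(configuration) as api_client:
#       environments_api = EnvironmentsApi(api_client)
#       env = environments_api.get_environment('my-project', 'production')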
| 66.012048
| 24467
| 0.577989
|
a6c9268a6c555368ded9d901e7176300104dbc30
| 650
|
py
|
Python
|
msgboard/migrations/0001_initial.py
|
OfirMatasas/beyond-tutorial
|
898b5bf03dc4fba3e71eba114dce2587d330063a
|
[
"MIT"
] | null | null | null |
msgboard/migrations/0001_initial.py
|
OfirMatasas/beyond-tutorial
|
898b5bf03dc4fba3e71eba114dce2587d330063a
|
[
"MIT"
] | null | null | null |
msgboard/migrations/0001_initial.py
|
OfirMatasas/beyond-tutorial
|
898b5bf03dc4fba3e71eba114dce2587d330063a
|
[
"MIT"
] | null | null | null |
# Generated by Django 4.0.2 on 2022-02-12 17:20
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Message',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('author', models.CharField(max_length=200)),
('text', models.TextField()),
('date', models.DateTimeField(default=django.utils.timezone.now)),
],
),
]
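# For reference, a model definition that would generate this initial migration could look
# like the sketch below (field definitions are taken from the operations above; the model
# would presumably live in msgboard/models.py):
#
#   from django.db import models
#   from django.utils import timezone
#
#   class Message(models.Model):
#       author = models.CharField(max_length=200)
#       text = models.TextField()
#       date = models.DateTimeField(default=timezone.now)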
| 26
| 117
| 0.586154
|
01fab7fcd8917dc62b8cbb7ed9adc8ec53169834
| 1255
|
py
|
Python
|
molecule/pyaes/src/index.py
|
Molecule-Serverless/serverless-faas-workbench
|
296f5f2709b3e7b168db00473d0a28b8bf1c062f
|
[
"Apache-2.0"
] | 1
|
2021-12-11T00:49:00.000Z
|
2021-12-11T00:49:00.000Z
|
molecule/pyaes/src/index.py
|
Molecule-Serverless/serverless-faas-workbench
|
296f5f2709b3e7b168db00473d0a28b8bf1c062f
|
[
"Apache-2.0"
] | null | null | null |
molecule/pyaes/src/index.py
|
Molecule-Serverless/serverless-faas-workbench
|
296f5f2709b3e7b168db00473d0a28b8bf1c062f
|
[
"Apache-2.0"
] | null | null | null |
from time import time
import random
import string
import pyaes
def generate(length):
letters = string.ascii_lowercase + string.digits
return ''.join(random.choice(letters) for i in range(length))
def handler(event):
length_of_message = event['length_of_message']
num_of_iterations = event['num_of_iterations']
message = generate(length_of_message)
# 128-bit key (16 bytes)
KEY = b'\xa1\xf6%\x8c\x87}_\xcd\x89dHE8\xbf\xc9,'
start = time()
for loops in range(num_of_iterations):
aes = pyaes.AESModeOfOperationCTR(KEY)
ciphertext = aes.encrypt(message)
print(ciphertext)
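        # Decrypting requires a fresh CTR instance: constructing AESModeOfOperationCTR with
        # the same key resets the counter to its default start, so the keystream matches.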
aes = pyaes.AESModeOfOperationCTR(KEY)
plaintext = aes.decrypt(ciphertext)
print(plaintext)
aes = None
latency = time() - start
return latency
def invokeHandler():
startTime = int(round(time() * 1000))
ret = handler({'length_of_message': 256, 'num_of_iterations':10})
retTime = int(round(time() * 1000))
output = {'results': ret,
'startTime': startTime,
'retTime' : retTime,
'invokeTime': startTime
}
    with open("log.txt", "w") as logf:
        logf.write(str(output))
print(output)
if __name__ == "__main__":
invokeHandler()
| 24.134615
| 69
| 0.646215
|
a6351a0638f8639329c879c7b9f0f2f1909f8288
| 1640
|
py
|
Python
|
offline/wallet/generate_wallets.py
|
jgeofil/avax-python
|
b09e78e3d7e1c35db5ae42e3918e960e775f2d45
|
[
"MIT"
] | 25
|
2021-05-16T23:43:47.000Z
|
2022-03-29T03:08:30.000Z
|
offline/wallet/generate_wallets.py
|
zefonseca/ava-python
|
9c72af7c720edfab9c73379a102cf6a11d864ebd
|
[
"MIT"
] | 2
|
2021-04-26T11:43:22.000Z
|
2021-06-04T07:55:22.000Z
|
offline/wallet/generate_wallets.py
|
jgeofil/avax-python
|
b09e78e3d7e1c35db5ae42e3918e960e775f2d45
|
[
"MIT"
] | 4
|
2021-08-06T10:55:58.000Z
|
2022-03-29T08:03:05.000Z
|
#!/usr/bin/python3
# avax-python : Python tools for the exploration of the Avalanche AVAX network.
#
# Documentation at https://crypto.bi
"""
Copyright © 2021 ojrdev
Support this Open Source project!
Donate to X-avax1qr6yzjykcjmeflztsgv6y88dl0xnlel3chs3r4
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# --#--#--
# Generate random AVAX wallet phrases
# Usage: generate_wallets.py <NUM> to generate NUM wallets, one per line.
from avaxpython.wallet import generator
import sys
num_wallets = 1
key_strength = 256
if len(sys.argv) == 2:
num_wallets = int(sys.argv[1])
for words in generator.generate(num_wallets, key_strength):
print(words)
| 43.157895
| 463
| 0.781707
|
8c89a994629b5d83c845d0a8b891a46d03a7df49
| 12438
|
py
|
Python
|
natnet.py
|
pg1647/self_supervised_rotnet
|
5edfa31dd7b2dde8e1d093037c6c8c9745ba8f71
|
[
"MIT"
] | null | null | null |
natnet.py
|
pg1647/self_supervised_rotnet
|
5edfa31dd7b2dde8e1d093037c6c8c9745ba8f71
|
[
"MIT"
] | null | null | null |
natnet.py
|
pg1647/self_supervised_rotnet
|
5edfa31dd7b2dde8e1d093037c6c8c9745ba8f71
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torchvision
from torchvision import datasets, transforms
import torch.nn.functional as F
import torch.optim as optim
import models as mdl
import numpy as np
import argparse
import pickle
import os
import datetime
import time
import math
import shutil
from scipy.optimize import linear_sum_assignment
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
train_transform = transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((125.3/255, 123.0/255, 113.9/255), (63.0/255, 62.1/255, 66.7/255))])
test_transform = transforms.Compose([transforms.ToTensor(),transforms.Normalize((125.3/255, 123.0/255, 113.9/255), (63.0/255, 62.1/255, 66.7/255))])
class MyTrainset(Dataset):
def __init__(self):
self.cifar10 = datasets.CIFAR10(root='.',download=True,train=True,transform=train_transform)
def __getitem__(self, index):
data, target = self.cifar10[index]
# Your transformations here (or set it in CIFAR10)
return data, target, index
def __len__(self):
return len(self.cifar10)
class MyTestset(Dataset):
def __init__(self):
self.cifar10 = datasets.CIFAR10(root='.',download=True,train=False,transform=test_transform)
def __getitem__(self, index):
data, target = self.cifar10[index]
# Your transformations here (or set it in CIFAR10)
return data, target, index
def __len__(self):
return len(self.cifar10)
def get_parser():
parser = argparse.ArgumentParser(description='Scalable Compression')
parser.add_argument('--batch_size', type=int, default=128,
help='input batch size for training (default: 128)')
parser.add_argument('--epochs', type=int, default=1,
help='number of epochs to train (default: 1)')
parser.add_argument('--lr', type=float, default=0.1,
help='learning rate (default: 0.1)')
parser.add_argument('--device', type=str, default='cuda',
help='Device to be used (acceptable values: cuda, cpu) (default: cuda)')
parser.add_argument('--milestones', nargs="+", type=int, default=[30,60,80],
help='Milestones for learning rate decay (default: [30, 60, 80])')
# not sure if I need
parser.add_argument('--model', type=str, default='natnet',
                        help='model choice (acceptable values: rotnet, supervised, rot-nonlinear, rot-conv) (default: natnet)')
parser.add_argument('--nins', type=int, default=4,
help='number of nin blocks to comprise the model (default: 4)')
# not sure if I need
parser.add_argument('--layer', type=int, default=2,
help='rotnet layer to take features from to use for classifier (default: 2)')
parser.add_argument('--opt', type=str, default='sgd',
help='Optimizer to be used (acceptable values: sgd, adam) (default: sgd)')
parser.add_argument('--momentum', type=float, default=0.9,
                        help='Momentum for optimizer (default: 0.9)')
parser.add_argument('--weight_decay', default=5e-4, type=float)
parser.add_argument('--print_after_batches', type=int, default=100,
                        help='Print training progress every print_after_batches batches (default: 100)')
parser.add_argument('--results_dir', default='results/', type=str)
parser.add_argument('--suffix', default='', type=str,
help="When I need to custom name the final results folder, must begin with _")
parser.add_argument('--epochs_to_save', nargs="+", type=int, default=[100],
help='List of epochs to save (default: [100])')
parser.add_argument('--d', type=int, default=10,
                        help='d size (default: 10)')
return parser
parser = get_parser()
args = parser.parse_args()
out_size = args.d
device = args.device
c = torch.from_numpy(np.random.normal(0, 1, [50000, out_size]).astype(np.float32)).to(device)
cnorm = torch.linalg.norm(c, dim=1).reshape(-1,1)
c = c/cnorm
p = torch.from_numpy(np.zeros([50000,1]).astype(np.int64)).to(device)
for i in range(p.size()[0]):
p[i] = i
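# c holds one fixed random unit-norm target vector per CIFAR-10 training image, and p maps
# each image index to its currently assigned target. The assignment is re-optimized per
# batch every few epochs with scipy's linear_sum_assignment (Hungarian algorithm), in the
# spirit of noise-as-targets self-supervised training.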
def train(args, network, train_loader, optimizer, mult, scheduler, epoch):
network.train()
total_images_till_now = 0
total_images = len(train_loader.dataset)*mult
for batch_idx, (data, target, indx) in enumerate(train_loader):
data, target, indx = Variable(data).to(device), Variable(target).to(device), Variable(indx).to(device)
optimizer.zero_grad()
output, _, _ = network(data)
output = output/torch.linalg.norm(output)
pbatch = p[indx]
        if epoch % 3 == 0:
            # Re-solve the in-batch assignment between network outputs and noise targets
            # with the Hungarian algorithm, then store the permuted target indices in p.
            batch = output.size()[0]
            cost = np.zeros([batch, batch]).astype(np.float32)
            for i in range(batch):
                for j in range(batch):
                    cost[i][j] = torch.linalg.norm(output[i] - c[p[indx[j]]]).item()
            row_ind, col_ind = linear_sum_assignment(cost)
            for r, s in zip(row_ind, col_ind):
                p[indx[r]] = pbatch[s]
y = c[p[indx]].reshape(-1,out_size).to(device)
lost = torch.nn.MSELoss()
loss = lost(output, y)
loss.backward()
optimizer.step()
total_images_till_now = total_images_till_now + len(data)
if batch_idx % args.print_after_batches == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch+1, total_images_till_now, total_images,
100. * total_images_till_now/total_images, loss.item()))
scheduler.step()
return
def test(args, network, test_loader, mult, datatype):
network.eval()
test_loss = 0
correct = 0
for data,target, indx in test_loader:
data, target, indx = data.to(args.device), target.to(device), indx.to(device)
output, _, _ = network(data)
y = c[p[indx]].reshape(-1,out_size).to(device)
tloss = torch.nn.MSELoss()
test_loss += tloss(output, y).item() # sum up batch loss
pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.data.view_as(pred)).cpu().sum()
total_images = len(test_loader.dataset)*mult
test_loss /= total_images
test_acc = 100. * correct / total_images
print('\n{} set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
datatype, test_loss, correct, total_images, test_acc))
return test_loss, test_acc
def main(args):
# hard coded values
in_channels = 3 # rgb channels of input image
out_classes = out_size # d length
lr_decay_rate = 0.2 # lr is multiplied by decay rate after a milestone epoch is reached
mult = 1 # data become mult times
####################
#train_transform = transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor()])
#test_transform = transforms.ToTensor()
trainset = MyTrainset()
testset = MyTestset()
#testset = datasets.CIFAR10(root='.', train=False, download=True, transform=test_transform)
train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=0)
test_loader = torch.utils.data.DataLoader(testset, batch_size=args.batch_size, shuffle=False, num_workers=0)
network = mdl.RotNet(in_channels=in_channels, num_nin_blocks=args.nins, out_classes=out_classes).to(args.device)
if args.opt == 'adam':
optimizer = optim.Adam(network.parameters(), lr=args.lr, weight_decay=args.weight_decay)
else:
optimizer = optim.SGD(network.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=args.milestones, gamma=lr_decay_rate)
####################################### Saving information
results_dict = {}
# These will store the values for best test accuracy model
results_dict['train_loss'] = -1
results_dict['train_acc'] = -1
results_dict['test_loss'] = -1
results_dict['test_acc'] = -1
results_dict['best_acc_epoch'] = -1
# For storing training history
results_dict['train_loss_hist'] = []
results_dict['train_acc_hist'] = []
results_dict['test_loss_hist'] = []
results_dict['test_acc_hist'] = []
# directories to save models
checkpoint_path = os.path.join(args.results_dir, 'model.pth')
checkpoint_path_best_acc = os.path.join(args.results_dir, 'model_best_acc.pth')
test_acc_max = -math.inf
loop_start_time = time.time()
checkpoint = {}
for epoch in range(args.epochs):
train(args, network, train_loader, optimizer, mult, scheduler, epoch)
train_loss, train_acc = test(args, network, train_loader, mult, 'Train')
results_dict['train_loss_hist'].append(train_loss)
results_dict['train_acc_hist'].append(train_acc)
test_loss, test_acc = test(args, network, test_loader, mult, 'Test')
results_dict['test_loss_hist'].append(test_loss)
results_dict['test_acc_hist'].append(test_acc)
        print('Epoch {} finished --------------------------------------------------------------------------'.format(epoch+1))
checkpoint = {'model_state_dict': network.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'epoch':epoch+1,
'train_loss':train_loss,
'train_acc':train_acc,
'test_loss':test_loss,
'test_acc':test_acc}
if test_acc > test_acc_max:
test_acc_max = test_acc
if os.path.isfile(checkpoint_path_best_acc):
os.remove(checkpoint_path_best_acc)
torch.save(checkpoint, checkpoint_path_best_acc)
results_dict['best_acc_epoch'] = epoch+1
results_dict['train_loss'] = train_loss
results_dict['train_acc'] = train_acc
results_dict['test_loss'] = test_loss
results_dict['test_acc'] = test_acc
if epoch+1 in args.epochs_to_save:
torch.save(checkpoint, os.path.join(args.results_dir, 'model_epoch_'+str(epoch+1)+'.pth'))
torch.save(checkpoint, checkpoint_path)
print('Total time for training loop = ', time.time()-loop_start_time)
return results_dict
# Starting the program execution from here
if __name__ == '__main__':
start_time = time.time()
parser = get_parser()
args = parser.parse_args()
args.results_dir = os.path.join(args.results_dir, 'natnet_'+str(args.nins)+'_ninblocks'+args.suffix)
assert (not os.path.exists(args.results_dir))
if not os.path.exists(args.results_dir):
os.makedirs(args.results_dir)
results_file = os.path.join(args.results_dir, 'results_dict.pickle')
print('--------------------------------------------------------')
print('--------------------------------------------------------')
print('Experiment starting at ', datetime.datetime.now())
print(' ')
options = vars(args)
keys = options.keys()
for key in keys:
print(key, ': ', options[key])
print(' ')
print('--------------------------------------------------------')
print('--------------------------------------------------------')
print(' ')
print(' ')
results_dict = main(args)
# saving the configuration
for key in keys:
new_key = 'config_' + key
results_dict[new_key] = options[key]
with open(results_file, 'wb') as f:
pickle.dump(results_dict, f)
print('--------------------------------------------------------')
print('--------------------------------------------------------')
print('Total time for experiment: ', time.time()-start_time, ' seconds')
print('--------------------------------------------------------')
print('--------------------------------------------------------')
| 38.627329
| 223
| 0.607252
|
17b0c9ecd6531a7642250a3be823dcfb0c2e5a0f
| 4563
|
py
|
Python
|
gamesopt/algorithms/co.py
|
hugobb/Stochastic-Extragradient
|
69f75f3f47cb2b30cd81db617c994a13b0b5302c
|
[
"MIT"
] | 2
|
2021-11-08T16:28:28.000Z
|
2022-03-21T23:45:01.000Z
|
gamesopt/algorithms/co.py
|
hugobb/Stochastic-Extragradient
|
69f75f3f47cb2b30cd81db617c994a13b0b5302c
|
[
"MIT"
] | null | null | null |
gamesopt/algorithms/co.py
|
hugobb/Stochastic-Extragradient
|
69f75f3f47cb2b30cd81db617c994a13b0b5302c
|
[
"MIT"
] | null | null | null |
from .algorithm import Algorithm
from gamesopt.games.sampler import DoubleLoopSampler
import torch.autograd as autograd
from .lr_scheduler import BaseLR, LRScheduler
import copy
import torch
class ConsensusOptimization(Algorithm):
### The recommended learning rate is 1/L
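    # Update sketch: each player averages two independent stochastic gradient estimates and
    # adds a consensus correction from the Hamiltonian (squared-gradient-norm) term,
    #   x <- x - (lr / 2) * (g1 + g2) - lr_H * grad_x H,
    # which is exactly the parameter update performed in update() below.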
def __init__(
self, game, lr_H=None, batch_size=1, full_batch=False, *args, **kwargs
):
super().__init__(game, *args, **kwargs)
if lr_H is None:
lr_H = self.lr
self.lr_H = lr_H
if not isinstance(lr_H, LRScheduler):
self.lr_H = BaseLR(self.lr_H)
self.batch_size = batch_size
self.full_batch = full_batch
self.sampler = DoubleLoopSampler(game.sampler)
def update(self):
if self.full_batch:
grad = self.game.grad(None)
grad_1, grad_2 = grad, grad
self.n_samples += self.game.sampler.num_samples
else:
x_1, x_2 = self.sampler.sample(self.batch_size)
grad_1, grad_2 = self.game.grad(x_1), self.game.grad(x_2)
self.n_samples += 2 * self.batch_size
hamiltonian = self.game.hamiltonian(grad_1, grad_2)
grad_H = []
for player in self.game.get_players():
_g = autograd.grad(hamiltonian, player.parameters(), retain_graph=True)
grad_H.append(_g)
for i, player in enumerate(self.game.get_players()):
for p, g1, g2, gH in zip(
player.parameters(), grad_1[i], grad_2[i], grad_H[i]
):
p.data += -self.lr[i](self.k) / 2 * (g1 + g2) - self.lr_H(self.k) * gH
class SVRCO(ConsensusOptimization):
def __init__(self, game, prob=None, *args, **kwargs):
super().__init__(game, *args, **kwargs)
if prob is None:
prob = self.game.sampler.num_samples
self.prob = prob
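        # Keep a snapshot copy of the game together with full-batch gradients of the
        # game and of its Hamiltonian; these act as variance-reduction control variates.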
self.snapshot = copy.deepcopy(self.game)
self.full_grad = self.snapshot.grad()
hamiltonian = self.game.hamiltonian()
self.full_grad_H = []
for player in self.game.get_players():
_g = autograd.grad(hamiltonian, player.parameters(), retain_graph=True)
self.full_grad_H.append(_g)
self.n_samples += self.game.sampler.num_samples
def update(self):
if self.full_batch:
grad = self.game.grad()
grad_snapshot = self.snapshot.grad()
grad_1, grad_2 = grad, grad
grad_snapshot_1, grad_snapshot_2 = grad_snapshot, grad_snapshot
self.n_samples += self.game.sampler.num_samples
else:
x_1, x_2 = self.sampler.sample(self.batch_size)
grad_1, grad_2 = self.game.grad(x_1), self.game.grad(x_2)
grad_snapshot_1, grad_snapshot_2 = (
self.snapshot.grad(x_1),
self.snapshot.grad(x_2),
)
self.n_samples += 2 * self.batch_size
hamiltonian = self.game.hamiltonian(grad_1, grad_2)
grad_H = []
for player in self.game.get_players():
_g = autograd.grad(hamiltonian, player.parameters(), retain_graph=True)
grad_H.append(_g)
hamiltonian = self.snapshot.hamiltonian(grad_snapshot_1, grad_snapshot_2)
snapshot_grad_H = []
for player in self.snapshot.get_players():
_g = autograd.grad(hamiltonian, player.parameters(), retain_graph=True)
snapshot_grad_H.append(_g)
for i, player in enumerate(self.game.get_players()):
for p, g1, g2, gs1, gs2, bg, g_H, gs_H, bg_H in zip(
player.parameters(),
grad_1[i],
grad_2[i],
grad_snapshot_1[i],
grad_snapshot_2[i],
self.full_grad[i],
grad_H[i],
snapshot_grad_H[i],
self.full_grad_H[i],
):
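                # Variance-reduced estimates: stochastic gradient minus the same
                # sample's snapshot gradient, plus the stored full-batch gradient.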
g_1 = (g1 + g2) / 2 - (gs1 + gs2) / 2 + bg
g_2 = g_H - gs_H + bg_H
p.data += -self.lr[i](self.k) * g_1 - self.lr_H(self.k) * g_2
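        # With probability self.prob, re-anchor the snapshot at the current iterate
        # and recompute the full-batch gradients (loopless-SVRG-style restart).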
coin = torch.rand(1)
if coin < self.prob:
self.snapshot.load_state_dict(self.game.state_dict())
self.full_grad = self.snapshot.grad()
hamiltonian = self.game.hamiltonian()
self.full_grad_H = []
for player in self.game.get_players():
_g = autograd.grad(hamiltonian, player.parameters(), retain_graph=True)
self.full_grad_H.append(_g)
self.n_samples += self.game.sampler.num_samples
| 38.669492
| 87
| 0.581416
|
aae5ddefa2bfa7e930624085d23e46164e79efcd
| 1,146
|
py
|
Python
|
ubereats/utils/.ipynb_checkpoints/merge_shops_yokohama-checkpoint.py
|
macchino/delivery-analisys
|
7d0507580532e87143c49d91693ee25aaf504a0b
|
[
"MIT"
] | 5
|
2019-12-31T08:07:02.000Z
|
2021-08-15T06:58:50.000Z
|
ubereats/utils/.ipynb_checkpoints/merge_shops_yokohama-checkpoint.py
|
macchino/delivery-analisys
|
7d0507580532e87143c49d91693ee25aaf504a0b
|
[
"MIT"
] | 39
|
2019-12-22T03:25:10.000Z
|
2021-03-16T09:46:21.000Z
|
ubereats/utils/.ipynb_checkpoints/merge_shops_yokohama-checkpoint.py
|
macchino/delivery-analisys
|
7d0507580532e87143c49d91693ee25aaf504a0b
|
[
"MIT"
] | 2
|
2020-08-13T02:22:23.000Z
|
2021-08-15T07:00:30.000Z
|
# -*- coding: utf-8 -*-
# This is just copy-pasted from merge_shops; clean it up later.
import sys
import pandas as pd
from datetime import datetime
args = sys.argv
MASTER_FILE_PATH = './data/shop_master_yokohama.csv'
GOOGLEMAP_FILE_PATH = './data/googlemap_yokohama.csv'
dir_path = "./rawdata/shops/"
file_name_base = "hiyoshi.csv"
file_name = datetime.now().strftime('%y%m%d') + "_" + file_name_base
TARGET_FILE_PATH = dir_path + file_name
master = pd.read_csv(MASTER_FILE_PATH, index_col='id')
df = pd.read_csv(TARGET_FILE_PATH, index_col="id")
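# Merge today's scrape into the master list; rows with the same shop URL are
# de-duplicated, keeping the most recent entry.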
data = pd.concat([master, df], sort=False).drop_duplicates(subset="url",
keep="last")
# master
data.to_csv(MASTER_FILE_PATH, index=True, mode="w")
# googlemap
googlemap = pd.DataFrame()
googlemap["店名"] = data["name"]
googlemap["住所"] = data["address"]
googlemap["緯度"] = data["latitude"]
googlemap["経度"] = data["longitude"]
googlemap["開始"] = data["open_hour"]
googlemap["終了"] = data["close_hour"]
googlemap["点数"] = data["point"]
googlemap["レビュー数"] = data["reviews"]
googlemap["URL"] = data["url"]
googlemap.to_csv(GOOGLEMAP_FILE_PATH, index=False, mode="w")
| 27.285714
| 72
| 0.681501
|
b791f67a69cc689b0e9481318909049d08aebda9
| 4,570
|
py
|
Python
|
tests/test_deselect.py
|
mhallin/cov-exclude-py
|
32a834dc60ba38034c049118e35c3970a23f24ab
|
[
"MIT"
] | 4
|
2015-12-07T15:57:26.000Z
|
2016-03-23T20:33:59.000Z
|
tests/test_deselect.py
|
mhallin/cov-exclude-py
|
32a834dc60ba38034c049118e35c3970a23f24ab
|
[
"MIT"
] | 3
|
2016-02-11T12:26:01.000Z
|
2020-01-14T00:40:31.000Z
|
tests/test_deselect.py
|
mhallin/cov-exclude-py
|
32a834dc60ba38034c049118e35c3970a23f24ab
|
[
"MIT"
] | 1
|
2020-10-13T13:53:35.000Z
|
2020-10-13T13:53:35.000Z
|
import subprocess
import os.path
import time
import pytest
def from_here(*args):
return os.path.join(os.path.dirname(__file__), *args)
def write_test_files(filename, tmpdir):
with open(from_here('files', filename), 'r') as s:
f = tmpdir.join('test.py')
f.write(s.read())
if tmpdir.join('__pycache__').check():
tmpdir.join('__pycache__').remove()
if tmpdir.join('test.pyc').check():
tmpdir.join('test.pyc').remove()
def start_test_process(filename, tmpdir):
write_test_files(filename, tmpdir)
p = subprocess.Popen(['py.test', '-v', 'test.py'],
cwd=str(tmpdir),
stdout=subprocess.PIPE)
return p
def run_test_file(filename, tmpdir):
p = start_test_process(filename, tmpdir)
stdout, _ = p.communicate()
return stdout
@pytest.mark.external_dependencies
@pytest.mark.parametrize('sequence', [
# Do *not* deselect failing tests
(('simple01.py', b'1 passed'),
('simple01_fail.py', b'1 failed'),
('simple01_fail.py', b'1 failed'),
('simple01.py', b'1 passed')),
# Do *not* deselect tests with external dependencies
(('external_deps01.py', b'1 passed'),
('external_deps01.py', b'1 passed')),
# Deselect tests where the source changes are not covered by the
# test function
(('uncovered01.py', b'1 passed'),
('uncovered02.py', b'1 deselected')),
# Changes made to whitespace between covered blocks should still
# be counted, even if the line technically wasn't executed
(('whitespace01.py', b'1 passed'),
('whitespace02.py', b'1 failed')),
# Changes made to the last line can be tricky to pick up
(('whitespace03.py', b'1 passed'),
('whitespace04.py', b'1 failed')),
# Changes made to fixtures should be picked up
(('fixture01.py', b'1 passed'),
('fixture02.py', b'1 failed')),
# Changes made to parametrized definitions should be picked up,
# but we don't really care about how many of the parameters were
# selected/deselected.
(('parametrize01.py', b'3 passed'),
('parametrize02.py', b'1 failed')),
# In order to not depend on the *name* of a parameterized test,
# run the parametrized tests with more intricate data structures
(('parametrize03.py', b'3 passed'),
('parametrize04.py', b'1 failed')),
])
def test_run_changes(sequence, tmpdir):
"""Running each sequence of files should provide the expected output
each sequence has defined.
"""
assert not tmpdir.join('.cache').check()
for filename, expected in sequence:
stdout = run_test_file(filename, tmpdir)
assert expected in stdout
@pytest.mark.external_dependencies
@pytest.mark.parametrize('filename,n_tests', [
('simple01.py', 1),
('uncovered01.py', 1),
('whitespace01.py', 1),
('whitespace03.py', 1),
('fixture01.py', 1),
('parametrize01.py', 3),
])
def test_deselect_nochange(filename, n_tests, tmpdir):
"""Running the same file twice in succession should deselect all tests"""
assert not tmpdir.join('.cache').check()
expect_pass = '{} passed'.format(n_tests).encode('ascii')
expect_deselect = '{} deselected'.format(n_tests).encode('ascii')
stdout_pass = run_test_file(filename, tmpdir)
assert expect_pass in stdout_pass
stdout_deselect = run_test_file(filename, tmpdir)
assert expect_deselect in stdout_deselect
@pytest.mark.external_dependencies
@pytest.mark.parametrize('first_filename,second_filename', [
('alter_test01.py', 'alter_test02.py'),
# alter_test03/04 alters the test during the collection phase,
# which currently is a known bug in cov-exclude. Expect this test
# case to fail.
pytest.mark.xfail(('alter_test03.py', 'alter_test04.py')),
])
def test_alter_file_during_test(first_filename, second_filename, tmpdir):
"""Altering the test file during test execution should still mark the
file as changed"""
assert not tmpdir.join('.cache').check()
# Start slow test asynchronously
first_process = start_test_process(first_filename, tmpdir)
# Wait for test to start, then replace test files with new ones
time.sleep(1)
write_test_files(second_filename, tmpdir)
# Read output to verify that the test was run
stdout, _ = first_process.communicate()
assert b'1 passed' in stdout
# Start a new test run with the altered file
second_run = run_test_file(second_filename, tmpdir)
# The second run should fail
assert b'1 failed' in second_run
| 30.264901
| 77
| 0.673523
|
074e1e3f449f29c573ce5d9fed0066fa34c0f512
| 4,064
|
py
|
Python
|
Experiment/main.py
|
quilan78/MSC_project
|
18c0298bca013a62c09752446f7c391a29f38618
|
[
"Apache-2.0"
] | null | null | null |
Experiment/main.py
|
quilan78/MSC_project
|
18c0298bca013a62c09752446f7c391a29f38618
|
[
"Apache-2.0"
] | null | null | null |
Experiment/main.py
|
quilan78/MSC_project
|
18c0298bca013a62c09752446f7c391a29f38618
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import sys
import json
sys.path.append('../Commons/')
from LRP_output import *
from random import randint
def main(id_, path, important,write_path):
data = LRP_output()
data.load_json(id_, filepath=path)
if important:
write_path += "Important/"
else:
write_path += "Unimportant/"
for i in range(10):
newData = LRP_output()
full_lrp = np.array(data.greedy_LRP_encoder_forward) + np.array(data.greedy_LRP_encoder_backward)
#avg = averageLRP(full_lrp.copy())
avg = average_pos_LRP(full_lrp.copy(), 0.5)
#avg = average_weighted_absolute_lrp(full_lrp.copy(), 0.5)
if important:
newText = deleteImportantWords(i, data.input_text.copy(), avg.copy())
else:
newText = deleteUnImportantWords(i, data.input_text.copy(), avg.copy())
print(newText.count("<UNKNOWN>"))
newData.original_text = data.original_text
newData.input_text = newText
newData.original_summary = data.original_summary
newData.input_summary = data.input_summary
newData.greedy_summary = data.greedy_summary
newData.beam_summary = data.beam_summary
with open(write_path+str(id_)+"/"+str(i)+".json", 'w') as outfile:
json.dump(newData.__dict__, outfile)
def main_random(id_, counter, path, write_path):
data = LRP_output()
data.load_json(id_, filepath=path)
for i in range(10):
newData = LRP_output()
full_lrp = np.array(data.greedy_LRP_encoder_forward) + np.array(data.greedy_LRP_encoder_backward)
avg = averageLRP(full_lrp)
newText = deleteRandomWords(i, data.input_text.copy(), avg.copy())
print(newText.count("<UNKNOWN>"))
newData.original_text = data.original_text
newData.input_text = newText
newData.original_summary = data.original_summary
newData.input_summary = data.input_summary
newData.greedy_summary = data.greedy_summary
newData.beam_summary = data.beam_summary
with open(write_path+str(counter)+"/"+str(id_)+"/"+str(i)+".json", 'w') as outfile:
json.dump(newData.__dict__, outfile)
def normalize(v):
norm = np.linalg.norm(v)
if norm == 0:
return v
return v / norm
def average_weighted_absolute_lrp(lrp, alpha):
avg_lrp = np.zeros(len(lrp[0]))
for word_lrp in lrp:
word_lrp = normalize(word_lrp)
for i in range(len(word_lrp)):
if word_lrp[i] < 0 :
avg_lrp[i] -= alpha * word_lrp[i]
else:
avg_lrp[i] += word_lrp[i]
avg_lrp /= len(lrp)
return avg_lrp
def deleteRandomWords(percent, text,lrp):
nbre = int(0.01*percent * len(text))
rand = []
for i in range(nbre):
index = randint(0,len(lrp)-1)
while index in rand:
index = randint(0,len(lrp)-1)
rand.append(index)
text[index] = "<UNKNOWN>"
lrp[index] = 0
return text
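# Rank-based relevance averaging: negative scores count with weight alpha, each
# output step's scores are converted to ranks, and the ranks are averaged over
# the whole sequence.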
def average_pos_LRP(lrp, alpha):
avg_lrp = np.zeros(len(lrp[0]))
for word_lrp in lrp:
#word_lrp = np.abs(word_lrp)
for i in range(len(word_lrp)):
if word_lrp[i] < 0 :
word_lrp[i] = - alpha * word_lrp[i]
else:
word_lrp[i] = word_lrp[i]
current_order = np.zeros(len(lrp[0]))
for i in range(len(word_lrp)):
index = np.argmax(word_lrp)
current_order[index] = len(word_lrp)-i
word_lrp[index] = np.min(lrp)-1
avg_lrp += current_order
avg_lrp /= len(lrp)
return avg_lrp
def averageLRP(lrp):
avg_lrp = np.zeros(len(lrp[0]))
for word_lrp in lrp:
avg_lrp += np.abs(np.array(word_lrp))
avg_lrp /= len(lrp)
return avg_lrp
def deleteImportantWords(percent, text,lrp):
nbre = int(0.01*percent * len(text))
for i in range(nbre):
index = np.argmax(lrp)
text[index] = "<UNKNOWN>"
lrp[index] = 0
return text
def deleteUnImportantWords(percent, text,lrp):
nbre = int(0.01*percent * len(text))
for i in range(nbre):
index = np.argmin(lrp)
text[index] = "<UNKNOWN>"
lrp[index] = np.max(lrp)
return text
if __name__ == "__main__":
for i in range(1,13):
main(i, "../../Experiment/JSON/", True, "../../Experiment/ModifiedTexts/")
main(i, "../../Experiment/JSON/", False, "../../Experiment/ModifiedTexts/")
| 27.459459
| 100
| 0.669291
|
1da44f544603c872aa72c55da648278bff7de7a6
| 739
|
py
|
Python
|
NoteBooks/Curso de Python/Python/_Data/Curso_Data_estructures/Ex_Files_Python_Data_Structures/Exercise Files/GUI Code/config.py
|
Alejandro-sin/Learning_Notebooks
|
161d6bed4c7b1d171b45f61c0cc6fa91e9894aad
|
[
"MIT"
] | 1
|
2021-02-26T13:12:22.000Z
|
2021-02-26T13:12:22.000Z
|
NoteBooks/Curso de Python/Python/_Data/Curso_Data_estructures/Ex_Files_Python_Data_Structures/Exercise Files/GUI Code/config.py
|
Alejandro-sin/Learning_Notebooks
|
161d6bed4c7b1d171b45f61c0cc6fa91e9894aad
|
[
"MIT"
] | null | null | null |
NoteBooks/Curso de Python/Python/_Data/Curso_Data_estructures/Ex_Files_Python_Data_Structures/Exercise Files/GUI Code/config.py
|
Alejandro-sin/Learning_Notebooks
|
161d6bed4c7b1d171b45f61c0cc6fa91e9894aad
|
[
"MIT"
] | null | null | null |
"""
Python Data Structures - A Game-Based Approach
Robin Andrews - https://compucademy.net/
"""
# MAZE_FILE = "gui_mazes/square_maze_25x25.txt"
MAZE_FILE = "gui_mazes/pacman_maze.txt"
# MAZE_FILE = "gui_mazes/modest_maze.txt"
# MAZE_FILE = "gui_mazes/wide_maze.txt"
#MAZE_FILE = "gui_mazes/diagonal_23x23.txt"
# MAZE_FILE = "gui_mazes/walled_garden_10x10.txt"
# MAZE_FILE = "gui_mazes/walled_garden_20x20.txt"
PLAYER = "P"
OPPONENT = "O"
OBSTACLE = "*"
GAME_SPEED = 100
TARGET_SCORE = 3
WIDTH = 1200
HEIGHT = 740
BUTTON_FONT = ('Arial', 12, 'normal')
SCORE_FONT = ("Courier", 24, "bold")
GAME_OVER_FONT = ("Courier", 18, "normal")
SOUND = True
offsets = {
"right": (0, 1),
"left": (0, -1),
"up": (-1, 0),
"down": (1, 0)
}
| 24.633333
| 49
| 0.675237
|
d11f09d1a3603dde8ab4ee0d902f6af78dc44ce5
| 19,979
|
py
|
Python
|
tests/postgres_tests/test_ranges.py
|
xia0AL/baby_two
|
70244363024a36463dfaeda64e9e95ac118e1934
|
[
"BSD-3-Clause"
] | 2
|
2019-01-19T06:57:51.000Z
|
2022-03-16T13:29:03.000Z
|
tests/postgres_tests/test_ranges.py
|
ojengwa/django-1
|
f6b09a7f85c3b67b2011553838b079788c413432
|
[
"BSD-3-Clause"
] | 1
|
2021-03-24T12:53:33.000Z
|
2021-03-24T12:53:33.000Z
|
tests/postgres_tests/test_ranges.py
|
ojengwa/django-1
|
f6b09a7f85c3b67b2011553838b079788c413432
|
[
"BSD-3-Clause"
] | 4
|
2016-07-31T14:29:15.000Z
|
2021-10-19T03:32:44.000Z
|
import datetime
import json
import unittest
from psycopg2.extras import DateRange, DateTimeTZRange, NumericRange
from django import forms
from django.contrib.postgres import fields as pg_fields, forms as pg_forms
from django.contrib.postgres.validators import (
RangeMaxValueValidator, RangeMinValueValidator,
)
from django.core import exceptions, serializers
from django.db import connection
from django.test import TestCase
from django.utils import timezone
from .models import RangesModel
def skipUnlessPG92(test):
PG_VERSION = connection.pg_version
if PG_VERSION < 90200:
return unittest.skip('PostgreSQL >= 9.2 required')(test)
return test
@skipUnlessPG92
class TestSaveLoad(TestCase):
def test_all_fields(self):
now = timezone.now()
instance = RangesModel(
ints=NumericRange(0, 10),
bigints=NumericRange(10, 20),
floats=NumericRange(20, 30),
timestamps=DateTimeTZRange(now - datetime.timedelta(hours=1), now),
dates=DateRange(now.date() - datetime.timedelta(days=1), now.date()),
)
instance.save()
loaded = RangesModel.objects.get()
self.assertEqual(instance.ints, loaded.ints)
self.assertEqual(instance.bigints, loaded.bigints)
self.assertEqual(instance.floats, loaded.floats)
self.assertEqual(instance.timestamps, loaded.timestamps)
self.assertEqual(instance.dates, loaded.dates)
def test_range_object(self):
r = NumericRange(0, 10)
instance = RangesModel(ints=r)
instance.save()
loaded = RangesModel.objects.get()
self.assertEqual(r, loaded.ints)
def test_tuple(self):
instance = RangesModel(ints=(0, 10))
instance.save()
loaded = RangesModel.objects.get()
self.assertEqual(NumericRange(0, 10), loaded.ints)
def test_range_object_boundaries(self):
r = NumericRange(0, 10, '[]')
instance = RangesModel(floats=r)
instance.save()
loaded = RangesModel.objects.get()
self.assertEqual(r, loaded.floats)
self.assertTrue(10 in loaded.floats)
def test_unbounded(self):
r = NumericRange(None, None, '()')
instance = RangesModel(floats=r)
instance.save()
loaded = RangesModel.objects.get()
self.assertEqual(r, loaded.floats)
def test_empty(self):
r = NumericRange(empty=True)
instance = RangesModel(ints=r)
instance.save()
loaded = RangesModel.objects.get()
self.assertEqual(r, loaded.ints)
def test_null(self):
instance = RangesModel(ints=None)
instance.save()
loaded = RangesModel.objects.get()
self.assertEqual(None, loaded.ints)
@skipUnlessPG92
class TestQuerying(TestCase):
@classmethod
def setUpTestData(cls):
cls.objs = [
RangesModel.objects.create(ints=NumericRange(0, 10)),
RangesModel.objects.create(ints=NumericRange(5, 15)),
RangesModel.objects.create(ints=NumericRange(None, 0)),
RangesModel.objects.create(ints=NumericRange(empty=True)),
RangesModel.objects.create(ints=None),
]
def test_exact(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__exact=NumericRange(0, 10)),
[self.objs[0]],
)
def test_isnull(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__isnull=True),
[self.objs[4]],
)
def test_isempty(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__isempty=True),
[self.objs[3]],
)
def test_contains(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__contains=8),
[self.objs[0], self.objs[1]],
)
def test_contains_range(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__contains=NumericRange(3, 8)),
[self.objs[0]],
)
def test_contained_by(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__contained_by=NumericRange(0, 20)),
[self.objs[0], self.objs[1], self.objs[3]],
)
def test_overlap(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__overlap=NumericRange(3, 8)),
[self.objs[0], self.objs[1]],
)
def test_fully_lt(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__fully_lt=NumericRange(5, 10)),
[self.objs[2]],
)
def test_fully_gt(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__fully_gt=NumericRange(5, 10)),
[],
)
def test_not_lt(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__not_lt=NumericRange(5, 10)),
[self.objs[1]],
)
def test_not_gt(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__not_gt=NumericRange(5, 10)),
[self.objs[0], self.objs[2]],
)
def test_adjacent_to(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__adjacent_to=NumericRange(0, 5)),
[self.objs[1], self.objs[2]],
)
def test_startswith(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__startswith=0),
[self.objs[0]],
)
def test_endswith(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__endswith=0),
[self.objs[2]],
)
def test_startswith_chaining(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__startswith__gte=0),
[self.objs[0], self.objs[1]],
)
@skipUnlessPG92
class TestSerialization(TestCase):
test_data = (
'[{"fields": {"ints": "{\\"upper\\": 10, \\"lower\\": 0, '
'\\"bounds\\": \\"[)\\"}", "floats": "{\\"empty\\": true}", '
'"bigints": null, "timestamps": null, "dates": null}, '
'"model": "postgres_tests.rangesmodel", "pk": null}]'
)
def test_dumping(self):
instance = RangesModel(ints=NumericRange(0, 10), floats=NumericRange(empty=True))
data = serializers.serialize('json', [instance])
dumped = json.loads(data)
dumped[0]['fields']['ints'] = json.loads(dumped[0]['fields']['ints'])
check = json.loads(self.test_data)
check[0]['fields']['ints'] = json.loads(check[0]['fields']['ints'])
self.assertEqual(dumped, check)
def test_loading(self):
instance = list(serializers.deserialize('json', self.test_data))[0].object
self.assertEqual(instance.ints, NumericRange(0, 10))
self.assertEqual(instance.floats, NumericRange(empty=True))
self.assertEqual(instance.dates, None)
class TestValidators(TestCase):
def test_max(self):
validator = RangeMaxValueValidator(5)
validator(NumericRange(0, 5))
with self.assertRaises(exceptions.ValidationError) as cm:
validator(NumericRange(0, 10))
self.assertEqual(cm.exception.messages[0], 'Ensure that this range is completely less than or equal to 5.')
self.assertEqual(cm.exception.code, 'max_value')
def test_min(self):
validator = RangeMinValueValidator(5)
validator(NumericRange(10, 15))
with self.assertRaises(exceptions.ValidationError) as cm:
validator(NumericRange(0, 10))
self.assertEqual(cm.exception.messages[0], 'Ensure that this range is completely greater than or equal to 5.')
self.assertEqual(cm.exception.code, 'min_value')
class TestFormField(TestCase):
def test_valid_integer(self):
field = pg_forms.IntegerRangeField()
value = field.clean(['1', '2'])
self.assertEqual(value, NumericRange(1, 2))
def test_valid_floats(self):
field = pg_forms.FloatRangeField()
value = field.clean(['1.12345', '2.001'])
self.assertEqual(value, NumericRange(1.12345, 2.001))
def test_valid_timestamps(self):
field = pg_forms.DateTimeRangeField()
value = field.clean(['01/01/2014 00:00:00', '02/02/2014 12:12:12'])
lower = datetime.datetime(2014, 1, 1, 0, 0, 0)
upper = datetime.datetime(2014, 2, 2, 12, 12, 12)
self.assertEqual(value, DateTimeTZRange(lower, upper))
def test_valid_dates(self):
field = pg_forms.DateRangeField()
value = field.clean(['01/01/2014', '02/02/2014'])
lower = datetime.date(2014, 1, 1)
upper = datetime.date(2014, 2, 2)
self.assertEqual(value, DateRange(lower, upper))
def test_using_split_datetime_widget(self):
class SplitDateTimeRangeField(pg_forms.DateTimeRangeField):
base_field = forms.SplitDateTimeField
class SplitForm(forms.Form):
field = SplitDateTimeRangeField()
form = SplitForm()
self.assertHTMLEqual(str(form), '''
<tr>
<th>
<label for="id_field_0">Field:</label>
</th>
<td>
<input id="id_field_0_0" name="field_0_0" type="text" />
<input id="id_field_0_1" name="field_0_1" type="text" />
<input id="id_field_1_0" name="field_1_0" type="text" />
<input id="id_field_1_1" name="field_1_1" type="text" />
</td>
</tr>
''')
form = SplitForm({
'field_0_0': '01/01/2014',
'field_0_1': '00:00:00',
'field_1_0': '02/02/2014',
'field_1_1': '12:12:12',
})
self.assertTrue(form.is_valid())
lower = datetime.datetime(2014, 1, 1, 0, 0, 0)
upper = datetime.datetime(2014, 2, 2, 12, 12, 12)
self.assertEqual(form.cleaned_data['field'], DateTimeTZRange(lower, upper))
def test_none(self):
field = pg_forms.IntegerRangeField(required=False)
value = field.clean(['', ''])
self.assertEqual(value, None)
def test_rendering(self):
class RangeForm(forms.Form):
ints = pg_forms.IntegerRangeField()
self.assertHTMLEqual(str(RangeForm()), '''
<tr>
<th><label for="id_ints_0">Ints:</label></th>
<td>
<input id="id_ints_0" name="ints_0" type="number" />
<input id="id_ints_1" name="ints_1" type="number" />
</td>
</tr>
''')
def test_integer_lower_bound_higher(self):
field = pg_forms.IntegerRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['10', '2'])
self.assertEqual(cm.exception.messages[0], 'The start of the range must not exceed the end of the range.')
self.assertEqual(cm.exception.code, 'bound_ordering')
def test_integer_open(self):
field = pg_forms.IntegerRangeField()
value = field.clean(['', '0'])
self.assertEqual(value, NumericRange(None, 0))
def test_integer_incorrect_data_type(self):
field = pg_forms.IntegerRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('1')
self.assertEqual(cm.exception.messages[0], 'Enter two whole numbers.')
self.assertEqual(cm.exception.code, 'invalid')
def test_integer_invalid_lower(self):
field = pg_forms.IntegerRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['a', '2'])
self.assertEqual(cm.exception.messages[0], 'Enter a whole number.')
def test_integer_invalid_upper(self):
field = pg_forms.IntegerRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['1', 'b'])
self.assertEqual(cm.exception.messages[0], 'Enter a whole number.')
def test_integer_required(self):
field = pg_forms.IntegerRangeField(required=True)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['', ''])
self.assertEqual(cm.exception.messages[0], 'This field is required.')
value = field.clean([1, ''])
self.assertEqual(value, NumericRange(1, None))
def test_float_lower_bound_higher(self):
field = pg_forms.FloatRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['1.8', '1.6'])
self.assertEqual(cm.exception.messages[0], 'The start of the range must not exceed the end of the range.')
self.assertEqual(cm.exception.code, 'bound_ordering')
def test_float_open(self):
field = pg_forms.FloatRangeField()
value = field.clean(['', '3.1415926'])
self.assertEqual(value, NumericRange(None, 3.1415926))
def test_float_incorrect_data_type(self):
field = pg_forms.FloatRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('1.6')
self.assertEqual(cm.exception.messages[0], 'Enter two numbers.')
self.assertEqual(cm.exception.code, 'invalid')
def test_float_invalid_lower(self):
field = pg_forms.FloatRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['a', '3.1415926'])
self.assertEqual(cm.exception.messages[0], 'Enter a number.')
def test_float_invalid_upper(self):
field = pg_forms.FloatRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['1.61803399', 'b'])
self.assertEqual(cm.exception.messages[0], 'Enter a number.')
def test_float_required(self):
field = pg_forms.FloatRangeField(required=True)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['', ''])
self.assertEqual(cm.exception.messages[0], 'This field is required.')
value = field.clean(['1.61803399', ''])
self.assertEqual(value, NumericRange(1.61803399, None))
def test_date_lower_bound_higher(self):
field = pg_forms.DateRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['2013-04-09', '1976-04-16'])
self.assertEqual(cm.exception.messages[0], 'The start of the range must not exceed the end of the range.')
self.assertEqual(cm.exception.code, 'bound_ordering')
def test_date_open(self):
field = pg_forms.DateRangeField()
value = field.clean(['', '2013-04-09'])
self.assertEqual(value, DateRange(None, datetime.date(2013, 4, 9)))
def test_date_incorrect_data_type(self):
field = pg_forms.DateRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('1')
self.assertEqual(cm.exception.messages[0], 'Enter two valid dates.')
self.assertEqual(cm.exception.code, 'invalid')
def test_date_invalid_lower(self):
field = pg_forms.DateRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['a', '2013-04-09'])
self.assertEqual(cm.exception.messages[0], 'Enter a valid date.')
def test_date_invalid_upper(self):
field = pg_forms.DateRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['2013-04-09', 'b'])
self.assertEqual(cm.exception.messages[0], 'Enter a valid date.')
def test_date_required(self):
field = pg_forms.DateRangeField(required=True)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['', ''])
self.assertEqual(cm.exception.messages[0], 'This field is required.')
value = field.clean(['1976-04-16', ''])
self.assertEqual(value, DateRange(datetime.date(1976, 4, 16), None))
def test_datetime_lower_bound_higher(self):
field = pg_forms.DateTimeRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['2006-10-25 14:59', '2006-10-25 14:58'])
self.assertEqual(cm.exception.messages[0], 'The start of the range must not exceed the end of the range.')
self.assertEqual(cm.exception.code, 'bound_ordering')
def test_datetime_open(self):
field = pg_forms.DateTimeRangeField()
value = field.clean(['', '2013-04-09 11:45'])
self.assertEqual(value, DateTimeTZRange(None, datetime.datetime(2013, 4, 9, 11, 45)))
def test_datetime_incorrect_data_type(self):
field = pg_forms.DateTimeRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('2013-04-09 11:45')
self.assertEqual(cm.exception.messages[0], 'Enter two valid date/times.')
self.assertEqual(cm.exception.code, 'invalid')
def test_datetime_invalid_lower(self):
field = pg_forms.DateTimeRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['45', '2013-04-09 11:45'])
self.assertEqual(cm.exception.messages[0], 'Enter a valid date/time.')
def test_datetime_invalid_upper(self):
field = pg_forms.DateTimeRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['2013-04-09 11:45', 'sweet pickles'])
self.assertEqual(cm.exception.messages[0], 'Enter a valid date/time.')
def test_datetime_required(self):
field = pg_forms.DateTimeRangeField(required=True)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['', ''])
self.assertEqual(cm.exception.messages[0], 'This field is required.')
value = field.clean(['2013-04-09 11:45', ''])
self.assertEqual(value, DateTimeTZRange(datetime.datetime(2013, 4, 9, 11, 45), None))
def test_model_field_formfield_integer(self):
model_field = pg_fields.IntegerRangeField()
form_field = model_field.formfield()
self.assertIsInstance(form_field, pg_forms.IntegerRangeField)
def test_model_field_formfield_biginteger(self):
model_field = pg_fields.BigIntegerRangeField()
form_field = model_field.formfield()
self.assertIsInstance(form_field, pg_forms.IntegerRangeField)
def test_model_field_formfield_float(self):
model_field = pg_fields.FloatRangeField()
form_field = model_field.formfield()
self.assertIsInstance(form_field, pg_forms.FloatRangeField)
def test_model_field_formfield_date(self):
model_field = pg_fields.DateRangeField()
form_field = model_field.formfield()
self.assertIsInstance(form_field, pg_forms.DateRangeField)
def test_model_field_formfield_datetime(self):
model_field = pg_fields.DateTimeRangeField()
form_field = model_field.formfield()
self.assertIsInstance(form_field, pg_forms.DateTimeRangeField)
class TestWidget(TestCase):
def test_range_widget(self):
f = pg_forms.ranges.DateTimeRangeField()
self.assertHTMLEqual(
f.widget.render('datetimerange', ''),
'<input type="text" name="datetimerange_0" /><input type="text" name="datetimerange_1" />'
)
self.assertHTMLEqual(
f.widget.render('datetimerange', None),
'<input type="text" name="datetimerange_0" /><input type="text" name="datetimerange_1" />'
)
dt_range = DateTimeTZRange(
datetime.datetime(2006, 1, 10, 7, 30),
datetime.datetime(2006, 2, 12, 9, 50)
)
self.assertHTMLEqual(
f.widget.render('datetimerange', dt_range),
'<input type="text" name="datetimerange_0" value="2006-01-10 07:30:00" /><input type="text" name="datetimerange_1" value="2006-02-12 09:50:00" />'
)
| 38.794175
| 158
| 0.636769
|
dd07f1fcd3603cf4c0c96aa174d9a5c83233801a
| 6,405
|
py
|
Python
|
split-patch.py
|
austinpray/split-patch
|
3db1e9b3fe6e379240cb1d7783d560a98760c5fc
|
[
"MIT"
] | null | null | null |
split-patch.py
|
austinpray/split-patch
|
3db1e9b3fe6e379240cb1d7783d560a98760c5fc
|
[
"MIT"
] | null | null | null |
split-patch.py
|
austinpray/split-patch
|
3db1e9b3fe6e379240cb1d7783d560a98760c5fc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import argparse
import os
import readline
import sys
import subprocess
import shutil
from tempfile import NamedTemporaryFile
from termcolor import colored
from pygments import highlight
from pygments.lexers import DiffLexer
from pygments.formatters import TerminalFormatter
from collections import defaultdict
from unidiff import PatchSet
args = None
patch = None
buckets = defaultdict(list)
all_assigned_hunks = set()
def warning(message):
print(colored(message, "red"))
def print_buckets():
for bucket in buckets:
print(f" {bucket}")
def create_new_bucket(name):
if name in buckets:
return False
buckets[name] = []
return True
def assign_to(path, hunk, name):
if name not in buckets:
if os.path.isfile(f"{name}.patch"):
ask(f"{name}.patch exists, appending there. ")
create_new_bucket(name)
else:
return False
all_assigned_hunks.add(id(hunk))
buckets[name] += [(path, hunk)]
return True
def print_hunk(path, hunk, force_less=False):
text = highlight(diff_header(path) + str(hunk), DiffLexer(), TerminalFormatter())
global args
if force_less or not args.no_less and text.count("\n") + 1 > shutil.get_terminal_size().lines:
with NamedTemporaryFile(suffix=".patch", mode="w") as temp_file:
temp_file.write(text)
temp_file.flush()
subprocess.run(["less", "-R", temp_file.name])
print(chr(27) + "[2J")
print(text)
def diff_header(path):
return f"--- a/{path}\n+++ a/{path}\n"
def save_patches():
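    # Append each bucket's hunks to "<bucket>.patch", grouped and sorted by file
    # path, then rewrite the input patch so it keeps only the unassigned hunks.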
for bucket_name, hunks in buckets.items():
with open(f"{bucket_name}.patch", "a") as bucket_file:
old_path = ""
for (path, hunk) in sorted(hunks, key=lambda a: a[0]):
if path != old_path:
old_path = path
bucket_file.write(diff_header(path))
bucket_file.write(str(hunk))
with open(args.patch, "w") as patch_file:
for patched_file in patch:
path = patched_file.path
first = True
for hunk in patched_file:
if id(hunk) not in all_assigned_hunks:
if first:
first = False
patch_file.write(diff_header(path))
patch_file.write(str(hunk))
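# Hunks are addressed by their position in the flattened (file, hunk) order;
# return True if the hunk at that global index already sits in a bucket.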
def is_assigned(index):
current_index = 0
for patched_file in patch:
for hunk in patched_file:
if index == current_index:
return id(hunk) in all_assigned_hunks
current_index += 1
assert False
def done():
while True:
command = ask("We're done. Save? [yn] ")
if command == "y":
save_patches()
sys.exit(0)
elif command == "n":
sys.exit(0)
def next(bumping=False):
global target_index
global total_hunks
first = True
while first or is_assigned(target_index):
first = False
if target_index == total_hunks - 1:
if not bumping:
ask("You're at the last hunk! ")
if is_assigned(target_index):
previous(True)
else:
done()
return
else:
target_index += 1
def previous(bumping=False):
global target_index
first = True
while first or is_assigned(target_index):
first = False
if target_index == 0:
if not bumping:
ask("You're at the first hunk! ")
if is_assigned(target_index):
next(True)
else:
done()
return
else:
target_index -= 1
def ask(message):
try:
return input(colored(message, "blue"))
except (KeyboardInterrupt, EOFError):
print()
print("Exiting without saving")
sys.exit(1)
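# Show one hunk and prompt for a command: '?' lists buckets, '!NAME' creates a
# bucket, a bare bucket name assigns the hunk to it, 'p'/'n' move between hunks,
# 'l' forces paging through less, 'q' saves and quits, and an empty input repeats
# the previous command.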
def handle_hunk(patched_file, hunk):
hunks_count = len(patched_file)
index_in_file = [
index for index, current_hunk in enumerate(patched_file) if id(hunk) == id(current_hunk)
][0]
print_hunk(patched_file.path, hunk)
retry = False
global last_command
global command
last_command = command
input_message = f"#({target_index+1}/{total_hunks}) ({index_in_file+1}/{hunks_count}) Target bucket [?,!BUCKET,BUCKET,p,n,q,l]"
if last_command:
input_message += f" (last command: {command})"
input_message += ": "
command = ask(input_message)
if len(command) == 0:
command = last_command
if command == "?":
print_buckets()
elif command.startswith("!"):
command = command[1:]
if not create_new_bucket(command):
ask(f'Cannot create bucket "{command}"! ')
else:
assign_to(patched_file.path, hunk, command)
next()
elif command == "l":
print_hunk(patched_file.path, hunk, True)
elif command == "n":
next()
elif command == "p":
previous()
elif command == "q":
save_patches()
sys.exit(0)
else:
if not assign_to(patched_file.path, hunk, command):
ask(f'Bucket "{command}" does not exists! ')
else:
next()
def main():
parser = argparse.ArgumentParser(description="Organize patch in buckets.")
parser.add_argument("patch", metavar="PATCH", help="input patch")
parser.add_argument("--no-less", action="store_true", help="Don't use less.")
global args
args = parser.parse_args()
global patch
patch = PatchSet(open(args.patch, "r"))
global command
command = ""
files_count = len(patch)
global target_index
target_index = 0
global total_hunks
total_hunks = 0
for patched_file in patch:
for hunk in patched_file:
total_hunks += 1
while True:
global index
index = 0
for file_index, patched_file in enumerate(patch):
path = patched_file.path
hunks_count = len(patched_file)
warning(f"({file_index+1}/{files_count}) Patched file: {path} ({hunks_count} hunks)")
for hunk in patched_file:
if index == target_index:
handle_hunk(patched_file, hunk)
index += 1
if __name__ == "__main__":
sys.exit(main())
| 26.142857
| 131
| 0.584075
|
74a1703d340264a051b3beeee2f8e9b6374837fd
| 5,777
|
py
|
Python
|
docs/conf.py
|
jposada202020/CircuitPython_equalizer
|
60d42e5f4090f7e4b97e744ec5b00a49c65138ad
|
[
"Unlicense",
"MIT-0",
"MIT"
] | null | null | null |
docs/conf.py
|
jposada202020/CircuitPython_equalizer
|
60d42e5f4090f7e4b97e744ec5b00a49c65138ad
|
[
"Unlicense",
"MIT-0",
"MIT"
] | null | null | null |
docs/conf.py
|
jposada202020/CircuitPython_equalizer
|
60d42e5f4090f7e4b97e744ec5b00a49c65138ad
|
[
"Unlicense",
"MIT-0",
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2017 Scott Shawcroft, written for Adafruit Industries
#
# SPDX-License-Identifier: MIT
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
]
# TODO: Please Read!
# Uncomment the below if you use native CircuitPython modules such as
# digitalio, micropython and busio. List the modules you use. Without it, the
# autodoc module docs will fail to generate with a warning.
autodoc_mock_imports = [
"displayio",
"adafruit_display_shapes",
"vectorio",
"bitmaptools",
"terminalio",
"adafruit_imageload",
"adafruit_display_text",
"bitmaptools",
]
intersphinx_mapping = {
"python": ("https://docs.python.org/3.4", None),
"CircuitPython": ("https://circuitpython.readthedocs.io/en/latest/", None),
}
# Show the docstring from both the class and its __init__() method.
autoclass_content = "both"
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = " CircuitPython equalizer Library"
copyright = "2021 Jose David M."
author = "Jose David M."
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "1.0"
# The full version, including alpha/beta/rc tags.
release = "1.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = [
"_build",
"Thumbs.db",
".DS_Store",
".env",
"CODE_OF_CONDUCT.md",
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = "any"
# If true, '()' will be appended to :func: etc. cross-reference text.
#
add_function_parentheses = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# If this is True, todo emits a warning for each TODO entries. The default is False.
todo_emit_warnings = True
napoleon_numpy_docstring = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), "."]
except:
html_theme = "default"
html_theme_path = ["."]
else:
html_theme_path = ["."]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
html_favicon = "_static/favicon.ico"
# Output file base name for HTML help builder.
htmlhelp_basename = "CircuitPython_EqualizerLibrarydoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"CircuitPython_equalizerLibrary.tex",
"CircuitPython equalizer Library Documentation",
author,
"manual",
),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
master_doc,
"CircuitPython_equalizerLibrary",
"CircuitPython equalizer Library Documentation",
[author],
1,
),
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"CircuitPython_equalizerLibrary",
"CircuitPython equalizer Library Documentation",
author,
"CircuitPython_equalizerLibrary",
"One line description of project.",
"Miscellaneous",
),
]
| 29.324873
| 84
| 0.673187
|
0263e018abdef3c2a7507e34079fbf53dac6fcb1
| 1,021
|
py
|
Python
|
hoover/search/management/commands/update.py
|
Spiderpig86/search
|
74daa985e7d8ddefc323100c2cbf2a279f5c9254
|
[
"MIT"
] | null | null | null |
hoover/search/management/commands/update.py
|
Spiderpig86/search
|
74daa985e7d8ddefc323100c2cbf2a279f5c9254
|
[
"MIT"
] | null | null | null |
hoover/search/management/commands/update.py
|
Spiderpig86/search
|
74daa985e7d8ddefc323100c2cbf2a279f5c9254
|
[
"MIT"
] | null | null | null |
import time
from django.core.management.base import BaseCommand
from ...models import Collection
from ...index import update_collection, logger as index_logger
from ...loaders.collectible import logger as collectible_logger
from ...utils import LOG_LEVEL
class Command(BaseCommand):
help = "Import a collection"
def add_arguments(self, parser):
parser.add_argument('collection')
parser.add_argument('-s', '--sleep', type=int)
def handle(self, verbosity, collection, sleep, **options):
index_logger.setLevel(LOG_LEVEL[verbosity])
collectible_logger.setLevel(LOG_LEVEL[verbosity])
while True:
report = update_collection(Collection.objects.get(name=collection))
count = report.get('indexed', 0)
print('indexed {} documents'.format(count))
if count and sleep:
print('waiting {}s ...'.format(sleep))
time.sleep(sleep)
else:
print('done')
break
| 32.935484
| 79
| 0.643487
|
5d840a0cc08030e6b8ecbfd091a42c47000289af
| 299
|
py
|
Python
|
mtorch/core/model/metrics/base.py
|
NullConvergence/torch_temp
|
29a0d7190f0be6124f51bd85b8320cd8b3cef29a
|
[
"MIT"
] | 3
|
2019-08-08T13:23:50.000Z
|
2019-08-15T15:29:36.000Z
|
mtorch/core/model/metrics/base.py
|
NullConvergence/torch-template
|
29a0d7190f0be6124f51bd85b8320cd8b3cef29a
|
[
"MIT"
] | 10
|
2019-09-20T21:25:22.000Z
|
2019-10-16T10:52:04.000Z
|
mtorch/core/model/metrics/base.py
|
NullConvergence/mtorch
|
29a0d7190f0be6124f51bd85b8320cd8b3cef29a
|
[
"MIT"
] | 2
|
2019-08-08T13:23:52.000Z
|
2019-08-08T19:46:55.000Z
|
from abc import abstractmethod
class BaseMetric(object):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@abstractmethod
def forward(self):
raise NotImplementedError
@abstractmethod
def get_name(self):
raise NotImplementedError
| 19.933333
| 41
| 0.672241
|
16bc5afc768d5d968ce17ca07a6a32a9277417e3
| 3,145
|
py
|
Python
|
nlp_demo/nlp_naiveb_text_classifier-master/naiveb_classifier.py
|
maliozer/cs224n
|
cd81462a998dba08d3536a606d66a6d2eeb8b147
|
[
"MIT"
] | null | null | null |
nlp_demo/nlp_naiveb_text_classifier-master/naiveb_classifier.py
|
maliozer/cs224n
|
cd81462a998dba08d3536a606d66a6d2eeb8b147
|
[
"MIT"
] | null | null | null |
nlp_demo/nlp_naiveb_text_classifier-master/naiveb_classifier.py
|
maliozer/cs224n
|
cd81462a998dba08d3536a606d66a6d2eeb8b147
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 3 2019
@author: maliozer
"""
from tqdm import tqdm
import numpy as np
import pandas as pd
import nltk
from nltk.stem.snowball import SnowballStemmer
#TRAINING
#file process
file_path = "files/tweetset.csv"
tweetsFrame = pd.read_csv(file_path)
#fix column_names
tweetsFrame.columns = ['polarity', 'id', 'date', 'query', 'username', 'tweet']
#drop irrelevant columns
tweetsFrame = tweetsFrame.drop(columns=['id','date','query','username'])
totalPositive = 0
totalNegative = 0
word_dict = dict()
stemmer = SnowballStemmer("english")
#reading Tweetdataframe to make df
for row in tqdm(tweetsFrame.iterrows(), total=tweetsFrame.shape[0]):
df_record = row[1] #df row as series
isPositive = df_record[0]
sentence = df_record[1]
if isPositive == 4:
totalPositive += 1
elif isPositive == 0:
totalNegative += 1
wordInSentence = nltk.word_tokenize(sentence)
wordInSentence=[word.lower() for word in wordInSentence if word.isalpha()]
for word in wordInSentence:
stemmedWord = stemmer.stem(word)
if stemmedWord in word_dict:
word_dict[stemmedWord][0] += 1
if isPositive == 4:
word_dict[stemmedWord][1] += 1
elif isPositive == 0:
word_dict[stemmedWord][2] += 1
else:
w_started = 3
p_num = 1
n_num = 1
if isPositive == 4:
p_num += 1
elif isPositive == 0:
n_num += 1
word_dict[stemmedWord] = [w_started,p_num,n_num]
df_export = pd.DataFrame.from_dict(word_dict, orient='index')
df_export.columns = ['occurence', 'pos', 'neg']
#likelihood (per-class word totals were only defined in the commented-out block below)
totalPositiveWords = df_export["pos"].sum()
totalNegativeWords = df_export["neg"].sum()
df_export["likelihoodPos"] = df_export["pos"] / totalPositiveWords
df_export["likelihoodNeg"] = df_export["neg"] / totalNegativeWords
#negative log likelihood NLL
df_export['log_likePos'] = -1 * np.log(df_export.likelihoodPos)
df_export['log_likeNeg'] = -1 * np.log(df_export.likelihoodNeg)
logpriorPos = -1 * np.log(df_export.pos.sum() / df_export.occurence.sum())
logpriorNeg = -1 * np.log(df_export.neg.sum() / df_export.occurence.sum())
"""
df_export = pd.DataFrame.from_dict(word_dict, orient='index')
df_export.columns = ['occurence', '+', '-']
#df_export["Ppositive"] = Series(np.random.randn(sLength), index=df1.index)
totalNegativeWords = df_export["-"].sum()
totalPositiveWords = df_export["+"].sum()
totalOccurence = df_export["occurence"].sum()
"""
#test
newSentence = "I think maliozer that you both will get on well with each other. "
wordInSentence = nltk.word_tokenize(newSentence)
wordInSentence=[word.lower() for word in wordInSentence if word.isalpha()]
testList = list()
for word in wordInSentence:
stemmedWord = stemmer.stem(word)
testList.append(stemmedWord)
sumPositive = 1
sumNegative = 1
for word in testList:
if df_export[df_export.index == word].empty:
        continue  # skip words that were not seen during training
else:
print(df_export[df_export.index == word])
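# ---------------------------------------------------------------------------
# Assumed completion (not part of the original script): the loop above only
# prints matching rows, so a minimal sketch of the Naive Bayes decision step is
# added here. It reuses sumPositive/sumNegative (both start at 1, which offsets
# the two scores equally) and the negative log-likelihood columns built above;
# the class with the smaller total NLL wins.
for word in testList:
    if word in df_export.index:
        sumPositive += df_export.loc[word, "log_likePos"]
        sumNegative += df_export.loc[word, "log_likeNeg"]
scorePositive = logpriorPos + sumPositive
scoreNegative = logpriorNeg + sumNegative
print("predicted sentiment:", "positive" if scorePositive < scoreNegative else "negative")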
| 25.778689
| 82
| 0.646105
|
93c9410aef06f08a2136583556b65baef98a6eff
| 3,705
|
py
|
Python
|
deprecated/train.py
|
Stillerman/MusicTransformer-pytorch
|
73abb7cab271beba042b7b6fc06a6a9aaee82e8c
|
[
"MIT"
] | 170
|
2019-08-24T07:25:04.000Z
|
2022-03-29T01:42:44.000Z
|
deprecated/train.py
|
Stillerman/MusicTransformer-pytorch
|
73abb7cab271beba042b7b6fc06a6a9aaee82e8c
|
[
"MIT"
] | 15
|
2019-10-29T01:36:57.000Z
|
2022-03-11T23:56:32.000Z
|
deprecated/train.py
|
Stillerman/MusicTransformer-pytorch
|
73abb7cab271beba042b7b6fc06a6a9aaee82e8c
|
[
"MIT"
] | 57
|
2019-10-14T07:26:10.000Z
|
2022-03-31T14:39:02.000Z
|
from model import MusicTransformer
from custom.layers import *
from custom import criterion
import params as par
from tensorflow.python.keras.optimizer_v2.adam import Adam
from data import Data
import utils
import argparse
import datetime
import sys
tf.executing_eagerly()
parser = argparse.ArgumentParser()
parser.add_argument('--l_r', default=None, help='learning rate', type=float)
parser.add_argument('--batch_size', default=2, help='batch size', type=int)
parser.add_argument('--pickle_dir', default='music', help='dataset path')
parser.add_argument('--max_seq', default=2048, help='maximum sequence length', type=int)
parser.add_argument('--epochs', default=100, help='number of epochs', type=int)
parser.add_argument('--load_path', default=None, help='model load path', type=str)
parser.add_argument('--save_path', default="result/0722", help='model save path')
parser.add_argument('--is_reuse', default=False)
parser.add_argument('--multi_gpu', default=True)
args = parser.parse_args()
# set arguments
l_r = args.l_r
batch_size = args.batch_size
pickle_dir = args.pickle_dir
max_seq = args.max_seq
epochs = args.epochs
is_reuse = args.is_reuse
load_path = args.load_path
save_path = args.save_path
multi_gpu = args.multi_gpu
# load data
dataset = Data('dataset/processed')
print(dataset)
# load model
learning_rate = criterion.CustomSchedule(par.embedding_dim) if l_r is None else l_r
opt = Adam(learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-9)
# define model
mt = MusicTransformer(
embedding_dim=256,
vocab_size=par.vocab_size,
num_layer=6,
max_seq=max_seq,
dropout=0.2,
debug=False, loader_path=load_path)
mt.compile(optimizer=opt, loss=criterion.transformer_dist_train_loss)
# define tensorboard writer
current_time = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
train_log_dir = 'logs/gradient_tape/'+current_time+'/train'
eval_log_dir = 'logs/gradient_tape/'+current_time+'/eval'
train_summary_writer = tf.summary.create_file_writer(train_log_dir)
eval_summary_writer = tf.summary.create_file_writer(eval_log_dir)
# Train Start
idx = 0
for e in range(epochs):
mt.reset_metrics()
for b in range(len(dataset.files) // batch_size):
try:
batch_x, batch_y = dataset.seq2seq_batch(batch_size, max_seq)
except:
continue
result_metrics = mt.train_on_batch(batch_x, batch_y)
if b % 100 == 0:
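            # Every 100 batches: evaluate one held-out batch, checkpoint the model,
            # and log loss/accuracy plus attention images to TensorBoard.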
eval_x, eval_y = dataset.seq2seq_batch(batch_size, max_seq, 'eval')
eval_result_metrics, weights = mt.evaluate(eval_x, eval_y)
mt.save(save_path)
with train_summary_writer.as_default():
tf.summary.scalar('loss', result_metrics[0], step=idx)
tf.summary.scalar('accuracy', result_metrics[1], step=idx)
for i, weight in enumerate(weights):
with tf.name_scope("layer_%d" % i):
with tf.name_scope("_w0"):
utils.attention_image_summary(weight[0])
with tf.name_scope("_w1"):
utils.attention_image_summary(weight[1])
with eval_summary_writer.as_default():
tf.summary.scalar('loss', eval_result_metrics[0], step=idx)
tf.summary.scalar('accuracy', eval_result_metrics[1], step=idx)
idx += 1
print('\n====================================================')
print('Epoch/Batch: {}/{}'.format(e, b))
print('Train >>>> Loss: {:6.6}, Accuracy: {}'.format(result_metrics[0], result_metrics[1]))
print('Eval >>>> Loss: {:6.6}, Accuracy: {}'.format(eval_result_metrics[0], eval_result_metrics[1]))
| 35.285714
| 112
| 0.65803
|
809401e604879aaca29f17cab8f332b27a623e08
| 1,585
|
py
|
Python
|
Task3/INDRASHIS PAUL - TASK 3/From WebCam/Face_Detection_from_Webcam.py
|
pratiksha-shinde123/ML
|
11f55a08640189fa8869f5d1af85662d5cb582d3
|
[
"MIT"
] | 1
|
2020-10-15T03:51:05.000Z
|
2020-10-15T03:51:05.000Z
|
Task3/INDRASHIS PAUL - TASK 3/From WebCam/Face_Detection_from_Webcam.py
|
IndraP24/ML
|
f35886be236ec35a79f2404df2028fa3f312ef22
|
[
"MIT"
] | 2
|
2020-10-15T03:43:46.000Z
|
2020-10-15T05:02:43.000Z
|
Task3/INDRASHIS PAUL - TASK 3/From WebCam/Face_Detection_from_Webcam.py
|
IndraP24/ML
|
f35886be236ec35a79f2404df2028fa3f312ef22
|
[
"MIT"
] | 9
|
2020-10-09T06:34:13.000Z
|
2020-10-15T16:46:54.000Z
|
# OpenCV program to detect face in real time
# Importing libraries of python OpenCV
import cv2
# loads the required trained XML classifiers for face and eye
face_cascade = cv2.CascadeClassifier("C:/Users/indra/PycharmProjects/ai/haarcascade_frontalface_alt.xml")
eye_cascade = cv2.CascadeClassifier("C:/Users/indra/PycharmProjects/ai/haarcascade_eye.xml")
# captures frames from a camera
cap = cv2.VideoCapture(0)
# loop runs if capturing has been initialized.
while 1:
# reads frames from a camera
ret, img = cap.read()
# converts to gray scale of each frames
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Detects faces of different sizes in the input image
faces = face_cascade.detectMultiScale(gray, scaleFactor = 1.3, minNeighbors = 5)
for (x, y, w, h) in faces:
# To draw a rectangle in a face
cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 0), 2)
roi_gray = gray[y:y + h, x:x + w]
roi_color = img[y:y + h, x:x + w]
# Detects eyes of different sizes in the input image
eyes = eye_cascade.detectMultiScale(roi_gray)
# To draw a rectangle in eyes
for (ex, ey, ew, eh) in eyes:
cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 127, 255), 2)
# Display the image in a window
cv2.imshow('img', img)
# Waiting for Esc key to stop
k = cv2.waitKey(30) & 0xff
if k == 27:
break
# Close the window
cap.release()
# De-allocate any associated memory usage
cv2.destroyAllWindows()
| 31.7
| 106
| 0.641009
|
b9a0a8ce1928d7f8565db199c85c18bb06123649
| 2,202
|
py
|
Python
|
homeassistant/components/abode/alarm_control_panel.py
|
billyburly/home-assistant
|
9795449d22783e77a0ca7b745f15c89a830c5cc6
|
[
"Apache-2.0"
] | 5
|
2020-09-17T10:48:51.000Z
|
2021-11-22T00:08:17.000Z
|
homeassistant/components/abode/alarm_control_panel.py
|
billyburly/home-assistant
|
9795449d22783e77a0ca7b745f15c89a830c5cc6
|
[
"Apache-2.0"
] | 9
|
2022-01-27T06:32:10.000Z
|
2022-03-31T07:07:51.000Z
|
homeassistant/components/abode/alarm_control_panel.py
|
billyburly/home-assistant
|
9795449d22783e77a0ca7b745f15c89a830c5cc6
|
[
"Apache-2.0"
] | 2
|
2020-12-09T02:21:27.000Z
|
2021-08-07T04:58:01.000Z
|
"""Support for Abode Security System alarm control panels."""
import logging
import homeassistant.components.alarm_control_panel as alarm
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_HOME,
)
from homeassistant.const import (
ATTR_ATTRIBUTION,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_DISARMED,
)
from . import AbodeDevice
from .const import ATTRIBUTION, DOMAIN
_LOGGER = logging.getLogger(__name__)
ICON = "mdi:security"
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Abode alarm control panel device."""
data = hass.data[DOMAIN]
async_add_entities(
[AbodeAlarm(data, await hass.async_add_executor_job(data.abode.get_alarm))]
)
class AbodeAlarm(AbodeDevice, alarm.AlarmControlPanel):
"""An alarm_control_panel implementation for Abode."""
@property
def icon(self):
"""Return the icon."""
return ICON
@property
def state(self):
"""Return the state of the device."""
if self._device.is_standby:
state = STATE_ALARM_DISARMED
elif self._device.is_away:
state = STATE_ALARM_ARMED_AWAY
elif self._device.is_home:
state = STATE_ALARM_ARMED_HOME
else:
state = None
return state
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_AWAY
def alarm_disarm(self, code=None):
"""Send disarm command."""
self._device.set_standby()
def alarm_arm_home(self, code=None):
"""Send arm home command."""
self._device.set_home()
def alarm_arm_away(self, code=None):
"""Send arm away command."""
self._device.set_away()
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
"device_id": self._device.device_id,
"battery_backup": self._device.battery,
"cellular_backup": self._device.is_cellular,
}
| 27.873418
| 83
| 0.668029
|
1ccab4339a1f6650ed37f42e8629a930f6193b09
| 11,330
|
py
|
Python
|
data_tools/wrap.py
|
veugene/data_tools
|
dece2cb92aa025cedc1297aa5075582a6751c515
|
[
"MIT"
] | 16
|
2017-11-21T02:00:00.000Z
|
2021-06-13T19:55:51.000Z
|
data_wrapper/wrap.py
|
nikkkkhil/data_wrapper
|
6b590bee65e69ea1e88b92cb67b360f3ec3c3c85
|
[
"MIT"
] | 4
|
2018-09-21T04:47:04.000Z
|
2020-12-04T22:40:18.000Z
|
data_wrapper/wrap.py
|
nikkkkhil/data_wrapper
|
6b590bee65e69ea1e88b92cb67b360f3ec3c3c85
|
[
"MIT"
] | 5
|
2018-04-19T15:47:57.000Z
|
2019-07-24T08:14:00.000Z
|
import warnings
import numpy as np
class delayed_view(object):
"""
Given an array, create a view into that array without preloading the viewed
data into memory. Data is loaded as needed when indexing into the
delayed_view.
Indexing is numpy-style, using any combination of integers, slices, index
lists, ellipsis (only one, as with numpy), and boolean arrays but not
non-boolean multi-dimensional arrays. Note that the indexing style is also
used on the underlying data sources so those data sources must support the
style of indexing used with a multi_source_array object; use simple
indexing with integers and slices (eg. obj[0,3:10]) when unsure.
Adding dimensions to the output just by indexing is not supported. This
means that unlike with numpy, indexing cannot be done with `None` or
`numpy.newaxis`; also, for example, an array A with shape (4,5) can be
indexed as A[[0,1]] and A[[[0,1]]] (these are equivalent) but not as
A[[[[0,1]]]] for which numpy would add a dimension to the output.
arr : the source array
shuffle : randomize data access order within the view
idx_min : the view into arr starts at this index
idx_max : the view into arr ends before this index
rng : numpy random number generator
"""
def __init__(self, arr, shuffle=False, idx_min=None, idx_max=None,
rng=None):
self.arr = arr
self.shuffle = shuffle
self.idx_min = idx_min
if idx_min is None:
self.idx_min = 0
self.idx_max = idx_max
if idx_max is None:
self.idx_max = len(self.arr)
if rng is None:
rng = np.random.RandomState()
self.rng = rng
self.num_items = min(self.idx_max, len(arr))-self.idx_min
assert(self.num_items >= 0)
self.dtype = self.arr.dtype
try:
self.shape = arr.shape
except AttributeError:
self.shape = (len(arr),)+np.shape(arr[0])
self.ndim = len(self.shape)
# Create index list
self.arr_indices = np.arange(self.idx_min, min(self.idx_max, len(arr)))
if self.shuffle:
self.rng.shuffle(self.arr_indices)
def re_shuffle(self, random_seed=None):
rng = self.rng
if random_seed is not None:
rng = np.random.RandomState(random_seed)
rng.shuffle(self.arr_indices)
def __iter__(self):
for idx in self.arr_indices:
idx = int(idx) # Some libraries don't like np.integer
yield self.arr[idx]
def _get_element(self, int_key, key_remainder=None):
if not isinstance(int_key, (int, np.integer)):
raise IndexError("cannot index with {}".format(type(int_key)))
        idx = int(self.arr_indices[int_key])  # Some libraries don't like np.integer
        if key_remainder is not None:
            idx = (idx,)+key_remainder
return self.arr[idx]
def _get_block(self, values, key_remainder=None):
item_block = None
for i, v in enumerate(values):
# Lists in the aggregate key index in tandem;
# so, index into those lists (the first list is `values`)
v_key_remainder = key_remainder
if isinstance(values, tuple) or isinstance(values, list):
if key_remainder is not None:
broadcasted_key_remainder = ()
for k in key_remainder:
if hasattr(k, '__len__') and len(k)==np.size(k):
broadcasted_key_remainder += (k[i],)
else:
broadcasted_key_remainder += (k,)
v_key_remainder = broadcasted_key_remainder
# Make a single read at an integer index of axis 0
elem = self._get_element(v, v_key_remainder)
if item_block is None:
item_block = np.zeros((len(values),)+elem.shape,
self.dtype)
item_block[i] = elem
return item_block
def __getitem__(self, key):
item = None
key_remainder = None
# Grab the key for the first dimension, store the remainder
if hasattr(key, '__len__'):
if isinstance(key, np.ndarray):
                if key.dtype == bool:
if key.ndim != self.ndim:
raise IndexError("not enough indices, given a boolean "
"index array with shape "
"{}".format(np.shape(key)))
key = key.nonzero()
elif key.ndim > 1:
raise IndexError("indexing by non-boolean multidimensional"
" arrays not supported")
# If there are lists in the key, make sure they have the same shape
key_shapes = []
for k in key:
if hasattr(k, '__len__'):
key_shapes.append(np.shape(k))
for s in key_shapes:
if s!=key_shapes[0]:
raise IndexError("shape mismatch: indexing arrays could "
"not be broadcast together with shapes "
""+" ".join([str(s) for s in key_shapes]))
if len(key_shapes) > self.ndim:
# More sublists/subtuples than dimensions in the array
raise IndexError("too many indices for array")
# If there are iterables in the key, or if the key is a tuple, then
# each key index corresponds to a separate data dimension (as per
# Numpy). Otherwise, such as when the key is a list of integers,
# each index corresponds only to the first data dimension.
key_remainder = None
if len(key_shapes) or isinstance(key, tuple):
key_remainder = tuple(key[1:])
key = key[0]
# Handle ellipsis
if key is Ellipsis:
key = slice(0, self.num_items)
if key_remainder is not None and len(key_remainder) < self.ndim-1:
key_remainder = (Ellipsis,)+key_remainder
# At this point the `key` is only for the first dimension and any keys
# for other dimensions that may have been passed are in key_remainder
if isinstance(key, (int, np.integer)):
item = self._get_element(key, key_remainder)
elif isinstance(key, slice):
start = key.start if key.start is not None else 0
stop = key.stop if key.stop is not None else self.num_items
stop = min(stop, self.num_items)
step = key.step if key.step is not None else 1
item = self._get_block(range(start, stop, step), key_remainder)
elif hasattr(key, '__len__'):
item = self._get_block(key, key_remainder)
else:
raise IndexError("cannot index with {}".format(type(key)))
return item
def __len__(self):
return self.num_items
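# Editor's note: a minimal, hedged usage sketch for delayed_view (not part of the
# original module); the array and index bounds below are illustrative only.
#
#   base = np.arange(100).reshape(20, 5)
#   view = delayed_view(base, shuffle=True, idx_min=5, idx_max=15)
#   len(view)       # 10 items, drawn from rows 5..14 of `base`
#   view[0]         # a single (shuffled) row, read on demand
#   view[0:3, 1:4]  # numpy-style block indexing on the view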
class multi_source_array(delayed_view):
"""
Given a list of sources, create an array-like interface that combines the
sources. This object allows slicing and iterating over the elements. Data
access automatically spans all data sources.
    Indexing is numpy-style with the exception of indexing using non-boolean
multi-dimensional arrays, as detailed in wrap.delayed_view.
source_list : list of sources to combine into one source
class_list : specifies class number for each source; same length as
source_list
shuffle : randomize data access order within and across all sources
maxlen : the maximum number of elements to take from each source; if
shuffle is False, a source is accessed as source[0:maxlen] and if
shuffle is True, a source is accessed as shuffle(source)[0:maxlen]
rng : numpy random number generator
"""
def __init__(self, source_list, class_list=None, shuffle=False,
maxlen=None, rng=None):
self.source_list = source_list
self.class_list = class_list
self.shuffle = shuffle
self.maxlen = maxlen
        if self.maxlen is None:
self.maxlen = np.inf
self.num_items = 0
for source in source_list:
self.num_items += min(len(source), self.maxlen)
# Ensure that all the data sources contain elements of the same shape
# and data type
self.dtype = self.source_list[0].dtype
self.shape = None
for i, source in enumerate(source_list):
try:
shape = source.shape
except AttributeError:
                shape = (len(source),)+np.shape(source[0])
if self.shape is None:
self.shape = (self.num_items,)+shape[1:]
if self.shape[1:]!=shape[1:]:
# In order, match all dimensions with the same shape, until
# a match is not found.
new_shape = self.shape
for i in range(1, max(min(len(self.shape), len(shape)), 1)):
if self.shape[1:i]==shape[1:i]:
new_shape = self.shape[:i]
self.shape = new_shape
if source.dtype != self.dtype:
self.dtype = None # Cannot determine dtype.
self.ndim = len(self.shape)
if rng is None:
rng = np.random.RandomState()
self.rng = rng
# Index the data sources
self.index_pairs = []
for i, source in enumerate(self.source_list):
source_indices = np.arange(len(source))
if self.shuffle:
self.rng.shuffle(source_indices)
source_indices = source_indices[:min(len(source), self.maxlen)]
for j in source_indices:
self.index_pairs.append((i, j))
        if self.shuffle:
self.rng.shuffle(self.index_pairs)
def re_shuffle(self, random_seed=None):
rng = self.rng
if random_seed is not None:
rng = np.random.RandomState(random_seed)
rng.shuffle(self.index_pairs)
def get_labels(self):
labels = []
for p in self.index_pairs:
if not self.class_list:
labels.append(p[0])
else:
labels.append(self.class_list[ p[0] ])
return labels
def __iter__(self):
for source_num, idx in self.index_pairs:
yield self.source_list[source_num][idx]
def _get_element(self, int_key, key_remainder=None):
if not isinstance(int_key, (int, np.integer)):
raise IndexError("cannot index with {}".format(type(int_key)))
        source_num, idx = self.index_pairs[int_key]
        idx = int(idx)  # Some libraries don't like np.integer
        if key_remainder is not None:
            idx = (idx,)+key_remainder
return self.source_list[source_num][idx]
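# Editor's note: a minimal, hedged usage sketch (not part of the original module); the
# sample arrays and settings below are illustrative only.
if __name__ == '__main__':
    a = np.arange(12, dtype=np.float32).reshape(6, 2)
    b = np.arange(100, 108, dtype=np.float32).reshape(4, 2)
    combined = multi_source_array([a, b], class_list=[0, 1], shuffle=True)
    print(len(combined))          # 10 items spanning both sources
    print(combined[0])            # a single item, loaded on demand
    print(combined.get_labels())  # class label for each (shuffled) item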
| 42.754717
| 79
| 0.573169
|
2f1005e5bbb60856ce63b0028b44b070272aa4b6
| 524
|
py
|
Python
|
solution1.py
|
conburke83/pands-problem-set
|
78fca950a070f783a34037d560b3c61abb243498
|
[
"Apache-2.0"
] | null | null | null |
solution1.py
|
conburke83/pands-problem-set
|
78fca950a070f783a34037d560b3c61abb243498
|
[
"Apache-2.0"
] | null | null | null |
solution1.py
|
conburke83/pands-problem-set
|
78fca950a070f783a34037d560b3c61abb243498
|
[
"Apache-2.0"
] | null | null | null |
#CB 03/03/2019
#GMIT Data Analytics Programming & Scripting Module 2019
#Problem Sets
#Problem Set 1
#Setting up a user-defined variable 'i', and asking the user to enter a positive integer as 'i'
i = int(input("Please enter a positive integer: "))
#Setting up the 'total' variable, which will track the output value in the while-loop
total = 0
#Setting up the while loop that will accumulate the 'total' to be output
while i>0:
total = total + i
i = i - 1
#print the final output for the user to see
print (total)
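# Editor's note (hedged): for a positive integer i entered above, the while-loop adds
# i + (i - 1) + ... + 1, so the printed total is the triangular number i * (i + 1) // 2.
# For example, i = 4 gives 4 + 3 + 2 + 1 = 10 = 4 * 5 // 2.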
| 34.933333
| 95
| 0.729008
|
d7adf175f3f05807c8f5cc933a03fbea882d8e3d
| 20,457
|
py
|
Python
|
python/cudf/cudf/tests/dataset_generator.py
|
sperlingxx/cudf
|
c681211df6253e1ceee9203658108980e7e93e3c
|
[
"Apache-2.0"
] | 1
|
2021-12-17T19:28:00.000Z
|
2021-12-17T19:28:00.000Z
|
python/cudf/cudf/tests/dataset_generator.py
|
sperlingxx/cudf
|
c681211df6253e1ceee9203658108980e7e93e3c
|
[
"Apache-2.0"
] | null | null | null |
python/cudf/cudf/tests/dataset_generator.py
|
sperlingxx/cudf
|
c681211df6253e1ceee9203658108980e7e93e3c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020-2021, NVIDIA CORPORATION.
# This module is for generating "synthetic" datasets. It was originally
# designed for testing filtered reading. Generally, it should be useful
# if you want to generate data where certain phenomena (e.g., cardinality)
# are exaggerated.
import copy
import random
import string
from multiprocessing import Pool
import mimesis
import numpy as np
import pandas as pd
import pyarrow as pa
from mimesis import Generic
from pyarrow import parquet as pq
import cudf
class ColumnParameters:
"""Parameters for generating column of data
Attributes
---
cardinality : int or None
Size of a random set of values that generated data is sampled from.
The values in the random set are derived from the given generator.
If cardinality is None, the Iterable returned by the given generator
is invoked for each value to be generated.
    null_frequency : float
Probability of a generated value being null
generator : Callable
Function for generating random data. It is passed a Mimesis Generic
provider and returns an Iterable that generates data.
is_sorted : bool
Sort this column. Columns are sorted in same order as ColumnParameters
instances stored in column_params of Parameters. If there are one or
more columns marked as sorted, the generated PyArrow Table will be
converted to a Pandas DataFrame to do the sorting. This may implicitly
convert numbers to floats in the presence of nulls.
dtype : optional
a numpy dtype to control the format of the data
"""
def __init__(
self,
cardinality=100,
null_frequency=0.1,
generator=lambda g: [g.address.country for _ in range(100)],
is_sorted=True,
dtype=None,
):
self.cardinality = cardinality
self.null_frequency = null_frequency
self.generator = generator
self.is_sorted = is_sorted
self.dtype = dtype
class Parameters:
"""Parameters for random dataset generation
Attributes
---
num_rows : int
Number of rows to generate
    column_parameters : List[ColumnParameters]
        ColumnParameters for each column
seed : int or None, default None
Seed for random data generation
"""
def __init__(
self, num_rows=2048, column_parameters=None, seed=None,
):
self.num_rows = num_rows
if column_parameters is None:
column_parameters = []
self.column_parameters = column_parameters
self.seed = seed
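# Editor's note: a hedged sketch (not part of the original module) of how the two
# parameter classes above feed into `generate` further below; the output file name is
# an illustrative assumption and the generator mirrors the ColumnParameters default.
#
#   params = Parameters(
#       num_rows=1000,
#       column_parameters=[
#           ColumnParameters(
#               cardinality=20,
#               null_frequency=0.2,
#               generator=lambda g: [g.address.country for _ in range(20)],
#               is_sorted=False,
#           ),
#       ],
#       seed=42,
#   )
#   generate("example.parquet", params)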
def _write(tbl, path, format):
if format["name"] == "parquet":
if isinstance(tbl, pa.Table):
pq.write_table(tbl, path, row_group_size=format["row_group_size"])
elif isinstance(tbl, pd.DataFrame):
tbl.to_parquet(path, row_group_size=format["row_group_size"])
def _generate_column(column_params, num_rows):
# If cardinality is specified, we create a set to sample from.
# Otherwise, we simply use the given generator to generate each value.
if column_params.cardinality is not None:
# Construct set of values to sample from where
# set size = cardinality
if (
isinstance(column_params.dtype, str)
and column_params.dtype == "category"
):
vals = pa.array(
column_params.generator,
size=column_params.cardinality,
safe=False,
)
return pa.DictionaryArray.from_arrays(
dictionary=vals,
indices=np.random.randint(
low=0, high=len(vals), size=num_rows
),
mask=np.random.choice(
[True, False],
size=num_rows,
p=[
column_params.null_frequency,
1 - column_params.null_frequency,
],
)
if column_params.null_frequency > 0.0
else None,
)
if hasattr(column_params.dtype, "to_arrow"):
arrow_type = column_params.dtype.to_arrow()
elif column_params.dtype is not None:
arrow_type = pa.from_numpy_dtype(column_params.dtype)
else:
arrow_type = None
if not isinstance(arrow_type, pa.lib.Decimal128Type):
vals = pa.array(
column_params.generator,
size=column_params.cardinality,
safe=False,
type=arrow_type,
)
vals = pa.array(
np.random.choice(column_params.generator, size=num_rows)
if isinstance(arrow_type, pa.lib.Decimal128Type)
else np.random.choice(vals, size=num_rows),
mask=np.random.choice(
[True, False],
size=num_rows,
p=[
column_params.null_frequency,
1 - column_params.null_frequency,
],
)
if column_params.null_frequency > 0.0
else None,
size=num_rows,
safe=False,
type=None
if isinstance(arrow_type, pa.lib.Decimal128Type)
else arrow_type,
)
if isinstance(arrow_type, pa.lib.Decimal128Type):
vals = vals.cast(arrow_type, safe=False)
return vals
else:
# Generate data for current column
return pa.array(
column_params.generator,
mask=np.random.choice(
[True, False],
size=num_rows,
p=[
column_params.null_frequency,
1 - column_params.null_frequency,
],
)
if column_params.null_frequency > 0.0
else None,
size=num_rows,
safe=False,
)
def generate(
path, parameters, format=None, use_threads=True,
):
"""
Generate dataset using given parameters and write to given format
Parameters
----------
path : str or file-like object
Path to write to
parameters : Parameters
Parameters specifying how to randomly generate data
format : Dict
Format to write
"""
if format is None:
format = {"name": "parquet", "row_group_size": 64}
df = get_dataframe(parameters, use_threads)
# Write
_write(df, path, format)
def get_dataframe(parameters, use_threads):
# Initialize seeds
if parameters.seed is not None:
np.random.seed(parameters.seed)
# For each column, use a generic Mimesis producer to create an Iterable
# for generating data
for i, column_params in enumerate(parameters.column_parameters):
if column_params.dtype is None:
column_params.generator = column_params.generator(
Generic("en", seed=parameters.seed)
)
else:
column_params.generator = column_params.generator()
# Get schema for each column
table_fields = []
for i, column_params in enumerate(parameters.column_parameters):
if (
isinstance(column_params.dtype, str)
and column_params.dtype == "category"
):
arrow_type = pa.dictionary(
index_type=pa.int64(),
value_type=pa.from_numpy_dtype(
type(next(iter(column_params.generator)))
),
)
elif hasattr(column_params.dtype, "to_arrow"):
arrow_type = column_params.dtype.to_arrow()
else:
arrow_type = pa.from_numpy_dtype(
type(next(iter(column_params.generator)))
if column_params.dtype is None
else column_params.dtype
)
table_fields.append(
pa.field(
name=str(i),
type=arrow_type,
nullable=column_params.null_frequency > 0,
)
)
schema = pa.schema(table_fields)
# Initialize column data and which columns should be sorted
column_data = [None] * len(parameters.column_parameters)
columns_to_sort = [
str(i)
for i, column_params in enumerate(parameters.column_parameters)
if column_params.is_sorted
]
# Generate data
if not use_threads:
for i, column_params in enumerate(parameters.column_parameters):
column_data[i] = _generate_column(
column_params, parameters.num_rows
)
else:
pool = Pool(pa.cpu_count())
column_data = pool.starmap(
_generate_column,
[
(column_params, parameters.num_rows)
for i, column_params in enumerate(parameters.column_parameters)
],
)
pool.close()
pool.join()
# Convert to Pandas DataFrame and sort columns appropriately
tbl = pa.Table.from_arrays(column_data, schema=schema,)
if columns_to_sort:
tbl = tbl.to_pandas()
tbl = tbl.sort_values(columns_to_sort)
tbl = pa.Table.from_pandas(tbl, schema)
return tbl
def rand_dataframe(dtypes_meta, rows, seed=random.randint(0, 2 ** 32 - 1)):
"""
Generates a random table.
Parameters
----------
dtypes_meta : List of dict
Specifies list of dtype meta data. dtype meta data should
be a dictionary of the form example:
{"dtype": "int64", "null_frequency": 0.4, "cardinality": 10}
`"str"` dtype can contain an extra key `max_string_length` to
control the maximum size of the strings being generated in each row.
If not specified, it will default to 1000.
rows : int
Specifies the number of rows to be generated.
seed : int
Specifies the `seed` value to be utilized by all downstream
random data generation APIs.
Returns
-------
PyArrow Table
A Table with columns of corresponding dtypes mentioned in `dtypes_meta`
"""
# Apply seed
random.seed(seed)
np.random.seed(seed)
mimesis.random.random.seed(seed)
column_params = []
for meta in dtypes_meta:
dtype = copy.deepcopy(meta["dtype"])
null_frequency = copy.deepcopy(meta["null_frequency"])
cardinality = copy.deepcopy(meta["cardinality"])
if dtype == "list":
lists_max_length = meta["lists_max_length"]
nesting_max_depth = meta["nesting_max_depth"]
value_type = meta["value_type"]
nesting_depth = np.random.randint(1, nesting_max_depth)
dtype = cudf.core.dtypes.ListDtype(value_type)
# Determining the `dtype` from the `value_type`
# and the nesting_depth
i = 1
while i < nesting_depth:
dtype = cudf.core.dtypes.ListDtype(dtype)
i += 1
column_params.append(
ColumnParameters(
cardinality=cardinality,
null_frequency=null_frequency,
generator=list_generator(
dtype=value_type,
size=cardinality,
nesting_depth=nesting_depth,
lists_max_length=lists_max_length,
),
is_sorted=False,
dtype=dtype,
)
)
elif dtype == "decimal64":
max_precision = meta.get(
"max_precision", cudf.Decimal64Dtype.MAX_PRECISION
)
precision = np.random.randint(1, max_precision)
scale = np.random.randint(0, precision)
dtype = cudf.Decimal64Dtype(precision=precision, scale=scale)
column_params.append(
ColumnParameters(
cardinality=cardinality,
null_frequency=null_frequency,
generator=decimal_generator(dtype=dtype, size=cardinality),
is_sorted=False,
dtype=dtype,
)
)
elif dtype == "category":
column_params.append(
ColumnParameters(
cardinality=cardinality,
null_frequency=null_frequency,
generator=lambda cardinality=cardinality: [
mimesis.random.random.randstr(unique=True, length=2000)
for _ in range(cardinality)
],
is_sorted=False,
dtype="category",
)
)
else:
dtype = np.dtype(dtype)
if dtype.kind in ("i", "u"):
column_params.append(
ColumnParameters(
cardinality=cardinality,
null_frequency=null_frequency,
generator=int_generator(dtype=dtype, size=cardinality),
is_sorted=False,
dtype=dtype,
)
)
elif dtype.kind == "f":
column_params.append(
ColumnParameters(
cardinality=cardinality,
null_frequency=null_frequency,
generator=float_generator(
dtype=dtype, size=cardinality
),
is_sorted=False,
dtype=dtype,
)
)
elif dtype.kind in ("U", "O"):
column_params.append(
ColumnParameters(
cardinality=cardinality,
null_frequency=null_frequency,
generator=lambda cardinality=cardinality: [
mimesis.random.random.schoice(
string.printable,
meta.get("max_string_length", 1000),
)
for _ in range(cardinality)
],
is_sorted=False,
dtype=dtype,
)
)
elif dtype.kind == "M":
column_params.append(
ColumnParameters(
cardinality=cardinality,
null_frequency=null_frequency,
generator=datetime_generator(
dtype=dtype, size=cardinality
),
is_sorted=False,
dtype=np.dtype(dtype),
)
)
elif dtype.kind == "m":
column_params.append(
ColumnParameters(
cardinality=cardinality,
null_frequency=null_frequency,
generator=timedelta_generator(
dtype=dtype, size=cardinality
),
is_sorted=False,
dtype=np.dtype(dtype),
)
)
elif dtype.kind == "b":
column_params.append(
ColumnParameters(
cardinality=cardinality,
null_frequency=null_frequency,
generator=boolean_generator(cardinality),
is_sorted=False,
dtype=np.dtype(dtype),
)
)
else:
raise TypeError(f"Unsupported dtype: {dtype}")
# TODO: Add List column support once
# https://github.com/rapidsai/cudf/pull/6075
# is merged.
df = get_dataframe(
Parameters(num_rows=rows, column_parameters=column_params, seed=seed,),
use_threads=True,
)
return df
def int_generator(dtype, size):
"""
Generator for int data
"""
iinfo = np.iinfo(dtype)
return lambda: np.random.randint(
low=iinfo.min, high=iinfo.max, size=size, dtype=dtype,
)
def float_generator(dtype, size):
"""
Generator for float data
"""
finfo = np.finfo(dtype)
return (
lambda: np.random.uniform(
low=finfo.min / 2, high=finfo.max / 2, size=size,
)
* 2
)
def datetime_generator(dtype, size):
"""
Generator for datetime data
"""
iinfo = np.iinfo("int64")
return lambda: np.random.randint(
low=np.datetime64(iinfo.min + 1, "ns").astype(dtype).astype("int"),
high=np.datetime64(iinfo.max, "ns").astype(dtype).astype("int"),
size=size,
)
def timedelta_generator(dtype, size):
"""
Generator for timedelta data
"""
iinfo = np.iinfo("int64")
return lambda: np.random.randint(
low=np.timedelta64(iinfo.min + 1, "ns").astype(dtype).astype("int"),
high=np.timedelta64(iinfo.max, "ns").astype(dtype).astype("int"),
size=size,
)
def boolean_generator(size):
"""
Generator for bool data
"""
return lambda: np.random.choice(a=[False, True], size=size)
def decimal_generator(dtype, size):
max_integral = 10 ** (dtype.precision - dtype.scale) - 1
max_float = (10 ** dtype.scale - 1) if dtype.scale != 0 else 0
return lambda: (
np.random.uniform(
low=-max_integral,
high=max_integral + (max_float / 10 ** dtype.scale),
size=size,
)
)
def get_values_for_nested_data(dtype, lists_max_length):
"""
Returns list of values based on dtype.
"""
cardinality = np.random.randint(0, lists_max_length)
dtype = np.dtype(dtype)
if dtype.kind in ("i", "u"):
values = int_generator(dtype=dtype, size=cardinality)()
elif dtype.kind == "f":
values = float_generator(dtype=dtype, size=cardinality)()
elif dtype.kind in ("U", "O"):
values = [
mimesis.random.random.schoice(string.printable, 100,)
for _ in range(cardinality)
]
elif dtype.kind == "M":
values = datetime_generator(dtype=dtype, size=cardinality)().astype(
dtype
)
elif dtype.kind == "m":
values = timedelta_generator(dtype=dtype, size=cardinality)().astype(
dtype
)
elif dtype.kind == "b":
values = boolean_generator(cardinality)().astype(dtype)
else:
raise TypeError(f"Unsupported dtype: {dtype}")
# To ensure numpy arrays are not passed as input to
# list constructor, returning a python list object here.
if isinstance(values, np.ndarray):
return values.tolist()
else:
return values
def make_lists(dtype, lists_max_length, nesting_depth, top_level_list):
"""
Helper to create random list of lists with `nesting_depth` and
specified value type `dtype`.
"""
nesting_depth -= 1
if nesting_depth >= 0:
L = np.random.randint(1, lists_max_length)
for i in range(L):
top_level_list.append(
make_lists(
dtype=dtype,
lists_max_length=lists_max_length,
nesting_depth=nesting_depth,
top_level_list=[],
)
)
else:
top_level_list = get_values_for_nested_data(
dtype=dtype, lists_max_length=lists_max_length
)
return top_level_list
def get_nested_lists(dtype, size, nesting_depth, lists_max_length):
"""
Returns a list of nested lists with random nesting
depth and random nested lists length.
"""
list_of_lists = []
while len(list_of_lists) <= size:
list_of_lists.extend(
make_lists(
dtype=dtype,
lists_max_length=lists_max_length,
nesting_depth=nesting_depth,
top_level_list=[],
)
)
return list_of_lists
def list_generator(dtype, size, nesting_depth, lists_max_length):
"""
Generator for list data
"""
return lambda: get_nested_lists(
dtype=dtype,
size=size,
nesting_depth=nesting_depth,
lists_max_length=lists_max_length,
)
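# Editor's note: a small, hedged usage sketch (not part of the original module); the
# dtype metadata below is illustrative and simply follows the format documented in
# `rand_dataframe` above.
if __name__ == "__main__":
    example_meta = [
        {"dtype": "int64", "null_frequency": 0.1, "cardinality": 10},
        {"dtype": "float32", "null_frequency": 0.4, "cardinality": 20},
        {"dtype": "str", "null_frequency": 0.0, "cardinality": 5, "max_string_length": 16},
    ]
    example_tbl = rand_dataframe(example_meta, rows=100, seed=0)
    print(example_tbl.schema)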
| 32.889068
| 79
| 0.554138
|
cab4428fe606a0370acce1319456a9d48e128e87
| 673
|
py
|
Python
|
Python_Scripts/ReadProcessMemory.py
|
AYIDouble/Python-Scripts
|
9c0864ec14bfb00bfd1d5382c3c092f7638a5e3b
|
[
"MIT"
] | 15
|
2018-09-12T09:58:57.000Z
|
2022-03-28T23:48:12.000Z
|
Python_Scripts/ReadProcessMemory.py
|
AYIDouble/Python-Scripts
|
9c0864ec14bfb00bfd1d5382c3c092f7638a5e3b
|
[
"MIT"
] | 4
|
2018-10-03T11:45:05.000Z
|
2019-01-03T18:15:52.000Z
|
Python_Scripts/ReadProcessMemory.py
|
AYIDouble/Python-Scripts
|
9c0864ec14bfb00bfd1d5382c3c092f7638a5e3b
|
[
"MIT"
] | 2
|
2020-05-08T14:27:59.000Z
|
2021-11-01T10:28:26.000Z
|
from ctypes import *
from ctypes.wintypes import *
PROCESS_ID = 9476 # From TaskManager for Notepad.exe
PROCESS_HEADER_ADDR = 0x7ff7b81e0000 # From SysInternals VMMap utility
# read from addresses
STRLEN = 255
PROCESS_VM_READ = 0x0010
k32 = WinDLL('kernel32')
k32.OpenProcess.argtypes = DWORD,BOOL,DWORD
k32.OpenProcess.restype = HANDLE
k32.ReadProcessMemory.argtypes = HANDLE,LPVOID,LPVOID,c_size_t,POINTER(c_size_t)
k32.ReadProcessMemory.restype = BOOL
process = k32.OpenProcess(PROCESS_VM_READ, 0, PROCESS_ID)
buf = create_string_buffer(STRLEN)
s = c_size_t()
if k32.ReadProcessMemory(process, PROCESS_HEADER_ADDR, buf, STRLEN, byref(s)):
print(s.value,buf.raw)
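# Editor's note (hedged): the handle returned by OpenProcess is never closed above; a
# cleanup along these lines is typically added once reading is done:
#
#   k32.CloseHandle.argtypes = HANDLE,
#   k32.CloseHandle.restype = BOOL
#   k32.CloseHandle(process)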
| 30.590909
| 80
| 0.794948
|
d661cd0088cf4702c9f22d2d9db19de33bea7c1d
| 11,728
|
py
|
Python
|
networkx/generators/tests/test_geometric.py
|
alubbock/networkx
|
95353eb81c996baf3d8d567de26d3f28f41542eb
|
[
"BSD-3-Clause"
] | 1
|
2021-10-09T18:13:09.000Z
|
2021-10-09T18:13:09.000Z
|
networkx/generators/tests/test_geometric.py
|
aganezov/networkx
|
0595dc7c7d33a94f9e46dbcb9cfce9ecd1a2329f
|
[
"BSD-3-Clause"
] | null | null | null |
networkx/generators/tests/test_geometric.py
|
aganezov/networkx
|
0595dc7c7d33a94f9e46dbcb9cfce9ecd1a2329f
|
[
"BSD-3-Clause"
] | null | null | null |
from itertools import combinations
from math import sqrt
import random
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_true
import networkx as nx
from networkx.generators.geometric import euclidean
def l1dist(x, y):
return sum(abs(a - b) for a, b in zip(x, y))
class TestRandomGeometricGraph(object):
"""Unit tests for the :func:`~networkx.random_geometric_graph`
function.
"""
def test_number_of_nodes(self):
G = nx.random_geometric_graph(50, 0.25)
assert_equal(len(G), 50)
G = nx.random_geometric_graph(range(50), 0.25)
assert_equal(len(G), 50)
def test_distances(self):
"""Tests that pairs of vertices adjacent if and only if they are
within the prescribed radius.
"""
# Use the Euclidean metric, the default according to the
# documentation.
dist = euclidean
G = nx.random_geometric_graph(50, 0.25)
for u, v in combinations(G, 2):
# Adjacent vertices must be within the given distance.
if v in G[u]:
assert_true(dist(G.nodes[u]['pos'], G.nodes[v]['pos']) <= 0.25)
# Nonadjacent vertices must be at greater distance.
else:
assert_false(dist(G.nodes[u]['pos'], G.nodes[v]['pos']) <= 0.25)
def test_p(self):
"""Tests for providing an alternate distance metric to the
generator.
"""
# Use the L1 metric.
dist = l1dist
G = nx.random_geometric_graph(50, 0.25, p=1)
for u, v in combinations(G, 2):
# Adjacent vertices must be within the given distance.
if v in G[u]:
assert_true(dist(G.nodes[u]['pos'], G.nodes[v]['pos']) <= 0.25)
# Nonadjacent vertices must be at greater distance.
else:
assert_false(dist(G.nodes[u]['pos'], G.nodes[v]['pos']) <= 0.25)
def test_node_names(self):
"""Tests using values other than sequential numbers as node IDs.
"""
import string
nodes = list(string.ascii_lowercase)
G = nx.random_geometric_graph(nodes, 0.25)
assert_equal(len(G), len(nodes))
dist = euclidean
for u, v in combinations(G, 2):
# Adjacent vertices must be within the given distance.
if v in G[u]:
assert_true(dist(G.nodes[u]['pos'], G.nodes[v]['pos']) <= 0.25)
# Nonadjacent vertices must be at greater distance.
else:
assert_false(dist(G.nodes[u]['pos'], G.nodes[v]['pos']) <= 0.25)
class TestSoftRandomGeometricGraph(object):
"""Unit tests for the :func:`~networkx.soft_random_geometric_graph`
function.
"""
def test_number_of_nodes(self):
G = nx.soft_random_geometric_graph(50, 0.25)
assert_equal(len(G), 50)
G = nx.soft_random_geometric_graph(range(50), 0.25)
assert_equal(len(G), 50)
def test_distances(self):
"""Tests that pairs of vertices adjacent if and only if they are
within the prescribed radius.
"""
# Use the Euclidean metric, the default according to the
# documentation.
dist = lambda x, y: sqrt(sum((a - b) ** 2 for a, b in zip(x, y)))
G = nx.soft_random_geometric_graph(50, 0.25)
for u, v in combinations(G, 2):
# Adjacent vertices must be within the given distance.
if v in G[u]:
assert_true(dist(G.nodes[u]['pos'], G.nodes[v]['pos']) <= 0.25)
def test_p(self):
"""Tests for providing an alternate distance metric to the
generator.
"""
# Use the L1 metric.
dist = lambda x, y: sum(abs(a - b) for a, b in zip(x, y))
G = nx.soft_random_geometric_graph(50, 0.25, p=1)
for u, v in combinations(G, 2):
# Adjacent vertices must be within the given distance.
if v in G[u]:
assert_true(dist(G.nodes[u]['pos'], G.nodes[v]['pos']) <= 0.25)
def test_node_names(self):
"""Tests using values other than sequential numbers as node IDs.
"""
import string
nodes = list(string.ascii_lowercase)
G = nx.soft_random_geometric_graph(nodes, 0.25)
assert_equal(len(G), len(nodes))
dist = lambda x, y: sqrt(sum((a - b) ** 2 for a, b in zip(x, y)))
for u, v in combinations(G, 2):
# Adjacent vertices must be within the given distance.
if v in G[u]:
assert_true(dist(G.nodes[u]['pos'], G.nodes[v]['pos']) <= 0.25)
def test_p_dist_default(self):
"""Tests default p_dict = 0.5 returns graph with edge count <= RGG with
same n, radius, dim and positions
"""
nodes = 50
dim = 2
pos = {v: [random.random() for i in range(dim)] for v in range(nodes)}
RGG = nx.random_geometric_graph(50, 0.25,pos=pos)
SRGG = nx.soft_random_geometric_graph(50, 0.25,pos=pos)
assert_true(len(SRGG.edges()) <= len(RGG.edges()))
def test_p_dist_zero(self):
"""Tests if p_dict = 0 returns disconencted graph with 0 edges
"""
def p_dist(dist):
return 0
G = nx.soft_random_geometric_graph(50, 0.25, p_dist = p_dist)
assert_true(len(G.edges) == 0)
def join(G, u, v, theta, alpha, metric):
"""Returns ``True`` if and only if the nodes whose attributes are
``du`` and ``dv`` should be joined, according to the threshold
condition for geographical threshold graphs.
``G`` is an undirected NetworkX graph, and ``u`` and ``v`` are nodes
in that graph. The nodes must have node attributes ``'pos'`` and
``'weight'``.
``metric`` is a distance metric.
"""
du, dv = G.nodes[u], G.nodes[v]
u_pos, v_pos = du['pos'], dv['pos']
u_weight, v_weight = du['weight'], dv['weight']
return (u_weight + v_weight)*metric(u_pos, v_pos) ** alpha >= theta
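# Editor's note (hedged worked example, not part of the original tests): with theta = 10
# and alpha = -2, two nodes of weight 0.5 each at Euclidean distance 0.1 give
# (0.5 + 0.5) * 0.1 ** -2 = 100 >= 10, so `join` returns True.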
class TestGeographicalThresholdGraph(object):
"""Unit tests for the :func:`~networkx.geographical_threshold_graph`
function.
"""
def test_number_of_nodes(self):
G = nx.geographical_threshold_graph(50, 100)
assert_equal(len(G), 50)
G = nx.geographical_threshold_graph(range(50), 100)
assert_equal(len(G), 50)
def test_distances(self):
"""Tests that pairs of vertices adjacent if and only if their
distances meet the given threshold.
"""
# Use the Euclidean metric and alpha = -2
# the default according to the documentation.
dist = euclidean
G = nx.geographical_threshold_graph(50, 10)
for u, v in combinations(G, 2):
# Adjacent vertices must exceed the threshold.
if v in G[u]:
assert_true(join(G, u, v, 10, -2, dist))
# Nonadjacent vertices must not exceed the threshold.
else:
assert_false(join(G, u, v, 10, -2, dist))
def test_metric(self):
"""Tests for providing an alternate distance metric to the
generator.
"""
# Use the L1 metric.
dist = l1dist
G = nx.geographical_threshold_graph(50, 10, metric=dist)
for u, v in combinations(G, 2):
# Adjacent vertices must exceed the threshold.
if v in G[u]:
assert_true(join(G, u, v, 10, -2, dist))
# Nonadjacent vertices must not exceed the threshold.
else:
assert_false(join(G, u, v, 10, -2, dist))
def test_p_dist_zero(self):
"""Tests if p_dict = 0 returns disconencted graph with 0 edges
"""
def p_dist(dist):
return 0
G = nx.geographical_threshold_graph(50, 1, p_dist=p_dist)
assert_true(len(G.edges) == 0)
class TestWaxmanGraph(object):
"""Unit tests for the :func:`~networkx.waxman_graph` function."""
def test_number_of_nodes_1(self):
G = nx.waxman_graph(50, 0.5, 0.1)
assert_equal(len(G), 50)
G = nx.waxman_graph(range(50), 0.5, 0.1)
assert_equal(len(G), 50)
def test_number_of_nodes_2(self):
G = nx.waxman_graph(50, 0.5, 0.1, L=1)
assert_equal(len(G), 50)
G = nx.waxman_graph(range(50), 0.5, 0.1, L=1)
assert_equal(len(G), 50)
def test_metric(self):
"""Tests for providing an alternate distance metric to the
generator.
"""
# Use the L1 metric.
dist = l1dist
G = nx.waxman_graph(50, 0.5, 0.1, metric=dist)
assert_equal(len(G), 50)
class TestNavigableSmallWorldGraph(object):
def test_navigable_small_world(self):
G = nx.navigable_small_world_graph(5, p=1, q=0)
gg = nx.grid_2d_graph(5, 5).to_directed()
assert_true(nx.is_isomorphic(G, gg))
G = nx.navigable_small_world_graph(5, p=1, q=0, dim=3)
gg = nx.grid_graph([5, 5, 5]).to_directed()
assert_true(nx.is_isomorphic(G, gg))
G = nx.navigable_small_world_graph(5, p=1, q=0, dim=1)
gg = nx.grid_graph([5]).to_directed()
assert_true(nx.is_isomorphic(G, gg))
class TestThresholdedRandomGeometricGraph(object):
"""Unit tests for the :func:`~networkx.thresholded_random_geometric_graph`
function.
"""
def test_number_of_nodes(self):
G = nx.thresholded_random_geometric_graph(50, 0.2, 0.1)
assert_equal(len(G), 50)
G = nx.thresholded_random_geometric_graph(range(50), 0.2, 0.1)
assert_equal(len(G), 50)
def test_distances(self):
"""Tests that pairs of vertices adjacent if and only if they are
within the prescribed radius.
"""
# Use the Euclidean metric, the default according to the
# documentation.
dist = lambda x, y: sqrt(sum((a - b) ** 2 for a, b in zip(x, y)))
G = nx.thresholded_random_geometric_graph(50, 0.25, 0.1)
for u, v in combinations(G, 2):
# Adjacent vertices must be within the given distance.
if v in G[u]:
assert_true(dist(G.nodes[u]['pos'], G.nodes[v]['pos']) <= 0.25)
def test_p(self):
"""Tests for providing an alternate distance metric to the
generator.
"""
# Use the L1 metric.
dist = lambda x, y: sum(abs(a - b) for a, b in zip(x, y))
G = nx.thresholded_random_geometric_graph(50, 0.25, 0.1, p=1)
for u, v in combinations(G, 2):
# Adjacent vertices must be within the given distance.
if v in G[u]:
assert_true(dist(G.nodes[u]['pos'], G.nodes[v]['pos']) <= 0.25)
def test_node_names(self):
"""Tests using values other than sequential numbers as node IDs.
"""
import string
nodes = list(string.ascii_lowercase)
G = nx.thresholded_random_geometric_graph(nodes, 0.25, 0.1)
assert_equal(len(G), len(nodes))
dist = lambda x, y: sqrt(sum((a - b) ** 2 for a, b in zip(x, y)))
for u, v in combinations(G, 2):
# Adjacent vertices must be within the given distance.
if v in G[u]:
assert_true(dist(G.nodes[u]['pos'], G.nodes[v]['pos']) <= 0.25)
def test_theta(self):
"""Tests that pairs of vertices adjacent if and only if their sum
weights exceeds the threshold parameter theta.
"""
G = nx.thresholded_random_geometric_graph(50, 0.25, 0.1)
for u, v in combinations(G, 2):
# Adjacent vertices must be within the given distance.
if v in G[u]:
assert_true((G.nodes[u]['weight'] + G.nodes[v]['weight']) >= 0.1)
| 34.801187
| 81
| 0.590041
|
cf231a3f42081237f270c1cddf82de720b8ae621
| 6,845
|
py
|
Python
|
tests/unit/test_faultydata.py
|
ljishen/server
|
b641df7de19afb67df28d70c9b64b4faa3c56b23
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_faultydata.py
|
ljishen/server
|
b641df7de19afb67df28d70c9b64b4faa3c56b23
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_faultydata.py
|
ljishen/server
|
b641df7de19afb67df28d70c9b64b4faa3c56b23
|
[
"Apache-2.0"
] | null | null | null |
"""
Unit tests for faulty data sets.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import unittest
import ga4gh.datamodel.datasets as datasets
import ga4gh.datamodel.references as references
import ga4gh.datamodel.variants as variants
import ga4gh.exceptions as exceptions
import ga4gh.protocol as protocol
import ga4gh.backend as backend
import ga4gh.datarepo as datarepo
class FaultyVariantDataTest(unittest.TestCase):
"""
Superclass of faulty variant data tests.
"""
def setUp(self):
self.testDataDir = "tests/faultydata/variants"
self.dataset = datasets.AbstractDataset('dataset1')
def getFullPath(self, localId):
return os.path.join(self.testDataDir, localId)
class TestVariantSetNoIndexedVcf(FaultyVariantDataTest):
localIds = ["no_indexed_vcf"]
def testInstantiation(self):
for localId in self.localIds:
path = self.getFullPath(localId)
self.assertRaises(
exceptions.NotIndexedException,
variants.HtslibVariantSet, self.dataset, localId, path,
None)
class TestInconsistentMetaData(FaultyVariantDataTest):
localIds = ["inconsist_meta"]
def testInstantiation(self):
for localId in self.localIds:
path = self.getFullPath(localId)
variantSet = variants.HtslibVariantSet(
self.dataset, localId, path, None)
with self.assertRaises(exceptions.InconsistentMetaDataException):
variantSet.checkConsistency()
class TestInconsistentCallSetId(FaultyVariantDataTest):
localIds = ["inconsist_sampleid", "inconsist_sampleid2"]
def testInstantiation(self):
for localId in self.localIds:
path = self.getFullPath(localId)
variantSet = variants.HtslibVariantSet(
self.dataset, localId, path, None)
with self.assertRaises(exceptions.InconsistentCallSetIdException):
variantSet.checkConsistency()
class TestOverlappingVcfVariants(FaultyVariantDataTest):
localIds = ["overlapping_vcf"]
def testInstantiation(self):
for localId in self.localIds:
path = self.getFullPath(localId)
with self.assertRaises(exceptions.OverlappingVcfException):
variants.HtslibVariantSet(self.dataset, localId, path, None)
class TestEmptyDirException(FaultyVariantDataTest):
localIds = ["empty_dir"]
def testInstantiation(self):
for localId in self.localIds:
path = self.getFullPath(localId)
self.assertRaises(
exceptions.EmptyDirException,
variants.HtslibVariantSet, self.dataset, localId, path, None)
class TestDuplicateCallSetId(FaultyVariantDataTest):
"""
THIS SECTION IS CURRENTLY NOT WORKING
It returns the following error:
[E::bcf_hdr_add_sample] Duplicated sample name 'S1'
Aborted (core dumped)
which is coming from:
htslib/vcf.c function bcf_hdr_add_sample
UNABLE TO CAPTURE EXCEPTION
"""
localIds = ["duplicated_sampleid"]
@unittest.skipIf(protocol.version.startswith("0.6"), "")
def testInstantiation(self):
for localId in self.localIds:
path = self.getFullPath(localId)
self.assertRaises(
exceptions.DuplicateCallSetIdException,
variants.HtslibVariantSet, self.dataset, localId, path,
None)
class FaultyReferenceDataTest(unittest.TestCase):
"""
Superclass of faulty reference data tests
"""
def getFullPath(self, localId):
testDataDir = "tests/faultydata/references"
return os.path.join(testDataDir, localId)
class TestTwoReferences(FaultyReferenceDataTest):
"""
Tests for FASTA files with more than one reference.
"""
def testInstantiation(self):
localId = "two_references"
path = self.getFullPath(localId)
self.assertRaises(
exceptions.NotExactlyOneReferenceException,
references.HtslibReferenceSet, localId, path, None)
class TestInconsistentReferenceName(FaultyReferenceDataTest):
"""
Tests the case in which we have a reference file with a different
name to the ID in the fasta file.
"""
def testInstantiation(self):
localId = "inconsistent_reference_name"
path = self.getFullPath(localId)
self.assertRaises(
exceptions.InconsistentReferenceNameException,
references.HtslibReferenceSet, localId, path, None)
class FaultyReferenceSetDataTest(unittest.TestCase):
"""
Superclass of faulty reference set data tests
"""
def getFullPath(self, localId):
testDataDir = "tests/faultydata/references"
return os.path.join(testDataDir, localId)
class TestNoReferenceSetMetadata(FaultyReferenceSetDataTest):
"""
Tests an error is thrown with a missing reference set metadata file
"""
def testNoReferenceSetMetadata(self):
localId = "no_refset_meta"
path = self.getFullPath(localId)
with self.assertRaises(IOError):
references.HtslibReferenceSet(localId, path, None)
class TestMissingReferenceSetMetadata(FaultyReferenceSetDataTest):
"""
Tests an error is thrown with a reference set metadata file that
is missing entries
"""
def testMissingReferenceSetMetadata(self):
localId = "missing_refset_meta"
path = self.getFullPath(localId)
with self.assertRaises(exceptions.MissingReferenceSetMetadata):
references.HtslibReferenceSet(localId, path, None)
class TestInvalidReferenceSetMetadata(FaultyReferenceSetDataTest):
"""
Tests an error is thrown with a reference set metadata file that
can not be parsed
"""
def testMissingReferenceSetMetadata(self):
localId = "invalid_refset_meta"
path = self.getFullPath(localId)
with self.assertRaises(ValueError):
references.HtslibReferenceSet(localId, path, None)
class FaultyDatasetTest(unittest.TestCase):
"""
Superclass of faulty dataset tests.
"""
def setUp(self):
self.testDataDir = "tests/faultydata/datasets"
def getFullPath(self, localId):
return os.path.join(self.testDataDir, localId)
class TestBadDatasetMetadata(FaultyDatasetTest):
"""
    Tests that we raise an exception if the metadata is not correct.
"""
def testBadReferenceDatasetMetadata(self):
localId = "bad_metadata"
path = self.getFullPath(localId)
localBackend = backend.Backend(datarepo.EmptyDataRepository())
with self.assertRaises(exceptions.MissingDatasetMetadataException):
datasets.FileSystemDataset(localId, path, localBackend)
| 31.689815
| 78
| 0.696275
|
1b01d47120a44acff1d17ce744a4a78aa8af8a94
| 438
|
py
|
Python
|
app/machines/context_processors.py
|
jayfk/machinery
|
b654a3cffb49809e6df705e7130dae9bbdb5cad5
|
[
"Apache-2.0"
] | 15
|
2015-05-22T14:04:53.000Z
|
2021-01-04T04:32:08.000Z
|
app/machines/context_processors.py
|
jayfk/machinery
|
b654a3cffb49809e6df705e7130dae9bbdb5cad5
|
[
"Apache-2.0"
] | 1
|
2016-04-05T10:10:56.000Z
|
2016-04-05T10:10:56.000Z
|
app/machines/context_processors.py
|
jayfk/machinery
|
b654a3cffb49809e6df705e7130dae9bbdb5cad5
|
[
"Apache-2.0"
] | 7
|
2015-08-31T08:35:52.000Z
|
2018-02-19T02:07:24.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from django.core.urlresolvers import reverse
from .core import machines_ls
def machine_cache_context_processor(request):
"""
Basic context processor to populate the context with a cached list of machines.
"""
return {"machines": machines_ls(cached=True),
"sidebar_url": reverse("machines:list-sidebar-partial")}
| 36.5
| 83
| 0.742009
|
55458262fdd68f856e10cd80621eeedde7d36f1e
| 1,027
|
py
|
Python
|
ionoscloud/api/__init__.py
|
ionos-cloud/sdk-python
|
bb22b5b93505b25de6aebae97c523a6c2242ec2e
|
[
"Apache-2.0"
] | null | null | null |
ionoscloud/api/__init__.py
|
ionos-cloud/sdk-python
|
bb22b5b93505b25de6aebae97c523a6c2242ec2e
|
[
"Apache-2.0"
] | 6
|
2021-11-26T16:18:51.000Z
|
2022-02-18T10:08:49.000Z
|
ionoscloud/api/__init__.py
|
ionos-cloud/sdk-python
|
bb22b5b93505b25de6aebae97c523a6c2242ec2e
|
[
"Apache-2.0"
] | 1
|
2021-04-20T09:29:17.000Z
|
2021-04-20T09:29:17.000Z
|
from __future__ import absolute_import
# flake8: noqa
# import apis into api package
from ionoscloud.api.backup_unit_api import BackupUnitApi
from ionoscloud.api.contract_api import ContractApi
from ionoscloud.api.data_center_api import DataCenterApi
from ionoscloud.api.ip_blocks_api import IPBlocksApi
from ionoscloud.api.image_api import ImageApi
from ionoscloud.api.kubernetes_api import KubernetesApi
from ionoscloud.api.label_api import LabelApi
from ionoscloud.api.lan_api import LanApi
from ionoscloud.api.load_balancer_api import LoadBalancerApi
from ionoscloud.api.location_api import LocationApi
from ionoscloud.api.nic_api import NicApi
from ionoscloud.api.private_cross_connect_api import PrivateCrossConnectApi
from ionoscloud.api.request_api import RequestApi
from ionoscloud.api.server_api import ServerApi
from ionoscloud.api.snapshot_api import SnapshotApi
from ionoscloud.api.user_management_api import UserManagementApi
from ionoscloud.api.volume_api import VolumeApi
from ionoscloud.api.__api import Api
| 42.791667
| 75
| 0.877313
|
b7dc53c62ad44e00235364d5e92ebb0ea69ae909
| 440
|
py
|
Python
|
project_sf_crime_data_project/tests.py
|
seoruosa/streaming-data-nanodegree
|
14961bc42c626e74ac23cc94f69e25eab39a2da1
|
[
"MIT"
] | null | null | null |
project_sf_crime_data_project/tests.py
|
seoruosa/streaming-data-nanodegree
|
14961bc42c626e74ac23cc94f69e25eab39a2da1
|
[
"MIT"
] | null | null | null |
project_sf_crime_data_project/tests.py
|
seoruosa/streaming-data-nanodegree
|
14961bc42c626e74ac23cc94f69e25eab39a2da1
|
[
"MIT"
] | null | null | null |
import json
FILEPATH = 'police-department-calls-for-service.json'
def main():
# https://stackoverflow.com/questions/34010778/python-read-in-an-array-of-json-objects-using-json-loads
with open(FILEPATH, 'r') as f:
json_data = json.loads(f.read())
i = 0
for line in json_data:
print(line)
i += 1
if i>10:
return
print(json_data[0])
if __name__ == '__main__':
main()
| 22
| 107
| 0.604545
|
12289fb4dcc6bbea29f6e2a1bdb4b93f5e02899a
| 54,464
|
py
|
Python
|
safe_transaction_service/history/models.py
|
gnosis/safe-transaction-service
|
e153a80a36adcc471664aa2e11892c72f7eca911
|
[
"MIT"
] | 67
|
2019-08-16T16:26:42.000Z
|
2022-03-21T20:32:43.000Z
|
safe_transaction_service/history/models.py
|
gnosis/safe-transaction-service
|
e153a80a36adcc471664aa2e11892c72f7eca911
|
[
"MIT"
] | 550
|
2019-07-11T12:09:06.000Z
|
2022-03-31T16:32:00.000Z
|
safe_transaction_service/history/models.py
|
gnosis/safe-transaction-service
|
e153a80a36adcc471664aa2e11892c72f7eca911
|
[
"MIT"
] | 83
|
2019-12-06T11:22:32.000Z
|
2022-03-30T10:09:22.000Z
|
import datetime
from enum import Enum
from itertools import islice
from logging import getLogger
from typing import Any, Dict, List, Optional, Sequence, Set, Tuple, Type, TypedDict
from django.contrib.postgres.fields import ArrayField
from django.contrib.postgres.indexes import GinIndex
from django.core.exceptions import ValidationError
from django.db import IntegrityError, connection, models
from django.db.models import Case, Count, Index, JSONField, Max, Q, QuerySet, Sum
from django.db.models.expressions import F, OuterRef, RawSQL, Subquery, Value, When
from django.db.models.functions import Coalesce
from django.db.models.signals import post_save
from django.utils.translation import gettext_lazy as _
from eth_typing import ChecksumAddress
from hexbytes import HexBytes
from model_utils.models import TimeStampedModel
from packaging.version import Version
from gnosis.eth.constants import ERC20_721_TRANSFER_TOPIC
from gnosis.eth.django.models import (
EthereumAddressField,
HexField,
Sha3HashField,
Uint256Field,
)
from gnosis.safe import SafeOperation
from gnosis.safe.safe_signature import SafeSignature, SafeSignatureType
from safe_transaction_service.contracts.models import Contract
from .utils import clean_receipt_log
logger = getLogger(__name__)
class ConfirmationType(Enum):
CONFIRMATION = 0
EXECUTION = 1
class EthereumTxCallType(Enum):
# https://ethereum.stackexchange.com/questions/63743/whats-the-difference-between-type-and-calltype-in-parity-trace
CALL = 0
DELEGATE_CALL = 1
CALL_CODE = 2
STATIC_CALL = 3
@staticmethod
def parse_call_type(call_type: Optional[str]):
if not call_type:
return None
call_type = call_type.lower()
if call_type == "call":
return EthereumTxCallType.CALL
elif call_type == "delegatecall":
return EthereumTxCallType.DELEGATE_CALL
elif call_type == "callcode":
return EthereumTxCallType.CALL_CODE
elif call_type == "staticcall":
return EthereumTxCallType.STATIC_CALL
else:
return None
class InternalTxType(Enum):
CALL = 0
CREATE = 1
SELF_DESTRUCT = 2
REWARD = 3
@staticmethod
def parse(tx_type: str):
tx_type = tx_type.upper()
if tx_type == "CALL":
return InternalTxType.CALL
elif tx_type == "CREATE":
return InternalTxType.CREATE
elif tx_type in ("SUICIDE", "SELFDESTRUCT"):
return InternalTxType.SELF_DESTRUCT
elif tx_type == "REWARD":
return InternalTxType.REWARD
else:
raise ValueError(f"{tx_type} is not a valid InternalTxType")
class TransferDict(TypedDict):
block_number: int
transaction_hash: HexBytes
to: str
_from: str
value: int
execution_date: datetime.datetime
token_id: int
token_address: str
class BulkCreateSignalMixin:
def bulk_create(
self, objs, batch_size: Optional[int] = None, ignore_conflicts: bool = False
):
        objs = list(objs)  # If not, it cannot be iterated over again later
result = super().bulk_create(
objs, batch_size=batch_size, ignore_conflicts=ignore_conflicts
)
for obj in objs:
post_save.send(obj.__class__, instance=obj, created=True)
return result
def bulk_create_from_generator(
self, objs, batch_size: int = 100, ignore_conflicts: bool = False
) -> int:
"""
        Django's own implementation is not suitable here, as it does `objs = list(objs)`:
        if the objects come from a generator they are all loaded into RAM at once. This
        approach consumes the generator in batches instead.
:return: Count of inserted elements
"""
assert batch_size is not None and batch_size > 0
total = 0
while True:
if inserted := len(
self.bulk_create(
islice(objs, batch_size), ignore_conflicts=ignore_conflicts
)
):
total += inserted
else:
return total
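# Editor's note (hedged sketch, not part of the original module): a manager built from
# the mixin above can stream rows from a generator in fixed-size batches, e.g.
#
#   class ExampleManager(BulkCreateSignalMixin, models.Manager):
#       pass
#
#   ExampleModel.objects.bulk_create_from_generator(
#       (ExampleModel(value=i) for i in range(1_000_000)), batch_size=500)
#
# ExampleManager / ExampleModel are illustrative names only.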
class EthereumBlockManager(models.Manager):
def get_or_create_from_block(self, block: Dict[str, Any], confirmed: bool = False):
try:
return self.get(number=block["number"])
except self.model.DoesNotExist:
return self.create_from_block(block, confirmed=confirmed)
def create_from_block(
self, block: Dict[str, Any], confirmed: bool = False
) -> "EthereumBlock":
"""
:param block: Block Dict returned by Web3
:param confirmed: If True we will not check for reorgs in the future
:return: EthereumBlock model
"""
try:
return super().create(
number=block["number"],
gas_limit=block["gasLimit"],
gas_used=block["gasUsed"],
timestamp=datetime.datetime.fromtimestamp(
block["timestamp"], datetime.timezone.utc
),
block_hash=block["hash"],
parent_hash=block["parentHash"],
confirmed=confirmed,
)
except IntegrityError:
            # The block could have been created in the meantime by another task while it was being fetched from the blockchain
return self.get(number=block["number"])
class EthereumBlockQuerySet(models.QuerySet):
def not_confirmed(self, to_block_number: Optional[int] = None):
"""
:param to_block_number:
:return: Block not confirmed until ``to_block_number``, if provided
"""
queryset = self.filter(confirmed=False)
if to_block_number is not None:
queryset = queryset.filter(number__lte=to_block_number)
return queryset.order_by("number")
class EthereumBlock(models.Model):
objects = EthereumBlockManager.from_queryset(EthereumBlockQuerySet)()
number = models.PositiveIntegerField(primary_key=True)
gas_limit = models.PositiveIntegerField()
gas_used = models.PositiveIntegerField()
timestamp = models.DateTimeField()
block_hash = Sha3HashField(unique=True)
parent_hash = Sha3HashField(unique=True)
# For reorgs, True if `current_block_number` - `number` >= MIN_CONFIRMATIONS
confirmed = models.BooleanField(default=False, db_index=True)
def __str__(self):
return f"Block number={self.number} on {self.timestamp}"
def _set_confirmed(self, confirmed: bool):
if self.confirmed != confirmed:
self.confirmed = confirmed
self.save(update_fields=["confirmed"])
def set_confirmed(self):
return self._set_confirmed(True)
def set_not_confirmed(self):
return self._set_confirmed(False)
class EthereumTxManager(models.Manager):
def create_from_tx_dict(
self,
tx: Dict[str, Any],
tx_receipt: Optional[Dict[str, Any]] = None,
ethereum_block: Optional[EthereumBlock] = None,
) -> "EthereumTx":
data = HexBytes(tx.get("data") or tx.get("input"))
# Supporting EIP1559
if "gasPrice" in tx:
gas_price = tx["gasPrice"]
else:
assert tx_receipt, f"Tx-receipt is required for EIP1559 tx {tx}"
gas_price = tx_receipt.get("effectiveGasPrice")
assert gas_price is not None, f"Gas price for tx {tx} cannot be None"
gas_price = int(gas_price, 0)
return super().create(
block=ethereum_block,
tx_hash=HexBytes(tx["hash"]).hex(),
_from=tx["from"],
gas=tx["gas"],
gas_price=gas_price,
gas_used=tx_receipt and tx_receipt["gasUsed"],
logs=tx_receipt
and [clean_receipt_log(log) for log in tx_receipt.get("logs", list())],
status=tx_receipt and tx_receipt.get("status"),
transaction_index=tx_receipt and tx_receipt["transactionIndex"],
data=data if data else None,
nonce=tx["nonce"],
to=tx.get("to"),
value=tx["value"],
)
class EthereumTx(TimeStampedModel):
objects = EthereumTxManager()
block = models.ForeignKey(
EthereumBlock,
on_delete=models.CASCADE,
null=True,
default=None,
related_name="txs",
) # If mined
tx_hash = Sha3HashField(primary_key=True)
gas_used = Uint256Field(null=True, default=None) # If mined
status = models.IntegerField(
null=True, default=None, db_index=True
) # If mined. Old txs don't have `status`
logs = ArrayField(JSONField(), null=True, default=None) # If mined
transaction_index = models.PositiveIntegerField(null=True, default=None) # If mined
_from = EthereumAddressField(null=True, db_index=True)
gas = Uint256Field()
gas_price = Uint256Field()
data = models.BinaryField(null=True)
nonce = Uint256Field()
to = EthereumAddressField(null=True, db_index=True)
value = Uint256Field()
def __str__(self):
return "{} status={} from={} to={}".format(
self.tx_hash, self.status, self._from, self.to
)
@property
def execution_date(self) -> Optional[datetime.datetime]:
if self.block_id is not None:
return self.block.timestamp
return None
@property
def success(self) -> Optional[bool]:
if self.status is not None:
return self.status == 1
def update_with_block_and_receipt(
self, ethereum_block: "EthereumBlock", tx_receipt: Dict[str, Any]
):
if self.block is None:
self.block = ethereum_block
self.gas_used = tx_receipt["gasUsed"]
self.logs = [
clean_receipt_log(log) for log in tx_receipt.get("logs", list())
]
self.status = tx_receipt.get("status")
self.transaction_index = tx_receipt["transactionIndex"]
return self.save(
update_fields=[
"block",
"gas_used",
"logs",
"status",
"transaction_index",
]
)
class EthereumEventQuerySet(models.QuerySet):
def not_erc_20_721_events(self):
return self.exclude(topic=ERC20_721_TRANSFER_TOPIC)
def erc20_and_721_events(
self, token_address: Optional[str] = None, address: Optional[str] = None
):
queryset = self.filter(topic=ERC20_721_TRANSFER_TOPIC)
if token_address:
queryset = queryset.filter(address=token_address)
if address:
queryset = queryset.filter(
Q(arguments__to=address) | Q(arguments__from=address)
)
return queryset
def erc20_events(
self, token_address: Optional[str] = None, address: Optional[str] = None
):
return self.erc20_and_721_events(
token_address=token_address, address=address
).filter(arguments__has_key="value")
def erc721_events(
self, token_address: Optional[str] = None, address: Optional[str] = None
):
# TODO Sql with tokens registered as NFT
return self.erc20_and_721_events(
token_address=token_address, address=address
).filter(arguments__has_key="tokenId")
def erc721_owned_by(self, address: str) -> List[Tuple[str, int]]:
"""
Returns erc721 owned by address, removing the ones sent
:return: List of tuples(token_address: str, token_id: int)
"""
# Get all the token history
erc721_events = self.erc721_events(address=address)
# Get tokens received and remove tokens transferred
        tokens_in: List[Tuple[str, int]] = []
        tokens_out: List[Tuple[str, int]] = []
for erc721_event in erc721_events:
token_address = erc721_event.address
token_id = erc721_event.arguments.get("tokenId")
if token_id is None:
logger.error(
"TokenId for ERC721 info token=%s with owner=%s can never be None",
token_address,
address,
)
continue
if erc721_event.arguments.get("to") == address:
list_to_append = tokens_in
else:
list_to_append = tokens_out
list_to_append.append((token_address, token_id))
for token_out in tokens_out: # Remove tokens sent from list
if token_out in tokens_in:
tokens_in.remove(token_out)
return tokens_in
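    # Usage sketch (illustrative only; not part of the original module and `safe_address` is a
    # hypothetical variable). Since the manager below is built with `from_queryset`, this is reachable as:
    #
    #     EthereumEvent.objects.erc721_owned_by(safe_address)
    #
    # yielding the (token_address, token_id) pairs still held after removing transfers out.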
class EthereumEventManager(BulkCreateSignalMixin, models.Manager):
def from_decoded_event(self, decoded_event: Dict[str, Any]) -> "EthereumEvent":
"""
Does not create the model. Requires that `ethereum_tx` exists
:param decoded_event:
:return: `EthereumEvent` instance (not stored in database)
"""
return EthereumEvent(
ethereum_tx_id=decoded_event["transactionHash"],
log_index=decoded_event["logIndex"],
address=decoded_event["address"],
topic=decoded_event["topics"][0],
topics=decoded_event["topics"],
arguments=decoded_event["args"],
)
def erc20_tokens_used_by_address(
self, address: ChecksumAddress
) -> Set[ChecksumAddress]:
"""
:param address:
        :return: Set of token addresses used by `address`
"""
# return self.erc20_events(address=address).values_list('address', flat=True).distinct()
address_as_postgres_text = f'"{address}"'
with connection.cursor() as cursor:
cursor.execute(
"""
SELECT DISTINCT "address" FROM "history_ethereumevent" WHERE
("topic" = %s
AND (("arguments" -> 'to')::text = %s
OR ("arguments" -> 'from')::text = %s)
AND "arguments" ? 'value')
""",
[
ERC20_721_TRANSFER_TOPIC[2:],
address_as_postgres_text,
address_as_postgres_text,
],
)
return {row[0] for row in cursor.fetchall()}
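    # Note on the raw SQL above: the address is wrapped in double quotes because `(arguments -> 'to')::text`
    # yields the JSON text representation, which is what the manually created BTree indexes on `arguments`
    # cover (see the comment in `EthereumEvent.Meta` below). Usage sketch with a hypothetical variable:
    #
    #     EthereumEvent.objects.erc20_tokens_used_by_address(safe_address)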
def erc721_tokens_used_by_address(
self, address: ChecksumAddress
) -> Set[ChecksumAddress]:
"""
:param address:
        :return: Set of token addresses used by `address`
"""
# return self.erc721_events(address=address).values_list('address', flat=True).distinct()
address_as_postgres_text = f'"{address}"'
with connection.cursor() as cursor:
cursor.execute(
"""
                SELECT DISTINCT "address" FROM "history_ethereumevent" WHERE
                ("topic" = %s
                 AND (("arguments" -> 'to')::text = %s
                 OR ("arguments" -> 'from')::text = %s)
                 AND "arguments" ? 'tokenId')
""",
[
ERC20_721_TRANSFER_TOPIC[2:],
address_as_postgres_text,
address_as_postgres_text,
],
)
return {row[0] for row in cursor.fetchall()}
def erc20_tokens_with_balance(
self, address: ChecksumAddress
) -> List[Dict[str, Any]]:
"""
        :return: List of dictionaries {'address': str, 'balance': int}
"""
arguments_value_field = RawSQL("(arguments->>'value')::numeric", ())
return (
self.erc20_events(address=address)
.values("address")
.annotate(
balance=Sum(
Case(
When(arguments__from=address, then=-arguments_value_field),
default=arguments_value_field,
)
)
)
.order_by("-balance")
.values("address", "balance")
)
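    # Usage sketch (illustrative; `safe_address` is hypothetical). Each row is a dict like
    # {'address': <token address>, 'balance': <net amount after subtracting outgoing transfers>}:
    #
    #     EthereumEvent.objects.erc20_tokens_with_balance(safe_address)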
def get_or_create_erc20_or_721_event(self, decoded_event: Dict[str, Any]):
        if (
            "value" not in decoded_event["args"]
            and "tokenId" not in decoded_event["args"]
        ):
            # Neither an ERC20 `value` nor an ERC721 `tokenId` argument: not a Transfer we can handle
            raise ValueError("Invalid ERC20 or ERC721 event %s" % decoded_event)
else:
return self.get_or_create(
ethereum_tx_id=decoded_event["transactionHash"],
log_index=decoded_event["logIndex"],
defaults={
"address": decoded_event["address"],
"topic": decoded_event["topics"][0],
"topics": decoded_event["topics"],
"arguments": decoded_event["args"],
},
)
class EthereumEvent(models.Model):
objects = EthereumEventManager.from_queryset(EthereumEventQuerySet)()
ethereum_tx = models.ForeignKey(
EthereumTx, on_delete=models.CASCADE, related_name="events"
)
log_index = models.PositiveIntegerField()
address = EthereumAddressField(db_index=True)
topic = Sha3HashField(db_index=True)
topics = ArrayField(Sha3HashField())
arguments = JSONField()
class Meta:
indexes = [GinIndex(fields=["arguments"])]
unique_together = (("ethereum_tx", "log_index"),)
        # There are also 2 indexes created manually by the 0026 migration, both BTree, on arguments->to and
        # arguments->from. To use those indexes, JSON queries must be rewritten to use `::text` fields
def __str__(self):
return f"Tx-hash={self.ethereum_tx_id} Log-index={self.log_index} Topic={self.topic} Arguments={self.arguments}"
@property
def created(self):
return self.ethereum_tx.block.timestamp
def is_erc20(self) -> bool:
return (
self.topic == ERC20_721_TRANSFER_TOPIC
and "value" in self.arguments
and "to" in self.arguments
)
def is_erc721(self) -> bool:
return (
self.topic == ERC20_721_TRANSFER_TOPIC
and "tokenId" in self.arguments
and "to" in self.arguments
)
class InternalTxManager(BulkCreateSignalMixin, models.Manager):
def _trace_address_to_str(self, trace_address: Sequence[int]) -> str:
return ",".join([str(address) for address in trace_address])
def build_from_trace(
self, trace: Dict[str, Any], ethereum_tx: EthereumTx
) -> "InternalTx":
"""
        Build an InternalTx object from a trace, without inserting it into the database
:param trace:
:param ethereum_tx:
:return: InternalTx not inserted
"""
data = trace["action"].get("input") or trace["action"].get("init")
tx_type = InternalTxType.parse(trace["type"])
call_type = EthereumTxCallType.parse_call_type(trace["action"].get("callType"))
trace_address_str = self._trace_address_to_str(trace["traceAddress"])
return InternalTx(
ethereum_tx=ethereum_tx,
trace_address=trace_address_str,
_from=trace["action"].get("from"),
gas=trace["action"].get("gas", 0),
data=data if data else None,
to=trace["action"].get("to") or trace["action"].get("address"),
value=trace["action"].get("value") or trace["action"].get("balance", 0),
gas_used=(trace.get("result") or {}).get("gasUsed", 0),
contract_address=(trace.get("result") or {}).get("address"),
code=(trace.get("result") or {}).get("code"),
output=(trace.get("result") or {}).get("output"),
refund_address=trace["action"].get("refundAddress"),
tx_type=tx_type.value,
call_type=call_type.value if call_type else None,
error=trace.get("error"),
)
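    # For reference, a minimal sketch of the parity-style trace dict consumed above (illustrative only;
    # exact keys and value types depend on the tracing client and on the trace `type`):
    #
    #     {
    #         "type": "call",
    #         "action": {"from": "0x...", "to": "0x...", "gas": 50000, "value": 0,
    #                    "input": b"...", "callType": "delegatecall"},
    #         "result": {"gasUsed": 21000, "output": b""},
    #         "traceAddress": [0, 1],
    #     }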
def get_or_create_from_trace(
self, trace: Dict[str, Any], ethereum_tx: EthereumTx
) -> Tuple["InternalTx", bool]:
tx_type = InternalTxType.parse(trace["type"])
call_type = EthereumTxCallType.parse_call_type(trace["action"].get("callType"))
trace_address_str = self._trace_address_to_str(trace["traceAddress"])
return self.get_or_create(
ethereum_tx=ethereum_tx,
trace_address=trace_address_str,
defaults={
"_from": trace["action"].get("from"),
"gas": trace["action"].get("gas", 0),
"data": trace["action"].get("input") or trace["action"].get("init"),
"to": trace["action"].get("to") or trace["action"].get("address"),
"value": trace["action"].get("value")
or trace["action"].get("balance", 0),
"gas_used": (trace.get("result") or {}).get("gasUsed", 0),
"contract_address": (trace.get("result") or {}).get("address"),
"code": (trace.get("result") or {}).get("code"),
"output": (trace.get("result") or {}).get("output"),
"refund_address": trace["action"].get("refundAddress"),
"tx_type": tx_type.value,
"call_type": call_type.value if call_type else None,
"error": trace.get("error"),
},
)
class InternalTxQuerySet(models.QuerySet):
def ether_txs(self):
return (
self.filter(call_type=EthereumTxCallType.CALL.value, value__gt=0)
.annotate(
transaction_hash=F("ethereum_tx_id"),
block_number=F("ethereum_tx__block_id"),
execution_date=F("ethereum_tx__block__timestamp"),
token_id=Value(None, output_field=Uint256Field()),
token_address=Value(None, output_field=EthereumAddressField()),
)
.order_by("-ethereum_tx__block_id")
)
def ether_txs_for_address(self, address: str):
return self.ether_txs().filter(Q(to=address) | Q(_from=address))
def ether_incoming_txs_for_address(self, address: str):
return self.ether_txs().filter(to=address)
def token_txs(self):
return (
EthereumEvent.objects.erc20_and_721_events()
.annotate(
to=RawSQL("arguments->>%s", ("to",)), # Order is really important!
_from=RawSQL("arguments->>%s", ("from",)),
value=RawSQL("(arguments->>%s)::numeric", ("value",)),
transaction_hash=F("ethereum_tx_id"),
block_number=F("ethereum_tx__block_id"),
execution_date=F("ethereum_tx__block__timestamp"),
token_id=RawSQL("(arguments->>%s)::numeric", ("tokenId",)),
token_address=F("address"),
)
.order_by("-ethereum_tx__block_id")
)
def token_txs_for_address(self, address: str):
return self.token_txs().filter(
Q(arguments__to=address) | Q(arguments__from=address)
)
def token_incoming_txs_for_address(self, address: str):
return self.token_txs().filter(arguments__to=address)
def ether_and_token_txs(self, address: str):
tokens_queryset = self.token_txs_for_address(address)
ether_queryset = self.ether_txs_for_address(address)
return self.union_ether_and_token_txs(tokens_queryset, ether_queryset)
def ether_and_token_incoming_txs(self, address: str):
tokens_queryset = self.token_incoming_txs_for_address(address)
ether_queryset = self.ether_incoming_txs_for_address(address)
return self.union_ether_and_token_txs(tokens_queryset, ether_queryset)
def union_ether_and_token_txs(
self, tokens_queryset: QuerySet, ether_queryset: QuerySet
) -> TransferDict:
values = (
"block_number",
"transaction_hash",
"to",
"_from",
"value",
"execution_date",
"token_id",
"token_address",
)
return (
ether_queryset.values(*values)
.union(tokens_queryset.values(*values))
.order_by("-block_number")
)
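    # The shared `values(...)` projection above is what makes the SQL UNION between ether and token
    # transfers valid. Typical entry points (illustrative; `safe_address` is hypothetical):
    #
    #     InternalTx.objects.ether_and_token_txs(safe_address)
    #     InternalTx.objects.ether_and_token_incoming_txs(safe_address)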
def can_be_decoded(self):
"""
Every InternalTx can be decoded if:
- Has data
- InternalTx is not errored
- EthereumTx is successful (not reverted or out of gas)
- CallType is a DELEGATE_CALL (to the master copy contract)
- Not already decoded
:return: Txs that can be decoded
"""
return self.exclude(data=None).filter(
call_type=EthereumTxCallType.DELEGATE_CALL.value,
error=None,
ethereum_tx__status=1,
decoded_tx=None,
)
class InternalTx(models.Model):
objects = InternalTxManager.from_queryset(InternalTxQuerySet)()
ethereum_tx = models.ForeignKey(
EthereumTx, on_delete=models.CASCADE, related_name="internal_txs"
)
_from = EthereumAddressField(
null=True, db_index=True
) # For SELF-DESTRUCT it can be null
gas = Uint256Field()
data = models.BinaryField(null=True) # `input` for Call, `init` for Create
to = EthereumAddressField(null=True, db_index=True)
value = Uint256Field()
gas_used = Uint256Field()
contract_address = EthereumAddressField(null=True, db_index=True) # Create
code = models.BinaryField(null=True) # Create
output = models.BinaryField(null=True) # Call
refund_address = EthereumAddressField(null=True, db_index=True) # For SELF-DESTRUCT
tx_type = models.PositiveSmallIntegerField(
choices=[(tag.value, tag.name) for tag in InternalTxType], db_index=True
)
call_type = models.PositiveSmallIntegerField(
null=True,
choices=[(tag.value, tag.name) for tag in EthereumTxCallType],
db_index=True,
) # Call
trace_address = models.CharField(max_length=600) # Stringified traceAddress
error = models.CharField(max_length=200, null=True)
class Meta:
unique_together = (("ethereum_tx", "trace_address"),)
def __str__(self):
if self.to:
return "Internal tx hash={} from={} to={}".format(
self.ethereum_tx_id, self._from, self.to
)
else:
return "Internal tx hash={} from={}".format(self.ethereum_tx_id, self._from)
@property
def block_number(self):
return self.ethereum_tx.block_id
@property
def created(self):
return self.ethereum_tx.block.timestamp
@property
def can_be_decoded(self) -> bool:
return bool(
self.is_delegate_call
and not self.error
and self.data
and self.ethereum_tx.success
)
@property
def is_call(self):
return InternalTxType(self.tx_type) == InternalTxType.CALL
@property
def is_create(self):
return InternalTxType(self.tx_type) == InternalTxType.CREATE
@property
def is_decoded(self):
try:
return bool(self.decoded_tx)
except InternalTxDecoded.DoesNotExist:
return False
@property
def is_delegate_call(self) -> bool:
if self.call_type is None:
return False
else:
return (
EthereumTxCallType(self.call_type) == EthereumTxCallType.DELEGATE_CALL
)
@property
def is_ether_transfer(self) -> bool:
return self.call_type == EthereumTxCallType.CALL.value and self.value > 0
@property
def is_relevant(self):
return self.can_be_decoded or self.is_ether_transfer or self.contract_address
@property
def trace_address_as_list(self) -> List[int]:
if not self.trace_address:
return []
else:
return [int(x) for x in self.trace_address.split(",")]
def get_parent(self) -> Optional["InternalTx"]:
if (
"," not in self.trace_address
): # We are expecting something like 0,0,1 or 1,1
return None
parent_trace_address = ",".join(self.trace_address.split(",")[:-1])
try:
return InternalTx.objects.filter(
ethereum_tx_id=self.ethereum_tx_id, trace_address=parent_trace_address
).get()
except InternalTx.DoesNotExist:
return None
def get_child(self, index: int) -> Optional["InternalTx"]:
child_trace_address = f"{self.trace_address},{index}"
try:
return InternalTx.objects.filter(
ethereum_tx_id=self.ethereum_tx_id, trace_address=child_trace_address
).get()
except InternalTx.DoesNotExist:
return None
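    # trace_address navigation sketch (illustrative): for an InternalTx with trace_address "0,0,1",
    # `get_parent()` looks up "0,0" and `get_child(2)` looks up "0,0,1,2", both within the same
    # `ethereum_tx`.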
class InternalTxDecodedManager(BulkCreateSignalMixin, models.Manager):
pass
class InternalTxDecodedQuerySet(models.QuerySet):
def for_safe(self, safe_address: str):
"""
:param safe_address:
:return: Queryset of all InternalTxDecoded for one Safe with `safe_address`
"""
return self.filter(internal_tx___from=safe_address)
def for_indexed_safes(self):
"""
:return: Queryset of InternalTxDecoded for Safes already indexed or calling `setup`. Use this to index Safes
for the first time
"""
return self.filter(
Q(
internal_tx___from__in=SafeContract.objects.values("address")
) # Just Safes indexed
| Q(function_name="setup") # This way we can index new Safes without events
)
def not_processed(self):
return self.filter(processed=False)
def order_by_processing_queue(self):
"""
        :return: Transactions ordered for processing: `setup` transactions first, then the rest from oldest to newest
"""
return self.annotate(
is_setup=Case(
When(function_name="setup", then=Value(0)),
default=Value(1),
)
).order_by(
"is_setup",
"internal_tx__ethereum_tx__block_id",
"internal_tx__ethereum_tx__transaction_index",
"internal_tx__trace_address",
)
def pending_for_safes(self):
"""
:return: Pending `InternalTxDecoded` sorted by block number and then transaction index inside the block
"""
return (
self.not_processed()
.for_indexed_safes()
.select_related(
"internal_tx__ethereum_tx__block",
)
.order_by_processing_queue()
)
def pending_for_safe(self, safe_address: str):
"""
:return: Pending `InternalTxDecoded` sorted by block number and then transaction index inside the block
"""
return self.pending_for_safes().filter(internal_tx___from=safe_address)
def safes_pending_to_be_processed(self) -> QuerySet:
"""
:return: List of Safe addresses that have transactions pending to be processed
"""
return (
self.not_processed()
.for_indexed_safes()
.values_list("internal_tx___from", flat=True)
.distinct("internal_tx___from")
)
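    # Processing sketch (illustrative; not part of the original module): an indexer could drain the
    # queue per Safe with something like
    #
    #     for safe_address in InternalTxDecoded.objects.safes_pending_to_be_processed():
    #         for decoded_tx in InternalTxDecoded.objects.pending_for_safe(safe_address):
    #             ...  # apply the decoded call, then decoded_tx.set_processed()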
class InternalTxDecoded(models.Model):
objects = InternalTxDecodedManager.from_queryset(InternalTxDecodedQuerySet)()
internal_tx = models.OneToOneField(
InternalTx,
on_delete=models.CASCADE,
related_name="decoded_tx",
primary_key=True,
)
function_name = models.CharField(max_length=256, db_index=True)
arguments = JSONField()
processed = models.BooleanField(default=False)
class Meta:
indexes = [
models.Index(
name="history_decoded_processed_idx",
fields=["processed"],
condition=Q(processed=False),
)
]
verbose_name_plural = "Internal txs decoded"
def __str__(self):
return (
f'{"Processed" if self.processed else "Not Processed"} '
f"fn-name={self.function_name} with arguments={self.arguments}"
)
@property
def address(self) -> str:
return self.internal_tx._from
@property
    def block_number(self) -> int:
return self.internal_tx.ethereum_tx.block_id
@property
    def tx_hash(self) -> str:
return self.internal_tx.ethereum_tx_id
def set_processed(self):
self.processed = True
return self.save(update_fields=["processed"])
class MultisigTransactionManager(models.Manager):
def last_nonce(self, safe: str) -> Optional[int]:
"""
:param safe:
:return: nonce of the last executed and mined transaction. It will be None if there's no transactions or none
of them is mined
"""
nonce_query = (
self.filter(safe=safe)
.exclude(ethereum_tx=None)
.order_by("-nonce")
.values("nonce")
.first()
)
if nonce_query:
return nonce_query["nonce"]
def last_valid_transaction(self, safe: str) -> Optional["MultisigTransaction"]:
"""
Find last transaction where signers match the owners registered for that Safe. Transactions out of sync
have an invalid `safeNonce`, so `safeTxHash` is not valid and owners recovered from the signatures wouldn't be
        valid. We exclude `Approved hashes` and `Contract signatures`, as those owners are not recovered from the
        signature, so they will show the right owner even if `safeNonce` is not valid
:param safe:
:return: Last valid indexed transaction mined
"""
        # Build list of every owner known for that Safe (even if it was deleted/replaced). Chances of collision with
        # invalid owners recovered from signatures are negligible
owners_set = set()
for owners_list in (
SafeStatus.objects.filter(address=safe)
.values_list("owners", flat=True)
.distinct()
):
owners_set.update(owners_list)
return (
MultisigTransaction.objects.filter(
safe=safe,
confirmations__owner__in=owners_set,
confirmations__signature_type__in=[
SafeSignatureType.EOA.value,
SafeSignatureType.ETH_SIGN.value,
],
)
.exclude(ethereum_tx=None)
.order_by("-nonce")
.first()
)
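    # Usage sketch (illustrative; `safe_address` is hypothetical): the newest mined transaction whose
    # confirmations were EOA/eth_sign signatures from addresses that have been owners of the Safe at
    # some point:
    #
    #     MultisigTransaction.objects.last_valid_transaction(safe_address)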
def safes_with_number_of_transactions_executed(self):
return (
self.executed()
.values("safe")
.annotate(transactions=Count("safe"))
.order_by("-transactions")
)
def safes_with_number_of_transactions_executed_and_master_copy(self):
master_copy_query = (
SafeStatus.objects.filter(address=OuterRef("safe"))
.order_by("-nonce")
.values("master_copy")
)
return (
self.safes_with_number_of_transactions_executed()
.annotate(master_copy=Subquery(master_copy_query[:1]))
.order_by("-transactions")
)
def not_indexed_metadata_contract_addresses(self):
"""
Find contracts with metadata (abi, contract name) not indexed
:return:
"""
return (
MultisigTransaction.objects.exclude(data=None)
.exclude(to__in=Contract.objects.values("address"))
.values_list("to", flat=True)
.distinct()
)
class MultisigTransactionQuerySet(models.QuerySet):
def executed(self):
return self.exclude(ethereum_tx__block=None)
def not_executed(self):
return self.filter(ethereum_tx__block=None)
def with_confirmations(self):
return self.exclude(confirmations__isnull=True)
def without_confirmations(self):
return self.filter(confirmations__isnull=True)
def with_confirmations_required(self):
"""
Add confirmations required for execution when the tx was mined (threshold of the Safe at that point)
:return: queryset with `confirmations_required: int` field
"""
threshold_query = (
SafeStatus.objects.filter(internal_tx__ethereum_tx=OuterRef("ethereum_tx"))
.sorted_reverse_by_internal_tx()
.values("threshold")
)
return self.annotate(confirmations_required=Subquery(threshold_query[:1]))
def queued(self, safe_address: str):
"""
        :return: Transactions not executed with safe-nonce greater than the last executed nonce. If no transaction
        has been executed, every transaction is returned
"""
subquery = (
self.executed()
.filter(safe=safe_address)
.values("safe")
.annotate(max_nonce=Max("nonce"))
.values("max_nonce")
)
return (
self.not_executed()
.annotate(
max_executed_nonce=Coalesce(
Subquery(subquery), Value(-1), output_field=Uint256Field()
)
)
.filter(nonce__gt=F("max_executed_nonce"), safe=safe_address)
)
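    # The Coalesce(..., Value(-1)) above is a sentinel: when no transaction has been executed for the
    # Safe, `max_executed_nonce` becomes -1, so `nonce__gt` keeps every not-executed transaction,
    # including the one with nonce 0. Usage sketch (hypothetical address):
    #
    #     MultisigTransaction.objects.queued(safe_address)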
class MultisigTransaction(TimeStampedModel):
objects = MultisigTransactionManager.from_queryset(MultisigTransactionQuerySet)()
safe_tx_hash = Sha3HashField(primary_key=True)
safe = EthereumAddressField(db_index=True)
ethereum_tx = models.ForeignKey(
EthereumTx,
null=True,
default=None,
blank=True,
on_delete=models.SET_NULL,
related_name="multisig_txs",
)
to = EthereumAddressField(null=True, db_index=True)
value = Uint256Field()
data = models.BinaryField(null=True)
operation = models.PositiveSmallIntegerField(
choices=[(tag.value, tag.name) for tag in SafeOperation]
)
safe_tx_gas = Uint256Field()
base_gas = Uint256Field()
gas_price = Uint256Field()
gas_token = EthereumAddressField(null=True)
refund_receiver = EthereumAddressField(null=True)
signatures = models.BinaryField(null=True) # When tx is executed
nonce = Uint256Field(db_index=True)
failed = models.BooleanField(null=True, default=None, db_index=True)
origin = models.CharField(
null=True, default=None, max_length=200
) # To store arbitrary data on the tx
trusted = models.BooleanField(
default=False, db_index=True
) # Txs proposed by a delegate or with one confirmation
def __str__(self):
return f"{self.safe} - {self.nonce} - {self.safe_tx_hash}"
@property
def execution_date(self) -> Optional[datetime.datetime]:
if self.ethereum_tx_id and self.ethereum_tx.block_id is not None:
return self.ethereum_tx.block.timestamp
return None
@property
def executed(self) -> bool:
return bool(self.ethereum_tx_id and (self.ethereum_tx.block_id is not None))
@property
def owners(self) -> Optional[List[str]]:
if not self.signatures:
return []
else:
signatures = bytes(self.signatures)
safe_signatures = SafeSignature.parse_signature(
signatures, self.safe_tx_hash
)
return [safe_signature.owner for safe_signature in safe_signatures]
class ModuleTransaction(TimeStampedModel):
internal_tx = models.OneToOneField(
InternalTx, on_delete=models.CASCADE, related_name="module_tx", primary_key=True
)
safe = EthereumAddressField(
db_index=True
) # Just for convenience, it could be retrieved from `internal_tx`
module = EthereumAddressField(
db_index=True
) # Just for convenience, it could be retrieved from `internal_tx`
to = EthereumAddressField(db_index=True)
value = Uint256Field()
data = models.BinaryField(null=True)
operation = models.PositiveSmallIntegerField(
choices=[(tag.value, tag.name) for tag in SafeOperation]
)
failed = models.BooleanField(default=False)
def __str__(self):
if self.value:
return f"{self.safe} - {self.to} - {self.value}"
else:
return (
f"{self.safe} - {self.to} - {HexBytes(self.data.tobytes()).hex()[:8]}"
)
@property
def execution_date(self) -> Optional[datetime.datetime]:
if (
self.internal_tx.ethereum_tx_id
and self.internal_tx.ethereum_tx.block_id is not None
):
return self.internal_tx.ethereum_tx.block.timestamp
return None
class MultisigConfirmationManager(models.Manager):
def remove_unused_confirmations(
self, safe: str, current_safe_none: int, owner: str
) -> int:
"""
        Remove confirmations of not executed transactions with a nonce greater than or equal to
        the current Safe nonce, for the given Safe and owner (an owner can own multiple Safes).
        Used when an owner is removed from the Safe.
        :return: Number of confirmations removed
"""
return self.filter(
multisig_transaction__ethereum_tx=None, # Not executed
multisig_transaction__safe=safe,
multisig_transaction__nonce__gte=current_safe_none,
owner=owner,
).delete()[0]
class MultisigConfirmationQuerySet(models.QuerySet):
def without_transaction(self):
return self.filter(multisig_transaction=None)
def with_transaction(self):
return self.exclude(multisig_transaction=None)
class MultisigConfirmation(TimeStampedModel):
objects = MultisigConfirmationManager.from_queryset(MultisigConfirmationQuerySet)()
ethereum_tx = models.ForeignKey(
EthereumTx,
on_delete=models.CASCADE,
related_name="multisig_confirmations",
null=True,
) # `null=True` for signature confirmations
multisig_transaction = models.ForeignKey(
MultisigTransaction,
on_delete=models.CASCADE,
null=True,
related_name="confirmations",
)
multisig_transaction_hash = Sha3HashField(
null=True, db_index=True
) # Use this while we don't have a `multisig_transaction`
owner = EthereumAddressField()
signature = HexField(null=True, default=None, max_length=2000)
signature_type = models.PositiveSmallIntegerField(
choices=[(tag.value, tag.name) for tag in SafeSignatureType], db_index=True
)
class Meta:
unique_together = (("multisig_transaction_hash", "owner"),)
ordering = ["created"]
def __str__(self):
if self.multisig_transaction_id:
return f"Confirmation of owner={self.owner} for transaction-hash={self.multisig_transaction_hash}"
else:
return f"Confirmation of owner={self.owner} for existing transaction={self.multisig_transaction_hash}"
class MonitoredAddress(models.Model):
address = EthereumAddressField(primary_key=True)
initial_block_number = models.IntegerField(
default=0
) # Block number when address received first tx
tx_block_number = models.IntegerField(
null=True, default=None, db_index=True
) # Block number when last internal tx scan ended
class Meta:
abstract = True
verbose_name_plural = "Monitored addresses"
def __str__(self):
return (
f"Address={self.address} - Initial-block-number={self.initial_block_number}"
f" - Tx-block-number={self.tx_block_number}"
)
class ProxyFactory(MonitoredAddress):
class Meta:
verbose_name_plural = "Proxy factories"
ordering = ["tx_block_number"]
def validate_version(value: str):
try:
if not value:
raise ValueError("Empty version not allowed")
Version(value)
except ValueError as exc:
raise ValidationError(
_("%(value)s is not a valid version: %(reason)s"),
params={"value": value, "reason": str(exc)},
)
class SafeMasterCopyManager(models.Manager):
def get_version_for_address(self, address: ChecksumAddress) -> Optional[str]:
try:
return self.filter(address=address).only("version").get().version
except self.model.DoesNotExist:
return None
class SafeMasterCopyQueryset(models.QuerySet):
def l2(self):
return self.filter(l2=True)
def not_l2(self):
return self.filter(l2=False)
class SafeMasterCopy(MonitoredAddress):
objects = SafeMasterCopyManager.from_queryset(SafeMasterCopyQueryset)()
version = models.CharField(max_length=20, validators=[validate_version])
deployer = models.CharField(max_length=50, default="Gnosis")
l2 = models.BooleanField(default=False)
class Meta:
verbose_name_plural = "Safe master copies"
ordering = ["tx_block_number"]
class SafeContract(models.Model):
address = EthereumAddressField(primary_key=True)
ethereum_tx = models.ForeignKey(
EthereumTx, on_delete=models.CASCADE, related_name="safe_contracts"
)
erc20_block_number = models.IntegerField(
default=0, db_index=True
) # Block number of last scan of erc20
def __str__(self):
return f"Safe address={self.address} - ethereum-tx={self.ethereum_tx_id}"
@property
def created(self):
return self.ethereum_tx.block.timestamp
@property
    def created_block_number(self) -> Optional[int]:
if self.ethereum_tx:
return self.ethereum_tx.block_id
class SafeContractDelegateManager(models.Manager):
def get_delegates_for_safe(self, address: ChecksumAddress) -> Set[ChecksumAddress]:
return set(
self.filter(safe_contract_id=address)
.values_list("delegate", flat=True)
.distinct()
)
def get_delegates_for_safe_and_owners(
self, safe_address: ChecksumAddress, owner_addresses: Sequence[ChecksumAddress]
) -> Set[ChecksumAddress]:
if not owner_addresses:
return set()
return set(
self.filter(
# If safe_contract is null on SafeContractDelegate, delegates are valid for every Safe
Q(safe_contract_id=safe_address)
| Q(safe_contract=None)
)
.filter(delegator__in=owner_addresses)
.values_list("delegate", flat=True)
.distinct()
)
class SafeContractDelegate(models.Model):
"""
The owners of the Safe can add users so they can propose/retrieve txs as if they were the owners of the Safe
"""
objects = SafeContractDelegateManager()
safe_contract = models.ForeignKey(
SafeContract,
on_delete=models.CASCADE,
related_name="safe_contract_delegates",
null=True,
default=None,
)
delegate = EthereumAddressField()
delegator = EthereumAddressField() # Owner who created the delegate
label = models.CharField(max_length=50)
read = models.BooleanField(default=True) # For permissions in the future
write = models.BooleanField(default=True)
class Meta:
unique_together = (("safe_contract", "delegate", "delegator"),)
def __str__(self):
return (
f"Delegator={self.delegator} Delegate={self.delegate} for Safe={self.safe_contract_id} - "
f"Label={self.label}"
)
class SafeStatusManager(models.Manager):
pass
class SafeStatusQuerySet(models.QuerySet):
def sorted_by_internal_tx(self):
"""
        Last SafeStatus first. Usually ordering by `nonce` should be enough, but in some cases
(MultiSend, calling functions inside the Safe like adding/removing owners...) there could be multiple
transactions with the same nonce. `address` must be part of the expression to use `distinct()` later
:return: SafeStatus QuerySet sorted
"""
return self.order_by(
"address",
"-nonce",
"-internal_tx__ethereum_tx__block_id",
"-internal_tx__ethereum_tx__transaction_index",
"-internal_tx__trace_address",
)
def sorted_reverse_by_internal_tx(self):
return self.order_by(
"address",
"nonce",
"internal_tx__ethereum_tx__block_id",
"internal_tx__ethereum_tx__transaction_index",
"internal_tx__trace_address",
)
def addresses_for_owner(self, owner_address: str) -> Set[str]:
"""
        Use a raw query to get the Safes for an owner. As a shortcut we order by `internal_tx_id` instead of using a
        JOIN to get the internal tx index. It's not as accurate, but it should be enough
:param owner_address:
:return:
"""
with connection.cursor() as cursor:
cursor.execute(
"""
SELECT DISTINCT(address)
FROM (
SELECT address, owners,
rank() OVER (PARTITION BY address ORDER BY nonce DESC, internal_tx_id DESC) AS pos
FROM history_safestatus
WHERE address IN (
SELECT address FROM history_safestatus
WHERE owners @> ARRAY[%s]::varchar(42)[]
)
) AS ss
WHERE pos = 1 AND owners @> ARRAY[%s]::varchar(42)[];
""",
[owner_address, owner_address],
)
return {row[0] for row in cursor.fetchall()}
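    # The raw query above ranks the SafeStatus rows of each address with a window function and keeps
    # only the newest one (pos = 1), so an address is returned only if `owner_address` is still in the
    # current owner set. Usage sketch (hypothetical owner):
    #
    #     SafeStatus.objects.addresses_for_owner(owner_address)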
def last_for_every_address(self) -> QuerySet:
return (
self.distinct("address") # Uses PostgreSQL `DISTINCT ON`
.select_related("internal_tx__ethereum_tx")
.sorted_by_internal_tx()
)
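    # `distinct("address")` becomes PostgreSQL `DISTINCT ON ("address")`, and `sorted_by_internal_tx()`
    # puts `address` first in the ORDER BY (a DISTINCT ON requirement), so each address keeps only its
    # most recent SafeStatus.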
def last_for_address(self, address: str) -> Optional["SafeStatus"]:
return self.filter(address=address).sorted_by_internal_tx().first()
class SafeStatus(models.Model):
objects = SafeStatusManager.from_queryset(SafeStatusQuerySet)()
internal_tx = models.OneToOneField(
InternalTx,
on_delete=models.CASCADE,
related_name="safe_status",
primary_key=True,
)
address = EthereumAddressField(db_index=True)
owners = ArrayField(EthereumAddressField())
threshold = Uint256Field()
nonce = Uint256Field(default=0)
master_copy = EthereumAddressField()
fallback_handler = EthereumAddressField()
guard = EthereumAddressField(default=None, null=True)
enabled_modules = ArrayField(EthereumAddressField(), default=list)
class Meta:
indexes = [
Index(fields=["address", "-nonce"]), # Index on address and nonce DESC
Index(fields=["address", "-nonce", "-internal_tx"]), # For Window search
GinIndex(fields=["owners"]),
]
unique_together = (("internal_tx", "address"),)
verbose_name_plural = "Safe statuses"
def __str__(self):
return f"safe={self.address} threshold={self.threshold} owners={self.owners} nonce={self.nonce}"
@property
def block_number(self):
return self.internal_tx.ethereum_tx.block_id
def is_corrupted(self):
"""
SafeStatus nonce must be incremental. If current nonce is bigger than the number of SafeStatus for that Safe
something is wrong. There could be more SafeStatus than nonce (e.g. a call to a MultiSend
adding owners and enabling a Module in the same contract `execTransaction`)
:return: True if corrupted, False otherwise
"""
return (
self.__class__.objects.filter(
address=self.address, nonce__lte=self.nonce
).count()
<= self.nonce
)
def store_new(self, internal_tx: InternalTx) -> None:
self.internal_tx = internal_tx
return self.save(force_insert=True)
class WebHookType(Enum):
NEW_CONFIRMATION = 0
PENDING_MULTISIG_TRANSACTION = 1
EXECUTED_MULTISIG_TRANSACTION = 2
INCOMING_ETHER = 3
INCOMING_TOKEN = 4
CONFIRMATION_REQUEST = 5
SAFE_CREATED = 6
MODULE_TRANSACTION = 7
OUTGOING_ETHER = 8
OUTGOING_TOKEN = 9
class WebHookQuerySet(models.QuerySet):
def matching_for_address(self, address: str):
return self.filter(Q(address=address) | Q(address=""))
class WebHook(models.Model):
objects = WebHookQuerySet.as_manager()
address = EthereumAddressField(db_index=True, blank=True)
url = models.URLField()
# Configurable webhook types to listen to
new_confirmation = models.BooleanField(default=True)
pending_outgoing_transaction = models.BooleanField(default=True)
new_executed_outgoing_transaction = models.BooleanField(default=True)
new_incoming_transaction = models.BooleanField(default=True)
new_safe = models.BooleanField(default=True)
new_module_transaction = models.BooleanField(default=True)
new_outgoing_transaction = models.BooleanField(default=True)
class Meta:
unique_together = (("address", "url"),)
def __str__(self):
if self.address:
return f"Webhook for safe={self.address} to url={self.url}"
else:
return f"Webhook to every address to url={self.url}"
def is_valid_for_webhook_type(self, webhook_type: WebHookType):
if webhook_type == WebHookType.NEW_CONFIRMATION and not self.new_confirmation:
return False
elif (
webhook_type == WebHookType.PENDING_MULTISIG_TRANSACTION
and not self.pending_outgoing_transaction
):
return False
elif (
webhook_type == WebHookType.EXECUTED_MULTISIG_TRANSACTION
and not self.new_executed_outgoing_transaction
):
return False
elif (
webhook_type in (WebHookType.INCOMING_TOKEN, WebHookType.INCOMING_ETHER)
and not self.new_incoming_transaction
):
return False
elif webhook_type == WebHookType.SAFE_CREATED and not self.new_safe:
return False
elif (
webhook_type == WebHookType.MODULE_TRANSACTION
and not self.new_module_transaction
):
return False
elif (
webhook_type in (WebHookType.OUTGOING_TOKEN, WebHookType.OUTGOING_ETHER)
and not self.new_outgoing_transaction
):
return False
else:
return True
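    # Dispatch sketch (illustrative; not part of the original module): callers can combine the address
    # filter with the per-type flags, e.g.
    #
    #     for webhook in WebHook.objects.matching_for_address(address):
    #         if webhook.is_valid_for_webhook_type(WebHookType.INCOMING_ETHER):
    #             ...  # POST the payload to webhook.url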