hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
539189bc34fac0ccbcd5cd2cd5682646c8fc21fa | 639 | py | Python | setup.py | elonhub/papers | 27636eadb6836b672301eaee64b61200fc89ed78 | [
"MIT"
] | 105 | 2018-05-25T20:45:34.000Z | 2022-03-30T05:10:18.000Z | setup.py | elonhub/papers | 27636eadb6836b672301eaee64b61200fc89ed78 | [
"MIT"
] | 18 | 2017-12-02T12:56:10.000Z | 2022-02-20T21:32:26.000Z | setup.py | perrette/myref | a7417ea82ebb296ef5517ea00e21e54b97a1ed78 | [
"MIT"
] | 23 | 2017-12-23T14:28:14.000Z | 2022-01-26T10:01:49.000Z | from distutils.core import setup
import versioneer
version = versioneer.get_version()
setup(name='papers-cli',
version=version,
cmdclass = versioneer.get_cmdclass(),
author='Mahe Perrette',
author_email='mahe.perrette@gmail.com',
description='utilities to keep your PDF library organized',
url='https://github.com/perrette/papers',
download_url=f'https://github.com/perrette/papers/archive/{version}.tar.gz',
packages=['papers'],
scripts=['scripts/papers'],
license = "MIT",
requires = ["bibtexparser","crossrefapi","rapidfuzz", "unidecode", "scholarly", "six"],
)
| 33.631579 | 93 | 0.671362 | from distutils.core import setup
import versioneer
version = versioneer.get_version()
setup(name='papers-cli',
version=version,
cmdclass = versioneer.get_cmdclass(),
author='Mahe Perrette',
author_email='mahe.perrette@gmail.com',
description='utilities to keep your PDF library organized',
url='https://github.com/perrette/papers',
download_url=f'https://github.com/perrette/papers/archive/{version}.tar.gz',
packages=['papers'],
scripts=['scripts/papers'],
license = "MIT",
requires = ["bibtexparser","crossrefapi","rapidfuzz", "unidecode", "scholarly", "six"],
)
| 0 | 0 | 0 |
f47952bf3243e98c7839fafda31165a7f8fed6ab | 2,929 | py | Python | pygears/common/cart.py | Risto97/pygears | 19393e85101a16762cb3bbbf3010946ef69217f2 | [
"MIT"
] | null | null | null | pygears/common/cart.py | Risto97/pygears | 19393e85101a16762cb3bbbf3010946ef69217f2 | [
"MIT"
] | null | null | null | pygears/common/cart.py | Risto97/pygears | 19393e85101a16762cb3bbbf3010946ef69217f2 | [
"MIT"
] | null | null | null | from pygears.core.gear import alternative, gear
from pygears.typing import Queue, Tuple, typeof
from pygears.common.shred import shred
from pygears.common.ccat import ccat
from pygears.util.utils import quiter_async
from pygears import module
@gear(enablement=b'len(din) == 2')
@alternative(cart)
@gear
# TODO: Lowest eot for each uncart output needs to be shortened to 1 data using
# flattening
@gear
@gear(enablement=b'len(din) == 2')
@alternative(cart_sync)
@gear
@gear
| 26.387387 | 79 | 0.615227 | from pygears.core.gear import alternative, gear
from pygears.typing import Queue, Tuple, typeof
from pygears.common.shred import shred
from pygears.common.ccat import ccat
from pygears.util.utils import quiter_async
from pygears import module
def lvl_if_queue(t):
    """Return the Queue nesting level of type *t*, or 0 for non-Queue types."""
    return t.lvl if issubclass(t, Queue) else 0
def cart_type(dtypes):
    """Compute the output type of a cartesian product over *dtypes*.

    The payload is a Tuple of the per-input base types (for Queue inputs the
    payload type ``d[0]`` is used instead of the Queue itself), wrapped in a
    Queue whose level is the sum of all input Queue levels.
    """
    arg_queue_lvl = [lvl_if_queue(d) for d in dtypes]
    base_type = Tuple[tuple(
        d if lvl == 0 else d[0] for d, lvl in zip(dtypes, arg_queue_lvl))]
    # If there are no Queues, i.e. sum(arg_queue_lvl) == 0, the type below
    # will resolve to just base_type
    return Queue[base_type, sum(arg_queue_lvl)]
@gear(enablement=b'len(din) == 2')
async def cart(*din) -> b'cart_type(din)':
    """Cartesian product of two input interfaces.

    One input is treated as the "single" side (one value held for the whole
    transaction) and the other as the "queue" side (iterated item by item).
    Each output item pairs the held single value with one queue item, carrying
    the combined eot flags of both sides.

    NOTE(review): the return annotation ``b'cart_type(din)'`` is a pygears
    compile-time type expression, evaluated by the @gear machinery.
    """
    din_t = [d.dtype for d in din]
    # Decide which input plays the "queue" role; when both (or neither) are
    # Queues, din[1] is iterated and din[0] is held as the single value.
    if all(typeof(t, Queue) for t in din_t):
        queue_id, single_id = 1, 0
    elif typeof(din_t[0], Queue):
        queue_id, single_id = 0, 1
    else:
        queue_id, single_id = 1, 0
    async with din[single_id] as single_data:
        # If the held side is itself a Queue, split off its eot flags so they
        # can be appended to every emitted item.
        if typeof(din_t[single_id], Queue):
            single_eot = single_data.eot
            single_data = single_data.data
        else:
            single_eot = []
        async for queue_data in quiter_async(din[queue_id]):
            # Rebuild the pair in positional order regardless of which input
            # was chosen as the queue side.
            out_data = [0, 0]
            out_data[queue_id] = queue_data.data
            out_data[single_id] = single_data
            yield module().tout((tuple(out_data), *queue_data.eot,
                                 *single_eot))
@alternative(cart)
@gear
def cart_vararg(*din, enablement=b'len(din) > 2'):
    """N-ary cartesian product, built by folding the binary ``cart`` gear.

    Registered as the alternative implementation of ``cart`` for more than
    two inputs; the final cast enforces the canonical ``cart_type`` output.
    """
    ret = cart(din[0], din[1])
    for d in din[2:]:
        ret = cart(ret, d)
    return ret | cart_type([d.dtype for d in din])
# TODO: Lowest eot for each uncart output needs to be shortened to 1 data using
# flattening
@gear
def uncart(din, *, dtypes):
    """Inverse of ``cart``: split a combined Queue back into per-input streams.

    Args:
        din: Combined interface where ``din[0]`` is the data tuple and
            ``din[1:]`` are the stacked eot flags.
        dtypes: Original per-input types; Queue entries get their own eot
            levels re-attached, plain entries pass through as bare data.
    """
    zdata = din[0]
    zlast = din[1:]
    def split():
        for i, d in enumerate(dtypes):
            data = zdata[i]
            if issubclass(d, Queue):
                # Re-wrap Queue inputs with the lowest d.lvl eot flags.
                yield ccat(data, zlast[:d.lvl]) | Queue[data.dtype, d.lvl]
            else:
                yield data
    return tuple(split())
@gear(enablement=b'len(din) == 2')
async def cart_sync(*din) -> b'din':
    """Synchronize two inputs like ``cart`` but keep their original types.

    The single-side value (held for a whole transaction) is re-emitted once
    per item of the queue-side input; outputs preserve the positional order
    of the inputs.
    """
    din_t = [d.dtype for d in din]
    # The Queue-typed input (din[0] if it is a Queue, otherwise din[1]) is
    # iterated; the other input is held constant for the transaction.
    queue_id, single_id = (0, 1) if typeof(din_t[0], Queue) else (1, 0)
    async with din[single_id] as single_data:
        async for queue_data in quiter_async(din[queue_id]):
            dout = [0, 0]
            dout[single_id] = single_data
            dout[queue_id] = queue_data
            yield tuple(dout)
@alternative(cart_sync)
@gear
def cart_sync_vararg(*din):
    """N-ary synchronization: combine with ``cart`` then split with ``uncart``."""
    return din | cart | uncart(dtypes=[d.dtype for d in din])
@gear
def cart_sync_with(sync_in, din, *, balance=None):
    """Synchronize *din* to *sync_in* and return only the synchronized *din*.

    The synchronized copy of *sync_in* is discarded via ``shred``; *balance*
    (if given) is an optional gear applied to *sync_in* first — presumably a
    buffering stage, TODO confirm against callers.
    """
    if balance:
        sync_in = sync_in | balance
    din_sync, sync_in_sync = cart_sync(din, sync_in)
    # The sync_in branch is only needed for synchronization; drop its data.
    sync_in_sync | shred
    return din_sync
| 2,264 | 0 | 178 |
6cce156e9dd815f900433f2cbb598128c23c884a | 2,357 | py | Python | parsedata_jhu.py | manar-c/covid19intexas | e1f69717aa8c6dbf1920fd771de0d9525e9e1bbb | [
"MIT"
] | null | null | null | parsedata_jhu.py | manar-c/covid19intexas | e1f69717aa8c6dbf1920fd771de0d9525e9e1bbb | [
"MIT"
] | null | null | null | parsedata_jhu.py | manar-c/covid19intexas | e1f69717aa8c6dbf1920fd771de0d9525e9e1bbb | [
"MIT"
] | null | null | null |
import pandas as pd
import requests
import io
import numpy as np
#Goal: go through daily reports of JHU to get data for
# Texas, Travis, Harris, Dallas
baseurl = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/'
#Start from March 1
#March 1 to March 21: jh only reported texas
#March 22 onwards, jh reported county level
# results maps a daily-report filename (e.g. '03-01-2020.csv') to the
# statewide confirmed-case count parsed from that report.
results = {}
#JHU changed the formats several times
# --- March 1-9: Texas rows appear as "<place>, TX" in 'Province/State' ---
maxday0 = 9
for i in range(maxday0):
    strbase = '03-0'+str(i+1)+'-2020.csv'
    url = baseurl + strbase
    df = pd.read_csv(url)
    # Sum Confirmed over every row whose state field contains ", TX".
    result = df[df['Province/State'].str.contains(', TX', na=False)].Confirmed.sum(axis=0)
    print(result)
    results[strbase] = result
#x = sdfsdfsfd
# --- March 10-21: a 'Texas' value in the 'Province/State' column ---
maxday1 = 21
for i in range(maxday0,maxday1):
    strbase = '03-'
    if i+1 < 10:
        strbase += '0'+str(i+1)
    else:
        strbase += str(i+1)
    strbase += '-2020.csv'
    url = baseurl+strbase
    print(url)
    df = pd.read_csv(url)
    result = df[df['Province/State']=='Texas'].Confirmed.to_numpy()
    # Some days have no Texas row; only record when exactly one was found
    # (np.ndarray.item raises if the selection is not a single element).
    if len(result) > 0:
        results[strbase] = np.ndarray.item(result)
    # print(np.size(result))
# --- March 22-31: county-level rows, column renamed to 'Province_State' ---
maxday2 = 31
for i in range(maxday1, maxday2):
    strbase = '03-'+str(i+1) + '-2020.csv'
    url = baseurl + strbase
    print(url)
    df = pd.read_csv(url)
    result = df[df['Province_State'] == 'Texas'].Confirmed.sum(axis=0)
    results[strbase] = result
# --- April 1-30: same county-level format ---
maxday2 = 30
for i in range(0, maxday2):
    strbase = '04-'
    if i+1 < 10:
        strbase += '0'+str(i+1)
    else:
        strbase += str(i+1)
    strbase += '-2020.csv'
    url = baseurl + strbase
    print(url)
    df = pd.read_csv(url)
    result = df[df['Province_State'] == 'Texas'].Confirmed.sum(axis=0)
    results[strbase] = result
# --- May 1-29: same county-level format ---
maxday2 = 29
for i in range(0, maxday2):
    strbase = '05-'
    if i+1 < 10:
        strbase += '0'+str(i+1)
    else:
        strbase += str(i+1)
    strbase += '-2020.csv'
    url = baseurl + strbase
    print(url)
    df = pd.read_csv(url)
    result = df[df['Province_State'] == 'Texas'].Confirmed.sum(axis=0)
    results[strbase] = result
print(results)
| 26.483146 | 124 | 0.562155 |
import pandas as pd
import requests
import io
import numpy as np
#Goal: go through daily reports of JHU to get data for
# Texas, Travis, Harris, Dallas
baseurl = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/'
#Start from March 1
#March 1 to March 21: jh only reported texas
#March 22 onwards, jh reported county level
# results maps a daily-report filename (e.g. '03-01-2020.csv') to the
# statewide confirmed-case count parsed from that report.
results = {}
#JHU changed the formats several times
# --- March 1-9: Texas rows appear as "<place>, TX" in 'Province/State' ---
maxday0 = 9
for i in range(maxday0):
    strbase = '03-0'+str(i+1)+'-2020.csv'
    url = baseurl + strbase
    df = pd.read_csv(url)
    # Sum Confirmed over every row whose state field contains ", TX".
    result = df[df['Province/State'].str.contains(', TX', na=False)].Confirmed.sum(axis=0)
    print(result)
    results[strbase] = result
#x = sdfsdfsfd
# --- March 10-21: a 'Texas' value in the 'Province/State' column ---
maxday1 = 21
for i in range(maxday0,maxday1):
    strbase = '03-'
    if i+1 < 10:
        strbase += '0'+str(i+1)
    else:
        strbase += str(i+1)
    strbase += '-2020.csv'
    url = baseurl+strbase
    print(url)
    df = pd.read_csv(url)
    result = df[df['Province/State']=='Texas'].Confirmed.to_numpy()
    # Some days have no Texas row; only record when exactly one was found
    # (np.ndarray.item raises if the selection is not a single element).
    if len(result) > 0:
        results[strbase] = np.ndarray.item(result)
    # print(np.size(result))
# --- March 22-31: county-level rows, column renamed to 'Province_State' ---
maxday2 = 31
for i in range(maxday1, maxday2):
    strbase = '03-'+str(i+1) + '-2020.csv'
    url = baseurl + strbase
    print(url)
    df = pd.read_csv(url)
    result = df[df['Province_State'] == 'Texas'].Confirmed.sum(axis=0)
    results[strbase] = result
# --- April 1-30: same county-level format ---
maxday2 = 30
for i in range(0, maxday2):
    strbase = '04-'
    if i+1 < 10:
        strbase += '0'+str(i+1)
    else:
        strbase += str(i+1)
    strbase += '-2020.csv'
    url = baseurl + strbase
    print(url)
    df = pd.read_csv(url)
    result = df[df['Province_State'] == 'Texas'].Confirmed.sum(axis=0)
    results[strbase] = result
# --- May 1-29: same county-level format ---
maxday2 = 29
for i in range(0, maxday2):
    strbase = '05-'
    if i+1 < 10:
        strbase += '0'+str(i+1)
    else:
        strbase += str(i+1)
    strbase += '-2020.csv'
    url = baseurl + strbase
    print(url)
    df = pd.read_csv(url)
    result = df[df['Province_State'] == 'Texas'].Confirmed.sum(axis=0)
    results[strbase] = result
print(results)
| 0 | 0 | 0 |
5cecff2de7c350bc190f3cf7900410255d462c2e | 1,905 | py | Python | DeSSL/transforms/transform_many_times.py | Fragile-azalea/SSL-toolk | 4562901e5deb59605a9a191fe74adbb3de96bfc3 | [
"MIT"
] | 1 | 2021-12-15T02:32:30.000Z | 2021-12-15T02:32:30.000Z | DeSSL/transforms/transform_many_times.py | Fragile-azalea/SSL-toolkit | 4562901e5deb59605a9a191fe74adbb3de96bfc3 | [
"MIT"
] | null | null | null | DeSSL/transforms/transform_many_times.py | Fragile-azalea/SSL-toolkit | 4562901e5deb59605a9a191fe74adbb3de96bfc3 | [
"MIT"
] | null | null | null | from typing import Callable
from torchvision import transforms as tf
from . import TRANSFORM_REGISTRY
__all__ = ['ManyTimes', 'Twice']
@TRANSFORM_REGISTRY.register
class IdentityAndManyTimes:
"""
This class changes an image to a normalized tensor image and a series of augmented image.
Args:
transform: A list of image augmentation.
norm: A list of image normalization.
n: The times that the transform perform.
"""
@TRANSFORM_REGISTRY.register
class ManyTimes:
"""
This class transfers an image to a series of augmented images.
Args:
transform: The transform for augmentation and normalization of images.
n: The times that the transform performs.
Returns:
The tuple of augmented images.
"""
def __call__(self, inp) -> tuple:
"""
Call of this class.
Args:
inp: something importance.
"""
return (*(self.transform(inp) for _ in range(self.n)),)
@TRANSFORM_REGISTRY.register
def Twice(transform: Callable) -> ManyTimes:
"""
The easy call method of ManyTimes(transform, 2).
Args:
transform: The transform for augmentation and normalization of images.
Returns:
The class of ManyTimes(transform, 2).
"""
return ManyTimes(transform, 2)
| 25.065789 | 93 | 0.612073 | from typing import Callable
from torchvision import transforms as tf
from . import TRANSFORM_REGISTRY
__all__ = ['ManyTimes', 'Twice']
@TRANSFORM_REGISTRY.register
class IdentityAndManyTimes:
    """Map one image to its normalized form plus ``n`` augmented variants.

    Args:
        transform: List of augmentation transforms.
        norm: List of normalization transforms.
        n: Number of augmented copies to produce per call.
    """

    def __init__(self,
                 transform: list,
                 norm: list,
                 n: int):
        # Augmented branch = augmentation followed by normalization;
        # identity branch = normalization only.
        self.transform = tf.Compose(transform + norm)
        self.norm = tf.Compose(norm)
        self.n = n

    def __call__(self, inp):
        """Return ``(normalized, aug_1, ..., aug_n)`` for *inp*."""
        # Evaluate the identity branch first to match left-to-right order.
        identity = self.norm(inp)
        augmented = tuple(self.transform(inp) for _ in range(self.n))
        return (identity,) + augmented
@TRANSFORM_REGISTRY.register
class ManyTimes:
    """Apply one transform to the same input ``n`` independent times.

    Args:
        transform: Transform used for augmentation and normalization.
        n: How many transformed copies to produce.

    Returns:
        Calling an instance yields a tuple of ``n`` transformed images.
    """

    def __init__(self,
                 transform: Callable,
                 n: int):
        self.transform = transform
        self.n = n

    def __call__(self, inp) -> tuple:
        """Return a tuple holding ``self.n`` transformed copies of *inp*.

        Args:
            inp: The image (or other value) to transform.
        """
        return tuple(self.transform(inp) for _ in range(self.n))

    def __str__(self):
        # Keep the exact legacy formatting of this representation.
        return 'transform:%s\ntimes:%d' % (str(self.transform), self.n)
@TRANSFORM_REGISTRY.register
def Twice(transform: Callable) -> ManyTimes:
    """Convenience constructor equivalent to ``ManyTimes(transform, 2)``.

    Args:
        transform: Transform used for augmentation and normalization.

    Returns:
        A ``ManyTimes`` instance that applies *transform* twice per call.
    """
    return ManyTimes(transform, 2)
| 462 | 0 | 108 |
ec11f7fd973b9babc2b655f4e09d0c98675c49fa | 379 | py | Python | netqasm/examples/apps/anonymous_transmission/app_charlie.py | Doomsk/netqasm | 5d6c6ad00c4e0f9ab0ec05518cfa827675f357e7 | [
"MIT"
] | 6 | 2021-11-10T15:03:59.000Z | 2022-02-16T19:35:01.000Z | netqasm/examples/apps/anonymous_transmission/app_charlie.py | Doomsk/netqasm | 5d6c6ad00c4e0f9ab0ec05518cfa827675f357e7 | [
"MIT"
] | 13 | 2021-11-26T09:19:46.000Z | 2022-03-29T09:21:42.000Z | netqasm/examples/apps/anonymous_transmission/app_charlie.py | Doomsk/netqasm | 5d6c6ad00c4e0f9ab0ec05518cfa827675f357e7 | [
"MIT"
] | 4 | 2021-11-19T15:46:17.000Z | 2022-01-23T18:59:15.000Z | from src.protocol import anonymous_transmission
if __name__ == "__main__":
main()
| 15.791667 | 47 | 0.604222 | from src.protocol import anonymous_transmission
def main(
    app_config=None,
    sender=False,
    receiver=False,
    phi=0.0,
    theta=0.0,
):
    """Entry point for the "charlie" node of the anonymous-transmission app.

    All arguments are forwarded unchanged to ``anonymous_transmission``,
    with the app name fixed to "charlie".
    """
    protocol_kwargs = {
        "app_name": "charlie",
        "app_config": app_config,
        "sender": sender,
        "receiver": receiver,
        "phi": phi,
        "theta": theta,
    }
    return anonymous_transmission(**protocol_kwargs)
if __name__ == "__main__":
main()
| 267 | 0 | 23 |
6abd731b4559a3a3494ccf4ad0fee6c93e7f4f41 | 54,040 | py | Python | sklearn/feature_extraction/tests/test_text.py | LSturtew/scikit-learn | 5aecf201a3d9ee8896566a057b3a576f1e31d410 | [
"BSD-3-Clause"
] | 1 | 2021-11-27T08:04:53.000Z | 2021-11-27T08:04:53.000Z | sklearn/feature_extraction/tests/test_text.py | LSturtew/scikit-learn | 5aecf201a3d9ee8896566a057b3a576f1e31d410 | [
"BSD-3-Clause"
] | null | null | null | sklearn/feature_extraction/tests/test_text.py | LSturtew/scikit-learn | 5aecf201a3d9ee8896566a057b3a576f1e31d410 | [
"BSD-3-Clause"
] | 1 | 2021-11-03T09:49:02.000Z | 2021-11-03T09:49:02.000Z | # -*- coding: utf-8 -*-
from collections.abc import Mapping
import re
import pytest
from scipy import sparse
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from sklearn.utils import IS_PYPY
from sklearn.utils._testing import (
assert_almost_equal,
fails_if_pypy,
assert_allclose_dense_sparse,
skip_if_32bit,
)
from collections import defaultdict
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
"the salad celeri copyright",
"the salad salad sparkling water copyright",
"the the celeri celeri copyright",
"the tomato tomato salad water",
"the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
@pytest.mark.parametrize("Vectorizer", (CountVectorizer, HashingVectorizer))
# TODO: Remove in 1.2 when get_feature_names is removed.
@pytest.mark.filterwarnings("ignore::FutureWarning:sklearn")
@pytest.mark.parametrize("get_names", ["get_feature_names", "get_feature_names_out"])
def test_countvectorizer_custom_token_pattern(get_names):
"""Check `get_feature_names()` when a custom token pattern is passed.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/12971
"""
corpus = [
"This is the 1st document in my corpus.",
"This document is the 2nd sample.",
"And this is the 3rd one.",
"Is this the 4th document?",
]
token_pattern = r"[0-9]{1,3}(?:st|nd|rd|th)\s\b(\w{2,})\b"
vectorizer = CountVectorizer(token_pattern=token_pattern)
vectorizer.fit_transform(corpus)
expected = ["document", "one", "sample"]
feature_names_out = getattr(vectorizer, get_names)()
assert_array_equal(feature_names_out, expected)
def test_countvectorizer_custom_token_pattern_with_several_group():
"""Check that we raise an error if token pattern capture several groups.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/12971
"""
corpus = [
"This is the 1st document in my corpus.",
"This document is the 2nd sample.",
"And this is the 3rd one.",
"Is this the 4th document?",
]
token_pattern = r"([0-9]{1,3}(?:st|nd|rd|th))\s\b(\w{2,})\b"
err_msg = "More than 1 capturing group in token pattern"
vectorizer = CountVectorizer(token_pattern=token_pattern)
with pytest.raises(ValueError, match=err_msg):
vectorizer.fit(corpus)
def test_tf_transformer_feature_names_out():
"""Check get_feature_names_out for TfidfTransformer"""
X = [[1, 1, 1], [1, 1, 0], [1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm="l2").fit(X)
feature_names_in = ["a", "c", "b"]
feature_names_out = tr.get_feature_names_out(feature_names_in)
assert_array_equal(feature_names_in, feature_names_out)
@fails_if_pypy
# TODO: Remove in 1.2 when get_feature_names is removed.
@pytest.mark.filterwarnings("ignore::FutureWarning:sklearn")
@pytest.mark.parametrize("get_names", ["get_feature_names", "get_feature_names_out"])
@pytest.mark.parametrize("Vectorizer", (CountVectorizer, TfidfVectorizer))
# TODO: Remove in 1.2 when get_feature_names is removed.
@pytest.mark.filterwarnings("ignore::FutureWarning:sklearn")
@pytest.mark.parametrize("get_names", ["get_feature_names", "get_feature_names_out"])
@pytest.mark.parametrize(
"params, err_type, message",
(
({"max_df": 2.0}, ValueError, "max_df == 2.0, must be <= 1.0."),
({"min_df": 1.5}, ValueError, "min_df == 1.5, must be <= 1.0."),
({"max_df": -2}, ValueError, "max_df == -2, must be >= 0."),
({"min_df": -10}, ValueError, "min_df == -10, must be >= 0."),
({"min_df": 3, "max_df": 2.0}, ValueError, "max_df == 2.0, must be <= 1.0."),
({"min_df": 1.5, "max_df": 50}, ValueError, "min_df == 1.5, must be <= 1.0."),
({"max_features": -10}, ValueError, "max_features == -10, must be >= 0."),
(
{"max_features": 3.5},
TypeError,
"max_features must be an instance of <class 'numbers.Integral'>, not <class"
" 'float'>",
),
),
)
# TODO: Remove in 1.2 when get_feature_names is removed.
@pytest.mark.filterwarnings("ignore::FutureWarning:sklearn")
@pytest.mark.parametrize("get_names", ["get_feature_names", "get_feature_names_out"])
@fails_if_pypy
@pytest.mark.parametrize("Vectorizer", (CountVectorizer, TfidfVectorizer))
@fails_if_pypy
@pytest.mark.parametrize(
"factory",
[
CountVectorizer.build_analyzer,
CountVectorizer.build_preprocessor,
CountVectorizer.build_tokenizer,
],
)
def test_pickling_built_processors(factory):
"""Tokenizers cannot be pickled
https://github.com/scikit-learn/scikit-learn/issues/12833
"""
vec = CountVectorizer()
function = factory(vec)
text = "J'ai mangé du kangourou ce midi, c'était pas très bon."
roundtripped_function = pickle.loads(pickle.dumps(function))
expected = function(text)
result = roundtripped_function(text)
assert result == expected
# TODO: Remove in 1.2 when get_feature_names is removed.
@pytest.mark.filterwarnings("ignore::FutureWarning:sklearn")
@pytest.mark.parametrize("get_names", ["get_feature_names", "get_feature_names_out"])
# TODO: Remove in 1.2 when get_feature_names is removed.
@pytest.mark.filterwarnings("ignore::FutureWarning:sklearn")
@pytest.mark.parametrize("get_names", ["get_feature_names", "get_feature_names_out"])
@fails_if_pypy
@pytest.mark.parametrize(
"Vectorizer", (CountVectorizer, TfidfVectorizer, HashingVectorizer)
)
@pytest.mark.parametrize("X_dtype", [np.float32, np.float64])
@pytest.mark.parametrize(
"vectorizer_dtype, output_dtype, warning_expected",
[
(np.int32, np.float64, True),
(np.int64, np.float64, True),
(np.float32, np.float32, False),
(np.float64, np.float64, False),
],
)
@pytest.mark.parametrize(
"vec",
[
HashingVectorizer(ngram_range=(2, 1)),
CountVectorizer(ngram_range=(2, 1)),
TfidfVectorizer(ngram_range=(2, 1)),
],
)
@fails_if_pypy
@skip_if_32bit
def test_countvectorizer_sort_features_64bit_sparse_indices():
"""
Check that CountVectorizer._sort_features preserves the dtype of its sparse
feature matrix.
This test is skipped on 32bit platforms, see:
https://github.com/scikit-learn/scikit-learn/pull/11295
for more details.
"""
X = sparse.csr_matrix((5, 5), dtype=np.int64)
# force indices and indptr to int64.
INDICES_DTYPE = np.int64
X.indices = X.indices.astype(INDICES_DTYPE)
X.indptr = X.indptr.astype(INDICES_DTYPE)
vocabulary = {"scikit-learn": 0, "is": 1, "great!": 2}
Xs = CountVectorizer()._sort_features(X, vocabulary)
assert INDICES_DTYPE == Xs.indices.dtype
@fails_if_pypy
@pytest.mark.parametrize(
"Estimator", [CountVectorizer, TfidfVectorizer, HashingVectorizer]
)
@pytest.mark.parametrize(
"Estimator", [CountVectorizer, TfidfVectorizer, HashingVectorizer]
)
@pytest.mark.parametrize(
"input_type, err_type, err_msg",
[
("filename", FileNotFoundError, ""),
("file", AttributeError, "'str' object has no attribute 'read'"),
],
)
@pytest.mark.parametrize(
"Estimator",
[
CountVectorizer,
TfidfVectorizer,
pytest.param(HashingVectorizer, marks=fails_if_pypy),
],
)
@pytest.mark.parametrize(
"analyzer", [lambda doc: open(doc, "r"), lambda doc: doc.read()]
)
@pytest.mark.parametrize("input_type", ["file", "filename"])
@pytest.mark.parametrize(
"Estimator", [CountVectorizer, TfidfVectorizer, HashingVectorizer]
)
@pytest.mark.parametrize(
"Vectorizer", [CountVectorizer, HashingVectorizer, TfidfVectorizer]
)
@pytest.mark.parametrize(
"stop_words, tokenizer, preprocessor, ngram_range, token_pattern,"
"analyzer, unused_name, ovrd_name, ovrd_msg",
[
(
["you've", "you'll"],
None,
None,
(1, 1),
None,
"char",
"'stop_words'",
"'analyzer'",
"!= 'word'",
),
(
None,
lambda s: s.split(),
None,
(1, 1),
None,
"char",
"'tokenizer'",
"'analyzer'",
"!= 'word'",
),
(
None,
lambda s: s.split(),
None,
(1, 1),
r"\w+",
"word",
"'token_pattern'",
"'tokenizer'",
"is not None",
),
(
None,
None,
lambda s: s.upper(),
(1, 1),
r"\w+",
lambda s: s.upper(),
"'preprocessor'",
"'analyzer'",
"is callable",
),
(
None,
None,
None,
(1, 2),
None,
lambda s: s.upper(),
"'ngram_range'",
"'analyzer'",
"is callable",
),
(
None,
None,
None,
(1, 1),
r"\w+",
"char",
"'token_pattern'",
"'analyzer'",
"!= 'word'",
),
],
)
@pytest.mark.parametrize(
"Vectorizer, X",
(
(HashingVectorizer, [{"foo": 1, "bar": 2}, {"foo": 3, "baz": 1}]),
(CountVectorizer, JUNK_FOOD_DOCS),
),
)
# TODO: Remove in 1.2 when get_feature_names is removed
@fails_if_pypy
| 32.534618 | 88 | 0.657902 | # -*- coding: utf-8 -*-
from collections.abc import Mapping
import re
import pytest
from scipy import sparse
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from sklearn.utils import IS_PYPY
from sklearn.utils._testing import (
assert_almost_equal,
fails_if_pypy,
assert_allclose_dense_sparse,
skip_if_32bit,
)
from collections import defaultdict
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
"the salad celeri copyright",
"the salad salad sparkling water copyright",
"the the celeri celeri copyright",
"the tomato tomato salad water",
"the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
    # Strip accents (unicode mode) then upper-case; used below as a custom
    # ``preprocessor`` in the vectorizer tests.
    return strip_accents_unicode(s).upper()
def strip_eacute(s):
    """Replace every occurrence of "é" in *s* with a plain "e"."""
    return "e".join(s.split("é"))
def split_tokenize(s):
    """Tokenize *s* on runs of whitespace (custom ``tokenizer`` stand-in)."""
    tokens = s.split()
    return tokens
def lazy_analyze(s):
    """Degenerate analyzer: ignore *s* and emit one fixed feature token."""
    return ["the_ultimate_feature"]
def test_strip_accents():
    """Check strip_accents_unicode on latin, arabic, mixed and decomposed input."""
    # check some classical latin accentuated symbols
    a = "àáâãäåçèéêë"
    expected = "aaaaaaceeee"
    assert strip_accents_unicode(a) == expected
    a = "ìíîïñòóôõöùúûüý"
    expected = "iiiinooooouuuuy"
    assert strip_accents_unicode(a) == expected
    # check some arabic
    a = "\u0625" # alef with a hamza below: إ
    expected = "\u0627" # simple alef: ا
    assert strip_accents_unicode(a) == expected
    # mix letters accentuated and not
    a = "this is à test"
    expected = "this is a test"
    assert strip_accents_unicode(a) == expected
    # strings that are already decomposed
    a = "o\u0308" # o with diaeresis
    expected = "o"
    assert strip_accents_unicode(a) == expected
    # combining marks by themselves
    a = "\u0300\u0301\u0302\u0303"
    expected = ""
    assert strip_accents_unicode(a) == expected
    # Multiple combining marks on one character
    a = "o\u0308\u0304"
    expected = "o"
    assert strip_accents_unicode(a) == expected
def test_to_ascii():
    """Check strip_accents_ascii: like unicode mode but drops non-ascii leftovers."""
    # check some classical latin accentuated symbols
    a = "àáâãäåçèéêë"
    expected = "aaaaaaceeee"
    assert strip_accents_ascii(a) == expected
    a = "ìíîïñòóôõöùúûüý"
    expected = "iiiinooooouuuuy"
    assert strip_accents_ascii(a) == expected
    # check some arabic
    a = "\u0625" # halef with a hamza below
    expected = "" # halef has no direct ascii match
    assert strip_accents_ascii(a) == expected
    # mix letters accentuated and not
    a = "this is à test"
    expected = "this is a test"
    assert strip_accents_ascii(a) == expected
@pytest.mark.parametrize("Vectorizer", (CountVectorizer, HashingVectorizer))
def test_word_analyzer_unigrams(Vectorizer):
wa = Vectorizer(strip_accents="ascii").build_analyzer()
text = "J'ai mangé du kangourou ce midi, c'était pas très bon."
expected = [
"ai",
"mange",
"du",
"kangourou",
"ce",
"midi",
"etait",
"pas",
"tres",
"bon",
]
assert wa(text) == expected
text = "This is a test, really.\n\n I met Harry yesterday."
expected = ["this", "is", "test", "really", "met", "harry", "yesterday"]
assert wa(text) == expected
wa = Vectorizer(input="file").build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ["this", "is", "test", "with", "file", "like", "object"]
assert wa(text) == expected
# with custom preprocessor
wa = Vectorizer(preprocessor=uppercase).build_analyzer()
text = "J'ai mangé du kangourou ce midi, c'était pas très bon."
expected = [
"AI",
"MANGE",
"DU",
"KANGOUROU",
"CE",
"MIDI",
"ETAIT",
"PAS",
"TRES",
"BON",
]
assert wa(text) == expected
# with custom tokenizer
wa = Vectorizer(tokenizer=split_tokenize, strip_accents="ascii").build_analyzer()
text = "J'ai mangé du kangourou ce midi, c'était pas très bon."
expected = [
"j'ai",
"mange",
"du",
"kangourou",
"ce",
"midi,",
"c'etait",
"pas",
"tres",
"bon.",
]
assert wa(text) == expected
def test_word_analyzer_unigrams_and_bigrams():
    """Check that ngram_range=(1, 2) emits all unigrams followed by all bigrams."""
    wa = CountVectorizer(
        analyzer="word", strip_accents="unicode", ngram_range=(1, 2)
    ).build_analyzer()
    text = "J'ai mangé du kangourou ce midi, c'était pas très bon."
    expected = [
        "ai",
        "mange",
        "du",
        "kangourou",
        "ce",
        "midi",
        "etait",
        "pas",
        "tres",
        "bon",
        "ai mange",
        "mange du",
        "du kangourou",
        "kangourou ce",
        "ce midi",
        "midi etait",
        "etait pas",
        "pas tres",
        "tres bon",
    ]
    assert wa(text) == expected
def test_unicode_decode_error():
    """Check that a wrong ``encoding`` makes the analyzers raise UnicodeDecodeError."""
    # decode_error default to strict, so this should fail
    # First, encode (as bytes) a unicode string.
    text = "J'ai mangé du kangourou ce midi, c'était pas très bon."
    text_bytes = text.encode("utf-8")
    # Then let the Analyzer try to decode it as ascii. It should fail,
    # because we have given it an incorrect encoding.
    wa = CountVectorizer(ngram_range=(1, 2), encoding="ascii").build_analyzer()
    with pytest.raises(UnicodeDecodeError):
        wa(text_bytes)
    ca = CountVectorizer(
        analyzer="char", ngram_range=(3, 6), encoding="ascii"
    ).build_analyzer()
    with pytest.raises(UnicodeDecodeError):
        ca(text_bytes)
def test_char_ngram_analyzer():
    """Check the char analyzer output (first/last ngrams) for string and file input."""
    cnga = CountVectorizer(
        analyzer="char", strip_accents="unicode", ngram_range=(3, 6)
    ).build_analyzer()
    text = "J'ai mangé du kangourou ce midi, c'était pas très bon"
    expected = ["j'a", "'ai", "ai ", "i m", " ma"]
    assert cnga(text)[:5] == expected
    expected = ["s tres", " tres ", "tres b", "res bo", "es bon"]
    assert cnga(text)[-5:] == expected
    text = "This \n\tis a test, really.\n\n I met Harry yesterday"
    expected = ["thi", "his", "is ", "s i", " is"]
    assert cnga(text)[:5] == expected
    expected = [" yeste", "yester", "esterd", "sterda", "terday"]
    assert cnga(text)[-5:] == expected
    cnga = CountVectorizer(
        input="file", analyzer="char", ngram_range=(3, 6)
    ).build_analyzer()
    text = StringIO("This is a test with a file-like object!")
    expected = ["thi", "his", "is ", "s i", " is"]
    assert cnga(text)[:5] == expected
def test_char_wb_ngram_analyzer():
    """Check the char_wb analyzer, which pads ngrams at word boundaries."""
    cnga = CountVectorizer(
        analyzer="char_wb", strip_accents="unicode", ngram_range=(3, 6)
    ).build_analyzer()
    text = "This \n\tis a test, really.\n\n I met Harry yesterday"
    expected = [" th", "thi", "his", "is ", " thi"]
    assert cnga(text)[:5] == expected
    expected = ["yester", "esterd", "sterda", "terday", "erday "]
    assert cnga(text)[-5:] == expected
    cnga = CountVectorizer(
        input="file", analyzer="char_wb", ngram_range=(3, 6)
    ).build_analyzer()
    text = StringIO("A test with a file-like object!")
    expected = [" a ", " te", "tes", "est", "st ", " tes"]
    assert cnga(text)[:6] == expected
def test_word_ngram_analyzer():
    """Word 3- to 6-gram analysis over the tokenized, lowercased text."""
    cnga = CountVectorizer(
        analyzer="word", strip_accents="unicode", ngram_range=(3, 6)
    ).build_analyzer()
    text = "This \n\tis a test, really.\n\n I met Harry yesterday"
    # Single-character tokens ("a", "I") are absent from the expected n-grams.
    expected = ["this is test", "is test really", "test really met"]
    assert cnga(text)[:3] == expected
    expected = [
        "test really met harry yesterday",
        "this is test really met harry",
        "is test really met harry yesterday",
    ]
    assert cnga(text)[-3:] == expected
    # A file-based analyzer must yield exactly the same n-grams as the
    # string-based one for the same content.
    cnga_file = CountVectorizer(
        input="file", analyzer="word", ngram_range=(3, 6)
    ).build_analyzer()
    file = StringIO(text)
    assert cnga_file(file) == cnga(text)
def test_countvectorizer_custom_vocabulary():
    """User-supplied vocabularies of several container types are honored."""
    vocab = {"pizza": 0, "beer": 1}
    terms = set(vocab.keys())
    # Try a few of the supported types.
    for typ in [dict, list, iter, partial(defaultdict, int)]:
        v = typ(vocab)
        vect = CountVectorizer(vocabulary=v)
        vect.fit(JUNK_FOOD_DOCS)
        if isinstance(v, Mapping):
            # Mappings are kept verbatim, including their explicit indices.
            assert vect.vocabulary_ == vocab
        else:
            # Non-mappings only guarantee the set of terms, not the indices.
            assert set(vect.vocabulary_) == terms
        X = vect.transform(JUNK_FOOD_DOCS)
        assert X.shape[1] == len(terms)
        # Rebuild the container (iterators were consumed by the fit above)
        # and check inverse_transform on an unfitted vectorizer with a fixed
        # vocabulary: one term list per input row.
        v = typ(vocab)
        vect = CountVectorizer(vocabulary=v)
        inv = vect.inverse_transform(X)
        assert len(inv) == X.shape[0]
def test_countvectorizer_custom_vocabulary_pipeline():
    """A fixed vocabulary is respected when the vectorizer runs in a Pipeline."""
    vocab = ["pizza", "beer"]
    steps = [
        ("count", CountVectorizer(vocabulary=vocab)),
        ("tfidf", TfidfTransformer()),
    ]
    pipe = Pipeline(steps)
    X = pipe.fit_transform(ALL_FOOD_DOCS)
    fitted_count_step = pipe.named_steps["count"]
    assert set(fitted_count_step.vocabulary_) == set(vocab)
    assert X.shape[1] == len(vocab)
def test_countvectorizer_custom_vocabulary_repeated_indices():
    """A vocabulary mapping two terms to the same index must be rejected."""
    duplicated_index_vocab = {"pizza": 0, "beer": 0}
    with pytest.raises(ValueError, match="Vocabulary contains repeated indices"):
        vect = CountVectorizer(vocabulary=duplicated_index_vocab)
        vect.fit(["pasta_siziliana"])
def test_countvectorizer_custom_vocabulary_gap_index():
    """A vocabulary whose indices do not start at 0 must be rejected."""
    gapped_vocab = {"pizza": 1, "beer": 2}
    with pytest.raises(ValueError, match="doesn't contain index"):
        vect = CountVectorizer(vocabulary=gapped_vocab)
        vect.fit(["pasta_verdura"])
def test_countvectorizer_stop_words():
    """get_stop_words(): built-in list, invalid names, and custom iterables."""
    cv = CountVectorizer()
    # The "english" name resolves to the built-in frozen stop-word list.
    cv.set_params(stop_words="english")
    assert cv.get_stop_words() == ENGLISH_STOP_WORDS
    # Unknown stop-word list names must raise ValueError.
    cv.set_params(stop_words="_bad_str_stop_")
    with pytest.raises(ValueError):
        cv.get_stop_words()
    cv.set_params(stop_words="_bad_unicode_stop_")
    with pytest.raises(ValueError):
        cv.get_stop_words()
    # A custom iterable of words is returned as a set.
    stoplist = ["some", "other", "words"]
    cv.set_params(stop_words=stoplist)
    assert cv.get_stop_words() == set(stoplist)
def test_countvectorizer_empty_vocabulary():
    """Fitting must fail when the vocabulary ends up empty."""
    # Explicitly empty user vocabulary.
    with pytest.raises(ValueError, match="empty vocabulary"):
        vect = CountVectorizer(vocabulary=[])
        vect.fit(["foo"])
    # Vocabulary emptied out entirely by stop-word filtering.
    with pytest.raises(ValueError, match="empty vocabulary"):
        v = CountVectorizer(max_df=1.0, stop_words="english")
        # fit on stopwords only
        v.fit(["to be or not to be", "and me too", "and so do you"])
def test_fit_countvectorizer_twice():
    """Refitting the same CountVectorizer relearns the vocabulary from scratch."""
    vectorizer = CountVectorizer()
    n_features_first = vectorizer.fit_transform(ALL_FOOD_DOCS[:5]).shape[1]
    n_features_second = vectorizer.fit_transform(ALL_FOOD_DOCS[5:]).shape[1]
    # The two halves of the corpus use different words, so the learned
    # vocabularies (and hence feature counts) must differ.
    assert n_features_first != n_features_second
# TODO: Remove in 1.2 when get_feature_names is removed.
@pytest.mark.filterwarnings("ignore::FutureWarning:sklearn")
@pytest.mark.parametrize("get_names", ["get_feature_names", "get_feature_names_out"])
def test_countvectorizer_custom_token_pattern(get_names):
    """Check `get_feature_names()` when a custom token pattern is passed.
    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/12971
    """
    corpus = [
        "This is the 1st document in my corpus.",
        "This document is the 2nd sample.",
        "And this is the 3rd one.",
        "Is this the 4th document?",
    ]
    # Exactly one capturing group: the word following an ordinal like "1st".
    token_pattern = r"[0-9]{1,3}(?:st|nd|rd|th)\s\b(\w{2,})\b"
    vectorizer = CountVectorizer(token_pattern=token_pattern)
    vectorizer.fit_transform(corpus)
    expected = ["document", "one", "sample"]
    feature_names_out = getattr(vectorizer, get_names)()
    assert_array_equal(feature_names_out, expected)
def test_countvectorizer_custom_token_pattern_with_several_group():
    """Check that we raise an error if token pattern capture several groups.
    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/12971
    """
    docs = [
        "This is the 1st document in my corpus.",
        "This document is the 2nd sample.",
        "And this is the 3rd one.",
        "Is this the 4th document?",
    ]
    # Two capturing groups -> ambiguous token extraction -> must be rejected.
    two_group_pattern = r"([0-9]{1,3}(?:st|nd|rd|th))\s\b(\w{2,})\b"
    vectorizer = CountVectorizer(token_pattern=two_group_pattern)
    expected_error = "More than 1 capturing group in token pattern"
    with pytest.raises(ValueError, match=expected_error):
        vectorizer.fit(docs)
def test_countvectorizer_uppercase_in_vocab():
    """Upper-case vocabulary entries trigger a warning when lowercase=True."""
    vocabulary = ["Sample", "Upper", "CaseVocabulary"]
    expected_message = (
        "Upper case characters found in"
        " vocabulary while 'lowercase'"
        " is True. These entries will not"
        " be matched with any documents"
    )
    vectorizer = CountVectorizer(lowercase=True, vocabulary=vocabulary)
    # Fitting on the vocabulary itself: lowercased documents can never match
    # the mixed-case vocabulary entries, which is what the warning is about.
    with pytest.warns(UserWarning, match=expected_message):
        vectorizer.fit_transform(vocabulary)
def test_tf_transformer_feature_names_out():
    """Check get_feature_names_out for TfidfTransformer."""
    counts = [[1, 1, 1], [1, 1, 0], [1, 0, 0]]
    transformer = TfidfTransformer(smooth_idf=True, norm="l2").fit(counts)
    names_in = ["a", "c", "b"]
    # TfidfTransformer is a pass-through for feature names: input order kept.
    assert_array_equal(names_in, transformer.get_feature_names_out(names_in))
def test_tf_idf_smoothing():
    """smooth_idf=True yields finite, non-negative, l2-normalized rows."""
    X = [[1, 1, 1], [1, 1, 0], [1, 0, 0]]
    tr = TfidfTransformer(smooth_idf=True, norm="l2")
    tfidf = tr.fit_transform(X).toarray()
    assert (tfidf >= 0).all()
    # check normalization
    assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1.0, 1.0, 1.0])
    # this is robust to features with only zeros
    X = [[1, 1, 0], [1, 1, 0], [1, 0, 0]]
    tr = TfidfTransformer(smooth_idf=True, norm="l2")
    tfidf = tr.fit_transform(X).toarray()
    assert (tfidf >= 0).all()
def test_tfidf_no_smoothing():
    """smooth_idf=False works on dense counts but divides by zero on empty features."""
    X = [[1, 1, 1], [1, 1, 0], [1, 0, 0]]
    tr = TfidfTransformer(smooth_idf=False, norm="l2")
    tfidf = tr.fit_transform(X).toarray()
    assert (tfidf >= 0).all()
    # check normalization
    assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1.0, 1.0, 1.0])
    # the lack of smoothing make IDF fragile in the presence of feature with
    # only zeros
    X = [[1, 1, 0], [1, 1, 0], [1, 0, 0]]
    tr = TfidfTransformer(smooth_idf=False, norm="l2")
    in_warning_message = "divide by zero"
    with pytest.warns(RuntimeWarning, match=in_warning_message):
        tr.fit_transform(X).toarray()
def test_sublinear_tf():
    """Sublinear tf (1 + log(tf)) grows with the count but slower than linearly."""
    counts = [[1], [2], [3]]
    transformer = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
    scaled = transformer.fit_transform(counts).toarray()
    # tf == 1 maps to exactly 1, larger counts stay strictly increasing ...
    assert scaled[0] == 1
    assert scaled[1] > scaled[0]
    assert scaled[2] > scaled[1]
    # ... but below the raw count (concavity of the log).
    assert scaled[1] < 2
    assert scaled[2] < 3
def test_vectorizer():
    """End-to-end checks for CountVectorizer, TfidfTransformer, TfidfVectorizer.

    Covers fitting from an iterator, vocabulary reuse, tf-idf equivalence
    between the two-step and the combined vectorizer, and error paths for
    invalid parameters.
    """
    # raw documents as an iterator
    train_data = iter(ALL_FOOD_DOCS[:-1])
    test_data = [ALL_FOOD_DOCS[-1]]
    n_train = len(ALL_FOOD_DOCS) - 1
    # test without vocabulary
    v1 = CountVectorizer(max_df=0.5)
    counts_train = v1.fit_transform(train_data)
    if hasattr(counts_train, "tocsr"):
        counts_train = counts_train.tocsr()
    assert counts_train[0, v1.vocabulary_["pizza"]] == 2
    # build a vectorizer v2 with the same vocabulary as the one fitted by v1
    v2 = CountVectorizer(vocabulary=v1.vocabulary_)
    # compare that the two vectorizer give the same output on the test sample
    for v in (v1, v2):
        counts_test = v.transform(test_data)
        if hasattr(counts_test, "tocsr"):
            counts_test = counts_test.tocsr()
        vocabulary = v.vocabulary_
        assert counts_test[0, vocabulary["salad"]] == 1
        assert counts_test[0, vocabulary["tomato"]] == 1
        assert counts_test[0, vocabulary["water"]] == 1
        # stop word from the fixed list
        assert "the" not in vocabulary
        # stop word found automatically by the vectorizer DF thresholding
        # words that are high frequent across the complete corpus are likely
        # to be not informative (either real stop words of extraction
        # artifacts)
        assert "copyright" not in vocabulary
        # not present in the sample
        assert counts_test[0, vocabulary["coke"]] == 0
        assert counts_test[0, vocabulary["burger"]] == 0
        assert counts_test[0, vocabulary["beer"]] == 0
        assert counts_test[0, vocabulary["pizza"]] == 0
    # test tf-idf
    t1 = TfidfTransformer(norm="l1")
    tfidf = t1.fit(counts_train).transform(counts_train).toarray()
    assert len(t1.idf_) == len(v1.vocabulary_)
    assert tfidf.shape == (n_train, len(v1.vocabulary_))
    # test tf-idf with new data
    tfidf_test = t1.transform(counts_test).toarray()
    assert tfidf_test.shape == (len(test_data), len(v1.vocabulary_))
    # test tf alone
    t2 = TfidfTransformer(norm="l1", use_idf=False)
    tf = t2.fit(counts_train).transform(counts_train).toarray()
    assert not hasattr(t2, "idf_")
    # test idf transform with unlearned idf vector
    t3 = TfidfTransformer(use_idf=True)
    with pytest.raises(ValueError):
        t3.transform(counts_train)
    # L1-normalized term frequencies sum to one
    assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
    # test the direct tfidf vectorizer
    # (equivalent to term count vectorizer + tfidf transformer)
    train_data = iter(ALL_FOOD_DOCS[:-1])
    tv = TfidfVectorizer(norm="l1")
    tv.max_df = v1.max_df
    tfidf2 = tv.fit_transform(train_data).toarray()
    assert not tv.fixed_vocabulary_
    assert_array_almost_equal(tfidf, tfidf2)
    # test the direct tfidf vectorizer with new data
    tfidf_test2 = tv.transform(test_data).toarray()
    assert_array_almost_equal(tfidf_test, tfidf_test2)
    # test transform on unfitted vectorizer with empty vocabulary
    v3 = CountVectorizer(vocabulary=None)
    with pytest.raises(ValueError):
        v3.transform(train_data)
    # ascii preprocessor?
    v3.set_params(strip_accents="ascii", lowercase=False)
    processor = v3.build_preprocessor()
    text = "J'ai mangé du kangourou ce midi, c'était pas très bon."
    expected = strip_accents_ascii(text)
    result = processor(text)
    assert expected == result
    # error on bad strip_accents param
    v3.set_params(strip_accents="_gabbledegook_", preprocessor=None)
    with pytest.raises(ValueError):
        v3.build_preprocessor()
    # error with bad analyzer type.  NOTE: previously this rebound the
    # ``set_params`` *method* with a string (``v3.set_params = "..."``)
    # instead of calling it, so the ValueError came from the leftover bad
    # strip_accents value rather than from the invalid analyzer.
    v3.set_params(analyzer="_invalid_analyzer_type_")
    with pytest.raises(ValueError):
        v3.build_analyzer()
def test_tfidf_vectorizer_setters():
    """Setting tf-idf attributes on the vectorizer reaches the inner _tfidf."""
    tv = TfidfVectorizer(norm="l2", use_idf=False, smooth_idf=False, sublinear_tf=False)
    tv.norm = "l1"
    assert tv._tfidf.norm == "l1"
    tv.use_idf = True
    assert tv._tfidf.use_idf
    tv.smooth_idf = True
    assert tv._tfidf.smooth_idf
    tv.sublinear_tf = True
    assert tv._tfidf.sublinear_tf
@fails_if_pypy
def test_hashing_vectorizer():
    """Smoke test HashingVectorizer: shape, dtype, signs, row normalization."""
    v = HashingVectorizer()
    X = v.transform(ALL_FOOD_DOCS)
    token_nnz = X.nnz
    assert X.shape == (len(ALL_FOOD_DOCS), v.n_features)
    assert X.dtype == v.dtype
    # By default the hashed values receive a random sign and l2 normalization
    # makes the feature values bounded
    assert np.min(X.data) > -1
    assert np.min(X.data) < 0
    assert np.max(X.data) > 0
    assert np.max(X.data) < 1
    # Check that the rows are normalized.  BUGFIX: the loop previously
    # asserted on ``X[0]`` every iteration, leaving rows 1..n unchecked.
    for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 2), 1.0)
    # Check vectorization with some non-default parameters
    v = HashingVectorizer(ngram_range=(1, 2), norm="l1")
    X = v.transform(ALL_FOOD_DOCS)
    assert X.shape == (len(ALL_FOOD_DOCS), v.n_features)
    assert X.dtype == v.dtype
    # ngrams generate more non zeros
    ngrams_nnz = X.nnz
    assert ngrams_nnz > token_nnz
    assert ngrams_nnz < 2 * token_nnz
    # makes the feature values bounded
    assert np.min(X.data) > -1
    assert np.max(X.data) < 1
    # Check that the rows are l1-normalized (same X[0] -> X[i] fix as above).
    for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 1), 1.0)
# TODO: Remove in 1.2 when get_feature_names is removed.
@pytest.mark.filterwarnings("ignore::FutureWarning:sklearn")
@pytest.mark.parametrize("get_names", ["get_feature_names", "get_feature_names_out"])
def test_feature_names(get_names):
    """Feature names: error when unfitted, learned order, custom vocabulary."""
    cv = CountVectorizer(max_df=0.5)
    # test for Value error on unfitted/empty vocabulary
    with pytest.raises(ValueError):
        getattr(cv, get_names)()
    assert not cv.fixed_vocabulary_
    # test for vocabulary learned from data
    X = cv.fit_transform(ALL_FOOD_DOCS)
    n_samples, n_features = X.shape
    assert len(cv.vocabulary_) == n_features
    feature_names = getattr(cv, get_names)()
    # The two accessors return different container types for the same names.
    if get_names == "get_feature_names_out":
        assert isinstance(feature_names, np.ndarray)
        assert feature_names.dtype == object
    else:
        # get_feature_names
        assert isinstance(feature_names, list)
    assert len(feature_names) == n_features
    assert_array_equal(
        [
            "beer",
            "burger",
            "celeri",
            "coke",
            "pizza",
            "salad",
            "sparkling",
            "tomato",
            "water",
        ],
        feature_names,
    )
    # Feature order must match the vocabulary_ index mapping.
    for idx, name in enumerate(feature_names):
        assert idx == cv.vocabulary_.get(name)
    # test for custom vocabulary
    vocab = [
        "beer",
        "burger",
        "celeri",
        "coke",
        "pizza",
        "salad",
        "sparkling",
        "tomato",
        "water",
    ]
    cv = CountVectorizer(vocabulary=vocab)
    feature_names = getattr(cv, get_names)()
    assert_array_equal(
        [
            "beer",
            "burger",
            "celeri",
            "coke",
            "pizza",
            "salad",
            "sparkling",
            "tomato",
            "water",
        ],
        feature_names,
    )
    assert cv.fixed_vocabulary_
    for idx, name in enumerate(feature_names):
        assert idx == cv.vocabulary_.get(name)
@pytest.mark.parametrize("Vectorizer", (CountVectorizer, TfidfVectorizer))
def test_vectorizer_max_features(Vectorizer):
    """max_features keeps only the top terms; the rest land in stop_words_."""
    expected_vocabulary = {"burger", "beer", "salad", "pizza"}
    expected_stop_words = {
        "celeri",
        "tomato",
        "copyright",
        "coke",
        "sparkling",
        "water",
        "the",
    }
    # test bounded number of extracted features
    vectorizer = Vectorizer(max_df=0.6, max_features=4)
    vectorizer.fit(ALL_FOOD_DOCS)
    assert set(vectorizer.vocabulary_) == expected_vocabulary
    assert vectorizer.stop_words_ == expected_stop_words
# TODO: Remove in 1.2 when get_feature_names is removed.
@pytest.mark.filterwarnings("ignore::FutureWarning:sklearn")
@pytest.mark.parametrize("get_names", ["get_feature_names", "get_feature_names_out"])
def test_count_vectorizer_max_features(get_names):
    """max_features must keep the most frequent terms, whatever the limit."""
    # Regression test: max_features didn't work correctly in 0.14.
    cv_1 = CountVectorizer(max_features=1)
    cv_3 = CountVectorizer(max_features=3)
    cv_None = CountVectorizer(max_features=None)
    counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
    counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
    counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
    features_1 = getattr(cv_1, get_names)()
    features_3 = getattr(cv_3, get_names)()
    features_None = getattr(cv_None, get_names)()
    # The most common feature is "the", with frequency 7.
    assert 7 == counts_1.max()
    assert 7 == counts_3.max()
    assert 7 == counts_None.max()
    # The most common feature should be the same
    assert "the" == features_1[np.argmax(counts_1)]
    assert "the" == features_3[np.argmax(counts_3)]
    assert "the" == features_None[np.argmax(counts_None)]
def test_vectorizer_max_df():
    """max_df as a proportion (float) or absolute document count (int)."""
    test_data = ["abc", "dea", "eat"]
    vect = CountVectorizer(analyzer="char", max_df=1.0)
    vect.fit(test_data)
    assert "a" in vect.vocabulary_.keys()
    assert len(vect.vocabulary_.keys()) == 6
    assert len(vect.stop_words_) == 0
    vect.max_df = 0.5  # 0.5 * 3 documents -> max_doc_count == 1.5
    vect.fit(test_data)
    assert "a" not in vect.vocabulary_.keys()  # {ae} ignored
    assert len(vect.vocabulary_.keys()) == 4  # {bcdt} remain
    assert "a" in vect.stop_words_
    assert len(vect.stop_words_) == 2
    # An integer max_df is an absolute document count, not a proportion.
    vect.max_df = 1
    vect.fit(test_data)
    assert "a" not in vect.vocabulary_.keys()  # {ae} ignored
    assert len(vect.vocabulary_.keys()) == 4  # {bcdt} remain
    assert "a" in vect.stop_words_
    assert len(vect.stop_words_) == 2
def test_vectorizer_min_df():
    """min_df as an absolute document count (int) or a proportion (float)."""
    test_data = ["abc", "dea", "eat"]
    vect = CountVectorizer(analyzer="char", min_df=1)
    vect.fit(test_data)
    assert "a" in vect.vocabulary_.keys()
    assert len(vect.vocabulary_.keys()) == 6
    assert len(vect.stop_words_) == 0
    # An integer min_df is an absolute document count.
    vect.min_df = 2
    vect.fit(test_data)
    assert "c" not in vect.vocabulary_.keys()  # {bcdt} ignored
    assert len(vect.vocabulary_.keys()) == 2  # {ae} remain
    assert "c" in vect.stop_words_
    assert len(vect.stop_words_) == 4
    vect.min_df = 0.8  # 0.8 * 3 documents -> min_doc_count == 2.4
    vect.fit(test_data)
    assert "c" not in vect.vocabulary_.keys()  # {bcdet} ignored
    assert len(vect.vocabulary_.keys()) == 1  # {a} remains
    assert "c" in vect.stop_words_
    assert len(vect.stop_words_) == 5
# Each case: invalid constructor params, the exception type, and the exact
# error message expected at fit time.
@pytest.mark.parametrize(
    "params, err_type, message",
    (
        ({"max_df": 2.0}, ValueError, "max_df == 2.0, must be <= 1.0."),
        ({"min_df": 1.5}, ValueError, "min_df == 1.5, must be <= 1.0."),
        ({"max_df": -2}, ValueError, "max_df == -2, must be >= 0."),
        ({"min_df": -10}, ValueError, "min_df == -10, must be >= 0."),
        ({"min_df": 3, "max_df": 2.0}, ValueError, "max_df == 2.0, must be <= 1.0."),
        ({"min_df": 1.5, "max_df": 50}, ValueError, "min_df == 1.5, must be <= 1.0."),
        ({"max_features": -10}, ValueError, "max_features == -10, must be >= 0."),
        (
            {"max_features": 3.5},
            TypeError,
            "max_features must be an instance of <class 'numbers.Integral'>, not <class"
            " 'float'>",
        ),
    ),
)
def test_vectorizer_params_validation(params, err_type, message):
    """Invalid max_df/min_df/max_features values raise with a precise message."""
    with pytest.raises(err_type, match=message):
        test_data = ["abc", "dea", "eat"]
        vect = CountVectorizer(**params, analyzer="char")
        vect.fit(test_data)
# TODO: Remove in 1.2 when get_feature_names is removed.
@pytest.mark.filterwarnings("ignore::FutureWarning:sklearn")
@pytest.mark.parametrize("get_names", ["get_feature_names", "get_feature_names_out"])
def test_count_binary_occurrences(get_names):
    """binary=True clips counts to 0/1; dtype can be overridden."""
    # by default multiple occurrences are counted as longs
    test_data = ["aaabc", "abbde"]
    vect = CountVectorizer(analyzer="char", max_df=1.0)
    X = vect.fit_transform(test_data).toarray()
    assert_array_equal(["a", "b", "c", "d", "e"], getattr(vect, get_names)())
    assert_array_equal([[3, 1, 1, 0, 0], [1, 2, 0, 1, 1]], X)
    # using boolean features, we can fetch the binary occurrence info
    # instead.
    vect = CountVectorizer(analyzer="char", max_df=1.0, binary=True)
    X = vect.fit_transform(test_data).toarray()
    assert_array_equal([[1, 1, 1, 0, 0], [1, 1, 0, 1, 1]], X)
    # check the ability to change the dtype
    vect = CountVectorizer(analyzer="char", max_df=1.0, binary=True, dtype=np.float32)
    X_sparse = vect.fit_transform(test_data)
    assert X_sparse.dtype == np.float32
@fails_if_pypy
def test_hashed_binary_occurrences():
    """HashingVectorizer: raw counts vs binary=True, and dtype override."""
    # by default multiple occurrences are counted as longs
    test_data = ["aaabc", "abbde"]
    vect = HashingVectorizer(alternate_sign=False, analyzer="char", norm=None)
    X = vect.transform(test_data)
    assert np.max(X[0:1].data) == 3
    assert np.max(X[1:2].data) == 2
    assert X.dtype == np.float64
    # using boolean features, we can fetch the binary occurrence info
    # instead.
    vect = HashingVectorizer(
        analyzer="char", alternate_sign=False, binary=True, norm=None
    )
    X = vect.transform(test_data)
    assert np.max(X.data) == 1
    assert X.dtype == np.float64
    # check the ability to change the dtype
    vect = HashingVectorizer(
        analyzer="char", alternate_sign=False, binary=True, norm=None, dtype=np.float64
    )
    X = vect.transform(test_data)
    assert X.dtype == np.float64
@pytest.mark.parametrize("Vectorizer", (CountVectorizer, TfidfVectorizer))
def test_vectorizer_inverse_transform(Vectorizer):
    """inverse_transform recovers each document's terms for CSR, dense, CSC."""
    # raw documents
    data = ALL_FOOD_DOCS
    vectorizer = Vectorizer()
    transformed_data = vectorizer.fit_transform(data)
    inversed_data = vectorizer.inverse_transform(transformed_data)
    assert isinstance(inversed_data, list)
    # The recovered terms must equal the unique analyzed tokens per document
    # (order-insensitive, hence the sort on both sides).
    analyze = vectorizer.build_analyzer()
    for doc, inversed_terms in zip(data, inversed_data):
        terms = np.sort(np.unique(analyze(doc)))
        inversed_terms = np.sort(np.unique(inversed_terms))
        assert_array_equal(terms, inversed_terms)
    assert sparse.issparse(transformed_data)
    assert transformed_data.format == "csr"
    # Test that inverse_transform also works with numpy arrays and
    # scipy
    transformed_data2 = transformed_data.toarray()
    inversed_data2 = vectorizer.inverse_transform(transformed_data2)
    for terms, terms2 in zip(inversed_data, inversed_data2):
        assert_array_equal(np.sort(terms), np.sort(terms2))
    # Check that inverse_transform also works on non CSR sparse data:
    transformed_data3 = transformed_data.tocsc()
    inversed_data3 = vectorizer.inverse_transform(transformed_data3)
    for terms, terms3 in zip(inversed_data, inversed_data3):
        assert_array_equal(np.sort(terms), np.sort(terms3))
def test_count_vectorizer_pipeline_grid_selection():
    """GridSearchCV over a CountVectorizer+LinearSVC pipeline reaches 100%."""
    # raw documents
    data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
    # label junk food as -1, the others as +1
    target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
    # split the dataset for model development and final evaluation
    train_data, test_data, target_train, target_test = train_test_split(
        data, target, test_size=0.2, random_state=0
    )
    pipeline = Pipeline([("vect", CountVectorizer()), ("svc", LinearSVC())])
    parameters = {
        "vect__ngram_range": [(1, 1), (1, 2)],
        "svc__loss": ("hinge", "squared_hinge"),
    }
    # find the best parameters for both the feature extraction and the
    # classifier
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=1, cv=3)
    # Check that the best model found by grid search is 100% correct on the
    # held out evaluation set.
    pred = grid_search.fit(train_data, target_train).predict(test_data)
    assert_array_equal(pred, target_test)
    # on this toy dataset bigram representation which is used in the last of
    # the grid_search is considered the best estimator since they all converge
    # to 100% accuracy models
    assert grid_search.best_score_ == 1.0
    best_vectorizer = grid_search.best_estimator_.named_steps["vect"]
    assert best_vectorizer.ngram_range == (1, 1)
def test_vectorizer_pipeline_grid_selection():
    """GridSearchCV over a TfidfVectorizer+LinearSVC pipeline reaches 100%."""
    # raw documents
    data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
    # label junk food as -1, the others as +1
    target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
    # split the dataset for model development and final evaluation
    train_data, test_data, target_train, target_test = train_test_split(
        data, target, test_size=0.1, random_state=0
    )
    pipeline = Pipeline([("vect", TfidfVectorizer()), ("svc", LinearSVC())])
    parameters = {
        "vect__ngram_range": [(1, 1), (1, 2)],
        "vect__norm": ("l1", "l2"),
        "svc__loss": ("hinge", "squared_hinge"),
    }
    # find the best parameters for both the feature extraction and the
    # classifier
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
    # Check that the best model found by grid search is 100% correct on the
    # held out evaluation set.
    pred = grid_search.fit(train_data, target_train).predict(test_data)
    assert_array_equal(pred, target_test)
    # on this toy dataset bigram representation which is used in the last of
    # the grid_search is considered the best estimator since they all converge
    # to 100% accuracy models
    assert grid_search.best_score_ == 1.0
    best_vectorizer = grid_search.best_estimator_.named_steps["vect"]
    assert best_vectorizer.ngram_range == (1, 1)
    assert best_vectorizer.norm == "l2"
    assert not best_vectorizer.fixed_vocabulary_
def test_vectorizer_pipeline_cross_validation():
    """A tfidf+SVM pipeline separates junk from non-junk docs in every CV fold."""
    docs = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
    # junk food labeled -1, everything else +1
    labels = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
    model = Pipeline([("vect", TfidfVectorizer()), ("svc", LinearSVC())])
    fold_scores = cross_val_score(model, docs, labels, cv=3)
    assert_array_equal(fold_scores, [1.0, 1.0, 1.0])
@fails_if_pypy
def test_vectorizer_unicode():
    """Counting and hashing agree on non-ASCII (Cyrillic) text."""
    # tests that the count vectorizer works with cyrillic.
    document = (
        "Машинное обучение — обширный подраздел искусственного "
        "интеллекта, изучающий методы построения алгоритмов, "
        "способных обучаться."
    )
    vect = CountVectorizer()
    X_counted = vect.fit_transform([document])
    assert X_counted.shape == (1, 12)
    vect = HashingVectorizer(norm=None, alternate_sign=False)
    X_hashed = vect.transform([document])
    assert X_hashed.shape == (1, 2 ** 20)
    # No collisions on such a small dataset
    assert X_counted.nnz == X_hashed.nnz
    # When norm is None and not alternate_sign, the tokens are counted up to
    # collisions
    assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
    # non regression smoke test for inheritance issues
    vect = TfidfVectorizer(vocabulary=["pizza", "celeri"])
    X_from_fit = vect.fit_transform(ALL_FOOD_DOCS)
    X_from_transform = vect.transform(ALL_FOOD_DOCS)
    # fit_transform and a subsequent transform must agree numerically
    assert_array_almost_equal(X_from_fit.toarray(), X_from_transform.toarray())
    assert vect.fixed_vocabulary_
def test_pickling_vectorizer():
    """Vectorizers (fitted or not) survive a pickle round-trip unchanged."""
    instances = [
        HashingVectorizer(),
        HashingVectorizer(norm="l1"),
        HashingVectorizer(binary=True),
        HashingVectorizer(ngram_range=(1, 2)),
        CountVectorizer(),
        CountVectorizer(preprocessor=strip_tags),
        CountVectorizer(analyzer=lazy_analyze),
        CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
        CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
        TfidfVectorizer(),
        TfidfVectorizer(analyzer=lazy_analyze),
        TfidfVectorizer().fit(JUNK_FOOD_DOCS),
    ]
    for orig in instances:
        s = pickle.dumps(orig)
        copy = pickle.loads(s)
        assert type(copy) == orig.__class__
        assert copy.get_params() == orig.get_params()
        # HashingVectorizer is not supported on PyPy, so skip the transform
        # comparison there; the pickle round-trip above is still exercised.
        if IS_PYPY and isinstance(orig, HashingVectorizer):
            continue
        else:
            assert_array_equal(
                copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
                orig.fit_transform(JUNK_FOOD_DOCS).toarray(),
            )
@pytest.mark.parametrize(
    "factory",
    [
        CountVectorizer.build_analyzer,
        CountVectorizer.build_preprocessor,
        CountVectorizer.build_tokenizer,
    ],
)
def test_pickling_built_processors(factory):
    """Tokenizers cannot be pickled
    https://github.com/scikit-learn/scikit-learn/issues/12833
    """
    text = "J'ai mangé du kangourou ce midi, c'était pas très bon."
    built_processor = factory(CountVectorizer())
    restored_processor = pickle.loads(pickle.dumps(built_processor))
    # The unpickled callable must behave exactly like the original.
    assert restored_processor(text) == built_processor(text)
# TODO: Remove in 1.2 when get_feature_names is removed.
@pytest.mark.filterwarnings("ignore::FutureWarning:sklearn")
@pytest.mark.parametrize("get_names", ["get_feature_names", "get_feature_names_out"])
def test_countvectorizer_vocab_sets_when_pickling(get_names):
    """Set vocabularies keep a stable feature order across pickling."""
    # ensure that vocabulary of type set is coerced to a list to
    # preserve iteration ordering after deserialization
    rng = np.random.RandomState(0)
    vocab_words = np.array(
        [
            "beer",
            "burger",
            "celeri",
            "coke",
            "pizza",
            "salad",
            "sparkling",
            "tomato",
            "water",
        ]
    )
    # Repeat with many random 5-word subsets to catch ordering flakiness.
    for x in range(0, 100):
        vocab_set = set(rng.choice(vocab_words, size=5, replace=False))
        cv = CountVectorizer(vocabulary=vocab_set)
        unpickled_cv = pickle.loads(pickle.dumps(cv))
        cv.fit(ALL_FOOD_DOCS)
        unpickled_cv.fit(ALL_FOOD_DOCS)
        assert_array_equal(getattr(cv, get_names)(), getattr(unpickled_cv, get_names)())
# TODO: Remove in 1.2 when get_feature_names is removed.
@pytest.mark.filterwarnings("ignore::FutureWarning:sklearn")
@pytest.mark.parametrize("get_names", ["get_feature_names", "get_feature_names_out"])
def test_countvectorizer_vocab_dicts_when_pickling(get_names):
    """Dict vocabularies yield the same feature names after a pickle round-trip."""
    rng = np.random.RandomState(0)
    vocab_words = np.array(
        [
            "beer",
            "burger",
            "celeri",
            "coke",
            "pizza",
            "salad",
            "sparkling",
            "tomato",
            "water",
        ]
    )
    # Repeat with many random 5-word vocabularies (word -> index mapping).
    for _ in range(100):
        sampled_words = rng.choice(vocab_words, size=5, replace=False)
        vocab_dict = {word: index for index, word in enumerate(sampled_words)}
        cv = CountVectorizer(vocabulary=vocab_dict)
        unpickled_cv = pickle.loads(pickle.dumps(cv))
        cv.fit(ALL_FOOD_DOCS)
        unpickled_cv.fit(ALL_FOOD_DOCS)
        assert_array_equal(getattr(cv, get_names)(), getattr(unpickled_cv, get_names)())
def test_stop_words_removal():
    """transform() must not depend on the stop_words_ fitted attribute."""
    # Ensure that deleting the stop_words_ attribute doesn't affect transform
    fitted_vectorizers = (
        TfidfVectorizer().fit(JUNK_FOOD_DOCS),
        CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
        CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
    )
    for vect in fitted_vectorizers:
        vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
        # Both overwriting with None and deleting the attribute must leave
        # the transform output unchanged.
        vect.stop_words_ = None
        stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
        delattr(vect, "stop_words_")
        stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
        assert_array_equal(stop_None_transform, vect_transform)
        assert_array_equal(stop_del_transform, vect_transform)
def test_pickling_transformer():
    """A fitted TfidfTransformer survives a pickle round-trip."""
    counts = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
    original = TfidfTransformer().fit(counts)
    restored = pickle.loads(pickle.dumps(original))
    assert type(restored) == original.__class__
    assert_array_equal(
        restored.fit_transform(counts).toarray(),
        original.fit_transform(counts).toarray(),
    )
def test_transformer_idf_setter():
    """Assigning idf_ onto a fresh transformer reproduces the fitted output."""
    counts = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
    fitted = TfidfTransformer().fit(counts)
    fresh = TfidfTransformer()
    fresh.idf_ = fitted.idf_
    assert_array_equal(
        fresh.transform(counts).toarray(), fitted.transform(counts).toarray()
    )
def test_tfidf_vectorizer_setter():
    """Copying vocabulary_ + idf_ into a fresh vectorizer reproduces transform."""
    fitted = TfidfVectorizer(use_idf=True)
    fitted.fit(JUNK_FOOD_DOCS)
    fresh = TfidfVectorizer(vocabulary=fitted.vocabulary_, use_idf=True)
    fresh.idf_ = fitted.idf_
    expected = fitted.transform(JUNK_FOOD_DOCS).toarray()
    assert_array_equal(fresh.transform(JUNK_FOOD_DOCS).toarray(), expected)
def test_tfidfvectorizer_invalid_idf_attr():
    """Setting idf_ with a length mismatching the vocabulary must raise."""
    fitted = TfidfVectorizer(use_idf=True)
    fitted.fit(JUNK_FOOD_DOCS)
    fresh = TfidfVectorizer(vocabulary=fitted.vocabulary_, use_idf=True)
    # One entry too many relative to the vocabulary size.
    bad_idf = [1.0] * (len(fitted.idf_) + 1)
    with pytest.raises(ValueError):
        setattr(fresh, "idf_", bad_idf)
def test_non_unique_vocab():
    """Duplicate entries in a user-supplied vocabulary must raise at fit time."""
    duplicated_vocab = ["a", "b", "c", "a", "a"]
    vect = CountVectorizer(vocabulary=duplicated_vocab)
    with pytest.raises(ValueError):
        vect.fit([])
@fails_if_pypy
def test_hashingvectorizer_nan_in_docs():
    """A np.nan document raises a clear ValueError instead of a crash."""
    # np.nan can appear when using pandas to load text fields from a csv file
    # with missing values.
    message = "np.nan is an invalid document, expected byte or unicode string."
    exception = ValueError

    def func():
        # Helper so the whole transform call sits inside pytest.raises.
        hv = HashingVectorizer()
        hv.fit_transform(["hello world", np.nan, "hello hello"])

    with pytest.raises(exception, match=message):
        func()
def test_tfidfvectorizer_binary():
    # Non-regression test: TfidfVectorizer used to ignore its "binary" param.
    vectorizer = TfidfVectorizer(binary=True, use_idf=False, norm=None)
    assert vectorizer.binary
    docs = ["hello world", "hello hello"]
    # Counts are clipped to 0/1 both at fit_transform and transform time.
    from_fit = vectorizer.fit_transform(docs).toarray()
    assert_array_equal(from_fit.ravel(), [1, 1, 1, 0])
    from_transform = vectorizer.transform(docs).toarray()
    assert_array_equal(from_transform.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
    """The public idf_ property mirrors the inner transformer's idf_."""
    vectorizer = TfidfVectorizer(use_idf=True).fit(JUNK_FOOD_DOCS)
    assert_array_almost_equal(vectorizer.idf_, vectorizer._tfidf.idf_)
def test_vectorizer_vocab_clone():
    """clone() preserves a user-supplied vocabulary."""
    original = TfidfVectorizer(vocabulary=["the"])
    cloned = clone(original)
    original.fit(ALL_FOOD_DOCS)
    cloned.fit(ALL_FOOD_DOCS)
    assert cloned.vocabulary_ == original.vocabulary_
@pytest.mark.parametrize(
    "Vectorizer", (CountVectorizer, TfidfVectorizer, HashingVectorizer)
)
def test_vectorizer_string_object_as_input(Vectorizer):
    """Passing a bare string (instead of an iterable of docs) must raise."""
    message = "Iterable over raw text documents expected, string object received."
    vec = Vectorizer()
    with pytest.raises(ValueError, match=message):
        vec.fit_transform("hello world!")
    with pytest.raises(ValueError, match=message):
        vec.fit("hello world!")
    # Same check on transform after a valid fit.
    vec.fit(["some text", "some other text"])
    with pytest.raises(ValueError, match=message):
        vec.transform("hello world!")
@pytest.mark.parametrize("X_dtype", [np.float32, np.float64])
def test_tfidf_transformer_type(X_dtype):
    """TfidfTransformer preserves the floating dtype of its sparse input."""
    X = sparse.rand(10, 20000, dtype=X_dtype, random_state=42)
    transformed = TfidfTransformer().fit_transform(X)
    assert transformed.dtype == X.dtype
def test_tfidf_transformer_sparse():
    """CSC and CSR inputs give numerically identical output in the same format."""
    X = sparse.rand(10, 20000, dtype=np.float64, random_state=42)
    from_csc = TfidfTransformer().fit_transform(sparse.csc_matrix(X))
    from_csr = TfidfTransformer().fit_transform(sparse.csr_matrix(X))
    assert_allclose_dense_sparse(from_csc, from_csr)
    assert from_csc.format == from_csr.format
# Integer dtypes are silently upcast to float64 with a UserWarning; float
# dtypes are preserved without warning.
@pytest.mark.parametrize(
    "vectorizer_dtype, output_dtype, warning_expected",
    [
        (np.int32, np.float64, True),
        (np.int64, np.float64, True),
        (np.float32, np.float32, False),
        (np.float64, np.float64, False),
    ],
)
def test_tfidf_vectorizer_type(vectorizer_dtype, output_dtype, warning_expected):
    """TfidfVectorizer dtype handling: output dtype and upcast warning."""
    X = np.array(["numpy", "scipy", "sklearn"])
    vectorizer = TfidfVectorizer(dtype=vectorizer_dtype)
    warning_msg_match = "'dtype' should be used."
    warning_cls = UserWarning
    expected_warning_cls = warning_cls if warning_expected else None
    with pytest.warns(expected_warning_cls, match=warning_msg_match) as record:
        X_idf = vectorizer.fit_transform(X)
    # When no warning is expected, verify none of the matching class fired.
    if expected_warning_cls is None:
        relevant_warnings = [w for w in record if isinstance(w, warning_cls)]
        assert len(relevant_warnings) == 0
    assert X_idf.dtype == output_dtype
@pytest.mark.parametrize(
    "vec",
    [
        HashingVectorizer(ngram_range=(2, 1)),
        CountVectorizer(ngram_range=(2, 1)),
        TfidfVectorizer(ngram_range=(2, 1)),
    ],
)
def test_vectorizers_invalid_ngram_range(vec):
    """An ngram_range with lower bound > upper bound must raise everywhere."""
    # vectorizers could be initialized with invalid ngram range
    # test for raising error message
    invalid_range = vec.ngram_range
    message = re.escape(
        f"Invalid value for ngram_range={invalid_range} "
        "lower boundary larger than the upper boundary."
    )
    if isinstance(vec, HashingVectorizer) and IS_PYPY:
        pytest.xfail(reason="HashingVectorizer is not supported on PyPy")
    with pytest.raises(ValueError, match=message):
        vec.fit(["good news everyone"])
    with pytest.raises(ValueError, match=message):
        vec.fit_transform(["good news everyone"])
    # HashingVectorizer is stateless, so transform must validate too.
    if isinstance(vec, HashingVectorizer):
        with pytest.raises(ValueError, match=message):
            vec.transform(["good news everyone"])
def _check_stop_words_consistency(estimator):
    """Invoke the estimator's private stop-word consistency check using its
    own stop words, preprocessor, and tokenizer."""
    return estimator._check_stop_words_consistency(
        estimator.get_stop_words(),
        estimator.build_preprocessor(),
        estimator.build_tokenizer(),
    )
@fails_if_pypy
def test_vectorizer_stop_words_inconsistent():
    """Warn exactly once per stop-word list when tokenizing the stop words
    yields tokens that are not themselves in ``stop_words``."""
    import warnings
    lstr = r"\['and', 'll', 've'\]"
    message = (
        "Your stop_words may be inconsistent with your "
        "preprocessing. Tokenizing the stop words generated "
        "tokens %s not in stop_words." % lstr
    )
    for vec in [CountVectorizer(), TfidfVectorizer(), HashingVectorizer()]:
        vec.set_params(stop_words=["you've", "you", "you'll", "AND"])
        with pytest.warns(UserWarning, match=message):
            vec.fit_transform(["hello world"])
        # reset stop word validation
        del vec._stop_words_id
        assert _check_stop_words_consistency(vec) is False
        # Only one warning per stop list. ``pytest.warns(None)`` is deprecated
        # in pytest 7 (removed in 8), so record warnings manually instead.
        with warnings.catch_warnings(record=True) as record:
            warnings.simplefilter("always")
            vec.fit_transform(["hello world"])
        assert not len(record)
        assert _check_stop_words_consistency(vec) is None
        # Test caching of inconsistency assessment
        vec.set_params(stop_words=["you've", "you", "you'll", "blah", "AND"])
        with pytest.warns(UserWarning, match=message):
            vec.fit_transform(["hello world"])
@skip_if_32bit
def test_countvectorizer_sort_features_64bit_sparse_indices():
    """
    Check that CountVectorizer._sort_features preserves the dtype of its sparse
    feature matrix.
    This test is skipped on 32bit platforms, see:
    https://github.com/scikit-learn/scikit-learn/pull/11295
    for more details.
    """
    indices_dtype = np.int64
    X = sparse.csr_matrix((5, 5), dtype=np.int64)
    # Force the index arrays to int64 as well, not just the data.
    X.indices = X.indices.astype(indices_dtype)
    X.indptr = X.indptr.astype(indices_dtype)
    vocabulary = {"scikit-learn": 0, "is": 1, "great!": 2}
    sorted_X = CountVectorizer()._sort_features(X, vocabulary)
    assert sorted_X.indices.dtype == indices_dtype
@fails_if_pypy
@pytest.mark.parametrize(
    "Estimator", [CountVectorizer, TfidfVectorizer, HashingVectorizer]
)
def test_stop_word_validation_custom_preprocessor(Estimator):
    """Stop-word consistency checking with custom preprocessors/tokenizers."""
    data = [{"text": "some text"}]
    vec = Estimator()
    # Default configuration: stop words are consistent.
    assert _check_stop_words_consistency(vec) is True
    # The preprocessor indexes a dict, so applying it to the stop-word strings
    # fails and the consistency check reports "error".
    vec = Estimator(preprocessor=lambda x: x["text"], stop_words=["and"])
    assert _check_stop_words_consistency(vec) == "error"
    # checks are cached
    assert _check_stop_words_consistency(vec) is None
    vec.fit_transform(data)
    class CustomEstimator(Estimator):
        # Same dict-indexing preprocessor, supplied by overriding the builder.
        def build_preprocessor(self):
            return lambda x: x["text"]
    vec = CustomEstimator(stop_words=["and"])
    assert _check_stop_words_consistency(vec) == "error"
    # A custom tokenizer on plain strings keeps the stop words consistent.
    vec = Estimator(
        tokenizer=lambda doc: re.compile(r"\w{1,}").findall(doc), stop_words=["and"]
    )
    assert _check_stop_words_consistency(vec) is True
@pytest.mark.parametrize(
    "Estimator", [CountVectorizer, TfidfVectorizer, HashingVectorizer]
)
@pytest.mark.parametrize(
    "input_type, err_type, err_msg",
    [
        ("filename", FileNotFoundError, ""),
        ("file", AttributeError, "'str' object has no attribute 'read'"),
    ],
)
def test_callable_analyzer_error(Estimator, input_type, err_type, err_msg):
    """With a callable analyzer, 'file'/'filename' inputs still go through the
    usual document loading, so plain strings must raise the matching error."""
    # NOTE(review): this xfails HashingVectorizer unconditionally, not only on
    # PyPy as the reason string suggests — confirm whether ``and IS_PYPY`` was
    # intended (compare test_vectorizers_invalid_ngram_range above).
    if issubclass(Estimator, HashingVectorizer):
        pytest.xfail("HashingVectorizer is not supported on PyPy")
    data = ["this is text, not file or filename"]
    with pytest.raises(err_type, match=err_msg):
        Estimator(analyzer=lambda x: x.split(), input=input_type).fit_transform(data)
@pytest.mark.parametrize(
    "Estimator",
    [
        CountVectorizer,
        TfidfVectorizer,
        pytest.param(HashingVectorizer, marks=fails_if_pypy),
    ],
)
@pytest.mark.parametrize(
    "analyzer", [lambda doc: open(doc, "r"), lambda doc: doc.read()]
)
@pytest.mark.parametrize("input_type", ["file", "filename"])
def test_callable_analyzer_change_behavior(Estimator, analyzer, input_type):
    """A callable analyzer receives the decoded document text, so analyzers
    that expect a path or a file handle must fail."""
    documents = ["this is text, not file or filename"]
    vectorizer = Estimator(analyzer=analyzer, input=input_type)
    with pytest.raises((FileNotFoundError, AttributeError)):
        vectorizer.fit_transform(documents)
@pytest.mark.parametrize(
    "Estimator", [CountVectorizer, TfidfVectorizer, HashingVectorizer]
)
def test_callable_analyzer_reraise_error(tmpdir, Estimator):
    # check if a custom exception from the analyzer is shown to the user
    def analyzer(doc):
        # Custom analyzer that always fails with a recognizable message.
        raise Exception("testing")
    # NOTE(review): xfails HashingVectorizer unconditionally despite the
    # PyPy-specific reason string — confirm intent.
    if issubclass(Estimator, HashingVectorizer):
        pytest.xfail("HashingVectorizer is not supported on PyPy")
    f = tmpdir.join("file.txt")
    f.write("sample content\n")
    # The analyzer's own message must surface to the user, not be swallowed.
    with pytest.raises(Exception, match="testing"):
        Estimator(analyzer=analyzer, input="file").fit_transform([f])
@pytest.mark.parametrize(
    "Vectorizer", [CountVectorizer, HashingVectorizer, TfidfVectorizer]
)
@pytest.mark.parametrize(
    "stop_words, tokenizer, preprocessor, ngram_range, token_pattern,"
    "analyzer, unused_name, ovrd_name, ovrd_msg",
    [
        (
            ["you've", "you'll"],
            None,
            None,
            (1, 1),
            None,
            "char",
            "'stop_words'",
            "'analyzer'",
            "!= 'word'",
        ),
        (
            None,
            lambda s: s.split(),
            None,
            (1, 1),
            None,
            "char",
            "'tokenizer'",
            "'analyzer'",
            "!= 'word'",
        ),
        (
            None,
            lambda s: s.split(),
            None,
            (1, 1),
            r"\w+",
            "word",
            "'token_pattern'",
            "'tokenizer'",
            "is not None",
        ),
        (
            None,
            None,
            lambda s: s.upper(),
            (1, 1),
            r"\w+",
            lambda s: s.upper(),
            "'preprocessor'",
            "'analyzer'",
            "is callable",
        ),
        (
            None,
            None,
            None,
            (1, 2),
            None,
            lambda s: s.upper(),
            "'ngram_range'",
            "'analyzer'",
            "is callable",
        ),
        (
            None,
            None,
            None,
            (1, 1),
            r"\w+",
            "char",
            "'token_pattern'",
            "'analyzer'",
            "!= 'word'",
        ),
    ],
)
def test_unused_parameters_warn(
    Vectorizer,
    stop_words,
    tokenizer,
    preprocessor,
    ngram_range,
    token_pattern,
    analyzer,
    unused_name,
    ovrd_name,
    ovrd_msg,
):
    """Setting a parameter that is overridden/ignored by another setting
    (e.g. ``stop_words`` with a non-'word' analyzer) must emit a UserWarning
    naming both the unused parameter and the overriding one."""
    train_data = JUNK_FOOD_DOCS
    # setting parameter and checking for corresponding warning messages
    vect = Vectorizer()
    vect.set_params(
        stop_words=stop_words,
        tokenizer=tokenizer,
        preprocessor=preprocessor,
        ngram_range=ngram_range,
        token_pattern=token_pattern,
        analyzer=analyzer,
    )
    # The expected warning text is built from the parametrized fragments.
    msg = "The parameter %s will not be used since %s %s" % (
        unused_name,
        ovrd_name,
        ovrd_msg,
    )
    with pytest.warns(UserWarning, match=msg):
        vect.fit(train_data)
@pytest.mark.parametrize(
    "Vectorizer, X",
    (
        (HashingVectorizer, [{"foo": 1, "bar": 2}, {"foo": 3, "baz": 1}]),
        (CountVectorizer, JUNK_FOOD_DOCS),
    ),
)
def test_n_features_in(Vectorizer, X):
    """Vectorizers consume raw documents, so they never expose n_features_in_."""
    vectorizer = Vectorizer()
    assert not hasattr(vectorizer, "n_features_in_")  # before fitting...
    vectorizer.fit(X)
    assert not hasattr(vectorizer, "n_features_in_")  # ...and after fitting
def test_tie_breaking_sample_order_invariance():
    """max_features tie-breaking must not depend on sample order (gh-17939)."""
    vectorizer = CountVectorizer(max_features=1)
    vocabularies = [
        vectorizer.fit(docs).vocabulary_
        for docs in (["hello", "world"], ["world", "hello"])
    ]
    assert vocabularies[0] == vocabularies[1]
# TODO: Remove in 1.2 when get_feature_names is removed
def test_get_feature_names_deprecated():
    """get_feature_names must emit a FutureWarning (deprecated in 1.0)."""
    count_vectorizer = CountVectorizer(max_df=0.5).fit(ALL_FOOD_DOCS)
    with pytest.warns(FutureWarning, match="get_feature_names is deprecated in 1.0"):
        count_vectorizer.get_feature_names()
@fails_if_pypy
def test_nonnegative_hashing_vectorizer_result_indices():
    """Hashed feature indices must be non-negative (regression test, gh-19035)."""
    vectorizer = HashingVectorizer(n_features=1000000, ngram_range=(2, 3))
    result = vectorizer.transform(["22pcs efuture"])
    assert result.indices[0] >= 0
| 41,971 | 0 | 1,561 |
ac47bb9a44cf37a97db3e576ed067939f6618234 | 108 | py | Python | processing/extraction/__init__.py | yashpatel5400/allercery | 09b201ea7f3a7ecf7393cb102f4bdebc780145c7 | [
"MIT"
] | null | null | null | processing/extraction/__init__.py | yashpatel5400/allercery | 09b201ea7f3a7ecf7393cb102f4bdebc780145c7 | [
"MIT"
] | null | null | null | processing/extraction/__init__.py | yashpatel5400/allercery | 09b201ea7f3a7ecf7393cb102f4bdebc780145c7 | [
"MIT"
] | null | null | null | """
__author__ = HackPrinceton 2017 Best Team
__description__ = Initializes files for extraction module
"""
| 21.6 | 57 | 0.787037 | """
__author__ = HackPrinceton 2017 Best Team
__description__ = Initializes files for extraction module
"""
| 0 | 0 | 0 |
73c62dc035f06d89c56d3612826161e36082d017 | 5,149 | py | Python | src/openfermion/utils/__init__.py | Spaceenter/OpenFermion | c1bf76582ec94373333d95fc27d1b92248ba3efd | [
"Apache-2.0"
] | 3 | 2018-08-03T22:48:47.000Z | 2022-02-10T15:05:35.000Z | src/openfermion/utils/__init__.py | Spaceenter/OpenFermion | c1bf76582ec94373333d95fc27d1b92248ba3efd | [
"Apache-2.0"
] | null | null | null | src/openfermion/utils/__init__.py | Spaceenter/OpenFermion | c1bf76582ec94373333d95fc27d1b92248ba3efd | [
"Apache-2.0"
] | 1 | 2019-03-25T13:39:13.000Z | 2019-03-25T13:39:13.000Z | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._bch_expansion import bch_expand
from ._channel_state import (amplitude_damping_channel, dephasing_channel,
depolarizing_channel)
from ._commutators import anticommutator, commutator, double_commutator
from ._grid import Grid
from ._lcu_util import (lambda_norm,
preprocess_lcu_coefficients_for_reversible_sampling)
from ._operator_utils import (chemist_ordered, count_qubits,
eigenspectrum, fourier_transform,
freeze_orbitals, get_file_path,
hermitian_conjugated, inline_sum,
inverse_fourier_transform,
is_hermitian, is_identity,
normal_ordered, prune_unused_indices,
reorder, up_then_down,
load_operator, save_operator)
from ._rdm_mapping_functions import (kronecker_delta,
map_two_pdm_to_two_hole_dm,
map_two_pdm_to_one_pdm,
map_one_pdm_to_one_hole_dm,
map_one_hole_dm_to_one_pdm,
map_two_pdm_to_particle_hole_dm,
map_two_hole_dm_to_two_pdm,
map_two_hole_dm_to_one_hole_dm,
map_particle_hole_dm_to_one_pdm,
map_particle_hole_dm_to_two_pdm)
from ._slater_determinants import (gaussian_state_preparation_circuit,
slater_determinant_preparation_circuit)
from ._special_operators import (majorana_operator, number_operator,
s_minus_operator, s_plus_operator,
s_squared_operator,
sx_operator, sy_operator, sz_operator,
up_index, down_index)
from ._testing_utils import (random_antisymmetric_matrix,
random_diagonal_coulomb_hamiltonian,
random_hermitian_matrix,
random_interaction_operator,
random_quadratic_hamiltonian,
random_unitary_matrix)
from ._trotter_error import error_bound, error_operator
from ._trotter_exp_to_qgates import (pauli_exp_to_qasm,
trotterize_exp_qubop_to_qasm,
trotter_operator_grouping)
from ._unitary_cc import (uccsd_convert_amplitude_format,
uccsd_generator,
uccsd_singlet_generator,
uccsd_singlet_get_packed_amplitudes,
uccsd_singlet_paramsize)
# Imports out of alphabetical order to avoid circular dependency.
from ._jellium_hf_state import hartree_fock_state_jellium
from ._low_depth_trotter_error import (
low_depth_second_order_trotter_error_bound,
low_depth_second_order_trotter_error_operator)
from ._sparse_tools import (boson_ladder_sparse,
boson_operator_sparse,
expectation,
expectation_computational_basis_state,
get_density_matrix,
get_gap,
get_ground_state,
get_linear_qubit_operator_diagonal,
inner_product,
jordan_wigner_sparse,
jw_configuration_state,
jw_hartree_fock_state,
jw_get_gaussian_state,
jw_get_ground_state_at_particle_number,
jw_number_restrict_operator,
jw_number_restrict_state,
jw_slater_determinant,
jw_sz_restrict_operator,
jw_sz_restrict_state,
qubit_operator_sparse,
sparse_eigenspectrum,
variance)
from ._davidson import Davidson, DavidsonOptions, QubitDavidson, SparseDavidson
from ._linear_qubit_operator import (
LinearQubitOperator,
LinearQubitOperatorOptions,
ParallelLinearQubitOperator,
generate_linear_qubit_operator,
)
from ._pubchem import geometry_from_pubchem
| 45.566372 | 79 | 0.575257 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._bch_expansion import bch_expand
from ._channel_state import (amplitude_damping_channel, dephasing_channel,
depolarizing_channel)
from ._commutators import anticommutator, commutator, double_commutator
from ._grid import Grid
from ._lcu_util import (lambda_norm,
preprocess_lcu_coefficients_for_reversible_sampling)
from ._operator_utils import (chemist_ordered, count_qubits,
eigenspectrum, fourier_transform,
freeze_orbitals, get_file_path,
hermitian_conjugated, inline_sum,
inverse_fourier_transform,
is_hermitian, is_identity,
normal_ordered, prune_unused_indices,
reorder, up_then_down,
load_operator, save_operator)
from ._rdm_mapping_functions import (kronecker_delta,
map_two_pdm_to_two_hole_dm,
map_two_pdm_to_one_pdm,
map_one_pdm_to_one_hole_dm,
map_one_hole_dm_to_one_pdm,
map_two_pdm_to_particle_hole_dm,
map_two_hole_dm_to_two_pdm,
map_two_hole_dm_to_one_hole_dm,
map_particle_hole_dm_to_one_pdm,
map_particle_hole_dm_to_two_pdm)
from ._slater_determinants import (gaussian_state_preparation_circuit,
slater_determinant_preparation_circuit)
from ._special_operators import (majorana_operator, number_operator,
s_minus_operator, s_plus_operator,
s_squared_operator,
sx_operator, sy_operator, sz_operator,
up_index, down_index)
from ._testing_utils import (random_antisymmetric_matrix,
random_diagonal_coulomb_hamiltonian,
random_hermitian_matrix,
random_interaction_operator,
random_quadratic_hamiltonian,
random_unitary_matrix)
from ._trotter_error import error_bound, error_operator
from ._trotter_exp_to_qgates import (pauli_exp_to_qasm,
trotterize_exp_qubop_to_qasm,
trotter_operator_grouping)
from ._unitary_cc import (uccsd_convert_amplitude_format,
uccsd_generator,
uccsd_singlet_generator,
uccsd_singlet_get_packed_amplitudes,
uccsd_singlet_paramsize)
# Imports out of alphabetical order to avoid circular dependency.
from ._jellium_hf_state import hartree_fock_state_jellium
from ._low_depth_trotter_error import (
low_depth_second_order_trotter_error_bound,
low_depth_second_order_trotter_error_operator)
from ._sparse_tools import (boson_ladder_sparse,
boson_operator_sparse,
expectation,
expectation_computational_basis_state,
get_density_matrix,
get_gap,
get_ground_state,
get_linear_qubit_operator_diagonal,
inner_product,
jordan_wigner_sparse,
jw_configuration_state,
jw_hartree_fock_state,
jw_get_gaussian_state,
jw_get_ground_state_at_particle_number,
jw_number_restrict_operator,
jw_number_restrict_state,
jw_slater_determinant,
jw_sz_restrict_operator,
jw_sz_restrict_state,
qubit_operator_sparse,
sparse_eigenspectrum,
variance)
from ._davidson import Davidson, DavidsonOptions, QubitDavidson, SparseDavidson
from ._linear_qubit_operator import (
LinearQubitOperator,
LinearQubitOperatorOptions,
ParallelLinearQubitOperator,
generate_linear_qubit_operator,
)
from ._pubchem import geometry_from_pubchem
| 0 | 0 | 0 |
eb9d2ce0896069df10b60597c4a04e16909f1f51 | 18,579 | py | Python | vespid/data/neo4j_tools/neo4j_arrow.py | QS-2/VESPID | f7d27f0c4aa99229d12d90fce9a52a48339e0a59 | [
"Apache-2.0"
] | 16 | 2021-09-11T11:16:05.000Z | 2022-03-14T23:09:17.000Z | vespid/data/neo4j_tools/neo4j_arrow.py | QS-2/VESPID | f7d27f0c4aa99229d12d90fce9a52a48339e0a59 | [
"Apache-2.0"
] | 6 | 2021-09-24T23:17:28.000Z | 2022-02-15T21:18:31.000Z | vespid/data/neo4j_tools/neo4j_arrow.py | QS-2/VESPID | f7d27f0c4aa99229d12d90fce9a52a48339e0a59 | [
"Apache-2.0"
] | 1 | 2022-02-22T14:44:21.000Z | 2022-02-22T14:44:21.000Z | import base64
import json
import struct
from collections import abc
from enum import Enum
from os import environ as env
from time import sleep, time
from typing import cast, Any, Dict, Iterable, Iterator, List, Optional, \
Tuple, TypeVar, Union
import pyarrow as pa
from pyarrow.lib import ArrowKeyError, RecordBatch, Schema, Table
import pyarrow.flight as flight
# Known job types supported by the Java plugin.
_JOB_BULK_IMPORT = "import.bulk"
_JOB_CYPHER = "cypher.read"
_JOB_GDS_READ = "gds.read" # TODO: rename
_JOB_GDS_WRITE_NODES = "gds.write.nodes"
_JOB_GDS_WRITE_RELS = "gds.write.relationships"
_JOB_KHOP = "khop"
_JOB_STATUS = "job.status"
_JOB_INFO_VERSION = "info.version"
_JOB_INFO_STATUS = "info.jobs"
# These defaults should stay in sync with those in the Java plugin.
# See org.neo4j.arrow.Neo4jDefaults for reference.
_ID = 'ID'
_LABELS = 'LABELS'
_START_ID = 'START_ID'
_END_ID = 'END_ID'
_TYPE = 'TYPE'
_DEFAULT_HOST = env.get('NEO4J_ARROW_HOST', 'localhost')
_DEFAULT_PORT = int(env.get('NEO4J_ARROW_PORT', '9999'))
pa.enable_signal_handlers(True)
TableLike = TypeVar('TableLike', bound=Union[RecordBatch, Table])
class JobStatus(Enum):
    """Represents the state of a server-side job."""
    UNKNOWN = "UNKNOWN"
    INITIALIZING = "INITIALIZING"
    PENDING = "PENDING"
    COMPLETE = "COMPLETE"
    ERROR = "ERROR"
    PRODUCING = "PRODUCING"
    @classmethod
    def from_str(cls, s: str) -> 'JobStatus':
        """Parse a server-provided status string.

        Unrecognized values map to UNKNOWN rather than raising. (This method
        was missing: a bare ``@classmethod`` decorator was left dangling with
        no method body — a SyntaxError — and ``Neo4jArrow.status()`` calls
        ``JobStatus.from_str``.)
        """
        try:
            # Enum lookup by value; raises ValueError for unknown strings.
            return cls(s)
        except ValueError:
            return cls.UNKNOWN
def _coerce_ticket(maybe_ticket: Union[bytes, flight.Ticket]) -> flight.Ticket:
    """
    Coerce the given value into a Flight Ticket.
    :param maybe_ticket: possible Ticket
    :return: a Ticket
    """
    # Already a Ticket? Hand it back unchanged.
    if type(maybe_ticket) is flight.Ticket:
        return maybe_ticket
    # Otherwise assume serialized bytes and deserialize.
    return flight.Ticket.deserialize(cast(bytes, maybe_ticket))
def _coerce_table(data: Union[Dict[Any, Any],
                              TableLike,
                              flight.FlightStreamChunk]) -> Table:
    """
    Convert a table-like value (dict, RecordBatch, Table, or Flight stream
    chunk) into a PyArrow Table.
    :param data: coercible value
    :return: a PyArrow Table
    """
    kind = type(data)
    if kind is Table:
        return data
    if kind is dict:
        return Table.from_pydict(data)
    if kind is RecordBatch:
        return Table.from_batches([data])
    if kind is flight.FlightStreamChunk:
        # TODO: this is a pretty wasteful wrapping
        return Table.from_batches([data.data])
    # Fall back to letting PyArrow figure it out.
    return pa.table(data=data)
class Neo4jArrow:
    """
    A client for interacting with a remote Neo4j Arrow service. Useful for
    working with large datasets, retrieving bulk data, and async batch jobs!
    """
    # TODO: rename camelCase args to snake case
    _client: flight.FlightClient  # underlying Arrow Flight client
    _location: flight.Location  # gRPC endpoint of the service
    _options: flight.FlightCallOptions  # per-call options carrying the auth header
    def __init__(self, user: str, password: str,
                 location: Tuple[str, int] = (_DEFAULT_HOST, _DEFAULT_PORT),
                 tls: bool = False, verify_tls: bool = True):
        """
        Create a new Neo4jArrow client. Note: the client connects
        :param user: Neo4j user to authenticate as
        :param password: password for user
        :param location: tuple of host, port (optional)
        :param tls: use TLS?
        :param verify_tls: verify server identity in x.509 certificate?
        """
        # HTTP basic-auth credentials, sent as a gRPC metadata header.
        token = base64.b64encode(f'{user}:{password}'.encode('utf8'))
        self._options = flight.FlightCallOptions(headers=[
            (b'authorization', b'Basic ' + token)
        ])
        host, port = location
        if tls:
            self._location = flight.Location.for_grpc_tls(host, port)
        else:
            self._location = flight.Location.for_grpc_tcp(host, port)
        self._client = flight.FlightClient(self._location,
                                           disable_server_verification=(not verify_tls))
    def list_actions(self) -> List[flight.Action]:
        """
        List all actions available on the server.
        :return: list of all available Actions
        """
        return list(self._client.list_actions(self._options))
    def list_flights(self) -> List[flight.FlightInfo]:
        """
        List all known/existing Flights on the server.
        :return: list of Flights
        """
        return list(self._client.list_flights(None, self._options))
    def info(self) -> Dict[str, Any]:
        """
        Get info on the Neo4j Arrow server
        :return: metadata describing Neo4j Arrow server (e.g. version)
        :raises RuntimeError: if the server response is not a JSON object
        """
        result = self._client.do_action(
            (_JOB_INFO_VERSION, b''), self._options)
        obj = json.loads(next(result).body.to_pybytes())
        if type(obj) is dict:
            return obj
        raise RuntimeError("server returned unexpected data format")
    def _submit(self, action: Union[Tuple[str, bytes],
                                    flight.Action]) -> flight.Ticket:
        """Attempt to ticket the given action/job"""
        results = self._client.do_action(action, self._options)
        # The server replies with a serialized Ticket for the new job.
        return flight.Ticket.deserialize((next(results).body.to_pybytes()))
    def cypher(self, cypher: str, database: str = 'neo4j',
               params: Optional[Dict[str, Any]] = None) -> flight.Ticket:
        """Submit a Cypher job with optional parameters. Returns a ticket."""
        cypher_bytes = cypher.encode('utf8')
        db_bytes = database.encode('utf8')
        params_bytes = json.dumps(params or {}).encode('utf8')
        # Our CypherMessage format is simple:
        #   - 16 bit unsigned length of the cypher byte string
        #   - the cypher byte string payload
        #   - 16 bit unsigned length of the database byte string
        #   - the database byte string payload
        #   - 16 bit unsigned length of the param json payload
        #   - the param json byte string payload
        fmt = f"!H{len(cypher_bytes)}sH{len(db_bytes)}sH{len(params_bytes)}s"
        buffer = struct.pack(fmt,
                             len(cypher_bytes), cypher_bytes,
                             len(db_bytes), db_bytes,
                             len(params_bytes), params_bytes)
        return self._submit((_JOB_CYPHER, buffer))
    def gds_nodes(self, graph: str, database: str = 'neo4j',
                  properties: Optional[List[str]] = None,
                  filters: Optional[List[str]] = None,
                  node_id: str = '',
                  extra: Optional[Dict[str, Any]] = None) -> flight.Ticket:
        """Submit a GDS job for streaming Node properties. Returns a ticket."""
        params = {
            'db': database,
            'graph': graph,
            'type': 'node',
            'node_id': node_id,
            'properties': properties or [],
            'filters': filters or [],
        }
        # Caller-supplied extras can override any of the defaults above.
        params.update(extra or {})
        params_bytes = json.dumps(params).encode('utf8')
        return self._submit((_JOB_GDS_READ, params_bytes))
    def gds_write_nodes(self, graph: str, database: str = 'neo4j',
                        id_field: str = _ID,
                        labels_field: str = _LABELS) -> flight.Ticket:
        """Submit a GDS Write Job for creating Nodes and Node Properties."""
        # NOTE(review): these keys are camelCase while gds_write_relationships
        # uses snake_case — confirm against the server-side message schema
        # (see the class-level TODO).
        params = {
            'db': database,
            'graph': graph,
            'idField': id_field,
            'labelsField': labels_field,
        }
        params_bytes = json.dumps(params).encode('utf8')
        return self._submit((_JOB_GDS_WRITE_NODES, params_bytes))
    def gds_write_relationships(self, graph: str, database: str = 'neo4j',
                                source_field: str = _START_ID,
                                target_field: str = _END_ID,
                                type_field: str = _TYPE) -> flight.Ticket:
        """Submit a GDS Write Job for creating Rels and Rel Properties."""
        params = {
            'db': database,
            'graph': graph,
            'source_field': source_field,
            'target_field': target_field,
            'type_field': type_field,
        }
        params_bytes = json.dumps(params).encode('utf8')
        return self._submit((_JOB_GDS_WRITE_RELS, params_bytes))
    def gds_relationships(self, graph: str, database: str = 'neo4j',
                          properties: Optional[List[str]] = None,
                          filters: Optional[List[str]] = None,
                          node_id: Optional[str] = None,
                          extra: Optional[Dict[str, Any]] = None) -> flight.Ticket:
        """
        Submit a GDS job for retrieving Relationship properties.
        :param graph: name of the GDS graph
        :param database: name of the underlying Neo4j database
        :param properties: relationship properties to retrieve
        :param filters: relationship type filter
        :param node_id: property to use as an alternative node id (default is
        to use the internal opaque id)
        :param extra: additional custom message parameters
        :return: new Ticket
        """
        params = {
            'db': database,
            'graph': graph,
            'type': 'relationship',
            'node_id': node_id or '',
            'properties': properties or [],
            'filters': filters or [],
        }
        params.update(extra or {})
        params_bytes = json.dumps(params).encode('utf8')
        return self._submit((_JOB_GDS_READ, params_bytes))
    def khop(self, graph: str, database: str = 'neo4j',
             node_id: Optional[str] = None, rel_property: str = '_type_',
             extra: Optional[Dict[str, Any]] = None) -> pa.flight.Ticket:
        """
        **Experimental** K-Hop Job support
        :param graph: gds graph to analyze
        :param database: underlying neo4j database
        :param node_id: optional property to use as a logical node id
        :param rel_property: special relationship property used to encode
        orientation of the edge
        :param extra: any extra k/v pairs for the KhopMessage
        :return: ticket to a new KHop job
        """
        params = {
            'db': database,
            'graph': graph,
            'node_id': node_id or '',
            'type': 'khop',
            'properties': [rel_property],
            'filters': [],
        }
        params.update(extra or {})
        params_bytes = json.dumps(params).encode('utf8')
        return self._submit((_JOB_GDS_READ, params_bytes))
    def status(self, ticket: Union[bytes, flight.Ticket]) -> JobStatus:
        """
        Inspect the status a server-side Job associated with a given Ticket.
        :param ticket: Optional Ticket for filtering Jobs
        :return: list of tuples of Job ID (a string) and Job Status
        """
        body = _coerce_ticket(ticket).serialize()
        action = (_JOB_STATUS, body)
        results = self._client.do_action(action, self._options)
        status = next(results).body.to_pybytes().decode('utf8')
        return JobStatus.from_str(status)
    def wait_for_job(self, ticket: Union[bytes, pa.flight.Ticket],
                     desired: JobStatus = JobStatus.PRODUCING,
                     must_exist: bool = True,
                     timeout: Optional[int] = None) -> bool:
        """Block until a given job (specified by a ticket) reaches a status.

        :param ticket: ticket of the job to poll
        :param desired: status to wait for
        :param must_exist: if True, give up when the server has no such job
        :param timeout: max seconds to wait; None means effectively forever
        :return: True if the desired status was reached, False otherwise
        """
        start = time()
        # Sentinel: ~1 year in seconds, i.e. effectively "no timeout".
        timeout = timeout or (1 << 25)  # well beyond someone's patience
        while time() - start < timeout:
            try:
                current = self.status(ticket)
                if current == desired:
                    return True
            except ArrowKeyError:
                # Server has no job for this ticket (yet, or anymore).
                if must_exist:
                    print(f'no job found for ticket {ticket!r}')
                    return False
            sleep(1)  # TODO: is 1s too fast? too slow? just right?
        return False
    def stream(self, ticket: Union[bytes, flight.Ticket],
               timeout: Optional[int] = None) -> flight.FlightStreamReader:
        """
        Read the stream associated with the given ticket.
        :param ticket: ticket to an active Read Job
        :param timeout: timeout to wait for stream to start producing
        :return: new FlightStreamReader for consuming the results
        """
        ticket = _coerce_ticket(ticket)
        # Block until the job is PRODUCING before opening the stream.
        self.wait_for_job(ticket, timeout=timeout)
        return self._client.do_get(ticket, self._options)
    def put(self, ticket: Union[bytes, flight.Ticket],
            data: Union[Dict[Any, Any], TableLike, Iterable[TableLike],
                        Iterator[TableLike], flight.FlightStreamReader],
            schema: Optional[Schema] = None,
            metadata: Optional[Dict[Any, Any]] = None) -> Tuple[int, int]:
        """
        Send data to the server for the corresponding Flight.

        Dispatches to the appropriate put_* helper based on the type of
        ``data``.
        :param ticket: a Ticket to a Flight stream
        :param data: the data to stream to the server
        :param schema: optional overriding Schema for the stream
        :param metadata: optional metadata to append to the stream's Schema
        :return: number of rows sent, number of bytes sent
        """
        ticket = _coerce_ticket(ticket)
        if isinstance(data, flight.FlightStreamReader):
            # XXX must come first as it's also an instance of Iterable!
            return self.put_stream_from_reader(ticket, data, schema, metadata)
        elif isinstance(data, (abc.Iterable, abc.Iterator)):
            return self.put_stream_batches(ticket, data, schema, metadata)
        return self.put_stream(ticket, data, metadata)
    def put_stream(self, ticket: Union[bytes, flight.Ticket],
                   data: Union[Dict[Any, Any], TableLike],
                   metadata: Optional[Dict[Any, Any]] = None) -> Tuple[int, int]:
        """
        Write a stream to the server
        :param ticket: ticket for the associated Flight
        :param data: Table or convertible table
        :param metadata: optional metadata to include in the Table Schema
        :return: number of rows and number of bytes transmitted (0, 0 on error)
        """
        table = _coerce_table(data)
        ticket = _coerce_ticket(ticket)
        if metadata:
            schema = table.schema.with_metadata(metadata)
            table = table.replace_schema_metadata(schema.metadata)
        try:
            descriptor = flight.FlightDescriptor.for_command(
                ticket.serialize())
            writer, _ = self._client.do_put(descriptor, table.schema,
                                            self._options)
            # TODO: configurable or auto-chosen chunksize
            writer.write_table(table, max_chunksize=8192)
            writer.close()
            # TODO: server should be telling us what the results were.
            # We shouldn't assume all data was accepted.
            return table.num_rows, table.nbytes
        except Exception as e:
            # Best-effort: report the failure and signal "nothing written".
            print(f"put_stream error: {e}")
            return 0, 0
    def put_stream_batches(self, ticket: flight.Ticket,
                           batches: Union[Iterable[TableLike],
                                          Iterator[TableLike]],
                           schema: Optional[Schema] = None,
                           metadata: Optional[Dict[Any, Any]] = None) \
            -> Tuple[int, int]:
        """
        Write a stream using a batch producer.
        :param ticket: ticket for the Flight
        :param batches: a RecordBatchStream producing the input data
        :param schema: optional overriding Schema for the stream
        :param metadata: optional metadata to append to the Schema
        :return: number of rows and number of bytes transmitted
        """
        descriptor = flight.FlightDescriptor.for_command(ticket.serialize())
        batches = iter(batches)
        # peek and get our schema, updating with any overrides desired
        batch = next(batches)
        table = _coerce_table(batch)
        schema = schema or table.schema
        if metadata:
            schema = schema.with_metadata(metadata)
        writer, _ = self._client.do_put(descriptor, schema, self._options)
        try:
            # Write the peeked first batch, then drain the rest.
            writer.write_table(table)
            rows, nbytes = len(batch), batch.nbytes
            for batch in batches:
                writer.write_table(_coerce_table(batch))
                nbytes += batch.nbytes
                rows += len(batch)
        finally:
            writer.close()
        print(f"wrote {rows:,} rows, {round(nbytes / (1 << 20), 2):,} MiB")
        return rows, nbytes
    def put_stream_from_reader(self, ticket: flight.Ticket,
                               reader: flight.FlightStreamReader,
                               schema: Optional[Schema] = None,
                               metadata: Optional[Dict[Any, Any]] = None) \
            -> Tuple[int, int]:
        """
        Relay an existing Arrow Flight stream provided by the given reader.
        :param ticket: ticket for the destination Flight
        :param reader: source stream to relay, chunk by chunk
        :param schema: optional overriding Schema for the outgoing stream
        :param metadata: optional metadata to append to the Schema
        :return: number of rows and number of bytes transmitted
        """
        descriptor = flight.FlightDescriptor.for_command(ticket.serialize())
        chunk_stream = iter(reader)
        # Peek at the first chunk to establish the schema.
        table = _coerce_table(next(chunk_stream))
        schema = schema or table.schema
        if metadata:
            schema = schema.with_metadata(metadata)
        writer, _ = self._client.do_put(descriptor, schema, self._options)
        try:
            writer.write_table(table)
            rows, nbytes = len(table), table.nbytes
            for chunk in chunk_stream:
                table = _coerce_table(chunk)
                writer.write_table(table)
                nbytes += table.nbytes
                rows += len(table)
        finally:
            writer.close()
        print(f"wrote {rows:,} rows, {round(nbytes / (1 << 20), 2):,} MiB")
        return rows, nbytes
| 39.698718 | 88 | 0.591097 | import base64
import json
import struct
from collections import abc
from enum import Enum
from os import environ as env
from time import sleep, time
from typing import cast, Any, Dict, Iterable, Iterator, List, Optional, \
Tuple, TypeVar, Union
import pyarrow as pa
from pyarrow.lib import ArrowKeyError, RecordBatch, Schema, Table
import pyarrow.flight as flight
# Known job types supported by the Java plugin.
_JOB_BULK_IMPORT = "import.bulk"
_JOB_CYPHER = "cypher.read"
_JOB_GDS_READ = "gds.read" # TODO: rename
_JOB_GDS_WRITE_NODES = "gds.write.nodes"
_JOB_GDS_WRITE_RELS = "gds.write.relationships"
_JOB_KHOP = "khop"
_JOB_STATUS = "job.status"
_JOB_INFO_VERSION = "info.version"
_JOB_INFO_STATUS = "info.jobs"
# These defaults should stay in sync with those in the Java plugin.
# See org.neo4j.arrow.Neo4jDefaults for reference.
_ID = 'ID'
_LABELS = 'LABELS'
_START_ID = 'START_ID'
_END_ID = 'END_ID'
_TYPE = 'TYPE'
_DEFAULT_HOST = env.get('NEO4J_ARROW_HOST', 'localhost')
_DEFAULT_PORT = int(env.get('NEO4J_ARROW_PORT', '9999'))
pa.enable_signal_handlers(True)
TableLike = TypeVar('TableLike', bound=Union[RecordBatch, Table])
class JobStatus(Enum):
    """Represents the state of a server-side job."""
    UNKNOWN = "UNKNOWN"
    INITIALIZING = "INITIALIZING"
    PENDING = "PENDING"
    COMPLETE = "COMPLETE"
    ERROR = "ERROR"
    PRODUCING = "PRODUCING"
    @classmethod
    def from_str(cls, s: str) -> 'JobStatus':
        """Parse a server-provided status string; unrecognized values map to
        UNKNOWN rather than raising.

        Uses Enum's built-in by-value lookup instead of a hand-rolled
        linear scan over the members.
        """
        try:
            return cls(s)
        except ValueError:
            return cls.UNKNOWN
def _coerce_ticket(maybe_ticket: Union[bytes, flight.Ticket]) -> flight.Ticket:
    """
    Coerce the given value into a Flight Ticket.

    :param maybe_ticket: either an actual Ticket or its serialized bytes
    :return: a Ticket
    """
    if type(maybe_ticket) is flight.Ticket:
        return maybe_ticket
    # Anything else is assumed to be the serialized form.
    return flight.Ticket.deserialize(cast(bytes, maybe_ticket))
def _coerce_table(data: Union[Dict[Any, Any],
                              TableLike,
                              flight.FlightStreamChunk]) -> Table:
    """
    Coerce a table-like value into a PyArrow Table.

    Accepts a plain dict of columns, a RecordBatch, a Table (returned
    unchanged), or a FlightStreamChunk wrapping a RecordBatch; anything
    else is handed to ``pa.table`` as a last resort.

    :param data: coercible value
    :return: a PyArrow Table
    """
    # Use isinstance (not `type(...) is`) so subclasses of these types are
    # coerced the same way as their base types instead of falling through
    # to the generic pa.table() path.
    if isinstance(data, Table):
        return data
    if isinstance(data, dict):
        return Table.from_pydict(data)
    if isinstance(data, RecordBatch):
        return Table.from_batches([data])
    if isinstance(data, flight.FlightStreamChunk):
        # TODO: this is a pretty wasteful wrapping
        return Table.from_batches([data.data])
    # Last resort: let pyarrow attempt the conversion itself.
    return pa.table(data=data)
class Neo4jArrow:
    """
    A client for interacting with a remote Neo4j Arrow service. Useful for
    working with large datasets, retrieving bulk data, and async batch jobs!
    """
    # TODO: rename camelCase args to snake case
    _client: flight.FlightClient
    _location: flight.Location
    _options: flight.FlightCallOptions

    def __init__(self, user: str, password: str,
                 location: Tuple[str, int] = (_DEFAULT_HOST, _DEFAULT_PORT),
                 tls: bool = False, verify_tls: bool = True):
        """
        Create a new Neo4jArrow client. Note: the underlying FlightClient
        is constructed immediately.

        :param user: Neo4j user to authenticate as
        :param password: password for user
        :param location: tuple of host, port (optional)
        :param tls: use TLS?
        :param verify_tls: verify server identity in x.509 certificate?
        """
        # HTTP basic auth header, sent with every call via _options.
        token = base64.b64encode(f'{user}:{password}'.encode('utf8'))
        self._options = flight.FlightCallOptions(headers=[
            (b'authorization', b'Basic ' + token)
        ])
        host, port = location
        if tls:
            self._location = flight.Location.for_grpc_tls(host, port)
        else:
            self._location = flight.Location.for_grpc_tcp(host, port)
        self._client = flight.FlightClient(self._location,
                                           disable_server_verification=(not verify_tls))

    def list_actions(self) -> List[flight.Action]:
        """
        List all actions available on the server.

        :return: list of all available Actions
        """
        return list(self._client.list_actions(self._options))

    def list_flights(self) -> List[flight.FlightInfo]:
        """
        List all known/existing Flights on the server.

        :return: list of Flights
        """
        return list(self._client.list_flights(None, self._options))

    def info(self) -> Dict[str, Any]:
        """
        Get info on the Neo4j Arrow server.

        :return: metadata describing Neo4j Arrow server (e.g. version)
        :raises RuntimeError: if the server response is not a JSON object
        """
        result = self._client.do_action(
            (_JOB_INFO_VERSION, b''), self._options)
        obj = json.loads(next(result).body.to_pybytes())
        if isinstance(obj, dict):
            return obj
        raise RuntimeError("server returned unexpected data format")

    def _submit(self, action: Union[Tuple[str, bytes],
                                    flight.Action]) -> flight.Ticket:
        """Attempt to ticket the given action/job."""
        results = self._client.do_action(action, self._options)
        # The server replies with a serialized Ticket identifying the job.
        return flight.Ticket.deserialize((next(results).body.to_pybytes()))

    def cypher(self, cypher: str, database: str = 'neo4j',
               params: Optional[Dict[str, Any]] = None) -> flight.Ticket:
        """Submit a Cypher job with optional parameters. Returns a ticket."""
        cypher_bytes = cypher.encode('utf8')
        db_bytes = database.encode('utf8')
        params_bytes = json.dumps(params or {}).encode('utf8')
        # Our CypherMessage format is simple:
        #   - 16 bit unsigned length of the cypher byte string
        #   - the cypher byte string payload
        #   - 16 bit unsigned length of the database byte string
        #   - the database byte string payload
        #   - 16 bit unsigned length of the param json payload
        #   - the param json byte string payload
        fmt = f"!H{len(cypher_bytes)}sH{len(db_bytes)}sH{len(params_bytes)}s"
        buffer = struct.pack(fmt,
                             len(cypher_bytes), cypher_bytes,
                             len(db_bytes), db_bytes,
                             len(params_bytes), params_bytes)
        return self._submit((_JOB_CYPHER, buffer))

    def gds_nodes(self, graph: str, database: str = 'neo4j',
                  properties: Optional[List[str]] = None,
                  filters: Optional[List[str]] = None,
                  node_id: str = '',
                  extra: Optional[Dict[str, Any]] = None) -> flight.Ticket:
        """Submit a GDS job for streaming Node properties. Returns a ticket."""
        params = {
            'db': database,
            'graph': graph,
            'type': 'node',
            'node_id': node_id,
            'properties': properties or [],
            'filters': filters or [],
        }
        params.update(extra or {})
        params_bytes = json.dumps(params).encode('utf8')
        return self._submit((_JOB_GDS_READ, params_bytes))

    def gds_write_nodes(self, graph: str, database: str = 'neo4j',
                        id_field: str = _ID,
                        labels_field: str = _LABELS) -> flight.Ticket:
        """Submit a GDS Write Job for creating Nodes and Node Properties."""
        params = {
            'db': database,
            'graph': graph,
            'idField': id_field,
            'labelsField': labels_field,
        }
        params_bytes = json.dumps(params).encode('utf8')
        return self._submit((_JOB_GDS_WRITE_NODES, params_bytes))

    def gds_write_relationships(self, graph: str, database: str = 'neo4j',
                                source_field: str = _START_ID,
                                target_field: str = _END_ID,
                                type_field: str = _TYPE) -> flight.Ticket:
        """Submit a GDS Write Job for creating Rels and Rel Properties."""
        params = {
            'db': database,
            'graph': graph,
            'source_field': source_field,
            'target_field': target_field,
            'type_field': type_field,
        }
        params_bytes = json.dumps(params).encode('utf8')
        return self._submit((_JOB_GDS_WRITE_RELS, params_bytes))

    def gds_relationships(self, graph: str, database: str = 'neo4j',
                          properties: Optional[List[str]] = None,
                          filters: Optional[List[str]] = None,
                          node_id: Optional[str] = None,
                          extra: Optional[Dict[str, Any]] = None) -> flight.Ticket:
        """
        Submit a GDS job for retrieving Relationship properties.

        :param graph: name of the GDS graph
        :param database: name of the underlying Neo4j database
        :param properties: relationship properties to retrieve
        :param filters: relationship type filter
        :param node_id: property to use as an alternative node id (default is
            to use the internal opaque id)
        :param extra: additional custom message parameters
        :return: new Ticket
        """
        params = {
            'db': database,
            'graph': graph,
            'type': 'relationship',
            'node_id': node_id or '',
            'properties': properties or [],
            'filters': filters or [],
        }
        params.update(extra or {})
        params_bytes = json.dumps(params).encode('utf8')
        return self._submit((_JOB_GDS_READ, params_bytes))

    def khop(self, graph: str, database: str = 'neo4j',
             node_id: Optional[str] = None, rel_property: str = '_type_',
             extra: Optional[Dict[str, Any]] = None) -> pa.flight.Ticket:
        """
        **Experimental** K-Hop Job support

        :param graph: gds graph to analyze
        :param database: underlying neo4j database
        :param node_id: optional property to use as a logical node id
        :param rel_property: special relationship property used to encode
            orientation of the edge
        :param extra: any extra k/v pairs for the KhopMessage
        :return: ticket to a new KHop job
        """
        params = {
            'db': database,
            'graph': graph,
            'node_id': node_id or '',
            'type': 'khop',
            'properties': [rel_property],
            'filters': [],
        }
        params.update(extra or {})
        params_bytes = json.dumps(params).encode('utf8')
        return self._submit((_JOB_GDS_READ, params_bytes))

    def status(self, ticket: Union[bytes, flight.Ticket]) -> JobStatus:
        """
        Inspect the status of the server-side Job associated with a Ticket.

        :param ticket: Ticket (or its serialized bytes) of the Job
        :return: current JobStatus of the Job
        """
        body = _coerce_ticket(ticket).serialize()
        action = (_JOB_STATUS, body)
        results = self._client.do_action(action, self._options)
        status = next(results).body.to_pybytes().decode('utf8')
        return JobStatus.from_str(status)

    def wait_for_job(self, ticket: Union[bytes, pa.flight.Ticket],
                     desired: JobStatus = JobStatus.PRODUCING,
                     must_exist: bool = True,
                     timeout: Optional[int] = None) -> bool:
        """
        Block until a given job (specified by a ticket) reaches a status.

        :param ticket: Ticket (or serialized bytes) of the Job to poll
        :param desired: status to wait for
        :param must_exist: if True, give up as soon as the server reports
            no job for the ticket; if False, keep polling (the job may not
            have been registered yet)
        :param timeout: max seconds to wait (default: effectively forever)
        :return: True if the desired status was reached, False otherwise
        """
        start = time()
        timeout = timeout or (1 << 25)  # well beyond someone's patience
        while time() - start < timeout:
            try:
                current = self.status(ticket)
                if current == desired:
                    return True
            except ArrowKeyError:
                if must_exist:
                    print(f'no job found for ticket {ticket!r}')
                    return False
            sleep(1)  # TODO: is 1s too fast? too slow? just right?
        return False

    def stream(self, ticket: Union[bytes, flight.Ticket],
               timeout: Optional[int] = None) -> flight.FlightStreamReader:
        """
        Read the stream associated with the given ticket.

        :param ticket: ticket to an active Read Job
        :param timeout: timeout to wait for stream to start producing
        :return: new FlightStreamReader for consuming the results
        """
        ticket = _coerce_ticket(ticket)
        # NOTE(review): a False return (timeout) is ignored here; do_get
        # will surface the failure instead. Kept as-is.
        self.wait_for_job(ticket, timeout=timeout)
        return self._client.do_get(ticket, self._options)

    def put(self, ticket: Union[bytes, flight.Ticket],
            data: Union[Dict[Any, Any], TableLike, Iterable[TableLike],
                        Iterator[TableLike], flight.FlightStreamReader],
            schema: Optional[Schema] = None,
            metadata: Optional[Dict[Any, Any]] = None) -> Tuple[int, int]:
        """
        Send data to the server for the corresponding Flight.

        :param ticket: a Ticket to a Flight stream
        :param data: the data to stream to the server
        :param schema: optional overriding Schema (NOTE(review): only
            honored for iterator/reader inputs; the single-table path goes
            through put_stream, which derives the schema from the table)
        :param metadata: optional metadata to append to the stream's Schema
        :return: number of rows sent, number of bytes sent
        """
        ticket = _coerce_ticket(ticket)
        if isinstance(data, flight.FlightStreamReader):
            # XXX must come first as it's also an instance of Iterable!
            return self.put_stream_from_reader(ticket, data, schema, metadata)
        elif isinstance(data, (abc.Iterable, abc.Iterator)):
            return self.put_stream_batches(ticket, data, schema, metadata)
        return self.put_stream(ticket, data, metadata)

    def put_stream(self, ticket: Union[bytes, flight.Ticket],
                   data: Union[Dict[Any, Any], TableLike],
                   metadata: Optional[Dict[Any, Any]] = None) -> Tuple[int, int]:
        """
        Write a single table to the server.

        :param ticket: ticket for the associated Flight
        :param data: Table or convertible table
        :param metadata: optional metadata to include in the Table Schema
        :return: number of rows and number of bytes transmitted
            (0, 0 on failure -- errors are deliberately swallowed)
        """
        table = _coerce_table(data)
        ticket = _coerce_ticket(ticket)
        if metadata:
            schema = table.schema.with_metadata(metadata)
            table = table.replace_schema_metadata(schema.metadata)
        try:
            descriptor = flight.FlightDescriptor.for_command(
                ticket.serialize())
            writer, _ = self._client.do_put(descriptor, table.schema,
                                            self._options)
            # TODO: configurable or auto-chosen chunksize
            writer.write_table(table, max_chunksize=8192)
            writer.close()
            # TODO: server should be telling us what the results were.
            # We shouldn't assume all data was accepted.
            return table.num_rows, table.nbytes
        except Exception as e:
            # Best-effort by design: report and signal "nothing written".
            print(f"put_stream error: {e}")
            return 0, 0

    def put_stream_batches(self, ticket: flight.Ticket,
                           batches: Union[Iterable[TableLike],
                                          Iterator[TableLike]],
                           schema: Optional[Schema] = None,
                           metadata: Optional[Dict[Any, Any]] = None) \
            -> Tuple[int, int]:
        """
        Write a stream using a batch producer.

        :param ticket: ticket for the Flight
        :param batches: iterable of coercible batches (Table, RecordBatch,
            or dict) producing the input data
        :param schema: optional overriding Schema for the stream
        :param metadata: optional metadata to append to the Schema
        :return: number of rows and number of bytes transmitted
        """
        descriptor = flight.FlightDescriptor.for_command(ticket.serialize())
        batches = iter(batches)
        # Peek at the first batch to derive the schema (honoring overrides).
        table = _coerce_table(next(batches))
        schema = schema or table.schema
        if metadata:
            schema = schema.with_metadata(metadata)
        writer, _ = self._client.do_put(descriptor, schema, self._options)
        try:
            writer.write_table(table)
            # BUG FIX: count rows/bytes on the *coerced* Table. The raw
            # batch may be a plain dict, which _coerce_table accepts but
            # which has neither row-count len() nor an .nbytes attribute.
            rows, nbytes = table.num_rows, table.nbytes
            for batch in batches:
                table = _coerce_table(batch)
                writer.write_table(table)
                nbytes += table.nbytes
                rows += table.num_rows
        finally:
            writer.close()
        print(f"wrote {rows:,} rows, {round(nbytes / (1 << 20), 2):,} MiB")
        return rows, nbytes

    def put_stream_from_reader(self, ticket: flight.Ticket,
                               reader: flight.FlightStreamReader,
                               schema: Optional[Schema] = None,
                               metadata: Optional[Dict[Any, Any]] = None) \
            -> Tuple[int, int]:
        """
        Relay an existing Arrow Flight stream provided by the given reader.

        :param ticket: ticket for the destination Flight
        :param reader: source stream whose chunks are relayed
        :param schema: optional overriding Schema for the outgoing stream
        :param metadata: optional metadata to append to the Schema
        :return: number of rows and number of bytes transmitted
        """
        descriptor = flight.FlightDescriptor.for_command(ticket.serialize())
        chunk_stream = iter(reader)
        # Peek at the first chunk to derive the schema (honoring overrides).
        table = _coerce_table(next(chunk_stream))
        schema = schema or table.schema
        if metadata:
            schema = schema.with_metadata(metadata)
        writer, _ = self._client.do_put(descriptor, schema, self._options)
        try:
            writer.write_table(table)
            rows, nbytes = len(table), table.nbytes
            for chunk in chunk_stream:
                table = _coerce_table(chunk)
                writer.write_table(table)
                nbytes += table.nbytes
                rows += len(table)
        finally:
            writer.close()
        print(f"wrote {rows:,} rows, {round(nbytes / (1 << 20), 2):,} MiB")
        return rows, nbytes

    def bulk_import(self, database: str, id_field: str = _ID,
                    labels_field: str = _LABELS, type_field: str = _TYPE,
                    source_field: str = _START_ID,
                    target_field: str = _END_ID) -> flight.Ticket:
        """
        Submit a bulk import job for the given database.

        :param database: name of the target Neo4j database
        :param id_field: node id column name
        :param labels_field: node labels column name
        :param type_field: relationship type column name
        :param source_field: relationship source id column name
        :param target_field: relationship target id column name
        :return: new Ticket for the import job
        """
        params = {
            'db': database,
            'id_field': id_field,
            'labels_field': labels_field,
            'source_field': source_field,
            'target_field': target_field,
            'type_field': type_field,
        }
        params_bytes = json.dumps(params).encode('utf8')
        return self._submit((_JOB_BULK_IMPORT, params_bytes))
| 752 | 0 | 53 |
9bcd3d00f356e0a72c632ddcd420f05a25d61fab | 86 | py | Python | Data Scientist Career Path/3. Python Fundamentals/9. Python Classes/1. types.py | myarist/Codecademy | 2ba0f104bc67ab6ef0f8fb869aa12aa02f5f1efb | [
"MIT"
] | 23 | 2021-06-06T15:35:55.000Z | 2022-03-21T06:53:42.000Z | Data Scientist Career Path/3. Python Fundamentals/9. Python Classes/1. types.py | shivaniverma1/Data-Scientist | f82939a411484311171465591455880c8e354750 | [
"MIT"
] | null | null | null | Data Scientist Career Path/3. Python Fundamentals/9. Python Classes/1. types.py | shivaniverma1/Data-Scientist | f82939a411484311171465591455880c8e354750 | [
"MIT"
] | 9 | 2021-06-08T01:32:04.000Z | 2022-03-18T15:38:09.000Z | print(type(5))
my_dict = {}
print(type(my_dict))
my_list = []
print(type(my_list)) | 9.555556 | 20 | 0.651163 | print(type(5))
my_dict = {}
print(type(my_dict))
my_list = []
print(type(my_list)) | 0 | 0 | 0 |
5215ed9f3a1e89f1da086e79b175848d870140fb | 6,443 | py | Python | tests/test_util.py | luminantdata/great_expectations | f4b15d20a092fcbef690506f89bddec84b8140ff | [
"Apache-2.0"
] | null | null | null | tests/test_util.py | luminantdata/great_expectations | f4b15d20a092fcbef690506f89bddec84b8140ff | [
"Apache-2.0"
] | null | null | null | tests/test_util.py | luminantdata/great_expectations | f4b15d20a092fcbef690506f89bddec84b8140ff | [
"Apache-2.0"
] | null | null | null | import json
import datetime
import numpy as np
import unittest
import great_expectations as ge
if __name__ == "__main__":
unittest.main() | 45.055944 | 167 | 0.652491 | import json
import datetime
import numpy as np
import unittest
import great_expectations as ge
class TestUtilMethods(unittest.TestCase):
    """Unit tests for great_expectations dataset/partition utilities."""

    def __init__(self, *args, **kwargs):
        super(TestUtilMethods, self).__init__(*args, **kwargs)
        # NOTE(review): fixtures are loaded once per test instance, which is
        # wasteful; consider setUpClass. Kept as-is to preserve behavior.
        self.D = ge.read_csv('./tests/test_sets/distributional_expectations_data_base.csv')
        with open('./tests/test_sets/test_partitions.json', 'r') as file:
            self.test_partitions = json.loads(file.read())

    def test_DotDict(self):
        """Attribute access on DotDict mirrors item access."""
        D = ge.util.DotDict({
            'x': [1, 2, 4],
            'y': [1, 2, 5],
            'z': ['hello', 'jello', 'mello'],
        })
        self.assertEqual(D.x[0], D.y[0])
        self.assertNotEqual(D.x[0], D.z[0])

    def test_continuous_partition_data_error(self):
        """Invalid bin specifications raise ValueError.

        BUG FIX: the original wrapped both calls (plus follow-up asserts)
        in a single assertRaises block, so everything after the first
        raising statement was dead code and the n_bins=-1 case was never
        actually exercised. Each call now gets its own context.
        """
        with self.assertRaises(ValueError):
            ge.dataset.util.continuous_partition_data(
                self.D['norm_0_1'], bins=-1)
        # Presumably n_bins=-1 was meant to raise as well -- TODO confirm.
        with self.assertRaises(ValueError):
            ge.dataset.util.continuous_partition_data(
                self.D['norm_0_1'], n_bins=-1)

    def test_partition_data_norm_0_1(self):
        """Auto-binned partition of norm_0_1 matches the stored fixture."""
        test_partition = ge.dataset.util.continuous_partition_data(self.D.norm_0_1)
        for key, val in self.test_partitions['norm_0_1_auto'].items():
            self.assertEqual(len(val), len(test_partition[key]))
            self.assertTrue(np.allclose(test_partition[key], val))

    def test_partition_data_bimodal(self):
        """Auto-binned partition of bimodal data matches the stored fixture."""
        test_partition = ge.dataset.util.continuous_partition_data(self.D.bimodal)
        for key, val in self.test_partitions['bimodal_auto'].items():
            self.assertEqual(len(val), len(test_partition[key]))
            self.assertTrue(np.allclose(test_partition[key], val))

    def test_kde_partition_data_norm_0_1(self):
        """KDE partition of norm_0_1 matches the stored fixture."""
        test_partition = ge.dataset.util.kde_partition_data(self.D.norm_0_1)
        for key, val in self.test_partitions['norm_0_1_kde'].items():
            self.assertEqual(len(val), len(test_partition[key]))
            self.assertTrue(np.allclose(test_partition[key], val))

    def test_kde_partition_data_bimodal(self):
        """KDE partition of bimodal data matches the stored fixture."""
        test_partition = ge.dataset.util.kde_partition_data(self.D.bimodal)
        for key, val in self.test_partitions['bimodal_kde'].items():
            self.assertEqual(len(val), len(test_partition[key]))
            self.assertTrue(np.allclose(test_partition[key], val))

    def test_categorical_data_fixed(self):
        """Categorical partition weights match the stored fixture per value."""
        test_partition = ge.dataset.util.categorical_partition_data(self.D.categorical_fixed)
        for k in self.test_partitions['categorical_fixed']['values']:
            # Iterate over each categorical value and check that the weights equal those computed originally.
            self.assertEqual(
                self.test_partitions['categorical_fixed']['weights'][self.test_partitions['categorical_fixed']['values'].index(k)],
                test_partition['weights'][test_partition['values'].index(k)])

    def test_categorical_data_na(self):
        """NA values are excluded from categorical partitions."""
        df = ge.dataset.PandasDataSet({
            'my_column': ["A", "B", "A", "B", None]
        })
        partition = ge.dataset.util.categorical_partition_data(df['my_column'])
        self.assertTrue(ge.dataset.util.is_valid_categorical_partition_object(partition))
        self.assertTrue(len(partition['values']) == 2)

    def test_is_valid_partition_object_simple(self):
        """Partitions produced by the library validate as partition objects."""
        self.assertTrue(ge.dataset.util.is_valid_continuous_partition_object(ge.dataset.util.continuous_partition_data(self.D['norm_0_1'])))
        self.assertTrue(ge.dataset.util.is_valid_continuous_partition_object(ge.dataset.util.continuous_partition_data(self.D['bimodal'])))
        self.assertTrue(ge.dataset.util.is_valid_continuous_partition_object(ge.dataset.util.continuous_partition_data(self.D['norm_0_1'], bins='auto')))
        self.assertTrue(ge.dataset.util.is_valid_continuous_partition_object(ge.dataset.util.continuous_partition_data(self.D['norm_0_1'], bins='uniform', n_bins=10)))

    def test_generated_partition_objects(self):
        """Every stored fixture partition validates as a partition object."""
        for partition_name, partition_object in self.test_partitions.items():
            result = ge.dataset.util.is_valid_partition_object(partition_object)
            if not result:
                print("Partition object " + partition_name + " is invalid.")
            self.assertTrue(result)

    def test_is_valid_partition_object_fails_length(self):
        """Mismatched bins/weights lengths are rejected."""
        self.assertFalse(ge.dataset.util.is_valid_partition_object({'bins': [0, 1], 'weights': [0, 1, 2]}))

    def test_is_valid_partition_object_fails_weights(self):
        """Weights that do not sum to 1 are rejected."""
        self.assertFalse(ge.dataset.util.is_valid_partition_object({'bins': [0, 1, 2], 'weights': [0.5, 0.6]}))

    def test_is_valid_partition_object_fails_structure(self):
        """Partitions missing either key are rejected."""
        self.assertFalse(ge.dataset.util.is_valid_partition_object({'weights': [0.5, 0.5]}))
        self.assertFalse(ge.dataset.util.is_valid_partition_object({'bins': [0, 1, 2]}))

    def test_recursively_convert_to_json_serializable(self):
        """Conversion handles numpy types, special floats, sets, and dates."""
        D = ge.dataset.PandasDataSet({
            'x': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
        })
        D.expect_column_values_to_be_in_set("x", set([1, 2, 3, 4, 5, 6, 7, 8, 9]), mostly=.8)
        part = ge.dataset.util.partition_data(D.x)
        D.expect_column_kl_divergence_to_be_less_than("x", part, .6)
        # Dumping this JSON object verifies that everything is serializable
        json.dumps(D.get_expectations_config(), indent=2)
        x = {
            'w': [
                "aaaa", "bbbb", 1.3, 5, 6, 7
            ],
            'x': np.array([1, 2, 3]),
            'y': {
                'alpha': None,
                'beta': np.nan,
                'delta': np.inf,
                'gamma': -np.inf
            },
            'z': set([1, 2, 3, 4, 5]),
            'zz': (1, 2, 3),
            'zzz': [
                datetime.datetime(2017, 1, 1),
                datetime.date(2017, 5, 1),
            ]
        }
        x = ge.dataset.util.recursively_convert_to_json_serializable(x)
        self.assertEqual(type(x['x']), list)
        # Python 2 compatibility shim: `unicode` only exists on py2.
        try:
            x = unicode("abcdefg")
            x = ge.dataset.util.recursively_convert_to_json_serializable(x)
            self.assertEqual(type(x), unicode)
        except NameError:
            pass
if __name__ == "__main__":
unittest.main() | 5,827 | 20 | 447 |
d34b7bb0460a5a77dc69b2aa8301f3c06d1946f8 | 498 | py | Python | misc2_host_device_data_transfer.py | zfang92/pedagogical_cuda_code | 1bc78d575c8d93c8906f361a1d980086fe11a9d1 | [
"MIT"
] | null | null | null | misc2_host_device_data_transfer.py | zfang92/pedagogical_cuda_code | 1bc78d575c8d93c8906f361a1d980086fe11a9d1 | [
"MIT"
] | null | null | null | misc2_host_device_data_transfer.py | zfang92/pedagogical_cuda_code | 1bc78d575c8d93c8906f361a1d980086fe11a9d1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
@author: Zheng Fang
"""
from numba import cuda
import numpy as np
n = int(2e4) # this is not to exceed 10^7
# supply data
data = np.random.normal(size=n, loc=0, scale=1).astype('float64')
# define convenience function
#======================================================================
for _ in range(5):
timer()
"""
%timeit -r 50 -n 10 timer()
"""
| 16.6 | 72 | 0.502008 | # -*- coding: utf-8 -*-
"""
@author: Zheng Fang
"""
from numba import cuda
import numpy as np
n = int(2e4) # this is not to exceed 10^7
# supply data
data = np.random.normal(size=n, loc=0, scale=1).astype('float64')
# define convenience function
def timer():
    """Round-trip `data` host -> device -> host once (benchmarked via %timeit below)."""
    d_data = cuda.to_device(data)
    d_data.copy_to_host()
#======================================================================
for _ in range(5):
timer()
"""
%timeit -r 50 -n 10 timer()
"""
| 53 | 0 | 23 |
77f31cd5ae12bdbd070064c80b7616ce1824e4c6 | 15,156 | py | Python | py/garage/garage/collections.py | clchiou/garage | 446ff34f86cdbd114b09b643da44988cf5d027a3 | [
"MIT"
] | 3 | 2016-01-04T06:28:52.000Z | 2020-09-20T13:18:40.000Z | py/garage/garage/collections.py | clchiou/garage | 446ff34f86cdbd114b09b643da44988cf5d027a3 | [
"MIT"
] | null | null | null | py/garage/garage/collections.py | clchiou/garage | 446ff34f86cdbd114b09b643da44988cf5d027a3 | [
"MIT"
] | null | null | null | """Collections of objects and collection helper functions."""
__all__ = [
'BiDict',
'DictBuilder',
'DictView',
'LoadingDict',
'LruCache',
'NamedTuple',
'SingletonMeta',
'Symbols',
'Trie',
'collect',
'collect_pairs',
'group',
'is_ordered',
'unique',
]
import operator
from collections import (
Mapping,
MutableMapping,
OrderedDict,
UserDict,
)
from garage.assertions import ASSERT
def is_ordered(lst, key=None, strict=False):
    """True if input list is (strictly) ordered."""
    extract = (lambda item: item) if key is None else key
    compare = operator.lt if strict else operator.le
    # Walk adjacent pairs; bail out on the first violation.
    for left, right in zip(lst, lst[1:]):
        if not compare(extract(left), extract(right)):
            return False
    return True
def unique(iterable, key=None):
    """Return unique elements of an iterable."""
    if not key:
        return list(OrderedDict.fromkeys(iterable))
    # Keep the first element seen for each derived key, in order.
    seen = OrderedDict()
    for element in iterable:
        derived = key(element)
        if derived not in seen:
            seen[derived] = element
    return list(seen.values())
def collect(iterable, key=None, value=None):
    """Collect elements by key, preserving order."""
    identity = lambda element: element
    get_key = identity if key is None else key
    get_value = identity if value is None else value
    groups = OrderedDict()
    for element in iterable:
        bucket = groups.setdefault(get_key(element), [])
        bucket.append(get_value(element))
    return groups
def collect_pairs(iterable):
    """Collect pairs, preserving order."""
    first = lambda pair: pair[0]
    second = lambda pair: pair[1]
    return collect(iterable, key=first, value=second)
def group(iterable, key=None):
    """Group elements by key, preserving order."""
    collected = collect(iterable, key=key)
    return list(collected.values())
class DictView(Mapping):
"""Read-only view of a dict-like object."""
class BiDict(MutableMapping):
"""Bidirectional dict."""
class DictBuilder:
"""A fluent-style builder of dict object."""
# It does not support nested if-block at the moment
# Setter methods
class NamedTupleMeta(type):
    """This is similar to typing.NamedTupleMeta but supports base
    classes (so that you may use mixin pattern).
    Note that, to adhere to Liskov Substitution Principle, you cannot
    inherit from multiple subclass of NamedTuple.
    """
    @staticmethod
    def make_new(class_name, field_names):
        """Make a __new__ method for the new class."""
        # Build the argument list text for the generated signature.
        if not field_names:
            args = ''
        elif len(field_names) == 1:
            # `(x)` is the same as `x` and you need the extra comma.
            args = '{},'.format(field_names[0])
        else:
            args = ', '.join(field_names)
        # Generate and exec a real function so the signature shows the
        # actual field names (same trick as collections.namedtuple).
        code = (
            'def __new__(cls, {args}):\n'
            '    """Create new instance of {class_name}({args})."""\n'
            '    return tuple.__new__(cls, ({args}))\n'
            .format(class_name=class_name, args=args)
        )
        variables = {'__name__': class_name}
        exec(code, variables)
        return variables['__new__']
    @staticmethod
    def make_repr(class_name, field_names):
        """Make a __repr__ method for the new class."""
        field_formats = ('%s=%%r' % name for name in field_names)
        repr_format = '%s(%s)' % (class_name, ', '.join(field_formats))
        def __repr__(self):
            """Return a nicely formatted representation string"""
            return repr_format % self
        return __repr__
class SingletonMeta(type):
"""Metaclass to create singleton types."""
class Symbols:
"""Read-only namespace."""
| 29.659491 | 78 | 0.571391 | """Collections of objects and collection helper functions."""
__all__ = [
'BiDict',
'DictBuilder',
'DictView',
'LoadingDict',
'LruCache',
'NamedTuple',
'SingletonMeta',
'Symbols',
'Trie',
'collect',
'collect_pairs',
'group',
'is_ordered',
'unique',
]
import operator
from collections import (
Mapping,
MutableMapping,
OrderedDict,
UserDict,
)
from garage.assertions import ASSERT
def is_ordered(lst, key=None, strict=False):
    """True if input list is (strictly) ordered."""
    extract = (lambda item: item) if key is None else key
    compare = operator.lt if strict else operator.le
    # Walk adjacent pairs; bail out on the first violation.
    for left, right in zip(lst, lst[1:]):
        if not compare(extract(left), extract(right)):
            return False
    return True
def unique(iterable, key=None):
    """Return unique elements of an iterable."""
    if not key:
        return list(OrderedDict.fromkeys(iterable))
    # Keep the first element seen for each derived key, in order.
    seen = OrderedDict()
    for element in iterable:
        derived = key(element)
        if derived not in seen:
            seen[derived] = element
    return list(seen.values())
def collect(iterable, key=None, value=None):
    """Collect elements by key, preserving order."""
    identity = lambda element: element
    get_key = identity if key is None else key
    get_value = identity if value is None else value
    groups = OrderedDict()
    for element in iterable:
        bucket = groups.setdefault(get_key(element), [])
        bucket.append(get_value(element))
    return groups
def collect_pairs(iterable):
    """Collect pairs, preserving order."""
    first = lambda pair: pair[0]
    second = lambda pair: pair[1]
    return collect(iterable, key=first, value=second)
def group(iterable, key=None):
    """Group elements by key, preserving order."""
    collected = collect(iterable, key=key)
    return list(collected.values())
class DictView(Mapping):
    """Read-only view of a dict-like object."""

    def __init__(self, data):
        # Keep a reference (not a copy) so the view tracks mutations of
        # the underlying mapping.
        self._data = data

    def __len__(self):
        return len(self._data)

    def __iter__(self):
        return iter(self._data)

    def __getitem__(self, key):
        return self._data[key]

    def __bool__(self):
        return bool(self._data)

    def __repr__(self):
        return repr(self._data)
class BiDict(MutableMapping):
    """
    Bidirectional dict.

    Maintains a forward key -> value mapping plus a read-only ``inverse``
    view of the value -> key mapping. Values, like keys, must be unique
    and hashable; inserting a value already present under another key
    evicts that other key so both directions stay consistent.
    """

    def __init__(self):
        self._data = {}
        self._inverse = {}
        self.inverse = DictView(self._inverse)

    def __repr__(self):
        return repr(self._data)

    def __bool__(self):
        return bool(self._data)

    def __getitem__(self, key):
        return self._data[key]

    def __setitem__(self, key, value):
        if key in self._data:
            self._inverse.pop(self._data[key])
        # BUG FIX: if `value` is already mapped from a different key, drop
        # that key too; otherwise _data and _inverse fall out of sync and
        # a later __delitem__ of the stale key raises KeyError.
        if value in self._inverse:
            del self._data[self._inverse.pop(value)]
        self._data[key] = value
        self._inverse[value] = key

    def __delitem__(self, key):
        self._inverse.pop(self._data.pop(key))

    def __iter__(self):
        return iter(self._data)

    def __len__(self):
        return len(self._data)
class DictBuilder:
    """A fluent-style builder of dict object."""
    # It does not support nested if-block at the moment
    def __init__(self, data=None):
        """Wrap `data` (or a fresh dict) for in-place fluent mutation."""
        # Don't make a copy because we want to modify it in place
        self.dict = data if data is not None else {}
        # Use finite state machine to parse non-nested if-elif-else
        self._state = None
        # True if we have chosen one of the if-elif-else branch
        self._branch_chosen = False
        # True if we should execute this instruction
        self._predicate = True
    def if_(self, condition):
        """Open an if-block; following setters run only if `condition`."""
        ASSERT.none(self._state)
        self._state = 'if'
        self._branch_chosen = self._predicate = condition
        return self
    def elif_(self, condition):
        """Add an elif branch; active only if no prior branch was chosen."""
        ASSERT.equal(self._state, 'if')
        if self._branch_chosen:
            self._predicate = False
        else:
            self._branch_chosen = self._predicate = condition
        return self
    def else_(self):
        """Add the else branch; active only if no prior branch was chosen."""
        ASSERT.equal(self._state, 'if')
        self._state = 'else'
        if self._branch_chosen:
            self._predicate = False
        else:
            self._branch_chosen = self._predicate = True
        return self
    def end(self):
        """Close the current if/else block and re-enable all setters."""
        ASSERT.in_(self._state, ('if', 'else'))
        self._state = None
        self._branch_chosen = False
        self._predicate = True
        return self
    # Setter methods
    def assert_(self, assertion):
        """Raise AssertionError unless `assertion(dict)` is truthy."""
        if self._predicate:
            if not assertion(self.dict):
                raise AssertionError
        return self
    def setitem(self, key, value):
        """Set `dict[key] = value` (subject to the active branch)."""
        if self._predicate:
            self.dict[key] = value
        return self
    def setdefault(self, key, default):
        """Set `key` to `default` only if absent (subject to the branch)."""
        if self._predicate:
            self.dict.setdefault(key, default)
        return self
    def call(self, key, func):
        """Invoke `func(dict[key])` for its side effects."""
        if self._predicate:
            func(self.dict[key])
        return self
    def call_and_update(self, key, func):
        """Replace `dict[key]` with `func(dict[key])`."""
        if self._predicate:
            self.dict[key] = func(self.dict[key])
        return self
class LoadingDict(UserDict):
    """Dict that lazily computes missing values with a loader function."""

    def __init__(self, load, data=None):
        """
        :param load: callable mapping a key to its value; invoked the
            first time a missing key is looked up
        :param data: optional initial contents
        """
        # BUG FIX / generalization: pass the seed data positionally; the
        # old `**(data or {})` spelling restricted seed keys to strings.
        super().__init__(data or {})
        self.load = load

    def __missing__(self, key):
        value = self.load(key)
        # Cache the result so `load` runs at most once per key.
        self[key] = value
        return value
class LruCache:
    """Bounded mapping-like cache evicting least-recently-used entries."""

    def __init__(self, capacity):
        # Max number of entries retained before eviction kicks in.
        self.capacity = capacity
        # OrderedDict ordered from least- to most-recently used.
        self._cache = OrderedDict()

    def __contains__(self, key):
        return key in self._cache

    def __getitem__(self, key):
        # Pop + reinsert moves the entry to the MRU end (raises KeyError
        # for a missing key, just like a plain lookup would).
        value = self._cache.pop(key)
        self._cache[key] = value
        return value

    def __setitem__(self, key, value):
        # Remove any existing entry first so the write lands at the MRU end.
        self._cache.pop(key, None)
        self._cache[key] = value
        while len(self._cache) > self.capacity:
            # Drop from the LRU end until we are back within capacity.
            self._cache.popitem(last=False)
class NamedTupleMeta(type):
"""This is similar to typing.NamedTupleMeta but supports base
classes (so that you may use mixin pattern).
Note that, to adhere to Liskov Substitution Principle, you cannot
inherit from multiple subclass of NamedTuple.
"""
def __new__(mcs, class_name, bases, namespace):
field_types = OrderedDict()
base_class = None
for base in bases:
if hasattr(base, '_field_types'):
if base_class:
raise TypeError(
'%s inherits from multiple NamedTuple bases' %
class_name
)
base_class = base
field_types.update(base._field_types)
for name, type_ in namespace.get('__annotations__', {}).items():
if name.startswith('_'):
raise ValueError(
'field name starts with underscore: %s' % name)
if name in field_types:
raise ValueError('duplicated field name: %s' % name)
field_types[name] = type_
field_names = tuple(field_types)
defaults = []
defaults_dict = {}
for name in field_names:
if name in namespace:
value = namespace[name]
defaults.append(value)
defaults_dict[name] = value
elif name in base_class._field_defaults:
value = base_class._field_defaults[name]
defaults.append(value)
defaults_dict[name] = value
elif defaults:
raise TypeError(
'non-default field %s appears after default field(s) %s' %
(name, list(defaults_dict.keys()))
)
def set_name(name, value):
"""Set name in namespace and check for overwrites."""
if name in namespace:
import warnings
warnings.warn(
'%s.%s is overwritten' % (class_name, name), stacklevel=3)
namespace[name] = value
set_name('__slots__', ())
set_name('_fields', field_names)
set_name('_field_defaults', defaults_dict)
set_name('_field_types', field_types)
set_name('__new__', mcs.make_new(class_name, field_names))
namespace['__new__'].__defaults__ = tuple(defaults)
# Provide a default __repr__
if '__repr__' not in namespace:
namespace['__repr__'] = mcs.make_repr(class_name, field_names)
# Replace annotation with property
for index, name in enumerate(field_names):
namespace[name] = property(
operator.itemgetter(index),
doc='Alias for field number %d' % index,
)
return super().__new__(mcs, class_name, bases, namespace)
@staticmethod
def make_new(class_name, field_names):
"""Make a __new__ method for the new class."""
if not field_names:
args = ''
elif len(field_names) == 1:
# `(x)` is the same as `x` and you need the extra comma.
args = '{},'.format(field_names[0])
else:
args = ', '.join(field_names)
code = (
'def __new__(cls, {args}):\n'
' """Create new instance of {class_name}({args})."""\n'
' return tuple.__new__(cls, ({args}))\n'
.format(class_name=class_name, args=args)
)
variables = {'__name__': class_name}
exec(code, variables)
return variables['__new__']
    @staticmethod
    def make_repr(class_name, field_names):
        """Make a __repr__ method for the new class."""
        # Pre-render the %-format string once, e.g. "Point(x=%r, y=%r)".
        field_formats = ('%s=%%r' % name for name in field_names)
        repr_format = '%s(%s)' % (class_name, ', '.join(field_formats))
        def __repr__(self):
            """Return a nicely formatted representation string"""
            # `self` is a tuple subclass, so %-formatting consumes its items
            # positionally, one per field.
            return repr_format % self
        return __repr__
class NamedTuple(tuple, metaclass=NamedTupleMeta):
    """Base class for typed named tuples built by NamedTupleMeta.

    Subclasses declare fields; the metaclass converts each field into a
    tuple slot with a read-only property (see the metaclass above).
    """
    # NOTE: super()'s magic relies on the implicit __class__ variable,
    # and thus, if you want to call super(), you must make sure that
    # that method is defined in the right class.
    @classmethod
    def _make(cls, iterable):
        """Make a new object from a sequence or iterable."""
        obj = super().__new__(cls, iterable)
        if len(obj) != len(cls._fields):
            raise TypeError(
                'expect %d arguments but get %d' %
                (len(cls._fields), len(obj))
            )
        return obj
    def _replace(self, **kwargs):
        """Return a new object replacing specified fields with new values."""
        # kwargs.pop(field, current_value) keeps unspecified fields as-is;
        # anything left in kwargs afterwards did not name a real field.
        obj = self._make(map(kwargs.pop, self._fields, self))
        if kwargs:
            raise ValueError('get unexpected field names: %s' % list(kwargs))
        return obj
    def _asdict(self):
        """Return a new OrderedDict which maps field names to their values."""
        return OrderedDict(zip(self._fields, self))
    def __getnewargs__(self):
        """Return self as a plain tuple (used by copy and pickle)."""
        return tuple(self)
class SingletonMeta(type):
    """Metaclass to create singleton types.

    The first instantiation of a class using this metaclass builds a real
    instance; every later call returns that same cached object (and does
    not run __init__ again).
    """
    def __call__(cls, *args, **kwargs):
        # Cache the instance on the class itself under a name-mangled
        # attribute.  No locking: concurrent first calls could race.
        try:
            return cls.__instance
        except AttributeError:
            instance = super().__call__(*args, **kwargs)
            cls.__instance = instance
            return instance
class Symbols:
    """Read-only namespace.

    Accepts positional entries that are either bare strings (the name is
    its own value) or (name, value) pairs, plus keyword entries.  Symbols
    are frozen after construction and iterate in sorted-name order.
    """
    def __init__(self, *nv_pairs, **symbols):
        for pair in nv_pairs:
            if isinstance(pair, str):
                # A bare string names a symbol whose value is itself.
                name, value = pair, pair
            else:
                name, value = pair
            if name in symbols:
                raise ValueError('overwrite name %r' % name)
            if name.startswith('_'):
                raise ValueError('symbol name %r starts with \'_\'' % name)
            symbols[name] = value
        # Return keys in deterministic order (i.e., sorted).
        ordered = OrderedDict()
        for key in sorted(symbols):
            ordered[key] = symbols[key]
        # Bypass our own __setattr__ guard to install the backing dict.
        super().__setattr__('_Symbols__symbols', ordered)
    def __iter__(self):
        return iter(self.__symbols)
    def _asdict(self):
        return self.__symbols.copy()
    def __getitem__(self, name):
        return self.__symbols[name]
    def __getattr__(self, name):
        try:
            return self.__symbols[name]
        except KeyError:
            raise AttributeError(
                '%r object has no attribute %r'
                % (self.__class__.__name__, name)
            ) from None
    def __setattr__(self, name, value):
        raise TypeError('%r object does not support attribute assignment' %
                        self.__class__.__name__)
class Trie:
    """Prefix tree over sequence keys (e.g. strings or tuples).

    A value lives on the node reached by consuming the whole key; ``EMPTY``
    is the sentinel meaning "no value stored at this node".
    """
    # Sentinel for nodes that carry no value of their own.
    EMPTY = object()
    class Node:
        """One trie node: parent link, element -> child map, and a value."""
        def __init__(self, parent, value):
            self.parent = parent
            self.children = {}
            self.value = value
        def get(self, key, exact, default):
            """Look up `key`.  With exact=False, fall back to the value of
            the longest stored prefix (walking parent links upward)."""
            node = self._get_node(key, exact)
            if node is None or (exact and node.value is Trie.EMPTY):
                return default
            # Walk up to the nearest ancestor that actually holds a value.
            while node and node.value is Trie.EMPTY:
                node = node.parent
            return node.value if node else default
        def _get_node(self, key, exact):
            """Descend along `key`; on a miss return None (exact mode) or
            the deepest node reached (prefix mode)."""
            node = self
            for element in key:
                child = node.children.get(element)
                if child is None:
                    return None if exact else node
                node = child
            return node
        def get_values(self, key):
            """Yield (prefix, value) for every stored prefix of `key`,
            shortest first, including `key` itself when it is stored."""
            node = self
            for i, element in enumerate(key):
                if node.value is not Trie.EMPTY:
                    yield key[:i], node.value
                child = node.children.get(element)
                if child is None:
                    break
                node = child
            else:
                # Loop ran to completion: `node` is the full-key node.
                if node.value is not Trie.EMPTY:
                    yield key, node.value
        def values(self):
            """Yield all stored values, visiting children in sorted
            element order (so output order is deterministic)."""
            if self.value is not Trie.EMPTY:
                yield self.value
            children = sorted(self.children.items(), key=lambda kv: kv[0])
            for _, child in children:
                yield from child.values()
        def upsert(self, key, value):
            """Insert or overwrite the value at `key`, creating any missing
            intermediate nodes."""
            node = self
            for i, element in enumerate(key):
                child = node.children.get(element)
                if child is None:
                    # Create the remaining chain of empty nodes in one go.
                    for new_element in key[i:]:
                        new_child = Trie.Node(node, Trie.EMPTY)
                        node.children[new_element] = new_child
                        node = new_child
                    break
                node = child
            node.value = value
    def __init__(self):
        self._root = Trie.Node(None, Trie.EMPTY)
    def get(self, key, default=None, *, exact=True):
        """Return the value at `key`; exact=False allows longest-prefix match."""
        return self._root.get(key, exact, default)
    def get_values(self, key):
        """Yield (prefix, value) pairs for stored prefixes of `key`."""
        return self._root.get_values(key)
    def __getitem__(self, key):
        """Exact lookup; raise KeyError when `key` has no stored value."""
        value = self.get(key, Trie.EMPTY)
        if value is Trie.EMPTY:
            raise KeyError(key)
        return value
    def values(self):
        """Yield every stored value in deterministic (sorted) order."""
        return self._root.values()
    def __setitem__(self, key, value):
        self._root.upsert(key, value)
| 8,975 | 1,524 | 1,118 |
d40ceb086f0dfeadaad0a4a4e7992e8865fb8f3a | 3,831 | py | Python | mpsci/distributions/invchi2.py | WarrenWeckesser/mpsci | 675f0f3b76700529558a3bae2a1b2ca09552233b | [
"BSD-2-Clause"
] | 7 | 2019-03-27T17:25:41.000Z | 2022-03-31T03:55:29.000Z | mpsci/distributions/invchi2.py | WarrenWeckesser/mpsci | 675f0f3b76700529558a3bae2a1b2ca09552233b | [
"BSD-2-Clause"
] | 2 | 2019-05-09T16:09:45.000Z | 2021-01-04T03:55:09.000Z | mpsci/distributions/invchi2.py | WarrenWeckesser/mpsci | 675f0f3b76700529558a3bae2a1b2ca09552233b | [
"BSD-2-Clause"
] | null | null | null | """
Inverse chi-square distribution
-------------------------------
The probability density function for the inverse chi-square
distribution is
f(x, nu) = 2**(-nu/2) / Gamma(nu/2) * x**(-nu/2 - 1) * exp(-1/(2*x))
See the Wikipedia article `"Inverse-chi-squared distribution"
<https://en.wikipedia.org/wiki/Inverse-chi-squared_distribution>`_
for more information. The functions here implement the first
definition given in the wikipedia article. That is, if X has the
chi-square distribution with nu degrees of freedom, then 1/X has the
inverse chi-square distribution with nu degrees of freedom.
"""
import re
import mpmath
# module docstring substitution
_math_expression = r"""
.. math::
f(x, \\nu) = \\frac{2^{-\\nu/2}}{\\Gamma(\\nu/2)}
x^{-\\nu/2 - 1} e^{-1/(2x)}
"""
_docstring_re_subs = [
(r' f\(x,.*$', _math_expression, 0, re.MULTILINE),
(' nu ', r' :math:`\\nu` ', 0, 0),
]
__all__ = ['pdf', 'logpdf', 'cdf', 'sf', 'mean', 'mode', 'variance']
def pdf(x, nu):
    """
    PDF for the inverse chi-square distribution.
    """
    _validate_nu(nu)
    if x <= 0:
        # The support of the distribution is x > 0.
        return mpmath.mp.zero
    with mpmath.extradps(5):
        x = mpmath.mpf(x)
        half_nu = mpmath.mpf(nu) / 2
        return (mpmath.power(2, -half_nu) * x**(-half_nu - 1)
                * mpmath.exp(-1/(2*x)) / mpmath.gamma(half_nu))
def logpdf(x, nu):
    """
    Logarithm of the PDF for the inverse chi-square distribution.
    """
    _validate_nu(nu)
    if x <= 0:
        # The density is zero outside the support, so its log is -inf.
        return mpmath.ninf
    with mpmath.extradps(5):
        x = mpmath.mpf(x)
        half_nu = mpmath.mpf(nu) / 2
        return (-half_nu*mpmath.log(2) + (-half_nu - 1)*mpmath.log(x)
                - 1/(2*x) - mpmath.loggamma(half_nu))
def cdf(x, nu):
    """
    CDF for the inverse chi-square distribution.
    """
    _validate_nu(nu)
    if x <= 0:
        return mpmath.mp.zero
    with mpmath.extradps(5):
        # P(X <= x) is the regularized upper incomplete gamma function
        # at nu/2 with lower integration limit 1/(2x).
        lower = 1 / (2 * mpmath.mpf(x))
        return mpmath.gammainc(mpmath.mpf(nu)/2, a=lower, b=mpmath.inf,
                               regularized=True)
def sf(x, nu):
    """
    Survival function for the inverse chi-square distribution.
    """
    _validate_nu(nu)
    if x <= 0:
        # All probability mass lies in x > 0.
        return mpmath.mp.one
    with mpmath.extradps(5):
        upper = 1 / (2 * mpmath.mpf(x))
        return mpmath.gammainc(mpmath.mpf(nu)/2, a=0, b=upper,
                               regularized=True)
def mean(nu):
    """
    Mean of the inverse chi-square distribution.
    For nu > 2, the mean is 1/(nu - 2).
    """
    _validate_nu(nu)
    with mpmath.extradps(5):
        nu = mpmath.mpf(nu)
        if nu > 2:
            return mpmath.mp.one / (nu - 2)
        # The mean does not exist for nu <= 2.
        return mpmath.nan
mean._docstring_re_subs = [
(r' *1.*2\)$',
'\n'.join([r'.. math::',
r' \\frac{1}{\\nu - 2}',
r'']),
0, re.MULTILINE),
(r'1/\(nu - 2\)', r':math:`1/(\\nu - 2)`', 0, 0),
('nu > 2', r':math:`\\nu > 2`', 0, 0),
]
def mode(nu):
    """
    Mode of the inverse chi-square distribution.
    The mode is 1/(nu + 2).
    """
    # Docstring fixed: it previously claimed "max(k - 2, 0)", which is not
    # what this code computes; the returned value is 1/(nu + 2).
    # NOTE(review): _validate_nu is referenced here but not defined in this
    # copy of the module -- confirm it is defined/imported.
    _validate_nu(nu)
    with mpmath.extradps(5):
        nu = mpmath.mpf(nu)
        return 1 / (nu + 2)
def variance(nu):
    """
    Variance of the inverse chi-square distribution.
    For nu > 4, the variance is
    2 / ((nu - 2)**2 (nu - 4))
    """
    _validate_nu(nu)
    with mpmath.extradps(5):
        nu = mpmath.mpf(nu)
        if nu > 4:
            return 2 / (nu - 2)**2 / (nu - 4)
        # The variance does not exist for nu <= 4.
        return mpmath.nan
variance._docstring_re_subs = [
(r' *2.*4\)\)$',
'\n'.join([r'.. math::',
r' \\frac{2}{(\\nu - 2)^2 (\\nu - 4)}',
r'']),
0, re.MULTILINE),
('nu > 4', r':math:`\\nu > 4`', 0, 0),
]
| 23.648148 | 76 | 0.527539 | """
Inverse chi-square distribution
-------------------------------
The probability density function for the inverse chi-square
distribution is
f(x, nu) = 2**(-nu/2) / Gamma(nu/2) * x**(-nu/2 - 1) * exp(-1/(2*x))
See the Wikipedia article `"Inverse-chi-squared distribution"
<https://en.wikipedia.org/wiki/Inverse-chi-squared_distribution>`_
for more information. The functions here implement the first
definition given in the wikipedia article. That is, if X has the
chi-square distribution with nu degrees of freedom, then 1/X has the
inverse chi-square distribution with nu degrees of freedom.
"""
import re
import mpmath
# module docstring substitution
_math_expression = r"""
.. math::
f(x, \\nu) = \\frac{2^{-\\nu/2}}{\\Gamma(\\nu/2)}
x^{-\\nu/2 - 1} e^{-1/(2x)}
"""
_docstring_re_subs = [
(r' f\(x,.*$', _math_expression, 0, re.MULTILINE),
(' nu ', r' :math:`\\nu` ', 0, 0),
]
__all__ = ['pdf', 'logpdf', 'cdf', 'sf', 'mean', 'mode', 'variance']
def _validate_nu(nu):
if nu <= 0:
raise ValueError('nu must be positive')
def pdf(x, nu):
    """
    PDF for the inverse chi-square distribution.
    """
    _validate_nu(nu)
    if x <= 0:
        # The support of the distribution is x > 0.
        return mpmath.mp.zero
    with mpmath.extradps(5):
        x = mpmath.mpf(x)
        half_nu = mpmath.mpf(nu) / 2
        return (mpmath.power(2, -half_nu) * x**(-half_nu - 1)
                * mpmath.exp(-1/(2*x)) / mpmath.gamma(half_nu))
def logpdf(x, nu):
    """
    Logarithm of the PDF for the inverse chi-square distribution.
    """
    _validate_nu(nu)
    if x <= 0:
        # The density is zero outside the support, so its log is -inf.
        return mpmath.ninf
    with mpmath.extradps(5):
        x = mpmath.mpf(x)
        half_nu = mpmath.mpf(nu) / 2
        return (-half_nu*mpmath.log(2) + (-half_nu - 1)*mpmath.log(x)
                - 1/(2*x) - mpmath.loggamma(half_nu))
def cdf(x, nu):
    """
    CDF for the inverse chi-square distribution.
    """
    _validate_nu(nu)
    if x <= 0:
        return mpmath.mp.zero
    with mpmath.extradps(5):
        # P(X <= x) is the regularized upper incomplete gamma function
        # at nu/2 with lower integration limit 1/(2x).
        lower = 1 / (2 * mpmath.mpf(x))
        return mpmath.gammainc(mpmath.mpf(nu)/2, a=lower, b=mpmath.inf,
                               regularized=True)
def sf(x, nu):
    """
    Survival function for the inverse chi-square distribution.
    """
    _validate_nu(nu)
    if x <= 0:
        # All probability mass lies in x > 0.
        return mpmath.mp.one
    with mpmath.extradps(5):
        upper = 1 / (2 * mpmath.mpf(x))
        return mpmath.gammainc(mpmath.mpf(nu)/2, a=0, b=upper,
                               regularized=True)
def mean(nu):
    """
    Mean of the inverse chi-square distribution.
    For nu > 2, the mean is 1/(nu - 2).
    """
    _validate_nu(nu)
    with mpmath.extradps(5):
        nu = mpmath.mpf(nu)
        if nu > 2:
            return mpmath.mp.one / (nu - 2)
        # The mean does not exist for nu <= 2.
        return mpmath.nan
mean._docstring_re_subs = [
(r' *1.*2\)$',
'\n'.join([r'.. math::',
r' \\frac{1}{\\nu - 2}',
r'']),
0, re.MULTILINE),
(r'1/\(nu - 2\)', r':math:`1/(\\nu - 2)`', 0, 0),
('nu > 2', r':math:`\\nu > 2`', 0, 0),
]
def mode(nu):
    """
    Mode of the inverse chi-square distribution.
    The mode is 1/(nu + 2).
    """
    # Docstring fixed: it previously claimed "max(k - 2, 0)", which is not
    # what this code computes; the returned value is 1/(nu + 2).
    _validate_nu(nu)
    with mpmath.extradps(5):
        nu = mpmath.mpf(nu)
        return 1 / (nu + 2)
def variance(nu):
    """
    Variance of the inverse chi-square distribution.
    For nu > 4, the variance is
    2 / ((nu - 2)**2 (nu - 4))
    """
    _validate_nu(nu)
    with mpmath.extradps(5):
        nu = mpmath.mpf(nu)
        if nu > 4:
            return 2 / (nu - 2)**2 / (nu - 4)
        # The variance does not exist for nu <= 4.
        return mpmath.nan
(r' *2.*4\)\)$',
'\n'.join([r'.. math::',
r' \\frac{2}{(\\nu - 2)^2 (\\nu - 4)}',
r'']),
0, re.MULTILINE),
('nu > 4', r':math:`\\nu > 4`', 0, 0),
]
| 64 | 0 | 23 |
026118a250d1c4f591023f554e6cd2e4aca62694 | 3,562 | py | Python | traffic_test.py | yuecong/tools | 1d3f63e579c75c28d49d0f805e517bbb564e50ef | [
"MIT"
] | null | null | null | traffic_test.py | yuecong/tools | 1d3f63e579c75c28d49d0f805e517bbb564e50ef | [
"MIT"
] | null | null | null | traffic_test.py | yuecong/tools | 1d3f63e579c75c28d49d0f805e517bbb564e50ef | [
"MIT"
] | null | null | null | #!/usr/bin/python
import psutil
import subprocess
import simplejson
import time
import random
import multiprocessing as mp
procs_id = 0
procs = {}
procs_data = []
url_num = 0
# Define an output queue
output = mp.Queue()
MAX_THREAD_NUM = 500
#proxy_url='10.0.0.204:80'
proxy_url=''
urls = [
'http://drbd.linbit.com/home/what-is-drbd/',
'http://drbd.linbit.com/home/what-is-ha/',
'http://en.wikipedia.org/wiki/Main_Page',
'http://en.wikipedia.org/wiki/Walden%E2%80%93Wallkill_Rail_Trail',
'http://en.wikipedia.org/wiki/New_York_metropolitan_area',
'http://www.citrix.com/products.html',
'http://www.citrix.co.jp/products.html?posit=glnav',
'http://www.citrix.co.jp/products/gotowebinar/overview.html'
]
#Get http access time for particular url with/without proxy
# Runs command silently
#Main function
if __name__ == '__main__':
#warmup for ATS
print ("warmup start....")
for url in urls:
getInfoForCurl(url,proxy_url)
print ("test start....")
# Setup a list of processes that we want to run
print "add it into thead queue...."
processes = [mp.Process(target=accesswithOutput, args=(proxy_url,)) for x in range(MAX_THREAD_NUM)]
#processes = [mp.Process(target=accesswithOutput, args=('',)) for x in range(MAX_THREAD_NUM)]
# Run processes
print "thread start..."
for p in processes:
p.start()
# Exit the completed processes
for p in processes:
p.join()
print "thread exit!"
# Get process results from the output queue
results = [output.get() for p in processes]
time_sum=0
for result in results:
time_sum =time_sum + result[2]
print(time_sum)
# for url in urls:
# info= getInfoForCurl(url,proxy_url)
# print (info)
# info= getInfoForCurl(url)
# print (info)
| 27.612403 | 103 | 0.616227 | #!/usr/bin/python
import psutil
import subprocess
import simplejson
import time
import random
import multiprocessing as mp
procs_id = 0
procs = {}
procs_data = []
url_num = 0
# Define an output queue
output = mp.Queue()
MAX_THREAD_NUM = 500
#proxy_url='10.0.0.204:80'
proxy_url=''
urls = [
'http://drbd.linbit.com/home/what-is-drbd/',
'http://drbd.linbit.com/home/what-is-ha/',
'http://en.wikipedia.org/wiki/Main_Page',
'http://en.wikipedia.org/wiki/Walden%E2%80%93Wallkill_Rail_Trail',
'http://en.wikipedia.org/wiki/New_York_metropolitan_area',
'http://www.citrix.com/products.html',
'http://www.citrix.co.jp/products.html?posit=glnav',
'http://www.citrix.co.jp/products/gotowebinar/overview.html'
]
#Get http access time for particular url with/without proxy
def getInfoForCurl(url, proxy=''):
    """Time one HTTP fetch of `url` with curl.

    A non-empty `proxy` is passed through curl's --proxy option.
    Returns [url, proxy, elapsed_seconds].
    """
    start_time = time.time()
    if proxy:  # idiomatic truthiness instead of len(proxy) > 0
        cmd = ['curl', '--proxy', proxy, url]
        print(cmd)
    else:
        cmd = ['curl', url]
    # Block until curl exits so elapsed time covers the whole fetch.
    runCommand(cmd, return_stdout = False, busy_wait = True)
    end_time = time.time()
    return [url, proxy, end_time - start_time]
def accesswithOutput(proxyUrl):
    """Worker body: time 5 random URL fetches and push results to `output`.

    Results flow through the shared multiprocessing queue so the parent
    process can aggregate them after join().
    """
    for x in range(5):
        info = getInfoForCurl(random.choice(urls),proxyUrl)
        output.put(info)
        #url_num = url_num + 1
        print (info)
# Runs command silently
def runCommand(cmd, use_shell = False, return_stdout = False, busy_wait = False, poll_duration = 0.5):
    """Run `cmd` via psutil.Popen, optionally polling it while it runs.

    With busy_wait=True the process is polled every `poll_duration` seconds
    and its io/cpu counters are snapshotted into the module-global
    `procs_data` list.  Raises Exception(stderr) on non-zero exit,
    otherwise returns the captured stdout (None unless return_stdout=True).
    """
    # Sanitize cmd to string
    # (Python 2 map returns a list; under Python 3 this would be a lazy
    #  map object -- confirm before porting.)
    cmd = map(lambda x: '%s' % x, cmd)
    if return_stdout:
        proc = psutil.Popen(cmd, shell = use_shell, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
    else:
        # Discard output entirely when the caller does not want it.
        proc = psutil.Popen(cmd, shell = use_shell,
                            stdout = open('/dev/null', 'w'),
                            stderr = open('/dev/null', 'w'))
    # Bookkeeping: register the child in the module-global process table.
    global procs_id
    global procs
    global procs_data
    proc_id = procs_id
    procs[proc_id] = proc
    procs_id += 1
    data = { }
    #print(proc_id)
    while busy_wait:
        returncode = proc.poll()
        if returncode == None:
            try:
                # Snapshot resource usage; ignore transient psutil errors
                # (e.g. the process exiting mid-query).
                data = proc.as_dict(attrs = ['get_io_counters', 'get_cpu_times'])
            except Exception as e:
                pass
            time.sleep(poll_duration)
        else:
            break
    (stdout, stderr) = proc.communicate()
    returncode = proc.returncode
    del procs[proc_id]
    if returncode != 0:
        raise Exception(stderr)
    else:
        if data:
            # Keep only the last successful usage snapshot.
            procs_data.append(data)
        return stdout
#Main function
if __name__ == '__main__':
    #warmup for ATS
    # Fetch each URL once first so later timings measure a warm cache.
    print ("warmup start....")
    for url in urls:
        getInfoForCurl(url,proxy_url)
    print ("test start....")
    # Setup a list of processes that we want to run
    print "add it into thead queue...."
    processes = [mp.Process(target=accesswithOutput, args=(proxy_url,)) for x in range(MAX_THREAD_NUM)]
    #processes = [mp.Process(target=accesswithOutput, args=('',)) for x in range(MAX_THREAD_NUM)]
    # Run processes
    print "thread start..."
    for p in processes:
        p.start()
    # Exit the completed processes
    for p in processes:
        p.join()
    print "thread exit!"
    # Get process results from the output queue
    # NOTE(review): each worker puts 5 results on the queue (see
    # accesswithOutput) but only one item per process is read here, so
    # time_sum under-counts -- confirm intended sampling.
    results = [output.get() for p in processes]
    time_sum=0
    for result in results:
        time_sum =time_sum + result[2]
    print(time_sum)
# for url in urls:
# info= getInfoForCurl(url,proxy_url)
# print (info)
# info= getInfoForCurl(url)
# print (info)
| 1,636 | 0 | 67 |
103b8401e6645a543a140fb6594e4acf25b6699c | 1,890 | py | Python | scrap_app_info.py | jahangir091/app-data-collection | 7ac87328c896225396d020b06ca3a4a8d9b9a8a5 | [
"MIT"
] | null | null | null | scrap_app_info.py | jahangir091/app-data-collection | 7ac87328c896225396d020b06ca3a4a8d9b9a8a5 | [
"MIT"
] | null | null | null | scrap_app_info.py | jahangir091/app-data-collection | 7ac87328c896225396d020b06ca3a4a8d9b9a8a5 | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup as soup
import os
output_filename = "output_files/slideshow/app_info/app_info.csv"
output_file = open(output_filename, "a")
headers = "title, subtitle, publisher, country, year, last_month_downloads, last_month_revenue\n"
output_file.write(headers)
for filename in os.listdir(os.getcwd() + "/input_files/slideshow/app_info"):
if filename.endswith(".html"):
with open(os.path.join(os.getcwd() + "/input_files/slideshow/app_info", filename), 'r') as f:
file_content = f.read()
page_soup = soup(file_content, 'html.parser')
title = clean_value(page_soup.find("span", {"class": "app-name-wrapper"}))
sub_title = clean_value(page_soup.find("h3", {"class": "subtitle-text"}))
last_month_downloads = clean_value(page_soup.find("span", {"class": "downloads"}))
last_month_revenue = clean_value(page_soup.find("span", {"class": "revenue"}))
about_items = page_soup.find("table", {"class": "about-app-table"}).find_all("tr")
for item in about_items:
if item.find("td", {"class": "name"}).text.strip() == "Support URL:":
publisher = clean_value(item.find("td", {"class": "value"}))
if item.find("td", {"class": "name"}).text.strip() == "Most Popular Country:":
country = clean_value(item.find("td", {"class": "value"}))
if item.find("td", {"class": "name"}).text.strip() == "Country Release Date:":
release_date = clean_value(item.find("td", {"class": "value"}))
output_file.write(title + ", " + sub_title + ", " + publisher + ", " + country + ", " + release_date + ", " + last_month_downloads + ", " + last_month_revenue + "\n")
output_file.close()
| 51.081081 | 174 | 0.609524 | from bs4 import BeautifulSoup as soup
import os
output_filename = "output_files/slideshow/app_info/app_info.csv"
output_file = open(output_filename, "a")
headers = "title, subtitle, publisher, country, year, last_month_downloads, last_month_revenue\n"
output_file.write(headers)
def clean_value(value):
    """Return the stripped text of a bs4 node, or '' for a missing node."""
    if value:
        return value.text.strip()
    return ""
# Parse every downloaded app-info HTML page and append one CSV row per file.
for filename in os.listdir(os.getcwd() + "/input_files/slideshow/app_info"):
    if filename.endswith(".html"):
        with open(os.path.join(os.getcwd() + "/input_files/slideshow/app_info", filename), 'r') as f:
            file_content = f.read()
            page_soup = soup(file_content, 'html.parser')
            # Core fields scraped from fixed CSS classes on the page.
            title = clean_value(page_soup.find("span", {"class": "app-name-wrapper"}))
            sub_title = clean_value(page_soup.find("h3", {"class": "subtitle-text"}))
            last_month_downloads = clean_value(page_soup.find("span", {"class": "downloads"}))
            last_month_revenue = clean_value(page_soup.find("span", {"class": "revenue"}))
            # The "about" table holds label/value rows; match on the label text.
            about_items = page_soup.find("table", {"class": "about-app-table"}).find_all("tr")
            for item in about_items:
                if item.find("td", {"class": "name"}).text.strip() == "Support URL:":
                    publisher = clean_value(item.find("td", {"class": "value"}))
                if item.find("td", {"class": "name"}).text.strip() == "Most Popular Country:":
                    country = clean_value(item.find("td", {"class": "value"}))
                if item.find("td", {"class": "name"}).text.strip() == "Country Release Date:":
                    release_date = clean_value(item.find("td", {"class": "value"}))
            # NOTE(review): publisher/country/release_date are only assigned
            # when the matching row exists -- a page missing one raises
            # NameError (or silently reuses the previous file's value).
            # Initialize defaults before the row loop; confirm intent.
            output_file.write(title + ", " + sub_title + ", " + publisher + ", " + country + ", " + release_date + ", " + last_month_downloads + ", " + last_month_revenue + "\n")
output_file.close()
| 68 | 0 | 23 |
c01810a4d7924725e886f4b9697cf9e354731827 | 33 | py | Python | opm/linty/__init__.py | Open-Prose-Metrics/open_prose_metrics_app-core | 9df65edfe9ee9af0a0731c3f2e21ea25bced250c | [
"MIT"
] | null | null | null | opm/linty/__init__.py | Open-Prose-Metrics/open_prose_metrics_app-core | 9df65edfe9ee9af0a0731c3f2e21ea25bced250c | [
"MIT"
] | 4 | 2021-04-30T21:38:10.000Z | 2022-01-13T03:32:33.000Z | opm/linty/__init__.py | Open-Prose-Metrics/open_prose_metrics_app-core | 9df65edfe9ee9af0a0731c3f2e21ea25bced250c | [
"MIT"
] | 1 | 2021-03-21T14:08:28.000Z | 2021-03-21T14:08:28.000Z | from linty.linty import lint_text | 33 | 33 | 0.878788 | from linty.linty import lint_text | 0 | 0 | 0 |
8c1a23a66f3673146f1b7adcf6ca82eae41a4398 | 15,048 | py | Python | mmdb_writer.py | VimT/MaxMind-DB-Writer-python | edd4790c76bad6ae68b7dd621d1e0d64ea11fa07 | [
"MIT"
] | 6 | 2021-01-09T01:01:32.000Z | 2022-01-25T16:26:43.000Z | mmdb_writer.py | VimT/MaxMind-DB-Writer-python | edd4790c76bad6ae68b7dd621d1e0d64ea11fa07 | [
"MIT"
] | 1 | 2020-12-25T18:02:33.000Z | 2020-12-25T22:25:48.000Z | mmdb_writer.py | VimT/MaxMind-DB-Writer-python | edd4790c76bad6ae68b7dd621d1e0d64ea11fa07 | [
"MIT"
] | 4 | 2020-11-22T16:07:24.000Z | 2022-02-05T20:31:35.000Z | # coding: utf-8
__version__ = '0.1.0'
import logging
import math
import struct
import time
from typing import Union
from netaddr import IPSet
MMDBType = Union[dict, list, str, bytes, int, bool]
logger = logging.getLogger(__name__)
METADATA_MAGIC = b'\xab\xcd\xefMaxMind.com'
| 33.072527 | 110 | 0.54592 | # coding: utf-8
__version__ = '0.1.0'
import logging
import math
import struct
import time
from typing import Union
from netaddr import IPSet
MMDBType = Union[dict, list, str, bytes, int, bool]
logger = logging.getLogger(__name__)
METADATA_MAGIC = b'\xab\xcd\xefMaxMind.com'
class SearchTreeNode(object):
    """Internal binary-trie node: bit 0 selects `left`, bit 1 selects `right`."""
    def __init__(self, left=None, right=None):
        self.left = left
        self.right = right
    def get_or_create(self, item):
        """Return the child for bit `item`, creating an empty node if absent."""
        if item == 0:
            if not self.left:
                self.left = SearchTreeNode()
            return self.left
        if item == 1:
            if not self.right:
                self.right = SearchTreeNode()
            return self.right
    def __getitem__(self, item):
        """Child access by bit; anything other than 0/1 yields None."""
        return self.left if item == 0 else (self.right if item == 1 else None)
    def __setitem__(self, key, value):
        """Child assignment by bit; other keys are silently ignored."""
        if key in (0, 1):
            setattr(self, 'left' if key == 0 else 'right', value)
class SearchTreeLeaf(object):
    """Terminal trie node holding the data record for a network."""
    def __init__(self, value):
        self.value = value
    def __repr__(self):
        return "SearchTreeLeaf(value={})".format(self.value)
    __str__ = __repr__
class Encoder(object):
    """Serializer for the MaxMind DB "data section".

    Encodes Python values into the format's type-tagged binary records.
    With `cache` enabled, repeated values are deduplicated: the record is
    written once and later references are emitted as pointer records.
    """
    def __init__(self, cache=True):
        # Maps id(value) -> encoded pointer bytes referencing the record.
        self.data_cache = {}
        # Encoded records in write order; concatenated into the data section.
        self.data_list = []
        # Byte offset where the next record will land.
        self.data_pointer = 0
        self.cache = cache
    def _encode_pointer(self, value):
        """Encode a data-section offset as a 2-5 byte pointer record.

        The size class is chosen by magnitude thresholds (2048, 526336,
        134744064); the smaller classes store the offset minus the base
        of their range.
        """
        pointer = value
        if pointer >= 134744064:
            res = struct.pack('>BI', 0x38, pointer)
        elif pointer >= 526336:
            pointer -= 526336
            res = struct.pack('>BBBB', 0x30 + ((pointer >> 24) & 0x07),
                              (pointer >> 16) & 0xff, (pointer >> 8) & 0xff,
                              pointer & 0xff)
        elif pointer >= 2048:
            pointer -= 2048
            res = struct.pack('>BBB', 0x28 + ((pointer >> 16) & 0x07),
                              (pointer >> 8) & 0xff, pointer & 0xff)
        else:
            res = struct.pack('>BB', 0x20 + ((pointer >> 8) & 0x07),
                              pointer & 0xff)
        return res
    def _encode_utf8_string(self, value):
        """Type 2: UTF-8 string, header length is the encoded byte count."""
        encoded_value = value.encode('utf-8')
        res = self._make_header(2, len(encoded_value))
        res += encoded_value
        return res
    def _encode_bytes(self, value):
        """Type 4: raw bytes."""
        return self._make_header(4, len(value)) + value
    def _encode_uint(self, type_id, max_len):
        """Build an encoder for an unsigned int of at most `max_len` bytes,
        stored big-endian with leading zero bytes stripped."""
        def _encode_unsigned_value(value):
            res = b''
            while value != 0 and len(res) < max_len:
                res = struct.pack('>B', value & 0xff) + res
                value = value >> 8
            return self._make_header(type_id, len(res)) + res
        return _encode_unsigned_value
    def _encode_map(self, value):
        """Type 7: map; header length is the number of key/value pairs."""
        res = self._make_header(7, len(value))
        for k, v in list(value.items()):
            # Keys are always stored by value.
            res += self.encode(k)
            res += self.encode(v)
        return res
    def _encode_array(self, value):
        """Type 11: array; header length is the element count."""
        res = self._make_header(11, len(value))
        for k in value:
            res += self.encode(k)
        return res
    def _encode_boolean(self, value):
        """Type 14: boolean -- the value lives in the header length field."""
        return self._make_header(14, 1 if value else 0)
    def _encode_pack_type(self, type_id, fmt):
        """Build an encoder that packs the value with a fixed struct format."""
        def pack_type(value):
            res = struct.pack(fmt, value)
            return self._make_header(type_id, len(res)) + res
        return pack_type
    # Lazily-built type_id -> encoder dispatch table (shared per instance).
    _type_decoder = None
    @property
    def type_decoder(self):
        """Dispatch table from MaxMind type id to bound encoder callable."""
        if self._type_decoder is None:
            self._type_decoder = {
                1: self._encode_pointer,
                2: self._encode_utf8_string,
                3: self._encode_pack_type(3, '>d'),  # double,
                4: self._encode_bytes,
                5: self._encode_uint(5, 2),  # uint16
                6: self._encode_uint(6, 4),  # uint32
                7: self._encode_map,
                8: self._encode_pack_type(8, '>i'),  # int32
                9: self._encode_uint(9, 8),  # uint64
                10: self._encode_uint(10, 16),  # uint128
                11: self._encode_array,
                14: self._encode_boolean,
                15: self._encode_pack_type(15, '>f'),  # float,
            }
        return self._type_decoder
    def _make_header(self, type_id, length):
        """Build the control byte(s): type tag plus length.

        Lengths < 29 fit in the 5 low bits; 29/30/31 are escape markers
        followed by 1/2/3 extra length bytes.  Type ids above 7 use an
        extended-type second byte storing type_id - 7.
        """
        if length >= 16843036:
            raise Exception('length >= 16843036')
        elif length >= 65821:
            five_bits = 31
            length -= 65821
            b3 = length & 0xff
            b2 = (length >> 8) & 0xff
            b1 = (length >> 16) & 0xff
            additional_length_bytes = struct.pack('>BBB', b1, b2, b3)
        elif length >= 285:
            five_bits = 30
            length -= 285
            b2 = length & 0xff
            b1 = (length >> 8) & 0xff
            additional_length_bytes = struct.pack('>BB', b1, b2)
        elif length >= 29:
            five_bits = 29
            length -= 29
            additional_length_bytes = struct.pack('>B', length & 0xff)
        else:
            five_bits = length
            additional_length_bytes = b''
        if type_id <= 7:
            res = struct.pack('>B', (type_id << 5) + five_bits)
        else:
            res = struct.pack('>BB', five_bits, type_id - 7)
        return res + additional_length_bytes
    # Direct Python-type -> MaxMind-type-id mapping (ints handled separately).
    _python_type_id = {
        float: 15,
        bool: 14,
        list: 11,
        dict: 7,
        bytes: 4,
        str: 2
    }
    def python_type_id(self, value):
        """Pick a MaxMind type id for `value`; ints are sized by magnitude
        (uint16/32/64/128) and negatives become int32."""
        value_type = type(value)
        type_id = self._python_type_id.get(value_type)
        if type_id:
            return type_id
        if value_type is int:
            if value > 0xffffffffffffffff:
                return 10
            elif value > 0xffffffff:
                return 9
            elif value > 0xffff:
                return 6
            elif value < 0:
                return 8
            else:
                return 5
        raise TypeError("unknown type {value_type}".format(value_type=value_type))
    def encode_meta(self, meta):
        """Encode the metadata map, forcing spec-mandated integer widths
        for the well-known keys."""
        res = self._make_header(7, len(meta))
        meta_type = {'node_count': 6, 'record_size': 5, 'ip_version': 5,
                     'binary_format_major_version': 5, 'binary_format_minor_version': 5,
                     'build_epoch': 9}
        for k, v in list(meta.items()):
            # Keys are always stored by value.
            res += self.encode(k)
            res += self.encode(v, meta_type.get(k))
        return res
    def encode(self, value, type_id=None):
        """Encode `value`; with caching on, append the record to the data
        section and return pointer bytes referencing it.

        NOTE(review): the cache is keyed on id(value).  If a cached object
        is garbage-collected its id can be reused by a different value;
        callers should keep inserted values alive for the Encoder's
        lifetime -- confirm.
        """
        if self.cache:
            try:
                return self.data_cache[id(value)]
            except KeyError:
                pass
        if not type_id:
            type_id = self.python_type_id(value)
        try:
            encoder = self.type_decoder[type_id]
        except KeyError:
            raise ValueError("unknown type_id={type_id}".format(type_id=type_id))
        res = encoder(value)
        if self.cache:
            # add to cache
            if type_id == 1:
                # Pointer records are appended as-is and never cached.
                self.data_list.append(res)
                self.data_pointer += len(res)
                return res
            else:
                self.data_list.append(res)
                pointer_position = self.data_pointer
                self.data_pointer += len(res)
                pointer = self.encode(pointer_position, 1)
                self.data_cache[id(value)] = pointer
                return pointer
        return res
class TreeWriter(object):
    """Serializes a finished search tree plus metadata into an .mmdb file."""
    # Factory for the data-section encoder; overridable by subclasses.
    encoder_cls = Encoder
    def __init__(self, tree, meta):
        self._node_idx = {}      # id(node) -> node number in the tree section
        self._leaf_offset = {}   # id(leaf) -> offset used to reference its record
        self._node_list = []     # internal nodes in numbering order
        self._node_counter = 0
        # NOTE(review): _record_size is never read; _adjust_record_size sets
        # the separate attribute `record_size` instead -- likely leftover.
        self._record_size = 0
        self.tree = tree
        self.meta = meta
        self.encoder = self.encoder_cls(cache=True)
    @property
    def _data_list(self):
        """Encoded data-section records (owned by the encoder)."""
        return self.encoder.data_list
    @property
    def _data_pointer(self):
        """Data offset; +16 accounts for the 16 zero bytes written between
        the tree and data sections (see write())."""
        return self.encoder.data_pointer + 16
    def _build_meta(self):
        """Merge computed tree stats with the user-supplied metadata
        (user keys win on collision, due to unpacking order)."""
        return {
            "node_count": self._node_counter,
            "record_size": self.record_size,
            **self.meta
        }
    def _adjust_record_size(self):
        # Tree records should be large enough to contain either tree node index
        # or data offset.
        max_id = self._node_counter + self._data_pointer + 1
        # Estimate required bit count.
        bit_count = int(math.ceil(math.log(max_id, 2)))
        if bit_count <= 24:
            self.record_size = 24
        elif bit_count <= 28:
            self.record_size = 28
        elif bit_count <= 32:
            self.record_size = 32
        else:
            raise Exception('record_size > 32')
        # NOTE(review): true division makes this a float, and data_offset is
        # not referenced elsewhere in this class -- confirm it is needed.
        self.data_offset = self.record_size * 2 / 8 * self._node_counter
    def _enumerate_nodes(self, node):
        """Recursively number internal nodes (pre-order) and encode each
        distinct leaf value into the data section exactly once."""
        if type(node) is SearchTreeNode:
            node_id = id(node)
            if node_id not in self._node_idx:
                self._node_idx[node_id] = self._node_counter
                self._node_counter += 1
                self._node_list.append(node)
            self._enumerate_nodes(node.left)
            self._enumerate_nodes(node.right)
        elif type(node) is SearchTreeLeaf:
            node_id = id(node)
            if node_id not in self._leaf_offset:
                res = self.encoder.encode(node.value)
                # Remember where this leaf is referenced from; `res` is what
                # the encoder returned for the value (pointer bytes when
                # caching is on).
                self._leaf_offset[node_id] = self._data_pointer - len(res)
        else:  # == None
            return
    def _calc_record_idx(self, node):
        """Map a child slot to its on-disk record value: node_count means
        "no data", an internal node uses its number, and a leaf uses its
        data offset shifted past node_count."""
        if node is None:
            return self._node_counter
        elif type(node) is SearchTreeNode:
            return self._node_idx[id(node)]
        elif type(node) is SearchTreeLeaf:
            return self._leaf_offset[id(node)] + self._node_counter
        else:
            raise Exception("unexpected type")
    def _cal_node_bytes(self, node) -> bytes:
        """Pack the left/right record values of one internal node into
        6, 7 or 8 bytes depending on the chosen record size."""
        left_idx = self._calc_record_idx(node.left)
        right_idx = self._calc_record_idx(node.right)
        if self.record_size == 24:
            b1 = (left_idx >> 16) & 0xff
            b2 = (left_idx >> 8) & 0xff
            b3 = left_idx & 0xff
            b4 = (right_idx >> 16) & 0xff
            b5 = (right_idx >> 8) & 0xff
            b6 = right_idx & 0xff
            return struct.pack('>BBBBBB', b1, b2, b3, b4, b5, b6)
        elif self.record_size == 28:
            b1 = (left_idx >> 16) & 0xff
            b2 = (left_idx >> 8) & 0xff
            b3 = left_idx & 0xff
            # The middle byte holds the top 4 bits of both records.
            b4 = ((left_idx >> 24) & 0xf) * 16 + \
                 ((right_idx >> 24) & 0xf)
            b5 = (right_idx >> 16) & 0xff
            b6 = (right_idx >> 8) & 0xff
            b7 = right_idx & 0xff
            return struct.pack('>BBBBBBB', b1, b2, b3, b4, b5, b6, b7)
        elif self.record_size == 32:
            return struct.pack('>II', left_idx, right_idx)
        else:
            raise Exception('self.record_size > 32')
    def write(self, fname):
        """Write the file: tree records, 16-byte separator, data section,
        then the metadata marker and uncached-encoded metadata map."""
        self._enumerate_nodes(self.tree)
        self._adjust_record_size()
        with open(fname, 'wb') as f:
            for node in self._node_list:
                f.write(self._cal_node_bytes(node))
            f.write(b'\x00' * 16)
            for element in self._data_list:
                f.write(element)
            f.write(METADATA_MAGIC)
            f.write(self.encoder_cls(cache=False).encode_meta(self._build_meta()))
def bits_rstrip(n, length=None, keep=0):
    """Return an iterator over the top `keep` bits of `n`, where `n` is
    viewed as a big-endian unsigned integer of `length` bits.

    Example: bits_rstrip(5, 8, 4) yields 0, 0, 0, 0 (the high nibble of
    00000101).  Used to turn a CIDR's address into its prefix bits.
    """
    return map(int, bin(n)[2:].rjust(length, '0')[:keep])
class MMDBWriter(object):
    """Accumulates network -> data mappings in a binary search tree and
    serializes them to a MaxMind DB file via TreeWriter."""
    def __init__(self, ip_version=4, database_type='GeoIP',
                 languages=None, description='GeoIP db',
                 ipv4_compatible=False):
        """Configure the database shell.

        `description` may be a single string (applied to every language)
        or a language -> string mapping covering all of `languages`.
        `ipv4_compatible=True` (IPv6 only) stores IPv4 networks in the
        ::/96-mapped form.
        """
        self.tree = SearchTreeNode()
        self.ipv4_compatible = ipv4_compatible
        if languages is None:
            languages = []
        self.description = description
        self.database_type = database_type
        self.ip_version = ip_version
        self.languages = languages
        self.binary_format_major_version = 2
        self.binary_format_minor_version = 0
        # Depth of the search tree: one level per address bit.
        self._bit_length = 128 if ip_version == 6 else 32
        if ip_version not in [4, 6]:
            raise ValueError("ip_version should be 4 or 6, {} is incorrect".format(ip_version))
        if ip_version == 4 and ipv4_compatible:
            raise ValueError("ipv4_compatible=True can set when ip_version=6")
        if not self.binary_format_major_version:
            raise ValueError("major_version can't be empty or 0: {}".format(self.binary_format_major_version))
        if isinstance(description, str):
            self.description = {i: description for i in languages}
        for i in languages:
            if i not in self.description:
                # BUG FIX: the language name was never interpolated into
                # the message (the "{}" was left unformatted).
                raise ValueError("language {} must have description!".format(i))
    def insert_network(self, network: IPSet, content: MMDBType):
        """Attach `content` to every CIDR contained in `network`.

        A CIDR that falls inside an already-inserted shorter prefix is
        skipped with a warning; an exact duplicate is overridden.
        """
        leaf = SearchTreeLeaf(content)
        if not isinstance(network, IPSet):
            raise ValueError("network type should be netaddr.IPSet.")
        network = network.iter_cidrs()
        for cidr in network:
            if self.ip_version == 4 and cidr.version == 6:
                raise ValueError('You inserted a IPv6 address {} '
                                 'to an IPv4-only database.'.format(cidr))
            if self.ip_version == 6 and cidr.version == 4:
                if not self.ipv4_compatible:
                    raise ValueError("You inserted a IPv4 address {} to an IPv6 database."
                                     "Please use ipv4_compatible=True option store "
                                     "IPv4 address in IPv6 database as ::/96 format".format(cidr))
                cidr = cidr.ipv6(True)
            node = self.tree
            # The CIDR's prefix bits give the path from the root.
            bits = list(bits_rstrip(cidr.value, self._bit_length, cidr.prefixlen))
            try:
                for i in bits[:-1]:
                    node = node.get_or_create(i)
                if node[bits[-1]] is not None:
                    logger.warning("address %s info is not empty: %s, will override with %s",
                                   cidr, node[bits[-1]], leaf)
            except (AttributeError, TypeError) as e:
                # We descended into an existing leaf: this cidr is a subnet
                # of an already-inserted shorter prefix, so skip it.
                bits_str = ''.join(map(str, bits))
                logger.warning("{cidr}({bits_str})[{content}] is subnet of {node}, pass!"
                               .format(cidr=cidr, bits_str=bits_str, content=content, node=node))
                continue
            node[bits[-1]] = leaf
    def to_db_file(self, filename: str):
        """Serialize the accumulated tree and metadata to `filename`."""
        return TreeWriter(self.tree, self._build_meta()).write(filename)
    def _build_meta(self):
        """Assemble the metadata map stored at the end of the file."""
        return {
            "ip_version": self.ip_version,
            "database_type": self.database_type,
            "languages": self.languages,
            "binary_format_major_version": self.binary_format_major_version,
            "binary_format_minor_version": self.binary_format_minor_version,
            "build_epoch": int(time.time()),
            "description": self.description,
        }
| 13,456 | 953 | 353 |
136757bc6d8a0fcd893778d2fdb833985e51bbd6 | 62,849 | py | Python | src/command_modules/azure-cli-network/azure/cli/command_modules/network/_validators.py | ehotinger/azure-cli | 4652cddd711bf96f54f9d3a870d3e48e0184db31 | [
"MIT"
] | null | null | null | src/command_modules/azure-cli-network/azure/cli/command_modules/network/_validators.py | ehotinger/azure-cli | 4652cddd711bf96f54f9d3a870d3e48e0184db31 | [
"MIT"
] | null | null | null | src/command_modules/azure-cli-network/azure/cli/command_modules/network/_validators.py | ehotinger/azure-cli | 4652cddd711bf96f54f9d3a870d3e48e0184db31 | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=too-many-lines
import argparse
import base64
import socket
import os
from knack.util import CLIError
from knack.log import get_logger
from azure.cli.core.commands.validators import \
(validate_tags, get_default_location_from_resource_group)
from azure.cli.core.commands.template_create import get_folded_parameter_validator
from azure.cli.core.commands.client_factory import get_subscription_id, get_mgmt_service_client
from azure.cli.core.commands.validators import validate_parameter_set
from azure.cli.core.profiles import ResourceType
logger = get_logger(__name__)
# pylint: disable=inconsistent-return-statements
def validate_ip_tags(cmd, namespace):
    """Parse space-separated TYPE=VALUE entries in --ip-tags into IpTag models."""
    IpTag = cmd.get_models('IpTag')
    if namespace.ip_tags and IpTag:
        namespace.ip_tags = [
            IpTag(ip_tag_type=tag_type, tag=tag_value)
            for tag_type, tag_value in (entry.split('=', 1) for entry in namespace.ip_tags)
        ]
def get_public_ip_validator(has_type_field=False, allow_none=False, allow_new=False,
                            default_none=False):
    """ Retrieves a validator for public IP address. Accepting all defaults will perform a check
    for an existing name or ID with no ARM-required -type parameter. """
    from msrestazure.tools import is_valid_resource_id, resource_id
    # DEFECT FIX: this copy returned `simple_validator` / `complex_validator_with_type`
    # without ever defining them, guaranteeing a NameError. The two inner
    # validators are restored below.
    def simple_validator(cmd, namespace):
        # Expand a bare name (or list of names) into full publicIPAddresses IDs.
        if namespace.public_ip_address:
            is_list = isinstance(namespace.public_ip_address, list)
            def _validate_name_or_id(public_ip):
                # determine if public_ip_address is name or ID
                is_id = is_valid_resource_id(public_ip)
                return public_ip if is_id else resource_id(
                    subscription=get_subscription_id(cmd.cli_ctx),
                    resource_group=namespace.resource_group_name,
                    namespace='Microsoft.Network',
                    type='publicIPAddresses',
                    name=public_ip)
            if is_list:
                for i, public_ip in enumerate(namespace.public_ip_address):
                    namespace.public_ip_address[i] = _validate_name_or_id(public_ip)
            else:
                namespace.public_ip_address = _validate_name_or_id(namespace.public_ip_address)
    def complex_validator_with_type(cmd, namespace):
        # Folded-parameter validation also handles the companion -type argument.
        get_folded_parameter_validator(
            'public_ip_address', 'Microsoft.Network/publicIPAddresses', '--public-ip-address',
            allow_none=allow_none, allow_new=allow_new, default_none=default_none)(cmd, namespace)
    return complex_validator_with_type if has_type_field else simple_validator
# COMMAND NAMESPACE VALIDATORS
# pylint: disable=too-few-public-methods
| 41.293693 | 154 | 0.68803 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=too-many-lines
import argparse
import base64
import socket
import os
from knack.util import CLIError
from knack.log import get_logger
from azure.cli.core.commands.validators import \
(validate_tags, get_default_location_from_resource_group)
from azure.cli.core.commands.template_create import get_folded_parameter_validator
from azure.cli.core.commands.client_factory import get_subscription_id, get_mgmt_service_client
from azure.cli.core.commands.validators import validate_parameter_set
from azure.cli.core.profiles import ResourceType
logger = get_logger(__name__)
def get_asg_validator(loader, dest):
    """Build a validator that converts ASG names stored in ``dest`` into full ARM IDs,
    wrapping each as an ApplicationSecurityGroup model."""
    from msrestazure.tools import is_valid_resource_id, resource_id
    ApplicationSecurityGroup = loader.get_models('ApplicationSecurityGroup')
    def _validate_asg_name_or_id(cmd, namespace):
        subscription_id = get_subscription_id(cmd.cli_ctx)
        resource_group = namespace.resource_group_name
        names_or_ids = getattr(namespace, dest)
        ids = []
        # an empty value or [""] is left on the namespace unmodified
        if names_or_ids == [""] or not names_or_ids:
            return
        for val in names_or_ids:
            if not is_valid_resource_id(val):
                val = resource_id(
                    subscription=subscription_id,
                    resource_group=resource_group,
                    namespace='Microsoft.Network', type='applicationSecurityGroups',
                    name=val
                )
            ids.append(ApplicationSecurityGroup(id=val))
        setattr(namespace, dest, ids)
    return _validate_asg_name_or_id
def get_vnet_validator(dest):
    """Build a validator that converts vnet names stored in ``dest`` into full ARM IDs,
    wrapping each as a SubResource model."""
    from msrestazure.tools import is_valid_resource_id, resource_id
    def _validate_vnet_name_or_id(cmd, namespace):
        SubResource = cmd.get_models('SubResource')
        subscription_id = get_subscription_id(cmd.cli_ctx)
        resource_group = namespace.resource_group_name
        names_or_ids = getattr(namespace, dest)
        ids = []
        # an empty value or [''] is left on the namespace unmodified
        if names_or_ids == [''] or not names_or_ids:
            return
        for val in names_or_ids:
            if not is_valid_resource_id(val):
                val = resource_id(
                    subscription=subscription_id,
                    resource_group=resource_group,
                    namespace='Microsoft.Network', type='virtualNetworks',
                    name=val
                )
            ids.append(SubResource(id=val))
        setattr(namespace, dest, ids)
    return _validate_vnet_name_or_id
def validate_ddos_name_or_id(cmd, namespace):
    """Expand a DDoS protection plan name into its full ARM resource ID."""
    plan = namespace.ddos_protection_plan
    if not plan:
        return
    from msrestazure.tools import is_valid_resource_id, resource_id
    if is_valid_resource_id(plan):
        return
    namespace.ddos_protection_plan = resource_id(
        subscription=get_subscription_id(cmd.cli_ctx),
        resource_group=namespace.resource_group_name,
        namespace='Microsoft.Network', type='ddosProtectionPlans',
        name=plan
    )
# pylint: disable=inconsistent-return-statements
def dns_zone_name_type(value):
    """Trim a single trailing dot from a DNS zone name; falsy input yields None."""
    if not value:
        return
    return value[:-1] if value.endswith('.') else value
def _generate_ag_subproperty_id(cli_ctx, namespace, child_type, child_name, subscription=None):
    """Build the full ARM ID of a child resource under the namespace's application gateway."""
    from msrestazure.tools import resource_id
    return resource_id(
        subscription=subscription or get_subscription_id(cli_ctx),
        resource_group=namespace.resource_group_name,
        namespace='Microsoft.Network',
        type='applicationGateways',
        name=namespace.application_gateway_name,
        child_type_1=child_type,
        child_name_1=child_name)
def _generate_lb_subproperty_id(cli_ctx, namespace, child_type, child_name, subscription=None):
    """Build the full ARM ID of a child resource under the namespace's load balancer."""
    from msrestazure.tools import resource_id
    return resource_id(
        subscription=subscription or get_subscription_id(cli_ctx),
        resource_group=namespace.resource_group_name,
        namespace='Microsoft.Network',
        type='loadBalancers',
        name=namespace.load_balancer_name,
        child_type_1=child_type,
        child_name_1=child_name)
def _generate_lb_id_list_from_names_or_ids(cli_ctx, namespace, prop, child_type):
    """Normalize ``namespace.<prop>`` into a list of {'id': ...} dicts, expanding
    bare names into load balancer child-resource IDs via --lb-name."""
    from msrestazure.tools import is_valid_resource_id
    raw = getattr(namespace, prop)
    if not raw:
        return
    # accept either a single value or a list
    raw = raw if isinstance(raw, list) else [raw]
    result = []
    for item in raw:
        if is_valid_resource_id(item):
            result.append({'id': item})
        else:
            # a bare name is only resolvable when --lb-name was supplied
            if not namespace.load_balancer_name:
                raise CLIError('Unable to process {}. Please supply a well-formed ID or '
                               '--lb-name.'.format(item))
            result.append({'id': _generate_lb_subproperty_id(
                cli_ctx, namespace, child_type, item)})
    setattr(namespace, prop, result)
def validate_address_pool_id_list(cmd, namespace):
    """Expand backend address pool names into load balancer child-resource IDs."""
    _generate_lb_id_list_from_names_or_ids(
        cmd.cli_ctx, namespace, 'load_balancer_backend_address_pool_ids', 'backendAddressPools')
def validate_address_pool_name_or_id(cmd, namespace):
    """Resolve --address-pool against either --lb-name or --gateway-name.

    Accepts a full resource ID (from which the parent LB/gateway name is
    inferred) or a bare name combined with exactly one parent option.
    """
    from msrestazure.tools import is_valid_resource_id, parse_resource_id
    address_pool = namespace.backend_address_pool
    lb_name = namespace.load_balancer_name
    gateway_name = namespace.application_gateway_name
    usage_error = CLIError('usage error: --address-pool ID | --lb-name NAME --address-pool NAME '
                           '| --gateway-name NAME --address-pool NAME')
    if is_valid_resource_id(address_pool):
        # a full ID must not be combined with a parent name option
        if lb_name or gateway_name:
            raise usage_error
        parts = parse_resource_id(address_pool)
        if parts['type'] == 'loadBalancers':
            namespace.load_balancer_name = parts['name']
        elif parts['type'] == 'applicationGateways':
            namespace.application_gateway_name = parts['name']
        else:
            raise usage_error
    else:
        # exactly one of --lb-name / --gateway-name must accompany a bare name
        if bool(lb_name) == bool(gateway_name):
            raise usage_error
        if lb_name:
            namespace.backend_address_pool = _generate_lb_subproperty_id(
                cmd.cli_ctx, namespace, 'backendAddressPools', address_pool)
        elif gateway_name:
            namespace.backend_address_pool = _generate_ag_subproperty_id(
                cmd.cli_ctx, namespace, 'backendAddressPools', address_pool)
def validate_address_prefixes(namespace):
    """Forbid subnet/vnet address-prefix arguments when an existing subnet is used."""
    if namespace.subnet_type != 'new':
        validate_parameter_set(namespace,
                               required=[],
                               forbidden=['subnet_address_prefix', 'vnet_address_prefix'],
                               description='existing subnet')
def read_base_64_file(filename):
    """Read ``filename`` as binary and return its contents base64-encoded as text."""
    with open(filename, 'rb') as handle:
        encoded = base64.b64encode(handle.read())
    try:
        return encoded.decode('utf-8')
    except UnicodeDecodeError:
        # fall back to the bytes repr if decoding ever fails
        return str(encoded)
def validate_cert(namespace):
    """Replace --cert-data (a file path) with the file's base64-encoded contents."""
    cert_file = namespace.cert_data
    if cert_file:
        namespace.cert_data = read_base_64_file(cert_file)
def validate_ssl_cert(namespace):
    """Validate the SSL cert/password pair and default the frontend port (80 or 443).

    With no cert data the gateway is treated as HTTP (port 80); with a cert the
    password is mandatory, the cert file is base64-encoded in place, and the
    default port becomes 443.
    """
    params = [namespace.cert_data, namespace.cert_password]
    if all([not x for x in params]):
        # no cert supplied -- use HTTP
        if not namespace.frontend_port:
            namespace.frontend_port = 80
    else:
        # cert supplied -- use HTTPS
        if not all(params):
            raise CLIError(
                None, 'To use SSL certificate, you must specify both the filename and password')
        # extract the certificate data from the provided file
        namespace.cert_data = read_base_64_file(namespace.cert_data)
        try:
            # change default to frontend port 443 for https
            if not namespace.frontend_port:
                namespace.frontend_port = 443
        except AttributeError:
            # app-gateway ssl-cert create does not have these fields and that is okay
            pass
def validate_delegations(cmd, namespace):
    """Convert shorthand delegation names (Microsoft.X.Y) into Delegation models."""
    if not namespace.delegations:
        return
    Delegation = cmd.get_models('Delegation')
    converted = []
    for index, value in enumerate(namespace.delegations):
        if '/' not in value and len(value.split('.')) == 3:
            # convert names to serviceNames
            _, service, resource_type = value.split('.')
            value = 'Microsoft.{}/{}'.format(service, resource_type)
        converted.append(Delegation(name=str(index), service_name=value))
    namespace.delegations = converted
def validate_dns_record_type(namespace):
    """Infer the DNS record type from the command text and store it on the namespace."""
    known_types = ('a', 'aaaa', 'caa', 'cname', 'mx', 'ns', 'ptr', 'soa', 'srv', 'txt')
    for token in namespace.command.split(' '):
        if token not in known_types:
            continue
        # commands expose either `record_type` or `record_set_type`
        if hasattr(namespace, 'record_type'):
            namespace.record_type = token
        else:
            namespace.record_set_type = token
        return
def validate_express_route_peering(cmd, namespace):
    """Expand --peering into a full peering resource ID under --circuit-name."""
    from msrestazure.tools import is_valid_resource_id, resource_id
    circuit = namespace.circuit_name
    peering = namespace.peering
    if not circuit and not peering:
        return
    usage_error = CLIError('usage error: --peering ID | --peering NAME --circuit-name CIRCUIT')
    if not is_valid_resource_id(peering):
        namespace.peering = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=namespace.resource_group_name,
            namespace='Microsoft.Network',
            type='expressRouteCircuits',
            name=circuit,
            child_type_1='peerings',
            child_name_1=peering
        )
    elif circuit:
        # a full peering ID combined with --circuit-name is ambiguous
        raise usage_error
def validate_express_route_port(cmd, namespace):
    """Expand an ExpressRoute port name into its full ARM resource ID."""
    from msrestazure.tools import is_valid_resource_id, resource_id
    port = namespace.express_route_port
    if not port or is_valid_resource_id(port):
        return
    namespace.express_route_port = resource_id(
        subscription=get_subscription_id(cmd.cli_ctx),
        resource_group=namespace.resource_group_name,
        namespace='Microsoft.Network',
        type='expressRoutePorts',
        name=port
    )
def validate_virtual_hub(cmd, namespace):
    """Expand a virtual hub name into its full ARM resource ID."""
    from msrestazure.tools import is_valid_resource_id, resource_id
    hub = namespace.virtual_hub
    if not hub or is_valid_resource_id(hub):
        return
    namespace.virtual_hub = resource_id(
        subscription=get_subscription_id(cmd.cli_ctx),
        resource_group=namespace.resource_group_name,
        namespace='Microsoft.Network',
        type='virtualHubs',
        name=hub
    )
def bandwidth_validator_factory(mbps=True):
    """Return a namespace validator that normalizes --bandwidth to Mbps (or Gbps)."""
    def validator(namespace):
        return validate_circuit_bandwidth(namespace, mbps=mbps)
    return validator
def validate_circuit_bandwidth(namespace, mbps=True):
    """Normalize --bandwidth input ("100", "100 Mbps", "1 Gbps", ...) to a float.

    The converted value is stored back on the namespace as ``bandwidth_in_mbps``
    or ``bandwidth_in_gbps`` depending on ``mbps``. Raises CLIError on
    malformed input (previously a non-numeric value leaked a raw ValueError).
    """
    # use gbps if mbps is False
    unit = 'mbps' if mbps else 'gbps'
    bandwidth = getattr(namespace, 'bandwidth_in_{}'.format(unit), None)
    if bandwidth is None:
        return
    # nargs may deliver either ["100 Mbps"] or ["100", "Mbps"]
    if len(bandwidth) == 1:
        bandwidth_comps = bandwidth[0].split(' ')
    else:
        bandwidth_comps = bandwidth
    usage_error = CLIError('usage error: --bandwidth INT {Mbps,Gbps}')
    try:
        value = float(bandwidth_comps[0])
    except (ValueError, IndexError):
        # was: float() raised an unhandled ValueError for non-numeric input
        raise usage_error
    if len(bandwidth_comps) == 1:
        # typo fix: "interpretting" -> "interpreting"
        logger.warning('interpreting --bandwidth as %s. Consider being explicit: Mbps, Gbps', unit)
        setattr(namespace, 'bandwidth_in_{}'.format(unit), value)
        return
    if len(bandwidth_comps) > 2:
        raise usage_error
    if value and bandwidth_comps[1].lower() in ['mbps', 'gbps']:
        input_unit = bandwidth_comps[1].lower()
        if input_unit == unit:
            converted_bandwidth = value
        elif input_unit == 'gbps':
            converted_bandwidth = value * 1000
        else:
            converted_bandwidth = value / 1000
        setattr(namespace, 'bandwidth_in_{}'.format(unit), converted_bandwidth)
    else:
        raise usage_error
def validate_er_peer_circuit(cmd, namespace):
    """Normalize --peer-circuit into a full .../peerings/<peering_name> resource ID."""
    from msrestazure.tools import resource_id, is_valid_resource_id
    if not is_valid_resource_id(namespace.peer_circuit):
        peer_id = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=namespace.resource_group_name,
            namespace='Microsoft.Network',
            type='expressRouteCircuits',
            name=namespace.peer_circuit,
            child_type_1='peerings',
            child_name_1=namespace.peering_name)
    else:
        peer_id = namespace.peer_circuit
        # if the circuit ID is provided, we need to append /peerings/{peering_name}
        if namespace.peering_name not in peer_id:
            peer_id = '{}/peerings/{}'.format(peer_id, namespace.peering_name)
    namespace.peer_circuit = peer_id
def validate_inbound_nat_rule_id_list(cmd, namespace):
    """Expand inbound NAT rule names into load balancer child-resource IDs."""
    _generate_lb_id_list_from_names_or_ids(
        cmd.cli_ctx, namespace, 'load_balancer_inbound_nat_rule_ids', 'inboundNatRules')
def validate_inbound_nat_rule_name_or_id(cmd, namespace):
    """Ensure --inbound-nat-rule is a full ID, expanding a bare name via --lb-name."""
    from msrestazure.tools import is_valid_resource_id
    rule = namespace.inbound_nat_rule
    lb = namespace.load_balancer_name
    if is_valid_resource_id(rule):
        if lb:
            raise CLIError('Please omit --lb-name when specifying an inbound NAT rule ID.')
        return
    if not lb:
        raise CLIError('Please specify --lb-name when specifying an inbound NAT rule name.')
    namespace.inbound_nat_rule = _generate_lb_subproperty_id(
        cmd.cli_ctx, namespace, 'inboundNatRules', rule)
def validate_ip_tags(cmd, namespace):
    """Parse space-separated TYPE=VALUE entries in --ip-tags into IpTag models."""
    IpTag = cmd.get_models('IpTag')
    if namespace.ip_tags and IpTag:
        namespace.ip_tags = [
            IpTag(ip_tag_type=tag_type, tag=tag_value)
            for tag_type, tag_value in (entry.split('=', 1) for entry in namespace.ip_tags)
        ]
def validate_frontend_ip_configs(cmd, namespace):
    """Expand frontend IP configuration names into full load balancer child IDs."""
    from msrestazure.tools import is_valid_resource_id
    if namespace.frontend_ip_configurations:
        namespace.frontend_ip_configurations = [
            item if is_valid_resource_id(item) else _generate_lb_subproperty_id(
                cmd.cli_ctx, namespace, 'frontendIpConfigurations', item)
            for item in namespace.frontend_ip_configurations
        ]
def validate_local_gateway(cmd, namespace):
    """Expand --gateway-default-site into a full localNetworkGateways resource ID."""
    from msrestazure.tools import is_valid_resource_id, resource_id
    if namespace.gateway_default_site and not is_valid_resource_id(namespace.gateway_default_site):
        namespace.gateway_default_site = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=namespace.resource_group_name,
            name=namespace.gateway_default_site,
            namespace='Microsoft.Network',
            type='localNetworkGateways')
def validate_metadata(namespace):
    """Convert space-separated KEY=VALUE entries in --metadata into a dict."""
    if namespace.metadata:
        pairs = (entry.split('=', 1) for entry in namespace.metadata)
        namespace.metadata = {key: value for key, value in pairs}
def validate_peering_type(namespace):
    """MicrosoftPeering requires --advertised-public-prefixes; enforce that pairing."""
    if namespace.peering_type and namespace.peering_type == 'MicrosoftPeering':
        if not namespace.advertised_public_prefixes:
            raise CLIError(
                'missing required MicrosoftPeering parameter --advertised-public-prefixes')
def validate_public_ip_prefix(cmd, namespace):
    """Expand a public IP prefix name into its full ARM resource ID."""
    from msrestazure.tools import is_valid_resource_id, resource_id
    prefix = namespace.public_ip_prefix
    if not prefix or is_valid_resource_id(prefix):
        return
    namespace.public_ip_prefix = resource_id(
        subscription=get_subscription_id(cmd.cli_ctx),
        resource_group=namespace.resource_group_name,
        name=prefix,
        namespace='Microsoft.Network',
        type='publicIPPrefixes')
def validate_nat_gateway(cmd, namespace):
    """Expand a NAT gateway name into its full ARM resource ID."""
    from msrestazure.tools import is_valid_resource_id, resource_id
    gateway = namespace.nat_gateway
    if not gateway or is_valid_resource_id(gateway):
        return
    namespace.nat_gateway = resource_id(
        subscription=get_subscription_id(cmd.cli_ctx),
        resource_group=namespace.resource_group_name,
        name=gateway,
        namespace='Microsoft.Network',
        type='natGateways')
def validate_private_ip_address(namespace):
    """Force static allocation whenever an explicit private IP address was supplied."""
    supports_allocation = hasattr(namespace, 'private_ip_address_allocation')
    if namespace.private_ip_address and supports_allocation:
        namespace.private_ip_address_allocation = 'static'
def validate_route_filter(cmd, namespace):
    """Expand --route-filter into a full routeFilters resource ID."""
    from msrestazure.tools import is_valid_resource_id, resource_id
    if namespace.route_filter:
        if not is_valid_resource_id(namespace.route_filter):
            namespace.route_filter = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx),
                resource_group=namespace.resource_group_name,
                namespace='Microsoft.Network',
                type='routeFilters',
                name=namespace.route_filter)
def get_public_ip_validator(has_type_field=False, allow_none=False, allow_new=False,
                            default_none=False):
    """ Retrieves a validator for public IP address. Accepting all defaults will perform a check
    for an existing name or ID with no ARM-required -type parameter. """
    from msrestazure.tools import is_valid_resource_id, resource_id
    def simple_validator(cmd, namespace):
        # Expand a bare name (or list of names) into full publicIPAddresses IDs.
        if namespace.public_ip_address:
            is_list = isinstance(namespace.public_ip_address, list)
            def _validate_name_or_id(public_ip):
                # determine if public_ip_address is name or ID
                is_id = is_valid_resource_id(public_ip)
                return public_ip if is_id else resource_id(
                    subscription=get_subscription_id(cmd.cli_ctx),
                    resource_group=namespace.resource_group_name,
                    namespace='Microsoft.Network',
                    type='publicIPAddresses',
                    name=public_ip)
            if is_list:
                for i, public_ip in enumerate(namespace.public_ip_address):
                    namespace.public_ip_address[i] = _validate_name_or_id(public_ip)
            else:
                namespace.public_ip_address = _validate_name_or_id(namespace.public_ip_address)
    def complex_validator_with_type(cmd, namespace):
        # Folded-parameter validation also handles the companion -type argument.
        get_folded_parameter_validator(
            'public_ip_address', 'Microsoft.Network/publicIPAddresses', '--public-ip-address',
            allow_none=allow_none, allow_new=allow_new, default_none=default_none)(cmd, namespace)
    return complex_validator_with_type if has_type_field else simple_validator
def get_subnet_validator(has_type_field=False, allow_none=False, allow_new=False,
                         default_none=False):
    """Retrieve a validator for --subnet, accepting either a full ID or a name
    combined with --vnet-name."""
    from msrestazure.tools import is_valid_resource_id, resource_id
    def simple_validator(cmd, namespace):
        if namespace.virtual_network_name is None and namespace.subnet is None:
            return
        # '' is the explicit "no subnet" value; leave it alone
        if namespace.subnet == '':
            return
        usage_error = ValueError('incorrect usage: ( --subnet ID | --subnet NAME --vnet-name NAME)')
        # error if vnet-name is provided without subnet
        if namespace.virtual_network_name and not namespace.subnet:
            raise usage_error
        # determine if subnet is name or ID
        is_id = is_valid_resource_id(namespace.subnet)
        # error if vnet-name is provided along with a subnet ID
        if is_id and namespace.virtual_network_name:
            raise usage_error
        if not is_id and not namespace.virtual_network_name:
            raise usage_error
        if not is_id:
            namespace.subnet = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx),
                resource_group=namespace.resource_group_name,
                namespace='Microsoft.Network',
                type='virtualNetworks',
                name=namespace.virtual_network_name,
                child_type_1='subnets',
                child_name_1=namespace.subnet)
    def complex_validator_with_type(cmd, namespace):
        # Folded-parameter validation also handles the companion -type arguments.
        get_folded_parameter_validator(
            'subnet', 'subnets', '--subnet',
            'virtual_network_name', 'Microsoft.Network/virtualNetworks', '--vnet-name',
            allow_none=allow_none, allow_new=allow_new, default_none=default_none)(cmd, namespace)
    return complex_validator_with_type if has_type_field else simple_validator
def get_nsg_validator(has_type_field=False, allow_none=False, allow_new=False, default_none=False):
    """Retrieve a validator that expands an NSG name into a full ARM resource ID."""
    from msrestazure.tools import is_valid_resource_id, resource_id
    def simple_validator(cmd, namespace):
        if namespace.network_security_group:
            # determine if network_security_group is name or ID
            is_id = is_valid_resource_id(namespace.network_security_group)
            if not is_id:
                namespace.network_security_group = resource_id(
                    subscription=get_subscription_id(cmd.cli_ctx),
                    resource_group=namespace.resource_group_name,
                    namespace='Microsoft.Network',
                    type='networkSecurityGroups',
                    name=namespace.network_security_group)
    def complex_validator_with_type(cmd, namespace):
        # Folded-parameter validation also handles the companion -type argument.
        get_folded_parameter_validator(
            'network_security_group', 'Microsoft.Network/networkSecurityGroups', '--nsg',
            allow_none=allow_none, allow_new=allow_new, default_none=default_none)(cmd, namespace)
    return complex_validator_with_type if has_type_field else simple_validator
def validate_service_endpoint_policy(cmd, namespace):
    """Expand service endpoint policy names into full ARM resource IDs."""
    from msrestazure.tools import is_valid_resource_id, resource_id
    if not namespace.service_endpoint_policy:
        return
    resolved = []
    for policy in namespace.service_endpoint_policy:
        if not is_valid_resource_id(policy):
            policy = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx),
                resource_group=namespace.resource_group_name,
                name=policy,
                namespace='Microsoft.Network',
                type='serviceEndpointPolicies')
        resolved.append(policy)
    namespace.service_endpoint_policy = resolved
def get_servers_validator(camel_case=False):
    """Build a validator that classifies each --servers entry as an IP or an FQDN."""
    ip_key = 'ipAddress' if camel_case else 'ip_address'
    def validate_servers(namespace):
        entries = []
        for server in namespace.servers or []:
            try:
                socket.inet_aton(server)  # pylint:disable=no-member
            except socket.error:  # pylint:disable=no-member
                entries.append({'fqdn': server})
            else:
                entries.append({ip_key: server})
        namespace.servers = entries
    return validate_servers
def validate_subresource_list(cmd, namespace):
    """Wrap each target resource ID in a SubResource model."""
    if namespace.target_resources:
        SubResource = cmd.get_models('SubResource')
        namespace.target_resources = [
            SubResource(id=resource) for resource in namespace.target_resources
        ]
def validate_target_listener(cmd, namespace):
    """Expand --target-listener into a full httpListeners child-resource ID."""
    from msrestazure.tools import is_valid_resource_id, resource_id
    if namespace.target_listener and not is_valid_resource_id(namespace.target_listener):
        namespace.target_listener = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=namespace.resource_group_name,
            name=namespace.application_gateway_name,
            namespace='Microsoft.Network',
            type='applicationGateways',
            child_type_1='httpListeners',
            child_name_1=namespace.target_listener)
def get_virtual_network_validator(has_type_field=False, allow_none=False, allow_new=False,
                                  default_none=False):
    """Retrieve a validator that expands a vnet name into a full ARM resource ID."""
    from msrestazure.tools import is_valid_resource_id, resource_id
    def simple_validator(cmd, namespace):
        if namespace.virtual_network:
            # determine if vnet is name or ID
            is_id = is_valid_resource_id(namespace.virtual_network)
            if not is_id:
                namespace.virtual_network = resource_id(
                    subscription=get_subscription_id(cmd.cli_ctx),
                    resource_group=namespace.resource_group_name,
                    namespace='Microsoft.Network',
                    type='virtualNetworks',
                    name=namespace.virtual_network)
    def complex_validator_with_type(cmd, namespace):
        # Folded-parameter validation also handles the companion -type argument.
        get_folded_parameter_validator(
            'virtual_network', 'Microsoft.Network/virtualNetworks', '--vnet',
            allow_none=allow_none, allow_new=allow_new, default_none=default_none)(cmd, namespace)
    return complex_validator_with_type if has_type_field else simple_validator
# COMMAND NAMESPACE VALIDATORS
def process_ag_listener_create_namespace(cmd, namespace): # pylint: disable=unused-argument
    """Expand frontend IP, frontend port and SSL cert names into app-gateway child IDs."""
    from msrestazure.tools import is_valid_resource_id
    if namespace.frontend_ip and not is_valid_resource_id(namespace.frontend_ip):
        namespace.frontend_ip = _generate_ag_subproperty_id(
            cmd.cli_ctx, namespace, 'frontendIpConfigurations', namespace.frontend_ip)
    if namespace.frontend_port and not is_valid_resource_id(namespace.frontend_port):
        namespace.frontend_port = _generate_ag_subproperty_id(
            cmd.cli_ctx, namespace, 'frontendPorts', namespace.frontend_port)
    if namespace.ssl_cert and not is_valid_resource_id(namespace.ssl_cert):
        namespace.ssl_cert = _generate_ag_subproperty_id(
            cmd.cli_ctx, namespace, 'sslCertificates', namespace.ssl_cert)
def process_ag_http_settings_create_namespace(cmd, namespace): # pylint: disable=unused-argument
    """Expand probe and auth-certificate names into app-gateway child-resource IDs."""
    from msrestazure.tools import is_valid_resource_id
    if namespace.probe and not is_valid_resource_id(namespace.probe):
        namespace.probe = _generate_ag_subproperty_id(
            cmd.cli_ctx, namespace, 'probes', namespace.probe)
    if namespace.auth_certs:
        def _validate_name_or_id(val):
            return val if is_valid_resource_id(val) else _generate_ag_subproperty_id(
                cmd.cli_ctx, namespace, 'authenticationCertificates', val)
        namespace.auth_certs = [_validate_name_or_id(x) for x in namespace.auth_certs]
def process_ag_rule_create_namespace(cmd, namespace): # pylint: disable=unused-argument
    """Expand address pool, listener, HTTP settings, URL path map and redirect
    config names into app-gateway child-resource IDs."""
    from msrestazure.tools import is_valid_resource_id
    if namespace.address_pool and not is_valid_resource_id(namespace.address_pool):
        namespace.address_pool = _generate_ag_subproperty_id(
            cmd.cli_ctx, namespace, 'backendAddressPools', namespace.address_pool)
    if namespace.http_listener and not is_valid_resource_id(namespace.http_listener):
        namespace.http_listener = _generate_ag_subproperty_id(
            cmd.cli_ctx, namespace, 'httpListeners', namespace.http_listener)
    if namespace.http_settings and not is_valid_resource_id(namespace.http_settings):
        namespace.http_settings = _generate_ag_subproperty_id(
            cmd.cli_ctx, namespace, 'backendHttpSettingsCollection', namespace.http_settings)
    if namespace.url_path_map and not is_valid_resource_id(namespace.url_path_map):
        namespace.url_path_map = _generate_ag_subproperty_id(
            cmd.cli_ctx, namespace, 'urlPathMaps', namespace.url_path_map)
    if namespace.redirect_config and not is_valid_resource_id(namespace.redirect_config):
        namespace.redirect_config = _generate_ag_subproperty_id(
            cmd.cli_ctx, namespace, 'redirectConfigurations', namespace.redirect_config)
def process_ag_ssl_policy_set_namespace(namespace):
    """Reject combining --disabled-ssl-protocols with --clear."""
    if not namespace.disabled_ssl_protocols:
        return
    if getattr(namespace, 'clear', None):
        raise ValueError('incorrect usage: --disabled-ssl-protocols PROTOCOL [...] | --clear')
def process_ag_url_path_map_create_namespace(cmd, namespace): # pylint: disable=unused-argument
    """Expand default pool/settings/redirect names into app-gateway child IDs,
    then delegate rule-level processing when a rule name is present."""
    from msrestazure.tools import is_valid_resource_id
    if namespace.default_address_pool and not is_valid_resource_id(namespace.default_address_pool):
        namespace.default_address_pool = _generate_ag_subproperty_id(
            cmd.cli_ctx, namespace, 'backendAddressPools', namespace.default_address_pool)
    if namespace.default_http_settings and not is_valid_resource_id(
            namespace.default_http_settings):
        namespace.default_http_settings = _generate_ag_subproperty_id(
            cmd.cli_ctx, namespace, 'backendHttpSettingsCollection', namespace.default_http_settings)
    if namespace.default_redirect_config and not is_valid_resource_id(
            namespace.default_redirect_config):
        namespace.default_redirect_config = _generate_ag_subproperty_id(
            cmd.cli_ctx, namespace, 'redirectConfigurations', namespace.default_redirect_config)
    if hasattr(namespace, 'rule_name'):
        process_ag_url_path_map_rule_create_namespace(cmd, namespace)
def process_ag_url_path_map_rule_create_namespace(cmd, namespace): # pylint: disable=unused-argument
    """Expand rule-level pool/settings/redirect names into app-gateway child IDs."""
    from msrestazure.tools import is_valid_resource_id
    if namespace.address_pool and not is_valid_resource_id(namespace.address_pool):
        namespace.address_pool = _generate_ag_subproperty_id(
            cmd.cli_ctx, namespace, 'backendAddressPools', namespace.address_pool)
    if namespace.http_settings and not is_valid_resource_id(namespace.http_settings):
        namespace.http_settings = _generate_ag_subproperty_id(
            cmd.cli_ctx, namespace, 'backendHttpSettingsCollection', namespace.http_settings)
    if namespace.redirect_config and not is_valid_resource_id(
            namespace.redirect_config):
        namespace.redirect_config = _generate_ag_subproperty_id(
            cmd.cli_ctx, namespace, 'redirectConfigurations', namespace.redirect_config)
def process_ag_create_namespace(cmd, namespace):
    """Run the full validation pipeline for `network application-gateway create`."""
    get_default_location_from_resource_group(cmd, namespace)
    get_servers_validator(camel_case=True)(namespace)
    # process folded parameters
    if namespace.subnet or namespace.virtual_network_name:
        get_subnet_validator(has_type_field=True, allow_new=True)(cmd, namespace)
    validate_address_prefixes(namespace)
    if namespace.public_ip_address:
        get_public_ip_validator(
            has_type_field=True, allow_none=True, allow_new=True, default_none=True)(cmd, namespace)
    validate_ssl_cert(namespace)
    validate_tags(namespace)
    validate_custom_error_pages(namespace)
def process_auth_create_namespace(cmd, namespace):
    """Attach an empty ExpressRouteCircuitAuthorization model to the namespace."""
    auth_model = cmd.get_models('ExpressRouteCircuitAuthorization')
    namespace.authorization_parameters = auth_model()
def process_lb_create_namespace(cmd, namespace):
    """Validate `network lb create`, deciding internal vs internet-facing mode.

    --subnet and --public-ip-address are mutually exclusive; whichever side is
    chosen has its counterpart arguments cleared.
    """
    get_default_location_from_resource_group(cmd, namespace)
    validate_tags(namespace)
    if namespace.subnet and namespace.public_ip_address:
        raise ValueError(
            'incorrect usage: --subnet NAME --vnet-name NAME | '
            '--subnet ID | --public-ip-address NAME_OR_ID')
    if namespace.subnet:
        # validation for an internal load balancer
        get_subnet_validator(
            has_type_field=True, allow_new=True, allow_none=True, default_none=True)(cmd, namespace)
        namespace.public_ip_address_type = None
        namespace.public_ip_address = None
    else:
        # validation for internet facing load balancer
        get_public_ip_validator(has_type_field=True, allow_none=True, allow_new=True)(cmd, namespace)
        if namespace.public_ip_dns_name and namespace.public_ip_address_type != 'new':
            raise CLIError(
                'specify --public-ip-dns-name only if creating a new public IP address.')
        namespace.subnet_type = None
        namespace.subnet = None
        namespace.virtual_network_name = None
def process_lb_frontend_ip_namespace(cmd, namespace):
    """Validate LB frontend IP arguments: expand the prefix ID and resolve
    either --subnet or --public-ip (mutually exclusive)."""
    from msrestazure.tools import is_valid_resource_id, resource_id
    if namespace.subnet and namespace.public_ip_address:
        raise ValueError(
            'incorrect usage: --subnet NAME --vnet-name NAME | '
            '--subnet ID | --public-ip NAME_OR_ID')
    if namespace.public_ip_prefix:
        if not is_valid_resource_id(namespace.public_ip_prefix):
            namespace.public_ip_prefix = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx),
                resource_group=namespace.resource_group_name,
                namespace='Microsoft.Network',
                type='publicIpPrefixes',
                name=namespace.public_ip_prefix)
    if namespace.subnet:
        get_subnet_validator()(cmd, namespace)
    else:
        get_public_ip_validator()(cmd, namespace)
def process_local_gateway_create_namespace(cmd, namespace):
    """Default the location, validate tags, and require BGP arguments as a set."""
    get_default_location_from_resource_group(cmd, namespace)
    validate_tags(namespace)
    wants_bgp = bool(namespace.asn or namespace.bgp_peering_address or namespace.peer_weight)
    if wants_bgp and not (namespace.asn and namespace.bgp_peering_address):
        raise ValueError(
            'incorrect usage: --bgp-peering-address IP --asn ASN [--peer-weight WEIGHT]')
def process_nic_create_namespace(cmd, namespace):
    """Run the full validation pipeline for `network nic create`."""
    get_default_location_from_resource_group(cmd, namespace)
    validate_tags(namespace)
    validate_ag_address_pools(cmd, namespace)
    validate_address_pool_id_list(cmd, namespace)
    validate_inbound_nat_rule_id_list(cmd, namespace)
    get_asg_validator(cmd.loader, 'application_security_groups')(cmd, namespace)
    # process folded parameters
    get_subnet_validator(has_type_field=False)(cmd, namespace)
    get_public_ip_validator(has_type_field=False, allow_none=True, default_none=True)(cmd, namespace)
    get_nsg_validator(has_type_field=False, allow_none=True, default_none=True)(cmd, namespace)
def process_public_ip_create_namespace(cmd, namespace):
    """Run the validation pipeline for `network public-ip create`."""
    get_default_location_from_resource_group(cmd, namespace)
    validate_public_ip_prefix(cmd, namespace)
    validate_ip_tags(cmd, namespace)
    validate_tags(namespace)
def process_route_table_create_namespace(cmd, namespace):
    """Validate arguments for `network route-table create`."""
    # Default --location from the resource group when not supplied explicitly.
    get_default_location_from_resource_group(cmd, namespace)
    validate_tags(namespace)
def process_tm_endpoint_create_namespace(cmd, namespace):
    """Validate `traffic-manager endpoint create` arguments.

    The set of required/forbidden options depends on both the endpoint type
    and the profile's traffic-routing method, so the profile is fetched first
    and the option matrix is computed from it. Raises CLIError listing any
    missing or superfluous options.
    """
    from azure.mgmt.trafficmanager import TrafficManagerManagementClient
    client = get_mgmt_service_client(cmd.cli_ctx, TrafficManagerManagementClient).profiles
    profile = client.get(namespace.resource_group_name, namespace.profile_name)
    routing_type = profile.traffic_routing_method # pylint: disable=no-member
    endpoint_type = namespace.endpoint_type
    all_options = ['target_resource_id', 'target', 'min_child_endpoints', 'priority', 'weight', 'endpoint_location']
    props_to_options = {
        'target_resource_id': '--target-resource-id',
        'target': '--target',
        'min_child_endpoints': '--min-child-endpoints',
        'priority': '--priority',
        'weight': '--weight',
        'endpoint_location': '--endpoint-location',
        'geo_mapping': '--geo-mapping'
    }
    validate_subnet_ranges(namespace)
    validate_custom_headers(namespace)
    required_options = []
    # determine which options are required based on profile and routing method
    if endpoint_type.lower() == 'externalendpoints':
        required_options.append('target')
    else:
        required_options.append('target_resource_id')
    if routing_type.lower() == 'weighted':
        required_options.append('weight')
    elif routing_type.lower() == 'priority':
        required_options.append('priority')
    if endpoint_type.lower() == 'nestedendpoints':
        required_options.append('min_child_endpoints')
    if endpoint_type.lower() in ['nestedendpoints', 'externalendpoints'] and routing_type.lower() == 'performance':
        required_options.append('endpoint_location')
    if routing_type.lower() == 'geographic':
        required_options.append('geo_mapping')
    # ensure required options are provided
    missing_options = [props_to_options[x] for x in required_options if getattr(namespace, x, None) is None]
    extra_options = [props_to_options[x] for x in all_options if
                     getattr(namespace, x, None) is not None and x not in required_options]
    if missing_options or extra_options:
        error_message = "Incorrect options for profile routing method '{}' and endpoint type '{}'.".format(routing_type,
                                                                                                           endpoint_type) # pylint: disable=line-too-long
        if missing_options:
            error_message = '{}\nSupply the following: {}'.format(error_message, ', '.join(
                missing_options))
        if extra_options:
            error_message = '{}\nOmit the following: {}'.format(error_message, ', '.join(
                extra_options))
        raise CLIError(error_message)
def process_vnet_create_namespace(cmd, namespace):
    """Validate `vnet create` arguments and derive a default subnet prefix."""
    get_default_location_from_resource_group(cmd, namespace)
    validate_ddos_name_or_id(cmd, namespace)
    validate_tags(namespace)
    # A subnet prefix without a subnet name is meaningless.
    if namespace.subnet_prefix and not namespace.subnet_name:
        if cmd.supported_api_version(min_api='2018-08-01'):
            raise ValueError('incorrect usage: --subnet-name NAME [--subnet-prefixes PREFIXES]')
        raise ValueError('incorrect usage: --subnet-name NAME [--subnet-prefix PREFIX]')
    # A subnet name without a prefix: carve a default /24 (or the vnet's own
    # mask, if it is already narrower than /24) out of the first vnet prefix.
    if namespace.subnet_name and not namespace.subnet_prefix:
        if isinstance(namespace.vnet_prefixes, str):
            namespace.vnet_prefixes = [namespace.vnet_prefixes]
        address, mask_text = namespace.vnet_prefixes[0].split('/', 1)
        derived_mask = max(int(mask_text), 24)
        derived_prefix = '{}/{}'.format(address, derived_mask)
        # Newer API versions take a list of prefixes; older ones a single string.
        if cmd.supported_api_version(min_api='2018-08-01'):
            namespace.subnet_prefix = [derived_prefix]
        else:
            namespace.subnet_prefix = derived_prefix
def process_vnet_gateway_create_namespace(cmd, namespace):
    """Validate `vnet-gateway create`: public IP count and the BGP option group."""
    get_default_location_from_resource_group(cmd, namespace)
    validate_tags(namespace)
    get_virtual_network_validator()(cmd, namespace)
    get_public_ip_validator()(cmd, namespace)
    # One public IP -> active-standby, two -> active-active; more is invalid.
    if len(namespace.public_ip_address or []) > 2:
        raise CLIError('Specify a single public IP to create an active-standby gateway or two '
                       'public IPs to create an active-active gateway.')
    validate_local_gateway(cmd, namespace)
    # Any BGP option implies BGP, and BGP always requires an ASN.
    wants_bgp = any([namespace.asn, namespace.bgp_peering_address, namespace.peer_weight])
    if wants_bgp and not namespace.asn:
        raise ValueError(
            'incorrect usage: --asn ASN [--peer-weight WEIGHT --bgp-peering-address IP ]')
def process_vnet_gateway_update_namespace(cmd, namespace):
    """Validate `vnet-gateway update`: at most two public IPs may be attached."""
    get_virtual_network_validator()(cmd, namespace)
    get_public_ip_validator()(cmd, namespace)
    validate_tags(namespace)
    attached_ips = namespace.public_ip_address or []
    if len(attached_ips) > 2:
        raise CLIError('Specify a single public IP to create an active-standby gateway or two '
                       'public IPs to create an active-active gateway.')
def process_vpn_connection_create_namespace(cmd, namespace):
    """Validate `vpn-connection create` arguments.

    Exactly one "second endpoint" (--vnet-gateway2 / --local-gateway2 /
    --express-route-circuit2) must be given; it determines the connection type
    and whether --shared-key is required. Bare names are expanded into full
    resource IDs.
    """
    from msrestazure.tools import is_valid_resource_id, resource_id
    get_default_location_from_resource_group(cmd, namespace)
    validate_tags(namespace)
    args = [a for a in [namespace.express_route_circuit2,
                        namespace.local_gateway2,
                        namespace.vnet_gateway2]
            if a]
    if len(args) != 1:
        raise ValueError('usage error: --vnet-gateway2 NAME_OR_ID | --local-gateway2 NAME_OR_ID '
                         '| --express-route-circuit2 NAME_OR_ID')
    # Expand a bare name into a full resource ID of the given type.
    def _validate_name_or_id(value, resource_type):
        if not is_valid_resource_id(value):
            subscription = getattr(namespace, 'subscription', get_subscription_id(cmd.cli_ctx))
            return resource_id(
                subscription=subscription,
                resource_group=namespace.resource_group_name,
                namespace='Microsoft.Network',
                type=resource_type,
                name=value)
        return value
    # Shared key is mandatory for IPSec/VNET-to-VNET but forbidden for ExpressRoute.
    if (namespace.local_gateway2 or namespace.vnet_gateway2) and not namespace.shared_key:
        raise CLIError('--shared-key is required for VNET-to-VNET or Site-to-Site connections.')
    if namespace.express_route_circuit2 and namespace.shared_key:
        raise CLIError('--shared-key cannot be used with an ExpressRoute connection.')
    namespace.vnet_gateway1 = \
        _validate_name_or_id(namespace.vnet_gateway1, 'virtualNetworkGateways')
    # The chosen second endpoint fixes the connection type.
    if namespace.express_route_circuit2:
        namespace.express_route_circuit2 = \
            _validate_name_or_id(
                namespace.express_route_circuit2, 'expressRouteCircuits')
        namespace.connection_type = 'ExpressRoute'
    elif namespace.local_gateway2:
        namespace.local_gateway2 = \
            _validate_name_or_id(namespace.local_gateway2, 'localNetworkGateways')
        namespace.connection_type = 'IPSec'
    elif namespace.vnet_gateway2:
        namespace.vnet_gateway2 = \
            _validate_name_or_id(namespace.vnet_gateway2, 'virtualNetworkGateways')
        namespace.connection_type = 'Vnet2Vnet'
def load_cert_file(param_name):
    """Return a validator that replaces a file-path argument with the
    base64-encoded contents of that file.

    The attribute named *param_name* is rewritten only when it is truthy and
    points at an existing file; otherwise it is left untouched.
    """
    def load_cert_validator(namespace):
        path = getattr(namespace, param_name)
        if not path:
            return
        if os.path.isfile(path):
            setattr(namespace, param_name, read_base_64_file(path))
    return load_cert_validator
def get_network_watcher_from_vm(cmd, namespace):
    """Resolve the Network Watcher covering the region of ``namespace.vm``.

    Looks up the VM to learn its location, stores it on the namespace, then
    delegates to the location-based watcher resolver.
    """
    from msrestazure.tools import parse_resource_id
    compute_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_COMPUTE).virtual_machines
    vm_name = parse_resource_id(namespace.vm)['name']
    vm = compute_client.get(namespace.resource_group_name, vm_name)
    namespace.location = vm.location # pylint: disable=no-member
    get_network_watcher_from_location()(cmd, namespace)
def get_network_watcher_from_resource(cmd, namespace):
    """Resolve the Network Watcher covering the region of ``namespace.resource``.

    The resolved watcher name/resource group are stored on the namespace and
    the consumed ``location`` attribute is removed afterwards (remove=True).
    """
    from azure.cli.core.commands.arm import get_arm_resource_by_id
    resource = get_arm_resource_by_id(cmd.cli_ctx, namespace.resource)
    namespace.location = resource.location # pylint: disable=no-member
    get_network_watcher_from_location(remove=True)(cmd, namespace)
def get_network_watcher_from_location(remove=False, watcher_name='watcher_name',
                                      rg_name='watcher_rg'):
    """Build a validator that resolves the Network Watcher for ``namespace.location``.

    The watcher's resource group and name are stored on the namespace under the
    attribute names *rg_name* and *watcher_name*; with remove=True the consumed
    ``location`` attribute is deleted afterwards.
    """
    def _validator(cmd, namespace):
        from msrestazure.tools import parse_resource_id
        region = namespace.location
        watchers = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK).network_watchers
        # Pick the (single) watcher whose location matches, case-insensitively.
        match = None
        for candidate in watchers.list_all():
            if candidate.location.lower() == region.lower():
                match = candidate
                break
        if not match:
            raise CLIError("network watcher is not enabled for region '{}'.".format(region))
        id_parts = parse_resource_id(match.id)
        setattr(namespace, rg_name, id_parts['resource_group'])
        setattr(namespace, watcher_name, id_parts['name'])
        if remove:
            del namespace.location
    return _validator
def process_nw_cm_create_namespace(cmd, namespace):
    """Validate `network watcher connection-monitor create` arguments.

    Resolves the watcher from the source VM's region and expands bare
    source/destination VM names into full resource IDs.
    """
    from msrestazure.tools import is_valid_resource_id, resource_id, parse_resource_id
    validate_tags(namespace)
    compute_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_COMPUTE).virtual_machines
    vm_name = parse_resource_id(namespace.source_resource)['name']
    # The resource group may come from --resource-group or be embedded in the ID.
    rg = namespace.resource_group_name or parse_resource_id(namespace.source_resource).get('resource_group', None)
    if not rg:
        raise CLIError('usage error: --source-resource ID | --source-resource NAME --resource-group NAME')
    vm = compute_client.get(rg, vm_name)
    namespace.location = vm.location # pylint: disable=no-member
    get_network_watcher_from_location()(cmd, namespace)
    # Expand bare VM names into full resource IDs.
    if namespace.source_resource and not is_valid_resource_id(namespace.source_resource):
        namespace.source_resource = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=rg,
            namespace='Microsoft.Compute',
            type='virtualMachines',
            name=namespace.source_resource)
    if namespace.dest_resource and not is_valid_resource_id(namespace.dest_resource):
        namespace.dest_resource = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=namespace.resource_group_name,
            namespace='Microsoft.Compute',
            type='virtualMachines',
            name=namespace.dest_resource)
def process_nw_test_connectivity_namespace(cmd, namespace):
    """Validate `network watcher test-connectivity` arguments.

    Resolves the watcher from the source VM's region, expands bare VM names
    into resource IDs and converts --headers KEY=VALUE pairs into HTTPHeader
    model objects.
    """
    from msrestazure.tools import is_valid_resource_id, resource_id, parse_resource_id
    compute_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_COMPUTE).virtual_machines
    vm_name = parse_resource_id(namespace.source_resource)['name']
    # The resource group may come from --resource-group or be embedded in the ID.
    rg = namespace.resource_group_name or parse_resource_id(namespace.source_resource).get('resource_group', None)
    if not rg:
        raise CLIError('usage error: --source-resource ID | --source-resource NAME --resource-group NAME')
    vm = compute_client.get(rg, vm_name)
    namespace.location = vm.location # pylint: disable=no-member
    get_network_watcher_from_location(remove=True)(cmd, namespace)
    if namespace.source_resource and not is_valid_resource_id(namespace.source_resource):
        namespace.source_resource = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=rg,
            namespace='Microsoft.Compute',
            type='virtualMachines',
            name=namespace.source_resource)
    if namespace.dest_resource and not is_valid_resource_id(namespace.dest_resource):
        namespace.dest_resource = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=namespace.resource_group_name,
            namespace='Microsoft.Compute',
            type='virtualMachines',
            name=namespace.dest_resource)
    if namespace.headers:
        HTTPHeader = cmd.get_models('HTTPHeader')
        headers = []
        for item in namespace.headers:
            # NOTE(review): split('=') rejects values containing '=' — confirm
            # that header values never legitimately contain one.
            parts = item.split('=')
            if len(parts) != 2:
                raise CLIError("usage error '{}': --headers KEY=VALUE [KEY=VALUE ...]".format(item))
            headers.append(HTTPHeader(name=parts[0], value=parts[1]))
        namespace.headers = headers
def process_nw_flow_log_set_namespace(cmd, namespace):
    """Validate `network watcher flow-log configure` arguments.

    Expands bare storage-account / workspace names into full resource IDs,
    then performs the common "show" validation (NSG + watcher resolution).
    """
    from msrestazure.tools import is_valid_resource_id, resource_id
    if namespace.storage_account and not is_valid_resource_id(namespace.storage_account):
        namespace.storage_account = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=namespace.resource_group_name,
            namespace='Microsoft.Storage',
            type='storageAccounts',
            name=namespace.storage_account)
    if namespace.traffic_analytics_workspace and not is_valid_resource_id(namespace.traffic_analytics_workspace):
        namespace.traffic_analytics_workspace = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=namespace.resource_group_name,
            namespace='Microsoft.OperationalInsights',
            type='workspaces',
            name=namespace.traffic_analytics_workspace)
    process_nw_flow_log_show_namespace(cmd, namespace)
def process_nw_flow_log_show_namespace(cmd, namespace):
    """Validate `network watcher flow-log show` arguments.

    Expands a bare NSG name into a full resource ID, then resolves the
    watcher from the NSG's region (consuming ``location``).
    """
    from msrestazure.tools import is_valid_resource_id, resource_id
    from azure.cli.core.commands.arm import get_arm_resource_by_id
    if not is_valid_resource_id(namespace.nsg):
        namespace.nsg = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=namespace.resource_group_name,
            namespace='Microsoft.Network',
            type='networkSecurityGroups',
            name=namespace.nsg)
    nsg = get_arm_resource_by_id(cmd.cli_ctx, namespace.nsg)
    namespace.location = nsg.location # pylint: disable=no-member
    get_network_watcher_from_location(remove=True)(cmd, namespace)
def process_nw_topology_namespace(cmd, namespace):
    """Validate targeting arguments for `network watcher show-topology`.

    Exactly one of --resource-group / --vnet / --subnet must identify the
    target scope. Names are combined into full IDs, the target location is
    derived (from the resource group if needed) and the matching Network
    Watcher is resolved onto the namespace.
    """
    from msrestazure.tools import is_valid_resource_id, resource_id, parse_resource_id
    SubResource = cmd.get_models('SubResource')
    subscription_id = get_subscription_id(cmd.cli_ctx)
    location = namespace.location
    rg = namespace.target_resource_group_name
    vnet = namespace.target_vnet
    subnet = namespace.target_subnet
    # When --vnet/--subnet already hold full IDs, keep them separately.
    vnet_id = vnet if is_valid_resource_id(vnet) else None
    subnet_id = subnet if is_valid_resource_id(subnet) else None
    if rg and not vnet and not subnet:
        # targeting resource group - OK
        pass
    elif subnet:
        subnet_usage = CLIError('usage error: --subnet ID | --subnet NAME --resource-group NAME --vnet NAME')
        # targeting subnet - OK
        if subnet_id and (vnet or rg):
            raise subnet_usage
        if not subnet_id and (not rg or not vnet or vnet_id):
            raise subnet_usage
        if subnet_id:
            rg = parse_resource_id(subnet_id)['resource_group']
            namespace.target_subnet = SubResource(id=subnet)
        else:
            subnet_id = subnet_id or resource_id(
                subscription=subscription_id,
                resource_group=rg,
                namespace='Microsoft.Network',
                type='virtualNetworks',
                name=vnet,
                child_type_1='subnets',
                child_name_1=subnet
            )
            namespace.target_resource_group_name = None
            namespace.target_vnet = None
            namespace.target_subnet = SubResource(id=subnet_id)
    elif vnet:
        # targeting vnet - OK
        vnet_usage = CLIError('usage error: --vnet ID | --vnet NAME --resource-group NAME')
        if vnet_id and (subnet or rg):
            raise vnet_usage
        # NOTE(review): precedence makes this `(not vnet_id and not rg) or subnet`;
        # `subnet` is always falsy in this branch (handled by the elif above), so
        # the outcome matches the apparent intent — confirm before restructuring.
        if not vnet_id and not rg or subnet:
            raise vnet_usage
        if vnet_id:
            rg = parse_resource_id(vnet_id)['resource_group']
            namespace.target_vnet = SubResource(id=vnet)
        else:
            vnet_id = vnet_id or resource_id(
                subscription=subscription_id,
                resource_group=rg,
                namespace='Microsoft.Network',
                type='virtualNetworks',
                name=vnet
            )
            namespace.target_resource_group_name = None
            namespace.target_vnet = SubResource(id=vnet_id)
    else:
        raise CLIError('usage error: --resource-group NAME | --vnet NAME_OR_ID | --subnet NAME_OR_ID')
    # retrieve location from resource group
    if not location:
        resource_client = \
            get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES).resource_groups
        resource_group = resource_client.get(rg)
        namespace.location = resource_group.location # pylint: disable=no-member
    get_network_watcher_from_location(
        remove=True, watcher_name='network_watcher_name', rg_name='resource_group_name')(cmd, namespace)
def process_nw_packet_capture_create_namespace(cmd, namespace):
    """Validate `network watcher packet-capture create` arguments."""
    from msrestazure.tools import is_valid_resource_id, resource_id
    get_network_watcher_from_vm(cmd, namespace)
    storage_usage = CLIError('usage error: --storage-account NAME_OR_ID [--storage-path '
                             'PATH] [--file-path PATH] | --file-path PATH')
    # A capture needs at least one sink, and --storage-path only makes sense
    # together with --storage-account.
    have_account = bool(namespace.storage_account)
    if not have_account and not namespace.file_path:
        raise storage_usage
    if namespace.storage_path and not have_account:
        raise storage_usage
    # Expand bare names into full resource IDs.
    if not is_valid_resource_id(namespace.vm):
        namespace.vm = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=namespace.resource_group_name,
            namespace='Microsoft.Compute',
            type='virtualMachines',
            name=namespace.vm)
    if have_account and not is_valid_resource_id(namespace.storage_account):
        namespace.storage_account = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=namespace.resource_group_name,
            namespace='Microsoft.Storage',
            type='storageAccounts',
            name=namespace.storage_account)
    if namespace.file_path:
        capture_path = namespace.file_path
        if not capture_path.endswith('.cap'):
            raise CLIError("usage error: --file-path PATH must end with the '*.cap' extension")
        # The path is consumed on the target VM, so normalize to backslashes.
        namespace.file_path = capture_path.replace('/', '\\')
def process_nw_troubleshooting_start_namespace(cmd, namespace):
    """Validate `network watcher troubleshooting start` arguments and expand the
    storage account name into a full resource ID."""
    from msrestazure.tools import is_valid_resource_id, resource_id
    storage_usage = CLIError('usage error: --storage-account NAME_OR_ID [--storage-path PATH]')
    if namespace.storage_path and not namespace.storage_account:
        raise storage_usage
    # NOTE(review): this branch also runs when --storage-account was omitted
    # entirely (is_valid_resource_id(None) is falsy), building an ID with
    # name=None — confirm a missing account is rejected earlier in the pipeline.
    if not is_valid_resource_id(namespace.storage_account):
        namespace.storage_account = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=namespace.resource_group_name,
            namespace='Microsoft.Storage',
            type='storageAccounts',
            name=namespace.storage_account)
    process_nw_troubleshooting_show_namespace(cmd, namespace)
def process_nw_troubleshooting_show_namespace(cmd, namespace):
    """Validate `network watcher troubleshooting show` arguments.

    The target may be a full resource ID, or a name plus --resource-type and
    --resource-group (but not a mix of both forms). The watcher is then
    resolved from the target resource's region.
    """
    from msrestazure.tools import is_valid_resource_id, resource_id
    resource_usage = CLIError('usage error: --resource ID | --resource NAME --resource-type TYPE '
                              '--resource-group NAME')
    id_params = [namespace.resource_type, namespace.resource_group_name]
    if not is_valid_resource_id(namespace.resource):
        # Name form: both --resource-type and --resource-group are required.
        if not all(id_params):
            raise resource_usage
        type_map = {
            'vnetGateway': 'virtualNetworkGateways',
            'vpnConnection': 'connections'
        }
        namespace.resource = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=namespace.resource_group_name,
            namespace='Microsoft.Network',
            type=type_map[namespace.resource_type],
            name=namespace.resource)
    else:
        # ID form: the name-form options must not be mixed in.
        if any(id_params):
            raise resource_usage
    get_network_watcher_from_resource(cmd, namespace)
def process_nw_config_diagnostic_namespace(cmd, namespace):
    """Validate `network watcher run-configuration-diagnostic` arguments.

    The target resource may be a full ID or a name plus --resource-type and
    --resource-group (optionally --parent); queries may be given either as raw
    JSON (--queries) or as the full set of individual traffic options.
    """
    from msrestazure.tools import is_valid_resource_id, resource_id
    # validate target resource
    resource_usage = CLIError('usage error: --resource ID | --resource NAME --resource-type TYPE '
                              '--resource-group NAME [--parent PATH]')
    # omit --parent since it is optional
    id_params = [namespace.resource_type, namespace.resource_group_name]
    if not is_valid_resource_id(namespace.resource):
        if not all(id_params):
            raise resource_usage
        # infer resource namespace
        NAMESPACES = {
            'virtualMachines': 'Microsoft.Compute',
            'applicationGateways': 'Microsoft.Network',
            'networkInterfaces': 'Microsoft.Network'
        }
        resource_namespace = NAMESPACES[namespace.resource_type]
        if namespace.parent:
            # special case for virtualMachineScaleSets/NetworkInterfaces, since it is
            # the only one to need `--parent`.
            resource_namespace = 'Microsoft.Compute'
        namespace.resource = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=namespace.resource_group_name,
            namespace=resource_namespace,
            type=namespace.resource_type,
            parent=namespace.parent,
            name=namespace.resource)
    elif any(id_params) or namespace.parent:
        raise resource_usage
    # validate query
    query_usage = CLIError('usage error: --queries JSON | --destination DEST --source SRC --direction DIR '
                           '--port PORT --protocol PROTOCOL')
    query_params = [namespace.destination, namespace.source, namespace.direction, namespace.protocol,
                    namespace.destination_port]
    if namespace.queries:
        # --queries is exclusive with the individual traffic options.
        if any(query_params):
            raise query_usage
    elif not all(query_params):
        raise query_usage
    get_network_watcher_from_resource(cmd, namespace)
def process_lb_outbound_rule_namespace(cmd, namespace):
    """Expand a bare backend pool name on `lb outbound-rule` commands into a full ID."""
    from msrestazure.tools import is_valid_resource_id
    validate_frontend_ip_configs(cmd, namespace)
    pool = namespace.backend_address_pool
    if pool and not is_valid_resource_id(pool):
        namespace.backend_address_pool = _generate_lb_subproperty_id(
            cmd.cli_ctx, namespace, 'backendAddressPools', pool)
def process_list_delegations_namespace(cmd, namespace):
    """Require a location or a resource group when listing subnet delegations."""
    if not (namespace.resource_group_name or namespace.location):
        raise CLIError('usage error: --location LOCATION | --resource-group NAME [--location LOCATION]')
    # Fall back to the resource group's location when none was given explicitly.
    if not namespace.location:
        get_default_location_from_resource_group(cmd, namespace)
def validate_ag_address_pools(cmd, namespace):
    """Expand --app-gateway-backend-pools entries into full resource IDs.

    Consumes (deletes) the helper attribute ``application_gateway_name``,
    which is only needed to build IDs from bare pool names.
    """
    from msrestazure.tools import is_valid_resource_id, resource_id
    pool_args = namespace.app_gateway_backend_address_pools
    gateway = namespace.application_gateway_name
    delattr(namespace, 'application_gateway_name')
    if not pool_args:
        return
    resolved = []
    for pool in pool_args:
        if is_valid_resource_id(pool):
            resolved.append(pool)
            continue
        # A bare pool name only makes sense together with --gateway-name.
        if not gateway:
            raise CLIError('usage error: --app-gateway-backend-pools IDS | --gateway-name NAME '
                           '--app-gateway-backend-pools NAMES')
        resolved.append(resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=namespace.resource_group_name,
            namespace='Microsoft.Network',
            type='applicationGateways',
            name=gateway,
            child_type_1='backendAddressPools',
            child_name_1=pool))
    namespace.app_gateway_backend_address_pools = resolved
def validate_custom_error_pages(namespace):
    """Convert --custom-error-pages entries of the form CODE=URL into dicts."""
    pages = namespace.custom_error_pages
    if not pages:
        return
    converted = []
    for entry in pages:
        try:
            status, page_url = entry.split('=')
        except (ValueError, TypeError):
            raise CLIError('usage error: --custom-error-pages STATUS_CODE=URL [STATUS_CODE=URL ...]')
        converted.append({'statusCode': status, 'customErrorPageUrl': page_url})
    namespace.custom_error_pages = converted
def validate_custom_headers(namespace):
    """Convert --custom-headers entries of the form KEY=VALUE into dicts."""
    raw_headers = namespace.monitor_custom_headers
    if not raw_headers:
        return
    parsed = []
    for raw in raw_headers:
        # Split on the first '=' only, so the value itself may contain '='.
        name, separator, value = raw.partition('=')
        if not separator:
            raise CLIError('usage error: --custom-headers KEY=VALUE')
        parsed.append({'name': name, 'value': value})
    namespace.monitor_custom_headers = parsed
def validate_status_code_ranges(namespace):
    """Convert --status-code-ranges entries ('VAL' or 'MIN-MAX') into
    {'min': int, 'max': int} dicts.

    Raises CLIError when a bound is not an integer. Fixes two defects of the
    original: the `else` branch was unreachable (split with maxsplit=1 yields
    at most two parts) and the CLIError object was needlessly constructed on
    every iteration even for valid input.
    """
    if not namespace.status_code_ranges:
        return
    values = []
    for item in namespace.status_code_ranges:
        # maxsplit=1 yields one part ('VAL') or two parts ('MIN-MAX').
        bounds = item.split('-', 1)
        try:
            low = int(bounds[0])
            high = int(bounds[1]) if len(bounds) == 2 else low
        except ValueError:
            raise CLIError('usage error: --status-code-ranges VAL | --status-code-ranges MIN-MAX')
        values.append({'min': low, 'max': high})
    namespace.status_code_ranges = values
def validate_subnet_ranges(namespace):
    """Convert --subnets entries into dicts understood by Traffic Manager.

    'FIRST-LAST' -> {'first', 'last'}; 'FIRST:SCOPE' -> {'first', 'scope'};
    anything else -> {'first'}. Fixes a defect of the original: both
    try/except ValueError blocks were dead code, since str.split never
    raises ValueError.
    """
    if not namespace.subnets:
        return
    values = []
    for item in namespace.subnets:
        # Range form: FIRST-LAST (split on the first '-' only).
        first, sep, last = item.partition('-')
        if sep:
            values.append({'first': first, 'last': last})
            continue
        # Scope form: FIRST:SCOPE (split on the first ':' only).
        first, sep, scope = item.partition(':')
        if sep:
            values.append({'first': first, 'scope': scope})
            continue
        # Single-address form.
        values.append({'first': item})
    namespace.subnets = values
# pylint: disable=too-few-public-methods
class WafConfigExclusionAction(argparse.Action):
    """argparse action that accumulates --exclusion VARIABLE OPERATOR VALUE
    triplets into ``namespace.exclusions`` as firewall-exclusion model objects."""

    def __call__(self, parser, namespace, values, option_string=None):
        cmd = namespace._cmd  # pylint: disable=protected-access
        exclusion_model = cmd.get_models('ApplicationGatewayFirewallExclusion')
        if not namespace.exclusions:
            namespace.exclusions = []
        # argparse may deliver the triplet as a list of tokens; normalize first.
        if isinstance(values, list):
            values = ' '.join(values)
        try:
            match_variable, operator_name, selector_value = values.split(' ')
        except (ValueError, TypeError):
            raise CLIError('usage error: --exclusion VARIABLE OPERATOR VALUE')
        namespace.exclusions.append(exclusion_model(
            match_variable=match_variable,
            selector_match_operator=operator_name,
            selector=selector_value))
def get_header_configuration_validator(dest):
    """Return a validator that converts 'KEY=VALUE' strings stored on the
    attribute *dest* into {'header_name', 'header_value'} dicts.

    Fixes a robustness/consistency defect: an entry without '=' previously
    escaped as an unhandled ValueError traceback; it now raises CLIError with
    a usage message, matching the other KEY=VALUE validators in this module
    (e.g. validate_custom_headers).
    """
    def validator(namespace):
        values = getattr(namespace, dest, None)
        if not values:
            return
        results = []
        for item in values:
            # Split on the first '=' only, so the value itself may contain '='.
            key, sep, value = item.partition('=')
            if not sep:
                raise CLIError('usage error: --{} KEY=VALUE [KEY=VALUE ...]'.format(dest.replace('_', '-')))
            results.append({
                'header_name': key,
                'header_value': value
            })
        setattr(namespace, dest, results)
    return validator
| 58,924 | 27 | 1,895 |
837b8575698c659fd3dcdef92b379c132e3eb8bf | 296 | py | Python | get_enviroment.py | limusina10/hammer | 6b42697a02f0ddf750170ef2bed49bfa3c823ad0 | [
"MIT"
] | null | null | null | get_enviroment.py | limusina10/hammer | 6b42697a02f0ddf750170ef2bed49bfa3c823ad0 | [
"MIT"
] | null | null | null | get_enviroment.py | limusina10/hammer | 6b42697a02f0ddf750170ef2bed49bfa3c823ad0 | [
"MIT"
] | 1 | 2019-06-28T18:44:44.000Z | 2019-06-28T18:44:44.000Z | import os
from dotenv import load_dotenv
load_dotenv()
TOKEN = os.getenv("TOKEN")
COMMAND_PREFIX = os.getenv("PREFIX")
OWNER = os.getenv("OWNER")
ANNOUNCEMENTS_CHANNEL = os.getenv("ANNOUNCEMENTS")
SECURITY_CHANNEL = os.getenv("SECURITY")
SWEAR_WORDS_LIST = os.getenv("BANNEDWORDS").split(",")
| 22.769231 | 54 | 0.756757 | import os
from dotenv import load_dotenv
load_dotenv()
TOKEN = os.getenv("TOKEN")
COMMAND_PREFIX = os.getenv("PREFIX")
OWNER = os.getenv("OWNER")
ANNOUNCEMENTS_CHANNEL = os.getenv("ANNOUNCEMENTS")
SECURITY_CHANNEL = os.getenv("SECURITY")
SWEAR_WORDS_LIST = os.getenv("BANNEDWORDS").split(",")
| 0 | 0 | 0 |
107fc6a18e5d39c505f1ffb15ee488a9e8674bb8 | 625 | py | Python | nltk_setup.py | Yakelixir/bigram_from_text | 96f1abe4c6ed0a98e6f909bdd8318096bb7f4f83 | [
"MIT"
] | null | null | null | nltk_setup.py | Yakelixir/bigram_from_text | 96f1abe4c6ed0a98e6f909bdd8318096bb7f4f83 | [
"MIT"
] | null | null | null | nltk_setup.py | Yakelixir/bigram_from_text | 96f1abe4c6ed0a98e6f909bdd8318096bb7f4f83 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
"""run and initiate nltk.download('all') """
import nltk
# setup or argparse
PERMISSION = input("Would you like to continue and install all nltk dependanies? [Y/n] ")
if PERMISSION == 'Y':
try:
nltk.download('all')
COMPLETE = """We have completed the initial setup for ntlk download.
You can now run bigramft.py"""
print('\n', COMPLETE, '\n')
except Exception as error:
print('There was an error: ', error)
else:
EXIT_MSG = """No worries we can have some bigram fun later when your ready to setup.
Never rush quality!"""
print(EXIT_MSG)
| 27.173913 | 89 | 0.6416 | #! /usr/bin/env python
"""run and initiate nltk.download('all') """
import nltk
# setup or argparse
PERMISSION = input("Would you like to continue and install all nltk dependanies? [Y/n] ")
if PERMISSION == 'Y':
try:
nltk.download('all')
COMPLETE = """We have completed the initial setup for ntlk download.
You can now run bigramft.py"""
print('\n', COMPLETE, '\n')
except Exception as error:
print('There was an error: ', error)
else:
EXIT_MSG = """No worries we can have some bigram fun later when your ready to setup.
Never rush quality!"""
print(EXIT_MSG)
| 0 | 0 | 0 |
5d9831deb49847b6573722cd9f6ee7a462919922 | 178 | py | Python | computer/admin.py | Zomba4okk/EmployeesManager | bff29dec7a7b83db79ef3449e19ad51b6fd4df8d | [
"MIT"
] | null | null | null | computer/admin.py | Zomba4okk/EmployeesManager | bff29dec7a7b83db79ef3449e19ad51b6fd4df8d | [
"MIT"
] | null | null | null | computer/admin.py | Zomba4okk/EmployeesManager | bff29dec7a7b83db79ef3449e19ad51b6fd4df8d | [
"MIT"
] | null | null | null | from django.contrib import admin
from employee.models import Department, Employee, Room
admin.site.register(Department)
admin.site.register(Employee)
admin.site.register(Room)
| 22.25 | 54 | 0.825843 | from django.contrib import admin
from employee.models import Department, Employee, Room
admin.site.register(Department)
admin.site.register(Employee)
admin.site.register(Room)
| 0 | 0 | 0 |
a38f69026e60b3a8229263ec5320c5f6ab8a91f5 | 25,531 | py | Python | face_sync/generate_srrr.py | lilly9117/Cross-Cutting | d534e8b5d4bf071883b7cb5f1832bba74b9a52d0 | [
"Apache-2.0"
] | 40 | 2020-09-21T05:35:17.000Z | 2022-02-06T04:41:34.000Z | face_sync/generate_srrr.py | lilly9117/Cross-Cutting | d534e8b5d4bf071883b7cb5f1832bba74b9a52d0 | [
"Apache-2.0"
] | 4 | 2020-05-22T15:44:13.000Z | 2020-07-17T07:41:33.000Z | face_sync/generate_srrr.py | lilly9117/Cross-Cutting | d534e8b5d4bf071883b7cb5f1832bba74b9a52d0 | [
"Apache-2.0"
] | 8 | 2020-10-03T06:08:39.000Z | 2021-12-17T15:50:30.000Z | import os
from moviepy.editor import VideoFileClip, concatenate_videoclips, CompositeVideoClip, TextClip
import random
import numpy as np
import time
from video_facial_landmarks_minmax import calculate_distance
from face_embedding import calculate_euclidean_distance
import cv2
import subprocess
ONE_FRAME_SEC = 0.03336666666666588 # 29.97002997002997fps의 역수! 한 프레임당 시간을 계싼해서 프레임 id만 알면 현재 시간 알수 있도록 함# 0.03336666666666588??
EYE_MIN_DIFF = 65 # 두 영상의 눈 크기 차이가 거리 이상이면, crossfade 전환 하지 않는다.
TOTAL_MIN_DIFF = 200 # 두 영상의 눈 거리가 이 이상이면 전환 자체를 시도하지 않는다(엉뚱한데 옮겨가는거 피하기)
ROTATE_MAX = 7 # 각 도 차이가 이 값 이상이면, crossfade 전환하지 않는다.
WINDOW_TIME = 10 # WINDOW_TIME 초 안에서 최소 거리를 찾는다. 얼굴이 겹치는 부분이 없다면, WINDOW_TIME 만큼 자르고 radom으로 다음 영상을 재생한다.
PADDED_TIME = 3 # 최소 시간으로 영상을 자른 뒤 PADDED_TIME 만큼은 얼굴 거리를 계산하지 않는다.
# TRANSITION INFO
ZOOM_FRAME = 20 # 얼굴 확대하는 FRAME 수
CROSS_FRAME = 4 # CrossFade FRAME 수
ONE_ZOOM = 1.2 # 회전 확대 후 검은 비율을 줄이기 위해서 확대하는 비율
AGAIN_ZOOM = 1.15 # 영상이 확대가 불가능(영상 최대 크기 넘어감)할 때 한번 더 확대할 수 있는 비율. 한번 더 확대하고도 범위가 넘어가면, 그냥 아무 효과없이 전환한다.
PANELTY = 100
print('hyper parameter')
print(ONE_FRAME_SEC, EYE_MIN_DIFF, ROTATE_MAX, WINDOW_TIME, PADDED_TIME, ZOOM_FRAME, CROSS_FRAME, ONE_ZOOM, AGAIN_ZOOM)
TEST = False
TEST_TIME = 30
# Moving = 더 작은 쪽에서 하는 것!
# Rotate 할 때 빈 자리 메꾸기 위해서 기본적으로 ONE_ZOOM 만큼 확대하기!
# 이건 사이즈가 안맞아서 한번 더 확대 했을때 다른 쪽 영상을 처리하는 Class
# ForceZoom = 더 큰쪽에서 하는 것!!
start_time = time.time()
use_face_panelty = True # FacePanelty를 사용하면 Panelty값이 기본적으로 들어가니까 자연스러운 전환을 위해서는 역치값을 높여아 함
if use_face_panelty==True:
EYE_MIN_DIFF += PANELTY
TOTAL_MIN_DIFF += PANELTY
crosscut(videos_path="./video", option="norandom", use_face_panelty = False)
end_time = time.time()
print(end_time - start_time, 'total Generation time')
| 57.632054 | 252 | 0.574987 | import os
from moviepy.editor import VideoFileClip, concatenate_videoclips, CompositeVideoClip, TextClip
import random
import numpy as np
import time
from video_facial_landmarks_minmax import calculate_distance
from face_embedding import calculate_euclidean_distance
import cv2
import subprocess
ONE_FRAME_SEC = 0.03336666666666588 # 29.97002997002997fps의 역수! 한 프레임당 시간을 계싼해서 프레임 id만 알면 현재 시간 알수 있도록 함# 0.03336666666666588??
EYE_MIN_DIFF = 65 # 두 영상의 눈 크기 차이가 거리 이상이면, crossfade 전환 하지 않는다.
TOTAL_MIN_DIFF = 200 # 두 영상의 눈 거리가 이 이상이면 전환 자체를 시도하지 않는다(엉뚱한데 옮겨가는거 피하기)
ROTATE_MAX = 7 # 각 도 차이가 이 값 이상이면, crossfade 전환하지 않는다.
WINDOW_TIME = 10 # WINDOW_TIME 초 안에서 최소 거리를 찾는다. 얼굴이 겹치는 부분이 없다면, WINDOW_TIME 만큼 자르고 radom으로 다음 영상을 재생한다.
PADDED_TIME = 3 # 최소 시간으로 영상을 자른 뒤 PADDED_TIME 만큼은 얼굴 거리를 계산하지 않는다.
# TRANSITION INFO
ZOOM_FRAME = 20 # 얼굴 확대하는 FRAME 수
CROSS_FRAME = 4 # CrossFade FRAME 수
ONE_ZOOM = 1.2 # 회전 확대 후 검은 비율을 줄이기 위해서 확대하는 비율
AGAIN_ZOOM = 1.15 # 영상이 확대가 불가능(영상 최대 크기 넘어감)할 때 한번 더 확대할 수 있는 비율. 한번 더 확대하고도 범위가 넘어가면, 그냥 아무 효과없이 전환한다.
PANELTY = 100
print('hyper parameter')
print(ONE_FRAME_SEC, EYE_MIN_DIFF, ROTATE_MAX, WINDOW_TIME, PADDED_TIME, ZOOM_FRAME, CROSS_FRAME, ONE_ZOOM, AGAIN_ZOOM)
TEST = False
TEST_TIME = 30
def distance(reference_clip, clip, use_face_panelty = False):
    """Return the minimum face distance between two clips plus the matching
    geometry reported by calculate_distance (eye lengths, degrees, points).

    When use_face_panelty is on, a face-identity penalty (scaled embedding
    distance between the two frames at the best time) is added so that
    transitions between different people score worse.
    """
    # cv2-based landmark matching yields the best (distance, time) pair.
    min_diff, min_time, info = calculate_distance(reference_clip, clip)
    if use_face_panelty:
        # Penalize frames where the two faces belong to different people.
        ref_frame = reference_clip.get_frame(min_time)
        frame = clip.get_frame(min_time)
        e_dist = calculate_euclidean_distance(ref_frame, frame)
        # BUG FIX: the original referenced an undefined name `penalty`
        # (NameError at runtime); the module-level constant is PANELTY.
        print("Face Panelty Applied With ", e_dist / 1.25 * PANELTY)
        min_diff += e_dist / 1.25 * PANELTY  # rescale embedding distance (~0-1) before weighting
    return min_diff, min_time,\
        info['refer_length'], info['refer_degree'], \
        info['compare_length'], info['compare_degree'], \
        info['refer_point'], info['compare_point']
# Moving = transform applied on the smaller-face side!
# When rotating, zoom by ONE_ZOOM by default to cover the empty (black) corners!
class Moving:
    def __init__(self,small_point, big_point, ratio, transition_dir, rotate_degree, default_zoom = 1):
        """Frame transform applied to the smaller-face clip during a transition.

        small_point/big_point: landmark point lists; index 0 is the left eye.
        ratio: zoom factor scaling the smaller clip up to the bigger one.
        transition_dir: 'small_to_big' (grow), 'big_to_small' (shrink) or 'same'.
        rotate_degree: rotation in degrees needed to align the two faces.
        default_zoom: extra zoom hiding black borders introduced by rotation.
        """
        self.small_point = small_point[0] # left eye (of the smaller face)
        self.big_point = big_point[0] # left eye (of the bigger face)
        self.ratio = ratio # zoom ratio
        self.transition_dir = transition_dir # grow (small_to_big), shrink (big_to_small), or keep as-is (same)
        self.rotate_degree = rotate_degree # rotation to apply, in degrees
        self.default_zoom = default_zoom
def __call__(self, get_frame, t):
frame = get_frame(t)
if len(self.small_point)==0: # 얼굴이랄게 없을때
return frame
else:
# ratio만큼 영상을 키운다(더 작은 영상이 더 큰 영상 사이즈에 맞춤)
img_cv = cv2.resize(frame,(int(round(1280 * self.ratio * self.default_zoom)),int(round(720 * self.ratio * self.default_zoom))))
cur_w = self.small_point[0] * self.ratio * self.default_zoom
cur_h = self.small_point[1] * self.ratio * self.default_zoom
if self.transition_dir == 'small_to_big':
cur_degree = self.rotate_degree*(t/ONE_FRAME_SEC)/ZOOM_FRAME # 0에서 rotate_degree까지
elif self.transition_dir == 'big_to_small':
cur_degree = self.rotate_degree*(ZOOM_FRAME-t/ONE_FRAME_SEC)/ZOOM_FRAME
else:
cur_degree = self.rotate_degree
# width height 순서
M = cv2.getRotationMatrix2D((cur_w, cur_h), cur_degree, 1.0)
img_cv = cv2.warpAffine(img_cv, M, (int(round(1280 * self.ratio * self.default_zoom)),int(round(720 * self.ratio * self.default_zoom))))
zoom_frame = np.asarray(img_cv)
# 더 큰 부분과 위치가 같아저야 하는것이므로 더 큰 포인트의 위치 비율을 계산한다.
w_ratio = self.big_point[0]/1280 # 그 비율만큼 왼쪽 마이너스
h_ratio = self.big_point[1]/720 # 그 비율만큼 위쪽 마이너스
# 혹시 사이즈가 넘어가면 사이즈를 self.default_zoom 만큼 한번 더 크게 해보기(너무 딱 맞춰서 확대하려고 하지말구!)
# ! 여기선 self.default_zoom 뺀 상태로 확대해야 한다(그래야 원하는 크기로 잘리지)
w1, w2 = int(round(cur_w - 1280 * self.ratio * w_ratio)), int(round(cur_w + 1280 * self.ratio *(1-w_ratio)))
h1, h2 = int(round(cur_h - 720 * self.ratio * h_ratio)), int(round(cur_h + 720 * self.ratio *(1-h_ratio)))
if h1>=0 and h2<=int(round(720 * self.ratio * self.default_zoom)) and w1>=0 and w2 <=int(round(1280 * self.ratio * self.default_zoom)):
# 시간초에 따라서 바뀌어야 함!
zoom_w_size, zoom_h_size = 1280 * self.ratio*self.default_zoom, 720 * self.ratio*self.default_zoom
if self.transition_dir == 'small_to_big': # 앞에가 작고 뒤에가 큰거!
W_real = zoom_w_size - (zoom_w_size - 1280)*(t/ONE_FRAME_SEC)/ZOOM_FRAME
H_real = zoom_h_size - (zoom_h_size- 720)*(t/ONE_FRAME_SEC)/ZOOM_FRAME
elif self.transition_dir == 'big_to_small': # 되려 시간이 지나면서 사이즈가 더 커져야 resize를 하면 더 넓은 부분이 나옴
W_real = 1280 + (zoom_w_size - 1280)*(t/ONE_FRAME_SEC)/ZOOM_FRAME
H_real = 720 + (zoom_h_size- 720)*(t/ONE_FRAME_SEC)/ZOOM_FRAME
else: # 'same' 그냥 큰 상태로 유지!
W_real = 1280
H_real = 720
# 16:9 비율 유지하면서 이동할 위치에 ratio만큼 자르기!
w1, w2 = int(round(cur_w - W_real * w_ratio)), int(round(cur_w + W_real *(1-w_ratio)))
h1, h2 = int(round(cur_h - H_real * h_ratio)), int(round(cur_h + H_real *(1-h_ratio)))
# 확대된 범위를 넘어갔는지 체크
if h1>=0 and h2<=int(round(720 * self.ratio * self.default_zoom)) and w1>=0 and w2 <=int(round(1280 * self.ratio * self.default_zoom)):
frame_region = zoom_frame[h1:h2,w1:w2]
else:
frame_region = frame
return frame_region
else:
# 딱 한번 확대 기회를 주자!
img_cv = cv2.resize(zoom_frame, dsize=(0, 0),fx= self.default_zoom * AGAIN_ZOOM, fy= self.default_zoom * AGAIN_ZOOM) # AGAIN_ZOOM 만큼 확대하기
zoom_frame = np.asarray(img_cv)
cur_w = self.small_point[0] * self.ratio * self.default_zoom * AGAIN_ZOOM
cur_h = self.small_point[1] * self.ratio * self.default_zoom * AGAIN_ZOOM
w_ratio = self.big_point[0]/1280 # 그 비율만큼 왼쪽 마이너스
h_ratio = self.big_point[1]/720 # 그 비율만큼 위쪽 마이너스
zoom_w_size, zoom_h_size = 1280 * self.ratio * self.default_zoom * AGAIN_ZOOM, 720 * self.ratio * self.default_zoom * AGAIN_ZOOM
# 시간초에 따라서 바뀌어야 함!
if self.transition_dir == 'small_to_big': # 앞에가 작고 뒤에가 큰거!
W_real = zoom_w_size - (zoom_w_size - 1280)*(t/ONE_FRAME_SEC)/ZOOM_FRAME
H_real = zoom_h_size - (zoom_h_size- 720)*(t/ONE_FRAME_SEC)/ZOOM_FRAME
# print(W_real, H_real, "W real H real")
elif self.transition_dir == 'big_to_small': # 되려 시간이 지나면서 사이즈가 더 커져야 resize를 하면 더 넓은 부분이 나옴
W_real = 1280 + (zoom_w_size - 1280)*(t/ONE_FRAME_SEC)/ZOOM_FRAME
H_real = 720 + (zoom_h_size- 720)*(t/ONE_FRAME_SEC)/ZOOM_FRAME
else: # 'same' 그냥 큰 상태로 유지!
W_real = 1280
H_real = 720
w1, w2 = int(round(cur_w - W_real * w_ratio)), int(round(cur_w + W_real *(1-w_ratio)))
h1, h2 = int(round(cur_h - H_real * h_ratio)), int(round(cur_h + H_real *(1-h_ratio)))
# 확대된 범위를 넘어갔을때!
if h1>=0 and h2<=int(round(720 * self.ratio*self.default_zoom *AGAIN_ZOOM)) and w1>=0 and w2 <=int(round(1280 * self.ratio*self.default_zoom *AGAIN_ZOOM)):
frame_region = zoom_frame[h1:h2,w1:w2]
else:
frame_region = frame
return frame_region
# Handles the OTHER side when the smaller clip had to be enlarged once more
# because the crop did not fit.
# ForceZoom = applied to the BIGGER of the two clips!!
class ForceZoom:
    """Frame filter (for ``clip.fl``) for the bigger clip: if the partner
    clip needed the extra AGAIN_ZOOM enlargement, apply the matching zoom
    here too so both sides stay aligned during the transition.

    NOTE(review): assumes 1280x720 source frames — confirm upstream.
    """
    def __init__(self,small_point, big_point, ratio, transition_dir, default_zoom = 1):
        self.small_point = small_point[0]
        self.big_point = big_point[0]
        self.ratio = ratio
        self.transition_dir = transition_dir
        self.default_zoom = default_zoom
    # This side processes the bigger clip, so no rotation is needed.
    def __call__(self, get_frame, t):
        # any process you want
        frame = get_frame(t)
        if len(self.small_point)==0:
            return frame
        else:
            print('--------------------- DO FORCE ZOOM')
            img_cv = cv2.resize(frame,(int(round(1280 *self.default_zoom * self.ratio)),int(round(720 *self.default_zoom* self.ratio))))
            zoom_frame = np.asarray(img_cv)
            cur_w = self.small_point[0] *self.default_zoom * self.ratio
            cur_h = self.small_point[1] *self.default_zoom * self.ratio
            # Everything is computed relative to the clip that moves.
            w_ratio = self.big_point[0]/1280 # subtract this fraction on the left
            h_ratio = self.big_point[1]/720 # subtract this fraction on the top
            w1, w2 = int(round(cur_w - 1280 * self.ratio * w_ratio)), int(round(cur_w + 1280 * self.ratio *(1-w_ratio)))
            h1, h2 = int(round(cur_h - 720 * self.ratio * h_ratio)), int(round(cur_h + 720 * self.ratio *(1-h_ratio)))
            # Also compute the crop the AGAIN_ZOOM (az) enlargement would give:
            # ForceZoom only applies when that enlargement was actually possible.
            cur_w_az = self.small_point[0] *self.default_zoom * self.ratio * AGAIN_ZOOM
            cur_h_az = self.small_point[1] *self.default_zoom * self.ratio * AGAIN_ZOOM
            w1_az, w2_az = int(round(cur_w_az - 1280 * self.ratio * w_ratio)), int(round(cur_w_az + 1280 * self.ratio *(1-w_ratio)))
            h1_az, h2_az = int(round(cur_h_az - 720 * self.ratio * h_ratio)), int(round(cur_h_az + 720 * self.ratio *(1-h_ratio)))
            # The plain crop must fail (not) AND the enlarged crop must fit
            # (if the plain one fit, no enlargement happened; if the enlarged
            # one also fails, the transition was left untouched).
            if not( h1>=0 and h2<=int(round(720 * self.ratio*self.default_zoom)) and w1>=0 and w2 <=int(round(1280 * self.ratio*self.default_zoom))) and \
                h1_az>=0 and h2_az<=int(round(720 * self.ratio*self.default_zoom*AGAIN_ZOOM)) and w1_az>=0 and w2_az<=int(round(1280 * self.ratio*AGAIN_ZOOM*self.default_zoom)):
                # The partner clip was enlarged, so restart from the raw frame here too.
                img_cv = cv2.resize(frame, dsize=(0, 0),fx=self.default_zoom * AGAIN_ZOOM, fy=self.default_zoom * AGAIN_ZOOM) # enlarge by AGAIN_ZOOM
                zoom_frame = np.asarray(img_cv)
                cur_w = self.big_point[0] * self.default_zoom * AGAIN_ZOOM
                cur_h = self.big_point[1] * self.default_zoom * AGAIN_ZOOM
                w_ratio = self.big_point[0]/1280 # subtract this fraction on the left
                h_ratio = self.big_point[1]/720 # subtract this fraction on the top
                zoom_w_size, zoom_h_size = 1280 * self.default_zoom * AGAIN_ZOOM, 720 * self.default_zoom * AGAIN_ZOOM
                # The crop size has to change with time t.
                if self.transition_dir == 'small_to_big': # the front clip is smaller, the next is bigger
                    W_real = zoom_w_size - (zoom_w_size - 1280)*(t/ONE_FRAME_SEC)/ZOOM_FRAME
                    H_real = zoom_h_size - (zoom_h_size- 720)*(t/ONE_FRAME_SEC)/ZOOM_FRAME
                elif self.transition_dir == 'big_to_small': # the crop grows over time so that after resize the view zooms out
                    # A larger crop shrinks after resize, so going small->large crop looks like zoom-in-then-out.
                    W_real = 1280 + (zoom_w_size - 1280)*(t/ONE_FRAME_SEC)/ZOOM_FRAME
                    H_real = 720 + (zoom_h_size- 720)*(t/ONE_FRAME_SEC)/ZOOM_FRAME
                else: # 'same': stay at the enlarged size
                    W_real = 1280
                    H_real = 720
                w1, w2 = int(round(cur_w - W_real * w_ratio)), int(round(cur_w + W_real *(1-w_ratio)))
                h1, h2 = int(round(cur_h - H_real * h_ratio)), int(round(cur_h + H_real *(1-h_ratio)))
                if h1>=0 and h2<=int(round(720 * self.ratio*self.default_zoom*AGAIN_ZOOM)) and w1>=0 and w2 <=int(round(1280 * self.ratio*self.default_zoom*AGAIN_ZOOM)):
                    frame_region = zoom_frame[h1:h2,w1:w2]
                else:
                    frame_region = frame
                return frame_region
            else: # otherwise only apply self.default_zoom and return
                # Restart from the raw frame with the plain default zoom.
                img_cv = cv2.resize(frame, dsize=(0, 0),fx=self.default_zoom, fy=self.default_zoom) # enlarge by default_zoom only
                zoom_frame = np.asarray(img_cv)
                cur_w = self.big_point[0] * self.default_zoom
                cur_h = self.big_point[1] * self.default_zoom
                # Everything is computed relative to the clip that moves.
                w_ratio = self.big_point[0]/1280 # subtract this fraction on the left
                h_ratio = self.big_point[1]/720 # subtract this fraction on the top
                zoom_w_size, zoom_h_size = 1280 * self.default_zoom, 720 * self.default_zoom
                # The crop size has to change with time t.
                if self.transition_dir == 'small_to_big': # the front clip is smaller, the next is bigger
                    W_real = zoom_w_size - (zoom_w_size - 1280)*(t/ONE_FRAME_SEC)/ZOOM_FRAME
                    H_real = zoom_h_size - (zoom_h_size- 720)*(t/ONE_FRAME_SEC)/ZOOM_FRAME
                elif self.transition_dir == 'big_to_small': # the crop grows over time so that after resize the view zooms out
                    # A larger crop shrinks after resize, so going small->large crop looks like zoom-in-then-out.
                    W_real = 1280 + (zoom_w_size - 1280)*(t/ONE_FRAME_SEC)/ZOOM_FRAME
                    H_real = 720 + (zoom_h_size- 720)*(t/ONE_FRAME_SEC)/ZOOM_FRAME
                else: # 'same': stay at the enlarged size
                    W_real = 1280
                    H_real = 720
                w1, w2 = int(round(cur_w - W_real * w_ratio)), int(round(cur_w + W_real *(1-w_ratio)))
                h1, h2 = int(round(cur_h - H_real * h_ratio)), int(round(cur_h + H_real *(1-h_ratio)))
                # If the crop exceeds the zoomed frame, fall back to the raw frame.
                if h1>=0 and h2<=int(round(720 * self.ratio*self.default_zoom)) and w1>=0 and w2 <=int(round(1280 * self.ratio*self.default_zoom)):
                    frame_region = zoom_frame[h1:h2,w1:w2]
                else:
                    frame_region = frame
                return frame_region
def crosscut(videos_path="./video", option="random", use_face_panelty=False):
    """Concatenate the clips in ``videos_path`` into one stage-mix video.

    Plays the current clip, searches the other clips (within WINDOW_TIME
    windows) for the moment whose face position/size/angle is closest, and
    switches there with a zoom/rotate/crossfade transition when the
    thresholds allow it.  Writes the result to ``video.mp4`` and returns the
    final clip.  With ``option="random"`` the next clip is picked at random
    instead.
    """
    # NOTE(review): shell=True with an interpolated path is shell-injection
    # prone if videos_path is ever untrusted — prefer os.remove().
    subprocess.call(f'rm -rf {videos_path}/.DS_Store', shell=True)
    min_time = 1000.0
    min_idx = 0
    audioclip = None
    extracted_clips_array = []
    video_num = len(os.listdir(videos_path))
    start_times = [0] * video_num # VIDEO ALIGNMENT -> SLICE START TIME
    # init ('refer' is the currently playing clip, 'compare' is the candidate next clip)
    refer_point_min = [(0,0),(0,0)]
    compare_point_min = [(0,0),(0,0)]
    refer_length_min = 0
    compare_length_min = 0
    refer_degree_min = 0
    compare_degree_min = 0
    INIT_NUM = 5000000
    for i in range(len(os.listdir(videos_path))):
        video_path = os.path.join(videos_path, sorted(os.listdir(videos_path))[i]) # sorted, so the ordering is deterministic
        clip = VideoFileClip(video_path)
        clip = clip.subclip(start_times[i], clip.duration) # align the start of every video via start_times
        print(video_path, clip.fps, clip.duration)
        if min_time > clip.duration: # the shortest video bounds the output length
            audioclip = clip.audio
            min_time = clip.duration
            min_idx = i
        extracted_clips_array.append(clip)
    print(len(extracted_clips_array),' videos min idx is ', min_idx, ' time',min_time)
    con_clips = []
    t = 3
    current_idx = 0
    check_tqdm = 1
    con_clips.append(extracted_clips_array[current_idx].subclip(0, min(t, int(min_time)))) # starts were aligned above, so slice from 0
    if TEST: # test mode: generate only a short portion for a quick check
        CHECK_DURATION = TEST_TIME
        min_time = CHECK_DURATION
        audioclip = audioclip.set_duration(CHECK_DURATION)
    # GENERATE STAGEMIX
    # CONCAT SUBCLIP 0~ MIN DURATION CLIP TIME
    while t < min_time:
        print(check_tqdm,'------------------------------------------------------------------')
        check_tqdm += 1
        # Generate at most WINDOW_TIME seconds per iteration.
        cur_t = t
        next_t = min(t+WINDOW_TIME, min_time) # the last window may be shorter than WINDOW_TIME seconds
        # RANDOM BASED METHOD
        if option=="random" or min(min_time,t + PADDED_TIME)==min_time: # the final segment may also be generated randomly
            random_video_idx = random.randint(0, len(extracted_clips_array)-1)
            clip = extracted_clips_array[random_video_idx].subclip(cur_t, next_t)
            t = next_t
            con_clips.append(clip)
        else:
            reference_clip = extracted_clips_array[current_idx].subclip(cur_t, next_t) # the clip currently playing
            d = INIT_NUM # init
            # If every distance stays Inf, this idx is used as the fallback.
            # --------------------------------------------------------------
            # Find the minimum-distance clip and gather the edit info -------
            # --------------------------------------------------------------
            min_idx = (current_idx+1)%len(extracted_clips_array)
            for video_idx in range(len(extracted_clips_array)):
                # Never compare a clip against itself.
                if video_idx == current_idx:
                    continue
                # Inspect the same WINDOW_TIME seconds of the candidate.
                clip = extracted_clips_array[video_idx].subclip(cur_t, next_t)
                # PADDED_TIME may skip a great match, but it prevents switching repeatedly around the same spot.
                # CALCULATE DISTANCE between reference_clip, compare_clip (best matching frame at the same second)
                cur_d, plus_frame, refer_length, refer_degree, compare_length, compare_degree, refer_point, compare_point = distance(reference_clip, clip, use_face_panelty=use_face_panelty)
                print('from video:',current_idx, ' to video',video_idx, ' in distance ',cur_d, ' in sec ' ,cur_t + plus_frame, 'first deg ', refer_degree, 'second deg ', compare_degree, ' refer length ', refer_length, ' compare length', compare_length)
                if d > cur_d: # keep the running minimum and its edit info
                    d = cur_d
                    min_idx = video_idx
                    next_t = cur_t + plus_frame # the frame to switch at
                    cur_clip = reference_clip.subclip(0,plus_frame)
                    next_clip = clip.subclip(0, plus_frame) # slice only the part up to the switch point
                    compare_point_min = compare_point
                    refer_point_min = refer_point
                    refer_length_min = refer_length # zoom in/out will be scaled to this
                    compare_length_min = compare_length # zoom in/out will be scaled to this
                    refer_degree_min = refer_degree
                    compare_degree_min = compare_degree
            if d == INIT_NUM or (not cur_clip): # every distance was Inf, or cur_clip itself is empty
                print("ALL DISTANCE INFINITE PROBLEM !!! --> APPEND NEXT INDEX VIDEO...")
                # current_idx moves on regardless.
                current_idx = min_idx # index to play next
                clip = reference_clip # current clip (distances were Inf, so the whole window is kept)
                t = min(t+WINDOW_TIME, min_time) # the last window may be shorter than WINDOW_TIME seconds
                con_clips.append(clip)
                if t < min_time: # no padding needed once t already reached min_time
                    # Append the padding segment of the next clip.
                    pad_clip = extracted_clips_array[current_idx].subclip(t, min(min_time,t+PADDED_TIME)) # never exceed min_time
                    t = min(min_time,t + PADDED_TIME) # advance by the padded time
                    con_clips.append(pad_clip)
            else:
                print("MIN DISTANCE VIDEO FOUND...!")
                # (!! concat the current clip and remember which idx plays next!)
                prev_idx = current_idx
                current_idx = min_idx # index of the clip that follows immediately
                clip = cur_clip # current clip, cut at the best switch point
                print("next video idx : {}".format(current_idx))
                print('----refer, compare length : ', refer_length_min, compare_length_min)
                print('----refer, compare point information : ', refer_point_min, compare_point_min)
                print('----refer, compare degree information : ', refer_degree_min, compare_degree_min)
                # Editing happens below ------------------------------------
                t = next_t
                # --------------------------------------------------------------
                # 1. Append the pre-transition part without any effect ----------
                # --------------------------------------------------------------
                clip_front = clip.subclip(0,clip.duration-(ONE_FRAME_SEC*ZOOM_FRAME)) # part inserted with no transition effect
                con_clips.append(clip_front)
                # --------------------------------------------------------------
                # 2. Append the transition part (ZOOM_FRAME frames) -------------
                # --------------------------------------------------------------
                clip_back = clip.subclip(clip.duration-(ONE_FRAME_SEC*ZOOM_FRAME),clip.duration)
                ## resize + transition only when these thresholds hold
                if abs(compare_length_min-refer_length_min) < EYE_MIN_DIFF and abs(compare_degree_min-refer_degree_min) < ROTATE_MAX and d < TOTAL_MIN_DIFF:
                    # If the front clip is smaller, run Moving to zoom the front clip in.
                    if compare_length_min> refer_length_min:
                        clip_back = clip_back.fl(Moving(refer_point_min, compare_point_min, compare_length_min/refer_length_min,'small_to_big',refer_degree_min-compare_degree_min))
                        clip_back = clip_back.resize((1280,720))
                    else: # if the next clip is smaller, match sizes with ForceZoom (it enlarges via self.default_zoom)
                        clip_back = clip_back.fl(ForceZoom(compare_point_min, refer_point_min, refer_length_min/compare_length_min,'small_to_big'))
                        clip_back = clip_back.resize((1280,720))
                    con_clips.append(clip_back)
                else:
                    con_clips.append(clip_back)
                # ---------------------------------------------------
                # 3. Append the padding segment of the next clip ------
                # ---------------------------------------------------
                pad_clip = extracted_clips_array[current_idx].subclip(t, min(min_time,t + PADDED_TIME)) # never exceed min_time
                # Decide whether the padding also gets the transition effect.
                if abs(compare_length_min-refer_length_min) < EYE_MIN_DIFF and abs(compare_degree_min-refer_degree_min) < ROTATE_MAX and d < TOTAL_MIN_DIFF:
                    ### PAD FRONT ---------------
                    pad_front = pad_clip.subclip(0,ONE_FRAME_SEC*ZOOM_FRAME) # slice only the part being transformed
                    # Front clip bigger, pad_clip smaller:
                    if refer_length_min> compare_length_min:
                        # pad_clip has to be zoomed in
                        pad_front = pad_front.fl(Moving(compare_point_min, refer_point_min, refer_length_min/compare_length_min, 'big_to_small',compare_degree_min-refer_degree_min))
                        pad_front = pad_front.resize((1280,720))
                        # Crossfade while the front clip keeps playing (no rotate/zoom on that side).
                        # !!!! temporarily commented out
                        # cross_clip = extracted_clips_array[prev_idx].subclip(t, t+ONE_FRAME_SEC*CROSS_FRAME) # min_time을 넘어가면 안됨!
                        # cross_clip = cross_clip.fl(ForceZoom(compare_point_min, refer_point_min, refer_length_min/compare_length_min, 'same')) # 여기서도 ForceZoom 필수!
                        # pad_front = CompositeVideoClip([pad_front, cross_clip.crossfadeout(ONE_FRAME_SEC*CROSS_FRAME)])
                        # !!! end of the commented block
                    else: # front clip smaller
                        pad_front = pad_front.fl(ForceZoom(refer_point_min, compare_point_min , compare_length_min/refer_length_min, 'big_to_small'))
                        pad_front = pad_front.resize((1280,720))
                        # !!!! temporarily commented out
                        # cross_clip = extracted_clips_array[prev_idx].subclip(t, t+ONE_FRAME_SEC*CROSS_FRAME) # min_time을 넘어가면 안됨!
                        # cross_clip = cross_clip.fl(Moving(refer_point_min, compare_point_min, compare_length_min/refer_length_min, 'same',refer_degree_min-compare_degree_min))
                        # pad_front = CompositeVideoClip([pad_front, cross_clip.crossfadeout(ONE_FRAME_SEC*CROSS_FRAME)])
                        # !!! end of the commented block
                    con_clips.append(pad_front)
                    ### PAD BACK ---------------
                    pad_back = pad_clip.subclip(ONE_FRAME_SEC*ZOOM_FRAME,pad_clip.duration) # the untouched remainder of the padding
                    t = min(min_time, t + PADDED_TIME) # advance by the padded time
                    con_clips.append(pad_back)
                else:
                    t = min(min_time, t + PADDED_TIME) # advance by the padded time
                    con_clips.append(pad_clip)
    # Stitch everything together.
    final_clip = concatenate_videoclips(con_clips)
    if audioclip !=None:
        final_clip.audio = audioclip
    final_clip.write_videofile("video.mp4")
    return final_clip
start_time = time.time()
# Using the face penalty adds PANELTY to every distance, so the thresholds
# must be raised accordingly for transitions to stay natural.
use_face_panelty = True
if use_face_panelty:
    EYE_MIN_DIFF += PANELTY
    TOTAL_MIN_DIFF += PANELTY
# BUG FIX: the call previously hard-coded use_face_panelty=False even though
# the thresholds above were already raised for penalty mode; forward the flag.
crosscut(videos_path="./video", option="norandom", use_face_panelty=use_face_panelty)
end_time = time.time()
print(end_time - start_time, 'total Generation time')
| 26,892 | -13 | 195 |
b2a0d8e3aef5256d756e6ea1b81ad2b8592f0ef9 | 2,196 | py | Python | cgi-bin/home.py | JamisHoo/Yagra | edcfe8ae6aadee152023c894bd0b8a0b23b9e5a9 | [
"MIT"
] | null | null | null | cgi-bin/home.py | JamisHoo/Yagra | edcfe8ae6aadee152023c894bd0b8a0b23b9e5a9 | [
"MIT"
] | null | null | null | cgi-bin/home.py | JamisHoo/Yagra | edcfe8ae6aadee152023c894bd0b8a0b23b9e5a9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from __future__ import print_function
from collections import namedtuple
from common import config
from common.response import text_response, populate_html, redirect
import os
import cgi
import hashlib
import MySQLdb
import Cookie
# CGI entry point: render the traceback into the HTTP response rather than
# letting the server emit an opaque 500 error.
try:
    process_input()
except:
    cgi.print_exception()
| 28.153846 | 79 | 0.676685 | #!/usr/bin/env python
from __future__ import print_function
from collections import namedtuple
from common import config
from common.response import text_response, populate_html, redirect
import os
import cgi
import hashlib
import MySQLdb
import Cookie
def process_input():
    """Read the credential cookies from the CGI environment and render the page."""
    cookie_jar = Cookie.SimpleCookie(os.environ.get("HTTP_COOKIE"))

    def _cookie_value(name):
        # Missing cookies become None so generate_output can redirect to sign-in.
        morsel = cookie_jar.get(name)
        return morsel.value if morsel is not None else None

    generate_output(_cookie_value("email"), _cookie_value("password"))
def generate_output(email, password):
    """Render the home page for the authenticated user, or redirect to sign-in.

    Redirects whenever credentials are missing, unknown, or match neither the
    regular nor the one-time (random) password hash.
    """
    if not email or not password:
        print(redirect("signin.py"))
        return

    db_connection = MySQLdb.connect(
        host=config.mysql_host, user=config.mysql_user,
        passwd=config.mysql_password, db=config.mysql_db)
    try:
        db_cursor = db_connection.cursor()

        # Fetch user information from database
        db_cursor.execute("""SELECT email, email_hash, salt, passwd_hash,
                                random_passwd_hash, rating
                         FROM users
                         WHERE email = %s""", (email,))
        record = db_cursor.fetchone()
    finally:
        # FIX: the connection used to leak on every request; always close it.
        db_connection.close()

    # Could not find this user.
    # FIX: use the redirect() helper like every other failure path instead of
    # hand-printing a bare "Location:" header.
    if not record:
        print(redirect("signin.py"))
        return

    UserInformation = namedtuple(
        "UserInformation",
        "email, email_hash, salt, password_hash, random_password_hash, rating")
    user_info = UserInformation._make(record)

    input_password_hash = hashlib.sha256(user_info.salt + password).digest()

    # Wrong password (neither the regular nor the one-time hash matches).
    if (input_password_hash != user_info.password_hash and
            input_password_hash != user_info.random_password_hash):
        print(redirect("signin.py"))
        return

    # add r=x query to display images in all ratings
    image_url = "{}?r=x".format(user_info.email_hash.encode("hex").upper())
    rating = user_info.rating.upper() if user_info.rating else "G"

    message_body = populate_html(
        "home.html",
        dict(email=email, image_url=image_url, rating=rating))
    print(text_response("text/html", message_body))
# CGI entry point: render the traceback into the HTTP response rather than
# letting the server emit an opaque 500 error.
try:
    process_input()
except:
    cgi.print_exception()
| 1,831 | 0 | 46 |
07448024f6f040dd89d7b7d6ee58c934ed48fba4 | 427 | py | Python | mokaplayer/ui/gtk/adapter/adapter_tab.py | vedard/MusicPlayer | cffc16ebb1372ad8916d62c4dc1215298eddc75d | [
"MIT"
] | 1 | 2017-10-05T14:30:17.000Z | 2017-10-05T14:30:17.000Z | mokaplayer/ui/gtk/adapter/adapter_tab.py | vedard/MusicPlayer | cffc16ebb1372ad8916d62c4dc1215298eddc75d | [
"MIT"
] | null | null | null | mokaplayer/ui/gtk/adapter/adapter_tab.py | vedard/MusicPlayer | cffc16ebb1372ad8916d62c4dc1215298eddc75d | [
"MIT"
] | null | null | null | from gi.repository import Gtk
| 19.409091 | 53 | 0.508197 | from gi.repository import Gtk
class AdapterTab:
    """Maps tab dictionaries onto Gtk.ListStore rows (type, name, rating, votes, url)."""

    @staticmethod
    def create_row(tab):
        """Build one list-store row; the tab type is rendered as a string."""
        row = [str(tab['type'])]
        for key in ('name', 'rating', 'votes', 'url'):
            row.append(tab[key])
        return row

    @staticmethod
    def create_store():
        """Create the backing store whose column types match create_row."""
        column_types = (str, str, int, int, str)
        return Gtk.ListStore(*column_types)

    @staticmethod
    def create_col_number():
        """Column indices displayed by the view, in row order."""
        return list(range(5))
| 243 | 130 | 23 |
91602bafca408a0b22428838102d8c066829afb5 | 11,657 | py | Python | setup.py | itsabhishekhere/scikit-learn | 8266583d99b5a30c5fc79c3fdad809cc5e8684bc | [
"BSD-3-Clause"
] | 1 | 2021-11-19T06:21:43.000Z | 2021-11-19T06:21:43.000Z | setup.py | Nisar-1234/scikit-learn | 1cd282d600088d2547d827af72a99e036106417a | [
"BSD-3-Clause"
] | 2 | 2021-04-13T12:48:43.000Z | 2021-04-13T16:17:58.000Z | setup.py | hurricane642/scikit-learn | 5c3cb6b0af04344d41d542b718d682604d6aa685 | [
"BSD-3-Clause"
] | 1 | 2021-11-19T06:21:34.000Z | 2021-11-19T06:21:34.000Z | #! /usr/bin/env python
#
# Copyright (C) 2007-2009 Cournapeau David <cournape@gmail.com>
# 2010 Fabian Pedregosa <fabian.pedregosa@inria.fr>
# License: 3-clause BSD
import sys
import os
import platform
import shutil
# We need to import setuptools before because it monkey-patches distutils
import setuptools # noqa
from distutils.command.clean import clean as Clean
from distutils.command.sdist import sdist
import traceback
import importlib
try:
import builtins
except ImportError:
# Python 2 compat: just to be able to declare that Python >=3.7 is needed.
import __builtin__ as builtins
# This is a bit (!) hackish: we are setting a global variable so that the
# main sklearn __init__ can detect if it is being loaded by the setup
# routine, to avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-learn to
# recursively build the compiled extensions in sub-packages is based on the
# Python import machinery.
builtins.__SKLEARN_SETUP__ = True
DISTNAME = 'scikit-learn'
DESCRIPTION = 'A set of python modules for machine learning and data mining'
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
MAINTAINER = 'Andreas Mueller'
MAINTAINER_EMAIL = 'amueller@ais.uni-bonn.de'
URL = 'http://scikit-learn.org'
DOWNLOAD_URL = 'https://pypi.org/project/scikit-learn/#files'
LICENSE = 'new BSD'
PROJECT_URLS = {
'Bug Tracker': 'https://github.com/scikit-learn/scikit-learn/issues',
'Documentation': 'https://scikit-learn.org/stable/documentation.html',
'Source Code': 'https://github.com/scikit-learn/scikit-learn'
}
# We can actually import a restricted version of sklearn that
# does not need the compiled code
import sklearn
import sklearn._min_dependencies as min_deps # noqa
from sklearn.externals._packaging.version import parse as parse_version # noqa
VERSION = sklearn.__version__
# For some commands, use setuptools
SETUPTOOLS_COMMANDS = {
'develop', 'release', 'bdist_egg', 'bdist_rpm',
'bdist_wininst', 'install_egg_info', 'build_sphinx',
'egg_info', 'easy_install', 'upload', 'bdist_wheel',
'--single-version-externally-managed',
}
if SETUPTOOLS_COMMANDS.intersection(sys.argv):
extra_setuptools_args = dict(
zip_safe=False, # the package can run out of an .egg file
include_package_data=True,
extras_require={
key: min_deps.tag_to_packages[key] for
key in ['examples', 'docs', 'tests', 'benchmark']
},
)
else:
extra_setuptools_args = dict()
# Custom clean command to remove build artifacts
cmdclass = {'clean': CleanCommand, 'sdist': sdist}
# Custom build_ext command to set OpenMP compile flags depending on os and
# compiler. Also makes it possible to set the parallelism level via
# and environment variable (useful for the wheel building CI).
# build_ext has to be imported after setuptools
try:
from numpy.distutils.command.build_ext import build_ext # noqa
cmdclass['build_ext'] = build_ext_subclass
except ImportError:
# Numpy should not be a dependency just to be able to introspect
# that python 3.7 is required.
pass
# Optional wheelhouse-uploader features
# To automate release of binary packages for scikit-learn we need a tool
# to download the packages generated by travis and appveyor workers (with
# version number matching the current release) and upload them all at once
# to PyPI at release time.
# The URL of the artifact repositories are configured in the setup.cfg file.
WHEELHOUSE_UPLOADER_COMMANDS = {'fetch_artifacts', 'upload_all'}
if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):
import wheelhouse_uploader.cmd
cmdclass.update(vars(wheelhouse_uploader.cmd))
def check_package_status(package, min_version):
    """
    Returns a dictionary containing a boolean specifying whether given package
    is up-to-date, along with the version string (empty string if
    not installed).

    Raises ImportError when the package is missing or older than
    ``min_version``.
    """
    package_status = {'up_to_date': False, 'version': ""}
    try:
        module = importlib.import_module(package)
    except ImportError:
        traceback.print_exc()
    else:
        installed_version = module.__version__
        package_status['version'] = installed_version
        package_status['up_to_date'] = (
            parse_version(installed_version) >= parse_version(min_version))

    req_str = "scikit-learn requires {} >= {}.\n".format(
        package, min_version)

    instructions = ("Installation instructions are available on the "
                    "scikit-learn website: "
                    "http://scikit-learn.org/stable/install.html\n")

    if package_status['up_to_date'] is False:
        if package_status['version']:
            raise ImportError("Your installation of {} "
                              "{} is out-of-date.\n{}{}"
                              .format(package, package_status['version'],
                                      req_str, instructions))
        else:
            raise ImportError("{} is not "
                              "installed.\n{}{}"
                              .format(package, req_str, instructions))
    # FIX: the docstring always promised a dictionary but the function
    # returned None; returning it is backward-compatible for callers that
    # ignored the result.
    return package_status
if __name__ == "__main__":
setup_package()
| 38.727575 | 79 | 0.611307 | #! /usr/bin/env python
#
# Copyright (C) 2007-2009 Cournapeau David <cournape@gmail.com>
# 2010 Fabian Pedregosa <fabian.pedregosa@inria.fr>
# License: 3-clause BSD
import sys
import os
import platform
import shutil
# We need to import setuptools before because it monkey-patches distutils
import setuptools # noqa
from distutils.command.clean import clean as Clean
from distutils.command.sdist import sdist
import traceback
import importlib
try:
import builtins
except ImportError:
# Python 2 compat: just to be able to declare that Python >=3.7 is needed.
import __builtin__ as builtins
# This is a bit (!) hackish: we are setting a global variable so that the
# main sklearn __init__ can detect if it is being loaded by the setup
# routine, to avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-learn to
# recursively build the compiled extensions in sub-packages is based on the
# Python import machinery.
builtins.__SKLEARN_SETUP__ = True
DISTNAME = 'scikit-learn'
DESCRIPTION = 'A set of python modules for machine learning and data mining'
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
MAINTAINER = 'Andreas Mueller'
MAINTAINER_EMAIL = 'amueller@ais.uni-bonn.de'
URL = 'http://scikit-learn.org'
DOWNLOAD_URL = 'https://pypi.org/project/scikit-learn/#files'
LICENSE = 'new BSD'
PROJECT_URLS = {
'Bug Tracker': 'https://github.com/scikit-learn/scikit-learn/issues',
'Documentation': 'https://scikit-learn.org/stable/documentation.html',
'Source Code': 'https://github.com/scikit-learn/scikit-learn'
}
# We can actually import a restricted version of sklearn that
# does not need the compiled code
import sklearn
import sklearn._min_dependencies as min_deps # noqa
from sklearn.externals._packaging.version import parse as parse_version # noqa
VERSION = sklearn.__version__
# For some commands, use setuptools
SETUPTOOLS_COMMANDS = {
'develop', 'release', 'bdist_egg', 'bdist_rpm',
'bdist_wininst', 'install_egg_info', 'build_sphinx',
'egg_info', 'easy_install', 'upload', 'bdist_wheel',
'--single-version-externally-managed',
}
if SETUPTOOLS_COMMANDS.intersection(sys.argv):
extra_setuptools_args = dict(
zip_safe=False, # the package can run out of an .egg file
include_package_data=True,
extras_require={
key: min_deps.tag_to_packages[key] for
key in ['examples', 'docs', 'tests', 'benchmark']
},
)
else:
extra_setuptools_args = dict()
# Custom clean command to remove build artifacts
class CleanCommand(Clean):
    """``python setup.py clean``: besides the default clean, delete the
    build/ tree, compiled extension artifacts, __pycache__ dirs and — when
    not inside an sdist — the Cython-generated .c/.cpp files."""
    description = "Remove build artifacts from the source tree"
    def run(self):
        Clean.run(self)
        # Remove c files if we are not within a sdist package
        cwd = os.path.abspath(os.path.dirname(__file__))
        # PKG-INFO only exists inside an sdist, where the generated C files
        # must be kept (users may not have Cython installed).
        remove_c_files = not os.path.exists(os.path.join(cwd, 'PKG-INFO'))
        if remove_c_files:
            print('Will remove generated .c files')
        if os.path.exists('build'):
            shutil.rmtree('build')
        for dirpath, dirnames, filenames in os.walk('sklearn'):
            for filename in filenames:
                # Compiled artifacts are always safe to delete.
                if any(filename.endswith(suffix) for suffix in
                       (".so", ".pyd", ".dll", ".pyc")):
                    os.unlink(os.path.join(dirpath, filename))
                    continue
                extension = os.path.splitext(filename)[1]
                # Only delete a .c/.cpp file if its .pyx source still exists,
                # i.e. the file was generated by Cython and can be rebuilt.
                if remove_c_files and extension in ['.c', '.cpp']:
                    pyx_file = str.replace(filename, extension, '.pyx')
                    if os.path.exists(os.path.join(dirpath, pyx_file)):
                        os.unlink(os.path.join(dirpath, filename))
            for dirname in dirnames:
                if dirname == '__pycache__':
                    shutil.rmtree(os.path.join(dirpath, dirname))
# Custom build_ext command to set OpenMP compile flags depending on os and
# compiler. Also makes it possible to set the parallelism level via
# and environment variable (useful for the wheel building CI).
# build_ext has to be imported after setuptools
try:
from numpy.distutils.command.build_ext import build_ext # noqa
    class build_ext_subclass(build_ext):
        """build_ext that honours the SKLEARN_BUILD_PARALLEL environment
        variable and appends the compiler-specific OpenMP flags when OpenMP
        support was detected."""
        def finalize_options(self):
            super().finalize_options()
            if self.parallel is None:
                # Do not override self.parallel if already defined by
                # command-line flag (--parallel or -j)
                parallel = os.environ.get("SKLEARN_BUILD_PARALLEL")
                if parallel:
                    self.parallel = int(parallel)
            if self.parallel:
                print("setting parallel=%d " % self.parallel)
        def build_extensions(self):
            # Imported lazily: sklearn._build_utils is only needed at build time.
            from sklearn._build_utils.openmp_helpers import get_openmp_flag
            if sklearn._OPENMP_SUPPORTED:
                # Same flag set for compile and link steps.
                openmp_flag = get_openmp_flag(self.compiler)
                for e in self.extensions:
                    e.extra_compile_args += openmp_flag
                    e.extra_link_args += openmp_flag
            build_ext.build_extensions(self)
cmdclass['build_ext'] = build_ext_subclass
except ImportError:
# Numpy should not be a dependency just to be able to introspect
# that python 3.7 is required.
pass
# Optional wheelhouse-uploader features
# To automate release of binary packages for scikit-learn we need a tool
# to download the packages generated by travis and appveyor workers (with
# version number matching the current release) and upload them all at once
# to PyPI at release time.
# The URL of the artifact repositories are configured in the setup.cfg file.
WHEELHOUSE_UPLOADER_COMMANDS = {'fetch_artifacts', 'upload_all'}
if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):
import wheelhouse_uploader.cmd
cmdclass.update(vars(wheelhouse_uploader.cmd))
def configuration(parent_package='', top_path=None):
    """Return the ``numpy.distutils`` Configuration for the project.

    Removes any stale ``MANIFEST`` file first (so distutils does not reuse
    an outdated file list), checks the Cython version, and registers the
    ``sklearn`` subpackage.
    """
    if os.path.exists('MANIFEST'):
        os.remove('MANIFEST')
    from numpy.distutils.misc_util import Configuration
    from sklearn._build_utils import _check_cython_version
    config = Configuration(None, parent_package, top_path)
    # Avoid non-useful msg:
    # "Ignoring attempt to set 'name' (from ... "
    config.set_options(ignore_setup_xxx_py=True,
                       assume_default_configuration=True,
                       delegate_options_to_subpackages=True,
                       quiet=True)
    # Cython is required by config.add_subpackage for templated extensions
    # that need the tempita sub-submodule. So check that we have the correct
    # version of Cython so as to be able to raise a more informative error
    # message from the start if it's not the case.
    _check_cython_version()
    config.add_subpackage('sklearn')
    return config
def check_package_status(package, min_version):
    """Check that *package* is importable and at least *min_version*.

    Parameters
    ----------
    package : str
        Name of the module to import (e.g. ``"numpy"``).
    min_version : str
        Minimum acceptable version string.

    Returns
    -------
    dict
        ``{'up_to_date': bool, 'version': str}``; ``version`` is the empty
        string when the package is not installed.

    Raises
    ------
    ImportError
        If the package is missing or older than *min_version*, with
        installation instructions in the message.
    """
    package_status = {}
    try:
        module = importlib.import_module(package)
        package_version = module.__version__
        package_status['up_to_date'] = parse_version(
            package_version) >= parse_version(min_version)
        package_status['version'] = package_version
    except ImportError:
        traceback.print_exc()
        package_status['up_to_date'] = False
        package_status['version'] = ""
    req_str = "scikit-learn requires {} >= {}.\n".format(
        package, min_version)
    instructions = ("Installation instructions are available on the "
                    "scikit-learn website: "
                    "http://scikit-learn.org/stable/install.html\n")
    if package_status['up_to_date'] is False:
        if package_status['version']:
            raise ImportError("Your installation of {} "
                              "{} is out-of-date.\n{}{}"
                              .format(package, package_status['version'],
                                      req_str, instructions))
        raise ImportError("{} is not "
                          "installed.\n{}{}"
                          .format(package, req_str, instructions))
    # Fix: the docstring always promised a status dictionary, but the
    # original implementation never returned it for up-to-date packages.
    return package_status
def setup_package():
    """Collect package metadata and invoke the appropriate ``setup()``.

    Metadata-only commands (egg_info, dist_info, clean, check) are served
    with plain setuptools so they succeed before numpy/scipy are installed;
    all other commands validate the numpy/scipy installs and use
    ``numpy.distutils`` so compiled extensions can be built.
    """
    metadata = dict(name=DISTNAME,
                    maintainer=MAINTAINER,
                    maintainer_email=MAINTAINER_EMAIL,
                    description=DESCRIPTION,
                    license=LICENSE,
                    url=URL,
                    download_url=DOWNLOAD_URL,
                    project_urls=PROJECT_URLS,
                    version=VERSION,
                    long_description=LONG_DESCRIPTION,
                    classifiers=['Intended Audience :: Science/Research',
                                 'Intended Audience :: Developers',
                                 'License :: OSI Approved',
                                 'Programming Language :: C',
                                 'Programming Language :: Python',
                                 'Topic :: Software Development',
                                 'Topic :: Scientific/Engineering',
                                 'Development Status :: 5 - Production/Stable',
                                 'Operating System :: Microsoft :: Windows',
                                 'Operating System :: POSIX',
                                 'Operating System :: Unix',
                                 'Operating System :: MacOS',
                                 'Programming Language :: Python :: 3',
                                 'Programming Language :: Python :: 3.7',
                                 'Programming Language :: Python :: 3.8',
                                 'Programming Language :: Python :: 3.9',
                                 ('Programming Language :: Python :: '
                                  'Implementation :: CPython'),
                                 ('Programming Language :: Python :: '
                                  'Implementation :: PyPy')
                                 ],
                    cmdclass=cmdclass,
                    python_requires=">=3.7",
                    install_requires=min_deps.tag_to_packages['install'],
                    package_data={'': ['*.pxd']},
                    **extra_setuptools_args)
    commands = [arg for arg in sys.argv[1:] if not arg.startswith('-')]
    if all(command in ('egg_info', 'dist_info', 'clean', 'check')
           for command in commands):
        # These actions are required to succeed without Numpy for example when
        # pip is used to install Scikit-learn when Numpy is not yet present in
        # the system.
        # These commands use setup from setuptools
        from setuptools import setup
        metadata['version'] = VERSION
    else:
        # Bug fix: this guard used to test (3, 6), which contradicted the
        # error message below, the classifiers and python_requires (all 3.7).
        if sys.version_info < (3, 7):
            raise RuntimeError(
                "Scikit-learn requires Python 3.7 or later. The current"
                " Python version is %s installed in %s."
                % (platform.python_version(), sys.executable))
        check_package_status('numpy', min_deps.NUMPY_MIN_VERSION)
        check_package_status('scipy', min_deps.SCIPY_MIN_VERSION)
        # These commands require the setup from numpy.distutils because they
        # may use numpy.distutils compiler classes.
        from numpy.distutils.core import setup
        metadata['configuration'] = configuration
    setup(**metadata)
if __name__ == "__main__":
setup_package()
| 6,137 | 111 | 158 |
eeb66eb14ecea99317fc795c1d7fbfa89e6de230 | 4,107 | py | Python | floxcore/console.py | getflox/flox-core | 128b5f3272384e38881db8fb90c175ce8f44b904 | [
"MIT"
] | null | null | null | floxcore/console.py | getflox/flox-core | 128b5f3272384e38881db8fb90c175ce8f44b904 | [
"MIT"
] | null | null | null | floxcore/console.py | getflox/flox-core | 128b5f3272384e38881db8fb90c175ce8f44b904 | [
"MIT"
] | null | null | null | import textwrap
from functools import partial
import click
import tqdm
from wasabi import Printer, MESSAGES
from wasabi.util import ICONS
msg = Printer()
success = partial(msg.text, color=MESSAGES.GOOD, icon=MESSAGES.GOOD)
info = partial(msg.text, color=MESSAGES.INFO, icon=MESSAGES.INFO)
error = partial(msg.text, color=MESSAGES.FAIL, icon=MESSAGES.FAIL)
warning = partial(msg.text, color=MESSAGES.WARN, icon=MESSAGES.WARN)
error_box = partial(message_box, bg="red", icon=MESSAGES.FAIL)
info_box = partial(message_box, bg="blue", icon=MESSAGES.INFO)
warning_box = partial(message_box, bg="yellow", icon=MESSAGES.WARN)
success_box = partial(message_box, bg="green", icon=MESSAGES.GOOD)
| 38.745283 | 112 | 0.634526 | import textwrap
from functools import partial
import click
import tqdm
from wasabi import Printer, MESSAGES
from wasabi.util import ICONS
msg = Printer()
success = partial(msg.text, color=MESSAGES.GOOD, icon=MESSAGES.GOOD)
info = partial(msg.text, color=MESSAGES.INFO, icon=MESSAGES.INFO)
error = partial(msg.text, color=MESSAGES.FAIL, icon=MESSAGES.FAIL)
warning = partial(msg.text, color=MESSAGES.WARN, icon=MESSAGES.WARN)
def message_box(message, bg, icon, extra=None, file=None):
    """Print *message* inside a coloured, terminal-wide box.

    :param message: main text; wrapped to the terminal width and prefixed
        with the wasabi icon named by *icon*.
    :param bg: click colour name used as the box background.
    :param icon: key into wasabi's ICONS table (e.g. a MESSAGES constant).
    :param extra: optional secondary text rendered below the message.
    :param file: optional stream passed through to click.secho.
    """
    # Fix: click.get_terminal_size() was removed in click 8.1; the stdlib
    # shutil.get_terminal_size() returns the same (columns, lines) tuple.
    import shutil
    width = min(120, shutil.get_terminal_size()[0])
    indent = " " * 2
    wrap = partial(textwrap.fill, width=width - len(indent), subsequent_indent=indent,
                   break_long_words=False, break_on_hyphens=False)
    lines = [""]
    lines += wrap(message, initial_indent=f"{indent}{ICONS.get(icon)} ").splitlines()
    if extra:
        lines += wrap(extra, initial_indent=indent).splitlines()
    lines.append("")
    click.echo("")
    for i, line in enumerate(lines):
        click.echo(" ", nl=False)
        # i == 1 is the first wrapped line of the message: render it bold.
        click.secho(f"{line}{indent}".ljust(width, " "), bg=bg, bold=i == 1, file=file)
    click.echo("")
error_box = partial(message_box, bg="red", icon=MESSAGES.FAIL)
info_box = partial(message_box, bg="blue", icon=MESSAGES.INFO)
warning_box = partial(message_box, bg="yellow", icon=MESSAGES.WARN)
success_box = partial(message_box, bg="green", icon=MESSAGES.GOOD)
class Output:
    """Console reporter that prefixes messages with an optional context tag.

    Wraps a wasabi ``Printer`` and exposes one method per severity level
    (success/info/error/warning); all four share a single formatting path.
    """
    def __init__(self, stages, *args, **kwargs):
        self.stages = stages      # iterable exposed via __iter__
        self.context = None       # optional tag rendered as "[context]"
        self.printer = Printer()
    def _emit(self, style, title, text, show, spaced, exits):
        # Shared formatting path: `style` is a wasabi MESSAGES constant used
        # both as the colour and the icon of the rendered line.
        self.write(
            self.printer.text(title=self.with_prefix(title), text=text,
                              color=style, icon=style, show=show,
                              spaced=spaced, exits=exits, no_print=True)
        )
    def success(self, title="", text="", show=True, spaced=False, exits=None):
        self._emit(MESSAGES.GOOD, title, text, show, spaced, exits)
    def info(self, title="", text="", show=True, spaced=False, exits=None):
        self._emit(MESSAGES.INFO, title, text, show, spaced, exits)
    def error(self, title="", text="", show=True, spaced=False, exits=None):
        self._emit(MESSAGES.FAIL, title, text, show, spaced, exits)
    def warning(self, title="", text="", show=True, spaced=False, exits=None):
        self._emit(MESSAGES.WARN, title, text, show, spaced, exits)
    def set_description(self, *args, **kwargs):
        # No-op: present for tqdm API compatibility.
        pass
    def close(self, *args, **kwargs):
        # No-op: present for tqdm API compatibility.
        pass
    def write(self, text):
        click.echo(text)
    def set_context(self, context: str):
        self.context = context
    def line_prefix(self) -> str:
        return f"[{self.context}] " if self.context else ""
    def with_prefix(self, title) -> str:
        return f"{self.line_prefix()} {title}"
    def __iter__(self):
        return iter(self.stages)
class tqdm(tqdm.tqdm, Output):
    # NOTE: intentionally shadows the imported `tqdm` module name so this
    # class can be used as a drop-in progress bar that also provides the
    # Output reporting API.  super().__init__ resolves to tqdm.tqdm.__init__
    # (first in the MRO), so the Output branch is initialised explicitly.
    def __init__(self, iterable=None, desc=None, total=None, leave=True, file=None, ncols=None, mininterval=0.1,
                 maxinterval=10.0, miniters=None, ascii=None, disable=False, unit='it', unit_scale=False,
                 dynamic_ncols=False, smoothing=0.3, bar_format=None, initial=0, position=None, postfix=None,
                 unit_divisor=1000, write_bytes=None, lock_args=None, gui=False, **kwargs):
        # Default bar_format drops the rate/ETA portion of tqdm's default bar.
        super().__init__(iterable, desc, total, leave, file, ncols, mininterval, maxinterval, miniters, ascii,
                         disable, unit, unit_scale, dynamic_ncols, smoothing,
                         bar_format or "{l_bar}{bar} | {n_fmt}/{total_fmt}", initial, position, postfix,
                         unit_divisor, write_bytes, lock_args, gui, **kwargs)
        Output.__init__(self, [])
| 2,994 | 1 | 418 |
8a869da913e7e3c281811411cee3eca6a5841c95 | 2,811 | py | Python | nano/web/db.py | aga3134/VapaaCruiser | 76095296fe910b9b99edaaea2f96024b6ae65336 | [
"MIT"
] | 2 | 2021-01-26T13:26:05.000Z | 2021-08-05T08:04:49.000Z | nano/web/db.py | aga3134/VapaaCruiser | 76095296fe910b9b99edaaea2f96024b6ae65336 | [
"MIT"
] | null | null | null | nano/web/db.py | aga3134/VapaaCruiser | 76095296fe910b9b99edaaea2f96024b6ae65336 | [
"MIT"
] | null | null | null | import sqlite3
import json
import uuid | 33.070588 | 143 | 0.533618 | import sqlite3
import json
import uuid
class SqliteDB:
    """Thin SQLite persistence layer for user settings and navigation paths.

    All statements use DB-API ``?`` placeholders instead of Python string
    formatting, which both prevents SQL injection and makes values with
    quotes work (the original interpolated strings would break or allow
    injection).
    """
    def __init__(self, db_path="vapaa_cruiser.db"):
        # db_path is parameterized (the default preserves the original
        # behaviour); pass ":memory:" for an ephemeral database in tests.
        # sqlite3.version was deprecated in Python 3.12 and removed in 3.14;
        # report the linked SQLite library version instead.
        print(sqlite3.sqlite_version)
        self.conn = sqlite3.connect(db_path)
        self.CreateTable()
    def __del__(self):
        self.conn.close()
    def CreateTable(self):
        """Create the Setting and NavigationPath tables if they are missing."""
        c = self.conn.cursor()
        c.execute('''CREATE TABLE IF NOT EXISTS Setting
            (userID TEXT PRIMARY KEY NOT NULL,
            dataset TEXT,
            apiKey TEXT);
            ''')
        c.execute('''CREATE TABLE IF NOT EXISTS NavigationPath
            (id TEXT PRIMARY KEY,
            userID TEXT,
            path TEXT);
            ''')
        self.conn.commit()
    def GetSetting(self, userID):
        """Return {'userID', 'dataset', 'apiKey'} for userID, or None."""
        c = self.conn.cursor()
        row = c.execute(
            "SELECT userID, dataset, apiKey FROM Setting WHERE userID=?;",
            (userID,)).fetchone()
        if row is None:
            return None
        return {"userID": row[0], "dataset": row[1], "apiKey": row[2]}
    def UpdateSetting(self, data):
        """Insert or update the settings row for data['userID']."""
        c = self.conn.cursor()
        if self.GetSetting(data["userID"]) is None:
            c.execute(
                "INSERT INTO Setting (userID, dataset, apiKey) VALUES (?, ?, ?);",
                (data["userID"], data["dataset"], data["apiKey"]))
        else:
            c.execute(
                "UPDATE Setting SET dataset=?, apiKey=? WHERE userID=?;",
                (data["dataset"], data["apiKey"], data["userID"]))
        self.conn.commit()
    def CreateNavigationPath(self, userID, path):
        """Store a new path (JSON string); a fresh UUID is injected as its id."""
        c = self.conn.cursor()
        id = str(uuid.uuid4())
        path = json.loads(path)
        path["id"] = id
        c.execute(
            "INSERT INTO NavigationPath (id, userID, path) VALUES (?, ?, ?);",
            (id, userID, json.dumps(path)))
        self.conn.commit()
    def EditNavigationPath(self, userID, path):
        """Replace the stored path whose id matches path['id'] (JSON string)."""
        c = self.conn.cursor()
        path = json.loads(path)
        c.execute(
            "UPDATE NavigationPath SET path=? WHERE userID=? AND id=?;",
            (json.dumps(path), userID, path["id"]))
        self.conn.commit()
    def ListNavigationPath(self, userID):
        """Return all of userID's paths as dicts with decoded 'path' JSON."""
        c = self.conn.cursor()
        rows = c.execute(
            "SELECT * FROM NavigationPath WHERE userID=?;",
            (userID,)).fetchall()
        return [{"id": r[0], "userID": r[1], "path": json.loads(r[2])}
                for r in rows]
    def DeleteNavigationPath(self, userID, pathID):
        """Delete the path with id pathID belonging to userID."""
        c = self.conn.cursor()
        c.execute(
            "DELETE FROM NavigationPath WHERE userID=? AND id=?;",
            (userID, pathID))
        self.conn.commit()
fb7e182e2a4c39b319dd385379a6767ba86954ae | 1,392 | py | Python | py/2015/19A.py | pedrotari7/advent_of_code | 98d5bc8d903435624a019a5702f5421d7b4ef8c8 | [
"MIT"
] | null | null | null | py/2015/19A.py | pedrotari7/advent_of_code | 98d5bc8d903435624a019a5702f5421d7b4ef8c8 | [
"MIT"
] | null | null | null | py/2015/19A.py | pedrotari7/advent_of_code | 98d5bc8d903435624a019a5702f5421d7b4ef8c8 | [
"MIT"
] | null | null | null | s = 'ORnPBPMgArCaCaCaSiThCaCaSiThCaCaPBSiRnFArRnFArCaCaSiThCaCaSiThCaCaCaCaCaCaSiRnFYFArSiRnMgArCaSiRnPTiTiBFYPBFArSiRnCaSiRnTiRnFArSiAlArPTiBPTiRnCaSiAlArCaPTiTiBPMgYFArPTiRnFArSiRnCaCaFArRnCaFArCaSiRnSiRnMgArFYCaSiRnMgArCaCaSiThPRnFArPBCaSiRnMgArCaCaSiThCaSiRnTiMgArFArSiThSiThCaCaSiRnMgArCaCaSiRnFArTiBPTiRnCaSiAlArCaPTiRnFArPBPBCaCaSiThCaPBSiThPRnFArSiThCaSiThCaSiThCaPTiBSiRnFYFArCaCaPRnFArPBCaCaPBSiRnTiRnFArCaPRnFArSiRnCaCaCaSiThCaRnCaFArYCaSiRnFArBCaCaCaSiThFArPBFArCaSiRnFArRnCaCaCaFArSiRnFArTiRnPMgArF'
cmds = """Al => ThF
Al => ThRnFAr
B => BCa
B => TiB
B => TiRnFAr
Ca => CaCa
Ca => PB
Ca => PRnFAr
Ca => SiRnFYFAr
Ca => SiRnMgAr
Ca => SiTh
F => CaF
F => PMg
F => SiAl
H => CRnAlAr
H => CRnFYFYFAr
H => CRnFYMgAr
H => CRnMgYFAr
H => HCa
H => NRnFYFAr
H => NRnMgAr
H => NTh
H => OB
H => ORnFAr
Mg => BF
Mg => TiMg
N => CRnFAr
N => HSi
O => CRnFYFAr
O => CRnMgAr
O => HP
O => NRnFAr
O => OTi
P => CaP
P => PTi
P => SiRnFAr
Si => CaSi
Th => ThCa
Ti => BP
Ti => TiTi
e => HF
e => NAl
e => OMg"""
import re, copy
t = [n.split('=>') for n in cmds.replace(' ','').split('\n')]
conv = dict()
for name, value in t:
if name not in conv:
conv[name] = [value]
else:
conv[name].append(value)
final = set()
for name in conv:
index = [m.start() for m in list(re.finditer(name, s))]
for b in conv[name]:
for i in index:
final.add(s[:i] + b + s[i+len(name):])
print len(final) | 21.090909 | 512 | 0.721983 | s = 'ORnPBPMgArCaCaCaSiThCaCaSiThCaCaPBSiRnFArRnFArCaCaSiThCaCaSiThCaCaCaCaCaCaSiRnFYFArSiRnMgArCaSiRnPTiTiBFYPBFArSiRnCaSiRnTiRnFArSiAlArPTiBPTiRnCaSiAlArCaPTiTiBPMgYFArPTiRnFArSiRnCaCaFArRnCaFArCaSiRnSiRnMgArFYCaSiRnMgArCaCaSiThPRnFArPBCaSiRnMgArCaCaSiThCaSiRnTiMgArFArSiThSiThCaCaSiRnMgArCaCaSiRnFArTiBPTiRnCaSiAlArCaPTiRnFArPBPBCaCaSiThCaPBSiThPRnFArSiThCaSiThCaSiThCaPTiBSiRnFYFArCaCaPRnFArPBCaCaPBSiRnTiRnFArCaPRnFArSiRnCaCaCaSiThCaRnCaFArYCaSiRnFArBCaCaCaSiThFArPBFArCaSiRnFArRnCaCaCaFArSiRnFArTiRnPMgArF'
cmds = """Al => ThF
Al => ThRnFAr
B => BCa
B => TiB
B => TiRnFAr
Ca => CaCa
Ca => PB
Ca => PRnFAr
Ca => SiRnFYFAr
Ca => SiRnMgAr
Ca => SiTh
F => CaF
F => PMg
F => SiAl
H => CRnAlAr
H => CRnFYFYFAr
H => CRnFYMgAr
H => CRnMgYFAr
H => HCa
H => NRnFYFAr
H => NRnMgAr
H => NTh
H => OB
H => ORnFAr
Mg => BF
Mg => TiMg
N => CRnFAr
N => HSi
O => CRnFYFAr
O => CRnMgAr
O => HP
O => NRnFAr
O => OTi
P => CaP
P => PTi
P => SiRnFAr
Si => CaSi
Th => ThCa
Ti => BP
Ti => TiTi
e => HF
e => NAl
e => OMg"""
import re, copy
t = [n.split('=>') for n in cmds.replace(' ','').split('\n')]
conv = dict()
for name, value in t:
if name not in conv:
conv[name] = [value]
else:
conv[name].append(value)
final = set()
for name in conv:
index = [m.start() for m in list(re.finditer(name, s))]
for b in conv[name]:
for i in index:
final.add(s[:i] + b + s[i+len(name):])
print len(final) | 0 | 0 | 0 |
6f80e39d00765ee017d888d8aaa001d690eeadcf | 3,403 | py | Python | API_caller_example.py | TBFY/crosslinguality | d20c2b7fef02be923a76e471bd27262252bd3aa2 | [
"Apache-2.0"
] | null | null | null | API_caller_example.py | TBFY/crosslinguality | d20c2b7fef02be923a76e471bd27262252bd3aa2 | [
"Apache-2.0"
] | null | null | null | API_caller_example.py | TBFY/crosslinguality | d20c2b7fef02be923a76e471bd27262252bd3aa2 | [
"Apache-2.0"
] | 2 | 2018-06-15T09:18:06.000Z | 2019-11-14T15:00:20.000Z | import urllib.parse, urllib.request, json
CompareDocs("""
All human beings are born free and equal in dignity and rights.
They are endowed with reason and conscience and should act towards
one another in a spirit of brotherhood.
Everyone is entitled to all the rights and freedoms set forth in
this Declaration, without distinction of any kind, such as race,
colour, sex, language, religion, political or other opinion,
national or social origin, property, birth or other status.
Furthermore, no distinction shall be made on the basis of the
political, jurisdictional or international status of the country
or territory to which a person belongs, whether it be independent,
trust, non-self-governing or under any other limitation of
sovereignty.""",
"en",
"""
Alle Menschen sind frei und gleich an W\u00fcrde und Rechten geboren.
Sie sind mit Vernunft und Gewissen begabt und sollen einander im
Geist der Br\u00fcderlichkeit begegnen.
Jeder hat Anspruch auf die in dieser Erkl\u00e4rung verk\u00fcndeten Rechte
und Freiheiten ohne irgendeinen Unterschied, etwa nach Rasse,
Hautfarbe, Geschlecht, Sprache, Religion, politischer oder sonstiger
\u00dcberzeugung, nationaler oder sozialer Herkunft, Verm\u00f6gen, Geburt
oder sonstigem Stand.
Des weiteren darf kein Unterschied gemacht werden auf Grund der
politischen, rechtlichen oder internationalen Stellung des Landes
oder Gebiets, dem eine Person angeh\u00f6rt, gleichg\u00fcltig ob dieses
unabh\u00e4ngig ist, unter Treuhandschaft steht, keine Selbstregierung
besitzt oder sonst in seiner Souver\u00e4nit\u00e4t eingeschr\u00e4nkt ist.
""",
"de")
| 52.353846 | 83 | 0.693212 | import urllib.parse, urllib.request, json
def CompareDocs(text1, lang1, text2, lang2):
    """POST the two documents to wikifier.org's compare-docs endpoint and
    print the similarity scores from the JSON response.

    text1/text2 are the raw document texts; lang1/lang2 are their language
    codes (e.g. "en", "de").  The translation-based block at the end is only
    printed when the service returned it (per the commented-out field below,
    it presumably requires an Azure key — verify with the service docs).
    """
    # Prepare the request.
    data = urllib.parse.urlencode([
        ("doc1", text1), ("lang1", lang1),
        ("doc2", text2), ("lang2", lang2),
        # ("azureKey", "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"), # insert your key here
        ])
    url = "http://www.wikifier.org/compare-docs"
    # Call the server and read the response.
    req = urllib.request.Request(url, data=data.encode("utf8"), method="POST")
    with urllib.request.urlopen(req, timeout = 60) as f:
        response = f.read()
    #g = open("response.txt", "wb"); g.write(response); g.close()
    response = json.loads(response.decode("utf8"))
    # Output the results.
    print("Similarity based on Wikifier annotations:")
    print(" - Cosine measure: %g" % response["wikiCosine"])
    print(" - Intersection: %d" % response["wikiIntersection"])
    print(" - Jaccard measure: %g" % response["wikiJaccard"])
    print("Similarity based on CCA projections:")
    print(" - Cosine measure: %g" % response["ccaCosine"])
    if "translationCosineBinSw" in response:
        print("Similarity based on translations into English:")
        print(" - Cosine measure over binary vectors, stopwords removed: %g" %
            response["translationCosineBinNoSw"])
        print(" - Cosine measure over binary vectors, stopwords kept: %g" %
            response["translationCosineBinSw"])
        print(" - Cosine measure over TF vectors, stopwords removed: %g" %
            response["translationCosineTfNoSw"])
        print(" - Cosine measure over TF vectors, stopwords kept: %g" %
            response["translationCosineTfSw"])
CompareDocs("""
All human beings are born free and equal in dignity and rights.
They are endowed with reason and conscience and should act towards
one another in a spirit of brotherhood.
Everyone is entitled to all the rights and freedoms set forth in
this Declaration, without distinction of any kind, such as race,
colour, sex, language, religion, political or other opinion,
national or social origin, property, birth or other status.
Furthermore, no distinction shall be made on the basis of the
political, jurisdictional or international status of the country
or territory to which a person belongs, whether it be independent,
trust, non-self-governing or under any other limitation of
sovereignty.""",
"en",
"""
Alle Menschen sind frei und gleich an W\u00fcrde und Rechten geboren.
Sie sind mit Vernunft und Gewissen begabt und sollen einander im
Geist der Br\u00fcderlichkeit begegnen.
Jeder hat Anspruch auf die in dieser Erkl\u00e4rung verk\u00fcndeten Rechte
und Freiheiten ohne irgendeinen Unterschied, etwa nach Rasse,
Hautfarbe, Geschlecht, Sprache, Religion, politischer oder sonstiger
\u00dcberzeugung, nationaler oder sozialer Herkunft, Verm\u00f6gen, Geburt
oder sonstigem Stand.
Des weiteren darf kein Unterschied gemacht werden auf Grund der
politischen, rechtlichen oder internationalen Stellung des Landes
oder Gebiets, dem eine Person angeh\u00f6rt, gleichg\u00fcltig ob dieses
unabh\u00e4ngig ist, unter Treuhandschaft steht, keine Selbstregierung
besitzt oder sonst in seiner Souver\u00e4nit\u00e4t eingeschr\u00e4nkt ist.
""",
"de")
| 1,642 | 0 | 23 |
8b0c5ade6ad28d3803409c5148c21b326eb01e4f | 373 | py | Python | integration-test/1406-include-all-name-variants.py | nextzen/vector-datasource | f11700f232a3a6251915579106ff07b2bee25d12 | [
"MIT"
] | 1 | 2018-01-03T21:26:27.000Z | 2018-01-03T21:26:27.000Z | integration-test/1406-include-all-name-variants.py | nextzen/vector-datasource | f11700f232a3a6251915579106ff07b2bee25d12 | [
"MIT"
] | null | null | null | integration-test/1406-include-all-name-variants.py | nextzen/vector-datasource | f11700f232a3a6251915579106ff07b2bee25d12 | [
"MIT"
] | 1 | 2019-06-19T19:14:42.000Z | 2019-06-19T19:14:42.000Z | from . import FixtureTest
| 24.866667 | 58 | 0.581769 | from . import FixtureTest
class IncludeAllNameVariants(FixtureTest):
def test_duplicate_names(self):
self.load_fixtures([
'http://www.openstreetmap.org/node/206270454',
])
self.assert_has_feature(
15, 18199, 11103, 'pois',
{'id': 206270454, 'kind': 'station',
'name': None, 'name:pl': None})
| 275 | 21 | 50 |
774456242ae119b2dede055522353259d90fdb74 | 8,792 | py | Python | hw/ip/otbn/util/rig/gens/straight_line_insn.py | wxjstz/opentitan | 6ff4397bac9c07373d735bd859c7ef8de39c2af8 | [
"Apache-2.0"
] | null | null | null | hw/ip/otbn/util/rig/gens/straight_line_insn.py | wxjstz/opentitan | 6ff4397bac9c07373d735bd859c7ef8de39c2af8 | [
"Apache-2.0"
] | 1 | 2020-10-30T06:30:51.000Z | 2020-10-30T06:30:51.000Z | hw/ip/otbn/util/rig/gens/straight_line_insn.py | wxjstz/opentitan | 6ff4397bac9c07373d735bd859c7ef8de39c2af8 | [
"Apache-2.0"
] | 1 | 2019-12-13T00:52:40.000Z | 2019-12-13T00:52:40.000Z | # Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import random
from typing import Optional, Tuple
from shared.insn_yaml import Insn, InsnsFile
from shared.lsu_desc import LSUDesc
from shared.operand import ImmOperandType, RegOperandType
from ..program import ProgInsn, Program
from ..model import Model
from ..snippet import Snippet
from ..snippet_gen import SnippetGen
class StraightLineInsn(SnippetGen):
    '''A super-simple snippet consisting of a single instruction'''
    def fill_insn(self, insn: Insn, model: Model) -> Optional[ProgInsn]:
        '''Try to fill out an instruction

        This might fail if, for example, the model doesn't have enough
        registers with architectural values. In that case, return None.
        On success, returns a ProgInsn with one encoded value per operand
        (plus memory-target info for LSU instructions).
        '''
        # If this is not an LSU operation, or it is an LSU operation that
        # operates on CSR/WSRs, we can pick operands independently.
        if insn.lsu is None:
            # For each operand, pick a value that's allowed by the model (i.e.
            # one that won't trigger any undefined behaviour)
            op_vals = []
            for operand in insn.operands:
                op_val = model.pick_operand_value(operand.op_type)
                if op_val is None:
                    return None
                op_vals.append(op_val)
            assert len(op_vals) == len(insn.operands)
            return ProgInsn(insn, op_vals, None)
        # If this is an LSU operation, then the target address is given by the
        # sum of one or more operands. For each of these operands with a
        # register type, we are going to need to look in the model to figure
        # out the list of different known values we can give it. At the moment,
        # we only support the case when there is at most one non-register
        # operand, which must be an immediate. Grab that operand's name too.
        lsu_imm_op = None
        lsu_reg_ops = []
        lsu_reg_types = set()
        imm_op_min = 0
        imm_op_max = 0
        for tgt_op_name in insn.lsu.target:
            tgt_op = insn.name_to_operand[tgt_op_name]
            if isinstance(tgt_op.op_type, ImmOperandType):
                if lsu_imm_op is not None:
                    raise RuntimeError('Multiple immediate operands '
                                       'contribute to target for instruction '
                                       '{!r}. Not currently supported.'
                                       .format(insn.mnemonic))
                lsu_imm_op = tgt_op_name
                imm_op_range = tgt_op.op_type.get_op_val_range(model.pc)
                if imm_op_range is None:
                    assert tgt_op.op_type.width is None
                    raise RuntimeError('The {!r} immediate operand for the '
                                       '{!r} instruction contributes to its '
                                       'LSU target but has no width.'
                                       .format(tgt_op_name, insn.mnemonic))
                imm_op_min, imm_op_max = imm_op_range
                continue
            if isinstance(tgt_op.op_type, RegOperandType):
                reg_type = tgt_op.op_type.reg_type
                lsu_reg_ops.append((tgt_op_name, reg_type))
                lsu_reg_types.add(reg_type)
                continue
            raise RuntimeError('Unknown operand type for {!r} operand of '
                               '{!r} instruction: {}.'
                               .format(tgt_op_name, insn.mnemonic,
                                       type(tgt_op.op_type).__name__))
        # We have a list of register operands, together with their types. Get a
        # list of registers with known values for each register type we've seen.
        known_regs_by_type = {rtype: model.regs_with_known_vals(rtype)
                              for rtype in lsu_reg_types}
        # And turn that into a dict keyed by operand name
        op_to_known_regs = {op_name: known_regs_by_type[op_type]
                            for op_name, op_type in lsu_reg_ops}
        # Ask the model to try to find a target we can use. If this is a load
        # or a CSR operation, it will have to be an address that already has an
        # architectural value. If a store, it can be any address in range.
        # Maps insn.lsu.lsu_type to (memory-space name, must-hold-a-value).
        lsu_type_to_info = {
            'mem-load': ('dmem', True),
            'mem-store': ('dmem', False),
            'csr': ('csr', True),
            'wsr': ('wsr', True)
        }
        assert set(lsu_type_to_info.keys()) == set(LSUDesc.TYPES)
        mem_type, loads_value = lsu_type_to_info[insn.lsu.lsu_type]
        tgt = model.pick_lsu_target(mem_type,
                                    loads_value,
                                    op_to_known_regs,
                                    imm_op_min,
                                    imm_op_max,
                                    insn.lsu.idx_width)
        if tgt is None:
            return None
        addr, imm_val, reg_indices = tgt
        assert imm_op_min <= imm_val <= imm_op_max
        # Build the encoded operand values, consuming the immediate value and
        # the chosen register indices that make up the target address.
        enc_vals = []
        for operand in insn.operands:
            # Is this the immediate? If the immediate operand is signed then
            # note that imm_op_min < 0 and we might have that imm_val < 0.
            # However, we store everything as an enc_val, so we have to
            # "re-encode" here.
            if operand.name == lsu_imm_op:
                enc_val = operand.op_type.op_val_to_enc_val(imm_val, model.pc)
                assert enc_val is not None
                enc_vals.append(enc_val)
                continue
            # Or is it a register operand contributing to the target address?
            reg_val = reg_indices.get(operand.name)
            if reg_val is not None:
                enc_vals.append(reg_val)
                continue
            # Otherwise it's some other operand. Pick any old value.
            val = model.pick_operand_value(operand.op_type)
            if val is None:
                return None
            enc_vals.append(val)
        assert len(enc_vals) == len(insn.operands)
        return ProgInsn(insn, enc_vals, (mem_type, addr))
| 41.866667 | 80 | 0.578253 | # Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import random
from typing import Optional, Tuple
from shared.insn_yaml import Insn, InsnsFile
from shared.lsu_desc import LSUDesc
from shared.operand import ImmOperandType, RegOperandType
from ..program import ProgInsn, Program
from ..model import Model
from ..snippet import Snippet
from ..snippet_gen import SnippetGen
class StraightLineInsn(SnippetGen):
'''A super-simple snippet consisting of a single instruction'''
def __init__(self, insns_file: InsnsFile) -> None:
# Find all the straight line, non-pseudo instructions in insns_file
self.insns = []
for insn in insns_file.insns:
# Skip pseudo-ops
if insn.python_pseudo_op or insn.literal_pseudo_op:
continue
# Skip anything that isn't straight-line
if not insn.straight_line:
continue
# Skip bn.sid, bn.lid and bn.movr: These are indirect and we don't
# currently track their sources properly (e.g. "bn.movr x2, x3"
# reads from the WDR whose index is whatever is currently in x3)
if insn.mnemonic in ['bn.sid', 'bn.lid', 'bn.movr']:
continue
self.insns.append(insn)
def gen(self,
size: int,
model: Model,
program: Program) -> Optional[Tuple[Snippet, bool, int]]:
# Return None if this is the last instruction in the current gap
# because we need to either jump or do an ECALL to avoid getting stuck.
#
# Note that we could do this by defining pick_weight, but we don't
# expect it to happen very often so it's probably best (and cheaper)
# just to disable ourselves on the rare occasions when it does.
if program.get_insn_space_at(model.pc) <= 1:
return None
# Pick a (YAML) instruction at random. We'll probably do some clever
# weighting here later on but, for now, we'll pick uniformly at the
# start.
weights = [1.0] * len(self.insns)
prog_insn = None
while prog_insn is None:
idx = random.choices(range(len(self.insns)), weights=weights)[0]
# Sanity check to make sure some weight was positive
assert weights[idx] > 0
# Try to fill out the instruction. On failure, clear the weight for
# this index and go around again.
prog_insn = self.fill_insn(self.insns[idx], model)
if prog_insn is None:
weights[idx] = 0
continue
# Success! We have generated an instruction. Put it in a snippet and
# add that to the program
snippet = Snippet([(model.pc, [prog_insn])])
snippet.insert_into_program(program)
# Then update the model with the instruction and update the model PC
model.update_for_insn(prog_insn)
model.pc += 4
return (snippet, False, size - 1)
def fill_insn(self, insn: Insn, model: Model) -> Optional[ProgInsn]:
'''Try to fill out an instruction
This might fail if, for example, the model doesn't have enough
registers with architectural values. In that case, return None.
'''
# If this is not an LSU operation, or it is an LSU operation that
# operates on CSR/WSRs, we can pick operands independently.
if insn.lsu is None:
# For each operand, pick a value that's allowed by the model (i.e.
# one that won't trigger any undefined behaviour)
op_vals = []
for operand in insn.operands:
op_val = model.pick_operand_value(operand.op_type)
if op_val is None:
return None
op_vals.append(op_val)
assert len(op_vals) == len(insn.operands)
return ProgInsn(insn, op_vals, None)
# If this is an LSU operation, then the target address is given by the
# sum of one or more operands. For each of these operands with a
# register type, we are going to need to look in the model to figure
# out the list of different known values we can give it. At the moment,
# we only support the case when there is at most one non-register
# operand, which must be an immediate. Grab that operand's name too.
lsu_imm_op = None
lsu_reg_ops = []
lsu_reg_types = set()
imm_op_min = 0
imm_op_max = 0
for tgt_op_name in insn.lsu.target:
tgt_op = insn.name_to_operand[tgt_op_name]
if isinstance(tgt_op.op_type, ImmOperandType):
if lsu_imm_op is not None:
raise RuntimeError('Multiple immediate operands '
'contribute to target for instruction '
'{!r}. Not currently supported.'
.format(insn.mnemonic))
lsu_imm_op = tgt_op_name
imm_op_range = tgt_op.op_type.get_op_val_range(model.pc)
if imm_op_range is None:
assert tgt_op.op_type.width is None
raise RuntimeError('The {!r} immediate operand for the '
'{!r} instruction contributes to its '
'LSU target but has no width.'
.format(tgt_op_name, insn.mnemonic))
imm_op_min, imm_op_max = imm_op_range
continue
if isinstance(tgt_op.op_type, RegOperandType):
reg_type = tgt_op.op_type.reg_type
lsu_reg_ops.append((tgt_op_name, reg_type))
lsu_reg_types.add(reg_type)
continue
raise RuntimeError('Unknown operand type for {!r} operand of '
'{!r} instruction: {}.'
.format(tgt_op_name, insn.mnemonic,
type(tgt_op.op_type).__name__))
# We have a list of register operands, together with their types. Get a
# list of registers with known values for each register type we've seen.
known_regs_by_type = {rtype: model.regs_with_known_vals(rtype)
for rtype in lsu_reg_types}
# And turn that into a dict keyed by operand name
op_to_known_regs = {op_name: known_regs_by_type[op_type]
for op_name, op_type in lsu_reg_ops}
# Ask the model to try to find a target we can use. If this is a load
# or a CSR operation, it will have to be an address that already has an
# architectural value. If a store, it can be any address in range.
lsu_type_to_info = {
'mem-load': ('dmem', True),
'mem-store': ('dmem', False),
'csr': ('csr', True),
'wsr': ('wsr', True)
}
assert set(lsu_type_to_info.keys()) == set(LSUDesc.TYPES)
mem_type, loads_value = lsu_type_to_info[insn.lsu.lsu_type]
tgt = model.pick_lsu_target(mem_type,
loads_value,
op_to_known_regs,
imm_op_min,
imm_op_max,
insn.lsu.idx_width)
if tgt is None:
return None
addr, imm_val, reg_indices = tgt
assert imm_op_min <= imm_val <= imm_op_max
enc_vals = []
for operand in insn.operands:
# Is this the immediate? If the immediate operand is signed then
# note that imm_op_min < 0 and we might have that imm_val < 0.
# However, we store everything as an enc_val, so we have to
# "re-encode" here.
if operand.name == lsu_imm_op:
enc_val = operand.op_type.op_val_to_enc_val(imm_val, model.pc)
assert enc_val is not None
enc_vals.append(enc_val)
continue
# Or is it a register operand contributing to the target address?
reg_val = reg_indices.get(operand.name)
if reg_val is not None:
enc_vals.append(reg_val)
continue
# Otherwise it's some other operand. Pick any old value.
val = model.pick_operand_value(operand.op_type)
if val is None:
return None
enc_vals.append(val)
assert len(enc_vals) == len(insn.operands)
return ProgInsn(insn, enc_vals, (mem_type, addr))
| 2,466 | 0 | 53 |
8c645ea7eea7f9d79701357200e3200adcd89283 | 850 | py | Python | shortit/shortener/tests/test_views.py | m7salam/shortit | c575acb4e8b447ac62abdf899063357f1569e93d | [
"MIT"
] | null | null | null | shortit/shortener/tests/test_views.py | m7salam/shortit | c575acb4e8b447ac62abdf899063357f1569e93d | [
"MIT"
] | 2 | 2022-03-01T00:07:15.000Z | 2022-03-02T00:17:58.000Z | shortit/shortener/tests/test_views.py | m7salam/shortit | c575acb4e8b447ac62abdf899063357f1569e93d | [
"MIT"
] | null | null | null | # from django.http import request
# import pytest
# from django.contrib.auth.models import AnonymousUser
# from django.http.response import Http404, HttpResponse
# from django.shortcuts import get_object_or_404
# from django.test import RequestFactory
# from shortit.shortener.models import ShortUrl
# from shortit.shortener.views import short_url_redirect_view
# from shortit.shortener.tests.factories import UrlFactory
# pytestmark = pytest.mark.django_db
# class TestShortUrlRedirectView:
# def test_get_redirect_url(self, short_url: ShortUrl, rf: RequestFactory):
# request = rf.get("/fake-url")
# view = short_url_redirect_view(request)
# obj = get_object_or_404(short_url, shortcode=shortcode)
# = short_url
# view.request = request
# assert HttpResponse == f"{obj.url}/"
| 29.310345 | 79 | 0.734118 | # from django.http import request
# import pytest
# from django.contrib.auth.models import AnonymousUser
# from django.http.response import Http404, HttpResponse
# from django.shortcuts import get_object_or_404
# from django.test import RequestFactory
# from shortit.shortener.models import ShortUrl
# from shortit.shortener.views import short_url_redirect_view
# from shortit.shortener.tests.factories import UrlFactory
# pytestmark = pytest.mark.django_db
# class TestShortUrlRedirectView:
# def test_get_redirect_url(self, short_url: ShortUrl, rf: RequestFactory):
# request = rf.get("/fake-url")
# view = short_url_redirect_view(request)
# obj = get_object_or_404(short_url, shortcode=shortcode)
# = short_url
# view.request = request
# assert HttpResponse == f"{obj.url}/"
| 0 | 0 | 0 |
1f9a2fdba7ba7f61e0c08e6682726a0a538ad878 | 341 | py | Python | avalon/compiler/__init__.py | nehz/avalon | 1c53d4c1e115a8c31b7170cf3948c870a33e4e56 | [
"MIT"
] | null | null | null | avalon/compiler/__init__.py | nehz/avalon | 1c53d4c1e115a8c31b7170cf3948c870a33e4e56 | [
"MIT"
] | 2 | 2015-10-09T19:21:07.000Z | 2019-08-03T13:50:51.000Z | avalon/compiler/__init__.py | nehz/avalon | 1c53d4c1e115a8c31b7170cf3948c870a33e4e56 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#==============================================================================
# Copyright: Hybrid Labs
# Licence: See LICENSE
#==============================================================================
"""
Python to Javascript compiler
"""
from .compiler import js_compile, runtime, JSCode
| 28.416667 | 80 | 0.331378 | # -*- coding: utf-8 -*-
#==============================================================================
# Copyright: Hybrid Labs
# Licence: See LICENSE
#==============================================================================
"""
Python to Javascript compiler
"""
from .compiler import js_compile, runtime, JSCode
| 0 | 0 | 0 |
3f1cf2a14b0debd5d624d0e3fb67080d5b250030 | 357 | py | Python | conans/server/rest/controllers/v2/__init__.py | laundry-96/conan | fd938f7220ca042d94c42ec5eb607ee69c6785a3 | [
"MIT"
] | 6,205 | 2015-12-01T13:40:05.000Z | 2022-03-31T07:30:25.000Z | conans/server/rest/controllers/v2/__init__.py | laundry-96/conan | fd938f7220ca042d94c42ec5eb607ee69c6785a3 | [
"MIT"
] | 8,747 | 2015-12-01T16:28:48.000Z | 2022-03-31T23:34:53.000Z | conans/server/rest/controllers/v2/__init__.py | laundry-96/conan | fd938f7220ca042d94c42ec5eb607ee69c6785a3 | [
"MIT"
] | 961 | 2015-12-01T16:56:43.000Z | 2022-03-31T13:50:52.000Z | from conans.model.ref import ConanFileReference, PackageReference
| 44.625 | 88 | 0.778711 | from conans.model.ref import ConanFileReference, PackageReference
def get_package_ref(name, version, username, channel, package_id, revision, p_revision):
    """Assemble a ``PackageReference`` from its raw string components.

    A recipe reference is built first from name/version/user/channel plus the
    recipe revision; when a package revision is supplied, it is appended to
    the package id in the ``<package_id>#<p_revision>`` form.
    """
    recipe_ref = ConanFileReference(name, version, username, channel, revision)
    if p_revision:
        package_id = "%s#%s" % (package_id, p_revision)
    return PackageReference(recipe_ref, package_id)
| 267 | 0 | 23 |
f7c1a9a48fbd29d1dce14cb6d7a9d838bab9f312 | 202 | py | Python | foe_pool.py | QwerTech/foe-automation | 9978cd365097a2c9ebec9039642c4e5f6c361018 | [
"MIT"
] | null | null | null | foe_pool.py | QwerTech/foe-automation | 9978cd365097a2c9ebec9039642c4e5f6c361018 | [
"MIT"
] | 3 | 2021-09-08T02:13:20.000Z | 2022-03-12T00:36:40.000Z | foe_pool.py | QwerTech/foe-automation | 9978cd365097a2c9ebec9039642c4e5f6c361018 | [
"MIT"
] | null | null | null | import multiprocessing
pool = None
| 15.538462 | 65 | 0.722772 | import multiprocessing
pool = None
def initPool():
    """Create the shared module-level worker pool, one process per CPU core."""
    global pool
    # cpu_count() already returns an int; the int() wrapper is redundant but harmless.
    pool = multiprocessing.Pool(int(multiprocessing.cpu_count()))
def execInPool(func, params):
    """Map ``func`` over ``params`` using the shared pool (blocking).

    ``initPool()`` must have been called first; otherwise ``pool`` is still
    None and this raises AttributeError.
    """
    return pool.map(func, params)
| 118 | 0 | 46 |
422fb245184cf85a029ccd26bd2b6fcba9c6b6b6 | 4,569 | py | Python | scope/__init__.py | FlorianLudwig/scope | 013b7010a55cf7d377abdaf75cea882f984f02d8 | [
"Apache-2.0"
] | null | null | null | scope/__init__.py | FlorianLudwig/scope | 013b7010a55cf7d377abdaf75cea882f984f02d8 | [
"Apache-2.0"
] | null | null | null | scope/__init__.py | FlorianLudwig/scope | 013b7010a55cf7d377abdaf75cea882f984f02d8 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Florian Ludwig
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, with_statement
import sys
import contextlib
import functools
import inspect
from tornado import stack_context, gen
NOT_PROVIDED = object()
SCOPE_CHAIN = None
class Scope(dict):
    """Dict-based container of scoped values; see :meth:`get` for lookup rules."""
    def get(self, key, default=NOT_PROVIDED, scopes=None):
        """Resolve ``key`` against ``scopes``, searched in order.

        :param str key: name to resolve; the special key ``'scope'`` returns self
        :param default: value returned when nothing resolves
        :param list[Scope] scopes: chain to search; defaults to the reversed
            global SCOPE_CHAIN, or ``[self]`` when no chain is active
        :return: the stored/provided value, or a SubScopeView for subscope keys
        :raise IndexError: when unresolved and no default was given
        """
        if scopes is None:
            if SCOPE_CHAIN:
                scopes = list(reversed(SCOPE_CHAIN))
            else:
                scopes = [self]
        if key == 'scope':
            return self
        for i, scope in enumerate(scopes):
            if key in scope:
                return scope[key]
            elif key in scope._provider:
                # First access: materialize the provided value, then drop the provider.
                scope[key] = scope._provider[key]()
                del scope._provider[key]
                return scope[key]
            elif key in scope._subscopes:
                return SubScopeView(key, scopes)
        if default is not NOT_PROVIDED:
            return default
        msg = 'No value for "{}" stored and no default given'.format(key)
        raise IndexError(msg)
@contextlib.contextmanager
| 28.55625 | 85 | 0.606041 | # Copyright 2015 Florian Ludwig
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, with_statement
import sys
import contextlib
import functools
import inspect
from tornado import stack_context, gen
NOT_PROVIDED = object()
SCOPE_CHAIN = None
class OutsideScopeError(Exception):
    """Raised when a scope-dependent operation runs outside any active scope."""
    pass
class Scope(dict):
    """Dict-based container of scoped values.

    A Scope stores values directly (as dict items), lazily via registered
    providers, and hierarchically via named subscopes.  Lookups through
    :meth:`get` consult the global ``SCOPE_CHAIN`` when one is active.
    """
    def __init__(self, name=None):
        super(Scope, self).__init__()
        self._provider = {}  # key -> zero-arg callable, evaluated lazily on first read
        self._subscopes = {}  # key -> SubScope
        self.name = name
    def provider(self, key, provider):
        # Register a lazy provider; it is called once, on first access of `key`.
        self._provider[key] = provider
    def subscope(self, key):
        # Return the subscope named `key`, creating it on first use.
        if not key in self._subscopes:
            name = '{}.{}'.format(self.name, key)
            subscope = SubScope(name, self)
            self._subscopes[key] = subscope
        return self._subscopes[key]
    def get(self, key, default=NOT_PROVIDED, scopes=None):
        """Resolve ``key`` against ``scopes``, searched in order.

        :param str key: name to resolve; the special key ``'scope'`` returns self
        :param default: value returned when nothing resolves
        :param list[Scope] scopes: chain to search; defaults to the reversed
            global SCOPE_CHAIN, or ``[self]`` when no chain is active
        :return: the stored/provided value, or a SubScopeView for subscope keys
        :raise IndexError: when unresolved and no default was given
        """
        if scopes is None:
            if SCOPE_CHAIN:
                scopes = list(reversed(SCOPE_CHAIN))
            else:
                scopes = [self]
        if key == 'scope':
            return self
        for i, scope in enumerate(scopes):
            if key in scope:
                return scope[key]
            elif key in scope._provider:
                # First access: materialize the provided value, then drop the provider.
                scope[key] = scope._provider[key]()
                del scope._provider[key]
                return scope[key]
            elif key in scope._subscopes:
                return SubScopeView(key, scopes)
        if default is not NOT_PROVIDED:
            return default
        msg = 'No value for "{}" stored and no default given'.format(key)
        raise IndexError(msg)
    def __call__(self):
        # Allows `with scope():` style activation via tornado's StackContext.
        return stack_context.StackContext(functools.partial(set_context, self))
class SubScope(Scope):
    """A named child scope that keeps a reference to its parent scope."""
    def __init__(self, name, parent):
        self.parent = parent
        super(SubScope, self).__init__(name)
class SubScopeView(object):
    """Read-only lookup proxy for one subscope name across a scope chain.

    Returned by ``Scope.get`` for subscope keys, so that ``view[item]``
    searches every scope in the chain that defines a subscope of that name.
    """

    def __init__(self, key, scope_chain):
        self.key = key
        self.scope_chain = scope_chain

    def __getitem__(self, item):
        """Return ``item`` from the first subscope in the chain containing it."""
        for candidate in self.scope_chain:
            subscope = candidate._subscopes.get(self.key)
            if subscope is not None and item in subscope:
                return subscope[item]
        raise IndexError()

    def __eq__(self, other):
        """Views are equal when they wrap the same key and scope chain."""
        if not isinstance(other, SubScopeView):
            return False
        return self.key == other.key and self.scope_chain == other.scope_chain
@contextlib.contextmanager
def set_context(scope):
    """Push ``scope`` onto the global SCOPE_CHAIN for the duration of the block.

    The chain is created lazily on first use; the scope is always popped
    again on exit, even when the body raises.
    """
    global SCOPE_CHAIN
    if SCOPE_CHAIN is None:
        SCOPE_CHAIN = []
    SCOPE_CHAIN.append(scope)
    try:
        yield
    finally:
        # TODO write unit test to get current_scope to be None
        SCOPE_CHAIN.pop()
def get_current_scope():
    """Return the innermost active scope, or ``None`` outside any scope."""
    if not SCOPE_CHAIN:
        return None
    return SCOPE_CHAIN[-1]
def get(key, default=NOT_PROVIDED):
    """Resolve ``key`` against the currently active scope chain.

    :raise OutsideScopeError: when no scope is active
    :raise IndexError: when unresolved and no default was given
    """
    if not SCOPE_CHAIN:
        raise OutsideScopeError()
    return SCOPE_CHAIN[-1].get(key, default, list(reversed(SCOPE_CHAIN)))
def inject(fn):
    """Decorator that fills omitted positional arguments of ``fn`` from the
    active scope chain.

    Only parameters passed neither positionally nor by keyword are looked up;
    lookup failures are ignored so the function's own defaults can apply.

    :raise OutsideScopeError: if arguments are missing and no scope is active
    """
    # Unwrap to the original function so the real signature is inspected.
    fn_inspect = getattr(fn, '_rw_wrapped_function', fn)
    # NOTE(review): inspect.getargspec() is deprecated and was removed in
    # Python 3.11; getfullargspec() would be needed there — confirm targets.
    arg_spec = inspect.getargspec(fn_inspect)
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        if len(args) < len(arg_spec.args):
            # possible injection
            missing_args = set(arg_spec.args[len(args):])
            for key in missing_args:
                if key not in kwargs:
                    if not SCOPE_CHAIN:
                        raise OutsideScopeError('Cannot use inject outside of scope')
                    try:
                        kwargs[key] = get(key)
                    except IndexError:
                        # the key might not be inside scope but there might be
                        # a default parameter defined inside the function
                        pass
        return fn(*args, **kwargs)
    return wrapper
| 2,312 | 30 | 373 |
33be01006ef0ec73b7f5c416d8c3f7cb4e81caef | 306 | py | Python | source/genPrimes.py | ahmedraza007/6.00.1x-Introduction-to-Computer-Science-and-Programming-Using-Python | a2e3960c8e703148e6c8d5d397baea7283f209dc | [
"MIT"
] | null | null | null | source/genPrimes.py | ahmedraza007/6.00.1x-Introduction-to-Computer-Science-and-Programming-Using-Python | a2e3960c8e703148e6c8d5d397baea7283f209dc | [
"MIT"
] | null | null | null | source/genPrimes.py | ahmedraza007/6.00.1x-Introduction-to-Computer-Science-and-Programming-Using-Python | a2e3960c8e703148e6c8d5d397baea7283f209dc | [
"MIT"
] | null | null | null |
gen = genPrimes()
print gen.next()
print gen.next()
print gen.next()
print gen.next() | 18 | 27 | 0.434641 | def genPrimes():
    """Generate the prime numbers in increasing order: 2, 3, 5, 7, ..."""
    prime = []  # every prime found so far, in increasing order
    x = 1       # candidate; incremented before testing, so 2 is tried first
    while True:
        x += 1
        # Trial-divide by each known prime; the for/else body runs only when
        # no known prime divides x, i.e. x is a new prime.
        for p in prime:
            if x % p == 0:
                break
        else:
            prime.append(x)
            yield x
gen = genPrimes()
print gen.next()
print gen.next()
print gen.next()
print gen.next() | 186 | 0 | 22 |
90fd839f97089e06ea9ead00b3f2ea9dc8c1e909 | 7,692 | py | Python | src/magplan/migrations/0002_auto_20201115_1140.py | f1nnix/magplan | 1bda6b53c6e96129e6634bff786b3052d04b0cef | [
"Unlicense"
] | 21 | 2018-12-14T09:08:11.000Z | 2022-01-28T14:33:24.000Z | src/magplan/migrations/0002_auto_20201115_1140.py | f1nnix/magplan | 1bda6b53c6e96129e6634bff786b3052d04b0cef | [
"Unlicense"
] | 20 | 2019-01-11T20:40:01.000Z | 2022-01-30T16:01:38.000Z | src/magplan/migrations/0002_auto_20201115_1140.py | f1nnix/magplan | 1bda6b53c6e96129e6634bff786b3052d04b0cef | [
"Unlicense"
] | 5 | 2019-02-08T01:21:51.000Z | 2021-11-25T17:43:04.000Z | # Generated by Django 3.1.2 on 2020-11-15 11:40
import django.contrib.auth.models
import django.contrib.postgres.fields.jsonb
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| 44.462428 | 277 | 0.587493 | # Generated by Django 3.1.2 on 2020-11-15 11:40
import django.contrib.auth.models
import django.contrib.postgres.fields.jsonb
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (3.1.2, 2020-11-15).

    Creates the magplan ``User``, ``Widget``, ``Widgetype`` and ``Vote``
    models and wires up foreign-key/many-to-many relations on the existing
    stage/profile/post/issue/idea/comment/attachment models.  Generated
    code — prefer creating a new migration over editing this one.
    """
    initial = True
dependencies = [
('magplan', '0001_initial'),
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('user_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
('meta', django.contrib.postgres.fields.jsonb.JSONField(default=dict)),
],
options={
'permissions': (('access_magplan', 'Can access magplan'), ('manage_authors', 'Can manage authors')),
},
bases=('main.user',),
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='Widget',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('_old_id', models.PositiveIntegerField(blank=True, null=True)),
('content', models.TextField()),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Widgetype',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('_old_id', models.PositiveIntegerField(blank=True, null=True)),
('slug', models.SlugField(max_length=255)),
('title', models.CharField(max_length=255)),
('meta', django.contrib.postgres.fields.jsonb.JSONField(default=dict)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Vote',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('_old_id', models.PositiveIntegerField(blank=True, null=True)),
('score', models.SmallIntegerField(choices=[(0, 'Против таких статей в «Хакере»'), (25, 'Не верю, что выйдет хорошо'), (50, 'Тема нормальная, но не для меня'), (75, 'Почитал бы, встретив в журнале'), (100, 'Ради таких статей мог бы подписаться')], default=50)),
('idea', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='votes', to='magplan.idea')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='magplan.user')),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='stage',
name='assignee',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='magplan.user'),
),
migrations.AddField(
model_name='stage',
name='next_stage',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='p_stage', to='magplan.stage'),
),
migrations.AddField(
model_name='stage',
name='prev_stage',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='n_stage', to='magplan.stage'),
),
migrations.AddField(
model_name='profile',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to='magplan.user'),
),
migrations.AddField(
model_name='post',
name='authors',
field=models.ManyToManyField(to='magplan.User', verbose_name='Авторы'),
),
migrations.AddField(
model_name='post',
name='editor',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='edited', to='magplan.user', verbose_name='Редактор'),
),
migrations.AddField(
model_name='post',
name='issues',
field=models.ManyToManyField(related_name='posts', to='magplan.Issue', verbose_name='Выпуски'),
),
migrations.AddField(
model_name='post',
name='last_updater',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='posts_updated', to='magplan.user', verbose_name='Кто последний обновлял'),
),
migrations.AddField(
model_name='post',
name='postype',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='magplan.postype'),
),
migrations.AddField(
model_name='post',
name='section',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='magplan.section', verbose_name='Раздел'),
),
migrations.AddField(
model_name='post',
name='stage',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='magplan.stage', verbose_name='Этап'),
),
migrations.AddField(
model_name='issue',
name='magazine',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='magplan.magazine'),
),
migrations.AddField(
model_name='idea',
name='authors',
field=models.ManyToManyField(blank=True, related_name='authors', to='magplan.User', verbose_name='Авторы'),
),
migrations.AddField(
model_name='idea',
name='editor',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='editor', to='magplan.user'),
),
migrations.AddField(
model_name='idea',
name='post',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='magplan.post'),
),
migrations.AddField(
model_name='comment',
name='content_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.contenttype'),
),
migrations.AddField(
model_name='comment',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='magplan.user'),
),
migrations.AddField(
model_name='attachment',
name='post',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='magplan.post'),
),
migrations.AddField(
model_name='attachment',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='magplan.user'),
),
]
| 0 | 7,619 | 23 |
b3dbca7a9f41a05f752f4b1bb64b60dc81d219cf | 123 | py | Python | pi_camera_capture/cli.py | yoyota-pose-estimation/pi-camera-capture | c77ed1691dafbec9b8e1932a0493ba5c4acc2048 | [
"MIT"
] | null | null | null | pi_camera_capture/cli.py | yoyota-pose-estimation/pi-camera-capture | c77ed1691dafbec9b8e1932a0493ba5c4acc2048 | [
"MIT"
] | 1 | 2020-02-10T07:02:45.000Z | 2020-02-10T08:16:25.000Z | pi_camera_capture/cli.py | yoyota-pose-estimation/pi-camera-capture | c77ed1691dafbec9b8e1932a0493ba5c4acc2048 | [
"MIT"
] | null | null | null | import fire
from pi_camera_capture.app import main
if __name__ == "__main__":
cli()
| 11.181818 | 38 | 0.674797 | import fire
from pi_camera_capture.app import main
def cli():
    """Console entry point: expose ``main`` as a command-line interface via Fire.

    ``fire.Fire(main)`` turns the imported ``main`` callable into a CLI,
    mapping its parameters to command-line arguments.
    """
    fire.Fire(main)
# Run the Fire-based CLI when this module is executed directly as a script.
if __name__ == "__main__":
    cli()
| 9 | 0 | 23 |
6cb7ebb492fcdeb41c1c34796345e72ad718f0b1 | 27,257 | py | Python | words.py | nailtonvital/python-projects | dff440fe0aaebfedbf9622d2daae0b53d972e438 | [
"MIT"
] | 2 | 2022-03-21T23:03:51.000Z | 2022-03-21T23:18:02.000Z | words.py | nailtonvital/python-projects | dff440fe0aaebfedbf9622d2daae0b53d972e438 | [
"MIT"
] | null | null | null | words.py | nailtonvital/python-projects | dff440fe0aaebfedbf9622d2daae0b53d972e438 | [
"MIT"
] | null | null | null | # Word list for hangman
words = ["aback", "abaft", "abandoned", "abashed", "aberrant", "abhorrent", "abiding", "abject", "ablaze", "able",
"abnormal", "aboard", "aboriginal", "abortive", "abounding", "abrasive", "abrupt", "absent", "absorbed",
"absorbing", "abstracted", "absurd", "abundant", "abusive", "accept", "acceptable", "accessible", "accidental",
"account", "accurate", "achiever", "acid", "acidic", "acoustic", "acoustics", "acrid", "act", "action",
"activity", "actor", "actually", "ad hoc", "adamant", "adaptable", "add", "addicted", "addition", "adhesive",
"adjoining", "adjustment", "admire", "admit", "adorable", "adventurous", "advertisement", "advice", "advise",
"afford", "afraid", "aftermath", "afternoon", "afterthought", "aggressive", "agonizing", "agree", "agreeable",
"agreement", "ahead", "air", "airplane", "airport", "ajar", "alarm", "alcoholic", "alert", "alike", "alive",
"alleged", "allow", "alluring", "aloof", "amazing", "ambiguous", "ambitious", "amount", "amuck", "amuse",
"amused", "amusement", "amusing", "analyze", "ancient", "anger", "angle", "angry", "animal", "animated",
"announce", "annoy", "annoyed", "annoying", "answer", "ants", "anxious", "apathetic", "apologise", "apparatus",
"apparel", "appear", "applaud", "appliance", "appreciate", "approval", "approve", "aquatic", "arch", "argue",
"argument", "arithmetic", "arm", "army", "aromatic", "arrange", "arrest", "arrive", "arrogant", "art",
"ashamed", "ask", "aspiring", "assorted", "astonishing", "attach", "attack", "attempt", "attend", "attract",
"attraction", "attractive", "aunt", "auspicious", "authority", "automatic", "available", "average", "avoid",
"awake", "aware", "awesome", "awful", "axiomatic", "babies", "baby", "back", "bad", "badge", "bag", "bait",
"bake", "balance", "ball", "ban", "bang", "barbarous", "bare", "base", "baseball", "bashful", "basin", "basket",
"basketball", "bat", "bath", "bathe", "battle", "bawdy", "bead", "beam", "bear", "beautiful", "bed", "bedroom",
"beds", "bee", "beef", "befitting", "beg", "beginner", "behave", "behavior", "belief", "believe", "bell",
"belligerent", "bells", "belong", "beneficial", "bent", "berry", "berserk", "best", "better", "bewildered",
"big", "bike", "bikes", "billowy", "bird", "birds", "birth", "birthday", "bit", "bite", "bite-sized", "bitter",
"bizarre", "black", "black-and-white", "blade", "bleach", "bless", "blind", "blink", "blood", "bloody", "blot",
"blow", "blue", "blue-eyed", "blush", "blushing", "board", "boast", "boat", "boil", "boiling", "bolt", "bomb",
"bone", "book", "books", "boorish", "boot", "border", "bore", "bored", "boring", "borrow", "bottle", "bounce",
"bouncy", "boundary", "boundless", "bow", "box", "boy", "brainy", "brake", "branch", "brash", "brass", "brave",
"brawny", "breakable", "breath", "breathe", "breezy", "brick", "bridge", "brief", "bright", "broad", "broken",
"brother", "brown", "bruise", "brush", "bubble", "bucket", "building", "bulb", "bump", "bumpy", "burly", "burn",
"burst", "bury", "bushes", "business", "bustling", "busy", "butter", "button", "buzz", "cabbage", "cable",
"cactus", "cagey", "cake", "cakes", "calculate", "calculating", "calculator", "calendar", "call", "callous",
"calm", "camera", "camp", "can", "cannon", "canvas", "cap", "capable", "capricious", "caption", "car", "card",
"care", "careful", "careless", "caring", "carpenter", "carriage", "carry", "cars", "cart", "carve", "cast",
"cat", "cats", "cattle", "cause", "cautious", "cave", "ceaseless", "celery", "cellar", "cemetery", "cent",
"certain", "chalk", "challenge", "chance", "change", "changeable", "channel", "charge", "charming", "chase",
"cheap", "cheat", "check", "cheer", "cheerful", "cheese", "chemical", "cherries", "cherry", "chess", "chew",
"chicken", "chickens", "chief", "childlike", "children", "chilly", "chin", "chivalrous", "choke", "chop",
"chubby", "chunky", "church", "circle", "claim", "clam", "clammy", "clap", "class", "classy", "clean", "clear",
"clever", "clip", "cloistered", "close", "closed", "cloth", "cloudy", "clover", "club", "clumsy", "cluttered",
"coach", "coal", "coast", "coat", "cobweb", "coherent", "coil", "cold", "collar", "collect", "color",
"colorful", "colossal", "colour", "comb", "combative", "comfortable", "command", "committee", "common",
"communicate", "company", "compare", "comparison", "compete", "competition", "complain", "complete", "complex",
"concentrate", "concern", "concerned", "condemned", "condition", "confess", "confuse", "confused", "connect",
"connection", "conscious", "consider", "consist", "contain", "continue", "control", "cooing", "cook", "cool",
"cooperative", "coordinated", "copper", "copy", "corn", "correct", "cough", "count", "country", "courageous",
"cover", "cow", "cowardly", "cows", "crabby", "crack", "cracker", "crash", "crate", "craven", "crawl", "crayon",
"crazy", "cream", "creator", "creature", "credit", "creepy", "crib", "crime", "crook", "crooked", "cross",
"crow", "crowd", "crowded", "crown", "cruel", "crush", "cry", "cub", "cuddly", "cultured", "cumbersome", "cup",
"cure", "curious", "curl", "curly", "current", "curtain", "curve", "curved", "curvy", "cushion", "cut", "cute",
"cycle", "cynical", "dad", "daffy", "daily", "dam", "damage", "damaged", "damaging", "damp", "dance",
"dangerous", "dapper", "dare", "dark", "dashing", "daughter", "day", "dazzling", "dead", "deadpan", "deafening",
"dear", "death", "debonair", "debt", "decay", "deceive", "decide", "decision", "decisive", "decorate",
"decorous", "deep", "deeply", "deer", "defeated", "defective", "defiant", "degree", "delay", "delicate",
"delicious", "delight", "delightful", "delirious", "deliver", "demonic", "depend", "dependent", "depressed",
"deranged", "describe", "descriptive", "desert", "deserted", "deserve", "design", "desire", "desk", "destroy",
"destruction", "detail", "detailed", "detect", "determined", "develop", "development", "devilish", "didactic",
"different", "difficult", "digestion", "diligent", "dime", "dinner", "dinosaurs", "direction", "direful",
"dirt", "dirty", "disagree", "disagreeable", "disappear", "disapprove", "disarm", "disastrous", "discover",
"discovery", "discreet", "discussion", "disgusted", "disgusting", "disillusioned", "dislike", "dispensable",
"distance", "distinct", "distribution", "disturbed", "divergent", "divide", "division", "dizzy", "dock",
"doctor", "dog", "dogs", "doll", "dolls", "domineering", "donkey", "door", "double", "doubt", "doubtful",
"downtown", "drab", "draconian", "drag", "drain", "dramatic", "drawer", "dream", "dreary", "dress", "drink",
"drip", "driving", "drop", "drown", "drum", "drunk", "dry", "duck", "ducks", "dull", "dust", "dusty", "dynamic",
"dysfunctional", "eager", "ear", "early", "earn", "earsplitting", "earth", "earthquake", "earthy", "easy",
"eatable", "economic", "edge", "educate", "educated", "education", "effect", "efficacious", "efficient", "egg",
"eggnog", "eggs", "eight", "elastic", "elated", "elbow", "elderly", "electric", "elegant", "elfin", "elite",
"embarrass", "embarrassed", "eminent", "employ", "empty", "enchanted", "enchanting", "encourage", "encouraging",
"end", "endurable", "energetic", "engine", "enjoy", "enormous", "enter", "entertain", "entertaining",
"enthusiastic", "envious", "equable", "equal", "erect", "erratic", "error", "escape", "ethereal", "evanescent",
"evasive", "even", "event", "examine", "example", "excellent", "exchange", "excite", "excited", "exciting",
"exclusive", "excuse", "exercise", "exist", "existence", "exotic", "expand", "expansion", "expect", "expensive",
"experience", "expert", "explain", "explode", "extend", "extra-large", "extra-small", "exuberant", "exultant",
"eye", "eyes", "fabulous", "face", "fact", "fade", "faded", "fail", "faint", "fair", "fairies", "faithful",
"fall", "fallacious", "false", "familiar", "famous", "fanatical", "fancy", "fang", "fantastic", "far",
"far-flung", "farm", "fascinated", "fast", "fasten", "fat", "faulty", "fax", "fear", "fearful", "fearless",
"feeble", "feeling", "feigned", "female", "fence", "fertile", "festive", "fetch", "few", "field", "fierce",
"file", "fill", "film", "filthy", "fine", "finger", "finicky", "fire", "fireman", "first", "fish", "fit",
"five", "fix", "fixed", "flag", "flagrant", "flaky", "flame", "flap", "flash", "flashy", "flat", "flavor",
"flawless", "flesh", "flight", "flimsy", "flippant", "float", "flock", "flood", "floor", "flow", "flower",
"flowers", "flowery", "fluffy", "fluttering", "fly", "foamy", "fog", "fold", "follow", "food", "fool",
"foolish", "foot", "force", "foregoing", "forgetful", "fork", "form", "fortunate", "found", "four", "fowl",
"fragile", "frail", "frame", "frantic", "free", "freezing", "frequent", "fresh", "fretful", "friction",
"friend", "friendly", "friends", "frighten", "frightened", "frightening", "frog", "frogs", "front", "fruit",
"fry", "fuel", "full", "fumbling", "functional", "funny", "furniture", "furry", "furtive", "future",
"futuristic", "fuzzy", "gabby", "gainful", "gamy", "gaping", "garrulous", "gate", "gather", "gaudy", "gaze",
"geese", "general", "gentle", "ghost", "giant", "giants", "giddy", "gifted", "gigantic", "giraffe", "girl",
"girls", "glamorous", "glass", "gleaming", "glib", "glistening", "glorious", "glossy", "glove", "glow", "glue",
"godly", "gold", "good", "goofy", "gorgeous", "government", "governor", "grab", "graceful", "grade", "grain",
"grandfather", "grandiose", "grandmother", "grape", "grass", "grate", "grateful", "gratis", "gray", "grease",
"greasy", "great", "greedy", "green", "greet", "grey", "grieving", "grin", "grip", "groan", "groovy",
"grotesque", "grouchy", "ground", "group", "growth", "grubby", "gruesome", "grumpy", "guarantee", "guard",
"guarded", "guess", "guide", "guiltless", "guitar", "gullible", "gun", "gusty", "guttural", "habitual", "hair",
"haircut", "half", "hall", "hallowed", "halting", "hammer", "hand", "handle", "hands", "handsome", "handsomely",
"handy", "hang", "hanging", "hapless", "happen", "happy", "harass", "harbor", "hard", "hard-to-find", "harm",
"harmonious", "harmony", "harsh", "hat", "hate", "hateful", "haunt", "head", "heady", "heal", "health",
"healthy", "heap", "heartbreaking", "heat", "heavenly", "heavy", "hellish", "help", "helpful", "helpless",
"hesitant", "hideous", "high", "high-pitched", "highfalutin", "hilarious", "hill", "hissing", "historical",
"history", "hobbies", "hole", "holiday", "holistic", "hollow", "home", "homeless", "homely", "honey",
"honorable", "hook", "hop", "hope", "horn", "horrible", "horse", "horses", "hose", "hospitable", "hospital",
"hot", "hour", "house", "houses", "hover", "hug", "huge", "hulking", "hum", "humdrum", "humor", "humorous",
"hungry", "hunt", "hurried", "hurry", "hurt", "hushed", "husky", "hydrant", "hypnotic", "hysterical", "ice",
"icicle", "icky", "icy", "idea", "identify", "idiotic", "ignorant", "ignore", "ill", "ill-fated",
"ill-informed", "illegal", "illustrious", "imaginary", "imagine", "immense", "imminent", "impartial",
"imperfect", "impolite", "important", "imported", "impossible", "impress", "improve", "impulse", "incandescent",
"include", "income", "incompetent", "inconclusive", "increase", "incredible", "industrious", "industry",
"inexpensive", "infamous", "influence", "inform", "inject", "injure", "ink", "innate", "innocent",
"inquisitive", "insect", "insidious", "instinctive", "instruct", "instrument", "insurance", "intelligent",
"intend", "interest", "interesting", "interfere", "internal", "interrupt", "introduce", "invent", "invention",
"invincible", "invite", "irate", "iron", "irritate", "irritating", "island", "itch", "itchy", "jaded", "jagged",
"jail", "jam", "jar", "jazzy", "jealous", "jeans", "jelly", "jellyfish", "jewel", "jittery", "jobless", "jog",
"join", "joke", "jolly", "joyous", "judge", "judicious", "juggle", "juice", "juicy", "jumbled", "jump", "jumpy",
"juvenile", "kaput", "keen", "kettle", "key", "kick", "kill", "kind", "kindhearted", "kindly", "kiss",
"kittens", "kitty", "knee", "kneel", "knife", "knit", "knock", "knot", "knotty", "knowing", "knowledge",
"knowledgeable", "known", "label", "labored", "laborer", "lace", "lackadaisical", "lacking", "ladybug", "lake",
"lame", "lamentable", "lamp", "land", "language", "languid", "large", "last", "late", "laugh", "laughable",
"launch", "lavish", "lazy", "lean", "learn", "learned", "leather", "left", "leg", "legal", "legs", "lethal",
"letter", "letters", "lettuce", "level", "lewd", "library", "license", "lick", "lie", "light", "lighten",
"like", "likeable", "limit", "limping", "line", "linen", "lip", "liquid", "list", "listen", "literate",
"little", "live", "lively", "living", "load", "loaf", "lock", "locket", "lonely", "long", "long-term",
"longing", "look", "loose", "lopsided", "loss", "loud", "loutish", "love", "lovely", "loving", "low", "lowly",
"lucky", "ludicrous", "lumber", "lumpy", "lunch", "lunchroom", "lush", "luxuriant", "lying", "lyrical",
"macabre", "machine", "macho", "maddening", "madly", "magenta", "magic", "magical", "magnificent", "maid",
"mailbox", "majestic", "makeshift", "male", "malicious", "mammoth", "man", "manage", "maniacal", "many",
"marble", "march", "mark", "marked", "market", "married", "marry", "marvelous", "mask", "mass", "massive",
"match", "mate", "material", "materialistic", "matter", "mature", "meal", "mean", "measly", "measure", "meat",
"meaty", "meddle", "medical", "meek", "meeting", "mellow", "melodic", "melt", "melted", "memorize", "memory",
"men", "mend", "merciful", "mere", "mess up", "messy", "metal", "mice", "middle", "mighty", "military", "milk",
"milky", "mind", "mindless", "mine", "miniature", "minister", "minor", "mint", "minute", "miscreant", "miss",
"mist", "misty", "mitten", "mix", "mixed", "moan", "moaning", "modern", "moldy", "mom", "momentous", "money",
"monkey", "month", "moon", "moor", "morning", "mother", "motion", "motionless", "mountain", "mountainous",
"mourn", "mouth", "move", "muddle", "muddled", "mug", "multiply", "mundane", "murder", "murky", "muscle",
"mushy", "mute", "mysterious", "nail", "naive", "name", "nappy", "narrow", "nasty", "nation", "natural",
"naughty", "nauseating", "near", "neat", "nebulous", "necessary", "neck", "need", "needle", "needless", "needy",
"neighborly", "nerve", "nervous", "nest", "new", "next", "nice", "nifty", "night", "nimble", "nine", "nippy",
"nod", "noise", "noiseless", "noisy", "nonchalant", "nondescript", "nonstop", "normal", "north", "nose",
"nostalgic", "nosy", "note", "notebook", "notice", "noxious", "null", "number", "numberless", "numerous", "nut",
"nutritious", "nutty", "oafish", "oatmeal", "obedient", "obeisant", "obese", "obey", "object", "obnoxious",
"obscene", "obsequious", "observant", "observation", "observe", "obsolete", "obtain", "obtainable", "occur",
"ocean", "oceanic", "odd", "offbeat", "offend", "offer", "office", "oil", "old", "old-fashioned", "omniscient",
"one", "onerous", "open", "opposite", "optimal", "orange", "oranges", "order", "ordinary", "organic",
"ossified", "outgoing", "outrageous", "outstanding", "oval", "oven", "overconfident", "overflow", "overjoyed",
"overrated", "overt", "overwrought", "owe", "own", "pack", "paddle", "page", "pail", "painful", "painstaking",
"paint", "pale", "paltry", "pan", "pancake", "panicky", "panoramic", "paper", "parallel", "parcel", "parched",
"park", "parsimonious", "part", "partner", "party", "pass", "passenger", "past", "paste", "pastoral", "pat",
"pathetic", "pause", "payment", "peace", "peaceful", "pear", "peck", "pedal", "peel", "peep", "pen", "pencil",
"penitent", "perfect", "perform", "periodic", "permissible", "permit", "perpetual", "person", "pest", "pet",
"petite", "pets", "phobic", "phone", "physical", "picayune", "pick", "pickle", "picture", "pie", "pies", "pig",
"pigs", "pin", "pinch", "pine", "pink", "pipe", "piquant", "pizzas", "place", "placid", "plain", "plan",
"plane", "planes", "plant", "plantation", "plants", "plastic", "plate", "plausible", "play", "playground",
"pleasant", "please", "pleasure", "plot", "plough", "plucky", "plug", "pocket", "point", "pointless", "poised",
"poison", "poke", "polish", "polite", "political", "pollution", "poor", "pop", "popcorn", "porter", "position",
"possess", "possessive", "possible", "post", "pot", "potato", "pour", "powder", "power", "powerful", "practice",
"pray", "preach", "precede", "precious", "prefer", "premium", "prepare", "present", "preserve", "press",
"pretend", "pretty", "prevent", "previous", "price", "pricey", "prick", "prickly", "print", "private",
"probable", "produce", "productive", "profit", "profuse", "program", "promise", "property", "prose", "protect",
"protective", "protest", "proud", "provide", "psychedelic", "psychotic", "public", "puffy", "pull", "pump",
"pumped", "punch", "puncture", "punish", "punishment", "puny", "purple", "purpose", "purring", "push", "pushy",
"puzzled", "puzzling", "quack", "quaint", "quarrelsome", "quarter", "quartz", "queen", "question",
"questionable", "queue", "quick", "quickest", "quicksand", "quiet", "quill", "quilt", "quince", "quirky",
"quiver", "quixotic", "quizzical", "rabbit", "rabbits", "rabid", "race", "racial", "radiate", "ragged", "rail",
"railway", "rain", "rainstorm", "rainy", "raise", "rake", "rambunctious", "rampant", "range", "rapid", "rare",
"raspy", "rat", "rate", "ratty", "ray", "reach", "reaction", "reading", "ready", "real", "realize", "reason",
"rebel", "receipt", "receive", "receptive", "recess", "recognise", "recondite", "record", "red", "reduce",
"redundant", "reflect", "reflective", "refuse", "regret", "regular", "reign", "reject", "rejoice", "relation",
"relax", "release", "relieved", "religion", "rely", "remain", "remarkable", "remember", "remind", "reminiscent",
"remove", "repair", "repeat", "replace", "reply", "report", "representative", "reproduce", "repulsive",
"request", "rescue", "resolute", "resonant", "respect", "responsible", "rest", "retire", "return", "reward",
"rhetorical", "rhyme", "rhythm", "rice", "rich", "riddle", "rifle", "right", "righteous", "rightful", "rigid",
"ring", "rings", "rinse", "ripe", "risk", "ritzy", "river", "road", "roasted", "rob", "robin", "robust", "rock",
"rod", "roll", "romantic", "roof", "room", "roomy", "root", "rose", "rot", "rotten", "rough", "round", "route",
"royal", "rub", "ruddy", "rude", "ruin", "rule", "run", "rural", "rush", "rustic", "ruthless", "sable", "sack",
"sad", "safe", "sail", "salt", "salty", "same", "sand", "sassy", "satisfy", "satisfying", "save", "savory",
"saw", "scale", "scandalous", "scarce", "scare", "scarecrow", "scared", "scarf", "scary", "scatter",
"scattered", "scene", "scent", "school", "science", "scientific", "scintillating", "scissors", "scold",
"scorch", "scrape", "scratch", "scrawny", "scream", "screeching", "screw", "scribble", "scrub", "sea", "seal",
"search", "seashore", "seat", "second", "second-hand", "secret", "secretary", "secretive", "sedate", "seed",
"seemly", "selection", "selective", "self", "selfish", "sense", "separate", "serious", "servant", "serve",
"settle", "shade", "shaggy", "shake", "shaky", "shallow", "shame", "shape", "share", "sharp", "shave", "sheep",
"sheet", "shelf", "shelter", "shiny", "ship", "shirt", "shiver", "shivering", "shock", "shocking", "shoe",
"shoes", "shop", "short", "show", "shrill", "shrug", "shut", "shy", "sick", "side", "sidewalk", "sigh", "sign",
"signal", "silent", "silk", "silky", "silly", "silver", "simple", "simplistic", "sin", "sincere", "sink", "sip",
"sister", "sisters", "six", "size", "skate", "ski", "skillful", "skin", "skinny", "skip", "skirt", "sky",
"slap", "slave", "sleep", "sleepy", "sleet", "slim", "slimy", "slip", "slippery", "slope", "sloppy", "slow",
"small", "smart", "smash", "smell", "smelly", "smile", "smiling", "smoggy", "smoke", "smooth", "snail",
"snails", "snake", "snakes", "snatch", "sneaky", "sneeze", "sniff", "snobbish", "snore", "snotty", "snow",
"soak", "soap", "society", "sock", "soda", "sofa", "soft", "soggy", "solid", "somber", "son", "song", "songs",
"soothe", "sophisticated", "sordid", "sore", "sort", "sound", "soup", "sour", "space", "spade", "spare",
"spark", "sparkle", "sparkling", "special", "spectacular", "spell", "spicy", "spiders", "spiffy", "spiky",
"spill", "spiritual", "spiteful", "splendid", "spoil", "sponge", "spooky", "spoon", "spot", "spotless",
"spotted", "spotty", "spray", "spring", "sprout", "spurious", "spy", "squalid", "square", "squash", "squeak",
"squeal", "squealing", "squeamish", "squeeze", "squirrel", "stage", "stain", "staking", "stale", "stamp",
"standing", "star", "stare", "start", "statement", "station", "statuesque", "stay", "steadfast", "steady",
"steam", "steel", "steep", "steer", "stem", "step", "stereotyped", "stew", "stick", "sticks", "sticky", "stiff",
"stimulating", "stingy", "stir", "stitch", "stocking", "stomach", "stone", "stop", "store", "stormy", "story",
"stove", "straight", "strange", "stranger", "strap", "straw", "stream", "street", "strengthen", "stretch",
"string", "strip", "striped", "stroke", "strong", "structure", "stuff", "stupendous", "stupid", "sturdy",
"subdued", "subsequent", "substance", "substantial", "subtract", "succeed", "successful", "succinct", "suck",
"sudden", "suffer", "sugar", "suggest", "suggestion", "suit", "sulky", "summer", "sun", "super", "superb",
"superficial", "supply", "support", "suppose", "supreme", "surprise", "surround", "suspect", "suspend",
"swanky", "sweater", "sweet", "sweltering", "swift", "swim", "swing", "switch", "symptomatic", "synonymous",
"system", "table", "taboo", "tacit", "tacky", "tail", "talented", "talk", "tall", "tame", "tan", "tangible",
"tangy", "tank", "tap", "tart", "taste", "tasteful", "tasteless", "tasty", "tawdry", "tax", "teaching", "team",
"tearful", "tease", "tedious", "teeny", "teeny-tiny", "teeth", "telephone", "telling", "temper", "temporary",
"tempt", "ten", "tendency", "tender", "tense", "tent", "tenuous", "terrible", "terrific", "terrify",
"territory", "test", "tested", "testy", "texture", "thank", "thankful", "thaw", "theory", "therapeutic",
"thick", "thin", "thing", "things", "thinkable", "third", "thirsty", "thought", "thoughtful", "thoughtless",
"thread", "threatening", "three", "thrill", "throat", "throne", "thumb", "thunder", "thundering", "tick",
"ticket", "tickle", "tidy", "tie", "tiger", "tight", "tightfisted", "time", "tin", "tiny", "tip", "tire",
"tired", "tiresome", "title", "toad", "toe", "toes", "tomatoes", "tongue", "tooth", "toothbrush", "toothpaste",
"toothsome", "top", "torpid", "touch", "tough", "tour", "tow", "towering", "town", "toy", "toys", "trace",
"trade", "trail", "train", "trains", "tramp", "tranquil", "transport", "trap", "trashy", "travel", "tray",
"treat", "treatment", "tree", "trees", "tremble", "tremendous", "trick", "tricky", "trip", "trite", "trot",
"trouble", "troubled", "trousers", "truck", "trucks", "truculent", "true", "trust", "truthful", "try", "tub",
"tug", "tumble", "turkey", "turn", "twig", "twist", "two", "type", "typical", "ubiquitous", "ugliest", "ugly",
"ultra", "umbrella", "unable", "unaccountable", "unadvised", "unarmed", "unbecoming", "unbiased", "uncle",
"uncovered", "understood", "underwear", "undesirable", "undress", "unequal", "unequaled", "uneven", "unfasten",
"unhealthy", "uninterested", "unique", "unit", "unite", "unkempt", "unknown", "unlock", "unnatural", "unpack",
"unruly", "unsightly", "unsuitable", "untidy", "unused", "unusual", "unwieldy", "unwritten", "upbeat", "uppity",
"upset", "uptight", "use", "used", "useful", "useless", "utopian", "utter", "uttermost", "vacation", "vacuous",
"vagabond", "vague", "valuable", "value", "van", "vanish", "various", "vase", "vast", "vegetable", "veil",
"vein", "vengeful", "venomous", "verdant", "verse", "versed", "vessel", "vest", "victorious", "view",
"vigorous", "violent", "violet", "visit", "visitor", "vivacious", "voice", "voiceless", "volatile", "volcano",
"volleyball", "voracious", "voyage", "vulgar", "wacky", "waggish", "wail", "wait", "waiting", "wakeful", "walk",
"wall", "wander", "wandering", "want", "wanting", "war", "warlike", "warm", "warn", "wary", "wash", "waste",
"wasteful", "watch", "water", "watery", "wave", "waves", "wax", "way", "weak", "wealth", "wealthy", "weary",
"weather", "week", "weigh", "weight", "welcome", "well-groomed", "well-made", "well-off", "well-to-do", "wet",
"wheel", "whimsical", "whine", "whip", "whirl", "whisper", "whispering", "whistle", "white", "whole",
"wholesale", "wicked", "wide", "wide-eyed", "wiggly", "wild", "wilderness", "willing", "wind", "window",
"windy", "wine", "wing", "wink", "winter", "wipe", "wire", "wiry", "wise", "wish", "wistful", "witty", "wobble",
"woebegone", "woman", "womanly", "women", "wonder", "wonderful", "wood", "wooden", "wool", "woozy", "word",
"work", "workable", "worm", "worried", "worry", "worthless", "wound", "wrap", "wrathful", "wreck", "wren",
"wrench", "wrestle", "wretched", "wriggle", "wrist", "writer", "writing", "wrong", "wry", "x-ray", "yak", "yam",
"yard", "yarn", "yawn", "year", "yell", "yellow", "yielding", "yoke", "young", "youthful", "yummy", "zany",
"zealous", "zebra", "zephyr", "zesty", "zinc", "zip", "zipper", "zippy", "zonked", "zoo", "zoom"]
| 116.482906 | 121 | 0.56048 | # Word list for hangman
words = ["aback", "abaft", "abandoned", "abashed", "aberrant", "abhorrent", "abiding", "abject", "ablaze", "able",
"abnormal", "aboard", "aboriginal", "abortive", "abounding", "abrasive", "abrupt", "absent", "absorbed",
"absorbing", "abstracted", "absurd", "abundant", "abusive", "accept", "acceptable", "accessible", "accidental",
"account", "accurate", "achiever", "acid", "acidic", "acoustic", "acoustics", "acrid", "act", "action",
"activity", "actor", "actually", "ad hoc", "adamant", "adaptable", "add", "addicted", "addition", "adhesive",
"adjoining", "adjustment", "admire", "admit", "adorable", "adventurous", "advertisement", "advice", "advise",
"afford", "afraid", "aftermath", "afternoon", "afterthought", "aggressive", "agonizing", "agree", "agreeable",
"agreement", "ahead", "air", "airplane", "airport", "ajar", "alarm", "alcoholic", "alert", "alike", "alive",
"alleged", "allow", "alluring", "aloof", "amazing", "ambiguous", "ambitious", "amount", "amuck", "amuse",
"amused", "amusement", "amusing", "analyze", "ancient", "anger", "angle", "angry", "animal", "animated",
"announce", "annoy", "annoyed", "annoying", "answer", "ants", "anxious", "apathetic", "apologise", "apparatus",
"apparel", "appear", "applaud", "appliance", "appreciate", "approval", "approve", "aquatic", "arch", "argue",
"argument", "arithmetic", "arm", "army", "aromatic", "arrange", "arrest", "arrive", "arrogant", "art",
"ashamed", "ask", "aspiring", "assorted", "astonishing", "attach", "attack", "attempt", "attend", "attract",
"attraction", "attractive", "aunt", "auspicious", "authority", "automatic", "available", "average", "avoid",
"awake", "aware", "awesome", "awful", "axiomatic", "babies", "baby", "back", "bad", "badge", "bag", "bait",
"bake", "balance", "ball", "ban", "bang", "barbarous", "bare", "base", "baseball", "bashful", "basin", "basket",
"basketball", "bat", "bath", "bathe", "battle", "bawdy", "bead", "beam", "bear", "beautiful", "bed", "bedroom",
"beds", "bee", "beef", "befitting", "beg", "beginner", "behave", "behavior", "belief", "believe", "bell",
"belligerent", "bells", "belong", "beneficial", "bent", "berry", "berserk", "best", "better", "bewildered",
"big", "bike", "bikes", "billowy", "bird", "birds", "birth", "birthday", "bit", "bite", "bite-sized", "bitter",
"bizarre", "black", "black-and-white", "blade", "bleach", "bless", "blind", "blink", "blood", "bloody", "blot",
"blow", "blue", "blue-eyed", "blush", "blushing", "board", "boast", "boat", "boil", "boiling", "bolt", "bomb",
"bone", "book", "books", "boorish", "boot", "border", "bore", "bored", "boring", "borrow", "bottle", "bounce",
"bouncy", "boundary", "boundless", "bow", "box", "boy", "brainy", "brake", "branch", "brash", "brass", "brave",
"brawny", "breakable", "breath", "breathe", "breezy", "brick", "bridge", "brief", "bright", "broad", "broken",
"brother", "brown", "bruise", "brush", "bubble", "bucket", "building", "bulb", "bump", "bumpy", "burly", "burn",
"burst", "bury", "bushes", "business", "bustling", "busy", "butter", "button", "buzz", "cabbage", "cable",
"cactus", "cagey", "cake", "cakes", "calculate", "calculating", "calculator", "calendar", "call", "callous",
"calm", "camera", "camp", "can", "cannon", "canvas", "cap", "capable", "capricious", "caption", "car", "card",
"care", "careful", "careless", "caring", "carpenter", "carriage", "carry", "cars", "cart", "carve", "cast",
"cat", "cats", "cattle", "cause", "cautious", "cave", "ceaseless", "celery", "cellar", "cemetery", "cent",
"certain", "chalk", "challenge", "chance", "change", "changeable", "channel", "charge", "charming", "chase",
"cheap", "cheat", "check", "cheer", "cheerful", "cheese", "chemical", "cherries", "cherry", "chess", "chew",
"chicken", "chickens", "chief", "childlike", "children", "chilly", "chin", "chivalrous", "choke", "chop",
"chubby", "chunky", "church", "circle", "claim", "clam", "clammy", "clap", "class", "classy", "clean", "clear",
"clever", "clip", "cloistered", "close", "closed", "cloth", "cloudy", "clover", "club", "clumsy", "cluttered",
"coach", "coal", "coast", "coat", "cobweb", "coherent", "coil", "cold", "collar", "collect", "color",
"colorful", "colossal", "colour", "comb", "combative", "comfortable", "command", "committee", "common",
"communicate", "company", "compare", "comparison", "compete", "competition", "complain", "complete", "complex",
"concentrate", "concern", "concerned", "condemned", "condition", "confess", "confuse", "confused", "connect",
"connection", "conscious", "consider", "consist", "contain", "continue", "control", "cooing", "cook", "cool",
"cooperative", "coordinated", "copper", "copy", "corn", "correct", "cough", "count", "country", "courageous",
"cover", "cow", "cowardly", "cows", "crabby", "crack", "cracker", "crash", "crate", "craven", "crawl", "crayon",
"crazy", "cream", "creator", "creature", "credit", "creepy", "crib", "crime", "crook", "crooked", "cross",
"crow", "crowd", "crowded", "crown", "cruel", "crush", "cry", "cub", "cuddly", "cultured", "cumbersome", "cup",
"cure", "curious", "curl", "curly", "current", "curtain", "curve", "curved", "curvy", "cushion", "cut", "cute",
"cycle", "cynical", "dad", "daffy", "daily", "dam", "damage", "damaged", "damaging", "damp", "dance",
"dangerous", "dapper", "dare", "dark", "dashing", "daughter", "day", "dazzling", "dead", "deadpan", "deafening",
"dear", "death", "debonair", "debt", "decay", "deceive", "decide", "decision", "decisive", "decorate",
"decorous", "deep", "deeply", "deer", "defeated", "defective", "defiant", "degree", "delay", "delicate",
"delicious", "delight", "delightful", "delirious", "deliver", "demonic", "depend", "dependent", "depressed",
"deranged", "describe", "descriptive", "desert", "deserted", "deserve", "design", "desire", "desk", "destroy",
"destruction", "detail", "detailed", "detect", "determined", "develop", "development", "devilish", "didactic",
"different", "difficult", "digestion", "diligent", "dime", "dinner", "dinosaurs", "direction", "direful",
"dirt", "dirty", "disagree", "disagreeable", "disappear", "disapprove", "disarm", "disastrous", "discover",
"discovery", "discreet", "discussion", "disgusted", "disgusting", "disillusioned", "dislike", "dispensable",
"distance", "distinct", "distribution", "disturbed", "divergent", "divide", "division", "dizzy", "dock",
"doctor", "dog", "dogs", "doll", "dolls", "domineering", "donkey", "door", "double", "doubt", "doubtful",
"downtown", "drab", "draconian", "drag", "drain", "dramatic", "drawer", "dream", "dreary", "dress", "drink",
"drip", "driving", "drop", "drown", "drum", "drunk", "dry", "duck", "ducks", "dull", "dust", "dusty", "dynamic",
"dysfunctional", "eager", "ear", "early", "earn", "earsplitting", "earth", "earthquake", "earthy", "easy",
"eatable", "economic", "edge", "educate", "educated", "education", "effect", "efficacious", "efficient", "egg",
"eggnog", "eggs", "eight", "elastic", "elated", "elbow", "elderly", "electric", "elegant", "elfin", "elite",
"embarrass", "embarrassed", "eminent", "employ", "empty", "enchanted", "enchanting", "encourage", "encouraging",
"end", "endurable", "energetic", "engine", "enjoy", "enormous", "enter", "entertain", "entertaining",
"enthusiastic", "envious", "equable", "equal", "erect", "erratic", "error", "escape", "ethereal", "evanescent",
"evasive", "even", "event", "examine", "example", "excellent", "exchange", "excite", "excited", "exciting",
"exclusive", "excuse", "exercise", "exist", "existence", "exotic", "expand", "expansion", "expect", "expensive",
"experience", "expert", "explain", "explode", "extend", "extra-large", "extra-small", "exuberant", "exultant",
"eye", "eyes", "fabulous", "face", "fact", "fade", "faded", "fail", "faint", "fair", "fairies", "faithful",
"fall", "fallacious", "false", "familiar", "famous", "fanatical", "fancy", "fang", "fantastic", "far",
"far-flung", "farm", "fascinated", "fast", "fasten", "fat", "faulty", "fax", "fear", "fearful", "fearless",
"feeble", "feeling", "feigned", "female", "fence", "fertile", "festive", "fetch", "few", "field", "fierce",
"file", "fill", "film", "filthy", "fine", "finger", "finicky", "fire", "fireman", "first", "fish", "fit",
"five", "fix", "fixed", "flag", "flagrant", "flaky", "flame", "flap", "flash", "flashy", "flat", "flavor",
"flawless", "flesh", "flight", "flimsy", "flippant", "float", "flock", "flood", "floor", "flow", "flower",
"flowers", "flowery", "fluffy", "fluttering", "fly", "foamy", "fog", "fold", "follow", "food", "fool",
"foolish", "foot", "force", "foregoing", "forgetful", "fork", "form", "fortunate", "found", "four", "fowl",
"fragile", "frail", "frame", "frantic", "free", "freezing", "frequent", "fresh", "fretful", "friction",
"friend", "friendly", "friends", "frighten", "frightened", "frightening", "frog", "frogs", "front", "fruit",
"fry", "fuel", "full", "fumbling", "functional", "funny", "furniture", "furry", "furtive", "future",
"futuristic", "fuzzy", "gabby", "gainful", "gamy", "gaping", "garrulous", "gate", "gather", "gaudy", "gaze",
"geese", "general", "gentle", "ghost", "giant", "giants", "giddy", "gifted", "gigantic", "giraffe", "girl",
"girls", "glamorous", "glass", "gleaming", "glib", "glistening", "glorious", "glossy", "glove", "glow", "glue",
"godly", "gold", "good", "goofy", "gorgeous", "government", "governor", "grab", "graceful", "grade", "grain",
"grandfather", "grandiose", "grandmother", "grape", "grass", "grate", "grateful", "gratis", "gray", "grease",
"greasy", "great", "greedy", "green", "greet", "grey", "grieving", "grin", "grip", "groan", "groovy",
"grotesque", "grouchy", "ground", "group", "growth", "grubby", "gruesome", "grumpy", "guarantee", "guard",
"guarded", "guess", "guide", "guiltless", "guitar", "gullible", "gun", "gusty", "guttural", "habitual", "hair",
"haircut", "half", "hall", "hallowed", "halting", "hammer", "hand", "handle", "hands", "handsome", "handsomely",
"handy", "hang", "hanging", "hapless", "happen", "happy", "harass", "harbor", "hard", "hard-to-find", "harm",
"harmonious", "harmony", "harsh", "hat", "hate", "hateful", "haunt", "head", "heady", "heal", "health",
"healthy", "heap", "heartbreaking", "heat", "heavenly", "heavy", "hellish", "help", "helpful", "helpless",
"hesitant", "hideous", "high", "high-pitched", "highfalutin", "hilarious", "hill", "hissing", "historical",
"history", "hobbies", "hole", "holiday", "holistic", "hollow", "home", "homeless", "homely", "honey",
"honorable", "hook", "hop", "hope", "horn", "horrible", "horse", "horses", "hose", "hospitable", "hospital",
"hot", "hour", "house", "houses", "hover", "hug", "huge", "hulking", "hum", "humdrum", "humor", "humorous",
"hungry", "hunt", "hurried", "hurry", "hurt", "hushed", "husky", "hydrant", "hypnotic", "hysterical", "ice",
"icicle", "icky", "icy", "idea", "identify", "idiotic", "ignorant", "ignore", "ill", "ill-fated",
"ill-informed", "illegal", "illustrious", "imaginary", "imagine", "immense", "imminent", "impartial",
"imperfect", "impolite", "important", "imported", "impossible", "impress", "improve", "impulse", "incandescent",
"include", "income", "incompetent", "inconclusive", "increase", "incredible", "industrious", "industry",
"inexpensive", "infamous", "influence", "inform", "inject", "injure", "ink", "innate", "innocent",
"inquisitive", "insect", "insidious", "instinctive", "instruct", "instrument", "insurance", "intelligent",
"intend", "interest", "interesting", "interfere", "internal", "interrupt", "introduce", "invent", "invention",
"invincible", "invite", "irate", "iron", "irritate", "irritating", "island", "itch", "itchy", "jaded", "jagged",
"jail", "jam", "jar", "jazzy", "jealous", "jeans", "jelly", "jellyfish", "jewel", "jittery", "jobless", "jog",
"join", "joke", "jolly", "joyous", "judge", "judicious", "juggle", "juice", "juicy", "jumbled", "jump", "jumpy",
"juvenile", "kaput", "keen", "kettle", "key", "kick", "kill", "kind", "kindhearted", "kindly", "kiss",
"kittens", "kitty", "knee", "kneel", "knife", "knit", "knock", "knot", "knotty", "knowing", "knowledge",
"knowledgeable", "known", "label", "labored", "laborer", "lace", "lackadaisical", "lacking", "ladybug", "lake",
"lame", "lamentable", "lamp", "land", "language", "languid", "large", "last", "late", "laugh", "laughable",
"launch", "lavish", "lazy", "lean", "learn", "learned", "leather", "left", "leg", "legal", "legs", "lethal",
"letter", "letters", "lettuce", "level", "lewd", "library", "license", "lick", "lie", "light", "lighten",
"like", "likeable", "limit", "limping", "line", "linen", "lip", "liquid", "list", "listen", "literate",
"little", "live", "lively", "living", "load", "loaf", "lock", "locket", "lonely", "long", "long-term",
"longing", "look", "loose", "lopsided", "loss", "loud", "loutish", "love", "lovely", "loving", "low", "lowly",
"lucky", "ludicrous", "lumber", "lumpy", "lunch", "lunchroom", "lush", "luxuriant", "lying", "lyrical",
"macabre", "machine", "macho", "maddening", "madly", "magenta", "magic", "magical", "magnificent", "maid",
"mailbox", "majestic", "makeshift", "male", "malicious", "mammoth", "man", "manage", "maniacal", "many",
"marble", "march", "mark", "marked", "market", "married", "marry", "marvelous", "mask", "mass", "massive",
"match", "mate", "material", "materialistic", "matter", "mature", "meal", "mean", "measly", "measure", "meat",
"meaty", "meddle", "medical", "meek", "meeting", "mellow", "melodic", "melt", "melted", "memorize", "memory",
"men", "mend", "merciful", "mere", "mess up", "messy", "metal", "mice", "middle", "mighty", "military", "milk",
"milky", "mind", "mindless", "mine", "miniature", "minister", "minor", "mint", "minute", "miscreant", "miss",
"mist", "misty", "mitten", "mix", "mixed", "moan", "moaning", "modern", "moldy", "mom", "momentous", "money",
"monkey", "month", "moon", "moor", "morning", "mother", "motion", "motionless", "mountain", "mountainous",
"mourn", "mouth", "move", "muddle", "muddled", "mug", "multiply", "mundane", "murder", "murky", "muscle",
"mushy", "mute", "mysterious", "nail", "naive", "name", "nappy", "narrow", "nasty", "nation", "natural",
"naughty", "nauseating", "near", "neat", "nebulous", "necessary", "neck", "need", "needle", "needless", "needy",
"neighborly", "nerve", "nervous", "nest", "new", "next", "nice", "nifty", "night", "nimble", "nine", "nippy",
"nod", "noise", "noiseless", "noisy", "nonchalant", "nondescript", "nonstop", "normal", "north", "nose",
"nostalgic", "nosy", "note", "notebook", "notice", "noxious", "null", "number", "numberless", "numerous", "nut",
"nutritious", "nutty", "oafish", "oatmeal", "obedient", "obeisant", "obese", "obey", "object", "obnoxious",
"obscene", "obsequious", "observant", "observation", "observe", "obsolete", "obtain", "obtainable", "occur",
"ocean", "oceanic", "odd", "offbeat", "offend", "offer", "office", "oil", "old", "old-fashioned", "omniscient",
"one", "onerous", "open", "opposite", "optimal", "orange", "oranges", "order", "ordinary", "organic",
"ossified", "outgoing", "outrageous", "outstanding", "oval", "oven", "overconfident", "overflow", "overjoyed",
"overrated", "overt", "overwrought", "owe", "own", "pack", "paddle", "page", "pail", "painful", "painstaking",
"paint", "pale", "paltry", "pan", "pancake", "panicky", "panoramic", "paper", "parallel", "parcel", "parched",
"park", "parsimonious", "part", "partner", "party", "pass", "passenger", "past", "paste", "pastoral", "pat",
"pathetic", "pause", "payment", "peace", "peaceful", "pear", "peck", "pedal", "peel", "peep", "pen", "pencil",
"penitent", "perfect", "perform", "periodic", "permissible", "permit", "perpetual", "person", "pest", "pet",
"petite", "pets", "phobic", "phone", "physical", "picayune", "pick", "pickle", "picture", "pie", "pies", "pig",
"pigs", "pin", "pinch", "pine", "pink", "pipe", "piquant", "pizzas", "place", "placid", "plain", "plan",
"plane", "planes", "plant", "plantation", "plants", "plastic", "plate", "plausible", "play", "playground",
"pleasant", "please", "pleasure", "plot", "plough", "plucky", "plug", "pocket", "point", "pointless", "poised",
"poison", "poke", "polish", "polite", "political", "pollution", "poor", "pop", "popcorn", "porter", "position",
"possess", "possessive", "possible", "post", "pot", "potato", "pour", "powder", "power", "powerful", "practice",
"pray", "preach", "precede", "precious", "prefer", "premium", "prepare", "present", "preserve", "press",
"pretend", "pretty", "prevent", "previous", "price", "pricey", "prick", "prickly", "print", "private",
"probable", "produce", "productive", "profit", "profuse", "program", "promise", "property", "prose", "protect",
"protective", "protest", "proud", "provide", "psychedelic", "psychotic", "public", "puffy", "pull", "pump",
"pumped", "punch", "puncture", "punish", "punishment", "puny", "purple", "purpose", "purring", "push", "pushy",
"puzzled", "puzzling", "quack", "quaint", "quarrelsome", "quarter", "quartz", "queen", "question",
"questionable", "queue", "quick", "quickest", "quicksand", "quiet", "quill", "quilt", "quince", "quirky",
"quiver", "quixotic", "quizzical", "rabbit", "rabbits", "rabid", "race", "racial", "radiate", "ragged", "rail",
"railway", "rain", "rainstorm", "rainy", "raise", "rake", "rambunctious", "rampant", "range", "rapid", "rare",
"raspy", "rat", "rate", "ratty", "ray", "reach", "reaction", "reading", "ready", "real", "realize", "reason",
"rebel", "receipt", "receive", "receptive", "recess", "recognise", "recondite", "record", "red", "reduce",
"redundant", "reflect", "reflective", "refuse", "regret", "regular", "reign", "reject", "rejoice", "relation",
"relax", "release", "relieved", "religion", "rely", "remain", "remarkable", "remember", "remind", "reminiscent",
"remove", "repair", "repeat", "replace", "reply", "report", "representative", "reproduce", "repulsive",
"request", "rescue", "resolute", "resonant", "respect", "responsible", "rest", "retire", "return", "reward",
"rhetorical", "rhyme", "rhythm", "rice", "rich", "riddle", "rifle", "right", "righteous", "rightful", "rigid",
"ring", "rings", "rinse", "ripe", "risk", "ritzy", "river", "road", "roasted", "rob", "robin", "robust", "rock",
"rod", "roll", "romantic", "roof", "room", "roomy", "root", "rose", "rot", "rotten", "rough", "round", "route",
"royal", "rub", "ruddy", "rude", "ruin", "rule", "run", "rural", "rush", "rustic", "ruthless", "sable", "sack",
"sad", "safe", "sail", "salt", "salty", "same", "sand", "sassy", "satisfy", "satisfying", "save", "savory",
"saw", "scale", "scandalous", "scarce", "scare", "scarecrow", "scared", "scarf", "scary", "scatter",
"scattered", "scene", "scent", "school", "science", "scientific", "scintillating", "scissors", "scold",
"scorch", "scrape", "scratch", "scrawny", "scream", "screeching", "screw", "scribble", "scrub", "sea", "seal",
"search", "seashore", "seat", "second", "second-hand", "secret", "secretary", "secretive", "sedate", "seed",
"seemly", "selection", "selective", "self", "selfish", "sense", "separate", "serious", "servant", "serve",
"settle", "shade", "shaggy", "shake", "shaky", "shallow", "shame", "shape", "share", "sharp", "shave", "sheep",
"sheet", "shelf", "shelter", "shiny", "ship", "shirt", "shiver", "shivering", "shock", "shocking", "shoe",
"shoes", "shop", "short", "show", "shrill", "shrug", "shut", "shy", "sick", "side", "sidewalk", "sigh", "sign",
"signal", "silent", "silk", "silky", "silly", "silver", "simple", "simplistic", "sin", "sincere", "sink", "sip",
"sister", "sisters", "six", "size", "skate", "ski", "skillful", "skin", "skinny", "skip", "skirt", "sky",
"slap", "slave", "sleep", "sleepy", "sleet", "slim", "slimy", "slip", "slippery", "slope", "sloppy", "slow",
"small", "smart", "smash", "smell", "smelly", "smile", "smiling", "smoggy", "smoke", "smooth", "snail",
"snails", "snake", "snakes", "snatch", "sneaky", "sneeze", "sniff", "snobbish", "snore", "snotty", "snow",
"soak", "soap", "society", "sock", "soda", "sofa", "soft", "soggy", "solid", "somber", "son", "song", "songs",
"soothe", "sophisticated", "sordid", "sore", "sort", "sound", "soup", "sour", "space", "spade", "spare",
"spark", "sparkle", "sparkling", "special", "spectacular", "spell", "spicy", "spiders", "spiffy", "spiky",
"spill", "spiritual", "spiteful", "splendid", "spoil", "sponge", "spooky", "spoon", "spot", "spotless",
"spotted", "spotty", "spray", "spring", "sprout", "spurious", "spy", "squalid", "square", "squash", "squeak",
"squeal", "squealing", "squeamish", "squeeze", "squirrel", "stage", "stain", "staking", "stale", "stamp",
"standing", "star", "stare", "start", "statement", "station", "statuesque", "stay", "steadfast", "steady",
"steam", "steel", "steep", "steer", "stem", "step", "stereotyped", "stew", "stick", "sticks", "sticky", "stiff",
"stimulating", "stingy", "stir", "stitch", "stocking", "stomach", "stone", "stop", "store", "stormy", "story",
"stove", "straight", "strange", "stranger", "strap", "straw", "stream", "street", "strengthen", "stretch",
"string", "strip", "striped", "stroke", "strong", "structure", "stuff", "stupendous", "stupid", "sturdy",
"subdued", "subsequent", "substance", "substantial", "subtract", "succeed", "successful", "succinct", "suck",
"sudden", "suffer", "sugar", "suggest", "suggestion", "suit", "sulky", "summer", "sun", "super", "superb",
"superficial", "supply", "support", "suppose", "supreme", "surprise", "surround", "suspect", "suspend",
"swanky", "sweater", "sweet", "sweltering", "swift", "swim", "swing", "switch", "symptomatic", "synonymous",
"system", "table", "taboo", "tacit", "tacky", "tail", "talented", "talk", "tall", "tame", "tan", "tangible",
"tangy", "tank", "tap", "tart", "taste", "tasteful", "tasteless", "tasty", "tawdry", "tax", "teaching", "team",
"tearful", "tease", "tedious", "teeny", "teeny-tiny", "teeth", "telephone", "telling", "temper", "temporary",
"tempt", "ten", "tendency", "tender", "tense", "tent", "tenuous", "terrible", "terrific", "terrify",
"territory", "test", "tested", "testy", "texture", "thank", "thankful", "thaw", "theory", "therapeutic",
"thick", "thin", "thing", "things", "thinkable", "third", "thirsty", "thought", "thoughtful", "thoughtless",
"thread", "threatening", "three", "thrill", "throat", "throne", "thumb", "thunder", "thundering", "tick",
"ticket", "tickle", "tidy", "tie", "tiger", "tight", "tightfisted", "time", "tin", "tiny", "tip", "tire",
"tired", "tiresome", "title", "toad", "toe", "toes", "tomatoes", "tongue", "tooth", "toothbrush", "toothpaste",
"toothsome", "top", "torpid", "touch", "tough", "tour", "tow", "towering", "town", "toy", "toys", "trace",
"trade", "trail", "train", "trains", "tramp", "tranquil", "transport", "trap", "trashy", "travel", "tray",
"treat", "treatment", "tree", "trees", "tremble", "tremendous", "trick", "tricky", "trip", "trite", "trot",
"trouble", "troubled", "trousers", "truck", "trucks", "truculent", "true", "trust", "truthful", "try", "tub",
"tug", "tumble", "turkey", "turn", "twig", "twist", "two", "type", "typical", "ubiquitous", "ugliest", "ugly",
"ultra", "umbrella", "unable", "unaccountable", "unadvised", "unarmed", "unbecoming", "unbiased", "uncle",
"uncovered", "understood", "underwear", "undesirable", "undress", "unequal", "unequaled", "uneven", "unfasten",
"unhealthy", "uninterested", "unique", "unit", "unite", "unkempt", "unknown", "unlock", "unnatural", "unpack",
"unruly", "unsightly", "unsuitable", "untidy", "unused", "unusual", "unwieldy", "unwritten", "upbeat", "uppity",
"upset", "uptight", "use", "used", "useful", "useless", "utopian", "utter", "uttermost", "vacation", "vacuous",
"vagabond", "vague", "valuable", "value", "van", "vanish", "various", "vase", "vast", "vegetable", "veil",
"vein", "vengeful", "venomous", "verdant", "verse", "versed", "vessel", "vest", "victorious", "view",
"vigorous", "violent", "violet", "visit", "visitor", "vivacious", "voice", "voiceless", "volatile", "volcano",
"volleyball", "voracious", "voyage", "vulgar", "wacky", "waggish", "wail", "wait", "waiting", "wakeful", "walk",
"wall", "wander", "wandering", "want", "wanting", "war", "warlike", "warm", "warn", "wary", "wash", "waste",
"wasteful", "watch", "water", "watery", "wave", "waves", "wax", "way", "weak", "wealth", "wealthy", "weary",
"weather", "week", "weigh", "weight", "welcome", "well-groomed", "well-made", "well-off", "well-to-do", "wet",
"wheel", "whimsical", "whine", "whip", "whirl", "whisper", "whispering", "whistle", "white", "whole",
"wholesale", "wicked", "wide", "wide-eyed", "wiggly", "wild", "wilderness", "willing", "wind", "window",
"windy", "wine", "wing", "wink", "winter", "wipe", "wire", "wiry", "wise", "wish", "wistful", "witty", "wobble",
"woebegone", "woman", "womanly", "women", "wonder", "wonderful", "wood", "wooden", "wool", "woozy", "word",
"work", "workable", "worm", "worried", "worry", "worthless", "wound", "wrap", "wrathful", "wreck", "wren",
"wrench", "wrestle", "wretched", "wriggle", "wrist", "writer", "writing", "wrong", "wry", "x-ray", "yak", "yam",
"yard", "yarn", "yawn", "year", "yell", "yellow", "yielding", "yoke", "young", "youthful", "yummy", "zany",
"zealous", "zebra", "zephyr", "zesty", "zinc", "zip", "zipper", "zippy", "zonked", "zoo", "zoom"]
| 0 | 0 | 0 |
a2cc51e71d5adbbc105d3688d047f7bf6f06e078 | 1,874 | py | Python | run_raml_exp.py | pcyin/pytorch_nmt | bf28dae8a4c71e1f3f3fcb51e989fab905886f44 | [
"CC-BY-4.0"
] | 122 | 2017-04-17T18:36:43.000Z | 2022-02-09T06:24:13.000Z | run_raml_exp.py | pcyin/pytorch_nmt | bf28dae8a4c71e1f3f3fcb51e989fab905886f44 | [
"CC-BY-4.0"
] | 5 | 2017-10-08T14:13:52.000Z | 2018-10-11T04:43:11.000Z | run_raml_exp.py | pcyin/pytorch_nmt | bf28dae8a4c71e1f3f3fcb51e989fab905886f44 | [
"CC-BY-4.0"
] | 29 | 2017-04-27T18:26:47.000Z | 2021-04-08T05:58:10.000Z | import os
# Paths to the preprocessed IWSLT'14 De-En corpora (shared by every job).
train_src="../dynet_nmt/data/train.de-en.de.wmixerprep"
train_tgt="../dynet_nmt/data/train.de-en.en.wmixerprep"
dev_src="../dynet_nmt/data/valid.de-en.de"
dev_tgt="../dynet_nmt/data/valid.de-en.en"
test_src="../dynet_nmt/data/test.de-en.de"
test_tgt="../dynet_nmt/data/test.de-en.en"
# Generate and submit one train+decode shell script per RAML sampling temperature.
for temp in [0.6, 0.8]: # 0.75, 0.80, 0.85, 0.90, 0.95, 1.0
    # Derive per-job file names from the temperature-tagged job name.
    job_name = 'iwslt14.raml.512enc.corrupt_ngram.t%.3f' % temp
    train_log = 'train.' + job_name + '.log'
    model_name = 'model.' + job_name
    job_file = 'scripts/train.%s.sh' % job_name
    decode_file = job_name + '.test.en'
    with open(job_file, 'w') as f:
        # The emitted script trains with RAML sampling, then decodes the test
        # set with beam search and appends the BLEU score to the training log.
        f.write("""#!/bin/sh
python nmt.py \
--cuda \
--mode raml_train \
--vocab iwslt.vocab.bin \
--save_to models/{model_name} \
--valid_niter 15400 \
--valid_metric ppl \
--beam_size 5 \
--batch_size 10 \
--sample_size 10 \
--hidden_size 256 \
--embed_size 256 \
--uniform_init 0.1 \
--dropout 0.2 \
--clip_grad 5.0 \
--lr_decay 0.5 \
--temp {temp} \
--raml_sample_file samples.corrupt_ngram.bleu_score.txt \
--train_src {train_src} \
--train_tgt {train_tgt} \
--dev_src {dev_src} \
--dev_tgt {dev_tgt} 2>logs/{train_log}
python nmt.py \
--cuda \
--mode test \
--load_model models/{model_name}.bin \
--beam_size 5 \
--decode_max_time_step 100 \
--save_to_file decode/{decode_file} \
--test_src {test_src} \
--test_tgt {test_tgt}
echo "test result" >> logs/{train_log}
perl multi-bleu.perl {test_tgt} < decode/{decode_file} >> logs/{train_log}
""".format(model_name=model_name, temp=temp,
           train_src=train_src, train_tgt=train_tgt,
           dev_src=dev_src, dev_tgt=dev_tgt,
           test_src=test_src, test_tgt=test_tgt,
           train_log=train_log, decode_file=decode_file))
    # Hand the generated script to the cluster submission wrapper.
    os.system('bash submit_job.sh %s' % job_file)
| 30.225806 | 74 | 0.640875 | import os
# Preprocessed IWSLT'14 De-En corpora used by every generated job.
train_src = "../dynet_nmt/data/train.de-en.de.wmixerprep"
train_tgt = "../dynet_nmt/data/train.de-en.en.wmixerprep"
dev_src = "../dynet_nmt/data/valid.de-en.de"
dev_tgt = "../dynet_nmt/data/valid.de-en.en"
test_src = "../dynet_nmt/data/test.de-en.de"
test_tgt = "../dynet_nmt/data/test.de-en.en"

# Shell-script template: RAML training followed by beam-search decoding and
# BLEU scoring, with the score appended to the training log.
JOB_TEMPLATE = """#!/bin/sh
python nmt.py \
--cuda \
--mode raml_train \
--vocab iwslt.vocab.bin \
--save_to models/{model_name} \
--valid_niter 15400 \
--valid_metric ppl \
--beam_size 5 \
--batch_size 10 \
--sample_size 10 \
--hidden_size 256 \
--embed_size 256 \
--uniform_init 0.1 \
--dropout 0.2 \
--clip_grad 5.0 \
--lr_decay 0.5 \
--temp {temp} \
--raml_sample_file samples.corrupt_ngram.bleu_score.txt \
--train_src {train_src} \
--train_tgt {train_tgt} \
--dev_src {dev_src} \
--dev_tgt {dev_tgt} 2>logs/{train_log}
python nmt.py \
--cuda \
--mode test \
--load_model models/{model_name}.bin \
--beam_size 5 \
--decode_max_time_step 100 \
--save_to_file decode/{decode_file} \
--test_src {test_src} \
--test_tgt {test_tgt}
echo "test result" >> logs/{train_log}
perl multi-bleu.perl {test_tgt} < decode/{decode_file} >> logs/{train_log}
"""

# Emit and submit one job script per RAML sampling temperature.
for temperature in [0.6, 0.8]: # 0.75, 0.80, 0.85, 0.90, 0.95, 1.0
    job_name = 'iwslt14.raml.512enc.corrupt_ngram.t%.3f' % temperature
    train_log = 'train.' + job_name + '.log'
    model_name = 'model.' + job_name
    job_file = 'scripts/train.%s.sh' % job_name
    decode_file = job_name + '.test.en'
    script_text = JOB_TEMPLATE.format(
        model_name=model_name, temp=temperature,
        train_src=train_src, train_tgt=train_tgt,
        dev_src=dev_src, dev_tgt=dev_tgt,
        test_src=test_src, test_tgt=test_tgt,
        train_log=train_log, decode_file=decode_file)
    with open(job_file, 'w') as script_fh:
        script_fh.write(script_text)
    # Hand the generated script to the cluster submission wrapper.
    os.system('bash submit_job.sh %s' % job_file)
cd38b22d3922c73b19709343da9d3b773aea1fa9 | 2,509 | py | Python | utils/ranking/least_confidence_softmax.py | kkontras/Sleep_net | a6a83d4624989cc8a79238e491da06dc22d562b8 | [
"MIT"
] | 1 | 2022-02-22T02:40:41.000Z | 2022-02-22T02:40:41.000Z | utils/ranking/least_confidence_softmax.py | kkontras/Sleep_net | a6a83d4624989cc8a79238e491da06dc22d562b8 | [
"MIT"
] | null | null | null | utils/ranking/least_confidence_softmax.py | kkontras/Sleep_net | a6a83d4624989cc8a79238e491da06dc22d562b8 | [
"MIT"
] | null | null | null | import numpy as np
def calculate_probs(predicted_classes, num_classes):
    '''
    Collapse per-model softmax outputs into one averaged estimate per datapoint.

    :param predicted_classes: matrix num_datapoints X num_ensembles (or dropout_iterations)
    :param num_classes: unused; kept so all confidence helpers share a signature
    :return: the mean over axis 1 — one averaged probability per datapoint
    '''
    return np.asarray(predicted_classes).mean(axis=1)
def least_conf(data, num_classes):
    """Normalized least-confidence score: 0 for a fully confident prediction,
    approaching 1 as the top probability falls toward chance level."""
    n_labels = float(num_classes)
    probs = calculate_probs(data, n_labels)
    top_prob = np.nanmax(probs)  # most confident prediction, ignoring NaNs
    score = (1 - top_prob) * (n_labels / (n_labels - 1))
    return np.array([score])
def margin_conf(data, num_classes):
    """Margin confidence: 1 minus the gap between the two largest
    probabilities; higher values mean a less decisive prediction."""
    n_labels = float(num_classes)
    probs = calculate_probs(data, n_labels)
    descending = np.sort(probs)[::-1]  # largest probability first
    score = 1 - (descending[0] - descending[1])
    return np.array([score])
def ratio_conf(data, num_classes):
    """Ratio confidence: second-largest probability divided by the largest;
    close to 1 means the top two outcomes are nearly tied."""
    n_labels = float(num_classes)
    probs = calculate_probs(data, n_labels)
    descending = np.sort(probs)[::-1]  # largest probability first
    return np.array([descending[1] / descending[0]])
def entropy_conf(data, num_classes):
    """Entropy-based confidence, normalized by log2 of the number of entries
    so the score lies roughly in [0, 1]."""
    probs = calculate_probs(data, float(num_classes))
    # The small epsilon keeps log2 finite when a probability is exactly zero.
    raw_entropy = -np.sum(probs * np.log2(probs + 0.00001))
    return np.array([raw_entropy / np.log2(probs.size)])
def bald_conf(data, num_classes):
    """BALD-style acquisition: predictive entropy minus the expected
    per-sample entropy. ``num_classes`` is unused.

    NOTE(review): standard BALD averages ``data`` over the MC/ensemble axis
    before computing the predictive entropy; here ``data`` is used directly —
    confirm this is intentional.
    """
    log_p = np.log(data + 1e-10)  # epsilon avoids log(0)
    expected_entropy = -np.mean(np.sum(data * log_p, axis=-1), axis=0)
    predictive_entropy = -np.sum(data * log_p, axis=-1)
    return np.array([predictive_entropy - expected_entropy])
def calculate_probs(predicted_classes, num_classes):
    '''
    Average per-model (ensemble / dropout-iteration) softmax outputs into a
    single probability estimate per datapoint.

    :param predicted_classes: matrix num_datapoints X num_ensembles (or dropout_iterations)
    :param num_classes: unused; kept so all *_conf helpers share a signature
    :return: the mean over axis 1 — one averaged value per datapoint.
        NOTE(review): an earlier docstring claimed a fixed 10-element per-class
        vector; the code simply averages axis 1 — confirm semantics with callers.
    '''
    probs = np.mean(predicted_classes,axis = 1)
    return probs
e45eecc040c259a441c1826aebf97467c4f6b867 | 4,669 | py | Python | awards/models.py | OscarGichana/awards | f07d67d6ba210753a6559ca2584a14c8596200d7 | [
"Unlicense"
] | null | null | null | awards/models.py | OscarGichana/awards | f07d67d6ba210753a6559ca2584a14c8596200d7 | [
"Unlicense"
] | null | null | null | awards/models.py | OscarGichana/awards | f07d67d6ba210753a6559ca2584a14c8596200d7 | [
"Unlicense"
] | null | null | null | from __future__ import unicode_literals
from django.db import models
import datetime as dt
from django.contrib.auth.mixins import LoginRequiredMixin
from django.dispatch import receiver
from django.db.models.signals import (post_save,pre_save,)
# from PIL import Image
from django.core.files import File
from django.dispatch import receiver
from django.contrib.auth.models import User
from cloudinary.models import CloudinaryField
from phonenumber_field.modelfields import PhoneNumberField
import numpy as np
from django.db.models import Avg, Max, Min
# Create your models here.
post_save.connect(create_profile, sender = User)
| 32.880282 | 163 | 0.699293 | from __future__ import unicode_literals
from django.db import models
import datetime as dt
from django.contrib.auth.mixins import LoginRequiredMixin
from django.dispatch import receiver
from django.db.models.signals import (post_save,pre_save,)
# from PIL import Image
from django.core.files import File
from django.dispatch import receiver
from django.contrib.auth.models import User
from cloudinary.models import CloudinaryField
from phonenumber_field.modelfields import PhoneNumberField
import numpy as np
from django.db.models import Avg, Max, Min
# Create your models here.
class Profile(models.Model):
    """Extended per-user profile, linked one-to-one with ``auth.User``."""
    user = models.OneToOneField(User, null=True, on_delete=models.CASCADE)
    first_name = models.CharField(max_length = 60,null=True,blank=True)
    last_name = models.CharField(max_length = 60,null=True,blank=True)
    pic = CloudinaryField('pic',null=True)
    bio = models.TextField(null=True,blank=True)
    # Running like counter — a plain integer, not a relation.
    likes = models.IntegerField(default=0)
    email = models.EmailField(null=True)
    phone_number = PhoneNumberField(null=True)

    def get_total_likes(self):
        """Return this profile's like count.

        Fixed: ``likes`` is an IntegerField, so the previous
        ``self.likes.user.count()`` always raised AttributeError.
        """
        return self.likes

    @classmethod
    def update_profile(cls, id, email, phone_number, first_name, last_name, bio, pic):
        """Update the profile with primary key ``id``; returns the number of
        rows updated (``QuerySet.update`` semantics)."""
        profile = cls.objects.filter(id = id).update(pic = pic, id = id, first_name=first_name, last_name=last_name,bio=bio,phone_number=phone_number, email=email)
        # Fixed: previously returned the undefined name ``update`` (NameError).
        return profile

    def __str__(self):
        return str(self.user.username)

    class Meta:
        ordering = ['first_name']

    def save_profile(self):
        """Persist this profile (thin wrapper over ``Model.save``)."""
        self.save()

    def delete_profile(self):
        """Delete this profile (thin wrapper over ``Model.delete``)."""
        self.delete()
def create_profile(sender, instance, created, **kwargs):
    """post_save hook: give every freshly created User an empty Profile."""
    if created:
        Profile.objects.create(user=instance)


post_save.connect(create_profile, sender=User)
class Project(models.Model):
    """A showcased project (title, screenshot, description, live link) with
    aggregate ratings computed from its related ``Review`` rows."""
    title = models.CharField(max_length = 60)
    pic = CloudinaryField('pic',null=True)
    description = models.TextField()
    link = models.URLField(max_length = 300)

    @classmethod
    def search_projects(cls, search_term):
        """Case-insensitive search over project titles."""
        return cls.objects.filter(title__icontains=search_term)

    def save_project(self):
        """Persist this project (thin wrapper over ``Model.save``)."""
        self.save()

    def delete_project(self):
        """Delete this project (thin wrapper over ``Model.delete``)."""
        self.delete()

    @classmethod
    def update_project(cls, id, caption):
        """Replace the description of the project with primary key ``id``;
        returns the number of rows updated.

        Fixed: previously wrote ``update(description = description)`` where
        ``description`` was undefined (NameError) and the ``caption`` argument
        was never used.
        """
        return cls.objects.filter(id = id).update(description=caption)

    @classmethod
    def get_all_projects(cls):
        """Return a queryset of every project."""
        return cls.objects.all()

    @classmethod
    def get_project_by_id(cls, id):
        """Return a queryset of the project(s) with this primary key."""
        return cls.objects.filter(id=id).all()

    def average_design(self):
        """Mean design rating over all reviews (NaN when there are none)."""
        return np.mean([review.design_rating for review in self.reviews.all()])

    def average_usability(self):
        """Mean usability rating over all reviews (NaN when there are none)."""
        return np.mean([review.usability_rating for review in self.reviews.all()])

    def average_content(self):
        """Mean content rating over all reviews (NaN when there are none)."""
        return np.mean([review.content_rating for review in self.reviews.all()])

    def get_total_likes(self):
        # NOTE(review): Project declares no ``likes`` field, so this raises
        # AttributeError if called — confirm the intended relation before use.
        return self.likes.users.count()

    def __str__(self):
        return self.title

    class Meta:
        ordering = ['title']
class Review(models.Model):
    """A user's comment plus 1-10 ratings (design/usability/content) for a Project."""
    # Integer choices 1..10 shared by the three rating fields.
    RATING_CHOICES = ((1, '1'),(2, '2'),(3, '3'),(4, '4'),(5, '5'),(6, '6'),(7, '7'),(8, '8'),(9, '9'),(10, '10'),)
    project = models.ForeignKey(Project, null=True, blank=True, on_delete=models.CASCADE, related_name="reviews")
    user = models.ForeignKey(User, null=True, blank=True, on_delete=models.CASCADE, related_name='reviews')
    comment = models.TextField()
    design_rating = models.IntegerField(choices=RATING_CHOICES, default=0)
    usability_rating = models.IntegerField(choices=RATING_CHOICES, default=0)
    content_rating = models.IntegerField(choices=RATING_CHOICES, default=0)
    def save_comment(self):
        """Persist this review (thin wrapper over ``Model.save``)."""
        self.save()
    def get_comment(self, id):
        """Return all reviews for the project with the given id.

        NOTE(review): filters the whole table, ignoring ``self`` — this reads
        like it should be a @classmethod; confirm with callers.
        """
        comments = Review.objects.filter(project_id =id)
        return comments
    def __str__(self):
        return self.comment
class MoringaMerch(models.Model):
    """Merchandise entry.

    NOTE(review): the fields (user/first_name/pic/bio/email) mirror
    ``Profile`` and look copy-pasted — confirm the intended schema.
    """
    user = models.OneToOneField(User, null=True, on_delete=models.CASCADE)
    first_name = models.CharField(max_length = 60,null=True,blank=True)
    pic = CloudinaryField('pic',null=True)
    bio = models.TextField(null=True,blank=True)
    email = models.EmailField(null=True)
class AwardsProject(models.Model):
    """Award-submission project.

    NOTE(review): the fields duplicate ``Project`` exactly but this model has
    no methods or reviews relation — confirm whether both models are needed.
    """
    title = models.CharField(max_length = 60)
    pic = CloudinaryField('pic',null=True)
    description = models.TextField()
    link = models.URLField(max_length = 300)
| 1,490 | 2,407 | 137 |
b1934e332538a24404e96910b363154f4b60ff79 | 1,127 | py | Python | deeplab_features.py | gmum/proto-segmentation | e84e9e8e91711f664ffd1db26c8dabc111d17fdc | [
"MIT"
] | 1 | 2022-02-26T17:10:05.000Z | 2022-02-26T17:10:05.000Z | deeplab_features.py | gmum/proto-segmentation | e84e9e8e91711f664ffd1db26c8dabc111d17fdc | [
"MIT"
] | null | null | null | deeplab_features.py | gmum/proto-segmentation | e84e9e8e91711f664ffd1db26c8dabc111d17fdc | [
"MIT"
] | null | null | null | import torchvision
from torch import nn
def deeplabv3_resnet50_features(pretrained=False, **kwargs):
    """Build a DeepLabV3/ResNet-50 network wrapped as a feature extractor.

    Args:
        pretrained (bool): If True, returns a model pre-trained on Coco
    """
    base = torchvision.models.segmentation.deeplabv3_resnet50(pretrained=pretrained)
    # Drop the final module of the classifier head so the network emits
    # dense features instead of class logits.
    head = base.classifier._modules
    kept_names = list(head.keys())[:-1]
    base.classifier._modules = {name: head[name] for name in kept_names}
    return DeeplabV3_features(base, [3, 4, 6, 3], **kwargs)
| 31.305556 | 118 | 0.662822 | import torchvision
from torch import nn
class DeeplabV3_features(nn.Module):
    """Wrap a torchvision DeepLabV3 model and expose only its feature tensor.

    Args:
        model: a segmentation model whose ``forward`` returns a dict with an
            ``'out'`` entry (torchvision convention).
        layers: per-stage block counts of the ResNet backbone (stored for
            introspection; not used directly here).
    """
    def __init__(self, model, layers, **kwargs):
        super(DeeplabV3_features, self).__init__()
        self.model = model
        self.layers = layers
        # comes from the first conv and the following max pool
        self.kernel_sizes = [7, 3]
        self.strides = [2, 2]
        self.paddings = [3, 1]
    def forward(self, *args, **kwargs):
        # The wrapped model returns a dict; surface only the dense features.
        result = self.model.forward(*args, **kwargs)
        return result['out']
    def conv_info(self):
        """Return ``(kernel_sizes, strides, paddings)`` of the stem layers."""
        return self.kernel_sizes, self.strides, self.paddings
    def num_layers(self):
        # BUG FIX: the original did ``raise NotImplemented("TODO")`` --
        # ``NotImplemented`` is a constant, not an exception class, so calling
        # it raised a confusing TypeError instead of NotImplementedError.
        raise NotImplementedError("TODO")
def deeplabv3_resnet50_features(pretrained=False, **kwargs):
    """Construct a ResNet-50 DeepLabV3 feature extractor.

    Args:
        pretrained (bool): If True, returns a model pre-trained on Coco
    """
    net = torchvision.models.segmentation.deeplabv3_resnet50(pretrained=pretrained)
    # Strip the last classifier submodule so the wrapper exposes raw features.
    remaining = list(net.classifier._modules.keys())[:-1]
    net.classifier._modules = {key: net.classifier._modules[key] for key in remaining}
    return DeeplabV3_features(net, [3, 4, 6, 3], **kwargs)
| 484 | 15 | 130 |
0f9ecd4e1871405e5daa637649bc662fafc41edf | 2,539 | py | Python | src/globus_sdk/paging/base.py | sirosen/globus-sdk-python | 0d4e420f52329ab8f993bfe6f86729fb1ef07570 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/globus_sdk/paging/base.py | sirosen/globus-sdk-python | 0d4e420f52329ab8f993bfe6f86729fb1ef07570 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/globus_sdk/paging/base.py | sirosen/globus-sdk-python | 0d4e420f52329ab8f993bfe6f86729fb1ef07570 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | import abc
from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional
from globus_sdk.response import GlobusHTTPResponse
class Paginator(Iterable[GlobusHTTPResponse], metaclass=abc.ABCMeta):
    """
    Base class for all paginators.
    This guarantees that they have generator methods named ``pages`` and ``items``.
    Iterating on a Paginator is equivalent to iterating on its ``pages``.
    :param method: A bound method of an SDK client, used to generate a paginated variant
    :type method: callable
    :param items_key: The key to use within pages of results to get an array of items
    :type items_key: str
    :param client_args: Arguments to the underlying method which are passed when the
        paginator is instantiated. i.e. given ``client.paginated.foo(a, b, c=1)``, this
        will be ``(a, b)``. The paginator will pass these arguments to each call of the
        bound method as it pages.
    :type client_args: tuple
    :param client_kwargs: Keyword arguments to the underlying method, like
        ``client_args`` above. ``client.paginated.foo(a, b, c=1)`` will pass this as
        ``{"c": 1}``. As with ``client_args``, it's passed to each paginated call.
    :type client_kwargs: dict
    """
    @abc.abstractmethod
    def pages(self) -> Iterator[GlobusHTTPResponse]:
        """``pages()`` yields GlobusHTTPResponse objects, each one representing a page
        of results."""
    def items(self) -> Iterator:
        """
        ``items()`` of a paginator is a generator which yields each item in each page of
        results.
        ``items()`` may raise a ``ValueError`` if the paginator was constructed without
        identifying a key for use within each page of results. This may be the case for
        paginators whose pages are not primarily an array of data.
        """
        # NOTE(review): ``self.items_key`` is not assigned anywhere in this
        # block; it is presumably set by an initializer defined elsewhere --
        # confirm before relying on items().
        if self.items_key is None:
            raise ValueError(
                "Cannot provide items() iteration on a paginator where 'items_key' "
                "is not set."
            )
        for page in self.pages():
            yield from page[self.items_key]
| 38.469697 | 88 | 0.648681 | import abc
from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional
from globus_sdk.response import GlobusHTTPResponse
class Paginator(Iterable[GlobusHTTPResponse], metaclass=abc.ABCMeta):
    """
    Base class for all paginators.
    Every paginator exposes two generator methods, ``pages`` and ``items``;
    iterating over the paginator itself is the same as iterating over ``pages``.
    :param method: A bound method of an SDK client, used to generate a paginated variant
    :type method: callable
    :param items_key: The key to use within pages of results to get an array of items
    :type items_key: str
    :param client_args: Positional arguments for the underlying method, captured
        when the paginator is instantiated (``client.paginated.foo(a, b, c=1)``
        captures ``(a, b)``) and replayed on each paginated call.
    :type client_args: tuple
    :param client_kwargs: Keyword arguments for the underlying method, captured
        the same way (``{"c": 1}`` in the example above) and replayed on each
        paginated call.
    :type client_kwargs: dict
    """
    def __init__(
        self,
        method: Callable,
        *,
        items_key: Optional[str] = None,
        client_args: List[Any],
        client_kwargs: Dict[str, Any]
    ):
        self.method = method
        self.items_key = items_key
        self.client_args = client_args
        self.client_kwargs = client_kwargs
    def __iter__(self) -> Iterator[GlobusHTTPResponse]:
        yield from self.pages()
    @abc.abstractmethod
    def pages(self) -> Iterator[GlobusHTTPResponse]:
        """Yield one ``GlobusHTTPResponse`` per page of results."""
    def items(self) -> Iterator:
        """
        Yield every item from every page of results.
        May raise ``ValueError`` when the paginator was constructed without an
        ``items_key``, which happens for paginators whose pages are not
        primarily an array of data.
        """
        if self.items_key is None:
            raise ValueError(
                "Cannot provide items() iteration on a paginator where 'items_key' "
                "is not set."
            )
        for current_page in self.pages():
            for entry in current_page[self.items_key]:
                yield entry
| 369 | 0 | 54 |
4d26fa18f6869f7bce8e81bfdd5a86f5eabc9619 | 651 | py | Python | pysurf/__init__.py | MFSJMenger/pysurf | 99c6a94d4cb5046f16a0961b907061d989ffb6dc | [
"Apache-2.0"
] | 7 | 2020-10-28T13:46:08.000Z | 2021-05-27T06:41:56.000Z | pysurf/__init__.py | MFSJMenger/pysurf | 99c6a94d4cb5046f16a0961b907061d989ffb6dc | [
"Apache-2.0"
] | 2 | 2020-10-27T19:15:12.000Z | 2020-10-27T19:15:25.000Z | pysurf/__init__.py | MFSJMenger/pysurf | 99c6a94d4cb5046f16a0961b907061d989ffb6dc | [
"Apache-2.0"
] | 2 | 2021-04-15T05:54:30.000Z | 2022-02-08T00:10:10.000Z | # -*- coding: utf-8 -*-
"""Top-level package for pysurf."""
__author__ = """Maximilian F.S.J. Menger, Johannes Ehrmaier"""
__email__ = 'menger.maximilian@gmail.com'
__version__ = '0.1.0'
#
import os
#
from colt import PluginLoader
from .spp.spp import SurfacePointProvider
from .spp import AbinitioBase, Model, Interpolator
# Discover and load Colt plugins at import time (side effect of importing
# this package). ``base`` is the directory above this package, i.e. the
# repository root.
base = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
core_plugins = os.path.join(base, "core_plugins")
user_plugins = os.path.join(base, "plugins")
# load core plugins shipped with the repository
PluginLoader(core_plugins, ignorefile='plugins.ini')
# load user-provided plugins
PluginLoader(user_plugins, ignorefile='plugins.ini')
| 28.304348 | 66 | 0.751152 | # -*- coding: utf-8 -*-
"""Top-level package for pysurf."""
__author__ = """Maximilian F.S.J. Menger, Johannes Ehrmaier"""
__email__ = 'menger.maximilian@gmail.com'
__version__ = '0.1.0'
#
import os
#
from colt import PluginLoader
from .spp.spp import SurfacePointProvider
from .spp import AbinitioBase, Model, Interpolator
# Plugin discovery runs on import: ``base`` resolves to the directory above
# this package (the repository root), where the plugin folders live.
base = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
core_plugins = os.path.join(base, "core_plugins")
user_plugins = os.path.join(base, "plugins")
# load core plugins bundled with the repository
PluginLoader(core_plugins, ignorefile='plugins.ini')
# load user plugins from the top-level "plugins" folder
PluginLoader(user_plugins, ignorefile='plugins.ini')
| 0 | 0 | 0 |
0cb4e9f9ef1c4a23223d6fde648bc00b342cdbc2 | 1,363 | py | Python | docker/dockerfile/genie-parser/docker_app_run/modules/command_parse/command_parse.py | btr1975/automation-framework | b0ba661cb6bae193bd5c6531c08d9dba55c4099e | [
"MIT"
] | 8 | 2021-06-02T23:08:40.000Z | 2022-02-11T16:50:24.000Z | docker/dockerfile/genie-parser/docker_app_run/modules/command_parse/command_parse.py | btr1975/automation-framework | b0ba661cb6bae193bd5c6531c08d9dba55c4099e | [
"MIT"
] | null | null | null | docker/dockerfile/genie-parser/docker_app_run/modules/command_parse/command_parse.py | btr1975/automation-framework | b0ba661cb6bae193bd5c6531c08d9dba55c4099e | [
"MIT"
] | 2 | 2021-09-30T14:46:03.000Z | 2021-11-14T23:47:35.000Z | """
This holds functionality to get commands, and parse commands
"""
from quick_netmiko import QuickNetmiko
from pyats_genie_command_parse import GenieCommandParse
def command_parse(python_dict, fifo_queue, thread_lock):  # pylint: disable=inconsistent-return-statements
    """Run a command on a device, genie-parse the output, and queue the result.

    :type python_dict: Dict
    :param python_dict: A dictionary of connection data

    :type fifo_queue: queue.Queue Object
    :param fifo_queue: The FIFO queue

    :type thread_lock: threading.Lock Object
    :param thread_lock: The thread lock

    :rtype: None
    :returns: None, but it does put a item in the fifo_queue
    """
    with thread_lock:
        # Unsupported platforms are silently skipped: nothing is queued.
        if python_dict.get('device_type') not in {'ios', 'iosxe', 'iosxr', 'nxos'}:
            return None
        cmd = python_dict.get('command')
        session = QuickNetmiko(python_dict.get('device_ip_name'), python_dict.get('device_type'),
                               python_dict.get('username'), python_dict.get('password'))
        raw_output = session.send_commands(cmd)
        parser = GenieCommandParse(python_dict.get('device_type'))
        parsed = parser.parse_string(cmd, raw_output)
        fifo_queue.put((parsed, raw_output))
| 33.243902 | 106 | 0.707263 | """
This holds functionality to get commands, and parse commands
"""
from quick_netmiko import QuickNetmiko
from pyats_genie_command_parse import GenieCommandParse
def command_parse(python_dict, fifo_queue, thread_lock):  # pylint: disable=inconsistent-return-statements
    """Collect a device command's output, parse it with Genie, and enqueue both.

    :type python_dict: Dict
    :param python_dict: A dictionary of connection data

    :type fifo_queue: queue.Queue Object
    :param fifo_queue: The FIFO queue

    :type thread_lock: threading.Lock Object
    :param thread_lock: The thread lock

    :rtype: None
    :returns: None, but it does put a item in the fifo_queue
    """
    supported_platforms = {'ios', 'iosxe', 'iosxr', 'nxos'}
    with thread_lock:
        device_type = python_dict.get('device_type')
        if device_type not in supported_platforms:
            # Unsupported platform: leave the queue untouched.
            return None
        show_command = python_dict.get('command')
        connection = QuickNetmiko(python_dict.get('device_ip_name'), device_type,
                                  python_dict.get('username'), python_dict.get('password'))
        raw = connection.send_commands(show_command)
        structured = GenieCommandParse(device_type).parse_string(show_command, raw)
        fifo_queue.put((structured, raw))
| 0 | 0 | 0 |
e79847edbdbcc10ef24602c8316e5826238d9256 | 31,015 | py | Python | modules/commons/transformer.py | leminhnguyen/NATSpeech | 66b7b5c27b43523952b4edf1413d7cedb8c9310e | [
"MIT"
] | 561 | 2022-02-13T04:57:38.000Z | 2022-03-28T03:16:15.000Z | modules/commons/transformer.py | zjumml/NATSpeech | b1cf33e336a69e8550953bf8091e1b5ac6c0608e | [
"MIT"
] | 9 | 2022-02-14T05:17:11.000Z | 2022-03-31T02:06:13.000Z | modules/commons/transformer.py | zjumml/NATSpeech | b1cf33e336a69e8550953bf8091e1b5ac6c0608e | [
"MIT"
] | 51 | 2022-02-13T04:50:36.000Z | 2022-03-25T23:22:35.000Z | import math
import torch
from torch import nn
from torch.nn import Parameter, Linear
from modules.commons.layers import LayerNorm, Embedding
from utils.nn.seq_utils import get_incremental_state, set_incremental_state, softmax, make_positions
import torch.nn.functional as F
# Default maximum source/target sequence lengths. Not referenced within this
# block; presumably consumed by callers for positional bookkeeping -- confirm.
DEFAULT_MAX_SOURCE_POSITIONS = 2000
DEFAULT_MAX_TARGET_POSITIONS = 2000
class SinusoidalPositionalEmbedding(nn.Module):
    """This module produces sinusoidal positional embeddings of any length.
    Padding symbols are ignored.
    """
    # NOTE(review): no __init__ appears in this block; ``embedding_dim``,
    # ``padding_idx``, ``weights`` and ``_float_tensor`` must be supplied
    # elsewhere (e.g. by a subclass) -- confirm.
    @staticmethod
    def get_embedding(num_embeddings, embedding_dim, padding_idx=None):
        """Build sinusoidal embeddings.
        Matches the tensor2tensor implementation, which differs slightly from
        the description in Section 3.5 of "Attention Is All You Need".
        """
        half_dim = embedding_dim // 2
        log_step = math.log(10000) / (half_dim - 1)
        inv_freq = torch.exp(torch.arange(half_dim, dtype=torch.float) * -log_step)
        angles = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * inv_freq.unsqueeze(0)
        table = torch.cat([torch.sin(angles), torch.cos(angles)], dim=1).view(num_embeddings, -1)
        if embedding_dim % 2 == 1:
            # odd dimensions get one extra zero column
            table = torch.cat([table, torch.zeros(num_embeddings, 1)], dim=1)
        if padding_idx is not None:
            table[padding_idx, :] = 0
        return table
    def forward(self, input, incremental_state=None, timestep=None, positions=None, **kwargs):
        """Input is expected to be of size [bsz x seqlen]."""
        bsz, seq_len = input.shape[:2]
        max_pos = self.padding_idx + 1 + seq_len
        if self.weights is None or max_pos > self.weights.size(0):
            # grow the cached embedding table on demand
            self.weights = SinusoidalPositionalEmbedding.get_embedding(
                max_pos,
                self.embedding_dim,
                self.padding_idx,
            )
        self.weights = self.weights.to(self._float_tensor)
        if incremental_state is not None:
            # single-step decoding: every token in the batch shares one position
            if timestep is not None:
                pos = timestep.view(-1)[0] + 1
            else:
                pos = seq_len
            return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1)
        if positions is None:
            positions = make_positions(input, self.padding_idx)
        return self.weights.index_select(0, positions.view(-1)).view(bsz, seq_len, -1).detach()
    def max_positions(self):
        """Maximum number of supported positions."""
        return int(1e5)  # an arbitrary large number
| 41.463904 | 115 | 0.591391 | import math
import torch
from torch import nn
from torch.nn import Parameter, Linear
from modules.commons.layers import LayerNorm, Embedding
from utils.nn.seq_utils import get_incremental_state, set_incremental_state, softmax, make_positions
import torch.nn.functional as F
# Default maximum source/target sequence lengths. Not referenced within this
# block; presumably consumed by callers for positional bookkeeping -- confirm.
DEFAULT_MAX_SOURCE_POSITIONS = 2000
DEFAULT_MAX_TARGET_POSITIONS = 2000
class SinusoidalPositionalEmbedding(nn.Module):
    """This module produces sinusoidal positional embeddings of any length.
    Padding symbols are ignored.
    """
    def __init__(self, embedding_dim, padding_idx, init_size=1024):
        super().__init__()
        self.embedding_dim = embedding_dim
        self.padding_idx = padding_idx
        # precompute an initial table; forward() grows it on demand
        self.weights = SinusoidalPositionalEmbedding.get_embedding(init_size, embedding_dim, padding_idx)
        # dummy buffer that tracks the module's device/dtype for casting
        self.register_buffer('_float_tensor', torch.FloatTensor(1))
    @staticmethod
    def get_embedding(num_embeddings, embedding_dim, padding_idx=None):
        """Build sinusoidal embeddings.
        Matches the tensor2tensor implementation, which differs slightly from
        the description in Section 3.5 of "Attention Is All You Need".
        """
        half = embedding_dim // 2
        freq_step = math.log(10000) / (half - 1)
        frequencies = torch.exp(torch.arange(half, dtype=torch.float) * -freq_step)
        phases = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * frequencies.unsqueeze(0)
        emb_table = torch.cat([torch.sin(phases), torch.cos(phases)], dim=1).view(num_embeddings, -1)
        if embedding_dim % 2 == 1:
            # zero-pad odd dimensions with one extra column
            emb_table = torch.cat([emb_table, torch.zeros(num_embeddings, 1)], dim=1)
        if padding_idx is not None:
            emb_table[padding_idx, :] = 0
        return emb_table
    def forward(self, input, incremental_state=None, timestep=None, positions=None, **kwargs):
        """Input is expected to be of size [bsz x seqlen]."""
        bsz, seq_len = input.shape[:2]
        needed = self.padding_idx + 1 + seq_len
        if self.weights is None or needed > self.weights.size(0):
            # the cached table is too small -- rebuild it large enough
            self.weights = SinusoidalPositionalEmbedding.get_embedding(
                needed, self.embedding_dim, self.padding_idx)
        self.weights = self.weights.to(self._float_tensor)
        if incremental_state is not None:
            # single decoding step: all tokens share the same position
            if timestep is not None:
                pos = timestep.view(-1)[0] + 1
            else:
                pos = seq_len
            return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1)
        if positions is None:
            positions = make_positions(input, self.padding_idx)
        flat = positions.view(-1)
        return self.weights.index_select(0, flat).view(bsz, seq_len, -1).detach()
    def max_positions(self):
        """Maximum number of supported positions."""
        return int(1e5)  # an arbitrary large number
class TransformerFFNLayer(nn.Module):
    """Position-wise feed-forward sublayer: Conv1d expansion -> activation ->
    linear projection back to ``hidden_size``. ``padding='LEFT'`` makes the
    convolution causal; incremental decoding caches the last ``kernel_size``
    input frames."""
    def __init__(self, hidden_size, filter_size, padding="SAME", kernel_size=1, dropout=0., act='gelu'):
        super().__init__()
        self.kernel_size = kernel_size
        self.dropout = dropout
        self.act = act
        if padding == 'SAME':
            self.ffn_1 = nn.Conv1d(hidden_size, filter_size, kernel_size, padding=kernel_size // 2)
        elif padding == 'LEFT':
            # pad only on the left so no future frames leak in (causal)
            self.ffn_1 = nn.Sequential(
                nn.ConstantPad1d((kernel_size - 1, 0), 0.0),
                nn.Conv1d(hidden_size, filter_size, kernel_size)
            )
        self.ffn_2 = Linear(filter_size, hidden_size)
    def forward(self, x, incremental_state=None):
        # x: T x B x C
        if incremental_state is not None:
            # keep only the trailing ``kernel_size`` frames as convolution context
            state = self._get_input_buffer(incremental_state)
            if 'prev_input' in state:
                x = torch.cat((state['prev_input'], x), dim=0)
            x = x[-self.kernel_size:]
            state['prev_input'] = x
            self._set_input_buffer(incremental_state, state)
        x = self.ffn_1(x.permute(1, 2, 0)).permute(2, 0, 1)
        x = x * self.kernel_size ** -0.5
        if incremental_state is not None:
            x = x[-1:]
        if self.act == 'gelu':
            x = F.gelu(x)
        elif self.act == 'relu':
            x = F.relu(x)
        x = F.dropout(x, self.dropout, training=self.training)
        return self.ffn_2(x)
    def _get_input_buffer(self, incremental_state):
        """Fetch this layer's cached decoding state (empty dict if none)."""
        return get_incremental_state(
            self,
            incremental_state,
            'f',
        ) or {}
    def _set_input_buffer(self, incremental_state, buffer):
        """Store this layer's cached decoding state."""
        set_incremental_state(
            self,
            incremental_state,
            'f',
            buffer,
        )
    def clear_buffer(self, incremental_state):
        """Drop any cached input frames from the incremental state."""
        if incremental_state is not None:
            state = self._get_input_buffer(incremental_state)
            if 'prev_input' in state:
                del state['prev_input']
            self._set_input_buffer(incremental_state, state)
class MultiheadAttention(nn.Module):
    """Multi-head attention (fairseq-style) supporting self-attention and
    encoder-decoder attention, incremental decoding state, optional key/value
    bias vectors and zero-attention. When the common non-incremental path is
    taken, it delegates to ``F.multi_head_attention_forward``."""
    def __init__(self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0., bias=True,
                 add_bias_kv=False, add_zero_attn=False, self_attention=False,
                 encoder_decoder_attention=False):
        super().__init__()
        self.embed_dim = embed_dim
        self.kdim = kdim if kdim is not None else embed_dim
        self.vdim = vdim if vdim is not None else embed_dim
        self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
        self.scaling = self.head_dim ** -0.5
        self.self_attention = self_attention
        self.encoder_decoder_attention = encoder_decoder_attention
        assert not self.self_attention or self.qkv_same_dim, 'Self-attention requires query, key and ' \
                                                            'value to be of the same size'
        # Packed q/k/v projection when all dims agree; separate weights otherwise.
        if self.qkv_same_dim:
            self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
        else:
            self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim))
            self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim))
            self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
        if bias:
            self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
        else:
            self.register_parameter('in_proj_bias', None)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        if add_bias_kv:
            self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
            self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
        else:
            self.bias_k = self.bias_v = None
        self.add_zero_attn = add_zero_attn
        self.reset_parameters()
        # Prefer torch's fused implementation when this torch version has it.
        self.enable_torch_version = False
        if hasattr(F, "multi_head_attention_forward"):
            self.enable_torch_version = True
        else:
            self.enable_torch_version = False
        # Cached attention probabilities, reused when forward() is called with
        # reset_attn_weight=False.
        self.last_attn_probs = None
    def reset_parameters(self):
        """(Re)initialize projections with Xavier init and zero the biases."""
        if self.qkv_same_dim:
            nn.init.xavier_uniform_(self.in_proj_weight)
        else:
            nn.init.xavier_uniform_(self.k_proj_weight)
            nn.init.xavier_uniform_(self.v_proj_weight)
            nn.init.xavier_uniform_(self.q_proj_weight)
        nn.init.xavier_uniform_(self.out_proj.weight)
        if self.in_proj_bias is not None:
            nn.init.constant_(self.in_proj_bias, 0.)
            nn.init.constant_(self.out_proj.bias, 0.)
        if self.bias_k is not None:
            nn.init.xavier_normal_(self.bias_k)
        if self.bias_v is not None:
            nn.init.xavier_normal_(self.bias_v)
    def forward(
            self,
            query, key, value,
            key_padding_mask=None,
            incremental_state=None,
            need_weights=True,
            static_kv=False,
            attn_mask=None,
            before_softmax=False,
            need_head_weights=False,
            enc_dec_attn_constraint_mask=None,
            reset_attn_weight=None
    ):
        """Input shape: Time x Batch x Channel
        Args:
            key_padding_mask (ByteTensor, optional): mask to exclude
                keys that are pads, of shape `(batch, src_len)`, where
                padding elements are indicated by 1s.
            need_weights (bool, optional): return the attention weights,
                averaged over heads (default: False).
            attn_mask (ByteTensor, optional): typically used to
                implement causal attention, where the mask prevents the
                attention from looking forward in time (default: None).
            before_softmax (bool, optional): return the raw attention
                weights and values before the attention softmax.
            need_head_weights (bool, optional): return the attention
                weights for each head. Implies *need_weights*. Default:
                return the average attention weights over all heads.
            enc_dec_attn_constraint_mask (optional): per-head mask of key
                positions to block in encoder-decoder attention.
            reset_attn_weight (bool, optional): if True, cache the computed
                attention probabilities in ``self.last_attn_probs``; if False,
                reuse the previously cached ones; if None, no caching.
        """
        if need_head_weights:
            need_weights = True
        tgt_len, bsz, embed_dim = query.size()
        assert embed_dim == self.embed_dim
        assert list(query.size()) == [tgt_len, bsz, embed_dim]
        # Fast path: delegate to torch's fused kernel when no incremental
        # state or attention caching is involved.
        if self.enable_torch_version and incremental_state is None and not static_kv and reset_attn_weight is None:
            if self.qkv_same_dim:
                return F.multi_head_attention_forward(query, key, value,
                                                      self.embed_dim, self.num_heads,
                                                      self.in_proj_weight,
                                                      self.in_proj_bias, self.bias_k, self.bias_v,
                                                      self.add_zero_attn, self.dropout,
                                                      self.out_proj.weight, self.out_proj.bias,
                                                      self.training, key_padding_mask, need_weights,
                                                      attn_mask)
            else:
                return F.multi_head_attention_forward(query, key, value,
                                                      self.embed_dim, self.num_heads,
                                                      torch.empty([0]),
                                                      self.in_proj_bias, self.bias_k, self.bias_v,
                                                      self.add_zero_attn, self.dropout,
                                                      self.out_proj.weight, self.out_proj.bias,
                                                      self.training, key_padding_mask, need_weights,
                                                      attn_mask, use_separate_proj_weight=True,
                                                      q_proj_weight=self.q_proj_weight,
                                                      k_proj_weight=self.k_proj_weight,
                                                      v_proj_weight=self.v_proj_weight)
        if incremental_state is not None:
            saved_state = self._get_input_buffer(incremental_state)
            if 'prev_key' in saved_state:
                # previous time steps are cached - no need to recompute
                # key and value if they are static
                if static_kv:
                    assert self.encoder_decoder_attention and not self.self_attention
                    key = value = None
        else:
            saved_state = None
        if self.self_attention:
            # self-attention
            q, k, v = self.in_proj_qkv(query)
        elif self.encoder_decoder_attention:
            # encoder-decoder attention
            q = self.in_proj_q(query)
            if key is None:
                assert value is None
                k = v = None
            else:
                k = self.in_proj_k(key)
                v = self.in_proj_v(key)
        else:
            q = self.in_proj_q(query)
            k = self.in_proj_k(key)
            v = self.in_proj_v(value)
        q *= self.scaling
        if self.bias_k is not None:
            assert self.bias_v is not None
            k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
            v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
            if attn_mask is not None:
                attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)
            if key_padding_mask is not None:
                key_padding_mask = torch.cat(
                    [key_padding_mask, key_padding_mask.new_zeros(key_padding_mask.size(0), 1)], dim=1)
        # Fold heads into the batch dim: (B*H, T, head_dim).
        q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)
        if k is not None:
            k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
        if v is not None:
            v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
        if saved_state is not None:
            # saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
            if 'prev_key' in saved_state:
                prev_key = saved_state['prev_key'].view(bsz * self.num_heads, -1, self.head_dim)
                if static_kv:
                    k = prev_key
                else:
                    k = torch.cat((prev_key, k), dim=1)
            if 'prev_value' in saved_state:
                prev_value = saved_state['prev_value'].view(bsz * self.num_heads, -1, self.head_dim)
                if static_kv:
                    v = prev_value
                else:
                    v = torch.cat((prev_value, v), dim=1)
            if 'prev_key_padding_mask' in saved_state and saved_state['prev_key_padding_mask'] is not None:
                prev_key_padding_mask = saved_state['prev_key_padding_mask']
                if static_kv:
                    key_padding_mask = prev_key_padding_mask
                else:
                    key_padding_mask = torch.cat((prev_key_padding_mask, key_padding_mask), dim=1)
            saved_state['prev_key'] = k.view(bsz, self.num_heads, -1, self.head_dim)
            saved_state['prev_value'] = v.view(bsz, self.num_heads, -1, self.head_dim)
            saved_state['prev_key_padding_mask'] = key_padding_mask
            self._set_input_buffer(incremental_state, saved_state)
        src_len = k.size(1)
        # This is part of a workaround to get around fork/join parallelism
        # not supporting Optional types.
        if key_padding_mask is not None and key_padding_mask.shape == torch.Size([]):
            key_padding_mask = None
        if key_padding_mask is not None:
            assert key_padding_mask.size(0) == bsz
            assert key_padding_mask.size(1) == src_len
        if self.add_zero_attn:
            src_len += 1
            k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
            v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
            if attn_mask is not None:
                attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)
            if key_padding_mask is not None:
                key_padding_mask = torch.cat(
                    [key_padding_mask, torch.zeros(key_padding_mask.size(0), 1).type_as(key_padding_mask)], dim=1)
        attn_weights = torch.bmm(q, k.transpose(1, 2))
        attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)
        assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
        if attn_mask is not None:
            if len(attn_mask.shape) == 2:
                attn_mask = attn_mask.unsqueeze(0)
            elif len(attn_mask.shape) == 3:
                attn_mask = attn_mask[:, None].repeat([1, self.num_heads, 1, 1]).reshape(
                    bsz * self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights + attn_mask
        if enc_dec_attn_constraint_mask is not None:  # bs x head x L_kv
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.masked_fill(
                enc_dec_attn_constraint_mask.unsqueeze(2).bool(),
                -1e8,
            )
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
        if key_padding_mask is not None:
            # don't attend to padding symbols
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.masked_fill(
                key_padding_mask.unsqueeze(1).unsqueeze(2),
                -1e8,
            )
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
        attn_logits = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
        if before_softmax:
            return attn_weights, v
        attn_weights_float = softmax(attn_weights, dim=-1)
        attn_weights = attn_weights_float.type_as(attn_weights)
        attn_probs = F.dropout(attn_weights_float.type_as(attn_weights), p=self.dropout, training=self.training)
        if reset_attn_weight is not None:
            if reset_attn_weight:
                self.last_attn_probs = attn_probs.detach()
            else:
                assert self.last_attn_probs is not None
                attn_probs = self.last_attn_probs
        attn = torch.bmm(attn_probs, v)
        assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
        attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
        attn = self.out_proj(attn)
        if need_weights:
            attn_weights = attn_weights_float.view(bsz, self.num_heads, tgt_len, src_len).transpose(1, 0)
            if not need_head_weights:
                # average attention weights over heads
                attn_weights = attn_weights.mean(dim=0)
        else:
            attn_weights = None
        return attn, (attn_weights, attn_logits)
    def in_proj_qkv(self, query):
        """Project *query* to q, k, v with the packed weight (self-attention)."""
        return self._in_proj(query).chunk(3, dim=-1)
    def in_proj_q(self, query):
        """Apply the query projection (packed or separate weights)."""
        if self.qkv_same_dim:
            return self._in_proj(query, end=self.embed_dim)
        else:
            bias = self.in_proj_bias
            if bias is not None:
                bias = bias[:self.embed_dim]
            return F.linear(query, self.q_proj_weight, bias)
    def in_proj_k(self, key):
        """Apply the key projection (packed or separate weights)."""
        if self.qkv_same_dim:
            return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)
        else:
            weight = self.k_proj_weight
            bias = self.in_proj_bias
            if bias is not None:
                bias = bias[self.embed_dim:2 * self.embed_dim]
            return F.linear(key, weight, bias)
    def in_proj_v(self, value):
        """Apply the value projection (packed or separate weights)."""
        if self.qkv_same_dim:
            return self._in_proj(value, start=2 * self.embed_dim)
        else:
            weight = self.v_proj_weight
            bias = self.in_proj_bias
            if bias is not None:
                bias = bias[2 * self.embed_dim:]
            return F.linear(value, weight, bias)
    def _in_proj(self, input, start=0, end=None):
        """Apply the [start:end) slice of the packed input projection."""
        weight = self.in_proj_weight
        bias = self.in_proj_bias
        weight = weight[start:end, :]
        if bias is not None:
            bias = bias[start:end]
        return F.linear(input, weight, bias)
    def _get_input_buffer(self, incremental_state):
        """Fetch this module's cached decoding state (empty dict if none)."""
        return get_incremental_state(
            self,
            incremental_state,
            'attn_state',
        ) or {}
    def _set_input_buffer(self, incremental_state, buffer):
        """Store this module's cached decoding state."""
        set_incremental_state(
            self,
            incremental_state,
            'attn_state',
            buffer,
        )
    def apply_sparse_mask(self, attn_weights, tgt_len, src_len, bsz):
        # Hook for subclasses to sparsify attention; identity here.
        return attn_weights
    def clear_buffer(self, incremental_state=None):
        """Drop cached keys/values from the incremental decoding state."""
        if incremental_state is not None:
            saved_state = self._get_input_buffer(incremental_state)
            if 'prev_key' in saved_state:
                del saved_state['prev_key']
            if 'prev_value' in saved_state:
                del saved_state['prev_value']
            self._set_input_buffer(incremental_state, saved_state)
class EncSALayer(nn.Module):
    """Pre-norm Transformer encoder layer.

    Sublayer 1 (skipped entirely when ``num_heads == 0``): multi-head
    self-attention. Sublayer 2: convolutional feed-forward
    (``TransformerFFNLayer``). Each sublayer runs
    LayerNorm -> op -> dropout -> residual, then zeroes padded positions
    using ``encoder_padding_mask``.
    """
    def __init__(self, c, num_heads, dropout, attention_dropout=0.1,
                 relu_dropout=0.1, kernel_size=9, padding='SAME', act='gelu'):
        super().__init__()
        self.c = c
        self.dropout = dropout
        self.num_heads = num_heads
        if num_heads > 0:
            self.layer_norm1 = LayerNorm(c)
            self.self_attn = MultiheadAttention(
                self.c, num_heads, self_attention=True, dropout=attention_dropout, bias=False)
        self.layer_norm2 = LayerNorm(c)
        self.ffn = TransformerFFNLayer(
            c, 4 * c, kernel_size=kernel_size, dropout=relu_dropout, padding=padding, act=act)
    def forward(self, x, encoder_padding_mask=None, **kwargs):
        # Optionally force the LayerNorms into a given train/eval mode.
        layer_norm_training = kwargs.get('layer_norm_training', None)
        if layer_norm_training is not None:
            self.layer_norm1.training = layer_norm_training
            self.layer_norm2.training = layer_norm_training
        if self.num_heads > 0:
            skip = x
            normed = self.layer_norm1(x)
            attn_out, _ = self.self_attn(
                query=normed,
                key=normed,
                value=normed,
                key_padding_mask=encoder_padding_mask
            )
            attn_out = F.dropout(attn_out, self.dropout, training=self.training)
            x = skip + attn_out
            # zero out padded timesteps
            x = x * (1 - encoder_padding_mask.float()).transpose(0, 1)[..., None]
        skip = x
        ffn_out = self.ffn(self.layer_norm2(x))
        ffn_out = F.dropout(ffn_out, self.dropout, training=self.training)
        x = skip + ffn_out
        # zero out padded timesteps
        x = x * (1 - encoder_padding_mask.float()).transpose(0, 1)[..., None]
        return x
class DecSALayer(nn.Module):
    """Pre-norm Transformer decoder layer: causal self-attention,
    encoder-decoder attention (or a precomputed ``attn_out`` shortcut), and a
    causal ('LEFT'-padded) convolutional FFN, each with
    LayerNorm -> sublayer -> dropout -> residual."""
    def __init__(self, c, num_heads, dropout, attention_dropout=0.1, relu_dropout=0.1,
                 kernel_size=9, act='gelu'):
        super().__init__()
        self.c = c
        self.dropout = dropout
        self.layer_norm1 = LayerNorm(c)
        self.self_attn = MultiheadAttention(
            c, num_heads, self_attention=True, dropout=attention_dropout, bias=False
        )
        self.layer_norm2 = LayerNorm(c)
        self.encoder_attn = MultiheadAttention(
            c, num_heads, encoder_decoder_attention=True, dropout=attention_dropout, bias=False,
        )
        self.layer_norm3 = LayerNorm(c)
        self.ffn = TransformerFFNLayer(
            c, 4 * c, padding='LEFT', kernel_size=kernel_size, dropout=relu_dropout, act=act)
    def forward(
            self,
            x,
            encoder_out=None,
            encoder_padding_mask=None,
            incremental_state=None,
            self_attn_mask=None,
            self_attn_padding_mask=None,
            attn_out=None,
            reset_attn_weight=None,
            **kwargs,
    ):
        # Optionally force the LayerNorms into a given train/eval mode.
        layer_norm_training = kwargs.get('layer_norm_training', None)
        if layer_norm_training is not None:
            self.layer_norm1.training = layer_norm_training
            self.layer_norm2.training = layer_norm_training
            self.layer_norm3.training = layer_norm_training
        # 1) (causal) self-attention sublayer
        residual = x
        x = self.layer_norm1(x)
        x, _ = self.self_attn(
            query=x,
            key=x,
            value=x,
            key_padding_mask=self_attn_padding_mask,
            incremental_state=incremental_state,
            attn_mask=self_attn_mask
        )
        x = F.dropout(x, self.dropout, training=self.training)
        x = residual + x
        attn_logits = None
        # 2) encoder-decoder attention, or reuse of an external ``attn_out``
        if encoder_out is not None or attn_out is not None:
            residual = x
            x = self.layer_norm2(x)
            if encoder_out is not None:
                x, attn = self.encoder_attn(
                    query=x,
                    key=encoder_out,
                    value=encoder_out,
                    key_padding_mask=encoder_padding_mask,
                    incremental_state=incremental_state,
                    static_kv=True,
                    enc_dec_attn_constraint_mask=get_incremental_state(self, incremental_state,
                                                                       'enc_dec_attn_constraint_mask'),
                    reset_attn_weight=reset_attn_weight
                )
                attn_logits = attn[1]
            elif attn_out is not None:
                # shortcut: project a precomputed attention context as V only
                x = self.encoder_attn.in_proj_v(attn_out)
            if encoder_out is not None or attn_out is not None:
                x = F.dropout(x, self.dropout, training=self.training)
                x = residual + x
        # 3) causal convolutional feed-forward sublayer
        residual = x
        x = self.layer_norm3(x)
        x = self.ffn(x, incremental_state=incremental_state)
        x = F.dropout(x, self.dropout, training=self.training)
        x = residual + x
        return x, attn_logits
    def clear_buffer(self, input, encoder_out=None, encoder_padding_mask=None, incremental_state=None):
        # Drop cached incremental state held by cross-attention and the FFN.
        self.encoder_attn.clear_buffer(incremental_state)
        self.ffn.clear_buffer(incremental_state)
    def set_buffer(self, name, tensor, incremental_state):
        # Stash an arbitrary named tensor into this layer's incremental state.
        return set_incremental_state(self, incremental_state, name, tensor)
class TransformerEncoderLayer(nn.Module):
    """Thin wrapper that delegates to a single EncSALayer (self-attention + FFN)."""

    def __init__(self, hidden_size, dropout, kernel_size=9, num_heads=2):
        super().__init__()
        self.hidden_size = hidden_size
        self.dropout = dropout
        self.num_heads = num_heads
        # Attention dropout is pinned to 0; the shared dropout rate is reused
        # for the FFN ("relu") dropout.
        self.op = EncSALayer(hidden_size,
                             num_heads,
                             dropout=dropout,
                             attention_dropout=0.0,
                             relu_dropout=dropout,
                             kernel_size=kernel_size)

    def forward(self, x, **kwargs):
        """Apply the wrapped encoder layer; extra kwargs pass straight through."""
        out = self.op(x, **kwargs)
        return out
class TransformerDecoderLayer(nn.Module):
    """Thin wrapper around DecSALayer that also exposes its buffer hooks."""

    def __init__(self, hidden_size, dropout, kernel_size=9, num_heads=2):
        super().__init__()
        self.hidden_size = hidden_size
        self.dropout = dropout
        self.num_heads = num_heads
        # Attention dropout is pinned to 0; the shared dropout rate is reused
        # for the FFN ("relu") dropout.
        self.op = DecSALayer(hidden_size,
                             num_heads,
                             dropout=dropout,
                             attention_dropout=0.0,
                             relu_dropout=dropout,
                             kernel_size=kernel_size)

    def forward(self, x, **kwargs):
        """Delegate to the wrapped decoder layer."""
        return self.op(x, **kwargs)

    def clear_buffer(self, *args):
        """Forward incremental-state clearing to the inner layer."""
        return self.op.clear_buffer(*args)

    def set_buffer(self, *args):
        """Forward incremental-state writes to the inner layer."""
        return self.op.set_buffer(*args)
class FFTBlocks(nn.Module):
    """Stack of FFT (feed-forward Transformer) blocks, FastSpeech-style.

    Wraps ``num_layers`` TransformerEncoderLayer modules, optionally adding a
    (learnably scaled) sinusoidal position embedding at the input and a final
    LayerNorm at the output. Padded positions are re-zeroed after every layer.
    """

    def __init__(self, hidden_size, num_layers, ffn_kernel_size=9, dropout=0.0,
                 num_heads=2, use_pos_embed=True, use_last_norm=True,
                 use_pos_embed_alpha=True):
        super().__init__()
        self.num_layers = num_layers
        embed_dim = self.hidden_size = hidden_size
        self.dropout = dropout
        self.use_pos_embed = use_pos_embed
        self.use_last_norm = use_last_norm
        if use_pos_embed:
            self.max_source_positions = DEFAULT_MAX_TARGET_POSITIONS
            self.padding_idx = 0
            # alpha scales the positional embedding; a plain 1 keeps it fixed.
            self.pos_embed_alpha = nn.Parameter(torch.Tensor([1])) if use_pos_embed_alpha else 1
            self.embed_positions = SinusoidalPositionalEmbedding(
                embed_dim, self.padding_idx, init_size=DEFAULT_MAX_TARGET_POSITIONS,
            )
        self.layers = nn.ModuleList([])
        self.layers.extend([
            TransformerEncoderLayer(self.hidden_size, self.dropout,
                                    kernel_size=ffn_kernel_size, num_heads=num_heads)
            for _ in range(self.num_layers)
        ])
        if self.use_last_norm:
            self.layer_norm = nn.LayerNorm(embed_dim)
        else:
            self.layer_norm = None

    def forward(self, x, padding_mask=None, attn_mask=None, return_hiddens=False):
        """
        :param x: [B, T, C]
        :param padding_mask: [B, T]; when None it is derived by treating
            all-zero feature vectors as padding
        :param attn_mask: optional attention mask forwarded to every layer
        :param return_hiddens: if True, return every layer's output stacked
        :return: [B, T, C] or [L, B, T, C]
        """
        padding_mask = x.abs().sum(-1).eq(0).data if padding_mask is None else padding_mask
        nonpadding_mask_TB = 1 - padding_mask.transpose(0, 1).float()[:, :, None]  # [T, B, 1]
        if self.use_pos_embed:
            positions = self.pos_embed_alpha * self.embed_positions(x[..., 0])
            x = x + positions
            x = F.dropout(x, p=self.dropout, training=self.training)
        # B x T x C -> T x B x C
        x = x.transpose(0, 1) * nonpadding_mask_TB
        hiddens = []
        for layer in self.layers:
            # Re-zero padded positions after each layer so residuals can't leak into them.
            x = layer(x, encoder_padding_mask=padding_mask, attn_mask=attn_mask) * nonpadding_mask_TB
            hiddens.append(x)
        if self.use_last_norm:
            x = self.layer_norm(x) * nonpadding_mask_TB
        if return_hiddens:
            x = torch.stack(hiddens, 0)  # [L, T, B, C]
            x = x.transpose(1, 2)  # [L, B, T, C]
        else:
            x = x.transpose(0, 1)  # [B, T, C]
        return x
class FastSpeechEncoder(FFTBlocks):
    """FFT-block text encoder: token embedding scaled by sqrt(hidden_size),
    dropout, then the inherited block stack.

    The base class is built with ``use_pos_embed=False`` so FFTBlocks.forward
    does not add positions a second time; this class constructs its own
    ``embed_positions`` instead (see the note in ``forward_embedding``).
    """

    def __init__(self, dict_size, hidden_size=256, num_layers=4, kernel_size=9, num_heads=2,
                 dropout=0.0):
        super().__init__(hidden_size, num_layers, kernel_size, num_heads=num_heads,
                         use_pos_embed=False, dropout=dropout)  # use_pos_embed_alpha for compatibility
        self.embed_tokens = Embedding(dict_size, hidden_size, 0)
        self.embed_scale = math.sqrt(hidden_size)
        self.padding_idx = 0
        self.embed_positions = SinusoidalPositionalEmbedding(
            hidden_size, self.padding_idx, init_size=DEFAULT_MAX_TARGET_POSITIONS,
        )

    def forward(self, txt_tokens, attn_mask=None):
        """
        :param txt_tokens: [B, T] integer token ids (0 = padding)
        :return: [B, T, C] encoded sequence
            (NOTE(review): the previous docstring claimed a dict
            {'encoder_out': ...} was returned; the code returns the tensor.)
        """
        encoder_padding_mask = txt_tokens.eq(self.padding_idx).data
        x = self.forward_embedding(txt_tokens)  # [B, T, H]
        if self.num_layers > 0:
            x = super(FastSpeechEncoder, self).forward(x, encoder_padding_mask, attn_mask=attn_mask)
        return x

    def forward_embedding(self, txt_tokens):
        # embed tokens and positions
        x = self.embed_scale * self.embed_tokens(txt_tokens)
        if self.use_pos_embed:
            # NOTE(review): __init__ sets use_pos_embed=False via the base
            # class, so this branch appears never to run even though
            # embed_positions is constructed -- confirm positions are
            # intentionally disabled here.
            positions = self.embed_positions(txt_tokens)
            x = x + positions
        x = F.dropout(x, p=self.dropout, training=self.training)
        return x
class FastSpeechDecoder(FFTBlocks):
    """FFT-block decoder: the plain block stack with FastSpeech default sizes."""

    def __init__(self, hidden_size=256, num_layers=4, kernel_size=9, num_heads=2):
        # Everything is inherited; only the default hyper-parameters differ.
        super().__init__(hidden_size,
                         num_layers,
                         kernel_size,
                         num_heads=num_heads)
| 15,230 | 12,467 | 714 |
104170172bfc6376e2783ac34bbf0196bab87213 | 10,111 | py | Python | tests/test_point.py | pauljurczak/geometer | fbb7dd7219cc716c3ed94d390f6fa763ac1607ac | [
"MIT"
] | 83 | 2019-02-02T15:56:17.000Z | 2022-02-15T01:01:46.000Z | tests/test_point.py | pauljurczak/geometer | fbb7dd7219cc716c3ed94d390f6fa763ac1607ac | [
"MIT"
] | 54 | 2018-12-02T13:59:53.000Z | 2022-03-18T09:02:30.000Z | tests/test_point.py | pauljurczak/geometer | fbb7dd7219cc716c3ed94d390f6fa763ac1607ac | [
"MIT"
] | 11 | 2019-02-10T16:06:46.000Z | 2022-02-14T08:51:51.000Z | import numpy as np
from geometer import (
Point,
Line,
Plane,
PointCollection,
LineCollection,
PlaneCollection,
join,
meet,
is_perpendicular,
translation,
rotation,
)
| 27.777473 | 88 | 0.482742 | import numpy as np
from geometer import (
Point,
Line,
Plane,
PointCollection,
LineCollection,
PlaneCollection,
join,
meet,
is_perpendicular,
translation,
rotation,
)
class Test2D:
    """Incidence and metric operations on points and lines of the projective plane."""

    def test_join(self):
        """Joining two points yields the line through both of them."""
        p = Point(1, 0)
        q = Point(0, 1)
        assert p.join(q) == Line(-1, -1, 1)

    def test_meet(self):
        """Meeting two lines yields their intersection point."""
        l = Line(-1, -1, 2)
        m = Line(1, -1, 0)
        assert l.meet(m) == Point(1, 1)

    def test_add(self):
        """Points support + and scalar multiplication."""
        p = Point(1, 0)
        q = Point(0, 1)
        assert p + q == Point(1, 1)
        p = Point([1, 0, 0])
        q = Point(0, 1)
        assert 2 * p + 3 * q == Point(2, 3)

    def test_parallel(self):
        """parallel() builds the parallel line through a given point."""
        p = Point(0, 1)
        q = Point(1, 1)
        r = Point(0, 0)
        l = Line(p, q)
        m = l.parallel(through=r)
        assert m == Line(0, 1, 0)
        assert l.is_parallel(m)

    def test_perpendicular(self):
        """perpendicular() builds an orthogonal line through a point (2D and 3D)."""
        p = Point(1, 1)
        l = Line(1, 1, 0)
        m = l.perpendicular(p)
        assert m == Line(-1, 1, 0)
        m = l.perpendicular(Point(0, 0))
        assert m == Line(-1, 1, 0)
        p = Point(1, 1, 0)
        q = Point(0, 0, 1)
        l = Line(p, q)
        m = l.perpendicular(p)
        assert is_perpendicular(l, m)
class Test3D:
    """Joins, meets, containment and metric constructions in 3D projective space."""

    def test_join(self):
        """join() handles 3 points -> plane, 2 points -> line, line pairs and point+line."""
        p1 = Point(1, 1, 0)
        p2 = Point(2, 1, 0)
        p3 = Point(3, 4, 0)
        p4 = Point(0, 2, 0)
        # 3 points
        assert join(p1, p2, p3).contains(p4)
        # 2 points
        l = p1.join(p2)
        assert l.contains(Point(3, 1, 0))
        # two lines
        m = Line(Point(0, 0, 0), Point(1, 2, 0))
        assert join(l, m) == Plane(0, 0, 1, 0)
        # point and line
        p = join(l, p3)
        assert p.contains(p4)

    def test_meet(self):
        """meet() handles plane triples/pairs, line pairs, and plane+line."""
        p1 = Plane(1, 0, 0, 0)
        p2 = Plane(0, 0, 1, 0)
        p3 = Plane(0, 1, 0, 0)
        # three planes
        assert meet(p1, p2, p3) == Point(0, 0, 0)
        # two planes
        l = p1.meet(p2)
        m = Line(Point(0, 0, 0), Point(0, 1, 0))
        assert l == m
        # two lines
        m = Line(Point(0, 0, 0), Point(1, 2, 5))
        assert l.meet(m) == Point(0, 0, 0)
        # plane and line
        assert p3.meet(l) == Point(0, 0, 0)

    def test_contains(self):
        """A plane through three points contains coplanar points and lines."""
        p1 = Point(1, 1, 0)
        p2 = Point(2, 1, 0)
        p3 = Point(3, 4, 0)
        p4 = Point(0, 2, 0)
        p = Plane(p1, p2, p3)
        l = Line(p1, p2)
        assert p.contains(p4)
        assert p.contains(l)

    def test_is_coplanar(self):
        """Two intersecting/parallel lines in the z=0 plane are coplanar."""
        l = Line(Point(1, 1, 0), Point(2, 1, 0))
        m = Line(Point(0, 0, 0), Point(1, 2, 0))
        assert l.is_coplanar(m)

    def test_project(self):
        """Orthogonal projection of a point onto a line and onto a plane."""
        p1 = Point(1, 1, 0)
        p2 = Point(2, 1, 0)
        l = Line(p1, p2)
        assert l.project(Point(0, 0, 0)) == Point(0, 1, 0)
        e = Plane(0, 0, 1, 0)
        assert e.project(Point(1, 1, 5)) == p1

    def test_parallel(self):
        """parallel() builds the parallel plane through a given point."""
        p = Point(0, 0, 1)
        q = Point(1, 0, 1)
        r = Point(0, 1, 1)
        e = Plane(p, q, r)
        f = e.parallel(through=Point(0, 0, 0))
        assert f == Plane(0, 0, 1, 0)
        assert e.is_parallel(f)

    def test_perpendicular(self):
        """Perpendiculars through points on and off lines/planes meet where expected."""
        p = Point(1, 1, 0)
        q = Point(0, 0, 1)
        r = Point(1, 2, 3)
        l = Line(p, q)
        m = l.perpendicular(p)
        assert l.meet(m) == p
        assert is_perpendicular(l, m)
        m = l.perpendicular(r)
        assert is_perpendicular(l, m)
        e = Plane(l, r)
        m = e.perpendicular(p)
        assert e.meet(m) == p
        assert is_perpendicular(l, m)
        m = e.perpendicular(p + m.direction)
        assert e.meet(m) == p
        assert is_perpendicular(l, m)
        f = e.perpendicular(l)
        assert e.meet(f) == l
        assert is_perpendicular(e, f)
class Test4D:
    """The same join/meet/project operations in 4D, where Plane is a hyperplane."""

    def test_join(self):
        """join() covers 4/3 points, line pairs (skew and coplanar), and point+line."""
        p1 = Point(1, 1, 4, 0)
        p2 = Point(2, 1, 5, 0)
        p3 = Point(3, 4, 6, 0)
        p4 = Point(0, 2, 7, 0)
        p5 = Point(1, 5, 8, 0)
        # 4 points
        assert join(p1, p2, p3, p4).contains(p5)
        # 3 points
        assert join(p1, p2, p3).contains(p3)
        # two lines
        l = Line(p1, p2)
        m = Line(p3, p4)
        assert join(l, m) == Plane(p1, p2, p3, p4)
        # coplanar lines
        l = Line(p1, p2)
        m = Line(p1, p3)
        assert join(l, m).contains(p3)
        # point and line
        p = join(l, p3)
        assert p == join(p1, p2, p3)
        # 2 points
        l = p1.join(p2)
        assert l.contains(Point(3, 1, 6, 0))

    def test_meet(self):
        """meet() covers hyperplane quadruples, hyperplane+line, and line pairs."""
        p1 = Plane(1, 0, 0, 0, 0)
        p2 = Plane(0, 1, 0, 0, 0)
        p3 = Plane(0, 0, 1, 0, 0)
        p4 = Plane(0, 0, 0, 1, 0)
        # four hyperplanes
        assert meet(p1, p2, p3, p4) == Point(0, 0, 0, 0)
        # hyperplane and line
        l = Line(Point(0, 0, 0, 0), Point(0, 0, 1, 0))
        assert p3.meet(l) == Point(0, 0, 0, 0)
        # two lines
        m = Line(Point(0, 0, 0, 0), Point(1, 2, 5, 6))
        assert l.meet(m) == Point(0, 0, 0, 0)

    def test_project(self):
        """Projecting the origin onto a line lands on the closest point."""
        p1 = Point(1, 0, 0, 0)
        p2 = Point(0, 1, 0, 0)
        l = Line(p1, p2)
        assert l.project(Point(0, 0, 0, 0)) == Point(0.5, 0.5, 0, 0)
class TestCollections:
    """Vectorized variants: the *Collection types apply each operation element-wise."""

    def test_join(self):
        """Batched joins mirror the scalar cases (points, lines, point+line)."""
        # 2 points
        a = PointCollection([Point(0, 0), Point(0, 1)])
        b = PointCollection([Point(1, 0), Point(1, 1)])
        assert a.join(b) == LineCollection([Line(0, 1, 0), Line(0, 1, -1)])
        # 3 points
        a = PointCollection([Point(0, 0, 0), Point(0, 0, 1)])
        b = PointCollection([Point(1, 0, 0), Point(1, 0, 1)])
        c = PointCollection([Point(0, 1, 0), Point(0, 1, 1)])
        assert join(a, b, c) == PlaneCollection([Plane(0, 0, 1, 0), Plane(0, 0, 1, -1)])
        # two lines
        l = a.join(b)
        m = a.join(c)
        assert join(l, m) == PlaneCollection([Plane(0, 0, 1, 0), Plane(0, 0, 1, -1)])
        # point and line
        assert join(a, b.join(c)) == PlaneCollection(
            [Plane(0, 0, 1, 0), Plane(0, 0, 1, -1)]
        )

    def test_meet(self):
        """Batched meets mirror the scalar cases in 2D and 3D."""
        # three planes
        a = PlaneCollection([Plane(1, 0, 0, 0), Plane(1, 0, 0, -1)])
        b = PlaneCollection([Plane(0, 1, 0, 0), Plane(0, 1, 0, -1)])
        c = PlaneCollection([Plane(0, 0, 1, 0), Plane(0, 0, 1, -1)])
        assert meet(a, b, c) == PointCollection([Point(0, 0, 0), Point(1, 1, 1)])
        # two planes
        l = a.meet(b)
        m = LineCollection(
            [Line(Point(0, 0, 0), Point(0, 0, 1)), Line(Point(1, 1, 0), Point(1, 1, 1))]
        )
        assert l == m
        # two lines in 2D
        a = LineCollection([Line(0, 1, 0), Line(0, 1, -1)])
        b = LineCollection([Line(1, 0, 0), Line(1, 0, -1)])
        assert a.meet(b) == PointCollection([Point(0, 0), Point(1, 1)])
        # two lines in 3D
        a = LineCollection(
            [Line(Point(0, 0, 0), Point(0, 0, 1)), Line(Point(1, 0, 0), Point(1, 0, 1))]
        )
        b = LineCollection(
            [Line(Point(0, 0, 0), Point(0, 1, 0)), Line(Point(1, 0, 0), Point(1, 1, 0))]
        )
        assert a.meet(b) == PointCollection([Point(0, 0, 0), Point(1, 0, 0)])
        # plane and line
        a = LineCollection(
            [Line(Point(0, 0, 0), Point(0, 0, 1)), Line(Point(1, 0, 0), Point(1, 0, 1))]
        )
        b = PlaneCollection([Plane(0, 0, 1, 0), Plane(0, 0, 1, -1)])
        assert a.meet(b) == PointCollection([Point(0, 0, 0), Point(1, 0, 1)])

    def test_homogenize(self):
        """homogenize=True appends the homogeneous coordinate to plain tuples."""
        a = PointCollection([(0, 0), (0, 1)], homogenize=True)
        b = PointCollection([Point(0, 0), Point(0, 1)])
        assert a == b

    def test_arithmetic(self):
        """Element-wise +, -, scalar *, / and broadcasting of a single Point."""
        a = PointCollection([Point(0, 1), Point(0, 1)])
        b = PointCollection([Point(1, 0), Point(1, 0)])
        c = PointCollection([Point(1, 1), Point(1, 1)])
        assert a + b == c
        assert a - c == -b
        assert 2 * a + 2 * b == 2 * c
        assert (2 * a + 2 * b) / 2 == c
        assert a + Point(1, 0) == c

    def test_transform(self):
        """Translations and rotations apply to every point in the collection."""
        a = PointCollection([(1, 0), (0, 1)], homogenize=True)
        assert translation(1, 1) * a == PointCollection(
            [(2, 1), (1, 2)], homogenize=True
        )
        assert rotation(np.pi / 2) * a == PointCollection(
            [(0, 1), (-1, 0)], homogenize=True
        )

    def test_basis_matrix(self):
        """basis_matrix has one basis per element and annihilates the plane vectors."""
        a = PlaneCollection([Plane(1, 0, 0, 0), Plane(0, 1, 0, 0), Plane(0, 0, 1, 0)])
        assert a.basis_matrix.shape == (3, 3, 4)
        assert np.allclose(np.matmul(a.basis_matrix, a.array[..., None]), 0)

    def test_project(self):
        """Batched projection onto lines and planes."""
        p1 = PointCollection([(1, 1, 0), (1, 1, 5)], homogenize=True)
        p2 = PointCollection([(2, 1, 0), (2, 1, 5)], homogenize=True)
        p3 = PointCollection([(0, 0, 0), (0, 0, 5)], homogenize=True)
        l = LineCollection(p1, p2)
        assert l.project(p3) == PointCollection([(0, 1, 0), (0, 1, 5)], homogenize=True)
        e = PlaneCollection([(0, 1, 0, -1), (0, 1, 0, -2)])
        assert e.project(p3) == PointCollection([(0, 1, 0), (0, 2, 5)], homogenize=True)

    def test_perpendicular(self):
        """Batched perpendiculars through points on and off lines/planes."""
        p1 = PointCollection([(1, 1, 0), (1, 1, 5)], homogenize=True)
        p2 = PointCollection([(2, 1, 0), (2, 1, 5)], homogenize=True)
        p3 = PointCollection([(0, 0, 0), (0, 0, 5)], homogenize=True)
        l = LineCollection(p1, p2)
        m = l.perpendicular(p1)
        assert l.meet(m) == p1
        assert all(is_perpendicular(l, m))
        m = l.perpendicular(
            p3 + PointCollection([(1, 1, 0), (0, 0, 0)], homogenize=True)
        )
        assert all(is_perpendicular(l, m))
        e = PlaneCollection(l, p3)
        m = e.perpendicular(p1)
        assert e.meet(m) == p1
        assert all(is_perpendicular(l, m))
        m = e.perpendicular(p1 + PointCollection([m.direction[0], Point(0, 0, 0)]))
        assert e.meet(m) == p1
        assert all(is_perpendicular(l, m))
        f = e.perpendicular(l)
        assert e.meet(f) == l
        assert all(is_perpendicular(e, f))
| 9,210 | -23 | 709 |
e1cad1ebfe20d22de3794c61dbee9e731e565f93 | 959 | py | Python | src/infi/storagemodel/windows/device_helpers.py | Infinidat/infi.storagemodel | 81740970b5b1c0a691472f2e360d3a6e5c4d0875 | [
"Python-2.0",
"BSD-3-Clause"
] | 6 | 2015-07-29T11:22:36.000Z | 2019-01-22T19:07:42.000Z | src/infi/storagemodel/windows/device_helpers.py | Infinidat/infi.storagemodel | 81740970b5b1c0a691472f2e360d3a6e5c4d0875 | [
"Python-2.0",
"BSD-3-Clause"
] | null | null | null | src/infi/storagemodel/windows/device_helpers.py | Infinidat/infi.storagemodel | 81740970b5b1c0a691472f2e360d3a6e5c4d0875 | [
"Python-2.0",
"BSD-3-Clause"
] | 3 | 2015-01-05T13:55:38.000Z | 2018-07-07T05:05:36.000Z | from logging import getLogger
MPIO_BUS_DRIVER_INSTANCE_ID = u"Root\\MPIO\\0000".lower()
logger = getLogger(__name__)
| 26.638889 | 118 | 0.708029 | from logging import getLogger
MPIO_BUS_DRIVER_INSTANCE_ID = u"Root\\MPIO\\0000".lower()
logger = getLogger(__name__)
def is_disk_drive_managed_by_windows_mpio(disk_drive):
    """Return True when the disk's parent device is the MPIO bus driver.

    The infi device layer raises KeyError when the parent's instance id is
    unavailable; that is treated as "not MPIO".
    """
    try:
        parent_instance_id = disk_drive.parent._instance_id
    except KeyError:
        logger.debug("failed to get parent instance id for disk drive {!r}, assuming its not mpio".format(disk_drive))
        return False
    return parent_instance_id.lower() == MPIO_BUS_DRIVER_INSTANCE_ID
def safe_get_physical_drive_number(device):
    """Best-effort physical drive number lookup; returns -1 when unavailable."""
    try:
        number = device.get_physical_drive_number()
    except KeyError:
        logger.debug("failed to get physical drive number for {!r} ({!r})".format(device, device._device_object))
        return -1
    return number
def is_disk_visible_in_device_manager(disk_drive):
    """True when the disk is not flagged hidden; False if the flag is unreadable."""
    try:
        hidden = disk_drive.is_hidden()
    except KeyError:
        return False
    return not hidden
def is_device_installed(device):
    """Return True when the device exposes hardware IDs (its driver is installed).

    Accessing ``hardware_ids`` raises on uninstalled devices; any such failure
    is interpreted as "not installed".
    """
    try:
        device.hardware_ids
    except Exception:
        # Was a bare `except:` -- narrowed so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        return False
    return True
| 745 | 0 | 92 |
01f29183e23e0bb3aad8b5b52c6dcadcb0b11833 | 4,070 | py | Python | analysis/post_fmriprep.py | VU-Cog-Sci/SB-ref | 6779fd5015aea49f37f47550dc6375ebe25c36f2 | [
"MIT"
] | null | null | null | analysis/post_fmriprep.py | VU-Cog-Sci/SB-ref | 6779fd5015aea49f37f47550dc6375ebe25c36f2 | [
"MIT"
] | null | null | null | analysis/post_fmriprep.py | VU-Cog-Sci/SB-ref | 6779fd5015aea49f37f47550dc6375ebe25c36f2 | [
"MIT"
] | null | null | null |
# extra processing after fmriprep, for all tasks
import os, json
import sys, glob
import re
import numpy as np
import pandas as pd
from utils import * #import script to use relevante functions
# define participant number and open json parameter file
if len(sys.argv)<2:
raise NameError('Please add subject number (ex:01) '
'as 1st argument in the command line!')
else:
sj = str(sys.argv[1]).zfill(2) #fill subject number with 0 in case user forgets
with open('analysis_params.json','r') as json_file:
analysis_params = json.load(json_file)
# define paths and list of files
filepath = glob.glob(os.path.join(analysis_params['fmriprep_dir'],'sub-{sj}'.format(sj=sj),'*','func/*'))
tasks = ['prf']#['fn','prf','soma','rlb','rli','rs']
for t,cond in enumerate(tasks):
# list of functional files
filename = [run for run in filepath if 'task-'+tasks[t] in run and 'fsaverage' in run and run.endswith('.func.gii')]
filename.sort()
# list of confounds
confounds = [run for run in filepath if 'task-'+tasks[t] in run and run.endswith('_desc-confounds_regressors.tsv')]
confounds.sort()
if not filename: # if list empty
print('Subject %s has no files for %s' %(sj,cond))
else:
TR = analysis_params["TR"]
# set output path for processed files
outpath = os.path.join(analysis_params['post_fmriprep_outdir'],tasks[t],'sub-{sj}'.format(sj=sj))
if not os.path.exists(outpath): # check if path to save median run exist
os.makedirs(outpath)
# make loop for length of filenames
for _,file in enumerate(filename):
# define hemisphere to plot
hemi='left' if '_hemi-L' in file else 'right'
# plot all steps as sanity check
#plot_tSNR(file,hemi,os.path.join(outpath,'tSNR'),mesh='fsaverage')
if cond in ('prf'): # if pRF we cut out first 7TRs from "raw file" to make further analysis better
file = crop_gii(file,analysis_params['crop_pRF_TR'],outpath)
# high pass filter all runs (savgoy-golay)
filt_gii,filt_gii_pth = highpass_gii(file,analysis_params['sg_filt_polyorder'],analysis_params['sg_filt_deriv'],
analysis_params['sg_filt_window_length'],outpath)
#plot_tSNR(filt_gii_pth,hemi,os.path.join(outpath,'tSNR'),mesh='fsaverage')
if cond in ('prf','fn','soma'): # don't clean confounds for prf or fn.. doenst help retino maps(?)
clean_gii = filt_gii
clean_gii_pth = filt_gii_pth
else: #regress out confounds from data (not doing pca)
# to get run number, hence making sure that subtracting right confounds
run_str = '_run-'
run_num = os.path.split(file)[-1][os.path.split(file)[-1].index(run_str)+len(run_str):][0:2]
# confound for that run
conf = [tsv for _,tsv in enumerate(confounds) if run_str+run_num in os.path.split(tsv)[-1]][0]
# first sg filter them
filt_conf = highpass_pca_confounds(conf,analysis_params['nuisance_columns'],analysis_params['sg_filt_polyorder'],analysis_params['sg_filt_deriv'],
analysis_params['sg_filt_window_length'],TR,outpath)
# clean the counfounds from data
clean_gii, clean_gii_pth = clean_confounds(filt_gii_pth,filt_conf,outpath)
# do PSC
psc_data,psc_data_pth = psc_gii(clean_gii_pth,outpath, method='median')
#plot_tSNR(psc_data_pth,hemi,os.path.join(outpath,'tSNR'),mesh='fsaverage')
# smooth it
smt_file, smt_pth = smooth_gii(psc_data_pth,outpath,fwhm=analysis_params['smooth_fwhm'])
#plot_tSNR(smt_pth,hemi,os.path.join(outpath,'tSNR'),mesh='fsaverage')
| 38.396226 | 162 | 0.610565 |
# extra processing after fmriprep, for all tasks
import os, json
import sys, glob
import re
import numpy as np
import pandas as pd
from utils import * #import script to use relevante functions
# define participant number and open json parameter file
if len(sys.argv)<2:
raise NameError('Please add subject number (ex:01) '
'as 1st argument in the command line!')
else:
sj = str(sys.argv[1]).zfill(2) #fill subject number with 0 in case user forgets
with open('analysis_params.json','r') as json_file:
analysis_params = json.load(json_file)
# define paths and list of files
filepath = glob.glob(os.path.join(analysis_params['fmriprep_dir'],'sub-{sj}'.format(sj=sj),'*','func/*'))
tasks = ['prf']#['fn','prf','soma','rlb','rli','rs']
for t,cond in enumerate(tasks):
# list of functional files
filename = [run for run in filepath if 'task-'+tasks[t] in run and 'fsaverage' in run and run.endswith('.func.gii')]
filename.sort()
# list of confounds
confounds = [run for run in filepath if 'task-'+tasks[t] in run and run.endswith('_desc-confounds_regressors.tsv')]
confounds.sort()
if not filename: # if list empty
print('Subject %s has no files for %s' %(sj,cond))
else:
TR = analysis_params["TR"]
# set output path for processed files
outpath = os.path.join(analysis_params['post_fmriprep_outdir'],tasks[t],'sub-{sj}'.format(sj=sj))
if not os.path.exists(outpath): # check if path to save median run exist
os.makedirs(outpath)
# make loop for length of filenames
for _,file in enumerate(filename):
# define hemisphere to plot
hemi='left' if '_hemi-L' in file else 'right'
# plot all steps as sanity check
#plot_tSNR(file,hemi,os.path.join(outpath,'tSNR'),mesh='fsaverage')
if cond in ('prf'): # if pRF we cut out first 7TRs from "raw file" to make further analysis better
file = crop_gii(file,analysis_params['crop_pRF_TR'],outpath)
# high pass filter all runs (savgoy-golay)
filt_gii,filt_gii_pth = highpass_gii(file,analysis_params['sg_filt_polyorder'],analysis_params['sg_filt_deriv'],
analysis_params['sg_filt_window_length'],outpath)
#plot_tSNR(filt_gii_pth,hemi,os.path.join(outpath,'tSNR'),mesh='fsaverage')
if cond in ('prf','fn','soma'): # don't clean confounds for prf or fn.. doenst help retino maps(?)
clean_gii = filt_gii
clean_gii_pth = filt_gii_pth
else: #regress out confounds from data (not doing pca)
# to get run number, hence making sure that subtracting right confounds
run_str = '_run-'
run_num = os.path.split(file)[-1][os.path.split(file)[-1].index(run_str)+len(run_str):][0:2]
# confound for that run
conf = [tsv for _,tsv in enumerate(confounds) if run_str+run_num in os.path.split(tsv)[-1]][0]
# first sg filter them
filt_conf = highpass_pca_confounds(conf,analysis_params['nuisance_columns'],analysis_params['sg_filt_polyorder'],analysis_params['sg_filt_deriv'],
analysis_params['sg_filt_window_length'],TR,outpath)
# clean the counfounds from data
clean_gii, clean_gii_pth = clean_confounds(filt_gii_pth,filt_conf,outpath)
# do PSC
psc_data,psc_data_pth = psc_gii(clean_gii_pth,outpath, method='median')
#plot_tSNR(psc_data_pth,hemi,os.path.join(outpath,'tSNR'),mesh='fsaverage')
# smooth it
smt_file, smt_pth = smooth_gii(psc_data_pth,outpath,fwhm=analysis_params['smooth_fwhm'])
#plot_tSNR(smt_pth,hemi,os.path.join(outpath,'tSNR'),mesh='fsaverage')
| 0 | 0 | 0 |
96f4ff5906600259def1caf22571c3a1ea5953a4 | 141 | py | Python | velhot/admin.py | matiasmane/BWA | 1dd3e68362fafb40e615f1485f2cdf4ad74837af | [
"MIT"
] | null | null | null | velhot/admin.py | matiasmane/BWA | 1dd3e68362fafb40e615f1485f2cdf4ad74837af | [
"MIT"
] | null | null | null | velhot/admin.py | matiasmane/BWA | 1dd3e68362fafb40e615f1485f2cdf4ad74837af | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Profile, FriendRequest
admin.site.register(Profile)
admin.site.register(FriendRequest)
| 23.5 | 42 | 0.836879 | from django.contrib import admin
from .models import Profile, FriendRequest
admin.site.register(Profile)
admin.site.register(FriendRequest)
| 0 | 0 | 0 |
50bc082940abce65201e4248b53c055bb82cec0d | 1,032 | py | Python | three_variable_spin_example_lecture3.py | CornerstonesQC/Annealing_Lectures | 1fe2144fc436ced00550f7248f68196a4cf6b135 | [
"Apache-2.0"
] | 3 | 2021-08-13T17:46:49.000Z | 2021-09-19T20:20:03.000Z | three_variable_spin_example_lecture3.py | CornerstonesQC/Annealing_Lectures | 1fe2144fc436ced00550f7248f68196a4cf6b135 | [
"Apache-2.0"
] | null | null | null | three_variable_spin_example_lecture3.py | CornerstonesQC/Annealing_Lectures | 1fe2144fc436ced00550f7248f68196a4cf6b135 | [
"Apache-2.0"
] | 1 | 2021-12-15T13:09:43.000Z | 2021-12-15T13:09:43.000Z | # Copyright 2021 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dimod
from dwave.system import DWaveSampler, EmbeddingComposite
# 1. Define sampler
sampler = EmbeddingComposite(DWaveSampler(solver={'topology__type': 'chimera'}))
# 2. Define problem: anti-ferromagnetic chain
# E = a*b + b*c + c*a
bqm = dimod.BQM({}, {'ab': 1, 'bc': 1, 'ca': 1}, 0, 'SPIN')
# 3. Submit problem and parameters to the solver
sampleset = sampler.sample(bqm, num_reads=10)
# 4. Evaluate the solution
print(sampleset) | 35.586207 | 80 | 0.736434 | # Copyright 2021 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dimod
from dwave.system import DWaveSampler, EmbeddingComposite
# 1. Define sampler: EmbeddingComposite minor-embeds the problem onto a Chimera-topology QPU
sampler = EmbeddingComposite(DWaveSampler(solver={'topology__type': 'chimera'}))
# 2. Define problem: three mutually coupled anti-ferromagnetic spins
#    (couplers ab, bc and ca form a frustrated triangle -- the old "chain"
#    comment was inaccurate, since every pair is coupled)
# E = a*b + b*c + c*a
bqm = dimod.BQM({}, {'ab': 1, 'bc': 1, 'ca': 1}, 0, 'SPIN')
# 3. Submit problem and parameters to the solver (10 annealing reads)
sampleset = sampler.sample(bqm, num_reads=10)
# 4. Evaluate the solution: print the samples with energies and occurrence counts
print(sampleset)
7ac1b83c12e4dc275421ff7b51521d7518892152 | 6,106 | py | Python | genius/loader.py | duanhongyi/genius | 1bb8a8facd786c59405eb1df982a2f86d7934d61 | [
"BSD-2-Clause"
] | 204 | 2015-01-03T14:00:24.000Z | 2022-01-14T13:25:16.000Z | genius/loader.py | Liweiyanm/genius | 1bb8a8facd786c59405eb1df982a2f86d7934d61 | [
"BSD-2-Clause"
] | 5 | 2017-08-18T03:08:48.000Z | 2018-12-27T07:51:56.000Z | genius/loader.py | Liweiyanm/genius | 1bb8a8facd786c59405eb1df982a2f86d7934d61 | [
"BSD-2-Clause"
] | 63 | 2015-04-08T17:25:24.000Z | 2022-02-10T08:18:32.000Z | #encode:utf-8
from __future__ import unicode_literals
import re
import os
from wapiti import Model
from genius.trie import TrieTree
from genius.word import Word
here = os.path.abspath(os.path.dirname(__file__))
library_path = os.path.join(here, 'library')
| 38.1625 | 78 | 0.523092 | #encode:utf-8
from __future__ import unicode_literals
import re
import os
from wapiti import Model
from genius.trie import TrieTree
from genius.word import Word
here = os.path.abspath(os.path.dirname(__file__))
library_path = os.path.join(here, 'library')
class ResourceLoader(object):
    """Lazily loads and caches the segmenter's resources: CRF models,
    dictionary trie, IDF table, break tables and break/combine regexes.

    Implemented as a process-wide singleton: every instantiation returns the
    same object, and each ``load_*`` method caches its result until called
    again with ``force=True``.
    """

    _instance = None  # the shared singleton instance

    def __new__(cls, *args, **kwargs):
        # Create the singleton on first use and pre-seed all caches with None.
        # NOTE(review): *args/**kwargs are forwarded to object.__new__, which
        # raises TypeError on Python 3 when non-empty -- confirm the class is
        # only ever instantiated without arguments.
        if not cls._instance:
            cls._instance = super(
                ResourceLoader, cls).__new__(cls, *args, **kwargs)
            cls._instance._trie_tree = None
            cls._instance._crf_seg_model = None
            cls._instance._crf_pos_model = None
            cls._instance._idf_table=None
            cls._instance._break_table = None
            cls._instance._break_regex_method = None
            cls._instance._combine_regex_method = None
        return cls._instance

    def load_crf_seg_model(self, path=None, force=False):
        """Load (and cache) the wapiti CRF segmentation model.

        Raises IOError(errno=2) when the model file does not exist.
        """
        if not self._crf_seg_model or force:
            options = {}
            if path:
                options['model'] = path
            else:
                options['model'] = os.path.join(
                    library_path, "crf_seg_model.txt")
            if os.path.exists(options['model']):
                _crf_seg_model = Model(**options)
            else:
                e = IOError()
                e.errno = 2
                e.filename = options['model']
                e.strerror = "No such file or directory"
                raise e
            self._crf_seg_model = _crf_seg_model
        return self._crf_seg_model

    def load_crf_pos_model(self, path=None, force=False):
        """Load (and cache) the wapiti CRF part-of-speech model.

        Raises IOError(errno=2) when the model file does not exist.
        """
        if not self._crf_pos_model or force:
            options = {}
            if path:
                options['model'] = path
            else:
                options['model'] = os.path.join(
                    library_path, "crf_pos_model.txt")
            if os.path.exists(options['model']):
                _crf_pos_model = Model(**options)
            else:
                e = IOError()
                e.errno = 2
                e.filename = options['model']
                e.strerror = "No such file or directory"
                raise e
            self._crf_pos_model = _crf_pos_model
        return self._crf_pos_model

    def load_trie_tree(self, path=None, force=False):
        """Build (and cache) a TrieTree from every ``*.dic`` file in *path*.

        Each dictionary line is UTF-8, tab-separated: word, tagging, freq.
        """
        if not self._trie_tree or force:
            trie_tree = TrieTree()
            if not path:
                path = library_path
            for node_path in os.listdir(path):
                if not node_path.endswith('.dic'):
                    continue
                node_path = os.sep.join([path, node_path])
                with open(node_path, 'rb') as f:
                    for line in f:
                        word, tagging, freq = line.decode(
                            'utf8').strip().split('\t')
                        trie_tree.add(word, Word(
                            word,
                            freq=freq,
                            tagging=tagging,
                            source='dic',
                        ))
            self._trie_tree = trie_tree
        return self._trie_tree

    def load_idf_table(self, path=None, force=False):
        """Load (and cache) the word -> IDF mapping from a tab-separated file.

        NOTE(review): unlike the CRF loaders, a missing file returns None
        instead of raising -- confirm callers expect that inconsistency.
        """
        if not self._idf_table or force:
            if not path:
                idf_path = os.path.join(library_path, "idf.txt")
            else:
                idf_path = path
            tree = {}
            if not os.path.exists(idf_path):
                return
            with open(idf_path, 'rb') as idf_file:
                for line in idf_file:
                    label = line.decode("utf8").strip().split('\t')
                    tree[label[0]] = float(label[1])
            self._idf_table = tree
        return self._idf_table

    def load_break_table(self, path=None, force=False):
        """Load (and cache) the break table: first column -> remaining columns.

        NOTE(review): same missing-file behavior as load_idf_table (returns
        None instead of raising).
        """
        if not self._break_table or force:
            if not path:
                break_idx = os.path.join(library_path, "break.txt")
            else:
                break_idx = path
            tree = {}
            if not os.path.exists(break_idx):
                return
            with open(break_idx, 'rb') as break_file:
                for line in break_file:
                    label = line.decode("utf8").strip().split('\t')
                    tree[label[0]] = label[1:]
            self._break_table = tree
        return self._break_table

    def load_break_regex_method(self, path=None, force=False):
        """Compile (and cache) the sentence-breaking tokenizer.

        Reads character-class fragments from break.regex (unicode-escaped,
        '#'-prefixed lines are comments), builds an alternation of
        ``[class]+[*?]*`` patterns plus a catch-all for everything else, and
        returns the compiled pattern's ``findall`` bound method.
        """
        if not self._break_regex_method or force:
            _break_regex_list = []
            if not path:
                break_regex_path = os.path.join(library_path, "break.regex")
            else:
                break_regex_path = path
            with open(break_regex_path, 'rb') as break_regex_file:
                for line in break_regex_file:
                    regex = line.decode('unicode-escape').strip()
                    if not regex or regex.startswith('#'):
                        continue
                    _break_regex_list.append(regex)
            pattern = u'|'.join(
                [u'[%s]+[*?]*' % regex for regex in _break_regex_list])
            # Catch-all branch: runs of characters not in any break class.
            pattern += u'|[^%s]+[*?]*' % u''.join(_break_regex_list)
            self._break_regex_method = re.compile(pattern, re.UNICODE).findall
        return self._break_regex_method

    def load_combine_regex_method(self, path=None, force=False):
        """Compile (and cache) the token-combining matcher.

        Reads full alternation branches from combine.regex (unicode-escaped,
        '#'-prefixed lines are comments) and returns the compiled pattern's
        ``match`` bound method.
        """
        if not self._combine_regex_method or force:
            _combine_regex_list = []
            if not path:
                combine_regex_path = os.path.join(
                    library_path, "combine.regex")
            else:
                combine_regex_path = path
            with open(combine_regex_path, 'rb') as combine_regex_file:
                for line in combine_regex_file:
                    regex = line.decode('unicode-escape').strip()
                    if not regex or regex.startswith('#'):
                        continue
                    _combine_regex_list.append(regex)
            self._combine_regex_method = re.compile(
                '|'.join(_combine_regex_list), re.UNICODE).match
        return self._combine_regex_method
| 5,577 | 246 | 23 |
8e302637155982b751babacb88f34b8b60462607 | 440 | py | Python | lj2.py | liujing0608lj/spider | 8ef2223be8515a171e5bdc85c801a50cbc793d52 | [
"Apache-2.0"
] | null | null | null | lj2.py | liujing0608lj/spider | 8ef2223be8515a171e5bdc85c801a50cbc793d52 | [
"Apache-2.0"
] | null | null | null | lj2.py | liujing0608lj/spider | 8ef2223be8515a171e5bdc85c801a50cbc793d52 | [
"Apache-2.0"
] | null | null | null | import urllib.request
import urllib.parse
def search(parameters):
    """Run a Baidu search with *parameters* and save the response HTML.

    Args:
        parameters (dict): query parameters, e.g. {"wd": "term"}.

    Side effects:
        Performs an HTTP GET against baidu.com and writes the decoded body
        to /home/ubuntu/Desktop/lj2.txt.
    """
    # Bug fixes: the original def line was missing its trailing colon
    # (SyntaxError) and the parameter was misspelled "parsmeters" while the
    # body used "parameters" (NameError at call time).
    data = urllib.parse.urlencode(parameters)
    print(data)
    request_ = urllib.request.Request(url='http://www.baidu.com/s?' + data,
                                      method="GET")
    response = urllib.request.urlopen(request_)
    print(response.url)
    HTML = response.read().decode()
    print(HTML)
    with open("/home/ubuntu/Desktop/lj2.txt", mode='w') as f:
        f.write(HTML)
def main():
    """Entry point: run the sample Baidu search."""
    pars = {
        "wd": "胡旺是个好人"
    }
    # Bug fix: ``pars`` was constructed but never used; pass it to search().
    search(pars)
import urllib.parse
def search(parsmeters)
data = urllib.parse.urlencode(parameters)
print(data)
request_ = urllib.request.Request(url='http://www.baidu.com/s?'+data
,method="GET")
response = urllib.request.urlopen(request_)
print(response.url)
HTML=response.read().decode()
print(HTML)
with open("/home/ubuntu/Desktop/lj2.txt",mode='w') as f:
f.write(HTML)
def main():
pars={
"wd":"胡旺是个好人"
} | 0 | 0 | 0 |
e52e41a92b61592b299ead3462229704147e12e3 | 17,865 | py | Python | streamalert_cli/athena/handler.py | Meliairon/streamalert | 3b774a59d260b2822cd156e837781bd34f3625f7 | [
"Apache-2.0"
] | null | null | null | streamalert_cli/athena/handler.py | Meliairon/streamalert | 3b774a59d260b2822cd156e837781bd34f3625f7 | [
"Apache-2.0"
] | null | null | null | streamalert_cli/athena/handler.py | Meliairon/streamalert | 3b774a59d260b2822cd156e837781bd34f3625f7 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2017-present Airbnb, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from streamalert.classifier.clients import FirehoseClient
from streamalert.shared.utils import get_database_name, get_data_file_format
from streamalert.shared.alert import Alert
from streamalert.shared.athena import AthenaClient
from streamalert.shared.config import firehose_alerts_bucket, firehose_data_bucket
from streamalert.shared.logger import get_logger
from streamalert_cli.athena import helpers
from streamalert_cli.helpers import continue_prompt, record_to_schema
from streamalert_cli.utils import (
CLICommand,
generate_subparser,
set_parser_epilog,
UniqueSetAction
)
LOGGER = get_logger(__name__)
CREATE_TABLE_STATEMENT = ('CREATE EXTERNAL TABLE {table_name} ({schema}) '
'PARTITIONED BY (dt string) '
'{file_format} '
'LOCATION \'s3://{bucket}/{table_name}/\'')
STORE_FORMAT_JSON = ('ROW FORMAT SERDE \'org.openx.data.jsonserde.JsonSerDe\' '
'WITH SERDEPROPERTIES (\'ignore.malformed.json\' = \'true\')')
STORE_FORMAT_PARQUET = 'STORED AS PARQUET'
def get_athena_client(config):
    """Instantiate an AthenaClient from the loaded StreamAlert config.

    Args:
        config (CLIConfig): Loaded StreamAlert config

    Returns:
        AthenaClient: instantiated client for performing athena actions
    """
    account = config['global']['account']
    lambda_settings = config['lambda']['athena_partition_refresh_config']

    # Fall back to the conventional results bucket when none is configured
    default_results_bucket = 's3://{}-streamalert-athena-results'.format(account['prefix'])
    query_results_bucket = lambda_settings.get('results_bucket', default_results_bucket)

    return AthenaClient(
        get_database_name(config),
        query_results_bucket,
        'streamalert_cli',
        region=account['region']
    )
def rebuild_partitions(table, bucket, config):
    """Rebuild an Athena table's partitions

    Steps:
      - Get the list of current partitions
      - Destroy the existing table
      - Re-create the table
      - Re-create the partitions

    Args:
        table (str): The name of the table being rebuilt
        bucket (str): The s3 bucket to be used as the location for Athena data
        config (CLIConfig): Loaded StreamAlert config

    Returns:
        bool: False if errors occurred (or there were no partitions to
            rebuild), True otherwise
    """
    sanitized_table_name = FirehoseClient.firehose_log_name(table)
    athena_client = get_athena_client(config)
    # Get the current set of partitions
    partitions = athena_client.get_table_partitions(sanitized_table_name)
    if not partitions:
        LOGGER.info('No partitions to rebuild for %s, nothing to do', sanitized_table_name)
        return False
    # Drop the table
    LOGGER.info('Dropping table %s', sanitized_table_name)
    if not athena_client.drop_table(sanitized_table_name):
        return False
    LOGGER.info('Creating table %s', sanitized_table_name)
    # Re-create the table with previous partitions
    if not create_table(table, bucket, config):
        return False
    new_partitions_statements = helpers.add_partition_statements(
        partitions, bucket, sanitized_table_name)
    LOGGER.info('Creating total %d new partitions for %s', len(partitions), sanitized_table_name)
    # Apply the partition statements one at a time; on the first failure, dump
    # them to a local file so the operator can re-apply them by hand.
    # NOTE(review): if add_partition_statements returns a generator, the
    # statements already consumed by this loop will be missing from that dump
    # — confirm it returns a list.
    for idx, statement in enumerate(new_partitions_statements):
        success = athena_client.run_query(query=statement)
        LOGGER.info('Rebuilt partitions part %d', idx+1)
        if not success:
            LOGGER.error('Error re-creating new partitions for %s', sanitized_table_name)
            write_partitions_statements(new_partitions_statements, sanitized_table_name)
            return False
    LOGGER.info('Successfully rebuilt all partitions for %s', sanitized_table_name)
    return True
def write_partitions_statements(statements, sanitized_table_name):
    """Write partitions statements to a file if re-creating new partitions failed

    Args:
        statements (iterable): ALTER TABLE statement strings that still need
            to be applied to the table
        sanitized_table_name (str): sanitized name of the Athena table the
            statements belong to
    """
    file_name = 'partitions_{}.txt'.format(sanitized_table_name)
    LOGGER.error(
        'Rebuild partitions failed, writing to local file with name %s',
        file_name
    )
    with open(file_name, 'w') as partition_file:
        # Bug fix: ``statements`` is an iterable of statement strings (it is
        # enumerated by the caller), not a string; passing it directly to
        # file.write() raised TypeError. Write one statement per line instead.
        partition_file.write('\n'.join(statements))
def drop_all_tables(config):
    """Drop all 'streamalert' Athena tables

    Used when cleaning up an existing deployment

    Args:
        config (CLIConfig): Loaded StreamAlert config

    Returns:
        bool: False if errors occurred or the user declined, True otherwise
    """
    if not continue_prompt(message='Are you sure you want to drop all Athena tables?'):
        return False

    client = get_athena_client(config)
    if client.drop_all_tables():
        LOGGER.info('Successfully dropped all tables from database: %s', client.database)
        return True

    LOGGER.error('Failed to drop one or more tables from database: %s', client.database)
    return False
def _construct_create_table_statement(schema, table_name, bucket, file_format='parquet'):
    """Convert a dictionary based Athena schema to a Hive DDL statement

    Args:
        schema (dict): The sanitized Athena schema; values are either type
            strings or dicts describing the sub-fields of a struct
        table_name (str): The name of the Athena table to create
        bucket (str): The S3 bucket containing the data
        file_format (str): 'parquet' selects Parquet storage; any other value
            selects the JSON serde

    Returns:
        str: The Hive DDL CREATE TABLE expression
    """
    columns = []
    for column in sorted(schema):
        column_type = schema[column]
        if isinstance(column_type, dict):
            # Nested structs: render the sub-fields as name:type pairs
            sub_fields = ', '.join(
                '{0}:{1}'.format(name, column_type[name])
                for name in sorted(column_type)
            )
            columns.append('{0} struct<{1}>'.format(column, sub_fields))
        elif isinstance(column_type, str):
            columns.append('{0} {1}'.format(column, column_type))

    storage = STORE_FORMAT_PARQUET if file_format == 'parquet' else STORE_FORMAT_JSON
    return CREATE_TABLE_STATEMENT.format(
        table_name=table_name,
        schema=', '.join(columns),
        file_format=storage,
        bucket=bucket)
def create_table(table, bucket, config, schema_override=None):
    """Create a 'streamalert' Athena table

    Args:
        table (str): The name of the table being created; either 'alerts' or
            an enabled log type (with ':' replaced by '_')
        bucket (str): The s3 bucket to be used as the location for Athena data
        config (CLIConfig): Loaded StreamAlert config
        schema_override (set): An optional set of key=value pairs to be used for
            overriding the configured column_name=value_type.

    Returns:
        bool: False if errors occurred, True otherwise (including when the
            table already exists)
    """
    enabled_logs = FirehoseClient.load_enabled_log_sources(
        config['global']['infrastructure']['firehose'],
        config['logs']
    )
    # Convert special characters in schema name to underscores
    sanitized_table_name = FirehoseClient.firehose_log_name(table)
    # Check that the log type is enabled via Firehose
    if sanitized_table_name != 'alerts' and sanitized_table_name not in enabled_logs:
        LOGGER.error('Table name %s missing from configuration or '
                     'is not enabled.', sanitized_table_name)
        return False
    athena_client = get_athena_client(config)
    config_data_bucket = firehose_data_bucket(config)
    if not config_data_bucket:
        LOGGER.error('The \'firehose\' module is not enabled in global.json')
        return False
    # Check if the table exists; an existing table is treated as success
    if athena_client.check_table_exists(sanitized_table_name):
        LOGGER.info('The \'%s\' table already exists.', sanitized_table_name)
        return True
    if table == 'alerts':
        # get a fake alert so we can get the keys needed and their types
        alert = Alert('temp_rule_name', {}, {})
        output = alert.output_dict()
        schema = record_to_schema(output)
        athena_schema = helpers.logs_schema_to_athena_schema(schema)
        # Use the bucket if supplied, otherwise use the default alerts bucket
        bucket = bucket or firehose_alerts_bucket(config)
        query = _construct_create_table_statement(
            schema=athena_schema,
            table_name=table,
            bucket=bucket,
            file_format=get_data_file_format(config)
        )
    else:  # all other tables are log types
        # Use the bucket if supplied, otherwise use the default data bucket
        bucket = bucket or config_data_bucket
        # Map the table name back to its 'logs.json' key (first '_' -> ':')
        log_info = config['logs'][table.replace('_', ':', 1)]
        schema = dict(log_info['schema'])
        sanitized_schema = FirehoseClient.sanitize_keys(schema)
        athena_schema = helpers.logs_schema_to_athena_schema(sanitized_schema)
        # Add envelope keys to Athena Schema
        configuration_options = log_info.get('configuration')
        if configuration_options:
            envelope_keys = configuration_options.get('envelope_keys')
            if envelope_keys:
                sanitized_envelope_key_schema = FirehoseClient.sanitize_keys(envelope_keys)
                # Note: this key is wrapped in backticks to be Hive compliant
                athena_schema['`streamalert:envelope_keys`'] = helpers.logs_schema_to_athena_schema(
                    sanitized_envelope_key_schema)
        # Handle Schema overrides
        #   This is useful when an Athena schema needs to differ from the normal log schema
        if schema_override:
            for override in schema_override:
                column_name, column_type = override.split('=')
                # Columns are escaped to avoid Hive issues with special characters
                column_name = '`{}`'.format(column_name)
                if column_name in athena_schema:
                    athena_schema[column_name] = column_type
                    LOGGER.info('Applied schema override: %s:%s', column_name, column_type)
                else:
                    LOGGER.error(
                        'Schema override column %s not found in Athena Schema, skipping',
                        column_name
                    )
        query = _construct_create_table_statement(
            schema=athena_schema,
            table_name=sanitized_table_name,
            bucket=bucket,
            file_format=get_data_file_format(config)
        )
    success = athena_client.run_query(query=query)
    if not success:
        LOGGER.error('The %s table could not be created', sanitized_table_name)
        return False
    # Update the CLI config
    if table != 'alerts' and bucket != config_data_bucket:
        # Only add buckets to the config if they are not one of the default/configured buckets
        # Ensure 'buckets' exists in the config (since it is not required)
        config['lambda']['athena_partition_refresh_config']['buckets'] = (
            config['lambda']['athena_partition_refresh_config'].get('buckets', {})
        )
        if bucket not in config['lambda']['athena_partition_refresh_config']['buckets']:
            config['lambda']['athena_partition_refresh_config']['buckets'][bucket] = 'data'
        # Persist the updated bucket mapping back to disk
        config.write()
    LOGGER.info('The %s table was successfully created!', sanitized_table_name)
    return True
def create_log_tables(config):
    """Create all tables needed for historical search

    Args:
        config (CLIConfig): Loaded StreamAlert config

    Returns:
        bool: False if errors occurred, True otherwise (including when
            Firehose is disabled and there is nothing to create)
    """
    firehose_settings = config['global']['infrastructure'].get('firehose', {})
    if not firehose_settings.get('enabled'):
        # No Firehose means no historical search tables are required
        return True

    bucket_suffix = firehose_settings.get('s3_bucket_suffix', 'streamalert-data')
    data_bucket = '{}-{}'.format(config['global']['account']['prefix'],
                                 bucket_suffix)

    enabled_logs = FirehoseClient.load_enabled_log_sources(
        config['global']['infrastructure']['firehose'],
        config['logs']
    )

    # Short-circuits on the first failed table, like the original loop
    return all(
        create_table(log_stream_name, data_bucket, config)
        for log_stream_name in enabled_logs
    )
| 36.384929 | 100 | 0.646907 | """
Copyright 2017-present Airbnb, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from streamalert.classifier.clients import FirehoseClient
from streamalert.shared.utils import get_database_name, get_data_file_format
from streamalert.shared.alert import Alert
from streamalert.shared.athena import AthenaClient
from streamalert.shared.config import firehose_alerts_bucket, firehose_data_bucket
from streamalert.shared.logger import get_logger
from streamalert_cli.athena import helpers
from streamalert_cli.helpers import continue_prompt, record_to_schema
from streamalert_cli.utils import (
CLICommand,
generate_subparser,
set_parser_epilog,
UniqueSetAction
)
LOGGER = get_logger(__name__)
CREATE_TABLE_STATEMENT = ('CREATE EXTERNAL TABLE {table_name} ({schema}) '
'PARTITIONED BY (dt string) '
'{file_format} '
'LOCATION \'s3://{bucket}/{table_name}/\'')
STORE_FORMAT_JSON = ('ROW FORMAT SERDE \'org.openx.data.jsonserde.JsonSerDe\' '
'WITH SERDEPROPERTIES (\'ignore.malformed.json\' = \'true\')')
STORE_FORMAT_PARQUET = 'STORED AS PARQUET'
class AthenaCommand(CLICommand):
description = 'Perform actions related to Athena'
@classmethod
def setup_subparser(cls, subparser):
"""Add athena subparser: manage.py athena [subcommand]"""
athena_subparsers = subparser.add_subparsers(dest="athena subcommand", required=True)
cls._setup_athena_create_table_subparser(athena_subparsers)
cls._setup_athena_rebuild_subparser(athena_subparsers)
cls._setup_athena_drop_all_subparser(athena_subparsers)
@classmethod
def _setup_athena_create_table_subparser(cls, subparsers):
"""Add the athena create-table subparser: manage.py athena create-table [options]"""
athena_create_table_parser = generate_subparser(
subparsers,
'create-table',
description='Create an Athena table',
subcommand=True
)
set_parser_epilog(
athena_create_table_parser,
epilog=(
'''\
Examples:
manage.py athena create-table \\
--bucket s3.bucket.name \\
--table-name my_athena_table
'''
)
)
cls._add_default_athena_args(athena_create_table_parser)
# Validate the provided schema-override options
def _validate_override(val):
"""Make sure the input is in the format column_name=type"""
err = ('Invalid override expression [{}]. The proper format is '
'"column_name=value_type"').format(val)
if '=' not in val:
raise athena_create_table_parser.error(err)
if len(val.split('=')) != 2:
raise athena_create_table_parser.error(err)
athena_create_table_parser.add_argument(
'--schema-override',
nargs='+',
help=(
'Value types to override with new types in the log schema. '
'The provided input should be space-separated '
'directives like "column_name=value_type"'
),
action=UniqueSetAction,
default=set(),
type=_validate_override
)
@classmethod
def _setup_athena_rebuild_subparser(cls, subparsers):
"""
Add the athena rebuild-partitions subparser:
$ manage.py athena rebuild-partitions [options]
"""
athena_rebuild_parser = generate_subparser(
subparsers,
'rebuild-partitions',
description='Rebuild the partitions for an Athena table',
subcommand=True
)
set_parser_epilog(
athena_rebuild_parser,
epilog=(
'''\
Examples:
manage.py athena rebuild-partitions \\
--bucket s3.bucket.name \\
--table-name my_athena_table
'''
)
)
cls._add_default_athena_args(athena_rebuild_parser)
@staticmethod
def _setup_athena_drop_all_subparser(subparsers):
"""Add the athena drop-all-tables subparser: manage.py athena drop-all-tables"""
generate_subparser(
subparsers,
'drop-all-tables',
description='Drop all tables from an Athena database',
subcommand=True
)
@staticmethod
def _add_default_athena_args(athena_parser):
"""Adds the default required arguments for athena subcommands (bucket and table)"""
athena_parser.add_argument(
'-b', '--bucket',
help=(
'Name of the S3 bucket where log data is located. If not supplied, default will '
'be "<prefix>-streamalert-data"'
)
)
athena_parser.add_argument(
'-t', '--table-name',
help=(
'Name of the Athena table to create. '
'This must be a type of log defined in logs.json'
),
required=True
)
@classmethod
def handler(cls, options, config):
"""Main Athena handler
Args:
options (argparse.Namespace): The parsed args passed from the CLI
config (CLIConfig): Loaded StreamAlert config
Returns:
bool: False if errors occurred, True otherwise
"""
if options.subcommand == 'rebuild-partitions':
return rebuild_partitions(
options.table_name,
options.bucket,
config)
if options.subcommand == 'drop-all-tables':
return drop_all_tables(config)
if options.subcommand == 'create-table':
return create_table(
options.table_name,
options.bucket,
config,
options.schema_override
)
def get_athena_client(config):
"""Get an athena client using the current config settings
Args:
config (CLIConfig): Loaded StreamAlert config
Returns:
AthenaClient: instantiated client for performing athena actions
"""
prefix = config['global']['account']['prefix']
athena_config = config['lambda']['athena_partition_refresh_config']
db_name = get_database_name(config)
# Get the S3 bucket to store Athena query results
results_bucket = athena_config.get(
'results_bucket',
's3://{}-streamalert-athena-results'.format(prefix)
)
return AthenaClient(
db_name,
results_bucket,
'streamalert_cli',
region=config['global']['account']['region']
)
def rebuild_partitions(table, bucket, config):
"""Rebuild an Athena table's partitions
Steps:
- Get the list of current partitions
- Destroy existing table
- Re-create tables
- Re-create partitions
Args:
table (str): The name of the table being rebuilt
bucket (str): The s3 bucket to be used as the location for Athena data
table_type (str): The type of table being refreshed
Types of 'data' and 'alert' are accepted, but only 'data' is implemented
config (CLIConfig): Loaded StreamAlert config
Returns:
bool: False if errors occurred, True otherwise
"""
sanitized_table_name = FirehoseClient.firehose_log_name(table)
athena_client = get_athena_client(config)
# Get the current set of partitions
partitions = athena_client.get_table_partitions(sanitized_table_name)
if not partitions:
LOGGER.info('No partitions to rebuild for %s, nothing to do', sanitized_table_name)
return False
# Drop the table
LOGGER.info('Dropping table %s', sanitized_table_name)
if not athena_client.drop_table(sanitized_table_name):
return False
LOGGER.info('Creating table %s', sanitized_table_name)
# Re-create the table with previous partitions
if not create_table(table, bucket, config):
return False
new_partitions_statements = helpers.add_partition_statements(
partitions, bucket, sanitized_table_name)
LOGGER.info('Creating total %d new partitions for %s', len(partitions), sanitized_table_name)
for idx, statement in enumerate(new_partitions_statements):
success = athena_client.run_query(query=statement)
LOGGER.info('Rebuilt partitions part %d', idx+1)
if not success:
LOGGER.error('Error re-creating new partitions for %s', sanitized_table_name)
write_partitions_statements(new_partitions_statements, sanitized_table_name)
return False
LOGGER.info('Successfully rebuilt all partitions for %s', sanitized_table_name)
return True
def write_partitions_statements(statements, sanitized_table_name):
"""Write partitions statements to a file if re-creating new partitions failed"""
file_name = 'partitions_{}.txt'.format(sanitized_table_name)
LOGGER.error(
'Rebuild partitions failed, writing to local file with name %s',
file_name
)
with open(file_name, 'w') as partition_file:
partition_file.write(statements)
def drop_all_tables(config):
"""Drop all 'streamalert' Athena tables
Used when cleaning up an existing deployment
Args:
config (CLIConfig): Loaded StreamAlert config
Returns:
bool: False if errors occurred, True otherwise
"""
if not continue_prompt(message='Are you sure you want to drop all Athena tables?'):
return False
athena_client = get_athena_client(config)
if not athena_client.drop_all_tables():
LOGGER.error('Failed to drop one or more tables from database: %s', athena_client.database)
return False
LOGGER.info('Successfully dropped all tables from database: %s', athena_client.database)
return True
def _construct_create_table_statement(schema, table_name, bucket, file_format='parquet'):
"""Convert a dictionary based Athena schema to a Hive DDL statement
Args:
schema (dict): The sanitized Athena schema
table_name (str): The name of the Athena table to create
bucket (str): The S3 bucket containing the data
Returns:
str: The Hive DDL CREATE TABLE expression
"""
# Construct the main Athena Schema
schema_statement = []
for key_name in sorted(schema.keys()):
key_type = schema[key_name]
if isinstance(key_type, str):
schema_statement.append('{0} {1}'.format(key_name, key_type))
# Account for nested structs
elif isinstance(key_type, dict):
struct_schema = ', '.join(
'{0}:{1}'.format(sub_key, key_type[sub_key])
for sub_key in sorted(key_type.keys())
)
schema_statement.append('{0} struct<{1}>'.format(key_name, struct_schema))
return CREATE_TABLE_STATEMENT.format(
table_name=table_name,
schema=', '.join(schema_statement),
file_format=STORE_FORMAT_PARQUET if file_format == 'parquet' else STORE_FORMAT_JSON,
bucket=bucket)
def create_table(table, bucket, config, schema_override=None):
"""Create a 'streamalert' Athena table
Args:
table (str): The name of the table being rebuilt
bucket (str): The s3 bucket to be used as the location for Athena data
table_type (str): The type of table being refreshed
config (CLIConfig): Loaded StreamAlert config
schema_override (set): An optional set of key=value pairs to be used for
overriding the configured column_name=value_type.
Returns:
bool: False if errors occurred, True otherwise
"""
enabled_logs = FirehoseClient.load_enabled_log_sources(
config['global']['infrastructure']['firehose'],
config['logs']
)
# Convert special characters in schema name to underscores
sanitized_table_name = FirehoseClient.firehose_log_name(table)
# Check that the log type is enabled via Firehose
if sanitized_table_name != 'alerts' and sanitized_table_name not in enabled_logs:
LOGGER.error('Table name %s missing from configuration or '
'is not enabled.', sanitized_table_name)
return False
athena_client = get_athena_client(config)
config_data_bucket = firehose_data_bucket(config)
if not config_data_bucket:
LOGGER.error('The \'firehose\' module is not enabled in global.json')
return False
# Check if the table exists
if athena_client.check_table_exists(sanitized_table_name):
LOGGER.info('The \'%s\' table already exists.', sanitized_table_name)
return True
if table == 'alerts':
# get a fake alert so we can get the keys needed and their types
alert = Alert('temp_rule_name', {}, {})
output = alert.output_dict()
schema = record_to_schema(output)
athena_schema = helpers.logs_schema_to_athena_schema(schema)
# Use the bucket if supplied, otherwise use the default alerts bucket
bucket = bucket or firehose_alerts_bucket(config)
query = _construct_create_table_statement(
schema=athena_schema,
table_name=table,
bucket=bucket,
file_format=get_data_file_format(config)
)
else: # all other tables are log types
# Use the bucket if supplied, otherwise use the default data bucket
bucket = bucket or config_data_bucket
log_info = config['logs'][table.replace('_', ':', 1)]
schema = dict(log_info['schema'])
sanitized_schema = FirehoseClient.sanitize_keys(schema)
athena_schema = helpers.logs_schema_to_athena_schema(sanitized_schema)
# Add envelope keys to Athena Schema
configuration_options = log_info.get('configuration')
if configuration_options:
envelope_keys = configuration_options.get('envelope_keys')
if envelope_keys:
sanitized_envelope_key_schema = FirehoseClient.sanitize_keys(envelope_keys)
# Note: this key is wrapped in backticks to be Hive compliant
athena_schema['`streamalert:envelope_keys`'] = helpers.logs_schema_to_athena_schema(
sanitized_envelope_key_schema)
# Handle Schema overrides
# This is useful when an Athena schema needs to differ from the normal log schema
if schema_override:
for override in schema_override:
column_name, column_type = override.split('=')
# Columns are escaped to avoid Hive issues with special characters
column_name = '`{}`'.format(column_name)
if column_name in athena_schema:
athena_schema[column_name] = column_type
LOGGER.info('Applied schema override: %s:%s', column_name, column_type)
else:
LOGGER.error(
'Schema override column %s not found in Athena Schema, skipping',
column_name
)
query = _construct_create_table_statement(
schema=athena_schema,
table_name=sanitized_table_name,
bucket=bucket,
file_format=get_data_file_format(config)
)
success = athena_client.run_query(query=query)
if not success:
LOGGER.error('The %s table could not be created', sanitized_table_name)
return False
# Update the CLI config
if table != 'alerts' and bucket != config_data_bucket:
# Only add buckets to the config if they are not one of the default/configured buckets
# Ensure 'buckets' exists in the config (since it is not required)
config['lambda']['athena_partition_refresh_config']['buckets'] = (
config['lambda']['athena_partition_refresh_config'].get('buckets', {})
)
if bucket not in config['lambda']['athena_partition_refresh_config']['buckets']:
config['lambda']['athena_partition_refresh_config']['buckets'][bucket] = 'data'
config.write()
LOGGER.info('The %s table was successfully created!', sanitized_table_name)
return True
def create_log_tables(config):
"""Create all tables needed for historical search
Args:
config (CLIConfig): Loaded StreamAlert config
Returns:
bool: False if errors occurred, True otherwise
"""
if not config['global']['infrastructure'].get('firehose', {}).get('enabled'):
return True
firehose_config = config['global']['infrastructure']['firehose']
firehose_s3_bucket_suffix = firehose_config.get('s3_bucket_suffix', 'streamalert-data')
firehose_s3_bucket_name = '{}-{}'.format(config['global']['account']['prefix'],
firehose_s3_bucket_suffix)
enabled_logs = FirehoseClient.load_enabled_log_sources(
config['global']['infrastructure']['firehose'],
config['logs']
)
for log_stream_name in enabled_logs:
if not create_table(log_stream_name, firehose_s3_bucket_name, config):
return False
return True
| 0 | 4,886 | 23 |
329309c4c9c1629d0252e323492359c67088df37 | 1,937 | py | Python | aws_lambda_builders/workflows/ruby_bundler/actions.py | txase/aws-lambda-builders | c8f2bef73fd7c7943d7c4d54f1c11d3625b5c596 | [
"Apache-2.0"
] | 1 | 2019-08-25T18:41:28.000Z | 2019-08-25T18:41:28.000Z | aws_lambda_builders/workflows/ruby_bundler/actions.py | txase/aws-lambda-builders | c8f2bef73fd7c7943d7c4d54f1c11d3625b5c596 | [
"Apache-2.0"
] | null | null | null | aws_lambda_builders/workflows/ruby_bundler/actions.py | txase/aws-lambda-builders | c8f2bef73fd7c7943d7c4d54f1c11d3625b5c596 | [
"Apache-2.0"
] | null | null | null | """
Actions for Ruby dependency resolution with Bundler
"""
import logging
from aws_lambda_builders.actions import BaseAction, Purpose, ActionFailedError
from .bundler import BundlerExecutionError
LOG = logging.getLogger(__name__)
class RubyBundlerInstallAction(BaseAction):
"""
A Lambda Builder Action which runs bundle install in order to build a full Gemfile.lock
"""
NAME = 'RubyBundle'
DESCRIPTION = "Resolving dependencies using Bundler"
PURPOSE = Purpose.RESOLVE_DEPENDENCIES
class RubyBundlerVendorAction(BaseAction):
"""
A Lambda Builder Action which vendors dependencies to the vendor/bundle directory.
"""
NAME = 'RubyBundleDeployment'
DESCRIPTION = "Package dependencies for deployment."
PURPOSE = Purpose.RESOLVE_DEPENDENCIES
| 32.283333 | 91 | 0.672173 | """
Actions for Ruby dependency resolution with Bundler
"""
import logging
from aws_lambda_builders.actions import BaseAction, Purpose, ActionFailedError
from .bundler import BundlerExecutionError
LOG = logging.getLogger(__name__)
class RubyBundlerInstallAction(BaseAction):
    """Lambda Builder action that runs ``bundle install`` to build a full Gemfile.lock."""
    NAME = 'RubyBundle'
    DESCRIPTION = "Resolving dependencies using Bundler"
    PURPOSE = Purpose.RESOLVE_DEPENDENCIES

    def __init__(self, source_dir, subprocess_bundler):
        """Store the project directory and the Bundler subprocess wrapper."""
        super(RubyBundlerInstallAction, self).__init__()
        self.source_dir = source_dir
        self.subprocess_bundler = subprocess_bundler

    def execute(self):
        """Run ``bundle install`` in source_dir, skipping dev/test groups.

        Raises:
            ActionFailedError: when Bundler exits unsuccessfully.
        """
        try:
            LOG.debug("Running bundle install in %s", self.source_dir)
            install_args = ['install', '--without', 'development', 'test']
            self.subprocess_bundler.run(install_args, cwd=self.source_dir)
        except BundlerExecutionError as ex:
            raise ActionFailedError(str(ex))
class RubyBundlerVendorAction(BaseAction):
    """Lambda Builder action that vendors dependencies into vendor/bundle for deployment."""
    NAME = 'RubyBundleDeployment'
    DESCRIPTION = "Package dependencies for deployment."
    PURPOSE = Purpose.RESOLVE_DEPENDENCIES

    def __init__(self, source_dir, subprocess_bundler):
        """Store the project directory and the Bundler subprocess wrapper."""
        super(RubyBundlerVendorAction, self).__init__()
        self.source_dir = source_dir
        self.subprocess_bundler = subprocess_bundler

    def execute(self):
        """Run ``bundle install --deployment`` in source_dir.

        Raises:
            ActionFailedError: when Bundler exits unsuccessfully.
        """
        try:
            LOG.debug("Running bundle install --deployment in %s", self.source_dir)
            deployment_args = ['install', '--deployment', '--without', 'development', 'test']
            self.subprocess_bundler.run(deployment_args, cwd=self.source_dir)
        except BundlerExecutionError as ex:
            raise ActionFailedError(str(ex))
| 1,034 | 0 | 108 |
1d627793567187279c32442c0d889cc8d1c094ef | 8,939 | py | Python | networkapi/infrastructure/ip_subnet_utils.py | brunodevel/GloboNetworkAPI | ea8eebc0337636f9250e628cc392514934db8edd | [
"Apache-2.0"
] | null | null | null | networkapi/infrastructure/ip_subnet_utils.py | brunodevel/GloboNetworkAPI | ea8eebc0337636f9250e628cc392514934db8edd | [
"Apache-2.0"
] | null | null | null | networkapi/infrastructure/ip_subnet_utils.py | brunodevel/GloboNetworkAPI | ea8eebc0337636f9250e628cc392514934db8edd | [
"Apache-2.0"
] | null | null | null | # -*- coding:utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import math
def network_mask_from_cidr_mask(cidr_mask):
    '''Compute the dotted-quad network mask for a CIDR prefix length.

    @param cidr_mask: the prefix length (number of leading one bits, 0-32).
    @return: tuple with the four mask octets: (oct1, oct2, oct3, oct4).
    '''
    # Shift a full 32-bit mask left so only the top ``cidr_mask`` bits remain set
    mask_bits = 0xFFFFFFFF << (32 - cidr_mask)
    return tuple((mask_bits >> shift) & 0xFF for shift in (24, 16, 8, 0))
def is_subnetwork(network_address_01, network_address_02):
    '''Check whether network_address_01 lies inside network_address_02.

    @param network_address_01: octet tuple in the form (oct1, oct2, oct3, oct4).
    @param network_address_02: octet-plus-prefix tuple in the form
        (oct1, oct2, oct3, oct4, block).
    @return: True when network_address_01 is a subnetwork of
        network_address_02, False otherwise.
    '''
    # Guard clauses: missing or malformed addresses are never subnetworks
    if network_address_01 is None or network_address_02 is None:
        return False
    if len(network_address_01) < 4:
        return False
    if len(network_address_02) != 5:
        return False
    candidate_mask = network_mask_from_cidr_mask(network_address_02[4])
    masked_address = _applyNetmask(network_address_01, candidate_mask)
    return network_address_02[0:4] == masked_address
def is_valid_ip(address):
"""Verifica se address é um endereço ip válido."""
if address is None:
return address
pattern = r"\b(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\b"
return re.match(pattern, address)
#=========================================================================
# Function to calculate num_hosts by prefix:
#
# IPV4:
# 2^(32-p) = num_hosts
# IPV6:
# 2^(128-p) = num_hosts
#
# where 'p' is, for example, 24, 32 (x.x.x.x/32)...
#
# so, to calculate prefix by number of hosts:
#
# IPV4:
# 32 - logarithm(num_hosts, 2) = p
# IPV6:
# 128 - logarithm(num_hosts, 2) = p
#
# where 'num_hosts' is the number of hosts expected
#=========================================================================
MAX_IPV4_HOSTS = 4294967296
MAX_IPV6_HOSTS = 340282366920938463463374607431768211456
if __name__ == '__main__':
print get_prefix_IPV4(17)
print get_prefix_IPV4(33)
print get_prefix_IPV4(255)
# IPV4
#=========================================================================
# /0 : 4294967296 /11 : 2097152 /22 : 1024
# /1 : 2147483648 /12 : 1048576 /23 : 512
# /2 : 1073741824 /13 : 524288 /24 : 256
# /3 : 536870912 /14 : 262144 /25 : 128
# /4 : 268435456 /15 : 131072 /26 : 64
# /5 : 134217728 /16 : 65536 /27 : 32
# /6 : 67108864 /17 : 32768 /28 : 16
# /7 : 33554432 /18 : 16384 /29 : 8
# /8 : 16777216 /19 : 8192 /30 : 4
# /9 : 8388608 /20 : 4096 /31 : 2
# /10 : 4194304 /21 : 2048 /32 : 1
#=========================================================================
# IPV6
#=========================================================================
# /0 : 340282366920938463463374607431768211456 /11 : 166153499473114484112975882535043072 /22 : 81129638414606681695789005144064
# /1 : 170141183460469231731687303715884105728 /12 : 83076749736557242056487941267521536 /23 : 40564819207303340847894502572032
# /2 : 85070591730234615865843651857942052864 /13 : 41538374868278621028243970633760768 /24 : 20282409603651670423947251286016
# /3 : 42535295865117307932921825928971026432 /14 : 20769187434139310514121985316880384 /25 : 10141204801825835211973625643008
# /4 : 21267647932558653966460912964485513216 /15 : 10384593717069655257060992658440192 /26 : 5070602400912917605986812821504
# /5 : 10633823966279326983230456482242756608 /16 : 5192296858534827628530496329220096 /27 : 2535301200456458802993406410752
# /6 : 5316911983139663491615228241121378304 /17 : 2596148429267413814265248164610048 /28 : 1267650600228229401496703205376
# /7 : 2658455991569831745807614120560689152 /18 : 1298074214633706907132624082305024 /29 : 633825300114114700748351602688
# /8 : 1329227995784915872903807060280344576 /19 : 649037107316853453566312041152512 /30 : 316912650057057350374175801344
# /9 : 664613997892457936451903530140172288 /20 : 324518553658426726783156020576256 /31 : 158456325028528675187087900672
# /10 : 332306998946228968225951765070086144 /21 : 162259276829213363391578010288128 /32 : 79228162514264337593543950336
#
# /33 : 39614081257132168796771975168 /44 : 19342813113834066795298816 /55 : 9444732965739290427392
# /34 : 19807040628566084398385987584 /45 : 9671406556917033397649408 /56 : 4722366482869645213696
# /35 : 9903520314283042199192993792 /46 : 4835703278458516698824704 /57 : 2361183241434822606848
# /36 : 4951760157141521099596496896 /47 : 2417851639229258349412352 /58 : 1180591620717411303424
# /37 : 2475880078570760549798248448 /48 : 1208925819614629174706176 /59 : 590295810358705651712
# /38 : 1237940039285380274899124224 /49 : 604462909807314587353088 /60 : 295147905179352825856
# /39 : 618970019642690137449562112 /50 : 302231454903657293676544 /61 : 147573952589676412928
# /40 : 309485009821345068724781056 /51 : 151115727451828646838272 /62 : 73786976294838206464
# /41 : 154742504910672534362390528 /52 : 75557863725914323419136 /63 : 36893488147419103232
# /42 : 77371252455336267181195264 /53 : 37778931862957161709568 /64 : 18446744073709551616
# /43 : 38685626227668133590597632 /54 : 18889465931478580854784 /65 : 9223372036854775808
#
# /66 : 4611686018427387904 /77 : 2251799813685248 /88 : 1099511627776 /99 : 536870912
# /67 : 2305843009213693952 /78 : 1125899906842624 /89 : 549755813888 /100 : 268435456
# /68 : 1152921504606846976 /79 : 562949953421312 /90 : 274877906944 /101 : 134217728
# /69 : 576460752303423488 /80 : 281474976710656 /91 : 137438953472 /102 : 67108864
# /70 : 288230376151711744 /81 : 140737488355328 /92 : 68719476736 /103 : 33554432
# /71 : 144115188075855872 /82 : 70368744177664 /93 : 34359738368 /104 : 16777216
# /72 : 72057594037927936 /83 : 35184372088832 /94 : 17179869184 /105 : 8388608
# /73 : 36028797018963968 /84 : 17592186044416 /95 : 8589934592 /106 : 4194304
# /74 : 18014398509481984 /85 : 8796093022208 /96 : 4294967296 /107 : 2097152
# /75 : 9007199254740992 /86 : 4398046511104 /97 : 2147483648 /108 : 1048576
# /76 : 4503599627370496 /87 : 2199023255552 /98 : 1073741824 /109 : 524288
#
# /110 : 262144 /122 : 64
# /111 : 131072 /123 : 32
# /112 : 65536 /124 : 16
# /113 : 32768 /125 : 8
# /114 : 16384 /126 : 4
# /115 : 8192 /127 : 2
# /116 : 4096 /128 : 1
# /117 : 2048
# /118 : 1024
# /119 : 512
# /120 : 256
# /121 : 128
#=========================================================================
| 50.219101 | 180 | 0.602752 | # -*- coding:utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import math
def network_mask_from_cidr_mask(cidr_mask):
'''Calcula a máscara de uma rede a partir do número do bloco do endereço.
@param cidr_mask: Valor do bloco do endereço.
@return: Tuple com o octeto 1, 2, 3, 4 da máscara: (oct1,oct2,oct3,oct4).
'''
address = 0xFFFFFFFF
address = address << (32 - cidr_mask)
return ((address >> 24) & 0xFF, (address >> 16) & 0xFF, (address >> 8) & 0xFF, (address >> 0) & 0xFF)
def _applyNetmask(host, mask):
return (host[0] & mask[0], host[1] & mask[1], host[2] & mask[2], host[3] & mask[3])
def is_subnetwork(network_address_01, network_address_02):
'''Verifica se o endereço network_address_01 é sub-rede do endereço network_address_02.
@param network_address_01: Uma tuple com os octetos do endereço, formato: (oct1, oct2, oct3, oct5)
@param network_address_02: Uma tuple com os octetos do endereço e o bloco, formato: (oct1, oct2, oct3, oct5, bloco)
@return: True se network_address_01 é sub-rede de network_address_02. False caso contrário.
'''
if network_address_01 is None or network_address_02 is None:
return False
if len(network_address_01) < 4 or len(network_address_02) != 5:
return False
network_mask_02 = network_mask_from_cidr_mask(network_address_02[4])
return network_address_02[0:4] == _applyNetmask(network_address_01, network_mask_02)
def is_valid_ip(address):
"""Verifica se address é um endereço ip válido."""
if address is None:
return address
pattern = r"\b(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\b"
return re.match(pattern, address)
#=========================================================================
# Function to calculate num_hosts by prefix:
#
# IPV4:
# 2^(32-p) = num_hosts
# IPV6:
# 2^(128-p) = num_hosts
#
# where 'p' is, for example, 24, 32 (x.x.x.x/32)...
#
# so, to calculate prefix by number of hosts:
#
# IPV4:
# 32 - logarithm(num_hosts, 2) = p
# IPV6:
# 128 - logarithm(num_hosts, 2) = p
#
# where 'num_hosts' is the number of hosts expected
#=========================================================================
MAX_IPV4_HOSTS = 4294967296
MAX_IPV6_HOSTS = 340282366920938463463374607431768211456
def get_prefix_IPV4(num_hosts):
prefix = int(32 - math.log(float(num_hosts), 2))
return prefix
def get_prefix_IPV6(num_hosts):
prefix = int(128 - math.log(float(num_hosts), 2))
return prefix
if __name__ == '__main__':
print get_prefix_IPV4(17)
print get_prefix_IPV4(33)
print get_prefix_IPV4(255)
# IPV4
#=========================================================================
# /0 : 4294967296 /11 : 2097152 /22 : 1024
# /1 : 2147483648 /12 : 1048576 /23 : 512
# /2 : 1073741824 /13 : 524288 /24 : 256
# /3 : 536870912 /14 : 262144 /25 : 128
# /4 : 268435456 /15 : 131072 /26 : 64
# /5 : 134217728 /16 : 65536 /27 : 32
# /6 : 67108864 /17 : 32768 /28 : 16
# /7 : 33554432 /18 : 16384 /29 : 8
# /8 : 16777216 /19 : 8192 /30 : 4
# /9 : 8388608 /20 : 4096 /31 : 2
# /10 : 4194304 /21 : 2048 /32 : 1
#=========================================================================
# IPV6
#=========================================================================
# /0 : 340282366920938463463374607431768211456 /11 : 166153499473114484112975882535043072 /22 : 81129638414606681695789005144064
# /1 : 170141183460469231731687303715884105728 /12 : 83076749736557242056487941267521536 /23 : 40564819207303340847894502572032
# /2 : 85070591730234615865843651857942052864 /13 : 41538374868278621028243970633760768 /24 : 20282409603651670423947251286016
# /3 : 42535295865117307932921825928971026432 /14 : 20769187434139310514121985316880384 /25 : 10141204801825835211973625643008
# /4 : 21267647932558653966460912964485513216 /15 : 10384593717069655257060992658440192 /26 : 5070602400912917605986812821504
# /5 : 10633823966279326983230456482242756608 /16 : 5192296858534827628530496329220096 /27 : 2535301200456458802993406410752
# /6 : 5316911983139663491615228241121378304 /17 : 2596148429267413814265248164610048 /28 : 1267650600228229401496703205376
# /7 : 2658455991569831745807614120560689152 /18 : 1298074214633706907132624082305024 /29 : 633825300114114700748351602688
# /8 : 1329227995784915872903807060280344576 /19 : 649037107316853453566312041152512 /30 : 316912650057057350374175801344
# /9 : 664613997892457936451903530140172288 /20 : 324518553658426726783156020576256 /31 : 158456325028528675187087900672
# /10 : 332306998946228968225951765070086144 /21 : 162259276829213363391578010288128 /32 : 79228162514264337593543950336
#
# /33 : 39614081257132168796771975168 /44 : 19342813113834066795298816 /55 : 9444732965739290427392
# /34 : 19807040628566084398385987584 /45 : 9671406556917033397649408 /56 : 4722366482869645213696
# /35 : 9903520314283042199192993792 /46 : 4835703278458516698824704 /57 : 2361183241434822606848
# /36 : 4951760157141521099596496896 /47 : 2417851639229258349412352 /58 : 1180591620717411303424
# /37 : 2475880078570760549798248448 /48 : 1208925819614629174706176 /59 : 590295810358705651712
# /38 : 1237940039285380274899124224 /49 : 604462909807314587353088 /60 : 295147905179352825856
# /39 : 618970019642690137449562112 /50 : 302231454903657293676544 /61 : 147573952589676412928
# /40 : 309485009821345068724781056 /51 : 151115727451828646838272 /62 : 73786976294838206464
# /41 : 154742504910672534362390528 /52 : 75557863725914323419136 /63 : 36893488147419103232
# /42 : 77371252455336267181195264 /53 : 37778931862957161709568 /64 : 18446744073709551616
# /43 : 38685626227668133590597632 /54 : 18889465931478580854784 /65 : 9223372036854775808
#
# /66 : 4611686018427387904 /77 : 2251799813685248 /88 : 1099511627776 /99 : 536870912
# /67 : 2305843009213693952 /78 : 1125899906842624 /89 : 549755813888 /100 : 268435456
# /68 : 1152921504606846976 /79 : 562949953421312 /90 : 274877906944 /101 : 134217728
# /69 : 576460752303423488 /80 : 281474976710656 /91 : 137438953472 /102 : 67108864
# /70 : 288230376151711744 /81 : 140737488355328 /92 : 68719476736 /103 : 33554432
# /71 : 144115188075855872 /82 : 70368744177664 /93 : 34359738368 /104 : 16777216
# /72 : 72057594037927936 /83 : 35184372088832 /94 : 17179869184 /105 : 8388608
# /73 : 36028797018963968 /84 : 17592186044416 /95 : 8589934592 /106 : 4194304
# /74 : 18014398509481984 /85 : 8796093022208 /96 : 4294967296 /107 : 2097152
# /75 : 9007199254740992 /86 : 4398046511104 /97 : 2147483648 /108 : 1048576
# /76 : 4503599627370496 /87 : 2199023255552 /98 : 1073741824 /109 : 524288
#
# /110 : 262144 /122 : 64
# /111 : 131072 /123 : 32
# /112 : 65536 /124 : 16
# /113 : 32768 /125 : 8
# /114 : 16384 /126 : 4
# /115 : 8192 /127 : 2
# /116 : 4096 /128 : 1
# /117 : 2048
# /118 : 1024
# /119 : 512
# /120 : 256
# /121 : 128
#=========================================================================
| 271 | 0 | 75 |
0792965f1c270d48b67717e15eb78c5c0205c783 | 224 | py | Python | 30.11.2019/hamming.py | KruZZy/coderdojo-python | 0f9920de24c0ff8733badb81daed1e590825662c | [
"MIT"
] | null | null | null | 30.11.2019/hamming.py | KruZZy/coderdojo-python | 0f9920de24c0ff8733badb81daed1e590825662c | [
"MIT"
] | null | null | null | 30.11.2019/hamming.py | KruZZy/coderdojo-python | 0f9920de24c0ff8733badb81daed1e590825662c | [
"MIT"
] | null | null | null | from random import randint
n =int(input("n = "))
A =[]
B =[]
for i in range (n):
A.append(randint(1,20))
B.append(randint(1,20))
print(A,B)
dist=0
for i in range (n):
if A[i] != B[i]:
dist+=1
print(dist)
| 16 | 27 | 0.558036 | from random import randint
n =int(input("n = "))
A =[]
B =[]
for i in range (n):
A.append(randint(1,20))
B.append(randint(1,20))
print(A,B)
dist=0
for i in range (n):
if A[i] != B[i]:
dist+=1
print(dist)
| 0 | 0 | 0 |
81d43ab85ec341aaf5813b683d0d34aa09a3a77c | 582 | py | Python | platform_info.py | ljm7b2/OBSTstandardDeviation | 5eab7fecd7843a489d6ef5381e28a65aa24853c6 | [
"MIT"
] | null | null | null | platform_info.py | ljm7b2/OBSTstandardDeviation | 5eab7fecd7843a489d6ef5381e28a65aa24853c6 | [
"MIT"
] | null | null | null | platform_info.py | ljm7b2/OBSTstandardDeviation | 5eab7fecd7843a489d6ef5381e28a65aa24853c6 | [
"MIT"
] | null | null | null | import platform
# method copied from STL, not original work of author
| 48.5 | 97 | 0.671821 | import platform
# method copied from STL, not original work of author
def get_platform_info(output_file):
print("\nSYSTEM INFORMATION", file=output_file)
print("{:<20}{:>5}".format('system:', platform.system()), file=output_file)
print("{:<20}{:>5}".format('node:', platform.node()), file=output_file)
print("{:<20}{:>5}".format('version:', platform.version()), file=output_file)
print("{:<20}{:>5}".format('processor:', platform.processor()), file=output_file)
print("{:<20}{:>5}".format("python compiler:", platform.python_compiler()), file=output_file)
| 488 | 0 | 23 |
7d4736a6d2163bf79bf79d70e9effb9a771809e5 | 7,982 | py | Python | pbce/tml/examples/flowvisor/ryu_app.py | kit-tm/gcmi-exp | 34d850639a079bf73428bd70ac28cc972d030a7c | [
"BSD-2-Clause"
] | null | null | null | pbce/tml/examples/flowvisor/ryu_app.py | kit-tm/gcmi-exp | 34d850639a079bf73428bd70ac28cc972d030a7c | [
"BSD-2-Clause"
] | null | null | null | pbce/tml/examples/flowvisor/ryu_app.py | kit-tm/gcmi-exp | 34d850639a079bf73428bd70ac28cc972d030a7c | [
"BSD-2-Clause"
] | 1 | 2019-11-18T11:35:36.000Z | 2019-11-18T11:35:36.000Z | # Copyright (C) 2011 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
An OpenFlow 1.0 L2 learning switch implementation.
"""
import collections
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER, set_ev_cls
from ryu.lib.packet import arp, ether_types, ethernet, icmp, ipv4, packet, tcp
from ryu.ofproto import inet, ofproto_v1_0, ofproto_v1_0_parser
IpPort = collections.namedtuple('IpPort', 'ip port')
ether_type_names = {
ether_types.ETH_TYPE_IP: "IPv4",
ether_types.ETH_TYPE_IPV6: "IPv6",
ether_types.ETH_TYPE_LLDP: "LLDP",
ether_types.ETH_TYPE_ARP: "ARP"
}
arp_opcode_names = {arp.ARP_REPLY: "Reply", arp.ARP_REQUEST: "Request"}
ip_proto_names = {
inet.IPPROTO_ICMP: "ICMP",
inet.IPPROTO_ICMPV6: "ICMPv6",
inet.IPPROTO_TCP: "TCP",
inet.IPPROTO_UDP: "UDP"
}
| 40.313131 | 79 | 0.640817 | # Copyright (C) 2011 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
An OpenFlow 1.0 L2 learning switch implementation.
"""
import collections
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER, set_ev_cls
from ryu.lib.packet import arp, ether_types, ethernet, icmp, ipv4, packet, tcp
from ryu.ofproto import inet, ofproto_v1_0, ofproto_v1_0_parser
IpPort = collections.namedtuple('IpPort', 'ip port')
ether_type_names = {
ether_types.ETH_TYPE_IP: "IPv4",
ether_types.ETH_TYPE_IPV6: "IPv6",
ether_types.ETH_TYPE_LLDP: "LLDP",
ether_types.ETH_TYPE_ARP: "ARP"
}
def ether_type_name(ethertype):
if ethertype in ether_type_names:
return ether_type_names[ethertype]
return "UNKNOWN"
arp_opcode_names = {arp.ARP_REPLY: "Reply", arp.ARP_REQUEST: "Request"}
def arp_opcode_name(opcode):
if opcode in arp_opcode_names:
return arp_opcode_names[opcode]
return "UNKNOWN"
ip_proto_names = {
inet.IPPROTO_ICMP: "ICMP",
inet.IPPROTO_ICMPV6: "ICMPv6",
inet.IPPROTO_TCP: "TCP",
inet.IPPROTO_UDP: "UDP"
}
def ip_proto_name(proto):
if proto in ip_proto_names:
return ip_proto_names[proto]
return "UNKNOWN"
class SimpleSwitch(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_0.OFP_VERSION]
def __init__(self, *args, **kwargs):
super(SimpleSwitch, self).__init__(*args, **kwargs)
# { datapath_id: { mac_address: port } }
self.mac_to_port = {}
# { datapath_id: { ip_address: port } }
self.ip_to_port = {}
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def handle_packet_in(self, event: ofp_event.EventOFPPacketIn):
packet_in = event.msg # type: ofproto_v1_0_parser.OFPPacketIn
datapath_id = packet_in.datapath.id
frame = packet.Packet(packet_in.data)
eth_header = frame.get_protocol(ethernet.ethernet)
self.mac_to_port.setdefault(datapath_id,
{})[eth_header.src] = packet_in.in_port
eth_type = eth_header.ethertype
self.logger.info(
"received OFPT_PACKET_IN: buffer_id=0x%x total_len=%d in_port=%s",
packet_in.buffer_id, packet_in.total_len, packet_in.in_port)
self.logger.info(" %s -> %s, ethertype=0x%x (%s)", eth_header.src,
eth_header.dst, eth_type, ether_type_name(eth_type))
if eth_type == ether_types.ETH_TYPE_ARP:
self.handle_arp(packet_in, eth_header, frame.get_protocol(arp.arp))
elif eth_type == ether_types.ETH_TYPE_IP:
self.handle_ipv4(packet_in, frame, eth_header,
frame.get_protocol(ipv4.ipv4))
def handle_arp(self,
packet_in: ofproto_v1_0_parser.OFPPacketIn,
eth_header: ethernet.ethernet,
arp_header: arp.arp):
self.logger.info(" %s -> %s, opcode=0x%x (%s)", arp_header.src_ip,
arp_header.dst_ip, arp_header.opcode,
arp_opcode_name(arp_header.opcode))
out_port = packet_in.datapath.ofproto.OFPP_FLOOD
if arp_header.dst_mac in self.mac_to_port[packet_in.datapath.id]:
out_port = self.mac_to_port[packet_in.datapath.id][
arp_header.dst_mac]
self.forward(packet_in, out_port)
def forward(self, packet_in: ofproto_v1_0_parser.OFPPacketIn, port: int):
data = None
if packet_in.buffer_id == packet_in.datapath.ofproto.OFP_NO_BUFFER:
data = packet_in.data
packet_out = packet_in.datapath.ofproto_parser.OFPPacketOut(
datapath=packet_in.datapath,
buffer_id=packet_in.buffer_id,
in_port=packet_in.in_port,
data=data,
actions=[packet_in.datapath.ofproto_parser.OFPActionOutput(port)])
self.logger.info(
" sending packet_out: output packet on switch port %d", port)
packet_in.datapath.send_msg(packet_out)
def handle_ipv4(self,
packet_in: ofproto_v1_0_parser.OFPPacketIn,
frame: packet.Packet,
eth_header: ethernet.ethernet,
ipv4_header: ipv4.ipv4):
self.logger.info(" %s -> %s, proto=0x%x (%s)", ipv4_header.src,
ipv4_header.dst, ipv4_header.proto,
ip_proto_name(ipv4_header.proto))
datapath_id = packet_in.datapath.id
self.ip_to_port.setdefault(datapath_id,
{})[ipv4_header.src] = packet_in.in_port
if ipv4_header.proto == inet.IPPROTO_TCP:
tcp_header = frame.get_protocol(tcp.tcp)
self.handle_tcp(packet_in, eth_header, ipv4_header, tcp_header)
elif ipv4_header.proto == inet.IPPROTO_ICMP:
icmp_header = frame.get_protocol(icmp.icmp)
self.handle_icmp(packet_in, eth_header, ipv4_header, icmp_header)
def handle_tcp(self,
packet_in: ofproto_v1_0_parser.OFPPacketIn,
eth_header: ethernet.ethernet,
ipv4_header: ipv4.ipv4,
tcp_header: tcp.tcp):
self.logger.info(" %d -> %d", tcp_header.src_port,
tcp_header.dst_port)
datapath = packet_in.datapath
ofproto = datapath.ofproto
out_port = ofproto.OFPP_FLOOD
if ipv4_header.dst in self.ip_to_port[datapath.id]:
out_port = self.ip_to_port[datapath.id][ipv4_header.dst]
match = datapath.ofproto_parser.OFPMatch(
dl_type=ether_types.ETH_TYPE_IP, # doesn't work without this
nw_proto=inet.IPPROTO_TCP,
nw_dst=ipv4_header.dst,
tp_dst=tcp_header.dst_port)
mod = datapath.ofproto_parser.OFPFlowMod(
datapath=datapath,
match=match,
command=ofproto.OFPFC_ADD,
idle_timeout=0,
hard_timeout=0,
priority=ofproto.OFP_DEFAULT_PRIORITY,
buffer_id=packet_in.buffer_id,
actions=[datapath.ofproto_parser.OFPActionOutput(out_port)])
datapath.send_msg(mod)
self.forward(packet_in, out_port)
def handle_icmp(self,
packet_in: ofproto_v1_0_parser.OFPPacketIn,
eth_header: ethernet.ethernet,
ipv4_header: ipv4.ipv4,
icmp_header: icmp.icmp):
out_port = packet_in.datapath.ofproto.OFPP_FLOOD
datapath_id = packet_in.datapath.id
if ipv4_header.dst in self.ip_to_port[datapath_id]:
out_port = self.ip_to_port[datapath_id][ipv4_header.dst]
self.forward(packet_in, out_port)
@set_ev_cls(ofp_event.EventOFPPortStatus, MAIN_DISPATCHER)
def _port_status_handler(self, ev):
msg = ev.msg
reason = msg.reason
port_no = msg.desc.port_no
ofproto = msg.datapath.ofproto
if reason == ofproto.OFPPR_ADD:
self.logger.info("port added %s", port_no)
elif reason == ofproto.OFPPR_DELETE:
self.logger.info("port deleted %s", port_no)
elif reason == ofproto.OFPPR_MODIFY:
self.logger.info("port modified %s", port_no)
else:
self.logger.info("Illeagal port state %s %s", port_no, reason)
| 6,065 | 404 | 92 |
340bd0f34f1475a05d8a08d5a31c0aae250a0e8b | 2,716 | py | Python | 02-am/02-Decision Trees/Decision_tree_3_5.py | Matheusqz/pucpr-ciencia-de-dados | 28a833a902dba41a35dc9d02bc5607a66aca78b0 | [
"MIT"
] | null | null | null | 02-am/02-Decision Trees/Decision_tree_3_5.py | Matheusqz/pucpr-ciencia-de-dados | 28a833a902dba41a35dc9d02bc5607a66aca78b0 | [
"MIT"
] | null | null | null | 02-am/02-Decision Trees/Decision_tree_3_5.py | Matheusqz/pucpr-ciencia-de-dados | 28a833a902dba41a35dc9d02bc5607a66aca78b0 | [
"MIT"
] | null | null | null | # Este exemplo carrega a base Wine da UCI, treina uma Arvore de decisao usando
# holdout e outra usando validacao cruzada com 10 pastas.
# Importa bibliotecas necessarias
import numpy as np
import urllib
from sklearn import tree
from sklearn import model_selection
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.externals.six import StringIO
from sklearn.tree import export_graphviz
from IPython.display import Image
from IPython.display import display
import pydotplus
#from sklearn.model_selection import StratifiedShuffleSplit
# Carrega uma base de dados do UCI
# Exemplo carrega a base Wine
url = "http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data"
raw_data = urllib.request.urlopen(url)
# Carrega arquivo como uma matriz
dataset = np.loadtxt(raw_data, delimiter=",")
# Imprime quantide de instancias e atributos da base
print("Instancias e atributos")
print(dataset.shape)
# Coloca em X os 13 atributos de entrada e em y as classes
# Observe que na base Wine a classe eh primeiro atributo
X = dataset[:,1:13]
y = dataset[:,0]
# EXEMPLO USANDO HOLDOUT
# Holdout -> dividindo a base em treinamento (70%) e teste (30%), estratificada
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=.3, random_state=42, stratify=y)
# declara o classificador
clfa = tree.DecisionTreeClassifier(criterion='entropy')
# treina o classificador
clfa = clfa.fit(X_train, y_train)
# testa usando a base de testes
predicted=clfa.predict(X_test)
# calcula a acuracia na base de teste (taxa de acerto)
score=clfa.score(X_test, y_test)
# calcula a matriz de confusao
matrix = confusion_matrix(y_test, predicted)
# apresenta os resultados
print("\nResultados baseados em Holdout 70/30")
print("Taxa de acerto = %.2f " % score)
print("Matriz de confusao:")
print(matrix)
# EXEMPLO USANDO VALIDACAO CRUZADA
clfb = tree.DecisionTreeClassifier(criterion='entropy')
folds=10
result = model_selection.cross_val_score(clfb, X, y, cv=folds)
print("\nResultados baseados em Validacao Cruzada")
print("Qtde folds: %d:" % folds)
print("Taxa de Acerto: %.2f" % result.mean())
print("Desvio padrao: %.2f" % result.std())
# matriz de confusão da validacao cruzada
Z = model_selection.cross_val_predict(clfb, X, y, cv=folds)
cm=confusion_matrix(y, Z)
print("Matriz de confusao:")
print(cm)
#imprime a arvore gerada
print("\nArvore gerada no experimento baseado em Holdout")
dot_data = StringIO()
export_graphviz(clfa, out_file=dot_data,
filled=True, rounded=True,
special_characters=True)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
im=Image(graph.create_png())
display(im)
| 30.177778 | 98 | 0.765464 | # Este exemplo carrega a base Wine da UCI, treina uma Arvore de decisao usando
# holdout e outra usando validacao cruzada com 10 pastas.
# Importa bibliotecas necessarias
import numpy as np
import urllib
from sklearn import tree
from sklearn import model_selection
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.externals.six import StringIO
from sklearn.tree import export_graphviz
from IPython.display import Image
from IPython.display import display
import pydotplus
#from sklearn.model_selection import StratifiedShuffleSplit
# Carrega uma base de dados do UCI
# Exemplo carrega a base Wine
url = "http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data"
raw_data = urllib.request.urlopen(url)
# Carrega arquivo como uma matriz
dataset = np.loadtxt(raw_data, delimiter=",")
# Imprime quantide de instancias e atributos da base
print("Instancias e atributos")
print(dataset.shape)
# Coloca em X os 13 atributos de entrada e em y as classes
# Observe que na base Wine a classe eh primeiro atributo
X = dataset[:,1:13]
y = dataset[:,0]
# EXEMPLO USANDO HOLDOUT
# Holdout -> dividindo a base em treinamento (70%) e teste (30%), estratificada
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=.3, random_state=42, stratify=y)
# declara o classificador
clfa = tree.DecisionTreeClassifier(criterion='entropy')
# treina o classificador
clfa = clfa.fit(X_train, y_train)
# testa usando a base de testes
predicted=clfa.predict(X_test)
# calcula a acuracia na base de teste (taxa de acerto)
score=clfa.score(X_test, y_test)
# calcula a matriz de confusao
matrix = confusion_matrix(y_test, predicted)
# apresenta os resultados
print("\nResultados baseados em Holdout 70/30")
print("Taxa de acerto = %.2f " % score)
print("Matriz de confusao:")
print(matrix)
# EXEMPLO USANDO VALIDACAO CRUZADA
clfb = tree.DecisionTreeClassifier(criterion='entropy')
folds=10
result = model_selection.cross_val_score(clfb, X, y, cv=folds)
print("\nResultados baseados em Validacao Cruzada")
print("Qtde folds: %d:" % folds)
print("Taxa de Acerto: %.2f" % result.mean())
print("Desvio padrao: %.2f" % result.std())
# matriz de confusão da validacao cruzada
Z = model_selection.cross_val_predict(clfb, X, y, cv=folds)
cm=confusion_matrix(y, Z)
print("Matriz de confusao:")
print(cm)
#imprime a arvore gerada
print("\nArvore gerada no experimento baseado em Holdout")
dot_data = StringIO()
export_graphviz(clfa, out_file=dot_data,
filled=True, rounded=True,
special_characters=True)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
im=Image(graph.create_png())
display(im)
| 0 | 0 | 0 |
b3b8bc5092366ad27804dc5ece6a0fd50b476e50 | 1,032 | py | Python | tests/ops/test_snapshot.py | KarimAED/pennylane | d201dd52def0dfa44efd485e06ea06defda22dc0 | [
"Apache-2.0"
] | null | null | null | tests/ops/test_snapshot.py | KarimAED/pennylane | d201dd52def0dfa44efd485e06ea06defda22dc0 | [
"Apache-2.0"
] | null | null | null | tests/ops/test_snapshot.py | KarimAED/pennylane | d201dd52def0dfa44efd485e06ea06defda22dc0 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018-2022 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the Snapshot operation."""
from pennylane import Snapshot
def test_decomposition():
"""Test the decomposition of the Snapshot operation."""
assert Snapshot.compute_decomposition() == []
assert Snapshot().decomposition() == []
def test_label_method():
"""Test the label method for the Snapshot operation."""
assert Snapshot().label() == "|S|"
assert Snapshot("my_label").label() == "|S|"
| 35.586207 | 74 | 0.73062 | # Copyright 2018-2022 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the Snapshot operation."""
from pennylane import Snapshot
def test_decomposition():
"""Test the decomposition of the Snapshot operation."""
assert Snapshot.compute_decomposition() == []
assert Snapshot().decomposition() == []
def test_label_method():
    """Test the label method for the Snapshot operation."""
    # The label is constant; the optional tag argument is ignored.
    assert Snapshot().label() == "|S|"
    assert Snapshot("my_label").label() == "|S|"
| 0 | 0 | 0 |
3a6d504a6e9d9e570b87e4c72ca1f835859d6e9a | 599 | py | Python | app/modules/base/views.py | thestd/schedule-BOT | f6603cf7f3b1b0b0004b9c445edf271d2c959d11 | [
"MIT"
] | 9 | 2019-06-27T13:56:55.000Z | 2021-01-06T14:37:14.000Z | app/modules/base/views.py | thestd/schedule-BOT | f6603cf7f3b1b0b0004b9c445edf271d2c959d11 | [
"MIT"
] | null | null | null | app/modules/base/views.py | thestd/schedule-BOT | f6603cf7f3b1b0b0004b9c445edf271d2c959d11 | [
"MIT"
] | 3 | 2019-06-25T14:23:27.000Z | 2021-04-28T10:14:58.000Z | from aiogram import types
from app.modules.base.templates import choice_student_text, choice_teacher_text
from app.modules.schedule.consts import query_type
| 27.227273 | 79 | 0.692821 | from aiogram import types
from app.modules.base.templates import choice_student_text, choice_teacher_text
from app.modules.schedule.consts import query_type
def query_type_markup() -> types.InlineKeyboardMarkup:
    """Build the inline keyboard offering the student/teacher query choice.

    One button per row: first the student (group) option, then the teacher
    option, each carrying its query-type callback payload.
    """
    markup = types.InlineKeyboardMarkup()
    button_specs = (
        (choice_student_text, "group"),
        (choice_teacher_text, "teacher"),
    )
    for caption, kind in button_specs:
        markup.add(
            types.InlineKeyboardButton(
                caption,
                callback_data=query_type.new(kind),
            )
        )
    return markup
| 417 | 0 | 23 |
89f4319dc3948b345b2a80284951d3126c2c0a03 | 15,047 | py | Python | entity/tests/test_complex_view.py | syucream/airone | ce3c199f23c595a7c029ee52b57297b3666343e3 | [
"MIT"
] | null | null | null | entity/tests/test_complex_view.py | syucream/airone | ce3c199f23c595a7c029ee52b57297b3666343e3 | [
"MIT"
] | 2 | 2021-02-28T05:04:18.000Z | 2021-05-01T07:00:57.000Z | entity/tests/test_complex_view.py | syucream/airone | ce3c199f23c595a7c029ee52b57297b3666343e3 | [
"MIT"
] | null | null | null | import json
from airone.lib.acl import ACLType
from airone.lib.test import AironeViewTest
from airone.lib.types import AttrTypeStr
from airone.lib.types import AttrTypeArrStr, AttrTypeArrObj
from airone.lib.types import AttrTypeValue
from django.urls import reverse
from entity.models import Entity, EntityAttr
from entry.models import Entry, AttributeValue
from entry import tasks as entry_tasks
from entity import tasks as entity_tasks
from unittest.mock import patch
from unittest.mock import Mock
class ComplexViewTest(AironeViewTest):
"""
This has complex tests that combine multiple requests across the inter-applicational
"""
@patch('entry.tasks.create_entry_attrs.delay', Mock(side_effect=entry_tasks.create_entry_attrs))
@patch('entry.tasks.edit_entry_attrs.delay', Mock(side_effect=entry_tasks.edit_entry_attrs))
@patch('entity.tasks.create_entity.delay', Mock(side_effect=entity_tasks.create_entity))
@patch('entity.tasks.edit_entity.delay', Mock(side_effect=entity_tasks.edit_entity))
def test_add_attr_after_creating_entry(self):
"""
This test executes followings
- create a new Entity(entity) with an EntityAttr(attr)
- create a new Entry for entity
- update entity to append new EntityAttrs(arr-str, arr-obj)
Then, this checks following
- created additional Attributes which are corresponding to the added EntityAttrs
automatically for accessing show page.
- enable to edit entry correctly because #152 is fixed
"""
user = self.admin_login()
# create an Entity
params = {
'name': 'entity',
'note': '',
'is_toplevel': False,
'attrs': [
{'name': 'attr', 'type': str(AttrTypeStr), 'is_delete_in_chain': True,
'is_mandatory': False, 'row_index': '1'},
],
}
resp = self.client.post(reverse('entity:do_create'),
json.dumps(params),
'application/json')
self.assertEqual(resp.status_code, 200)
# get created objects
entity = Entity.objects.get(name='entity')
attr = entity.attrs.get(name='attr')
# create an Entry for the created entity
params = {
'entry_name': 'entry',
'attrs': [
{'id': str(attr.id), 'type': str(AttrTypeStr),
'value': [{'data': 'attr-value', 'index': 0}], 'referral_key': []},
],
}
resp = self.client.post(reverse('entry:do_create', args=[entity.id]),
json.dumps(params),
'application/json')
self.assertEqual(resp.status_code, 200)
# get created entry object
entry = Entry.objects.get(name='entry')
refer_entity = Entity.objects.create(name='E0', note='', created_user=user)
# edit entity to append a new Array attributes
params = {
'name': 'entity',
'note': '',
'is_toplevel': False,
'attrs': [{
'id': str(attr.id),
'name': attr.name,
'type': str(attr.type),
'is_mandatory': attr.is_mandatory,
'is_delete_in_chain': False,
'row_index': '1',
}, {
'name': 'arr-str',
'type': str(AttrTypeArrStr),
'is_mandatory': True,
'is_delete_in_chain': False,
'row_index': '2',
}, {
'name': 'arr-obj',
'type': str(AttrTypeArrObj),
'ref_ids': [refer_entity.id],
'is_mandatory': True,
'is_delete_in_chain': False,
'row_index': '3',
}],
}
resp = self.client.post(reverse('entity:do_edit', args=[entity.id]),
json.dumps(params),
'application/json')
self.assertEqual(resp.status_code, 200)
# Checks that the Attributes associated to the added EntityAttrs are not created
self.assertEqual(entity.attrs.count(), 3)
self.assertEqual(entry.attrs.count(), 1)
resp = self.client.get(reverse('entry:show', args=[entry.id]))
self.assertEqual(resp.status_code, 200)
# Checks that the new Attibutes is created in the show processing
self.assertEqual(entity.attrs.count(), 3)
self.assertEqual(entry.attrs.count(), entity.attrs.count())
attr_str = entry.attrs.get(name=attr.name)
attr_arr_str = entry.attrs.get(name='arr-str')
attr_arr_obj = entry.attrs.get(name='arr-obj')
refer_entry = Entry.objects.create(name='e0', schema=refer_entity, created_user=user)
attr_str_value_count = attr_str.values.count()
attr_arr_str_value_count = attr_arr_str.values.count()
attr_arr_obj_value_count = attr_arr_obj.values.count()
self.assertEqual(attr_str_value_count, 1)
self.assertEqual(attr_arr_str_value_count, 1)
self.assertEqual(attr_arr_obj_value_count, 1)
# edit to add values to the new attributes
params = {
'entry_name': entry.name,
'attrs': [
{
'id': str(attr_str.id),
'type': str(attr.type),
'value': [{'data': 'hoge', 'index': 0}],
'referral_key': []
},
{
'id': str(attr_arr_str.id),
'type': str(AttrTypeArrStr),
'value': [
{'data': 'foo', 'index': 0},
{'data': 'bar', 'index': 1},
],
'referral_key': []
},
{
'id': str(attr_arr_obj.id),
'type': str(AttrTypeArrObj),
'value': [{'data': refer_entry.id, 'index': 0}],
'referral_key': []},
],
}
resp = self.client.post(reverse('entry:do_edit', args=[entry.id]),
json.dumps(params),
'application/json')
self.assertEqual(resp.status_code, 200)
# check updated values structure and count of AttributeValues
self.assertEqual(attr_str.values.count(), attr_str_value_count + 1)
self.assertEqual(attr_arr_str.values.count(), attr_arr_str_value_count + 1)
self.assertEqual(attr_arr_obj.values.count(), attr_arr_obj_value_count + 1)
value_arr_str = attr_arr_str.values.last()
self.assertEqual(value_arr_str.data_array.count(), 2)
value_arr_obj = attr_arr_obj.values.last()
self.assertEqual(value_arr_obj.data_array.count(), 1)
@patch('entity.tasks.create_entity.delay', Mock(side_effect=entity_tasks.create_entity))
@patch('entry.tasks.create_entry_attrs.delay', Mock(side_effect=entry_tasks.create_entry_attrs))
def test_inherite_attribute_acl(self):
"""
This test executes followings
- create a new Entity(entity) with an EntityAttr(attr)
- change ACL of attr to be private by admin user
- create a new Entry(entry1) from entity by admin user
- switch the user to guest
- create a new Entry(entry2) from entity by guest user
Then, this checks following
- The Entry(entry1) whcih is created by the admin user has one Attribute
- The Entry(entry2) whcih is created by the guest user has no Attribute
"""
user = self.admin_login()
# create an Entity
params = {
'name': 'entity',
'note': '',
'is_toplevel': False,
'attrs': [
{'name': 'attr', 'type': str(AttrTypeStr),
'is_delete_in_chain': False, 'is_mandatory': False, 'row_index': '1'},
],
}
resp = self.client.post(reverse('entity:do_create'),
json.dumps(params),
'application/json')
self.assertEqual(resp.status_code, 200)
self.assertEqual(EntityAttr.objects.count(), 1)
# set acl of attr
entityattr = EntityAttr.objects.get(name='attr')
params = {
'object_id': str(entityattr.id),
'object_type': str(entityattr.objtype),
'acl': [
{
'member_id': str(user.id),
'member_type': 'user',
'value': str(ACLType.Full.id)
}
],
'default_permission': str(ACLType.Nothing.id),
}
resp = self.client.post(reverse('acl:set'), json.dumps(params), 'application/json')
self.assertEqual(resp.status_code, 200)
self.assertEqual(Entity.objects.count(), 1)
self.assertFalse(EntityAttr.objects.get(name='attr').is_public)
# create Entity by admin
entity = Entity.objects.get(name='entity')
params = {
'entry_name': 'entry1',
'attrs': [
{'id': str(entityattr.id), 'type': str(entityattr.objtype),
'value': [{'data': 'attr-value', 'index': 0}], 'referral_key': []},
],
}
resp = self.client.post(reverse('entry:do_create', args=[entity.id]),
json.dumps(params),
'application/json')
self.assertEqual(resp.status_code, 200)
self.assertEqual(Entry.objects.count(), 1)
self.assertEqual(Entry.objects.get(name='entry1').attrs.count(), 1)
# switch to guest user
self.guest_login()
entity = Entity.objects.get(name='entity')
params = {
'entry_name': 'entry2',
'attrs': [
{'id': str(entityattr.id), 'type': str(entityattr.objtype),
'value': [{'data': 'attr-value', 'index': 0}], 'referral_key': []},
],
}
resp = self.client.post(reverse('entry:do_create', args=[entity.id]),
json.dumps(params),
'application/json')
self.assertEqual(resp.status_code, 200)
self.assertEqual(Entry.objects.count(), 2)
self.assertEqual(Entry.objects.get(name='entry2').attrs.count(), 0)
@patch('entity.tasks.edit_entity.delay', Mock(side_effect=entity_tasks.edit_entity))
| 41 | 100 | 0.552336 | import json
from airone.lib.acl import ACLType
from airone.lib.test import AironeViewTest
from airone.lib.types import AttrTypeStr
from airone.lib.types import AttrTypeArrStr, AttrTypeArrObj
from airone.lib.types import AttrTypeValue
from django.urls import reverse
from entity.models import Entity, EntityAttr
from entry.models import Entry, AttributeValue
from entry import tasks as entry_tasks
from entity import tasks as entity_tasks
from unittest.mock import patch
from unittest.mock import Mock
class ComplexViewTest(AironeViewTest):
"""
This has complex tests that combine multiple requests across the inter-applicational
"""
@patch('entry.tasks.create_entry_attrs.delay', Mock(side_effect=entry_tasks.create_entry_attrs))
@patch('entry.tasks.edit_entry_attrs.delay', Mock(side_effect=entry_tasks.edit_entry_attrs))
@patch('entity.tasks.create_entity.delay', Mock(side_effect=entity_tasks.create_entity))
@patch('entity.tasks.edit_entity.delay', Mock(side_effect=entity_tasks.edit_entity))
def test_add_attr_after_creating_entry(self):
"""
This test executes followings
- create a new Entity(entity) with an EntityAttr(attr)
- create a new Entry for entity
- update entity to append new EntityAttrs(arr-str, arr-obj)
Then, this checks following
- created additional Attributes which are corresponding to the added EntityAttrs
automatically for accessing show page.
- enable to edit entry correctly because #152 is fixed
"""
user = self.admin_login()
# create an Entity
params = {
'name': 'entity',
'note': '',
'is_toplevel': False,
'attrs': [
{'name': 'attr', 'type': str(AttrTypeStr), 'is_delete_in_chain': True,
'is_mandatory': False, 'row_index': '1'},
],
}
resp = self.client.post(reverse('entity:do_create'),
json.dumps(params),
'application/json')
self.assertEqual(resp.status_code, 200)
# get created objects
entity = Entity.objects.get(name='entity')
attr = entity.attrs.get(name='attr')
# create an Entry for the created entity
params = {
'entry_name': 'entry',
'attrs': [
{'id': str(attr.id), 'type': str(AttrTypeStr),
'value': [{'data': 'attr-value', 'index': 0}], 'referral_key': []},
],
}
resp = self.client.post(reverse('entry:do_create', args=[entity.id]),
json.dumps(params),
'application/json')
self.assertEqual(resp.status_code, 200)
# get created entry object
entry = Entry.objects.get(name='entry')
refer_entity = Entity.objects.create(name='E0', note='', created_user=user)
# edit entity to append a new Array attributes
params = {
'name': 'entity',
'note': '',
'is_toplevel': False,
'attrs': [{
'id': str(attr.id),
'name': attr.name,
'type': str(attr.type),
'is_mandatory': attr.is_mandatory,
'is_delete_in_chain': False,
'row_index': '1',
}, {
'name': 'arr-str',
'type': str(AttrTypeArrStr),
'is_mandatory': True,
'is_delete_in_chain': False,
'row_index': '2',
}, {
'name': 'arr-obj',
'type': str(AttrTypeArrObj),
'ref_ids': [refer_entity.id],
'is_mandatory': True,
'is_delete_in_chain': False,
'row_index': '3',
}],
}
resp = self.client.post(reverse('entity:do_edit', args=[entity.id]),
json.dumps(params),
'application/json')
self.assertEqual(resp.status_code, 200)
# Checks that the Attributes associated to the added EntityAttrs are not created
self.assertEqual(entity.attrs.count(), 3)
self.assertEqual(entry.attrs.count(), 1)
resp = self.client.get(reverse('entry:show', args=[entry.id]))
self.assertEqual(resp.status_code, 200)
# Checks that the new Attibutes is created in the show processing
self.assertEqual(entity.attrs.count(), 3)
self.assertEqual(entry.attrs.count(), entity.attrs.count())
attr_str = entry.attrs.get(name=attr.name)
attr_arr_str = entry.attrs.get(name='arr-str')
attr_arr_obj = entry.attrs.get(name='arr-obj')
refer_entry = Entry.objects.create(name='e0', schema=refer_entity, created_user=user)
attr_str_value_count = attr_str.values.count()
attr_arr_str_value_count = attr_arr_str.values.count()
attr_arr_obj_value_count = attr_arr_obj.values.count()
self.assertEqual(attr_str_value_count, 1)
self.assertEqual(attr_arr_str_value_count, 1)
self.assertEqual(attr_arr_obj_value_count, 1)
# edit to add values to the new attributes
params = {
'entry_name': entry.name,
'attrs': [
{
'id': str(attr_str.id),
'type': str(attr.type),
'value': [{'data': 'hoge', 'index': 0}],
'referral_key': []
},
{
'id': str(attr_arr_str.id),
'type': str(AttrTypeArrStr),
'value': [
{'data': 'foo', 'index': 0},
{'data': 'bar', 'index': 1},
],
'referral_key': []
},
{
'id': str(attr_arr_obj.id),
'type': str(AttrTypeArrObj),
'value': [{'data': refer_entry.id, 'index': 0}],
'referral_key': []},
],
}
resp = self.client.post(reverse('entry:do_edit', args=[entry.id]),
json.dumps(params),
'application/json')
self.assertEqual(resp.status_code, 200)
# check updated values structure and count of AttributeValues
self.assertEqual(attr_str.values.count(), attr_str_value_count + 1)
self.assertEqual(attr_arr_str.values.count(), attr_arr_str_value_count + 1)
self.assertEqual(attr_arr_obj.values.count(), attr_arr_obj_value_count + 1)
value_arr_str = attr_arr_str.values.last()
self.assertEqual(value_arr_str.data_array.count(), 2)
value_arr_obj = attr_arr_obj.values.last()
self.assertEqual(value_arr_obj.data_array.count(), 1)
@patch('entity.tasks.create_entity.delay', Mock(side_effect=entity_tasks.create_entity))
@patch('entry.tasks.create_entry_attrs.delay', Mock(side_effect=entry_tasks.create_entry_attrs))
def test_inherite_attribute_acl(self):
"""
This test executes followings
- create a new Entity(entity) with an EntityAttr(attr)
- change ACL of attr to be private by admin user
- create a new Entry(entry1) from entity by admin user
- switch the user to guest
- create a new Entry(entry2) from entity by guest user
Then, this checks following
- The Entry(entry1) whcih is created by the admin user has one Attribute
- The Entry(entry2) whcih is created by the guest user has no Attribute
"""
user = self.admin_login()
# create an Entity
params = {
'name': 'entity',
'note': '',
'is_toplevel': False,
'attrs': [
{'name': 'attr', 'type': str(AttrTypeStr),
'is_delete_in_chain': False, 'is_mandatory': False, 'row_index': '1'},
],
}
resp = self.client.post(reverse('entity:do_create'),
json.dumps(params),
'application/json')
self.assertEqual(resp.status_code, 200)
self.assertEqual(EntityAttr.objects.count(), 1)
# set acl of attr
entityattr = EntityAttr.objects.get(name='attr')
params = {
'object_id': str(entityattr.id),
'object_type': str(entityattr.objtype),
'acl': [
{
'member_id': str(user.id),
'member_type': 'user',
'value': str(ACLType.Full.id)
}
],
'default_permission': str(ACLType.Nothing.id),
}
resp = self.client.post(reverse('acl:set'), json.dumps(params), 'application/json')
self.assertEqual(resp.status_code, 200)
self.assertEqual(Entity.objects.count(), 1)
self.assertFalse(EntityAttr.objects.get(name='attr').is_public)
# create Entity by admin
entity = Entity.objects.get(name='entity')
params = {
'entry_name': 'entry1',
'attrs': [
{'id': str(entityattr.id), 'type': str(entityattr.objtype),
'value': [{'data': 'attr-value', 'index': 0}], 'referral_key': []},
],
}
resp = self.client.post(reverse('entry:do_create', args=[entity.id]),
json.dumps(params),
'application/json')
self.assertEqual(resp.status_code, 200)
self.assertEqual(Entry.objects.count(), 1)
self.assertEqual(Entry.objects.get(name='entry1').attrs.count(), 1)
# switch to guest user
self.guest_login()
entity = Entity.objects.get(name='entity')
params = {
'entry_name': 'entry2',
'attrs': [
{'id': str(entityattr.id), 'type': str(entityattr.objtype),
'value': [{'data': 'attr-value', 'index': 0}], 'referral_key': []},
],
}
resp = self.client.post(reverse('entry:do_create', args=[entity.id]),
json.dumps(params),
'application/json')
self.assertEqual(resp.status_code, 200)
self.assertEqual(Entry.objects.count(), 2)
self.assertEqual(Entry.objects.get(name='entry2').attrs.count(), 0)
@patch('entity.tasks.edit_entity.delay', Mock(side_effect=entity_tasks.edit_entity))
    def test_cache_referred_entry_at_deleting_attr(self):
        """Deleting an EntityAttr must not delete already-created Attributes.

        Builds entity->entry referring to ref_entry, then edits the Entity
        to mark the 'ref' EntityAttr as deleted, and checks that the schema
        attribute is deactivated while the Entry's Attribute stays active.
        """
        user = self.admin_login()
        ref_entity = Entity.objects.create(name='ref_entity', created_user=user)
        ref_entry = Entry.objects.create(name='ref_entry', schema=ref_entity, created_user=user)
        entity = Entity.objects.create(name='entity', created_user=user)
        entity.attrs.add(EntityAttr.objects.create(name='ref',
                                                   type=AttrTypeValue['object'],
                                                   parent_entity=entity,
                                                   created_user=user))
        entry = Entry.objects.create(name='entry', schema=entity, created_user=user)
        entry.complement_attrs(user)
        attrv_params = {
            'value': '',
            'created_user': user,
            'parent_attr': entry.attrs.get(name='ref'),
            'referral': ref_entry,
        }
        entry.attrs.get(name='ref').values.add(AttributeValue.objects.create(**attrv_params))
        # make referred entry cache
        ref_entries = ref_entry.get_referred_objects()
        self.assertEqual(list(ref_entries), [entry])
        self.assertEqual(ref_entries.count(), 1)
        entity_attr = entity.attrs.last()
        # Re-submit the entity definition with the 'ref' attribute flagged deleted.
        params = {
            'name': 'entity',
            'note': '',
            'is_toplevel': False,
            'attrs': [{
                'id': entity_attr.id,
                'name': entity_attr.name,
                'type': str(entity_attr.type),
                'is_mandatory': entity_attr.is_mandatory,
                'is_delete_in_chain': False,
                'ref_ids': [ref_entity.id],
                'deleted': True,
                'row_index': '1'
            }], # delete EntityAttr 'ref'
        }
        resp = self.client.post(reverse('entity:do_edit', args=[entity.id]),
                                json.dumps(params),
                                'application/json')
        # checks that the cache is cleared because of the removing EntityAttr
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(entity.attrs.filter(is_active=True).count(), 0)
        self.assertEqual(entry.attrs.filter(is_active=True).count(), 1)
    def test_make_cache_referred_entry_after_updating_attr_type(self):
        """Changing an EntityAttr's type must keep the referred-entry cache.

        After switching the 'ref' attribute from object to string via the
        edit endpoint, the referral recorded on the existing AttributeValue
        is still reported by get_referred_objects().
        """
        user = self.admin_login()
        ref_entity = Entity.objects.create(name='ref_entity', created_user=user)
        ref_entry = Entry.objects.create(name='ref_entry', schema=ref_entity, created_user=user)
        entity = Entity.objects.create(name='entity', created_user=user)
        entity.attrs.add(EntityAttr.objects.create(name='ref',
                                                   type=AttrTypeValue['object'],
                                                   parent_entity=entity,
                                                   created_user=user))
        entry = Entry.objects.create(name='entry', schema=entity, created_user=user)
        entry.complement_attrs(user)
        attrv_params = {
            'value': '',
            'created_user': user,
            'parent_attr': entry.attrs.get(name='ref'),
            'referral': ref_entry,
        }
        entry.attrs.get(name='ref').values.add(AttributeValue.objects.create(**attrv_params))
        # make referred entry cache
        ref_entries = ref_entry.get_referred_objects()
        self.assertEqual(list(ref_entries), [entry])
        self.assertEqual(ref_entries.count(), 1)
        entity_attr = entity.attrs.last()
        # Re-submit the entity definition with the attribute's type changed to string.
        params = {
            'name': 'entity',
            'note': '',
            'is_toplevel': False,
            'attrs': [{
                'id': entity_attr.id,
                'name': entity_attr.name,
                'type': str(AttrTypeValue['string']),
                'is_mandatory': entity_attr.is_mandatory,
                'is_delete_in_chain': False,
                'row_index': '1'
            }], # delete EntityAttr 'ref'
        }
        resp = self.client.post(reverse('entity:do_edit', args=[entity.id]),
                                json.dumps(params),
                                'application/json')
        # These check that request was succeeded, but attr type and values
        # which are registered at that Attribute would not be changed.
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(list(ref_entry.get_referred_objects()), [entry])
| 4,387 | 0 | 53 |
bf5724577612a2d00daec5ca840971656ba19be9 | 2,264 | py | Python | user_profile/views.py | ksarthak4ever/Restrict_API | e53e671965b825fa167b080fe7212ec6f3e4c6ca | [
"MIT"
] | null | null | null | user_profile/views.py | ksarthak4ever/Restrict_API | e53e671965b825fa167b080fe7212ec6f3e4c6ca | [
"MIT"
] | null | null | null | user_profile/views.py | ksarthak4ever/Restrict_API | e53e671965b825fa167b080fe7212ec6f3e4c6ca | [
"MIT"
] | null | null | null | from django.shortcuts import render
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework.authentication import TokenAuthentication
from rest_framework.authtoken.serializers import AuthTokenSerializer
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.permissions import AllowAny
from . import serializers
from . import models
from . permissions import IsAdminUser, IsLoggedInUserOrAdmin
| 41.925926 | 237 | 0.787102 | from django.shortcuts import render
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework.authentication import TokenAuthentication
from rest_framework.authtoken.serializers import AuthTokenSerializer
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.permissions import AllowAny
from . import serializers
from . import models
from . permissions import IsAdminUser, IsLoggedInUserOrAdmin
class MessageViewSet(viewsets.ViewSet):
    """Read-only endpoint that states the aim of this API/project."""
    def list(self, request):
        """Return a welcome message together with the problem statement."""
        # Assemble the static payload first so the response line stays short.
        objective = [
            'The problem statement is',
            'An api in which a user cant access another users data/profile',
            'Eg:~ user with profile id 7 should be able to access /api/profile/7/ but not /api/profile/8/'
        ]
        payload = {'Message': 'Welcome!', 'Objective': objective}
        return Response(payload)
class UserProfileViewSet(viewsets.ModelViewSet):
    """CRUD endpoint for user profiles.

    ModelViewSet supplies the create/read/update plumbing; per-action
    access control is decided in get_permissions().
    """
    serializer_class = serializers.UserProfileSerializer
    # How the viewset fetches objects: straight from the UserProfile model.
    queryset = models.UserProfile.objects.all()
    authentication_classes = (TokenAuthentication,)
    def get_permissions(self):
        """Map the current action onto its permission policy."""
        if self.action == 'create':
            # Anyone may register an account.
            policy = [AllowAny]
        elif self.action in ('retrieve', 'update', 'partial_update'):
            # A logged-in user may only view/update their own profile
            # (e.g. /api/profile/7/); admins may touch any profile.
            policy = [IsLoggedInUserOrAdmin]
        elif self.action in ('list', 'destroy'):
            # Listing every user and deleting accounts is admin-only.
            policy = [IsAdminUser]
        else:
            policy = []
        return [permission() for permission in policy]
class LoginViewSet(viewsets.ViewSet): #Checks email and password and returns an auth token.
    """Validate credentials and return a DRF auth token."""
    serializer_class = AuthTokenSerializer
    def create(self, request): #using ObtainAuthToken APIView to validate and create a token.
        # Delegate the whole validate-then-issue-token workflow to DRF's view.
        return ObtainAuthToken().post(request)
| 1,011 | 688 | 94 |
d1204259d5d59c637e2ebf4a67f530c5f2e46420 | 272 | py | Python | ceciestunepipe/util/__init__.py | zekearneodo/ceciestunepipe | 7e771783769816f37de44077177152175aecc2b7 | [
"MIT"
] | null | null | null | ceciestunepipe/util/__init__.py | zekearneodo/ceciestunepipe | 7e771783769816f37de44077177152175aecc2b7 | [
"MIT"
] | null | null | null | ceciestunepipe/util/__init__.py | zekearneodo/ceciestunepipe | 7e771783769816f37de44077177152175aecc2b7 | [
"MIT"
] | null | null | null | from matplotlib import pyplot as plt
axes_pars = {'axes.labelpad': 5,
'axes.titlepad': 5,
'axes.titlesize': 'small',
'axes.grid': False,
'axes.xmargin': 0,
'axes.ymargin': 0}
plt.rcParams.update(axes_pars) | 27.2 | 39 | 0.540441 | from matplotlib import pyplot as plt
# rcParams defaults shared by the package's plotting helpers:
# padded small titles/labels, no grid, no extra data margins.
axes_pars = {'axes.labelpad': 5,
             'axes.titlepad': 5,
             'axes.titlesize': 'small',
             'axes.grid': False,
             'axes.xmargin': 0,
             'axes.ymargin': 0}
plt.rcParams.update(axes_pars) | 0 | 0 | 0 |
e4f6143f7bfb4ed767a788628fa6495930e03951 | 145 | py | Python | servers/run.py | ibalance2005/ocr_server | e7fd190df692a19c8d090950ee9cdd9838b262ba | [
"Apache-2.0"
] | null | null | null | servers/run.py | ibalance2005/ocr_server | e7fd190df692a19c8d090950ee9cdd9838b262ba | [
"Apache-2.0"
] | null | null | null | servers/run.py | ibalance2005/ocr_server | e7fd190df692a19c8d090950ee9cdd9838b262ba | [
"Apache-2.0"
] | null | null | null | import config as C
from servers import app
if __name__ == '__main__':
app_run = app.init()
app_run.run(host='0.0.0.0', port=C.API_PORT) | 20.714286 | 48 | 0.675862 | import config as C
from servers import app
if __name__ == '__main__':
app_run = app.init()
app_run.run(host='0.0.0.0', port=C.API_PORT) | 0 | 0 | 0 |
6affc5422901263d6054be01632ec2f25f425d15 | 5,669 | py | Python | cossmo_tests/test_cossmo.py | PSI-Lab/COSSMO | 704772ffc6d406c6fd914a069c810845dfc6dde3 | [
"AFL-1.1"
] | 4 | 2018-07-06T07:17:03.000Z | 2019-04-26T03:16:40.000Z | cossmo_tests/test_cossmo.py | PSI-Lab/COSSMO | 704772ffc6d406c6fd914a069c810845dfc6dde3 | [
"AFL-1.1"
] | null | null | null | cossmo_tests/test_cossmo.py | PSI-Lab/COSSMO | 704772ffc6d406c6fd914a069c810845dfc6dde3 | [
"AFL-1.1"
] | 1 | 2021-01-26T06:27:02.000Z | 2021-01-26T06:27:02.000Z | import tensorflow as tf
import numpy as np
from cossmo.output_networks import BalancedOutputNetwork, RaggedOutputNetwork
| 38.304054 | 78 | 0.533604 | import tensorflow as tf
import numpy as np
from cossmo.output_networks import BalancedOutputNetwork, RaggedOutputNetwork
class TestCOSSMO(tf.test.TestCase):
    """Tests for BalancedOutputNetwork prediction and training tensors."""
    def test_cossmo_predictions(self):
        """PSI predictions keep the input shape and normalize to one."""
        with self.test_session() as sess:
            num_outputs = 4
            N = 20
            k = 10
            logits_ph = tf.placeholder(tf.float32,
                                       shape=[num_outputs, None, None])
            model = BalancedOutputNetwork(logits_ph, num_outputs, 0., {})
            predictions_t = model.get_psi_predictions()
            feed_dict = {
                logits_ph: np.random.rand(num_outputs, N, k)
            }
            predictions_val = sess.run(predictions_t, feed_dict)
            # BUG FIX: assertTrue(a, b) treats `b` as a failure message and
            # passes for any truthy `a`; the intent is an exact shape check.
            self.assertEqual(predictions_val.shape, (num_outputs, N, k))
            self.assertTrue(np.allclose(predictions_val.sum(2), 1))
    def test_cossmo_optimizer(self):
        """Cross-entropy/loss/accuracy tensors evaluate with expected shapes/types."""
        with self.test_session() as sess:
            num_outputs = 4
            N = 20
            k = 10
            H = 15
            X_ph = tf.placeholder(tf.float32,
                                  shape=[num_outputs, N, H])
            W = tf.get_variable('weights', [H, k],
                                initializer=tf.truncated_normal_initializer())
            psi_targets_ph = tf.placeholder(tf.float32,
                                            shape=[num_outputs, None, None])
            # Shared linear projection applied per output network.
            logits = tf.reshape(tf.matmul(tf.reshape(X_ph, [-1, H]), W),
                                [num_outputs, -1, k])
            model = BalancedOutputNetwork(logits, num_outputs, 0, {})
            model.get_psi_predictions()
            model.get_cross_entropy_loss(psi_targets_ph)
            model.get_accuracy()
            # Built only to verify graph construction; never executed here.
            train_op = model.get_optimizer()
            sess.run(tf.global_variables_initializer())
            feed_dict = {
                X_ph: np.random.rand(num_outputs, N, H),
                psi_targets_ph: np.random.rand(num_outputs, N, k)
            }
            softmax_ce_val, loss_val, accuracy_val = sess.run(
                [model.softmax_cross_entropy, model.loss, model.accuracy],
                feed_dict
            )
            self.assertEqual(softmax_ce_val.shape, (num_outputs, N))
            self.assertIsInstance(loss_val, np.float32)
            self.assertIsInstance(accuracy_val, np.float32)
class TestMaskedCOSSMO(tf.test.TestCase):
    """Tests for RaggedOutputNetwork with a variable number of alt splice sites."""
    def test_masked_cossmo_predictions(self):
        """Masked PSI predictions keep the input shape and normalize to one."""
        with self.test_session() as sess:
            num_outputs = 4
            N = 20
            k = 10
            # Each example uses between 1 and k alternative splice sites.
            n_alt_ss_val = np.random.randint(0, k, N) + 1
            output_mask = np.array(
                [[1 if j < n_alt_ss_val[i] else 0 for j in range(k)]
                 for i in range(N)]
            ).astype(np.bool)
            logits_ph = tf.placeholder(tf.float32,
                                       shape=[num_outputs, None, None])
            # NOTE(review): output_mask_ph is fed but never wired into the
            # model graph here — presumably the mask is derived from n_alt_ss
            # inside RaggedOutputNetwork; confirm against the implementation.
            output_mask_ph = tf.placeholder(tf.bool,
                                            shape=[None, None])
            n_alt_ss = tf.placeholder(tf.int32, n_alt_ss_val.shape)
            model = RaggedOutputNetwork(
                logits_ph, num_outputs, n_alt_ss, 0., {})
            predictions_t = model.get_psi_predictions()
            feed_dict = {
                n_alt_ss: n_alt_ss_val,
                logits_ph: np.random.rand(num_outputs, N, k),
                output_mask_ph: output_mask
            }
            predictions_val = sess.run(predictions_t, feed_dict)
            # BUG FIX: assertTrue(a, b) uses `b` only as a failure message;
            # an exact shape comparison requires assertEqual.
            self.assertEqual(predictions_val.shape, (num_outputs, N, k))
            self.assertTrue(np.allclose(predictions_val.sum(2), 1))
    def test_cossmo_optimizer(self):
        """Ragged-network loss/accuracy evaluate with expected shapes/types."""
        with self.test_session() as sess:
            num_outputs = 4
            N = 20
            k = 10
            H = 15
            n_alt_ss_val = np.random.randint(0, k, N) + 1
            output_mask = np.array(
                [[1 if j < n_alt_ss_val[i] else 0 for j in range(k)]
                 for i in range(N)]
            ).astype(np.bool)
            X_ph = tf.placeholder(tf.float32,
                                  shape=[num_outputs, N, H])
            W = tf.get_variable('weights', [H, k],
                                initializer=tf.truncated_normal_initializer())
            psi_targets_ph = tf.placeholder(tf.float32,
                                            shape=[num_outputs, None, None])
            output_mask_ph = tf.placeholder(tf.bool,
                                            shape=[None, None])
            logits = tf.reshape(tf.matmul(tf.reshape(X_ph, [-1, H]), W),
                                [num_outputs, -1, k])
            n_alt_ss = tf.placeholder(tf.int32, n_alt_ss_val.shape)
            model = RaggedOutputNetwork(
                logits, num_outputs, n_alt_ss, 0, {})
            model.get_psi_predictions()
            model.get_cross_entropy_loss(psi_targets_ph)
            model.get_accuracy()
            # Built only to verify graph construction; never executed here.
            train_op = model.get_optimizer()
            sess.run(tf.global_variables_initializer())
            feed_dict = {
                n_alt_ss: n_alt_ss_val,
                X_ph: np.random.rand(num_outputs, N, H),
                output_mask_ph: output_mask,
                psi_targets_ph: np.random.rand(num_outputs, N, k)
            }
            softmax_ce_val, loss_val, accuracy_val = sess.run(
                [model.softmax_cross_entropy, model.loss, model.accuracy],
                feed_dict
            )
            self.assertEqual(softmax_ce_val.shape, (num_outputs, N))
            self.assertIsInstance(loss_val, np.float32)
            self.assertIsInstance(accuracy_val, np.float32)
| 5,360 | 34 | 152 |
3a2b87367dc0dba91b6e90ca9a24a230cb5df047 | 2,115 | py | Python | tweeter_analysis.py | milindparvatia/StockMarketApp | 0a3de9f2b10da0dad524c1ee47db5cfa8f00cdb7 | [
"Apache-2.0"
] | 1 | 2020-08-09T07:36:31.000Z | 2020-08-09T07:36:31.000Z | tweeter_analysis.py | milindparvatia/StockMarketApp | 0a3de9f2b10da0dad524c1ee47db5cfa8f00cdb7 | [
"Apache-2.0"
] | 7 | 2020-02-11T23:10:00.000Z | 2021-06-10T17:37:24.000Z | tweeter_analysis.py | milindparvatia/StockMarketApp | 0a3de9f2b10da0dad524c1ee47db5cfa8f00cdb7 | [
"Apache-2.0"
] | 1 | 2019-06-09T08:10:04.000Z | 2019-06-09T08:10:04.000Z | import os
import json
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import string
from collections import defaultdict
import re
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
analyser = SentimentIntensityAnalyzer()
if __name__ == '__main__':
    # Rebuild the scraped-tweet dump from scratch on every run.
    os.remove("tweets1234.json")
    os.system('twitterscraper #GOOGL --limit 100 -bd 2018-01-10 -ed 2018-09-20 --output=tweets1234.json')
    # Tokens to ignore: English stopwords, punctuation and retweet markers.
    punctuation = list(string.punctuation)
    stop = stopwords.words('english') + punctuation + ['rt', 'via']
    with open('tweets1234.json', 'r') as f:
        line = f.read() # read only the first tweet/line
    tweet = json.loads(line) # load it as Python dict
    # Tokenize every tweet body, dropping stopwords/punctuation.
    total = []
    for key in tweet:
        terms_stop = [term for term in word_tokenize(key['text']) if term not in stop] #Using Nltk to tokenize
        total.extend(terms_stop)
    # BUG FIX: the original removed items from `total` while iterating it
    # (for key in total: total.remove(key)), which silently skips the element
    # that follows every removed token. Filter and lower-case in one pass.
    total = [term.lower() for term in total if len(term) >= 3]
    # Bullish/bearish term lexicons: lists of (word, weight) pairs.
    with open('bulltest.json','r') as temp:
        bull = json.load(temp)
        print(bull)
    with open('beartest.json', 'r') as temp:
        bear = json.load(temp)
        print(bear)
    sentpos = 0.0
    sentneg = 0.0
    # NOTE(review): leaders() (term-frequency ranking) is defined alongside
    # this script in the full module; it is not visible in this excerpt.
    freq = leaders(total)
    # Score each frequent term against both lexicons: weight * frequency.
    for key1 in freq:
        for key2 in bull:
            if(key1[0].lower() == key2[0].lower()):
                sentpos = sentpos + (key2[1] * key1[1])
        for key3 in bear:
            if(key1[0].lower() == key3[0].lower()):
                sentneg = sentneg - (key3[1] * key1[1])
    print("\n\n")
    print(sentpos)
    print(sentneg)
    print(sentpos+sentneg)
| 27.115385 | 114 | 0.583924 | import os
import json
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import string
from collections import defaultdict
import re
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
analyser = SentimentIntensityAnalyzer()
def leaders(xs, top=500):
counts = defaultdict(int)
for x in xs:
counts[x] += 1
return sorted(counts.items(), reverse=True, key=lambda tup: tup[1])[:top]
if __name__ == '__main__':
os.remove("tweets1234.json")
os.system('twitterscraper #GOOGL --limit 100 -bd 2018-01-10 -ed 2018-09-20 --output=tweets1234.json')
punctuation = list(string.punctuation)
stop = stopwords.words('english') + punctuation + ['rt', 'via']
with open('tweets1234.json', 'r') as f:
line = f.read() # read only the first tweet/line
total = list()
sentiment = 0.0
pos = 0.0
neg = 0.0
tweet = json.loads(line) # load it as Python dict
type(tweet)
for key in tweet:
#print("\n")
#print("\n Tweet : ")
terms_stop = [term for term in word_tokenize(key['text']) if term not in stop] #Using Nltk to tokenize
total.extend(terms_stop)
for key in total:
if(len(key) < 3):
total.remove(key)
for i in range(len(total)):
total[i] = total[i].lower()
with open('bulltest.json','r') as temp:
bull = json.load(temp)
print(bull)
with open('beartest.json', 'r') as temp:
bear = json.load(temp)
print(bear)
f.close()
sentpos = 0.0
sentneg = 0.0
freq = leaders(total)
for key1 in freq:
#t1 = list(key) #convert tuple to list for comparing
for key2 in bull:
if(key1[0].lower() == key2[0].lower()):
sentpos = sentpos + (key2[1] * key1[1])
for key3 in bear:
if(key1[0].lower() == key3[0].lower()):
sentneg = sentneg - (key3[1] * key1[1])
print("\n\n")
# print(freq)
print(sentpos)
print(sentneg)
print(sentpos+sentneg)
| 152 | 0 | 23 |
2c61a3cf22fed9100e56ac03ddf5dee0325b911d | 1,409 | py | Python | compare_records.py | KungPaoChick/CovidMonitor | 7b96d170e7583fc395dddd370f03eec0e0b71e0c | [
"MIT"
] | 2 | 2021-01-31T13:27:45.000Z | 2021-02-01T00:06:40.000Z | compare_records.py | KungPaoChick/CovidMonitor | 7b96d170e7583fc395dddd370f03eec0e0b71e0c | [
"MIT"
] | null | null | null | compare_records.py | KungPaoChick/CovidMonitor | 7b96d170e7583fc395dddd370f03eec0e0b71e0c | [
"MIT"
] | null | null | null | import os, errno
import pandas as pd
first_file = str(input('First Country: '))
second_file = str(input('Second Country: '))
file_path = str(input('Path:(year/month) '))
find_files(first_file, second_file, file_path)
| 35.225 | 99 | 0.535841 | import os, errno
import pandas as pd
def find_files(file_one, file_two, search_path):
results = []
path = os.getcwd() + '/Records/'
csv_one = file_one + '.csv'
csv_two = file_two + '.csv'
if os.path.isdir(path + search_path):
try:
for root, dirs, files in os.walk(path + search_path):
if csv_one and csv_two in files:
results.append(os.path.join(root, csv_one))
results.append(os.path.join(root, csv_two))
dirs.append('None')
else:
raise FileNotFoundError(
'Neither {' + csv_one + ' or ' + csv_two + '} exists in Records Library.')
for result in results:
df = pd.read_csv(result, encoding='utf-8')
pd.set_option('display.max_rows', None)
print(df)
print('\n\n')
except FileNotFoundError as io:
print('Directory/File has not been found! ', io)
else:
raise FileNotFoundError(
errno.ENOENT, os.strerror(errno.ENOENT),
"{" + path + search_path + "} is not a Directory")
first_file = str(input('First Country: '))
second_file = str(input('Second Country: '))
file_path = str(input('Path:(year/month) '))
find_files(first_file, second_file, file_path)
| 1,153 | 0 | 25 |
b4c602a7a6ac6de6dc83545d32aa9910df43a8d1 | 1,968 | py | Python | game-examples-AttilaToth/Minesweeper/main.py | CrtomirJuren/pygame-projects | f710f36050bfe3ece866bbda7d570caa1e037d7a | [
"MIT"
] | null | null | null | game-examples-AttilaToth/Minesweeper/main.py | CrtomirJuren/pygame-projects | f710f36050bfe3ece866bbda7d570caa1e037d7a | [
"MIT"
] | null | null | null | game-examples-AttilaToth/Minesweeper/main.py | CrtomirJuren/pygame-projects | f710f36050bfe3ece866bbda7d570caa1e037d7a | [
"MIT"
] | null | null | null | import pygame
from board import Grid
from player import Player, Stats
from enum import Enum, auto
import os
os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % (400,100)
surface = pygame.display.set_mode((1200, 900))
pygame.display.set_caption('Minesweeper')
state = States.running
player = Player()
grid = Grid(player)
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if event.type == pygame.MOUSEBUTTONDOWN and state == States.running:
if pygame.mouse.get_pressed()[0]: # check for the left mouse button
pos = pygame.mouse.get_pos()
grid.click(pos[0], pos[1])
elif pygame.mouse.get_pressed()[2]:
pos = pygame.mouse.get_pos()
grid.mark_mine(pos[0]//30, pos[1]//30)
if grid.check_if_win():
state = States.win
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE and (state == States.game_over or state == States.win):
grid.reload()
state = States.running
if event.key == pygame.K_b:
grid.show_mines()
surface.fill((0,0,0))
if player.get_health() == 0:
state = States.game_over
if state == States.game_over:
Stats.draw(surface, 'Game over!', (970, 350))
Stats.draw(surface, 'Press Space to restart', (920, 400))
elif state == States.win:
Stats.draw(surface, 'You win!', (1000, 350))
Stats.draw(surface, 'Press Space to restart', (920, 400))
grid.draw(surface)
Stats.draw(surface, 'Lives remaining', (950, 100))
Stats.draw(surface, str(player.get_health()), (1020, 200))
Stats.draw(surface, 'RMB to mark mine', (950, 550))
Stats.draw(surface, 'press b to show mines', (920, 650))
pygame.display.flip()
| 30.75 | 98 | 0.598069 | import pygame
from board import Grid
from player import Player, Stats
from enum import Enum, auto
import os
os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % (400,100)
surface = pygame.display.set_mode((1200, 900))
pygame.display.set_caption('Minesweeper')
class States(Enum):
running = auto()
game_over = auto()
win = auto()
state = States.running
player = Player()
grid = Grid(player)
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if event.type == pygame.MOUSEBUTTONDOWN and state == States.running:
if pygame.mouse.get_pressed()[0]: # check for the left mouse button
pos = pygame.mouse.get_pos()
grid.click(pos[0], pos[1])
elif pygame.mouse.get_pressed()[2]:
pos = pygame.mouse.get_pos()
grid.mark_mine(pos[0]//30, pos[1]//30)
if grid.check_if_win():
state = States.win
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE and (state == States.game_over or state == States.win):
grid.reload()
state = States.running
if event.key == pygame.K_b:
grid.show_mines()
surface.fill((0,0,0))
if player.get_health() == 0:
state = States.game_over
if state == States.game_over:
Stats.draw(surface, 'Game over!', (970, 350))
Stats.draw(surface, 'Press Space to restart', (920, 400))
elif state == States.win:
Stats.draw(surface, 'You win!', (1000, 350))
Stats.draw(surface, 'Press Space to restart', (920, 400))
grid.draw(surface)
Stats.draw(surface, 'Lives remaining', (950, 100))
Stats.draw(surface, str(player.get_health()), (1020, 200))
Stats.draw(surface, 'RMB to mark mine', (950, 550))
Stats.draw(surface, 'press b to show mines', (920, 650))
pygame.display.flip()
| 0 | 59 | 23 |
7904281c09f285f9f58bfe640ba09dcc99178926 | 20,018 | py | Python | numpy_indexed/grouping.py | EelcoHoogendoorn/Numpy_arraysetops_EP | 84dc8114bf8a79c3acb3f7f59128247b9fc97243 | [
"MIT"
] | 170 | 2016-04-02T07:29:12.000Z | 2022-03-30T02:57:15.000Z | numpy_indexed/grouping.py | EelcoHoogendoorn/Numpy_arraysetops_EP | 84dc8114bf8a79c3acb3f7f59128247b9fc97243 | [
"MIT"
] | 13 | 2016-08-31T14:39:51.000Z | 2022-01-10T16:29:00.000Z | numpy_indexed/grouping.py | EelcoHoogendoorn/Numpy_arraysetops_EP | 84dc8114bf8a79c3acb3f7f59128247b9fc97243 | [
"MIT"
] | 19 | 2016-07-20T18:49:36.000Z | 2021-04-16T06:38:09.000Z | """grouping module"""
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import *
import itertools
import numpy as np
from numpy_indexed.index import as_index
import numpy_indexed as npi
__author__ = "Eelco Hoogendoorn"
__license__ = "LGPL"
__email__ = "hoogendoorn.eelco@gmail.com"
class GroupBy(object):
"""
GroupBy class
contains an index of keys, and extends the index functionality with grouping-specific functionality
"""
def __init__(self, keys, axis=0):
"""
Parameters
----------
keys : indexable object
sequence of keys to group by
axis : int, optional
axis to regard as the key-sequence, in case keys is multi-dimensional
See Also
--------
numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
"""
self.index = as_index(keys, axis)
#forward interesting/'public' index properties
@property
def unique(self):
"""unique keys"""
return self.index.unique
@property
def count(self):
"""count of each unique key"""
return self.index.count
@property
def inverse(self):
"""mapping such that unique[inverse]==keys"""
return self.index.inverse
@property
def groups(self):
"""int, number of groups formed by the keys"""
return self.index.groups
#some different methods of chopping up a set of values by key
def split_iterable_as_iterable(self, values):
"""Group iterable into iterables, in the order of the keys
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
iterable of items in values
Notes
-----
Memory consumption depends on the amount of sorting required
Worst case, if index.sorter[-1] = 0, we need to consume the entire value iterable,
before we can start yielding any output
But to the extent that the keys are already sorted, the grouping is lazy
"""
values = iter(enumerate(values))
cache = dict()
s = iter(self.index.sorter)
for c in self.count:
yield (get_value(i) for i in itertools.islice(s, int(c)))
def split_iterable_as_unordered_iterable(self, values):
"""Group iterable into iterables, without regard for the ordering of self.index.unique
key-group tuples are yielded as soon as they are complete
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
tuple of key, and a list of corresponding items in values
Notes
-----
This approach is lazy, insofar as grouped values are close in their iterable
"""
from collections import defaultdict
cache = defaultdict(list)
count = self.count
unique = self.unique
key = (lambda i: unique[i]) if isinstance(unique, np.ndarray) else (lambda i: tuple(c[i] for c in unique))
for i,v in zip(self.inverse, values):
cache[i].append(v)
if len(cache[i]) == count[i]:
yield key(i), cache.pop(i)
def split_sequence_as_iterable(self, values):
"""Group sequence into iterables
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
iterable of items in values
Notes
-----
This is the preferred method if values has random access, but we dont want it completely in memory.
Like a big memory mapped file, for instance
"""
print(self.count)
s = iter(self.index.sorter)
for c in self.count:
yield (values[i] for i in itertools.islice(s, int(c)))
def split_array_as_array(self, values):
"""Group ndarray into ndarray by means of reshaping
Parameters
----------
values : ndarray_like, [index.size, ...]
Returns
-------
ndarray, [groups, group_size, ...]
values grouped by key
Raises
------
AssertionError
This operation is only possible if index.uniform==True
"""
if not self.index.uniform:
raise ValueError("Array can only be split as array if all groups have the same size")
values = np.asarray(values)
values = values[self.index.sorter]
return values.reshape(self.groups, -1, *values.shape[1:])
def split_array_as_list(self, values):
"""Group values as a list of arrays, or a jagged-array
Parameters
----------
values : ndarray, [keys, ...]
Returns
-------
list of length self.groups of ndarray, [key_count, ...]
"""
values = np.asarray(values)
values = values[self.index.sorter]
return np.split(values, self.index.slices[1:-1], axis=0)
def split(self, values):
"""some sensible defaults"""
try:
return self.split_array_as_array(values)
except:
# FIXME: change to iter in python 3?
return self.split_array_as_list(values)
def __call__(self, values):
"""not sure how i feel about this. explicit is better than implict?"""
return self.unique, self.split(values)
# ufunc based reduction methods. should they return unique keys by default?
def reduce(self, values, operator=np.add, axis=0, dtype=None):
"""Reduce the values over identical key groups, using the given ufunc
reduction is over the first axis, which should have elements corresponding to the keys
all other axes are treated indepenently for the sake of this reduction
Parameters
----------
values : ndarray, [keys, ...]
values to perform reduction over
operator : numpy.ufunc
a numpy ufunc, such as np.add or np.sum
axis : int, optional
the axis to reduce over
dtype : output dtype
Returns
-------
ndarray, [groups, ...]
values reduced by operator over the key-groups
"""
values = np.take(values, self.index.sorter, axis=axis)
return operator.reduceat(values, self.index.start, axis=axis, dtype=dtype)
def sum(self, values, axis=0, dtype=None):
"""compute the sum over each group
Parameters
----------
values : array_like, [keys, ...]
values to sum per group
axis : int, optional
alternative reduction axis for values
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, dtype=dtype)
def prod(self, values, axis=0, dtype=None):
"""compute the product over each group
Parameters
----------
values : array_like, [keys, ...]
values to multiply per group
axis : int, optional
alternative reduction axis for values
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, dtype=dtype, operator=np.multiply)
def mean(self, values, axis=0, weights=None, dtype=None):
"""compute the mean over each group
Parameters
----------
values : array_like, [keys, ...]
values to take average of per group
axis : int, optional
alternative reduction axis for values
weights : ndarray, [keys, ...], optional
weight to use for each value
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
if weights is None:
result = self.reduce(values, axis=axis, dtype=dtype)
shape = [1] * values.ndim
shape[axis] = self.groups
weights = self.count.reshape(shape)
else:
weights = np.asarray(weights)
result = self.reduce(values * weights, axis=axis, dtype=dtype)
weights = self.reduce(weights, axis=axis, dtype=dtype)
return self.unique, result / weights
def var(self, values, axis=0, weights=None, dtype=None):
"""compute the variance over each group
Parameters
----------
values : array_like, [keys, ...]
values to take variance of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
unique, mean = self.mean(values, axis, weights, dtype)
err = values - mean.take(self.inverse, axis)
if weights is None:
shape = [1] * values.ndim
shape[axis] = self.groups
group_weights = self.count.reshape(shape)
var = self.reduce(err ** 2, axis=axis, dtype=dtype)
else:
weights = np.asarray(weights)
group_weights = self.reduce(weights, axis=axis, dtype=dtype)
var = self.reduce(weights * err ** 2, axis=axis, dtype=dtype)
return unique, var / group_weights
def std(self, values, axis=0, weights=None, dtype=None):
"""standard deviation over each group
Parameters
----------
values : array_like, [keys, ...]
values to take standard deviation of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
unique, var = self.var(values, axis, weights, dtype)
return unique, np.sqrt(var)
def median(self, values, axis=0, average=True):
"""compute the median value over each group.
Parameters
----------
values : array_like, [keys, ...]
values to compute the median of per group
axis : int, optional
alternative reduction axis for values
average : bool, optional
when average is true, the average of the two central values is taken for groups with an even key-count
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
mid_2 = self.index.start + self.index.stop
hi = (mid_2 ) // 2
lo = (mid_2 - 1) // 2
#need this indirection for lex-index compatibility
sorted_group_rank_per_key = self.index.sorted_group_rank_per_key
values = np.asarray(values)
if values.ndim>1: #is trying to skip apply_along_axis somewhat premature optimization?
values = np.apply_along_axis(median1d, axis, values)
else:
values = median1d(values)
return self.unique, values
def mode(self, values, weights=None):
"""compute the mode within each group.
Parameters
----------
values : array_like, [keys, ...]
values to compute the mode of per group
weights : array_like, [keys], float, optional
optional weight associated with each entry in values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
if weights is None:
unique, weights = npi.count((self.index.sorted_group_rank_per_key, values))
else:
unique, weights = npi.group_by((self.index.sorted_group_rank_per_key, values)).sum(weights)
x, bin = npi.group_by(unique[0]).argmax(weights)
return x, unique[1][bin]
def min(self, values, axis=0):
"""return the minimum within each group
Parameters
----------
values : array_like, [keys, ...]
values to take minimum of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, np.minimum, axis)
def max(self, values, axis=0):
"""return the maximum within each group
Parameters
----------
values : array_like, [keys, ...]
values to take maximum of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, np.maximum, axis)
def first(self, values, axis=0):
"""return values at first occurance of its associated key
Parameters
----------
values : array_like, [keys, ...]
values to pick the first value of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, np.take(values, self.index.sorter[self.index.start], axis)
def last(self, values, axis=0):
"""return values at last occurance of its associated key
Parameters
----------
values : array_like, [keys, ...]
values to pick the last value of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, np.take(values, self.index.sorter[self.index.stop-1], axis)
def any(self, values, axis=0):
"""compute if any item evaluates to true in each group
Parameters
----------
values : array_like, [keys, ...]
values to take boolean predicate over per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...], np.bool
value array, reduced over groups
"""
values = np.asarray(values)
if not values.dtype == np.bool:
values = values != 0
return self.unique, self.reduce(values, axis=axis) > 0
def all(self, values, axis=0):
"""compute if all items evaluates to true in each group
Parameters
----------
values : array_like, [keys, ...]
values to take boolean predicate over per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...], np.bool
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, operator=np.multiply) != 0
def argmin(self, values):
"""return the index into values corresponding to the minimum value of the group
Parameters
----------
values : array_like, [keys]
values to pick the argmin of per group
Returns
-------
unique: ndarray, [groups]
unique keys
argmin : ndarray, [groups]
index into value array, representing the argmin per group
"""
keys, minima = self.min(values)
minima = minima[self.inverse]
# select the first occurence of the minimum in each group
index = as_index((self.inverse, values == minima))
return keys, index.sorter[index.start[-self.groups:]]
def argmax(self, values):
"""return the index into values corresponding to the maximum value of the group
Parameters
----------
values : array_like, [keys]
values to pick the argmax of per group
Returns
-------
unique: ndarray, [groups]
unique keys
argmax : ndarray, [groups]
index into value array, representing the argmax per group
"""
keys, maxima = self.max(values)
maxima = maxima[self.inverse]
# select the first occurence of the maximum in each group
index = as_index((self.inverse, values == maxima))
return keys, index.sorter[index.start[-self.groups:]]
#implement iter interface? could simply do zip( group_by(keys)(values)), no?
def group_by(keys, values=None, reduction=None, axis=0):
"""construct a grouping object on the given keys, optionally performing the given reduction on the given values
Parameters
----------
keys : indexable object
keys to group by
values : array_like, optional
sequence of values, of the same length as keys
if a reduction function is provided, the given values are reduced by key
if no reduction is provided, the given values are grouped and split by key
reduction : lambda, optional
reduction function to apply to the values in each group
axis : int, optional
axis to regard as the key-sequence, in case keys is multi-dimensional
Returns
-------
iterable
if values is None, a GroupBy object of the given keys object
if reduction is None, an tuple of a sequence of unique keys and a sequence of grouped values
else, a sequence of tuples of unique keys and reductions of values over that key-group
See Also
--------
numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
"""
g = GroupBy(keys, axis)
if values is None:
return g
groups = g.split(values)
if reduction is None:
return g.unique, groups
return [(key, reduction(group)) for key, group in zip(g.unique, groups)]
__all__ = ['group_by']
| 32.655791 | 115 | 0.575082 | """grouping module"""
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import *
import itertools
import numpy as np
from numpy_indexed.index import as_index
import numpy_indexed as npi
__author__ = "Eelco Hoogendoorn"
__license__ = "LGPL"
__email__ = "hoogendoorn.eelco@gmail.com"
class GroupBy(object):
"""
GroupBy class
contains an index of keys, and extends the index functionality with grouping-specific functionality
"""
def __init__(self, keys, axis=0):
"""
Parameters
----------
keys : indexable object
sequence of keys to group by
axis : int, optional
axis to regard as the key-sequence, in case keys is multi-dimensional
See Also
--------
numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
"""
self.index = as_index(keys, axis)
#forward interesting/'public' index properties
@property
def unique(self):
"""unique keys"""
return self.index.unique
@property
def count(self):
"""count of each unique key"""
return self.index.count
@property
def inverse(self):
"""mapping such that unique[inverse]==keys"""
return self.index.inverse
@property
def groups(self):
"""int, number of groups formed by the keys"""
return self.index.groups
#some different methods of chopping up a set of values by key
def split_iterable_as_iterable(self, values):
"""Group iterable into iterables, in the order of the keys
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
iterable of items in values
Notes
-----
Memory consumption depends on the amount of sorting required
Worst case, if index.sorter[-1] = 0, we need to consume the entire value iterable,
before we can start yielding any output
But to the extent that the keys are already sorted, the grouping is lazy
"""
values = iter(enumerate(values))
cache = dict()
def get_value(ti):
try:
return cache.pop(ti)
except:
while True:
i, v = next(values)
if i==ti:
return v
cache[i] = v
s = iter(self.index.sorter)
for c in self.count:
yield (get_value(i) for i in itertools.islice(s, int(c)))
def split_iterable_as_unordered_iterable(self, values):
"""Group iterable into iterables, without regard for the ordering of self.index.unique
key-group tuples are yielded as soon as they are complete
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
tuple of key, and a list of corresponding items in values
Notes
-----
This approach is lazy, insofar as grouped values are close in their iterable
"""
from collections import defaultdict
cache = defaultdict(list)
count = self.count
unique = self.unique
key = (lambda i: unique[i]) if isinstance(unique, np.ndarray) else (lambda i: tuple(c[i] for c in unique))
for i,v in zip(self.inverse, values):
cache[i].append(v)
if len(cache[i]) == count[i]:
yield key(i), cache.pop(i)
def split_sequence_as_iterable(self, values):
"""Group sequence into iterables
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
iterable of items in values
Notes
-----
This is the preferred method if values has random access, but we dont want it completely in memory.
Like a big memory mapped file, for instance
"""
print(self.count)
s = iter(self.index.sorter)
for c in self.count:
yield (values[i] for i in itertools.islice(s, int(c)))
def split_array_as_array(self, values):
"""Group ndarray into ndarray by means of reshaping
Parameters
----------
values : ndarray_like, [index.size, ...]
Returns
-------
ndarray, [groups, group_size, ...]
values grouped by key
Raises
------
AssertionError
This operation is only possible if index.uniform==True
"""
if not self.index.uniform:
raise ValueError("Array can only be split as array if all groups have the same size")
values = np.asarray(values)
values = values[self.index.sorter]
return values.reshape(self.groups, -1, *values.shape[1:])
def split_array_as_list(self, values):
"""Group values as a list of arrays, or a jagged-array
Parameters
----------
values : ndarray, [keys, ...]
Returns
-------
list of length self.groups of ndarray, [key_count, ...]
"""
values = np.asarray(values)
values = values[self.index.sorter]
return np.split(values, self.index.slices[1:-1], axis=0)
def split(self, values):
"""some sensible defaults"""
try:
return self.split_array_as_array(values)
except:
# FIXME: change to iter in python 3?
return self.split_array_as_list(values)
def __call__(self, values):
"""not sure how i feel about this. explicit is better than implict?"""
return self.unique, self.split(values)
# ufunc based reduction methods. should they return unique keys by default?
def reduce(self, values, operator=np.add, axis=0, dtype=None):
"""Reduce the values over identical key groups, using the given ufunc
reduction is over the first axis, which should have elements corresponding to the keys
all other axes are treated indepenently for the sake of this reduction
Parameters
----------
values : ndarray, [keys, ...]
values to perform reduction over
operator : numpy.ufunc
a numpy ufunc, such as np.add or np.sum
axis : int, optional
the axis to reduce over
dtype : output dtype
Returns
-------
ndarray, [groups, ...]
values reduced by operator over the key-groups
"""
values = np.take(values, self.index.sorter, axis=axis)
return operator.reduceat(values, self.index.start, axis=axis, dtype=dtype)
def sum(self, values, axis=0, dtype=None):
"""compute the sum over each group
Parameters
----------
values : array_like, [keys, ...]
values to sum per group
axis : int, optional
alternative reduction axis for values
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, dtype=dtype)
def prod(self, values, axis=0, dtype=None):
"""compute the product over each group
Parameters
----------
values : array_like, [keys, ...]
values to multiply per group
axis : int, optional
alternative reduction axis for values
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, dtype=dtype, operator=np.multiply)
def mean(self, values, axis=0, weights=None, dtype=None):
"""compute the mean over each group
Parameters
----------
values : array_like, [keys, ...]
values to take average of per group
axis : int, optional
alternative reduction axis for values
weights : ndarray, [keys, ...], optional
weight to use for each value
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
if weights is None:
result = self.reduce(values, axis=axis, dtype=dtype)
shape = [1] * values.ndim
shape[axis] = self.groups
weights = self.count.reshape(shape)
else:
weights = np.asarray(weights)
result = self.reduce(values * weights, axis=axis, dtype=dtype)
weights = self.reduce(weights, axis=axis, dtype=dtype)
return self.unique, result / weights
def var(self, values, axis=0, weights=None, dtype=None):
"""compute the variance over each group
Parameters
----------
values : array_like, [keys, ...]
values to take variance of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
unique, mean = self.mean(values, axis, weights, dtype)
err = values - mean.take(self.inverse, axis)
if weights is None:
shape = [1] * values.ndim
shape[axis] = self.groups
group_weights = self.count.reshape(shape)
var = self.reduce(err ** 2, axis=axis, dtype=dtype)
else:
weights = np.asarray(weights)
group_weights = self.reduce(weights, axis=axis, dtype=dtype)
var = self.reduce(weights * err ** 2, axis=axis, dtype=dtype)
return unique, var / group_weights
def std(self, values, axis=0, weights=None, dtype=None):
"""standard deviation over each group
Parameters
----------
values : array_like, [keys, ...]
values to take standard deviation of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
unique, var = self.var(values, axis, weights, dtype)
return unique, np.sqrt(var)
    def median(self, values, axis=0, average=True):
        """compute the median value over each group.

        Parameters
        ----------
        values : array_like, [keys, ...]
            values to compute the median of per group
        axis : int, optional
            alternative reduction axis for values
        average : bool, optional
            when average is true, the average of the two central values is taken for groups with an even key-count
        Returns
        -------
        unique: ndarray, [groups]
            unique keys
        reduced : ndarray, [groups, ...]
            value array, reduced over groups
        """
        # start+stop of each group sum to twice the group's central position in
        # the sorted order; lo/hi are the two (possibly equal) middle indices
        mid_2 = self.index.start + self.index.stop
        hi = (mid_2    ) // 2
        lo = (mid_2 - 1) // 2
        #need this indirection for lex-index compatibility
        sorted_group_rank_per_key = self.index.sorted_group_rank_per_key
        def median1d(slc):
            #place values at correct keys; preconditions the upcoming lexsort
            slc    = slc[self.index.sorter]
            #refine value sorting within each keygroup
            sorter = np.lexsort((slc, sorted_group_rank_per_key))
            slc    = slc[sorter]
            # either the middle element, or the mean of the two middle elements
            return (slc[lo]+slc[hi]) / 2 if average else slc[hi]
        values = np.asarray(values)
        if values.ndim>1:   #is trying to skip apply_along_axis somewhat premature optimization?
            values = np.apply_along_axis(median1d, axis, values)
        else:
            values = median1d(values)
        return self.unique, values
    def mode(self, values, weights=None):
        """compute the mode within each group.

        Parameters
        ----------
        values : array_like, [keys, ...]
            values to compute the mode of per group
        weights : array_like, [keys], float, optional
            optional weight associated with each entry in values
        Returns
        -------
        unique: ndarray, [groups]
            unique keys
        reduced : ndarray, [groups, ...]
            value array, reduced over groups
        """
        if weights is None:
            # multiplicity of every (group, value) pair
            unique, weights = npi.count((self.index.sorted_group_rank_per_key, values))
        else:
            # summed weight of every (group, value) pair
            unique, weights = npi.group_by((self.index.sorted_group_rank_per_key, values)).sum(weights)
        # within each group, select the value with the largest count/weight
        x, bin = npi.group_by(unique[0]).argmax(weights)
        return x, unique[1][bin]
def min(self, values, axis=0):
"""return the minimum within each group
Parameters
----------
values : array_like, [keys, ...]
values to take minimum of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, np.minimum, axis)
def max(self, values, axis=0):
"""return the maximum within each group
Parameters
----------
values : array_like, [keys, ...]
values to take maximum of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, np.maximum, axis)
def first(self, values, axis=0):
"""return values at first occurance of its associated key
Parameters
----------
values : array_like, [keys, ...]
values to pick the first value of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, np.take(values, self.index.sorter[self.index.start], axis)
def last(self, values, axis=0):
"""return values at last occurance of its associated key
Parameters
----------
values : array_like, [keys, ...]
values to pick the last value of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, np.take(values, self.index.sorter[self.index.stop-1], axis)
def any(self, values, axis=0):
"""compute if any item evaluates to true in each group
Parameters
----------
values : array_like, [keys, ...]
values to take boolean predicate over per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...], np.bool
value array, reduced over groups
"""
values = np.asarray(values)
if not values.dtype == np.bool:
values = values != 0
return self.unique, self.reduce(values, axis=axis) > 0
def all(self, values, axis=0):
"""compute if all items evaluates to true in each group
Parameters
----------
values : array_like, [keys, ...]
values to take boolean predicate over per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...], np.bool
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, operator=np.multiply) != 0
    def argmin(self, values):
        """return the index into values corresponding to the minimum value of the group

        Parameters
        ----------
        values : array_like, [keys]
            values to pick the argmin of per group
        Returns
        -------
        unique: ndarray, [groups]
            unique keys
        argmin : ndarray, [groups]
            index into value array, representing the argmin per group
        """
        keys, minima = self.min(values)
        # broadcast each group minimum back onto the members of that group
        minima = minima[self.inverse]
        # select the first occurence of the minimum in each group
        index = as_index((self.inverse, values == minima))
        # each group contains its minimum, so the (group, True) subgroups are
        # the trailing `groups` subgroups of the composite index
        return keys, index.sorter[index.start[-self.groups:]]
    def argmax(self, values):
        """return the index into values corresponding to the maximum value of the group

        Parameters
        ----------
        values : array_like, [keys]
            values to pick the argmax of per group
        Returns
        -------
        unique: ndarray, [groups]
            unique keys
        argmax : ndarray, [groups]
            index into value array, representing the argmax per group
        """
        keys, maxima = self.max(values)
        # broadcast each group maximum back onto the members of that group
        maxima = maxima[self.inverse]
        # select the first occurence of the maximum in each group
        index = as_index((self.inverse, values == maxima))
        # each group contains its maximum, so the (group, True) subgroups are
        # the trailing `groups` subgroups of the composite index
        return keys, index.sorter[index.start[-self.groups:]]
#implement iter interface? could simply do zip( group_by(keys)(values)), no?
def group_by(keys, values=None, reduction=None, axis=0):
    """construct a grouping object on the given keys, optionally performing the given reduction on the given values

    Parameters
    ----------
    keys : indexable object
        keys to group by
    values : array_like, optional
        sequence of values, of the same length as keys;
        when given, the values are grouped (and optionally reduced) by key
    reduction : lambda, optional
        reduction function to apply to the values in each group
    axis : int, optional
        axis to regard as the key-sequence, in case keys is multi-dimensional

    Returns
    -------
    iterable
        if values is None, a GroupBy object of the given keys object
        if reduction is None, an tuple of a sequence of unique keys and a sequence of grouped values
        else, a sequence of tuples of unique keys and reductions of values over that key-group

    See Also
    --------
    numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
    """
    grouping = GroupBy(keys, axis)
    if values is None:
        return grouping
    grouped_values = grouping.split(values)
    if reduction is None:
        return grouping.unique, grouped_values
    # pair every unique key with the reduction of its group of values
    reduced = []
    for key, group in zip(grouping.unique, grouped_values):
        reduced.append((key, reduction(group)))
    return reduced
__all__ = ['group_by']
| 573 | 0 | 61 |
87beae9c101a147576d3bb34a1271a7aab30736e | 6,089 | py | Python | plugins/aea-cli-ipfs/aea_cli_ipfs/registry.py | valory-xyz/agents-aea | 8f38efa96041b0156ed1ae328178e395dbabf2fc | [
"Apache-2.0"
] | null | null | null | plugins/aea-cli-ipfs/aea_cli_ipfs/registry.py | valory-xyz/agents-aea | 8f38efa96041b0156ed1ae328178e395dbabf2fc | [
"Apache-2.0"
] | null | null | null | plugins/aea-cli-ipfs/aea_cli_ipfs/registry.py | valory-xyz/agents-aea | 8f38efa96041b0156ed1ae328178e395dbabf2fc | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2021-2022 Valory AG
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""Module with methods for ipfs registry."""
import json
import logging
import os
from pathlib import Path
from typing import Dict, List, Optional, Union
import jsonschema
from aea_cli_ipfs.exceptions import HashNotProvided
from aea_cli_ipfs.ipfs_utils import DownloadError, IPFSTool, NodeError
from aea.cli.registry.settings import DEFAULT_IPFS_URL
from aea.cli.utils.config import get_ipfs_node_multiaddr
from aea.configurations.base import PublicId
_default_logger = logging.getLogger(__name__)
LocalRegistry = Dict[str, Dict[str, str]]
LOCAL_REGISTRY_PATH = os.path.join(
os.path.expanduser("~"), ".aea", "local_registry.json"
)
LOCAL_REGISTRY_DEFAULT: LocalRegistry = {
"protocols": {},
"skills": {},
"connections": {},
"contracts": {},
"agents": {},
}
LOCAL_REGISTRY_SCHEMA = {
"type": "object",
"properties": {
"protocols": {
"type": "object",
"propertyNames": {"pattern": r"^[a-z][a-z0-9_]+\/[a-z_0-9]+:\d\.\d\.\d$"},
},
"skills": {"type": "object"},
"connections": {"type": "object"},
"contracts": {"type": "object"},
"agents": {"type": "object"},
},
"required": ["protocols", "skills", "connections", "contracts", "agents"],
}
def validate_registry(registry_data: LocalRegistry) -> None:
    """
    Validate local registry data.

    :param registry_data: json like object containing registry data.
    :raises ValueError: if the data does not conform to LOCAL_REGISTRY_SCHEMA.
    """
    try:
        jsonschema.validate(registry_data, schema=LOCAL_REGISTRY_SCHEMA)
    except jsonschema.ValidationError as e:
        _default_logger.debug("Registry Not Valid")
        # chain the original jsonschema error so the schema failure is not lost
        raise ValueError(str(e)) from e
def write_local_registry(
    registry_data: LocalRegistry, registry_path: str = LOCAL_REGISTRY_PATH
) -> None:
    """
    Write registry data to file.

    The data is validated against the registry schema before being written.

    :param registry_data: json like object containing registry data.
    :param registry_path: local registry path.
    """
    validate_registry(registry_data)
    with open(registry_path, mode="w+", encoding="utf-8") as registry_file:
        json.dump(registry_data, registry_file)
def load_local_registry(registry_path: str = LOCAL_REGISTRY_PATH) -> LocalRegistry:
    """
    Return local registry data, creating an empty registry file if needed.

    :param registry_path: local registry path.
    :return: json like object containing registry data.
    """
    local_registry_path = Path(registry_path)
    if not local_registry_path.is_file():
        # create the registry at the *requested* path; previously the default
        # LOCAL_REGISTRY_PATH was always written here, silently ignoring the
        # ``registry_path`` argument
        write_local_registry(LOCAL_REGISTRY_DEFAULT, registry_path)
        return LOCAL_REGISTRY_DEFAULT
    with open(local_registry_path, mode="r", encoding="utf-8") as fp:
        registry_data = json.load(fp)
    validate_registry(registry_data)
    return registry_data
def get_ipfs_hash_from_public_id(
    item_type: str,
    public_id: PublicId,
    registry_path: str = LOCAL_REGISTRY_PATH,
) -> Optional[str]:
    """
    Get IPFS hash from local registry.

    :param item_type: singular item type; looked up under the "<item_type>s" section.
    :param public_id: public id of the package; a "latest" version is resolved
        to the highest registered version with the same author/name prefix.
    :param registry_path: local registry path.
    :return: the registered IPFS hash, or None if the package is not registered.
    """
    registry_data = load_local_registry(registry_path=registry_path)
    if public_id.package_version.is_latest:
        # resolve "latest": collect all registered versions sharing the same
        # author/name prefix and pick the highest one
        package_versions: List[PublicId] = [
            PublicId.from_str(_public_id)
            for _public_id in registry_data.get(f"{item_type}s", {}).keys()
            if public_id.same_prefix(PublicId.from_str(_public_id))
        ]
        package_versions = list(
            reversed(sorted(package_versions, key=lambda x: x.package_version))
        )
        if len(package_versions) == 0:
            return None
        public_id, *_ = package_versions
    return registry_data.get(f"{item_type}s", {}).get(str(public_id), None)
def register_item_to_local_registry(
    item_type: str,
    public_id: Union[str, PublicId],
    package_hash: str,
    registry_path: str = LOCAL_REGISTRY_PATH,
) -> None:
    """
    Add PublicId to hash mapping in the local registry.

    :param item_type: item type.
    :param public_id: public id of package.
    :param package_hash: hash of package.
    :param registry_path: local registry path.
    """
    registry = load_local_registry(registry_path=registry_path)
    # record the mapping in the "<item_type>s" section and persist it
    item_section = registry[f"{item_type}s"]
    item_section[str(public_id)] = str(package_hash)
    write_local_registry(registry, registry_path)
def fetch_ipfs(
    item_type: str,
    public_id: PublicId,
    dest: str,
    remote: bool = True,
) -> Optional[Path]:
    """
    Fetch a package from IPFS node.

    :param item_type: item type, used to resolve a hash from the local registry.
    :param public_id: public id of the package; may carry an explicit hash.
    :param dest: destination path for the downloaded package.
    :param remote: use the configured remote node rather than the default local one.
    :return: absolute path of the downloaded package.
    :raises HashNotProvided: if no hash could be determined for the package.
    """
    if remote:
        ipfs_tool = IPFSTool(get_ipfs_node_multiaddr())
    else:
        ipfs_tool = IPFSTool(addr=DEFAULT_IPFS_URL)
    try:
        package_hash = public_id.hash
    except ValueError:
        # no hash embedded in the public id; fall back to the local registry
        # (only done for local fetches)
        package_hash = (
            None if remote else get_ipfs_hash_from_public_id(item_type, public_id)
        )
    if package_hash is None:
        raise HashNotProvided(f"Please provide hash; Public id {public_id}.")
    try:
        ipfs_tool.check_ipfs_node_running()
    except NodeError:  # pragma: nocover
        if not remote:
            # a local node can be started on demand
            ipfs_tool.daemon.start()
        else:
            raise Exception(f"Cannot connect to node with addr: {ipfs_tool.addr}")
    try:
        # download into the parent directory of ``dest``
        *_download_dir, _ = os.path.split(dest)
        download_dir = os.path.sep.join(_download_dir)
        ipfs_tool.download(package_hash, download_dir)
        package_path = Path(dest).absolute()
        ipfs_tool.daemon.stop()
        return package_path
    except DownloadError as e:  # pragma: nocover
        ipfs_tool.daemon.stop()
        raise Exception(str(e)) from e
| 31.225641 | 86 | 0.658072 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2021-2022 Valory AG
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""Module with methods for ipfs registry."""
import json
import logging
import os
from pathlib import Path
from typing import Dict, List, Optional, Union
import jsonschema
from aea_cli_ipfs.exceptions import HashNotProvided
from aea_cli_ipfs.ipfs_utils import DownloadError, IPFSTool, NodeError
from aea.cli.registry.settings import DEFAULT_IPFS_URL
from aea.cli.utils.config import get_ipfs_node_multiaddr
from aea.configurations.base import PublicId
_default_logger = logging.getLogger(__name__)
LocalRegistry = Dict[str, Dict[str, str]]
LOCAL_REGISTRY_PATH = os.path.join(
os.path.expanduser("~"), ".aea", "local_registry.json"
)
LOCAL_REGISTRY_DEFAULT: LocalRegistry = {
"protocols": {},
"skills": {},
"connections": {},
"contracts": {},
"agents": {},
}
LOCAL_REGISTRY_SCHEMA = {
"type": "object",
"properties": {
"protocols": {
"type": "object",
"propertyNames": {"pattern": r"^[a-z][a-z0-9_]+\/[a-z_0-9]+:\d\.\d\.\d$"},
},
"skills": {"type": "object"},
"connections": {"type": "object"},
"contracts": {"type": "object"},
"agents": {"type": "object"},
},
"required": ["protocols", "skills", "connections", "contracts", "agents"],
}
def validate_registry(registry_data: LocalRegistry) -> None:
    """
    Validate local registry data.

    :param registry_data: json like object containing registry data.
    :raises ValueError: if the data does not conform to LOCAL_REGISTRY_SCHEMA.
    """
    try:
        jsonschema.validate(registry_data, schema=LOCAL_REGISTRY_SCHEMA)
    except jsonschema.ValidationError as e:
        _default_logger.debug("Registry Not Valid")
        # chain the original jsonschema error so the schema failure is not lost
        raise ValueError(str(e)) from e
def write_local_registry(
    registry_data: LocalRegistry, registry_path: str = LOCAL_REGISTRY_PATH
) -> None:
    """
    Write registry data to file.

    The data is validated against the registry schema before being written.

    :param registry_data: json like object containing registry data.
    :param registry_path: local registry path.
    """
    validate_registry(registry_data)
    with open(registry_path, mode="w+", encoding="utf-8") as registry_file:
        json.dump(registry_data, registry_file)
def load_local_registry(registry_path: str = LOCAL_REGISTRY_PATH) -> LocalRegistry:
    """
    Return local registry data, creating an empty registry file if needed.

    :param registry_path: local registry path.
    :return: json like object containing registry data.
    """
    local_registry_path = Path(registry_path)
    if not local_registry_path.is_file():
        # create the registry at the *requested* path; previously the default
        # LOCAL_REGISTRY_PATH was always written here, silently ignoring the
        # ``registry_path`` argument
        write_local_registry(LOCAL_REGISTRY_DEFAULT, registry_path)
        return LOCAL_REGISTRY_DEFAULT
    with open(local_registry_path, mode="r", encoding="utf-8") as fp:
        registry_data = json.load(fp)
    validate_registry(registry_data)
    return registry_data
def get_ipfs_hash_from_public_id(
    item_type: str,
    public_id: PublicId,
    registry_path: str = LOCAL_REGISTRY_PATH,
) -> Optional[str]:
    """
    Get IPFS hash from local registry.

    :param item_type: singular item type; looked up under the "<item_type>s" section.
    :param public_id: public id of the package; a "latest" version is resolved
        to the highest registered version with the same author/name prefix.
    :param registry_path: local registry path.
    :return: the registered IPFS hash, or None if the package is not registered.
    """
    registry_data = load_local_registry(registry_path=registry_path)
    if public_id.package_version.is_latest:
        # resolve "latest": collect all registered versions sharing the same
        # author/name prefix and pick the highest one
        package_versions: List[PublicId] = [
            PublicId.from_str(_public_id)
            for _public_id in registry_data.get(f"{item_type}s", {}).keys()
            if public_id.same_prefix(PublicId.from_str(_public_id))
        ]
        package_versions = list(
            reversed(sorted(package_versions, key=lambda x: x.package_version))
        )
        if len(package_versions) == 0:
            return None
        public_id, *_ = package_versions
    return registry_data.get(f"{item_type}s", {}).get(str(public_id), None)
def register_item_to_local_registry(
    item_type: str,
    public_id: Union[str, PublicId],
    package_hash: str,
    registry_path: str = LOCAL_REGISTRY_PATH,
) -> None:
    """
    Add PublicId to hash mapping in the local registry.

    :param item_type: item type.
    :param public_id: public id of package.
    :param package_hash: hash of package.
    :param registry_path: local registry path.
    """
    registry = load_local_registry(registry_path=registry_path)
    # record the mapping in the "<item_type>s" section and persist it
    item_section = registry[f"{item_type}s"]
    item_section[str(public_id)] = str(package_hash)
    write_local_registry(registry, registry_path)
def fetch_ipfs(
    item_type: str,
    public_id: PublicId,
    dest: str,
    remote: bool = True,
) -> Optional[Path]:
    """
    Fetch a package from IPFS node.

    :param item_type: item type, used to resolve a hash from the local registry.
    :param public_id: public id of the package; may carry an explicit hash.
    :param dest: destination path for the downloaded package.
    :param remote: use the configured remote node rather than the default local one.
    :return: absolute path of the downloaded package.
    :raises HashNotProvided: if no hash could be determined for the package.
    """
    if remote:
        ipfs_tool = IPFSTool(get_ipfs_node_multiaddr())
    else:
        ipfs_tool = IPFSTool(addr=DEFAULT_IPFS_URL)
    try:
        package_hash = public_id.hash
    except ValueError:
        # no hash embedded in the public id; fall back to the local registry
        # (only done for local fetches)
        package_hash = (
            None if remote else get_ipfs_hash_from_public_id(item_type, public_id)
        )
    if package_hash is None:
        raise HashNotProvided(f"Please provide hash; Public id {public_id}.")
    try:
        ipfs_tool.check_ipfs_node_running()
    except NodeError:  # pragma: nocover
        if not remote:
            # a local node can be started on demand
            ipfs_tool.daemon.start()
        else:
            raise Exception(f"Cannot connect to node with addr: {ipfs_tool.addr}")
    try:
        # download into the parent directory of ``dest``
        *_download_dir, _ = os.path.split(dest)
        download_dir = os.path.sep.join(_download_dir)
        ipfs_tool.download(package_hash, download_dir)
        package_path = Path(dest).absolute()
        ipfs_tool.daemon.stop()
        return package_path
    except DownloadError as e:  # pragma: nocover
        ipfs_tool.daemon.stop()
        raise Exception(str(e)) from e
| 0 | 0 | 0 |
61ef1912263dabb8f668d8da6532ccd6c0f92b63 | 639 | py | Python | library/infrastructure_architecture/event_sourced_architecture/event_queue_subscriber.py | piotrkluch/billenium-keras-api-python | 0d7c589dac150ab5363f33f1f6024c44a667d0ae | [
"MIT"
] | null | null | null | library/infrastructure_architecture/event_sourced_architecture/event_queue_subscriber.py | piotrkluch/billenium-keras-api-python | 0d7c589dac150ab5363f33f1f6024c44a667d0ae | [
"MIT"
] | null | null | null | library/infrastructure_architecture/event_sourced_architecture/event_queue_subscriber.py | piotrkluch/billenium-keras-api-python | 0d7c589dac150ab5363f33f1f6024c44a667d0ae | [
"MIT"
] | null | null | null |
# TODO: Turn this into a more general class which can subscribe and unsubscribe from
# TODO: anything, with a context manager interface.
from library.domain.events import subscribe, unsubscribe, DomainEvent
| 27.782609 | 84 | 0.740219 |
# TODO: Turn this into a more general class which can subscribe and unsubscribe from
# TODO: anything, with a context manager interface.
from library.domain.events import subscribe, unsubscribe, DomainEvent
class EventQueueSubscriber:
    """Subscribes to every DomainEvent and appends published events to a queue."""
    def __init__(self, event_queue):
        # any object exposing ``append`` that will receive the published events
        self._event_queue = event_queue
        subscribe(EventQueueSubscriber._all_events, self.enqueue_event)
    def enqueue_event(self, event):
        """Event-bus handler: append the published event to the queue."""
        self._event_queue.append(event)
    @staticmethod
    def _all_events(event):
        """Predicate that matches every DomainEvent instance."""
        return isinstance(event, DomainEvent)
    def close(self):
        """Detach this subscriber's handler from the event bus."""
        unsubscribe(self._all_events, self.enqueue_event)
| 274 | 132 | 23 |
cd407ac26d60f7b87f183c2fb73a65c50bfe7222 | 153 | py | Python | bauh/api/user.py | DN-debug/bauh | 83aeccae87d7fe26f6c5bf24be005288d5d54d84 | [
"Zlib"
] | null | null | null | bauh/api/user.py | DN-debug/bauh | 83aeccae87d7fe26f6c5bf24be005288d5d54d84 | [
"Zlib"
] | null | null | null | bauh/api/user.py | DN-debug/bauh | 83aeccae87d7fe26f6c5bf24be005288d5d54d84 | [
"Zlib"
] | null | null | null | import os
from typing import Optional
| 21.857143 | 68 | 0.712418 | import os
from typing import Optional
def is_root(user_id: Optional[int] = None):
    """Return True when the given user id — or the current process uid — is root (0)."""
    if user_id is None:
        # no id supplied: inspect the uid of the running process
        return os.getuid() == 0
    return user_id == 0
| 91 | 0 | 23 |
909c3b54cecc30d25635aaefe0f6af45baae38eb | 4,821 | py | Python | preprocess.py | lionben89/NLP-3 | 5a0eb40fd40bb1c7c67d38a8e1b3478bac00afbb | [
"MIT"
] | null | null | null | preprocess.py | lionben89/NLP-3 | 5a0eb40fd40bb1c7c67d38a8e1b3478bac00afbb | [
"MIT"
] | null | null | null | preprocess.py | lionben89/NLP-3 | 5a0eb40fd40bb1c7c67d38a8e1b3478bac00afbb | [
"MIT"
] | null | null | null | import pandas as pd
from sklearn import preprocessing
import nltk
nltk.download('punkt')
dataset_structure = None
TIMESTAMP_FEATURES = {
"timestamp": True,
"day_of_week": True,
"day_of_month": True,
"month": True,
"hour": True,
"minute": True,
"year": True
}
def preprocess(filename, train=True):
    """Run the full preprocessing pipeline on a tsv dataset file.

    Args:
        filename ([string]): [filename with dataset as tsv]
        train ([bool]): when True the file is expected to contain the training
            columns (tweet_id ... device); otherwise only the test columns

    Returns:
        [dataframe]: [dataset after preprocess]
    """
    train_structure = [{"name": "tweet_id", "func": empty_func},
                       {"name": "user_handle", "func": dummy_encoder},
                       {"name": "text", "func": text_preprocess},
                       {"name": "timestamp", "func": timestamp_preprocess},
                       {"name": "device", "func": label_encoder}]
    # the test layout is the training layout without the id and label columns
    structure = train_structure if train else train_structure[1:-1]
    names = [col["name"] for col in structure]
    ds = load_data(filename, names)
    ds.dropna(thresh=0, inplace=True)
    for position, col in enumerate(structure):
        ds = col["func"](ds, position, col["name"])
    ds.reset_index(drop=True, inplace=True)
    return ds
def load_data(filename, column_names):
    """Load a tab-separated dataset file into a dataframe.

    Args:
        filename ([string]): [filename]
        column_names: names to assign to the columns of the file

    Returns:
        [dataframe]: [raw dataset]
    """
    return pd.read_table(filename, names=column_names)
def dummy_encoder(ds, column, name):
    """One-hot encode a single column of the dataframe.

    Args:
        ds ([dataframe]): dataset
        column ([integer]): column index
        name ([string]): column name

    Returns:
        [dataframe]: dataset with the original column replaced by
        "<name>_<value>" indicator columns
    """
    indicators = pd.get_dummies(ds[name], prefix=name)
    without_original = ds.drop(columns=[name])
    return pd.concat([without_original, indicators], axis=1)
def text_preprocess(ds, column, name):
    """Lower-case, whitespace-normalise, tokenise and de-punctuate the text column.

    After this step the column holds lists of lower-cased word tokens.

    Args:
        ds ([dataframe]): dataset
        column ([integer]): column index
        name ([string]): column name

    Returns:
        [dataframe]: dataset after transformation
    """
    text = ds[name]
    text = text.str.lower()
    # NOTE(review): relies on module-level helpers remove_whitespace and
    # remove_punct being defined — confirm they are present in this module
    text = text.apply(remove_whitespace)
    # nltk 'punkt' tokeniser (downloaded at module import time)
    text = text.apply(lambda X: nltk.word_tokenize(X))
    text = text.apply(lambda X: remove_punct(X))
    ds[name] = text
    return ds
def timestamp_preprocess(ds, column, name):
    """Expand the timestamp column into the features enabled in TIMESTAMP_FEATURES.

    Args:
        ds ([dataframe]): dataset
        column ([integer]): column index
        name ([string]): column name

    Returns:
        [dataframe]: dataset after transformation
    """
    ts = pd.to_datetime(ds[name])
    # one extractor per supported feature, applied to the parsed timestamps
    extractors = {
        "timestamp": lambda t: t,
        "day_of_week": lambda t: t.apply(lambda X: X.day_of_week),
        "day_of_month": lambda t: t.apply(lambda X: X.day),
        "month": lambda t: t.apply(lambda X: X.month),
        "hour": lambda t: t.apply(lambda X: X.hour),
        "minute": lambda t: t.apply(lambda X: X.minute),
        "year": lambda t: t.apply(lambda X: X.year),
    }
    for feature, enabled in TIMESTAMP_FEATURES.items():
        extract = extractors.get(feature)
        if enabled is not None and extract is not None:
            ds[feature] = extract(ts)
    return ds
def label_encoder(ds, column, name):
    """Encode the device labels of a column as integers (label encoder).

    Rows whose label is not in {"android", "iphone"} are dropped before encoding.

    Args:
        ds ([dataframe]): dataset
        column ([integer]): column index
        name ([string]): column name

    Returns:
        [dataframe]: dataset after transformation
    """
    alowed_labels = ["android", "iphone"]
    # keep only rows carrying a recognised device label
    ds = ds[ds[name].isin(alowed_labels)]
    le = preprocessing.LabelEncoder()
    le.fit(ds[name])
    ds[name] = le.transform(ds[name])
    # LabelEncoder orders classes lexicographically: android -> 0, iphone -> 1
    # (the original comment here claimed the opposite mapping)
    return ds
| 28.868263 | 115 | 0.602365 | import pandas as pd
from sklearn import preprocessing
import nltk
nltk.download('punkt')
dataset_structure = None
TIMESTAMP_FEATURES = {
"timestamp": True,
"day_of_week": True,
"day_of_month": True,
"month": True,
"hour": True,
"minute": True,
"year": True
}
def preprocess(filename, train=True):
    """Run the full preprocessing pipeline on a tsv dataset file.

    Args:
        filename ([string]): [filename with dataset as tsv]
        train ([bool]): when True the file is expected to contain the training
            columns (tweet_id ... device); otherwise only the test columns

    Returns:
        [dataframe]: [dataset after preprocess]
    """
    train_structure = [{"name": "tweet_id", "func": empty_func},
                       {"name": "user_handle", "func": dummy_encoder},
                       {"name": "text", "func": text_preprocess},
                       {"name": "timestamp", "func": timestamp_preprocess},
                       {"name": "device", "func": label_encoder}]
    # the test layout is the training layout without the id and label columns
    structure = train_structure if train else train_structure[1:-1]
    names = [col["name"] for col in structure]
    ds = load_data(filename, names)
    ds.dropna(thresh=0, inplace=True)
    for position, col in enumerate(structure):
        ds = col["func"](ds, position, col["name"])
    ds.reset_index(drop=True, inplace=True)
    return ds
def load_data(filename, column_names):
    """Load a tab-separated dataset file into a dataframe.

    Args:
        filename ([string]): [filename]
        column_names: names to assign to the columns of the file

    Returns:
        [dataframe]: [raw dataset]
    """
    return pd.read_table(filename, names=column_names)
def empty_func(ds, column, name):
    """Identity column handler: leave the dataframe untouched."""
    return ds
def dummy_encoder(ds, column, name):
    """One-hot encode a single column of the dataframe.

    Args:
        ds ([dataframe]): dataset
        column ([integer]): column index
        name ([string]): column name

    Returns:
        [dataframe]: dataset with the original column replaced by
        "<name>_<value>" indicator columns
    """
    indicators = pd.get_dummies(ds[name], prefix=name)
    without_original = ds.drop(columns=[name])
    return pd.concat([without_original, indicators], axis=1)
def remove_whitespace(text):
    """Collapse every run of whitespace characters in *text* into a single space."""
    tokens = text.split()
    return " ".join(tokens)
def remove_punct(text):
    """Drop punctuation by keeping only word-character tokens.

    Args:
        text: list of token strings

    Returns:
        list of tokens with all punctuation stripped out
    """
    # local import keeps this edit self-contained without touching module imports
    import re
    # equivalent to nltk.tokenize.RegexpTokenizer(r"\w+").tokenize(...) but
    # avoids constructing a fresh tokenizer object on every call and drops the
    # nltk dependency for what is a plain regex scan
    return re.findall(r"\w+", " ".join(text))
def text_preprocess(ds, column, name):
    """Lower-case, whitespace-normalise, tokenise and de-punctuate the text column.

    After this step the column holds lists of lower-cased word tokens.

    Args:
        ds ([dataframe]): dataset
        column ([integer]): column index
        name ([string]): column name

    Returns:
        [dataframe]: dataset after transformation
    """
    text = ds[name]
    text = text.str.lower()
    text = text.apply(remove_whitespace)
    # nltk 'punkt' tokeniser (downloaded at module import time)
    text = text.apply(lambda X: nltk.word_tokenize(X))
    text = text.apply(lambda X: remove_punct(X))
    ds[name] = text
    return ds
def timestamp_preprocess(ds, column, name):
    """Expand the timestamp column into the features enabled in TIMESTAMP_FEATURES.

    Args:
        ds ([dataframe]): dataset
        column ([integer]): column index
        name ([string]): column name

    Returns:
        [dataframe]: dataset after transformation
    """
    ts = pd.to_datetime(ds[name])
    # one extractor per supported feature, applied to the parsed timestamps
    extractors = {
        "timestamp": lambda t: t,
        "day_of_week": lambda t: t.apply(lambda X: X.day_of_week),
        "day_of_month": lambda t: t.apply(lambda X: X.day),
        "month": lambda t: t.apply(lambda X: X.month),
        "hour": lambda t: t.apply(lambda X: X.hour),
        "minute": lambda t: t.apply(lambda X: X.minute),
        "year": lambda t: t.apply(lambda X: X.year),
    }
    for feature, enabled in TIMESTAMP_FEATURES.items():
        extract = extractors.get(feature)
        if enabled is not None and extract is not None:
            ds[feature] = extract(ts)
    return ds
def label_encoder(ds, column, name):
    """Encode the device labels of a column as integers (label encoder).

    Rows whose label is not in {"android", "iphone"} are dropped before encoding.

    Args:
        ds ([dataframe]): dataset
        column ([integer]): column index
        name ([string]): column name

    Returns:
        [dataframe]: dataset after transformation
    """
    alowed_labels = ["android", "iphone"]
    # keep only rows carrying a recognised device label
    ds = ds[ds[name].isin(alowed_labels)]
    le = preprocessing.LabelEncoder()
    le.fit(ds[name])
    ds[name] = le.transform(ds[name])
    # LabelEncoder orders classes lexicographically: android -> 0, iphone -> 1
    # (the original comment here claimed the opposite mapping)
    return ds
| 183 | 0 | 69 |
af45aaf0a681c533ca3f1003ffbce78d13e4a35a | 440 | py | Python | course/migrations/0002_auto_20200813_1721.py | Seals6/stucoursetest | 7b8f63ac7bf2b4066a9b7af9672838d03ab859ad | [
"MIT"
] | 11 | 2021-01-13T05:12:24.000Z | 2022-03-17T16:29:30.000Z | course/migrations/0002_auto_20200813_1721.py | Seals6/stucoursetest | 7b8f63ac7bf2b4066a9b7af9672838d03ab859ad | [
"MIT"
] | 1 | 2021-04-21T04:16:11.000Z | 2021-04-21T04:17:14.000Z | course/migrations/0002_auto_20200813_1721.py | Seals6/stucoursetest | 7b8f63ac7bf2b4066a9b7af9672838d03ab859ad | [
"MIT"
] | 4 | 2021-04-26T02:35:49.000Z | 2021-12-12T09:28:23.000Z | # Generated by Django 2.2.11 on 2020-08-13 09:21
from django.db import migrations, models
| 23.157895 | 113 | 0.584091 | # Generated by Django 2.2.11 on 2020-08-13 09:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('course', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='course',
name='semester',
field=models.CharField(choices=[('Autumn', '上'), ('Spring', '下')], max_length=20, verbose_name='学期'),
),
]
| 0 | 333 | 23 |
8a5fd65aa659ee11c18cb48fa2c1913634d25078 | 5,647 | py | Python | src/reppy/__init__.py | pombredanne/reppy2 | 757dc5e86ceb647b5bd27a2467e38dd860f5bf0e | [
"MIT"
] | null | null | null | src/reppy/__init__.py | pombredanne/reppy2 | 757dc5e86ceb647b5bd27a2467e38dd860f5bf0e | [
"MIT"
] | 1 | 2015-10-13T12:48:23.000Z | 2015-10-13T12:48:23.000Z | src/reppy/__init__.py | pombredanne/reppy2 | 757dc5e86ceb647b5bd27a2467e38dd860f5bf0e | [
"MIT"
] | null | null | null | #! /usr/bin/env python
#
# Copyright (c) 2011 SEOmoz
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''A robot exclusion protocol parser. Because I could not find a good one.'''
__maintainer__ = 'Dan Lecocq'
__copyright__ = '2011-2014 SEOmoz'
__license__ = 'SEOmoz'
__version__ = '0.3.0'
__author__ = 'Dan Lecocq'
__status__ = 'Development'
__email__ = 'dan@moz.com'
#####################################################
# All things logging
#####################################################
import logging
# Package-wide logger: bare-message formatting on stderr.
logger = logging.getLogger('reppy')
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(message)s'))
# The handler itself passes DEBUG and up; the logger's own level below is
# the effective filter, so only ERROR+ is emitted until a caller lowers it.
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
#####################################################
# A couple utilities
#####################################################
import sys
import re
import time
import email.utils
try:
from urllib import parse as urlparse
except ImportError:
# Python 2
import urlparse
if sys.version_info[0] == 3:
long = int
#####################################################
# Import our exceptions at the global level
#####################################################
from .exceptions import ServerError, ReppyException
class Utility(object):
    '''Utility methods'''

    @staticmethod
    def hostname(url):
        '''Return a normalized, canonicalized version of the url's hostname'''
        return urlparse.urlparse(url).netloc

    @staticmethod
    def roboturl(url):
        '''Return a normalized uri to the robots.txt'''
        parts = urlparse.urlparse(url)
        return '{0}://{1}/robots.txt'.format(parts.scheme, parts.netloc)

    @staticmethod
    def short_user_agent(strng):
        '''Return a default user agent string to match, based on strng. For
        example, for 'MyUserAgent/1.0', it will generate 'MyUserAgent' '''
        # Everything before the first '/' is the short form; with no '/',
        # partition() leaves the whole string in the first element.
        return strng.partition('/')[0]

    @staticmethod
    def parse_time(strng):
        '''Parse an HTTP-style (i.e. email-style) time into a timestamp'''
        parsed = email.utils.parsedate_tz(strng)
        if parsed is None:
            # Reject data that does not parse as an RFC-style date
            raise ValueError("Invalid time.")
        if parsed[9] is None:
            # No explicit zone given: default the UTC offset (field 9) to 0
            fields = list(parsed)
            fields[9] = 0
            parsed = tuple(fields)
        return email.utils.mktime_tz(parsed)

    @staticmethod
    def get_ttl(headers, default):
        '''Extract the correct ttl from the provided headers, or default'''
        ttl = None
        # Cache-Control takes precedence over Expires (RFC 2616 Sec. 13.2.4)
        cache_control = headers.get('cache-control')
        if cache_control is not None:
            for directive in cache_control.split(','):
                name, _, value = directive.lower().partition('=')
                name, value = name.strip(), value.strip()
                if name in ('no-store', 'must-revalidate'):
                    # Caching is forbidden outright -> expire immediately
                    return 0
                if name == 'no-cache' and value == '':
                    # Only a bare no-cache (no =value) disables caching
                    return 0
                if name == 's-maxage':
                    try:
                        # s-maxage overrides max-age, so it short-circuits
                        return long(value)
                    except ValueError:
                        # Unparseable s-maxage: try the next directive
                        continue
                if name == 'max-age':
                    try:
                        ttl = long(value)
                    except ValueError:
                        # Unparseable max-age: try the next directive
                        continue
        if ttl is not None:
            # Cache-Control provided something usable; honor it
            return ttl
        # Otherwise fall back on Expires, measured against the server's Date
        expires = headers.get('expires')
        if expires:
            base = time.time()
            date_header = headers.get('date')
            if date_header is not None:
                try:
                    base = Utility.parse_time(date_header)
                except ValueError:
                    # Bad Date header: keep the local clock as the base
                    pass
            try:
                return Utility.parse_time(expires) - base
            except ValueError:
                pass
        return default
| 35.968153 | 78 | 0.571454 | #! /usr/bin/env python
#
# Copyright (c) 2011 SEOmoz
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''A robot exclusion protocol parser. Because I could not find a good one.'''
__maintainer__ = 'Dan Lecocq'
__copyright__ = '2011-2014 SEOmoz'
__license__ = 'SEOmoz'
__version__ = '0.3.0'
__author__ = 'Dan Lecocq'
__status__ = 'Development'
__email__ = 'dan@moz.com'
#####################################################
# All things logging
#####################################################
import logging
# Package-wide logger: bare-message formatting on stderr.
logger = logging.getLogger('reppy')
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(message)s'))
# The handler itself passes DEBUG and up; the logger's own level below is
# the effective filter, so only ERROR+ is emitted until a caller lowers it.
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
#####################################################
# A couple utilities
#####################################################
import sys
import re
import time
import email.utils
try:
from urllib import parse as urlparse
except ImportError:
# Python 2
import urlparse
if sys.version_info[0] == 3:
long = int
#####################################################
# Import our exceptions at the global level
#####################################################
from .exceptions import ServerError, ReppyException
class Utility(object):
    '''Utility methods'''
    @staticmethod
    def hostname(url):
        '''Return a normalized, canonicalized version of the url's hostname'''
        return urlparse.urlparse(url).netloc
    @staticmethod
    def roboturl(url):
        '''Return a normalized uri to the robots.txt'''
        parsed = urlparse.urlparse(url)
        return '%s://%s/robots.txt' % (parsed.scheme, parsed.netloc)
    @staticmethod
    def short_user_agent(strng):
        '''Return a default user agent string to match, based on strng. For
        example, for 'MyUserAgent/1.0', it will generate 'MyUserAgent' '''
        # Everything before the first '/'; no '/' means the string is
        # already in short form.
        index = strng.find('/')
        if index == -1:
            return strng
        return strng[0:index]
    @staticmethod
    def parse_time(strng):
        '''Parse an HTTP-style (i.e. email-style) time into a timestamp'''
        v = email.utils.parsedate_tz(strng)
        if v is None:
            # Reject bad data
            raise ValueError("Invalid time.")
        if v[9] is None:
            # Default time zone is GMT/UTC: parsedate_tz returns a tuple,
            # so rebuild it with the UTC offset (field 9) set to 0.
            v = list(v)
            v[9] = 0
            v = tuple(v)
        return email.utils.mktime_tz(v)
    @staticmethod
    def get_ttl(headers, default):
        '''Extract the correct ttl from the provided headers, or default'''
        # Now, we'll determine the expiration
        ttl = None
        # If max-age is specified in Cache-Control, use it and ignore any
        # Expires header, as per RFC2616 Sec. 13.2.4.
        if headers.get('cache-control') is not None:
            for directive in headers['cache-control'].split(','):
                tokens = directive.lower().partition('=')
                t_name = tokens[0].strip()
                t_value = tokens[2].strip()
                # If we're not allowed to cache, then expires is now
                if t_name in ('no-store', 'must-revalidate'):
                    return 0
                elif t_name == 'no-cache' and t_value == '':
                    # Only honor no-cache if there is no =value after it
                    return 0
                elif t_name == 's-maxage':
                    try:
                        # Since s-maxage should override max-age, return
                        return long(t_value)
                    except ValueError:
                        # Couldn't parse s-maxage as an integer
                        continue
                elif t_name == 'max-age':
                    try:
                        ttl = long(t_value)
                    except ValueError:
                        # Couldn't parse max-age as an integer
                        continue
        # We should honor cache-control first, so if we found anything at
        # all, we should return that
        if ttl is not None:
            return ttl
        # Otherwise, we should use the expires tag
        expires = headers.get('expires')
        date = headers.get('date')
        if expires:
            # TTL is Expires relative to the server's Date header when
            # available, otherwise relative to the local clock.
            if date is None:
                base = time.time()
            else:
                try:
                    base = Utility.parse_time(date)
                except ValueError:
                    base = time.time()
            try:
                return Utility.parse_time(expires) - base
            except ValueError:
                pass
        # ttl is always None here (non-None returned above), so this is
        # effectively `return default`.
        return ttl or default
| 0 | 0 | 0 |
7ed29c6bfb8123ac9d4b0d843be8bdc86441d835 | 801 | py | Python | flask/config.py | Index01/GSL-vts | 4d683a3118d21204dd0feef3239ccad9a7a09031 | [
"MIT"
] | null | null | null | flask/config.py | Index01/GSL-vts | 4d683a3118d21204dd0feef3239ccad9a7a09031 | [
"MIT"
] | null | null | null | flask/config.py | Index01/GSL-vts | 4d683a3118d21204dd0feef3239ccad9a7a09031 | [
"MIT"
] | null | null | null | from flask import Flask
from flask_iniconfig import INIConfig
from flask_sqlalchemy import SQLAlchemy
from ConfigParser import SafeConfigParser, NoSectionError
app = Flask(__name__)
parser = SafeConfigParser()
parser.read('../gateConfigs.ini')
app.config['Testing'] = True
app.config['DEBUG'] = True
app.config['WTF_CSRF_ENABLED'] = True
app.config['SECRET_KEY'] = "super-generic-string"
#app.config['SERVER_NAME'] = parser.get('Flask', 'SERVER_NAME')
#print parser.get('Flask', 'SERVER_NAME')
app.config['SQLALCHEMY_DATABASE_URI'] = parser.get('PostgresConfigs', 'URL')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS']=False
app.config['SQLALCHEMY_DATABASE_URI']=parser.get('PostgresConfigs', 'URL')
#SERVER_NAME = "127.0.0.1:3000"
print parser.get('PostgresConfigs', 'URL')
db = SQLAlchemy(app)
| 28.607143 | 76 | 0.765293 | from flask import Flask
from flask_iniconfig import INIConfig
from flask_sqlalchemy import SQLAlchemy
from ConfigParser import SafeConfigParser, NoSectionError
# Application bootstrap: create the Flask app, load settings from the INI
# file, then hand the configured app to SQLAlchemy.
app = Flask(__name__)
parser = SafeConfigParser()
parser.read('../gateConfigs.ini')
app.config['Testing'] = True  # NOTE(review): Flask's flag is 'TESTING'; 'Testing' is ignored by Flask -- confirm intent
app.config['DEBUG'] = True
app.config['WTF_CSRF_ENABLED'] = True
app.config['SECRET_KEY'] = "super-generic-string"
#app.config['SERVER_NAME'] = parser.get('Flask', 'SERVER_NAME')
#print parser.get('Flask', 'SERVER_NAME')
# Database connection string comes from the [PostgresConfigs] section.
app.config['SQLALCHEMY_DATABASE_URI'] = parser.get('PostgresConfigs', 'URL')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS']=False
# NOTE(review): duplicate of the assignment two lines above -- one can go.
app.config['SQLALCHEMY_DATABASE_URI']=parser.get('PostgresConfigs', 'URL')
#SERVER_NAME = "127.0.0.1:3000"
print parser.get('PostgresConfigs', 'URL')
db = SQLAlchemy(app)
| 0 | 0 | 0 |
984949e87794d2666a101048f14f3ca5aa598fb9 | 868 | py | Python | talks/users/urls.py | davan690/talks.ox | a90b034b34600c06bc68cda0e48dd3c0663f4538 | [
"Apache-2.0"
] | null | null | null | talks/users/urls.py | davan690/talks.ox | a90b034b34600c06bc68cda0e48dd3c0663f4538 | [
"Apache-2.0"
] | null | null | null | talks/users/urls.py | davan690/talks.ox | a90b034b34600c06bc68cda0e48dd3c0663f4538 | [
"Apache-2.0"
] | null | null | null | from django.conf.urls import patterns, url
from talks.users.views import (manage_collections, list_public_collections, browse_public_collections, view_collection, add_collection, edit_collection, delete_collection, my_talks)
# URL routes for talk-list ("collection") management; each route's `name` is
# the reverse()/{% url %} handle used elsewhere in the project.
urlpatterns = patterns('',
                       url(r'^lists$', manage_collections, name='manage-lists'),
                       url(r'^mytalks$', my_talks, name='my-talks'),
                       url(r'^lists/public$', list_public_collections, name='view-public-lists'),
                       url(r'^lists/browse-public$', browse_public_collections, name='list-public-lists'),
                       url(r'^lists/new$', add_collection, name='add-list'),
                       url(r'^lists/id/(?P<collection_slug>[^/]+)/$', view_collection, name='view-list'),
                       url(r'^lists/id/(?P<collection_slug>[^/]+)/edit$', edit_collection, name='edit-list'),
                       # NOTE(review): unlike its siblings this pattern has no '$' anchor -- confirm intent
                       url(r'^lists/id/(?P<collection_slug>[^/]+)/delete', delete_collection, name='delete-list'),
)
| 54.25 | 181 | 0.715438 | from django.conf.urls import patterns, url
from talks.users.views import (manage_collections, list_public_collections, browse_public_collections, view_collection, add_collection, edit_collection, delete_collection, my_talks)
# URL routes for talk-list ("collection") management; each route's `name` is
# the reverse()/{% url %} handle used elsewhere in the project.
urlpatterns = patterns('',
                       url(r'^lists$', manage_collections, name='manage-lists'),
                       url(r'^mytalks$', my_talks, name='my-talks'),
                       url(r'^lists/public$', list_public_collections, name='view-public-lists'),
                       url(r'^lists/browse-public$', browse_public_collections, name='list-public-lists'),
                       url(r'^lists/new$', add_collection, name='add-list'),
                       url(r'^lists/id/(?P<collection_slug>[^/]+)/$', view_collection, name='view-list'),
                       url(r'^lists/id/(?P<collection_slug>[^/]+)/edit$', edit_collection, name='edit-list'),
                       # NOTE(review): unlike its siblings this pattern has no '$' anchor -- confirm intent
                       url(r'^lists/id/(?P<collection_slug>[^/]+)/delete', delete_collection, name='delete-list'),
)
| 0 | 0 | 0 |
97e5bc9b26a1b779dbc3cc482a168e3ced6f60f1 | 2,095 | py | Python | django/core/serializers/json.py | huicheese/Django-test3 | ac11d2dce245b48392e52d1f4acfd5e7433b243e | [
"BSD-3-Clause"
] | 91 | 2015-01-05T01:10:51.000Z | 2021-09-26T18:01:53.000Z | django/core/serializers/json.py | joetyson/django | c3699190186561d5c216b2a77ecbfc487d42a734 | [
"BSD-3-Clause"
] | 4 | 2015-07-05T21:09:37.000Z | 2019-09-06T14:34:59.000Z | django/core/serializers/json.py | joetyson/django | c3699190186561d5c216b2a77ecbfc487d42a734 | [
"BSD-3-Clause"
] | 32 | 2015-04-03T04:29:45.000Z | 2021-09-14T21:36:02.000Z | """
Serialize data to/from JSON
"""
import datetime
from StringIO import StringIO
from django.core.serializers.python import Serializer as PythonSerializer
from django.core.serializers.python import Deserializer as PythonDeserializer
from django.utils import datetime_safe
from django.utils import simplejson
try:
import decimal
except ImportError:
from django.utils import _decimal as decimal # Python 2.3 fallback
class Serializer(PythonSerializer):
    """
    Convert a queryset to JSON.
    """
    # NOTE(review): flag read by the serializer framework elsewhere; False
    # presumably means this serializer is publicly selectable -- confirm.
    internal_use_only = False
def Deserializer(stream_or_string, **options):
    """
    Deserialize a stream or string of JSON data.
    """
    # Wrap raw strings in a file-like object so simplejson.load() sees a
    # stream in either case.
    source = StringIO(stream_or_string) if isinstance(stream_or_string, basestring) else stream_or_string
    for deserialized in PythonDeserializer(simplejson.load(source)):
        yield deserialized
class DjangoJSONEncoder(simplejson.JSONEncoder):
    """
    JSONEncoder subclass that knows how to encode date/time and decimal types.
    """
    # strftime patterns used when rendering date/time values.
    DATE_FORMAT = "%Y-%m-%d"
    TIME_FORMAT = "%H:%M:%S"
# Older, deprecated class name (for backwards compatibility purposes).
DateTimeAwareJSONEncoder = DjangoJSONEncoder
| 30.362319 | 89 | 0.683055 | """
Serialize data to/from JSON
"""
import datetime
from StringIO import StringIO
from django.core.serializers.python import Serializer as PythonSerializer
from django.core.serializers.python import Deserializer as PythonDeserializer
from django.utils import datetime_safe
from django.utils import simplejson
try:
import decimal
except ImportError:
from django.utils import _decimal as decimal # Python 2.3 fallback
class Serializer(PythonSerializer):
    """
    Convert a queryset to JSON.
    """
    internal_use_only = False
    def end_serialization(self):
        """Dump the accumulated python objects to the stream as JSON."""
        # 'stream' and 'fields' are serializer options, not json.dump kwargs,
        # so strip them before forwarding the rest.
        for option in ('stream', 'fields'):
            self.options.pop(option, None)
        simplejson.dump(self.objects, self.stream, cls=DjangoJSONEncoder, **self.options)
    def getvalue(self):
        """Return the stream contents, when the stream supports getvalue()."""
        getvalue = getattr(self.stream, 'getvalue', None)
        if callable(getvalue):
            return getvalue()
def Deserializer(stream_or_string, **options):
    """
    Deserialize a stream or string of JSON data.
    """
    # Wrap raw strings in a file-like object so simplejson.load() sees a
    # stream in either case.
    source = StringIO(stream_or_string) if isinstance(stream_or_string, basestring) else stream_or_string
    for deserialized in PythonDeserializer(simplejson.load(source)):
        yield deserialized
class DjangoJSONEncoder(simplejson.JSONEncoder):
    """
    JSONEncoder subclass that knows how to encode date/time and decimal types.
    """
    # strftime patterns used when rendering date/time values.
    DATE_FORMAT = "%Y-%m-%d"
    TIME_FORMAT = "%H:%M:%S"
    def default(self, o):
        # Order matters: datetime must be checked before date, since
        # datetime.datetime is a subclass of datetime.date.
        if isinstance(o, datetime.datetime):
            d = datetime_safe.new_datetime(o)
            return d.strftime("%s %s" % (self.DATE_FORMAT, self.TIME_FORMAT))
        elif isinstance(o, datetime.date):
            d = datetime_safe.new_date(o)
            return d.strftime(self.DATE_FORMAT)
        elif isinstance(o, datetime.time):
            return o.strftime(self.TIME_FORMAT)
        elif isinstance(o, decimal.Decimal):
            # Decimals are emitted as strings to avoid float precision loss
            return str(o)
        else:
            return super(DjangoJSONEncoder, self).default(o)
# Older, deprecated class name (for backwards compatibility purposes).
DateTimeAwareJSONEncoder = DjangoJSONEncoder
| 819 | 0 | 81 |
ae6737103b4bbb20784262cd7a4c6f9a4bcbea1e | 14,531 | py | Python | auv_mission_planner/scripts/mission_planner.py | svbhat/smarc_planning | f2a69129f525aefc56ce29e5deb87a1f087c3c06 | [
"BSD-3-Clause"
] | 1 | 2021-12-13T03:06:52.000Z | 2021-12-13T03:06:52.000Z | auv_mission_planner/scripts/mission_planner.py | svbhat/smarc_planning | f2a69129f525aefc56ce29e5deb87a1f087c3c06 | [
"BSD-3-Clause"
] | null | null | null | auv_mission_planner/scripts/mission_planner.py | svbhat/smarc_planning | f2a69129f525aefc56ce29e5deb87a1f087c3c06 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# Copyright 2018 Nils Bore (nbore@kth.se)
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import rospy
import random
import copy
import math
import os
import csv
import tf
from visualization_msgs.msg import Marker, InteractiveMarkerControl
from interactive_markers.interactive_marker_server import *
from interactive_markers.menu_handler import *
from geometry_msgs.msg import Pose
from geometry_msgs.msg import Point
from sensor_msgs.msg import NavSatFix
from geodesy import utm
## Initialize the right-click menu
# Add Vertex callback
# Add Vertex callback
# Add Vertex callback
# Delete Vertex callback
# This part draws the line strips between the points
if __name__ == "__main__":
    # Stand-alone entry point: start the ROS node, build the planner's
    # interactive-marker server, and serve callbacks until shutdown.
    rospy.init_node('mission_planner', anonymous=True)
    mission_planner = MissionPlanner()
    rospy.spin()
| 38.441799 | 757 | 0.638291 | #!/usr/bin/env python
# Copyright 2018 Nils Bore (nbore@kth.se)
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import rospy
import random
import copy
import math
import os
import csv
import tf
from visualization_msgs.msg import Marker, InteractiveMarkerControl
from interactive_markers.interactive_marker_server import *
from interactive_markers.menu_handler import *
from geometry_msgs.msg import Pose
from geometry_msgs.msg import Point
from sensor_msgs.msg import NavSatFix
from geodesy import utm
def trapezoidal_shaped_func(a, b, c, d, x):
    '''Trapezoidal membership function: ramps from 0 to 1 over [a, b],
    stays at 1 on [b, c], and ramps back to 0 over [c, d]; 0 outside.'''
    rising = (x - a) / (b - a)
    falling = (d - x) / (d - c)
    return max(min(rising, float(1.0), falling), float(0.0))
def r_func(x):
    '''Red colormap channel: trapezoid over the flipped input (1 - x).'''
    return trapezoidal_shaped_func(-0.125, 0.125, 0.375, 0.625, 1.0 - x)
def g_func(x):
    '''Green colormap channel: trapezoid over the flipped input (1 - x).'''
    return trapezoidal_shaped_func(0.125, 0.375, 0.625, 0.875, 1.0 - x)
def b_func(x):
    '''Blue colormap channel: trapezoid over the flipped input (1 - x).'''
    return trapezoidal_shaped_func(0.375, 0.625, 0.875, 1.125, 1.0 - x)
class MissionPlanner(object):
    """Interactive RViz-based AUV mission planner.

    Keeps an ordered list of waypoint Poses, renders them as interactive
    markers with a right-click menu (add/delete/save/export), and can write
    the plan either as a space-delimited mission CSV or as a LoLo-style
    lat/long waypoint script.
    """
    def __init__(self, config_file=None):
        # NOTE(review): config_file is accepted but never used -- confirm.
        self._interactive = True
        self.mission_file = rospy.get_param('~mission_file', "mission.csv")
        self.starting_depth = rospy.get_param('~starting_depth', 0.)
        self.default_rpm = rospy.get_param('~default_rpm', 300)
        self.goal_tolerance = rospy.get_param('~goal_tolerance', 50)
        self.marker_scale = rospy.get_param('~marker_scale', 20.)
        self._server = InteractiveMarkerServer("mission_planner")
        self.waypoints = []
        # NOTE(review): edges is never written to elsewhere in this class.
        self.edges = []
        self._init_menu()
        self.load_objects()
        self._server.applyChanges()
    def load_objects(self):
        """Load waypoints from the mission CSV (columns 1-3: x, y, depth),
        seeding a single default waypoint when the file is absent/empty."""
        if os.path.isfile(self.mission_file):
            with open(self.mission_file) as csvfile:
                spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')
                for row in spamreader:
                    rospy.loginfo("Got entry: %s", " ".join(row))
                    pose = Pose()
                    pose.position.x = float(row[1])
                    pose.position.y = float(row[2])
                    # Depth is stored positive in the file; z is negative down
                    pose.position.z = -float(row[3])
                    self.waypoints.append(pose)
        if len(self.waypoints) == 0:
            pose = Pose()
            pose.position.x = 0
            pose.position.y = 0
            pose.position.z = -self.starting_depth
            self.waypoints.append(pose)
        # Draw the ROI
        self.draw_waypoints()
    ## Initialize the right-click menu
    def _init_menu(self):
        """Build the context menu attached to every waypoint marker."""
        self.menu_handler = MenuHandler()
        add_point_entry = self.menu_handler.insert( "Add Waypoint", callback=self._add_point_cb)
        del_point_entry = self.menu_handler.insert( "Delete Waypoint", callback=self._del_point_cb)
        save_plan_entry = self.menu_handler.insert( "Save mission plan", callback=self._save_plan_cb)
        # NOTE(review): save_plan_entry is overwritten by the export entry.
        save_plan_entry = self.menu_handler.insert( "Export LoLo mission plan", callback=self._save_plan_lat_long_cb)
        enable_entry = self.menu_handler.insert( "Movement control", callback=self._enable_cb )
        self.menu_handler.setCheckState( enable_entry, MenuHandler.CHECKED )
    # "Save mission plan" menu callback
    def _save_plan_cb(self, feedback):
        """Write the waypoint list to the mission CSV, one row per waypoint
        with a heading computed from the segment to the next waypoint."""
        #This is the object that we are pressing (feedback) so
        #that we can get the marker name etc..
        rospy.loginfo("Saving the plan to file: %s", self.mission_file)
        with open(self.mission_file, 'w') as csvfile:
            #uint64 task_id, float64 altitude, float64 depth, float64 x, float64 y, float64 theta, string action_topic, duration max_duration, smarc_msgs/StringArray[] action_arguments
            thetas = []
            for i in range(0, len(self.waypoints)-1):
                xdiff = (self.waypoints[i+1].position.x - self.waypoints[i].position.x)
                ydiff = (self.waypoints[i+1].position.y - self.waypoints[i].position.y)
                thetas.append(180./math.pi*math.atan2(ydiff, xdiff))
            # NOTE(review): with a single waypoint, thetas is empty and
            # thetas[-1] below raises IndexError; with exactly two, the last
            # waypoint's heading becomes 0 instead of repeating the previous
            # one. The branches look inverted -- confirm intended behavior.
            if len(thetas) == 1:
                thetas.append(0.)
            else:
                thetas.append(thetas[-1])
            spamwriter = csv.writer(csvfile, delimiter=' ', quotechar='"', quoting=csv.QUOTE_MINIMAL)
            for waypoint_index, pose in enumerate(self.waypoints):
                theta = thetas[waypoint_index]
                quaternion = tf.transformations.quaternion_from_euler(0., 0., math.pi/180.*theta)
                depth = -pose.position.z
                duration = 100.
                arguments = "{'target_pose': { 'header': {'frame_id': '%s'}, 'pose': {'position': {'x':%f, 'y':%f, 'z':%f}, 'orientation': {'x': %f, 'y':%f, 'z':%f, 'w':%f }}}}" % ("world", pose.position.x, pose.position.y, -depth, quaternion[0], quaternion[1], quaternion[2], quaternion[3])
                print arguments
                spamwriter.writerow([waypoint_index, pose.position.x, pose.position.y, depth, 0.0, theta, duration, "/bezier_planner", arguments])
    # "Export LoLo mission plan" menu callback
    def _save_plan_lat_long_cb(self, feedback):
        """Export the plan as lat/long GOTOWP commands, using the current
        GPS fix as the UTM origin for the local x/y offsets."""
        pre, ext = os.path.splitext(self.mission_file)
        lat_lon_file = pre + ".lolo"
        rospy.loginfo("Saving the plan to file: %s", lat_lon_file)
        # Blocks until a GPS fix arrives on /gps/fix
        gps_msg = rospy.wait_for_message('/gps/fix', NavSatFix)
        lon = gps_msg.longitude
        lat = gps_msg.latitude
        utm_obj = utm.fromLatLong(lat, lon)
        with open(lat_lon_file, 'w') as csvfile:
            csvfile.write("ts\n")
            spamwriter = csv.writer(csvfile, delimiter=' ', quotechar='"', quoting=csv.QUOTE_MINIMAL)
            for waypoint_index, pose in enumerate(self.waypoints):
                # Offset the UTM origin by the waypoint's local x/y, then
                # convert back to geographic coordinates.
                new_obj = copy.deepcopy(utm_obj)
                new_obj.northing += pose.position.y
                new_obj.easting += pose.position.x
                geo_obj = new_obj.toMsg()
                lat_rounded = round(geo_obj.latitude, 5)
                lon_rounded = round(geo_obj.longitude, 5)
                spamwriter.writerow(["ADD", "GOTOWP", lat_rounded, lon_rounded, self.goal_tolerance, self.default_rpm])
            csvfile.write("start\n..\n")
    # Add Vertex callback
    def _add_point_cb(self, feedback):
        """Insert a new waypoint just after the clicked marker, slightly
        offset from it."""
        rospy.loginfo("Add point from marker: %s", feedback.marker_name)
        scale = self.marker_scale
        # Get the pose and create the new object a little away
        # NOTE(review): cos(radians(90)) is ~0, so the x offset is
        # effectively zero -- sin may have been intended. Confirm.
        pose = feedback.pose
        pose.position.x = pose.position.x+scale*1.0*math.cos(math.radians(90))
        pose.position.y = pose.position.y+scale*1.0*math.cos(math.radians(45))
        ######################################################
        # Add object
        waypoint_index = int(feedback.marker_name.split('_')[1]) + 1
        self.waypoints[waypoint_index:waypoint_index] = [pose]
        # Draw the ROI
        self.draw_waypoints()
    # Delete Vertex callback
    def _del_point_cb(self, feedback):
        """Remove the clicked waypoint (at least one waypoint must remain)."""
        rospy.loginfo("Delete point: %s", feedback.marker_name)
        waypoint_index = int(feedback.marker_name.split('_')[1])
        if len(self.waypoints) <= 1:
            rospy.logerr("The minimum number of waypoints is 1!")
            return
        rospy.loginfo("Deleting waypoint %d: out of: %d", waypoint_index, len(self.waypoints))
        # We only want to delete particular marker
        del self.waypoints[waypoint_index]
        # Erase the marker with the now-unused highest index; the remaining
        # ones are re-inserted by draw_waypoints below.
        self._server.erase("Waypoint_" + str(len(self.waypoints)))
        self.draw_waypoints()
    def _update_poly(self, feedback):
        """Marker-move feedback: record the dragged waypoint's new pose and
        redraw the connecting line strip."""
        if feedback.control_name.startswith("move_plane") or \
           feedback.control_name.startswith("move_axis"):
            waypoint_index = int(feedback.marker_name.split('_')[1])
            print "Setting new pose for waypoint: ", waypoint_index
            self.waypoints[waypoint_index] = feedback.pose
            int_marker = self.create_line_marker()
            self._server.erase("Line")
            self._server.insert(int_marker)
            self._server.applyChanges()
    def _enable_cb(self, feedback):
        """Toggle the 'Movement control' menu entry: enables/disables the
        move controls on all waypoint markers."""
        handle = feedback.menu_entry_id
        state = self.menu_handler.getCheckState( handle )
        if state == MenuHandler.CHECKED:
            self.menu_handler.setCheckState( handle, MenuHandler.UNCHECKED )
            self._interactive = False
        else:
            self.menu_handler.setCheckState( handle, MenuHandler.CHECKED )
            self._interactive = True
        self.menu_handler.reApply( self._server )
        self.draw_waypoints()
    def draw_waypoints(self):
        """Re-insert every waypoint marker (with its menu) plus the line
        strip connecting them."""
        for current_index, pose in enumerate(self.waypoints):
            rospy.loginfo("Inserting waypoint: %s", "Waypoint_" + str(current_index))
            int_marker = self.create_waypoint_marker(pose, current_index)
            self._server.erase("Waypoint_" + str(current_index))
            self._server.applyChanges()
            self._server.insert(int_marker, self._update_poly)
            self.menu_handler.apply(self._server, "Waypoint_" + str(current_index))
            self._server.applyChanges()
        int_marker = self.create_line_marker()
        self._server.erase("Line")
        self._server.insert(int_marker, self._update_poly)
        self._server.applyChanges()
    # This part draws the line strips between the points
    def create_line_marker(self):
        """Build the yellow LINE_STRIP marker connecting all waypoints.
        Points are expressed relative to the first waypoint's pose."""
        scale = self.marker_scale
        int_marker = InteractiveMarker()
        int_marker.header.frame_id = "world"
        int_marker.name = "Line"
        int_marker.description = ""
        int_marker.pose = self.waypoints[0]
        marker = Marker()
        marker.type = Marker.LINE_STRIP
        marker.scale.x = 0.1*scale
        #random.seed()
        val = random.random()
        marker.color.r = 1.0 #r_func(val)
        marker.color.g = 1.0 #g_func(val)
        marker.color.b = 0.0 #b_func(val)
        marker.color.a = 1.0
        control = InteractiveMarkerControl()
        control.always_visible = True
        control.markers.append( marker )
        int_marker.controls.append(control)
        marker.points = []
        for wp_pose in self.waypoints:
            p = Point()
            p.x = wp_pose.position.x - int_marker.pose.position.x
            p.y = wp_pose.position.y - int_marker.pose.position.y
            p.z = wp_pose.position.z - int_marker.pose.position.z
            marker.points.append(p)
        return int_marker
    def create_waypoint_marker(self, pose, current_index):
        """Build one green sphere marker named 'Waypoint_<index>' with a
        menu button and (when enabled) move-plane/move-axis controls."""
        # create an interactive marker for our server
        int_marker = InteractiveMarker()
        int_marker.header.frame_id = "world"
        int_marker.name = "Waypoint_" + str(current_index)
        int_marker.description = "Waypoint " + str(current_index)
        scale = self.marker_scale
        int_marker.pose = pose
        int_marker.scale = scale
        #int_marker.pose.position.z = 0.01
        marker = Marker()
        marker.type = Marker.SPHERE
        marker.scale.x = 0.25*scale
        marker.scale.y = 0.25*scale
        marker.scale.z = 0.25*scale
        #int_marker.pose.position.z = (marker.scale.z / 2)
        #random.seed(soma_type)
        val = random.random()
        marker.color.r = 0.0 #r_func(val)
        marker.color.g = 1.0 #g_func(val)
        marker.color.b = 0.0 #b_func(val)
        marker.color.a = 1.0
        #marker.pose = pose
        # create a control which will move the box
        # this control does not contain any markers,
        # which will cause RViz to insert two arrows
        control = InteractiveMarkerControl()
        control.orientation.w = 1
        control.orientation.x = 0
        control.orientation.y = 1
        control.orientation.z = 0
        #control.scale.x = 4.
        #control.scale.y = 4.
        #control.scale.z = 4.
        control.interaction_mode = InteractiveMarkerControl.MOVE_PLANE
        control.name = "move_plane"
        if self._interactive:
            int_marker.controls.append(copy.deepcopy(control))
        control.name = "move_axis"
        control.interaction_mode = InteractiveMarkerControl.MOVE_AXIS
        #int_marker.color.r = 0 #r_func(val)
        #int_marker.color.g = 255 #g_func(val)
        #int_marker.color.b = 0 #b_func(val)
        #int_marker.color.a = 0.5
        if self._interactive:
            int_marker.controls.append(copy.deepcopy(control))
        # add menu control
        menu_control = InteractiveMarkerControl()
        menu_control.interaction_mode = InteractiveMarkerControl.BUTTON
        menu_control.always_visible = True
        menu_control.markers.append( marker) #makeBox(int_marker) )
        int_marker.controls.append(menu_control)
        return int_marker
if __name__ == "__main__":
    # Stand-alone entry point: start the ROS node, build the planner's
    # interactive-marker server, and serve callbacks until shutdown.
    rospy.init_node('mission_planner', anonymous=True)
    mission_planner = MissionPlanner()
    rospy.spin()
| 11,839 | 8 | 433 |
f5db14ce641a424ce8380470a2b3bcfcc423464e | 1,065 | py | Python | packs/device42/actions/lib/base_action.py | userlocalhost2000/st2contrib | 1a5f759e76401743ed9023d298a3d767e3885db1 | [
"Apache-2.0"
] | 164 | 2015-01-17T16:08:33.000Z | 2021-08-03T02:34:07.000Z | packs/device42/actions/lib/base_action.py | userlocalhost2000/st2contrib | 1a5f759e76401743ed9023d298a3d767e3885db1 | [
"Apache-2.0"
] | 442 | 2015-01-01T11:19:01.000Z | 2017-09-06T23:26:17.000Z | packs/device42/actions/lib/base_action.py | userlocalhost2000/st2contrib | 1a5f759e76401743ed9023d298a3d767e3885db1 | [
"Apache-2.0"
] | 202 | 2015-01-13T00:37:40.000Z | 2020-11-07T11:30:10.000Z | import requests
from st2actions.runners.pythonrunner import Action
| 34.354839 | 71 | 0.612207 | import requests
from st2actions.runners.pythonrunner import Action
class BaseAction(Action):
    """Base class for Device42 pack actions: validates connection settings
    from the pack config and wraps authenticated GET calls to the API."""

    def __init__(self, config):
        super(BaseAction, self).__init__(config)
        # Every connection setting below is mandatory; fail fast when one
        # is missing or empty.
        for option in ('d42_server', 'd42_username', 'd42_password'):
            value = self.config.get(option, None)
            setattr(self, option, value)
            if not value:
                raise ValueError('"%s" config value is required' % option)
        self.verify = self.config.get('verify_certificate', False)

    def getAPI(self, endpoint, params):
        """GET `endpoint` on the configured Device42 server with basic auth
        and return the parsed JSON response."""
        url = "%s%s" % (self.d42_server, endpoint)
        response = requests.get(url,
                                params=params,
                                auth=(self.d42_username, self.d42_password),
                                verify=self.verify)
        return response.json()
| 917 | 4 | 76 |
16b13d095afe51d2bda71f8464d9899cbaaa5511 | 4,570 | py | Python | vyper/parser/constants.py | ryan-rozario/vyper | 9d235e6e7e85ee0dbfaf54a6efd5fb6334c2d00f | [
"Apache-2.0"
] | null | null | null | vyper/parser/constants.py | ryan-rozario/vyper | 9d235e6e7e85ee0dbfaf54a6efd5fb6334c2d00f | [
"Apache-2.0"
] | null | null | null | vyper/parser/constants.py | ryan-rozario/vyper | 9d235e6e7e85ee0dbfaf54a6efd5fb6334c2d00f | [
"Apache-2.0"
] | null | null | null | import copy
from vyper import (
ast as vy_ast,
)
from vyper.exceptions import (
StructureException,
TypeMismatch,
VariableDeclarationException,
)
from vyper.parser.context import (
Context,
)
from vyper.parser.expr import (
Expr,
)
from vyper.parser.memory_allocator import (
MemoryAllocator,
)
from vyper.types.types import (
BaseType,
ByteArrayType,
)
from vyper.utils import (
SizeLimits,
is_instances,
)
| 32.411348 | 98 | 0.610941 | import copy
from vyper import (
ast as vy_ast,
)
from vyper.exceptions import (
StructureException,
TypeMismatch,
VariableDeclarationException,
)
from vyper.parser.context import (
Context,
)
from vyper.parser.expr import (
Expr,
)
from vyper.parser.memory_allocator import (
MemoryAllocator,
)
from vyper.types.types import (
BaseType,
ByteArrayType,
)
from vyper.utils import (
SizeLimits,
is_instances,
)
class Constants(object):
    """Registry of contract-level constants for the vyper compiler.

    Maps constant names to their unrolled (parsed and type-checked)
    expressions, and keeps the raw value AST nodes alongside.
    """

    def __init__(self):
        self._constants = dict()      # name -> unrolled expr (or AnnAssign for byte arrays)
        self._constants_ast = dict()  # name -> original value AST node

    def __contains__(self, key):
        return key in self._constants

    def unroll_constant(self, const, global_ctx):
        """Parse a constant's value and check it against its annotation type.

        Returns the unrolled expression (or the AnnAssign node itself for
        byte arrays); raises TypeMismatch when value and annotation disagree.
        """
        ann_expr = None
        # Evaluate the value in a throwaway context: constants may not
        # reference local variables or memory.
        expr = Expr.parse_value_expr(
            const.value,
            Context(
                vars=None,
                global_ctx=global_ctx,
                origcode=const.full_source_code,
                memory_allocator=MemoryAllocator()
            ),
        )
        annotation_type = global_ctx.parse_type(const.annotation.args[0], None)
        fail = False

        if is_instances([expr.typ, annotation_type], ByteArrayType):
            # Byte arrays only need to fit within the annotated maxlen;
            # the raw AnnAssign is returned so it can be expanded lazily.
            if expr.typ.maxlen < annotation_type.maxlen:
                return const
            fail = True

        elif expr.typ != annotation_type:
            fail = True

        # special case for literals, which can be uint256 types as well.
        is_special_case_uint256_literal = (
            is_instances([expr.typ, annotation_type], BaseType)
        ) and (
            [annotation_type.typ, expr.typ.typ] == ['uint256', 'int128']
        ) and SizeLimits.in_bounds('uint256', expr.value)

        is_special_case_int256_literal = (
            is_instances([expr.typ, annotation_type], BaseType)
        ) and (
            [annotation_type.typ, expr.typ.typ] == ['int128', 'int128']
        ) and SizeLimits.in_bounds('int128', expr.value)

        if is_special_case_uint256_literal or is_special_case_int256_literal:
            fail = False

        if fail:
            raise TypeMismatch(
                f"Invalid value for constant type, expected {annotation_type} got "
                f"{expr.typ} instead",
                const.value,
            )

        # Copy the expression but stamp it with the annotated type; the
        # literal flag is carried over because the annotation type lacks it.
        ann_expr = copy.deepcopy(expr)
        ann_expr.typ = annotation_type
        ann_expr.typ.is_literal = expr.typ.is_literal  # Annotation type doesn't have literal set.

        return ann_expr

    def add_constant(self, item, global_ctx):
        """Register a `constant(...)` declaration (an AnnAssign node)."""
        args = item.annotation.args
        if not item.value:
            raise StructureException('Constants must express a value!', item)

        is_correctly_formatted_struct = (
            len(args) == 1 and isinstance(args[0], (vy_ast.Subscript, vy_ast.Name, vy_ast.Call))
        ) and item.target

        if is_correctly_formatted_struct:
            c_name = item.target.id
            if global_ctx.is_valid_varname(c_name, item):
                self._constants[c_name] = self.unroll_constant(item, global_ctx)
                self._constants_ast[c_name] = item.value
            # TODO: the previous `if` has no else which will result in this
            # *silently* existing without doing anything. is this intended
            # behavior.
        else:
            raise StructureException('Incorrectly formatted struct', item)

    def ast_is_constant(self, ast_node):
        """True when ast_node is a Name referring to a registered constant."""
        return isinstance(ast_node, vy_ast.Name) and ast_node.id in self._constants

    def is_constant_of_base_type(self, ast_node, base_types):
        """True when ast_node names a constant whose base type is in base_types."""
        # NOTE(review): `(base_types)` is not a tuple literal — a bare
        # string falls through unchanged and `in` then does substring
        # matching on type names; confirm this is intended.
        base_types = (base_types) if not isinstance(base_types, tuple) else base_types
        valid = self.ast_is_constant(ast_node)
        if not valid:
            return False
        const = self._constants[ast_node.id]
        if isinstance(const.typ, BaseType) and const.typ.typ in base_types:
            return True
        return False

    def get_constant(self, const_name, context):
        """ Return unrolled const """
        # check if value is compatible with
        const = self._constants[const_name]
        if isinstance(const, vy_ast.AnnAssign):  # Handle ByteArrays.
            if context:
                expr = Expr(const.value, context).lll_node
                return expr
            else:
                raise VariableDeclarationException(
                    f"ByteArray: Can not be used outside of a function context: {const_name}"
                )

        # Other types are already unwrapped, no need
        return self._constants[const_name]
| 3,290 | 803 | 23 |
75a7ea22e96f0fd9b60feeabde377cb245cf7c46 | 1,552 | py | Python | copy_rootless.py | clburlison/rootless_diff | daa8b547138e36b1d6ce887f9faa938de873c2d4 | [
"MIT"
] | 2 | 2017-04-06T18:35:40.000Z | 2017-05-20T20:48:29.000Z | copy_rootless.py | clburlison/rootless_diff | daa8b547138e36b1d6ce887f9faa938de873c2d4 | [
"MIT"
] | null | null | null | copy_rootless.py | clburlison/rootless_diff | daa8b547138e36b1d6ce887f9faa938de873c2d4 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
"""
A helper script to copy SIP related files.
"""
from __future__ import print_function
import os
import shutil
import sys
import plistlib
def get_version():
    '''Obtain system version info from the disk version plist'''
    SYSTEM_VERSION = ('/System/Library/CoreServices/SystemVersion.plist')
    try:
        # NOTE(review): plistlib.readPlist is removed in Python 3.9+;
        # fine for the Python 2 shebang this script uses.
        sys_ver = plistlib.readPlist(SYSTEM_VERSION)
    except Exception:
        # Narrowed from a bare `except:`, which would also swallow
        # SystemExit and KeyboardInterrupt.
        sys.stderr.write("ERROR: Unable to read SystemVersion.plist")
        sys.exit(1)
    return sys_ver
def main():
    '''Main method for copying files for git references'''
    ver = get_version()
    # Directory name encodes marketing version + build, e.g. "10.11.6_15G31".
    directory = '{}_{}'.format(ver.get('ProductUserVisibleVersion'),
                               ver.get('ProductBuildVersion'))
    if os.path.exists(directory):
        # Refuse to overwrite a previous snapshot of the same build.
        sys.stderr.write("ERROR: Directory '{}' exists. "
                         "Exiting...".format(directory))
        sys.exit(1)
    else:
        os.makedirs(directory)

    # Copy the launchd rootless file
    LAUNCHD_FILE_NAME = 'com.apple.xpc.launchd.rootless.plist'
    LAUNCHD_FILE = os.path.join('/System/Library/Sandbox/', LAUNCHD_FILE_NAME)
    shutil.copyfile(LAUNCHD_FILE, os.path.join(directory, LAUNCHD_FILE_NAME))

    # Copy the rootless conf file
    CONF_FILE_NAME = 'rootless.conf'
    CONF_FILE = os.path.join('/System/Library/Sandbox/', CONF_FILE_NAME)
    shutil.copyfile(CONF_FILE, os.path.join(directory, CONF_FILE_NAME))
    # NOTE(review): "SUCESSFUL" is misspelled in the user-facing message.
    print("SUCESSFUL: Copy complete...")


if __name__ == '__main__':
    main()
| 29.846154 | 78 | 0.673325 | #!/usr/bin/python
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
"""
A helper script to copy SIP related files.
"""
from __future__ import print_function
import os
import shutil
import sys
import plistlib
def get_version():
    '''Obtain system version info from the disk version plist'''
    SYSTEM_VERSION = ('/System/Library/CoreServices/SystemVersion.plist')
    try:
        # NOTE(review): plistlib.readPlist is removed in Python 3.9+;
        # fine for the Python 2 shebang this script uses.
        sys_ver = plistlib.readPlist(SYSTEM_VERSION)
    except Exception:
        # Narrowed from a bare `except:`, which would also swallow
        # SystemExit and KeyboardInterrupt.
        sys.stderr.write("ERROR: Unable to read SystemVersion.plist")
        sys.exit(1)
    return sys_ver
def main():
    '''Main method for copying files for git references'''
    ver = get_version()
    # Directory name encodes marketing version + build, e.g. "10.11.6_15G31".
    directory = '{}_{}'.format(ver.get('ProductUserVisibleVersion'),
                               ver.get('ProductBuildVersion'))
    if os.path.exists(directory):
        # Refuse to overwrite a previous snapshot of the same build.
        sys.stderr.write("ERROR: Directory '{}' exists. "
                         "Exiting...".format(directory))
        sys.exit(1)
    else:
        os.makedirs(directory)

    # Copy the launchd rootless file
    LAUNCHD_FILE_NAME = 'com.apple.xpc.launchd.rootless.plist'
    LAUNCHD_FILE = os.path.join('/System/Library/Sandbox/', LAUNCHD_FILE_NAME)
    shutil.copyfile(LAUNCHD_FILE, os.path.join(directory, LAUNCHD_FILE_NAME))

    # Copy the rootless conf file
    CONF_FILE_NAME = 'rootless.conf'
    CONF_FILE = os.path.join('/System/Library/Sandbox/', CONF_FILE_NAME)
    shutil.copyfile(CONF_FILE, os.path.join(directory, CONF_FILE_NAME))
    # NOTE(review): "SUCESSFUL" is misspelled in the user-facing message.
    print("SUCESSFUL: Copy complete...")


if __name__ == '__main__':
    main()
| 0 | 0 | 0 |
4e2a80faa7ec26140be32a4d461ad8db48951bc1 | 80 | py | Python | plugin/exportsymbols/__init__.py | BlackVS/IDA-exportsymbols | ecbd5b34a2a87091cd0ddf8d088f53bb700d6d49 | [
"MIT"
] | 2 | 2020-10-31T06:43:37.000Z | 2022-02-12T15:57:55.000Z | plugin/exportsymbols/__init__.py | BlackVS/IDA-exportsymbols | ecbd5b34a2a87091cd0ddf8d088f53bb700d6d49 | [
"MIT"
] | null | null | null | plugin/exportsymbols/__init__.py | BlackVS/IDA-exportsymbols | ecbd5b34a2a87091cd0ddf8d088f53bb700d6d49 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# coding: utf-8
#
# HeapViewer - by @danigargu
#
import os
| 8 | 28 | 0.6375 | #!/usr/bin/python
# coding: utf-8
#
# HeapViewer - by @danigargu
#
import os
| 0 | 0 | 0 |
bcf90eb98f466a86db67b9814a21ceb8773b5463 | 1,987 | py | Python | autosk_dev_test/sparse_read_test.py | hmendozap/master-arbeit-files | 5c1b90bc4a424313234b84bad405799de6f8d2ed | [
"MIT"
] | 2 | 2018-01-18T06:25:21.000Z | 2018-12-11T07:43:09.000Z | autosk_dev_test/sparse_read_test.py | hmendozap/master-arbeit-files | 5c1b90bc4a424313234b84bad405799de6f8d2ed | [
"MIT"
] | 1 | 2016-03-29T07:55:18.000Z | 2016-03-29T07:55:18.000Z | autosk_dev_test/sparse_read_test.py | hmendozap/master-arbeit-files | 5c1b90bc4a424313234b84bad405799de6f8d2ed | [
"MIT"
] | null | null | null | import numpy as np
from sklearn.datasets import load_svmlight_file as lsf
from autosklearn.pipeline.components.classification import add_classifier
from autosklearn.data import competition_data_manager as askdata
import autosklearn.automl as autosk
from component import DeepFeedNet

# Paths to the datasets used in this experiment.
aad_dataset_dir = '../datasets/dataset_243/'
automl_dataset_dir = '/data/aad/automl_data/openml/293_acc/293_acc_'
libsvm_dataset = '../datasets/covtype.libsvm.binary'

# Load the sparse training data; the feature count (54) must be given
# explicitly for the sparse-list -> CSR conversion.
X_list = askdata.sparse_file_to_sparse_list(automl_dataset_dir + 'train.data')
X_train = askdata.sparse_list_to_csr_sparse(X_list, nbr_features=54)
y_train = np.loadtxt(automl_dataset_dir + 'train.solution')
#X, y = lsf(libsvm_dataset, n_features=54)
#train_size = int(X.shape[0] * 0.9)
#X_train = X[:train_size]
#y_train = y[:train_size] - 1

# Register the custom network component with auto-sklearn.
add_classifier(DeepFeedNet.DeepFeedNet)

# Create model
modl = autosk.AutoML(time_left_for_this_task=1800, seed=20, per_run_time_limit=180,
                     ensemble_nbest=1, ensemble_size=1,
                     ml_memory_limit=2048, resampling_strategy='holdout',
                     tmp_dir='tmp/sparse_tmp', output_dir='tmp/sparse_out',
                     delete_tmp_folder_after_terminate=False,
                     initial_configurations_via_metalearning=None,
                     include_preprocessors=['no_preprocessing'],
                     include_estimators=['DeepFeedNet'])

modl.fit(X_train, y_train)

# Load the sparse test data.
X_test_list = askdata.sparse_file_to_sparse_list(automl_dataset_dir + 'test.data')
# BUG FIX: previously converted X_list (the *training* sparse list) here,
# so X_test_list was computed but never used and "test" scores were
# actually computed on training data.
X_test = askdata.sparse_list_to_csr_sparse(X_test_list, nbr_features=54)
y_test = np.loadtxt(automl_dataset_dir + 'test.solution')
#X_test = X[train_size:]
#y_test = y[train_size:] - 1

# Only predict before getting scorin'
y_pred = modl.predict(X_test)
tot_score = modl.score(X_test, y_test)
print(tot_score)

# Comparison
accuracy = np.count_nonzero(y_test == y_pred)
print(float(accuracy) / X_test.shape[0])
| 36.796296 | 83 | 0.744338 | import numpy as np
from sklearn.datasets import load_svmlight_file as lsf
from autosklearn.pipeline.components.classification import add_classifier
from autosklearn.data import competition_data_manager as askdata
import autosklearn.automl as autosk
from component import DeepFeedNet

# Paths to the datasets used in this experiment.
aad_dataset_dir = '../datasets/dataset_243/'
automl_dataset_dir = '/data/aad/automl_data/openml/293_acc/293_acc_'
libsvm_dataset = '../datasets/covtype.libsvm.binary'

# Also one need to size of features
X_list = askdata.sparse_file_to_sparse_list(automl_dataset_dir + 'train.data')
X_train = askdata.sparse_list_to_csr_sparse(X_list, nbr_features=54)
y_train = np.loadtxt(automl_dataset_dir + 'train.solution')
#X, y = lsf(libsvm_dataset, n_features=54)
#train_size = int(X.shape[0] * 0.9)
#X_train = X[:train_size]
#y_train = y[:train_size] - 1

# Register the custom network component with auto-sklearn.
add_classifier(DeepFeedNet.DeepFeedNet)

# Create model
modl = autosk.AutoML(time_left_for_this_task=1800, seed=20, per_run_time_limit=180,
                     ensemble_nbest=1, ensemble_size=1,
                     ml_memory_limit=2048, resampling_strategy='holdout',
                     tmp_dir='tmp/sparse_tmp', output_dir='tmp/sparse_out',
                     delete_tmp_folder_after_terminate=False,
                     initial_configurations_via_metalearning=None,
                     include_preprocessors=['no_preprocessing'],
                     include_estimators=['DeepFeedNet'])

modl.fit(X_train, y_train)

# Also one need to size of features
X_test_list = askdata.sparse_file_to_sparse_list(automl_dataset_dir + 'test.data')
# NOTE(review): X_list below is the *training* sparse list; X_test_list is
# computed but never used — this looks like a bug (should be X_test_list).
X_test = askdata.sparse_list_to_csr_sparse(X_list, nbr_features=54)
y_test = np.loadtxt(automl_dataset_dir + 'test.solution')
#X_test = X[train_size:]
#y_test = y[train_size:] - 1

# Only predict before getting scorin'
y_pred = modl.predict(X_test)
tot_score = modl.score(X_test, y_test)
print(tot_score)

# Comparison
accuracy = np.count_nonzero(y_test == y_pred)
print(float(accuracy) / X_test.shape[0])
| 0 | 0 | 0 |
a758accc63c2338cfb280f4a7bd5ac766b0517a7 | 2,159 | py | Python | hailo_model_zoo/core/datasets/parse_mot.py | nadaved1/hailo_model_zoo | 42b716f337dde4ec602022a34d6a07a1bbd45539 | [
"MIT"
] | 29 | 2021-07-19T13:53:18.000Z | 2022-01-26T11:20:55.000Z | hailo_model_zoo/core/datasets/parse_mot.py | nadaved1/hailo_model_zoo | 42b716f337dde4ec602022a34d6a07a1bbd45539 | [
"MIT"
] | 1 | 2022-03-18T03:27:24.000Z | 2022-03-20T14:58:41.000Z | hailo_model_zoo/core/datasets/parse_mot.py | nadaved1/hailo_model_zoo | 42b716f337dde4ec602022a34d6a07a1bbd45539 | [
"MIT"
] | 10 | 2021-07-20T03:19:55.000Z | 2022-02-25T13:57:30.000Z | import tensorflow as tf
def parse_mot_record(serialized_example):
    """Deserialize one MOT TFRecord example.

    Returns [image, image_info]: a uint8 HxWx3 image tensor plus a dict
    of frame metadata and densified per-person box annotations.
    """
    feature_spec = {
        'video_name': tf.io.FixedLenFeature([], tf.string),
        'height': tf.io.FixedLenFeature([], tf.int64),
        'width': tf.io.FixedLenFeature([], tf.int64),
        'person_id': tf.io.VarLenFeature(tf.int64),
        'xmin': tf.io.VarLenFeature(tf.int64),
        'xmax': tf.io.VarLenFeature(tf.int64),
        'ymin': tf.io.VarLenFeature(tf.int64),
        'ymax': tf.io.VarLenFeature(tf.int64),
        'mark': tf.io.VarLenFeature(tf.int64),
        'label': tf.io.VarLenFeature(tf.int64),
        'visibility_ratio': tf.io.VarLenFeature(tf.float32),
        'image_name': tf.io.FixedLenFeature([], tf.string),
        'image_jpeg': tf.io.FixedLenFeature([], tf.string),
        'is_ignore': tf.io.VarLenFeature(tf.int64),
    }
    parsed = tf.io.parse_single_example(serialized_example, features=feature_spec)

    frame_h = tf.cast(parsed['height'], tf.int32)
    frame_w = tf.cast(parsed['width'], tf.int32)
    frame_name = tf.cast(parsed['image_name'], tf.string)
    clip_name = tf.cast(parsed['video_name'], tf.string)

    decoded = tf.image.decode_jpeg(parsed['image_jpeg'], channels=3, dct_method='INTEGER_ACCURATE')
    image = tf.cast(tf.reshape(decoded, tf.stack([frame_h, frame_w, 3])), tf.uint8)

    def _dense(key):
        # VarLen features come back sparse; densify with zero padding.
        return tf.sparse.to_dense(parsed[key], default_value=0)

    image_info = {
        'image_name': frame_name, 'video_name': clip_name, 'height': frame_h, 'width': frame_w,
        'xmin': _dense('xmin'),
        'xmax': _dense('xmax'),
        'ymin': _dense('ymin'),
        'ymax': _dense('ymax'),
        'person_id': _dense('person_id'),
        'label': _dense('label'),
        'is_ignore': _dense('is_ignore'),
    }
    return [image, image_info]
| 49.068182 | 99 | 0.635016 | import tensorflow as tf
def parse_mot_record(serialized_example):
    """Parse serialized example of TfRecord and extract dictionary of all the information
    """
    # Feature spec: fixed-length scalars for frame metadata, variable-length
    # lists for the per-person annotations (one entry per tracked person).
    features = tf.io.parse_single_example(
        serialized_example,
        features={
            'video_name': tf.io.FixedLenFeature([], tf.string),
            'height': tf.io.FixedLenFeature([], tf.int64),
            'width': tf.io.FixedLenFeature([], tf.int64),
            'person_id': tf.io.VarLenFeature(tf.int64),
            'xmin': tf.io.VarLenFeature(tf.int64),
            'xmax': tf.io.VarLenFeature(tf.int64),
            'ymin': tf.io.VarLenFeature(tf.int64),
            'ymax': tf.io.VarLenFeature(tf.int64),
            'mark': tf.io.VarLenFeature(tf.int64),
            'label': tf.io.VarLenFeature(tf.int64),
            'visibility_ratio': tf.io.VarLenFeature(tf.float32),
            'image_name': tf.io.FixedLenFeature([], tf.string),
            'image_jpeg': tf.io.FixedLenFeature([], tf.string),
            'is_ignore': tf.io.VarLenFeature(tf.int64),
        })
    height = tf.cast(features['height'], tf.int32)
    width = tf.cast(features['width'], tf.int32)
    image_name = tf.cast(features['image_name'], tf.string)
    video_name = tf.cast(features['video_name'], tf.string)
    # Decode the JPEG payload and force the recorded HxWx3 shape.
    image = tf.image.decode_jpeg(features['image_jpeg'], channels=3, dct_method='INTEGER_ACCURATE')
    image_shape = tf.stack([height, width, 3])
    image = tf.cast(tf.reshape(image, image_shape), tf.uint8)
    # Densify the sparse VarLen annotations (zero padding); 'mark' and
    # 'visibility_ratio' are parsed but intentionally not returned here.
    image_info = {
        'image_name': image_name, 'video_name': video_name, 'height': height, 'width': width,
        'xmin': tf.sparse.to_dense(features['xmin'], default_value=0),
        'xmax': tf.sparse.to_dense(features['xmax'], default_value=0),
        'ymin': tf.sparse.to_dense(features['ymin'], default_value=0),
        'ymax': tf.sparse.to_dense(features['ymax'], default_value=0),
        'person_id': tf.sparse.to_dense(features['person_id'], default_value=0),
        'label': tf.sparse.to_dense(features['label'], default_value=0),
        'is_ignore': tf.sparse.to_dense(features['is_ignore'], default_value=0),
    }
    return [image, image_info]
| 0 | 0 | 0 |
a230961c4d1bf0bd2d1efe7972b4baa33c5d7013 | 20,516 | py | Python | models/stylegan/model.py | mcartagenah/ganspace | f297c090257939dce1eef0eb87e6d9c4c19928a8 | [
"Apache-2.0"
] | 1,644 | 2020-04-07T01:00:10.000Z | 2022-03-30T10:27:13.000Z | models/stylegan/model.py | mcartagenah/ganspace | f297c090257939dce1eef0eb87e6d9c4c19928a8 | [
"Apache-2.0"
] | 54 | 2020-04-07T23:32:19.000Z | 2022-03-27T15:06:26.000Z | models/stylegan/model.py | mcartagenah/ganspace | f297c090257939dce1eef0eb87e6d9c4c19928a8 | [
"Apache-2.0"
] | 224 | 2020-04-06T22:59:44.000Z | 2022-03-29T14:35:45.000Z | # Copyright 2020 Erik Härkönen. All rights reserved.
# This file is licensed to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
from pathlib import Path
import requests
import pickle
import sys
import numpy as np
# Reimplementation of StyleGAN in PyTorch
# Source: https://github.com/lernapparat/lernapparat/blob/master/style_gan/pytorch_style_gan.ipynb
# NOTE(review): the four class bodies below are docstring-only stubs
# (dataset filtering residue); full implementations of classes with the
# same names appear later in this file.
class MyLinear(nn.Module):
    """Linear layer with equalized learning rate and custom learning rate multiplier."""
class MyConv2d(nn.Module):
    """Conv layer with equalized learning rate and custom learning rate multiplier."""
class NoiseLayer(nn.Module):
    """adds noise. noise is per pixel (constant over channels) with per-channel weight"""
class LayerEpilogue(nn.Module):
    """Things to do at the end of each layer."""
# From: https://github.com/lernapparat/lernapparat/releases/download/v2019-02-01/ | 44.991228 | 181 | 0.585543 | # Copyright 2020 Erik Härkönen. All rights reserved.
# This file is licensed to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
from pathlib import Path
import requests
import pickle
import sys
import numpy as np
# Reimplementation of StyleGAN in PyTorch
# Source: https://github.com/lernapparat/lernapparat/blob/master/style_gan/pytorch_style_gan.ipynb
class MyLinear(nn.Module):
    """Fully-connected layer with equalized learning rate (StyleGAN-style).

    The weight is stored at unit scale and rescaled by a constant at run
    time when ``use_wscale`` is set, equalizing the effective learning
    rate across layers; ``lrmul`` further scales the per-layer step size.
    """

    def __init__(self, input_size, output_size, gain=2**(0.5), use_wscale=False, lrmul=1, bias=True):
        super().__init__()
        he_std = gain * input_size**(-0.5)  # He initialization std
        # With wscale, the He scaling happens at run time via w_mul;
        # otherwise it is baked into the initialization std.
        if use_wscale:
            init_std, self.w_mul = 1.0 / lrmul, he_std * lrmul
        else:
            init_std, self.w_mul = he_std / lrmul, lrmul
        self.weight = torch.nn.Parameter(init_std * torch.randn(output_size, input_size))
        if not bias:
            self.bias = None
        else:
            self.bias = torch.nn.Parameter(torch.zeros(output_size))
            self.b_mul = lrmul

    def forward(self, x):
        scaled_bias = None if self.bias is None else self.bias * self.b_mul
        return F.linear(x, self.w_mul * self.weight, scaled_bias)
class MyConv2d(nn.Module):
    """Conv layer with equalized learning rate and custom learning rate multiplier."""

    def __init__(self, input_channels, output_channels, kernel_size, gain=2**(0.5), use_wscale=False, lrmul=1, bias=True,
                 intermediate=None, upscale=False):
        """Conv with optional 2x upscale and an optional intermediate module
        (e.g. a blur) applied between convolution and bias addition."""
        super().__init__()
        if upscale:
            self.upscale = Upscale2d()
        else:
            self.upscale = None
        he_std = gain * (input_channels * kernel_size ** 2) ** (-0.5)  # He init
        self.kernel_size = kernel_size
        # Equalized-lr bookkeeping: with wscale the He scaling is applied
        # at run time (w_mul), otherwise it is baked into the init std.
        if use_wscale:
            init_std = 1.0 / lrmul
            self.w_mul = he_std * lrmul
        else:
            init_std = he_std / lrmul
            self.w_mul = lrmul
        self.weight = torch.nn.Parameter(torch.randn(output_channels, input_channels, kernel_size, kernel_size) * init_std)
        if bias:
            self.bias = torch.nn.Parameter(torch.zeros(output_channels))
            self.b_mul = lrmul
        else:
            self.bias = None
        self.intermediate = intermediate

    def forward(self, x):
        bias = self.bias
        if bias is not None:
            bias = bias * self.b_mul

        have_convolution = False
        # For large feature maps (>=128 after doubling), fuse the upscale
        # into a strided transposed convolution instead of upscaling first.
        if self.upscale is not None and min(x.shape[2:]) * 2 >= 128:
            # this is the fused upscale + conv from StyleGAN, sadly this seems incompatible with the non-fused way
            # this really needs to be cleaned up and go into the conv...
            w = self.weight * self.w_mul
            w = w.permute(1, 0, 2, 3)
            # probably applying a conv on w would be more efficient. also this quadruples the weight (average)?!
            w = F.pad(w, (1,1,1,1))
            # Sum of four shifted copies emulates the official fused filter.
            w = w[:, :, 1:, 1:]+ w[:, :, :-1, 1:] + w[:, :, 1:, :-1] + w[:, :, :-1, :-1]
            x = F.conv_transpose2d(x, w, stride=2, padding=(w.size(-1)-1)//2)
            have_convolution = True
        elif self.upscale is not None:
            x = self.upscale(x)

        # Fast path: plain conv with bias when there is no intermediate op.
        if not have_convolution and self.intermediate is None:
            return F.conv2d(x, self.weight * self.w_mul, bias, padding=self.kernel_size//2)
        elif not have_convolution:
            x = F.conv2d(x, self.weight * self.w_mul, None, padding=self.kernel_size//2)

        if self.intermediate is not None:
            x = self.intermediate(x)
        if bias is not None:
            # Bias is added last so the intermediate op sees unbiased output.
            x = x + bias.view(1, -1, 1, 1)
        return x
class NoiseLayer(nn.Module):
    """Per-pixel noise injection with a learned per-channel strength.

    One single-channel noise map is broadcast over all channels, scaled by
    a learned weight that starts at zero (a no-op at initialization).
    """

    def __init__(self, channels):
        super().__init__()
        self.weight = nn.Parameter(torch.zeros(channels))
        # Optional pre-set noise: assigning a tensor here overrides random
        # sampling — handy for deterministic analysis of trained models.
        self.noise = None

    def forward(self, x, noise=None):
        if noise is None:
            # Fall back to the pre-set buffer, else sample fresh noise.
            noise = self.noise
            if noise is None:
                noise = torch.randn(x.size(0), 1, x.size(2), x.size(3),
                                    device=x.device, dtype=x.dtype)
        return x + self.weight.view(1, -1, 1, 1) * noise
class StyleMod(nn.Module):
    """AdaIN-style modulation: scales and shifts a feature map per channel
    using (scale, bias) pairs produced from the latent by a linear layer."""

    def __init__(self, latent_size, channels, use_wscale):
        super(StyleMod, self).__init__()
        # Produces two values (scale, bias) per channel.
        self.lin = MyLinear(latent_size,
                            channels * 2,
                            gain=1.0, use_wscale=use_wscale)

    def forward(self, x, latent):
        style = self.lin(latent)  # style => [batch_size, n_channels*2]
        shape = [-1, 2, x.size(1)] + (x.dim() - 2) * [1]
        style = style.view(shape)  # [batch_size, 2, n_channels, ...]
        # +1 biases the scale toward identity at initialization.
        x = x * (style[:, 0] + 1.) + style[:, 1]
        return x
class PixelNormLayer(nn.Module):
    """PixelNorm: normalize each pixel's feature vector to unit RMS length."""

    def __init__(self, epsilon=1e-8):
        super().__init__()
        self.epsilon = epsilon  # guards against division by zero

    def forward(self, x):
        mean_sq = torch.mean(x * x, dim=1, keepdim=True)
        return x * torch.rsqrt(mean_sq + self.epsilon)
class BlurLayer(nn.Module):
    """Depthwise low-pass filter applied per channel (no channel mixing).

    The 1-D ``kernel`` is expanded to 2-D via an outer product and applied
    with a grouped convolution; ``normalize`` makes the taps sum to 1.
    """

    def __init__(self, kernel=[1, 2, 1], normalize=True, flip=False, stride=1):
        super(BlurLayer, self).__init__()
        # BUG FIX: the original re-assigned `kernel = [1, 2, 1]` here,
        # silently ignoring any caller-supplied kernel. The default is
        # unchanged, so existing callers behave identically.
        kernel = torch.tensor(kernel, dtype=torch.float32)
        kernel = kernel[:, None] * kernel[None, :]  # outer product -> 2-D
        kernel = kernel[None, None]                 # shape (1, 1, k, k)
        if normalize:
            kernel = kernel / kernel.sum()
        if flip:
            # BUG FIX: torch tensors do not support negative-step slicing
            # (kernel[:, :, ::-1, ::-1] raised); use torch.flip instead.
            kernel = torch.flip(kernel, dims=(2, 3))
        self.register_buffer('kernel', kernel)
        self.stride = stride

    def forward(self, x):
        # expand kernel channels so groups=C applies the filter per channel
        kernel = self.kernel.expand(x.size(1), -1, -1, -1)
        x = F.conv2d(
            x,
            kernel,
            stride=self.stride,
            padding=int((self.kernel.size(2)-1)/2),
            groups=x.size(1)
        )
        return x
def upscale2d(x, factor=2, gain=1):
    """Nearest-neighbor upsampling of an NCHW tensor by an integer factor.

    Each pixel is replicated into a ``factor`` x ``factor`` block; ``gain``
    optionally scales the values first.
    """
    assert x.dim() == 4
    if gain != 1:
        x = x * gain
    if factor == 1:
        return x
    # Replicate each element `factor` times along both spatial dims.
    return x.repeat_interleave(factor, dim=2).repeat_interleave(factor, dim=3)
class Upscale2d(nn.Module):
    """Module wrapper around upscale2d() with a fixed factor and gain."""

    def __init__(self, factor=2, gain=1):
        super().__init__()
        assert isinstance(factor, int) and factor >= 1
        self.gain = gain
        self.factor = factor

    def forward(self, x):
        return upscale2d(x, factor=self.factor, gain=self.gain)
class G_mapping(nn.Sequential):
    """StyleGAN mapping network: 8-layer MLP taking z (512) to w (512),
    with pixel normalization on the input and low lrmul (0.01) per layer."""

    def __init__(self, nonlinearity='lrelu', use_wscale=True):
        act, gain = {'relu': (torch.relu, np.sqrt(2)),
                     'lrelu': (nn.LeakyReLU(negative_slope=0.2), np.sqrt(2))}[nonlinearity]
        # NOTE(review): the same activation instance is reused for every
        # layer; fine here since LeakyReLU/relu are stateless.
        layers = [
            ('pixel_norm', PixelNormLayer()),
            ('dense0', MyLinear(512, 512, gain=gain, lrmul=0.01, use_wscale=use_wscale)),
            ('dense0_act', act),
            ('dense1', MyLinear(512, 512, gain=gain, lrmul=0.01, use_wscale=use_wscale)),
            ('dense1_act', act),
            ('dense2', MyLinear(512, 512, gain=gain, lrmul=0.01, use_wscale=use_wscale)),
            ('dense2_act', act),
            ('dense3', MyLinear(512, 512, gain=gain, lrmul=0.01, use_wscale=use_wscale)),
            ('dense3_act', act),
            ('dense4', MyLinear(512, 512, gain=gain, lrmul=0.01, use_wscale=use_wscale)),
            ('dense4_act', act),
            ('dense5', MyLinear(512, 512, gain=gain, lrmul=0.01, use_wscale=use_wscale)),
            ('dense5_act', act),
            ('dense6', MyLinear(512, 512, gain=gain, lrmul=0.01, use_wscale=use_wscale)),
            ('dense6_act', act),
            ('dense7', MyLinear(512, 512, gain=gain, lrmul=0.01, use_wscale=use_wscale)),
            ('dense7_act', act)
        ]
        super().__init__(OrderedDict(layers))

    def forward(self, x):
        return super().forward(x)
class Truncation(nn.Module):
    """Truncation trick: pull early-layer latents toward the average latent.

    Layers with index < ``max_layer`` are replaced by a lerp between the
    registered average latent and the sample (weight ``threshold``);
    later layers pass through unchanged.
    """

    def __init__(self, avg_latent, max_layer=8, threshold=0.7):
        super().__init__()
        self.max_layer = max_layer
        self.threshold = threshold
        self.register_buffer('avg_latent', avg_latent)

    def forward(self, x):
        assert x.dim() == 3  # (batch, num_layers, latent_dim)
        truncated = torch.lerp(self.avg_latent, x, self.threshold)
        layer_idx = torch.arange(x.size(1)).view(1, -1, 1)
        mask = layer_idx < self.max_layer
        return torch.where(mask, truncated, x)
class LayerEpilogue(nn.Module):
    """Things to do at the end of each layer.

    Applies, in order: optional noise injection, the activation, optional
    pixel normalization, optional instance normalization, and finally
    optional style modulation driven by the layer's dlatent slice.
    """

    def __init__(self, channels, dlatent_size, use_wscale, use_noise, use_pixel_norm, use_instance_norm, use_styles, activation_layer):
        super().__init__()
        layers = []
        if use_noise:
            layers.append(('noise', NoiseLayer(channels)))
        layers.append(('activation', activation_layer))
        if use_pixel_norm:
            # BUG FIX: was `PixelNorm()`, a name that does not exist in this
            # module (the class is PixelNormLayer), so any config with
            # use_pixel_norm=True raised NameError.
            layers.append(('pixel_norm', PixelNormLayer()))
        if use_instance_norm:
            layers.append(('instance_norm', nn.InstanceNorm2d(channels)))
        self.top_epi = nn.Sequential(OrderedDict(layers))
        if use_styles:
            self.style_mod = StyleMod(dlatent_size, channels, use_wscale=use_wscale)
        else:
            self.style_mod = None

    def forward(self, x, dlatents_in_slice=None):
        """Run the epilogue; dlatents_in_slice is required iff styles are enabled."""
        x = self.top_epi(x)
        if self.style_mod is not None:
            x = self.style_mod(x, dlatents_in_slice)
        else:
            assert dlatents_in_slice is None
        return x
class InputBlock(nn.Module):
    """First (4x4) synthesis block: a learned constant input (or a dense
    projection of the first dlatent), followed by two style epilogues with
    a 3x3 conv between them."""

    def __init__(self, nf, dlatent_size, const_input_layer, gain, use_wscale, use_noise, use_pixel_norm, use_instance_norm, use_styles, activation_layer):
        super().__init__()
        self.const_input_layer = const_input_layer
        self.nf = nf  # feature maps in this block
        if self.const_input_layer:
            # called 'const' in tf
            self.const = nn.Parameter(torch.ones(1, nf, 4, 4))
            self.bias = nn.Parameter(torch.ones(nf))
        else:
            self.dense = MyLinear(dlatent_size, nf*16, gain=gain/4, use_wscale=use_wscale)  # tweak gain to match the official implementation of Progressing GAN
        self.epi1 = LayerEpilogue(nf, dlatent_size, use_wscale, use_noise, use_pixel_norm, use_instance_norm, use_styles, activation_layer)
        self.conv = MyConv2d(nf, nf, 3, gain=gain, use_wscale=use_wscale)
        self.epi2 = LayerEpilogue(nf, dlatent_size, use_wscale, use_noise, use_pixel_norm, use_instance_norm, use_styles, activation_layer)

    def forward(self, dlatents_in_range):
        # dlatents_in_range is indexed [:, 0] and [:, 1]: one dlatent per
        # epilogue (two style layers for this resolution).
        batch_size = dlatents_in_range.size(0)
        if self.const_input_layer:
            x = self.const.expand(batch_size, -1, -1, -1)
            x = x + self.bias.view(1, -1, 1, 1)
        else:
            x = self.dense(dlatents_in_range[:, 0]).view(batch_size, self.nf, 4, 4)
        x = self.epi1(x, dlatents_in_range[:, 0])
        x = self.conv(x)
        x = self.epi2(x, dlatents_in_range[:, 1])
        return x
class GSynthesisBlock(nn.Module):
    """Synthesis block for resolutions 8x8 and up: fused 2x upscale + conv
    (with optional blur in between), then a second conv, each followed by
    a style epilogue."""

    def __init__(self, in_channels, out_channels, blur_filter, dlatent_size, gain, use_wscale, use_noise, use_pixel_norm, use_instance_norm, use_styles, activation_layer):
        # 2**res x 2**res # res = 3..resolution_log2
        super().__init__()
        if blur_filter:
            blur = BlurLayer(blur_filter)
        else:
            blur = None
        # Blur is applied between the upscaling conv and its bias add.
        self.conv0_up = MyConv2d(in_channels, out_channels, kernel_size=3, gain=gain, use_wscale=use_wscale,
                                 intermediate=blur, upscale=True)
        self.epi1 = LayerEpilogue(out_channels, dlatent_size, use_wscale, use_noise, use_pixel_norm, use_instance_norm, use_styles, activation_layer)
        self.conv1 = MyConv2d(out_channels, out_channels, kernel_size=3, gain=gain, use_wscale=use_wscale)
        self.epi2 = LayerEpilogue(out_channels, dlatent_size, use_wscale, use_noise, use_pixel_norm, use_instance_norm, use_styles, activation_layer)

    def forward(self, x, dlatents_in_range):
        # Two style layers per resolution: dlatents_in_range[:, 0] and [:, 1].
        x = self.conv0_up(x)
        x = self.epi1(x, dlatents_in_range[:, 0])
        x = self.conv1(x)
        x = self.epi2(x, dlatents_in_range[:, 1])
        return x
class G_synthesis(nn.Module):
    """StyleGAN synthesis network: maps per-layer dlatents (W) to an RGB image,
    building one block per resolution from 4x4 up to the target resolution."""

    def __init__(self,
                 dlatent_size = 512,          # Disentangled latent (W) dimensionality.
                 num_channels = 3,            # Number of output color channels.
                 resolution = 1024,           # Output resolution.
                 fmap_base = 8192,            # Overall multiplier for the number of feature maps.
                 fmap_decay = 1.0,            # log2 feature map reduction when doubling the resolution.
                 fmap_max = 512,              # Maximum number of feature maps in any layer.
                 use_styles = True,           # Enable style inputs?
                 const_input_layer = True,    # First layer is a learned constant?
                 use_noise = True,            # Enable noise inputs?
                 randomize_noise = True,      # True = randomize noise inputs every time (non-deterministic), False = read noise inputs from variables.
                 nonlinearity = 'lrelu',      # Activation function: 'relu', 'lrelu'
                 use_wscale = True,           # Enable equalized learning rate?
                 use_pixel_norm = False,      # Enable pixelwise feature vector normalization?
                 use_instance_norm = True,    # Enable instance normalization?
                 dtype = torch.float32,       # Data type to use for activations and outputs.
                 blur_filter = [1,2,1],       # Low-pass filter to apply when resampling activations. None = no filtering.
                 ):
        super().__init__()

        def nf(stage):
            # Feature-map count for a given stage, capped at fmap_max.
            return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max)

        self.dlatent_size = dlatent_size
        resolution_log2 = int(np.log2(resolution))
        assert resolution == 2**resolution_log2 and resolution >= 4

        act, gain = {'relu': (torch.relu, np.sqrt(2)),
                     'lrelu': (nn.LeakyReLU(negative_slope=0.2), np.sqrt(2))}[nonlinearity]
        # Two style layers per resolution (except 4x4 counts as two as well).
        num_layers = resolution_log2 * 2 - 2
        num_styles = num_layers if use_styles else 1
        # NOTE(review): torgbs and num_styles are computed but unused here.
        torgbs = []
        blocks = []
        for res in range(2, resolution_log2 + 1):
            channels = nf(res-1)
            name = '{s}x{s}'.format(s=2**res)
            if res == 2:
                # 4x4 input block (learned constant or dense projection).
                blocks.append((name,
                               InputBlock(channels, dlatent_size, const_input_layer, gain, use_wscale,
                                          use_noise, use_pixel_norm, use_instance_norm, use_styles, act)))
            else:
                blocks.append((name,
                               GSynthesisBlock(last_channels, channels, blur_filter, dlatent_size, gain, use_wscale, use_noise, use_pixel_norm, use_instance_norm, use_styles, act)))
            last_channels = channels
        # Final 1x1 conv projecting the last block's features to RGB.
        self.torgb = MyConv2d(channels, num_channels, 1, gain=1, use_wscale=use_wscale)
        self.blocks = nn.ModuleDict(OrderedDict(blocks))

    def forward(self, dlatents_in):
        # Input: Disentangled latents (W) [minibatch, num_layers, dlatent_size].
        # lod_in = tf.cast(tf.get_variable('lod', initializer=np.float32(0), trainable=False), dtype)
        batch_size = dlatents_in.size(0)
        # Each block consumes two consecutive dlatent layers.
        for i, m in enumerate(self.blocks.values()):
            if i == 0:
                x = m(dlatents_in[:, 2*i:2*i+2])
            else:
                x = m(x, dlatents_in[:, 2*i:2*i+2])
        rgb = self.torgb(x)
        return rgb
class StyleGAN_G(nn.Sequential):
    """StyleGAN generator: mapping network (Z -> W) chained with the
    synthesis network (W -> RGB image) via nn.Sequential.

    NOTE(review): the truncation layer is left disabled (commented out
    below), so the `truncation` constructor argument is currently unused.
    """
    def __init__(self, resolution, truncation=1.0):
        # `resolution` is forwarded to the synthesis network; `truncation`
        # is accepted for API compatibility but not applied (see class note).
        self.resolution = resolution
        self.layers = OrderedDict([
            ('g_mapping', G_mapping()),
            #('truncation', Truncation(avg_latent)),
            ('g_synthesis', G_synthesis(resolution=resolution)),
        ])
        super().__init__(self.layers)
    def forward(self, x, latent_is_w=False):
        """Generate images from latents.

        x is either a single latent batch or a list of exactly 18 per-layer
        latents. If latent_is_w is True, x is taken to already be in W
        space and the mapping network is skipped.
        """
        if isinstance(x, list):
            assert len(x) == 18, 'Must provide 1 or 18 latents'
            if not latent_is_w:
                # map each per-layer latent through the mapping network
                x = [self.layers['g_mapping'].forward(l) for l in x]
            x = torch.stack(x, dim=1)
        else:
            if not latent_is_w:
                x = self.layers['g_mapping'].forward(x)
            # broadcast the single w vector to all 18 per-layer style inputs
            x = x.unsqueeze(1).expand(-1, 18, -1)
        x = self.layers['g_synthesis'].forward(x)
        return x
    # From: https://github.com/lernapparat/lernapparat/releases/download/v2019-02-01/
    def load_weights(self, checkpoint):
        """Load a PyTorch state dict previously saved by export_from_tf."""
        self.load_state_dict(torch.load(checkpoint))
    def export_from_tf(self, pickle_path):
        """Convert an official TensorFlow StyleGAN pickle into this module's
        state dict, load it (non-strict), and save it next to the pickle
        with a .pt suffix. Requires the TF reference code under
        ./stylegan_tf (dnnlib) and a working TensorFlow session.
        """
        module_path = Path(__file__).parent / 'stylegan_tf'
        sys.path.append(str(module_path.resolve()))
        import dnnlib, dnnlib.tflib, pickle, torch, collections
        dnnlib.tflib.init_tf()
        weights = pickle.load(open(pickle_path,'rb'))
        # Evaluate every TF trainable into a torch tensor, one OrderedDict
        # per network (G, D, Gs) in the pickle.
        weights_pt = [collections.OrderedDict([(k, torch.from_numpy(v.value().eval())) for k,v in w.trainables.items()]) for w in weights]
        #torch.save(weights_pt, pytorch_name)
        # then on the PyTorch side run
        state_G, state_D, state_Gs = weights_pt #torch.load('./karras2019stylegan-ffhq-1024x1024.pt')
        def key_translate(k):
            """Map a TF variable path to this module's state-dict key."""
            k = k.lower().split('/')
            if k[0] == 'g_synthesis':
                if not k[1].startswith('torgb'):
                    k.insert(1, 'blocks')
                k = '.'.join(k)
                # rename TF layer components to the epilogue/style_mod names
                # used by the PyTorch blocks
                k = (k.replace('const.const','const').replace('const.bias','bias').replace('const.stylemod','epi1.style_mod.lin')
                    .replace('const.noise.weight','epi1.top_epi.noise.weight')
                    .replace('conv.noise.weight','epi2.top_epi.noise.weight')
                    .replace('conv.stylemod','epi2.style_mod.lin')
                    .replace('conv0_up.noise.weight', 'epi1.top_epi.noise.weight')
                    .replace('conv0_up.stylemod','epi1.style_mod.lin')
                    .replace('conv1.noise.weight', 'epi2.top_epi.noise.weight')
                    .replace('conv1.stylemod','epi2.style_mod.lin')
                    .replace('torgb_lod0','torgb'))
            else:
                k = '.'.join(k)
            return k
        def weight_translate(k, w):
            """Rearrange a TF weight tensor into PyTorch layout."""
            k = key_translate(k)
            if k.endswith('.weight'):
                if w.dim() == 2:
                    w = w.t()  # 2-D (dense) weights: transpose
                elif w.dim() == 1:
                    pass       # 1-D (bias-like) weights: unchanged
                else:
                    assert w.dim() == 4
                    # 4-D conv kernels: move the last two dims first
                    w = w.permute(3, 2, 0, 1)
            return w
        # we delete the useless torgb filters
        param_dict = {key_translate(k) : weight_translate(k, v) for k,v in state_Gs.items() if 'torgb_lod' not in key_translate(k)}
        if 1:
            # diagnostic pass: report keys present only on one side or with
            # mismatched shapes before loading
            sd_shapes = {k : v.shape for k,v in self.state_dict().items()}
            param_shapes = {k : v.shape for k,v in param_dict.items() }
            for k in list(sd_shapes)+list(param_shapes):
                pds = param_shapes.get(k)
                sds = sd_shapes.get(k)
                if pds is None:
                    print ("sd only", k, sds)
                elif sds is None:
                    print ("pd only", k, pds)
                elif sds != pds:
                    print ("mismatch!", k, pds, sds)
        self.load_state_dict(param_dict, strict=False) # needed for the blur kernels
        torch.save(self.state_dict(), Path(pickle_path).with_suffix('.pt'))
210403e51b31d354888d0f1806cbd677afdb21b6 | 2,772 | py | Python | tests/unit/altimeter/aws/resource/ec2/test_vpc.py | AmOr1984v02/altimeter | 4adcf8d759b1f3f615b00521cc1756c8007e04f3 | [
"MIT"
] | null | null | null | tests/unit/altimeter/aws/resource/ec2/test_vpc.py | AmOr1984v02/altimeter | 4adcf8d759b1f3f615b00521cc1756c8007e04f3 | [
"MIT"
] | null | null | null | tests/unit/altimeter/aws/resource/ec2/test_vpc.py | AmOr1984v02/altimeter | 4adcf8d759b1f3f615b00521cc1756c8007e04f3 | [
"MIT"
] | null | null | null | from unittest import TestCase
import boto3
from moto import mock_ec2
from altimeter.aws.resource.ec2.vpc import VPCResourceSpec
from altimeter.aws.scan.aws_accessor import AWSAccessor
| 36.96 | 100 | 0.440115 | from unittest import TestCase
import boto3
from moto import mock_ec2
from altimeter.aws.resource.ec2.vpc import VPCResourceSpec
from altimeter.aws.scan.aws_accessor import AWSAccessor
class TestVPCResourceSpec(TestCase):
    """Exercise VPCResourceSpec.scan against a moto-mocked EC2 backend."""

    @mock_ec2
    def test_scan(self):
        acct = "123456789012"
        region = "us-east-1"
        boto_session = boto3.Session()
        boto_session.client("ec2", region_name=region).create_vpc(CidrBlock="10.0.0.0/16")
        accessor = AWSAccessor(session=boto_session, account_id=acct, region_name=region)
        scanned = VPCResourceSpec.scan(scan_accessor=accessor)

        def simple(pred, obj):
            # shorthand for a simple-typed link dict
            return {"pred": pred, "obj": obj, "type": "simple"}

        account_link = {
            "pred": "account",
            "obj": "arn:aws::::account/123456789012",
            "type": "resource_link",
        }
        region_link = {
            "pred": "region",
            "obj": "arn:aws:::123456789012:region/us-east-1",
            "type": "resource_link",
        }
        expected_resources = [
            {
                "type": "aws:ec2:vpc",
                "links": [
                    simple("is_default", True),
                    simple("cidr_block", "172.31.0.0/16"),  # moto's built-in default VPC
                    simple("state", "available"),
                    account_link,
                    region_link,
                ],
            },
            {
                "type": "aws:ec2:vpc",
                "links": [
                    simple("is_default", False),
                    simple("cidr_block", "10.0.0.0/16"),
                    simple("state", "available"),
                    account_link,
                    region_link,
                ],
            },
        ]
        expected_api_call_stats = {
            "count": 1,
            "123456789012": {
                "count": 1,
                "us-east-1": {"count": 1, "ec2": {"count": 1, "DescribeVpcs": {"count": 1}}},
            },
        }
        self.assertListEqual([r.to_dict() for r in scanned], expected_resources)
        self.assertDictEqual(accessor.api_call_stats.to_dict(), expected_api_call_stats)
| 2,507 | 55 | 23 |
bd83b68845748ee7a2ef1aeb6236679edf4b0c90 | 743 | py | Python | lib/hiveos.py | SimonLovskog/HiveOS-OffPeak | 7baeaa812d8da415ca6ed5ff6169bff66e501d93 | [
"MIT"
] | null | null | null | lib/hiveos.py | SimonLovskog/HiveOS-OffPeak | 7baeaa812d8da415ca6ed5ff6169bff66e501d93 | [
"MIT"
] | null | null | null | lib/hiveos.py | SimonLovskog/HiveOS-OffPeak | 7baeaa812d8da415ca6ed5ff6169bff66e501d93 | [
"MIT"
] | null | null | null | import aiohttp
apiUrl = "https://api2.hiveos.farm/api/v2"
| 26.535714 | 111 | 0.6393 | import aiohttp
apiUrl = "https://api2.hiveos.farm/api/v2"  # base URL of the HiveOS public API v2
async def getMinerStatus(farmID, workerID, APIKey):
    """Return the worker's "online" flag from the HiveOS API.

    Args:
        farmID: farm identifier.
        workerID: worker identifier.
        APIKey: HiveOS API token, sent as a Bearer authorization header.

    Returns:
        The value of stats.online from the worker payload.
    """
    # `async with` guarantees the session (and response) are released even
    # if the request raises; the original leaked the session on error.
    async with aiohttp.ClientSession(
            headers={"Authorization": "Bearer %s" % APIKey}) as session:
        async with session.get("{}/farms/{}/workers/{}".format(apiUrl, farmID, workerID)) as resp:
            data = await resp.json()
    return data["stats"]["online"]
async def turnOffOS(farmID, workerID, APIKey):
    """Send a 'shutdown' command to the given HiveOS worker.

    Best-effort: the HTTP response is not inspected (matching the original
    behaviour); the client session is always closed, even on error.
    """
    postBody = {
        "command": "shutdown",
        "data": {}
    }
    # `async with` fixes the session leak on exception and drops the
    # previously unused `data` local.
    async with aiohttp.ClientSession(
            headers={"Authorization": "Bearer %s" % APIKey}) as session:
        async with session.post("{}/farms/{}/workers/{}/command".format(apiUrl, farmID, workerID), json=postBody):
            pass
| 637 | 0 | 46 |
7772dfc7686bc4aed51f7db0eba9d921f1e22f07 | 2,813 | py | Python | alipay/aop/api/response/MybankCreditLoantradePayeeArConsultResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/response/MybankCreditLoantradePayeeArConsultResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/response/MybankCreditLoantradePayeeArConsultResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.CreditPayRefuseVO import CreditPayRefuseVO
| 31.255556 | 116 | 0.67899 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.CreditPayRefuseVO import CreditPayRefuseVO
class MybankCreditLoantradePayeeArConsultResponse(AlipayResponse):
    """Response model for mybank.credit.loantrade.payee.ar.consult."""

    # Response fields; values are copied from the parsed payload through the
    # property setters (refuse_info's setter converts dicts to a VO).
    _FIELDS = (
        'admit',
        'admit_alipay_login_id',
        'admit_alipay_user_id',
        'is_signed',
        'refuse_info',
        'scheme_ar_no',
        'sign_url',
    )

    def __init__(self):
        super(MybankCreditLoantradePayeeArConsultResponse, self).__init__()
        for name in self._FIELDS:
            setattr(self, '_' + name, None)

    @property
    def admit(self):
        return self._admit

    @admit.setter
    def admit(self, value):
        self._admit = value

    @property
    def admit_alipay_login_id(self):
        return self._admit_alipay_login_id

    @admit_alipay_login_id.setter
    def admit_alipay_login_id(self, value):
        self._admit_alipay_login_id = value

    @property
    def admit_alipay_user_id(self):
        return self._admit_alipay_user_id

    @admit_alipay_user_id.setter
    def admit_alipay_user_id(self, value):
        self._admit_alipay_user_id = value

    @property
    def is_signed(self):
        return self._is_signed

    @is_signed.setter
    def is_signed(self, value):
        self._is_signed = value

    @property
    def refuse_info(self):
        return self._refuse_info

    @refuse_info.setter
    def refuse_info(self, value):
        # accept either a ready-made VO or its dict representation
        self._refuse_info = (value if isinstance(value, CreditPayRefuseVO)
                             else CreditPayRefuseVO.from_alipay_dict(value))

    @property
    def scheme_ar_no(self):
        return self._scheme_ar_no

    @scheme_ar_no.setter
    def scheme_ar_no(self, value):
        self._scheme_ar_no = value

    @property
    def sign_url(self):
        return self._sign_url

    @sign_url.setter
    def sign_url(self, value):
        self._sign_url = value

    def parse_response_content(self, response_content):
        """Populate every present field from the parsed response payload."""
        response = super(MybankCreditLoantradePayeeArConsultResponse, self).parse_response_content(response_content)
        for name in self._FIELDS:
            if name in response:
                # goes through the property setter, so refuse_info is converted
                setattr(self, name, response[name])
| 1,848 | 746 | 23 |
bcd402b633a19185fbd73be1216fa78478a797fb | 681 | py | Python | ledgerplot/ledgerplot/modules/crossover.py | rockwolf/python | 18b4a17136a9c22c77033c5c08a2072df8ed8db0 | [
"BSD-3-Clause"
] | null | null | null | ledgerplot/ledgerplot/modules/crossover.py | rockwolf/python | 18b4a17136a9c22c77033c5c08a2072df8ed8db0 | [
"BSD-3-Clause"
] | null | null | null | ledgerplot/ledgerplot/modules/crossover.py | rockwolf/python | 18b4a17136a9c22c77033c5c08a2072df8ed8db0 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
"""
See LICENSE.txt file for copyright and license details.
"""
"""
A plot that shows when break even is reached.
"""
import numpy as np
import matplotlib.pyplot as plt
from decimal import Decimal
import sys
x_array = []  # abs(first field) of each usable line
y_array = []  # matching 1-based line numbers

def load_data():
    """
    Load data points from the file named in sys.argv[1].

    Each usable line appends abs(first space-separated field) to x_array
    and its 1-based line number to y_array; very short lines and the last
    2 lines of the output are skipped.
    """
    # Use a context manager so the file handle is always closed
    # (the original open(...).read() leaked it).
    with open(sys.argv[1].strip(), 'r') as data_file:
        var_data_array = data_file.read().split('\n')
    i = 0
    for line in var_data_array:
        i += 1
        # skip the last 2 lines of the output
        if (len(line)>1) and (i<len(var_data_array) - 2):
            x_array.append(abs(float(line.strip().split(' ')[0].strip())))
            y_array.append(i)
| 21.28125 | 74 | 0.596182 | #!/usr/bin/env python
"""
See LICENSE.txt file for copyright and license details.
"""
"""
A plot that shows when break even is reached.
"""
import numpy as np
import matplotlib.pyplot as plt
from decimal import Decimal
import sys
x_array = []  # abs(first field) of each usable line
y_array = []  # matching 1-based line numbers

def load_data():
    """
    Load data points from the file named in sys.argv[1].

    Each usable line appends abs(first space-separated field) to x_array
    and its 1-based line number to y_array; very short lines and the last
    2 lines of the output are skipped.
    """
    # Use a context manager so the file handle is always closed
    # (the original open(...).read() leaked it).
    with open(sys.argv[1].strip(), 'r') as data_file:
        var_data_array = data_file.read().split('\n')
    i = 0
    for line in var_data_array:
        i += 1
        # skip the last 2 lines of the output
        if (len(line)>1) and (i<len(var_data_array) - 2):
            x_array.append(abs(float(line.strip().split(' ')[0].strip())))
            y_array.append(i)
| 0 | 0 | 0 |
fc9effe4288eae52043395c21d9984136c52e54c | 140 | py | Python | moai/nn/utils/__init__.py | tzole1155/moai | d1afb3aaf8ddcd7a1c98b84d6365afb846ae3180 | [
"Apache-2.0"
] | 10 | 2021-04-02T11:21:33.000Z | 2022-01-18T18:32:32.000Z | moai/nn/utils/__init__.py | tzole1155/moai | d1afb3aaf8ddcd7a1c98b84d6365afb846ae3180 | [
"Apache-2.0"
] | 1 | 2022-03-22T20:10:55.000Z | 2022-03-24T13:11:02.000Z | moai/nn/utils/__init__.py | tzole1155/moai | d1afb3aaf8ddcd7a1c98b84d6365afb846ae3180 | [
"Apache-2.0"
] | 3 | 2021-05-16T20:47:40.000Z | 2021-12-01T21:15:36.000Z | from moai.nn.utils.instantiate import instantiate
from moai.nn.utils.itertools import repeat
__all__ = [
"instantiate",
"repeat",
] | 20 | 49 | 0.735714 | from moai.nn.utils.instantiate import instantiate
from moai.nn.utils.itertools import repeat
__all__ = [
"instantiate",
"repeat",
] | 0 | 0 | 0 |
799766b2f4fad5cf607a8e0a9e3f866527c2f66d | 1,392 | py | Python | 938.range-sum-of-bst.py | windard/leeeeee | 0107a5f95746592ca4fe78d2b5875cf65b1910e7 | [
"MIT"
] | null | null | null | 938.range-sum-of-bst.py | windard/leeeeee | 0107a5f95746592ca4fe78d2b5875cf65b1910e7 | [
"MIT"
] | null | null | null | 938.range-sum-of-bst.py | windard/leeeeee | 0107a5f95746592ca4fe78d2b5875cf65b1910e7 | [
"MIT"
] | null | null | null | #
# @lc app=leetcode id=938 lang=python
#
# [938] Range Sum of BST
#
# https://leetcode.com/problems/range-sum-of-bst/description/
#
# algorithms
# Easy (78.13%)
# Likes: 448
# Dislikes: 85
# Total Accepted: 83.4K
# Total Submissions: 106.7K
# Testcase Example: '[10,5,15,3,7,null,18]\n7\n15'
#
# Given the root node of a binary search tree, return the sum of values of all
# nodes with value between L and R (inclusive).
#
# The binary search tree is guaranteed to have unique values.
#
#
#
#
# Example 1:
#
#
# Input: root = [10,5,15,3,7,null,18], L = 7, R = 15
# Output: 32
#
#
#
# Example 2:
#
#
# Input: root = [10,5,15,3,7,13,18,1,null,6], L = 6, R = 10
# Output: 23
#
#
#
#
# Note:
#
#
# The number of nodes in the tree is at most 10000.
# The final answer is guaranteed to be less than 2^31.
#
#
#
#
# Definition for a binary tree node.
| 18.810811 | 93 | 0.576149 | #
# @lc app=leetcode id=938 lang=python
#
# [938] Range Sum of BST
#
# https://leetcode.com/problems/range-sum-of-bst/description/
#
# algorithms
# Easy (78.13%)
# Likes: 448
# Dislikes: 85
# Total Accepted: 83.4K
# Total Submissions: 106.7K
# Testcase Example: '[10,5,15,3,7,null,18]\n7\n15'
#
# Given the root node of a binary search tree, return the sum of values of all
# nodes with value between L and R (inclusive).
#
# The binary search tree is guaranteed to have unique values.
#
#
#
#
# Example 1:
#
#
# Input: root = [10,5,15,3,7,null,18], L = 7, R = 15
# Output: 32
#
#
#
# Example 2:
#
#
# Input: root = [10,5,15,3,7,13,18,1,null,6], L = 6, R = 10
# Output: 23
#
#
#
#
# Note:
#
#
# The number of nodes in the tree is at most 10000.
# The final answer is guaranteed to be less than 2^31.
#
#
#
#
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def rangeSumBST(self, root, L, R):
"""
:type root: TreeNode
:type L: int
:type R: int
:rtype: int
"""
if not root:
return 0
value = 0
if L <= root.val <= R:
value = root.val
return value + self.rangeSumBST(root.left, L, R) + self.rangeSumBST(root.right, L, R)
| 73 | 372 | 72 |
5e680ce72985f18321eacb50a2cb032de2a2bdbc | 499 | py | Python | adls/videoCat_workflow.py | blerp-836/natsandbox | bf6f740d04562f1fc5bac5155a6b2665f212e807 | [
"MIT"
] | 1 | 2022-01-19T16:12:00.000Z | 2022-01-19T16:12:00.000Z | adls/videoCat_workflow.py | blerp-836/natsandbox | bf6f740d04562f1fc5bac5155a6b2665f212e807 | [
"MIT"
] | null | null | null | adls/videoCat_workflow.py | blerp-836/natsandbox | bf6f740d04562f1fc5bac5155a6b2665f212e807 | [
"MIT"
] | null | null | null | # Databricks notebook source
dbutils.notebook.run("notebook_workflow", 0, {'action':'landing_load','job':'ytb_videoCat','mode':'dbfs','tbl':'ytb_videoCat'})
# COMMAND ----------
dbutils.notebook.run("notebook_workflow", 0, {'action':'staging_load_videoCat','job':'ytb_videoCat','mode':'dbfs','tbl':'ytb_videoCat'})
# COMMAND ----------
dbutils.notebook.run("notebook_workflow", 0, {'action':'int_load_videoCat','job':'ytb_videoCat','mode':'dbfs','tbl':'ytb_videoCat'})
# COMMAND ----------
| 29.352941 | 136 | 0.671343 | # Databricks notebook source
dbutils.notebook.run("notebook_workflow", 0, {'action':'landing_load','job':'ytb_videoCat','mode':'dbfs','tbl':'ytb_videoCat'})
# COMMAND ----------
dbutils.notebook.run("notebook_workflow", 0, {'action':'staging_load_videoCat','job':'ytb_videoCat','mode':'dbfs','tbl':'ytb_videoCat'})
# COMMAND ----------
dbutils.notebook.run("notebook_workflow", 0, {'action':'int_load_videoCat','job':'ytb_videoCat','mode':'dbfs','tbl':'ytb_videoCat'})
# COMMAND ----------
| 0 | 0 | 0 |
7e2c460c57d61016930a2cd29b733b1220911175 | 329 | py | Python | adventofcode/2020/10/b.py | nevivurn/cp | be2ce55ef6f578cbf606bbc3d85add72993cfde3 | [
"MIT"
] | null | null | null | adventofcode/2020/10/b.py | nevivurn/cp | be2ce55ef6f578cbf606bbc3d85add72993cfde3 | [
"MIT"
] | null | null | null | adventofcode/2020/10/b.py | nevivurn/cp | be2ce55ef6f578cbf606bbc3d85add72993cfde3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys
nums = list(sorted([0] + [int(line.rstrip()) for line in sys.stdin]))
nums.append(nums[-1] + 3)
dp = [1]
while len(dp) < len(nums):
cur = len(dp)
i = cur-1
cum = 0
while i >= 0 and nums[cur]-nums[i] <= 3:
cum += dp[i]
i -= 1
dp.append(cum)
print(dp[-1])
| 16.45 | 69 | 0.525836 | #!/usr/bin/env python3
import sys
nums = list(sorted([0] + [int(line.rstrip()) for line in sys.stdin]))
nums.append(nums[-1] + 3)
dp = [1]
while len(dp) < len(nums):
cur = len(dp)
i = cur-1
cum = 0
while i >= 0 and nums[cur]-nums[i] <= 3:
cum += dp[i]
i -= 1
dp.append(cum)
print(dp[-1])
| 0 | 0 | 0 |
f2fe47b358d4d5f20a3e687d02aa1f6487fddd67 | 6,326 | py | Python | ab_test.py | matyasosvath/ab-test | 3ad07a65cc6967284f3c2741460ee14af6564ff9 | [
"MIT"
] | null | null | null | ab_test.py | matyasosvath/ab-test | 3ad07a65cc6967284f3c2741460ee14af6564ff9 | [
"MIT"
] | null | null | null | ab_test.py | matyasosvath/ab-test | 3ad07a65cc6967284f3c2741460ee14af6564ff9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import pandas as pd
import numpy as np
import scipy.stats as ss
import fire
import logging
# Module-level logging: INFO and above go to the console (StreamHandler
# writes to stderr by default) with a timestamped format.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
stream_handler = logging.StreamHandler() # messages show up in terminal
formatter = logging.Formatter('%(asctime)s %(levelname)s :: %(message)s') # timestamp, level, message
stream_handler.setFormatter(formatter) # add formatter to the stream handler
stream_handler.setLevel(logging.INFO)
logger.addHandler(stream_handler)
class ABTest(object):
"""
A simple AB Test for two proportions or averages.
"""
def __t_test(self, col1, col2, ci=True):
"""
Two-sample (Independent Samples) T-test (two-tailed)
Input:
col1: pandas.Series
col2: pandas.Series
Return
t_test_statistic: T test statistic
p_value: P-value for hypothesis test
ci_lower: Confidence Interval Lower limit
ci_upper: Confidence Interval Upper limit
"""
assert type(self.df[col1]) == pd.core.series.Series, "Col1 Should be pandas.Series"
assert type(self.df[col2]) == pd.core.series.Series, "Col1 Should be pandas.Series"
logging.info("Two-sample (Independent Samples) T-test (two-tailed) method running!")
# Means
mean1, mean2 = self.df[col1].mean(), self.df[col2].mean()
# Calculate Standard error
std1, std2 = self.df[col1].std(), self.df[col2].std()
se1 = std1 / np.sqrt(self.df[col1].shape[0])
se2 = std2 / np.sqrt(self.df[col2].shape[0])
standard_error_for_difference_between_means = np.sqrt(se1**2 + se2**2)
mean_diff = abs(mean1 - mean2)
t_test_statistic = np.round((mean_diff / standard_error_for_difference_between_means),3)
degrees_of_freedom = self.df[[col1, col2]].shape[0] - 2
p_value = np.round((1 - ss.t.cdf(abs(t_test_statistic), degrees_of_freedom)) * 2, 3) # two-tailed
# CONFIDENCE INTERVAL
if ci:
t_cl = ss.t.ppf(self.__b, df=degrees_of_freedom) # t value for confidence interval
ci_lower = mean_diff - t_cl * standard_error_for_difference_between_means
ci_upper = mean_diff + t_cl * standard_error_for_difference_between_means
return t_test_statistic, p_value, np.round((ci_lower, ci_upper), 3)
else:
return t_test_statistic, p_value
def __z_test(self, col1, col2, ci=True):
"""
Z-test for two proportions
Input:
col1: pandas.Series
col2: pandas.Series
Return
z_test_statistic: z test statistic
p_value: P-value for hypothesis test
ci_lower: Confidence Interval Lower limit
ci_upper: Confidence Interval Upper limit
"""
assert type(self.df[col1]) == pd.core.series.Series, "Col1 Should be pandas.Series"
assert type(self.df[col2]) == pd.core.series.Series, "Col1 Should be pandas.Series"
logging.info("Z-test for two proportions method running!")
prop_a, n_a = self.df[col1].value_counts(normalize=True)[1], len(self.df[col1])
prop_b, n_b = self.df[col2].value_counts(normalize=True)[1], len(self.df[col2])
prop_a, prop_b, n_a, n_b = float(prop_a), float(prop_b), float(n_a), float(n_b)
# Standard error of two proportions
se1 = np.sqrt((prop_a*(1-prop_a))/n_a)
se2 = np.sqrt((prop_b*(1-prop_b))/n_b)
standard_error_for_difference_between_proportions = np.sqrt(se1**2 + se2**2)
prop_diff = abs(prop_b - prop_a)
z_test_statistic = np.round((prop_diff / standard_error_for_difference_between_proportions),3)
pvalue = np.round((ss.norm.pdf(abs(z_test_statistic)) * 2),3) # two-tailed
# CONFIDENCE INTERVAL
if ci:
z_cl = ss.norm.ppf(self.__b)
ci_lower = prop_diff - z_cl * standard_error_for_difference_between_proportions
ci_upper = prop_diff + z_cl * standard_error_for_difference_between_proportions
return z_test_statistic, pvalue, np.round((ci_lower, ci_upper), 3)
else:
return z_test_statistic, pvalue
def run(self, method: str, data: pd.DataFrame, col1: str, col2: str) -> list:
"""
Run:
python3 ab_test.py run --method=props --data=ab_test_prop.csv --col1=websiteA --col2=websiteB
python3 ab_test.py run --method=avgs --data=ab_test_avg.csv --col1=websiteA --col2=websiteB
"""
try:
self.df = data
except (ValueError, TypeError):
pass
try:
self.df = pd.read_csv(data, delimiter=',')
except (KeyError, ValueError):
#print('Delimeter maybe wrong')
pass
if method=='avgs':
return self.__t_test(col1, col2)
elif method=='props':
return self.__z_test(col1, col2)
else:
raise ValueError("Should not come here.")
# TESTS
import unittest
if __name__ == '__main__':
fire.Fire(ABTest) | 32.947917 | 118 | 0.621088 | #!/usr/bin/env python3
import pandas as pd
import numpy as np
import scipy.stats as ss
import fire
import logging
# Module-level logging: INFO and above go to the console (StreamHandler
# writes to stderr by default) with a timestamped format.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
stream_handler = logging.StreamHandler() # messages show up in terminal
formatter = logging.Formatter('%(asctime)s %(levelname)s :: %(message)s') # timestamp, level, message
stream_handler.setFormatter(formatter) # add formatter to the stream handler
stream_handler.setLevel(logging.INFO)
logger.addHandler(stream_handler)
class ABTest(object):
"""
A simple AB Test for two proportions or averages.
"""
def __init__(self):
self.alpha=0.05
self.__b = 1 - (float(self.alpha)/2)
self.power = 0.8
logging.info("AB Test class initialized!")
def __t_test(self, col1, col2, ci=True):
"""
Two-sample (Independent Samples) T-test (two-tailed)
Input:
col1: pandas.Series
col2: pandas.Series
Return
t_test_statistic: T test statistic
p_value: P-value for hypothesis test
ci_lower: Confidence Interval Lower limit
ci_upper: Confidence Interval Upper limit
"""
assert type(self.df[col1]) == pd.core.series.Series, "Col1 Should be pandas.Series"
assert type(self.df[col2]) == pd.core.series.Series, "Col1 Should be pandas.Series"
logging.info("Two-sample (Independent Samples) T-test (two-tailed) method running!")
# Means
mean1, mean2 = self.df[col1].mean(), self.df[col2].mean()
# Calculate Standard error
std1, std2 = self.df[col1].std(), self.df[col2].std()
se1 = std1 / np.sqrt(self.df[col1].shape[0])
se2 = std2 / np.sqrt(self.df[col2].shape[0])
standard_error_for_difference_between_means = np.sqrt(se1**2 + se2**2)
mean_diff = abs(mean1 - mean2)
t_test_statistic = np.round((mean_diff / standard_error_for_difference_between_means),3)
degrees_of_freedom = self.df[[col1, col2]].shape[0] - 2
p_value = np.round((1 - ss.t.cdf(abs(t_test_statistic), degrees_of_freedom)) * 2, 3) # two-tailed
# CONFIDENCE INTERVAL
if ci:
t_cl = ss.t.ppf(self.__b, df=degrees_of_freedom) # t value for confidence interval
ci_lower = mean_diff - t_cl * standard_error_for_difference_between_means
ci_upper = mean_diff + t_cl * standard_error_for_difference_between_means
return t_test_statistic, p_value, np.round((ci_lower, ci_upper), 3)
else:
return t_test_statistic, p_value
def __z_test(self, col1, col2, ci=True):
"""
Z-test for two proportions
Input:
col1: pandas.Series
col2: pandas.Series
Return
z_test_statistic: z test statistic
p_value: P-value for hypothesis test
ci_lower: Confidence Interval Lower limit
ci_upper: Confidence Interval Upper limit
"""
assert type(self.df[col1]) == pd.core.series.Series, "Col1 Should be pandas.Series"
assert type(self.df[col2]) == pd.core.series.Series, "Col1 Should be pandas.Series"
logging.info("Z-test for two proportions method running!")
prop_a, n_a = self.df[col1].value_counts(normalize=True)[1], len(self.df[col1])
prop_b, n_b = self.df[col2].value_counts(normalize=True)[1], len(self.df[col2])
prop_a, prop_b, n_a, n_b = float(prop_a), float(prop_b), float(n_a), float(n_b)
# Standard error of two proportions
se1 = np.sqrt((prop_a*(1-prop_a))/n_a)
se2 = np.sqrt((prop_b*(1-prop_b))/n_b)
standard_error_for_difference_between_proportions = np.sqrt(se1**2 + se2**2)
prop_diff = abs(prop_b - prop_a)
z_test_statistic = np.round((prop_diff / standard_error_for_difference_between_proportions),3)
pvalue = np.round((ss.norm.pdf(abs(z_test_statistic)) * 2),3) # two-tailed
# CONFIDENCE INTERVAL
if ci:
z_cl = ss.norm.ppf(self.__b)
ci_lower = prop_diff - z_cl * standard_error_for_difference_between_proportions
ci_upper = prop_diff + z_cl * standard_error_for_difference_between_proportions
return z_test_statistic, pvalue, np.round((ci_lower, ci_upper), 3)
else:
return z_test_statistic, pvalue
def run(self, method: str, data: pd.DataFrame, col1: str, col2: str) -> list:
"""
Run:
python3 ab_test.py run --method=props --data=ab_test_prop.csv --col1=websiteA --col2=websiteB
python3 ab_test.py run --method=avgs --data=ab_test_avg.csv --col1=websiteA --col2=websiteB
"""
try:
self.df = data
except (ValueError, TypeError):
pass
try:
self.df = pd.read_csv(data, delimiter=',')
except (KeyError, ValueError):
#print('Delimeter maybe wrong')
pass
if method=='avgs':
return self.__t_test(col1, col2)
elif method=='props':
return self.__z_test(col1, col2)
else:
raise ValueError("Should not come here.")
# TESTS
import unittest
class TestABTest(unittest.TestCase):
def setUp(self) -> None:
np.random.seed(42)
data = {'nominal1': np.random.randint(0,2, size=100),
'nominal2': np.random.randint(0,2, size=100),
'interval1': np.random.randint(0,20, size=100),
'interval2': np.random.randint(0,20, size=100)
}
self.data = pd.DataFrame(data)
self.abtest = ABTest()
def test_t_test(self):
t, p, ci = self.abtest.run('avgs', self.data, 'interval1', 'interval2')
self.assertEqual(t, 0.422, "T test statistic error")
self.assertEqual(p, 0.674, "Pvalue is not looking good")
self.assertEqual(ci[0], -1.405, 'CI problem')
def test_z_test(self):
z, p, ci = self.abtest.run('props', self.data, 'nominal1', 'nominal2')
self.assertEqual(z, 1.709, "T test statistic error")
self.assertEqual(p, 0.185, "Pvalue is not looking good")
self.assertEqual(ci[0], -0.018, 'CI problem')
if __name__ == '__main__':
fire.Fire(ABTest) | 1,026 | 15 | 130 |