| column | dtype | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
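Each record below carries these columns in order: the metadata fields on a single pipe-delimited line, then the raw file in `content`, then the three derived statistics. As a rough sketch only, this is how such records could be consumed if they were stored one JSON object per line; the file name `files.jsonl` is a placeholder assumption, not something this dump specifies.

```python
import json

def iter_rows(path="files.jsonl"):
    """Yield one record (a dict with the columns above) per line of a JSON-lines file."""
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            yield json.loads(line)

for row in iter_rows():
    # 'content' is the raw source file; everything else is repository metadata
    # plus three simple statistics derived from the content.
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"], row["lang"])
    print(row["avg_line_length"], row["max_line_length"], row["alphanum_fraction"])
    print(row["content"][:120])  # preview of the file
    break
```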
8288b2e7573b94e2b8720c7f8340a53dcf4d450a | 1,004 | py | Python | api/wait_for_mysql.py | sieira/pycont | 6d1abbf16d22677391bd87ab52a68287fb140d59 | ["BSD-3-Clause"] | null | null | null | api/wait_for_mysql.py | sieira/pycont | 6d1abbf16d22677391bd87ab52a68287fb140d59 | ["BSD-3-Clause"] | 35 | 2019-11-30T12:03:33.000Z | 2021-09-17T08:44:37.000Z | api/wait_for_mysql.py | sieira/pycont | 6d1abbf16d22677391bd87ab52a68287fb140d59 | ["BSD-3-Clause"] | null | null | null |
#!/usr/local/bin/python
import time
import MySQLdb
from pycont.settings import DATABASES
TIMEOUT_SECONDS = 30
start = time.time()
host = DATABASES['default']['HOST']
user = DATABASES['default']['USER']
password = DATABASES['default']['PASSWORD']
port = int(DATABASES['default']['PORT'])
db = f'test_{DATABASES["default"]["NAME"]}'
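# Poll until the MySQL server is reachable and the expected test database exists, or until TIMEOUT_SECONDS elapses.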
while time.time() - start < TIMEOUT_SECONDS:
try:
conn = MySQLdb.connect(host=host, user=user, passwd=password, port=port)
while time.time() - start < TIMEOUT_SECONDS:
cursor = conn.cursor()
cursor.execute(f"show databases like '{db}'")
result = cursor.fetchone()
if result and len(result) > 0:
print('GOTCHA !!')
exit(0)
else:
time.sleep(1)
cursor.close()
conn.close()
except Exception:
print('Could not connect, sleep 1 sec.')
time.sleep(1)
print('Could not connect before timeout')
exit(1)
| 23.904762 | 80 | 0.592629 |
af1532c5ddec3a20becddb43e782dd1855911c98 | 219 | py | Python | GPSInfo/gpsinfo/__init__.py | jgraber/Python_Scripts | ffdbf17d28521cf5c2a3f7aadfb817e7811f86b7 | ["MIT"] | null | null | null | GPSInfo/gpsinfo/__init__.py | jgraber/Python_Scripts | ffdbf17d28521cf5c2a3f7aadfb817e7811f86b7 | ["MIT"] | 3 | 2021-07-01T19:36:30.000Z | 2021-08-30T19:59:00.000Z | GPSInfo/gpsinfo/__init__.py | jgraber/Python_Scripts | ffdbf17d28521cf5c2a3f7aadfb817e7811f86b7 | ["MIT"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Shows GPS metadata in JPEG files
.. currentmodule:: gpsinfo
.. moduleauthor:: Johnny Graber <JG@JGraber.ch>
"""
from .version import __version__, __release__ # noqa
| 18.25 | 53 | 0.684932 |
a9ab3fb34e744494bb3911675531637f1497a10a | 2,046 | py | Python | conf/dbConf.py | Valuebai/awesome-python-io | 8bb3400036843975cb41cbfd85ccfe603596930b | ["MIT"] | 8 | 2019-05-01T05:22:44.000Z | 2022-03-02T10:04:48.000Z | conf/dbConf.py | Valuebai/awesome-python-io | 8bb3400036843975cb41cbfd85ccfe603596930b | ["MIT"] | 26 | 2019-03-23T05:10:04.000Z | 2022-03-04T09:57:32.000Z | conf/dbConf.py | Valuebai/awesome-python-io | 8bb3400036843975cb41cbfd85ccfe603596930b | ["MIT"] | null | null | null |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''=================================================
@IDE :PyCharm
@Author :LuckyHuibo
@Date :2019/9/3 10:44
@Desc :Wraps reading the database configuration so it is easy to use
@Usage:
# add at the top of your code
from conf.dbConf import db_user
db.db_name
=================================================='''
import yaml
from conf.log_config import logger
class GetDBYaml:
def __init__(self, DIR_DB_yaml=r'./yaml_databases.yaml'):
"""
        :param DIR_DB_yaml: database configuration file; the default is used if none is given
"""
        # Open the database configuration YAML file
stream = open(DIR_DB_yaml, 'r', encoding='utf-8')
params = yaml.load(stream, Loader=yaml.FullLoader)
        # Set the database host, port, database name, username and password
self.user = params['database_conf']['user']
self.password = params['database_conf']['password']
self.host = params['database_conf']['host']
self.port = params['database_conf']['port']
        # Set the charset so Chinese text is read correctly; otherwise the data can come back garbled
self.charset = params['database_conf']['charset']
        # Database to connect to
self.db_name = params['database_conf']['db_name']
        # Table to connect to
self.table_name = params['database_conf']['table_name']
def getUser(self):
return self.user
def getPassWord(self):
return self.password
def getHost(self):
return self.host
def getPort(self):
return self.port
def getCharset(self):
return self.charset
def getDBName(self):
return self.db_name
def getTableName(self):
return self.table_name
db_user = GetDBYaml().getUser()
db_password = GetDBYaml().getPassWord()
db_host = GetDBYaml().getHost()
db_port = GetDBYaml().getPort()
db_charset = GetDBYaml().getCharset()
db_name = GetDBYaml().getDBName()
db_table_name = GetDBYaml().getTableName()
if __name__ == "__main__":
    # Test the code above
from conf.dbConf import db_user
print(db_user)
print(db_password)
print(db_host)
print(db_port)
print(db_charset)
print(db_name)
print(db_table_name)
    # Test that the logging setup works when imported
    logger.info('Imported from another module; printing a log line to check')
| 22.483516 | 63 | 0.608016 |
2299cac4f6045243c2c1826391629cbc79c8873a | 180 | py | Python | statgetter/urls.py | dragonfi/git-repo-stats | 8a662dfdd14c6e0b1a58f5908eadf47ecfbb1c9c | ["MIT"] | null | null | null | statgetter/urls.py | dragonfi/git-repo-stats | 8a662dfdd14c6e0b1a58f5908eadf47ecfbb1c9c | ["MIT"] | null | null | null | statgetter/urls.py | dragonfi/git-repo-stats | 8a662dfdd14c6e0b1a58f5908eadf47ecfbb1c9c | ["MIT"] | null | null | null |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^stats/(?P<repo_url>.*)', views.stats, name='stats'),
url(r'^$', views.index, name='index'),
]
| 20 | 63 | 0.627778 |
b542078a0632d81ab803d71d78497afd10c48d4e | 2,212 | py | Python | astroquery/alfalfa/tests/test_alfalfa.py | eteq/astroquery | 70db53f8f047a2ee3481fd3242e6b364bc1ca639 | ["BSD-3-Clause"] | 1 | 2021-03-20T00:07:01.000Z | 2021-03-20T00:07:01.000Z | astroquery/alfalfa/tests/test_alfalfa.py | eteq/astroquery | 70db53f8f047a2ee3481fd3242e6b364bc1ca639 | ["BSD-3-Clause"] | null | null | null | astroquery/alfalfa/tests/test_alfalfa.py | eteq/astroquery | 70db53f8f047a2ee3481fd3242e6b364bc1ca639 | ["BSD-3-Clause"] | 1 | 2021-03-20T00:07:05.000Z | 2021-03-20T00:07:05.000Z |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from contextlib import contextmanager
import os
import requests
from astropy import coordinates
from astropy.tests.helper import pytest
from ...utils import commons
from ...utils.testing_tools import MockResponse
from ... import alfalfa
DATA_FILES = {'catalog':'alfalfa_cat_small.txt',
'spectrum':'alfalfa_sp.fits'}
class MockResponseAlfalfa(MockResponse):
def __init__(self, content, **kwargs):
super(MockResponseAlfalfa, self).__init__(content, **kwargs)
def iter_lines(self):
for l in self.text.split("\n"):
yield l
def close(self):
pass
@pytest.fixture
def patch_get(request):
mp = request.getfuncargvalue("monkeypatch")
mp.setattr(requests, 'get', get_mockreturn)
return mp
@pytest.fixture
def patch_get_readable_fileobj(request):
@contextmanager
def get_readable_fileobj_mockreturn(filename, **kwargs):
file_obj = data_path(DATA_FILES['spectrum']) # TODO: add images option
yield open(file_obj, 'rb') # read as bytes, assuming FITS
mp = request.getfuncargvalue("monkeypatch")
mp.setattr(commons, 'get_readable_fileobj', get_readable_fileobj_mockreturn)
return mp
def get_mockreturn(url, params=None, timeout=10):
filename = data_path(DATA_FILES['catalog'])
content = open(filename, 'rb').read()
return MockResponseAlfalfa(content)
def data_path(filename):
data_dir = os.path.join(os.path.dirname(__file__), 'data')
return os.path.join(data_dir, filename)
# Test Case: A Seyfert 1 galaxy
coords = coordinates.SkyCoord('0h8m05.63s +14d50m23.3s')
ALFALFA = alfalfa.core.Alfalfa()
def test_alfalfa_catalog(patch_get, patch_get_readable_fileobj, coords=coords):
cat = ALFALFA.get_catalog()
assert len(cat) > 0
def test_alfalfa_crossID(patch_get, patch_get_readable_fileobj, coords=coords):
agc = ALFALFA.query_region(coords, optical_counterpart=True)
assert agc == 100051
def test_alfalfa_spectrum(patch_get, patch_get_readable_fileobj, coords=coords):
agc = ALFALFA.query_region(coords, optical_counterpart=True)
sp = ALFALFA.get_spectrum(agc)
assert len(sp) == 3
| 29.105263 | 80 | 0.731465 |
2db1944a2456c3da59cfd5304ee55305a67ec93d | 856 | py | Python | tests/test_boolean_operations.py | latera/hyaml | 4dc020434d25c182f8477ddd5582398e51501274 | ["Apache-2.0"] | 3 | 2020-04-12T15:55:11.000Z | 2021-08-02T16:26:21.000Z | tests/test_boolean_operations.py | latera/hyaml | 4dc020434d25c182f8477ddd5582398e51501274 | ["Apache-2.0"] | null | null | null | tests/test_boolean_operations.py | latera/hyaml | 4dc020434d25c182f8477ddd5582398e51501274 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
from unittest import main
from tests import TranslationCase as TestCase
class TestBooleanOperations(TestCase):
def test_conjuction(self):
self.assertTranslated("true and false", "True and False")
def test_disjunction(self):
self.assertTranslated("true or false", "True or False")
def test_negation(self):
self.assertTranslated("not true", "not True")
def test_complex_expression(self):
self.assertTranslated(
"true or false and not true", "True or False and not True"
)
def test_parens(self):
self.assertTranslated(
"(true and false) or (false and true)",
"(True and False) or (False and True)",
)
self.assertTranslated(
"true and not (false or true)", "True and not (False or True)"
)
| 27.612903 | 74 | 0.630841 |
0713d2f38806d52533d9a6261d83edf814e19d88 | 8,090 | py | Python | tests/test_ftry.py | bryanwb/smonad | ada67fe29b4d5cfa5c7d6b8b7fe6ebe6811fac6b | ["BSD-3-Clause"] | null | null | null | tests/test_ftry.py | bryanwb/smonad | ada67fe29b4d5cfa5c7d6b8b7fe6ebe6811fac6b | ["BSD-3-Clause"] | null | null | null | tests/test_ftry.py | bryanwb/smonad | ada67fe29b4d5cfa5c7d6b8b7fe6ebe6811fac6b | ["BSD-3-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2014, Philip Xu <pyx@xrefactor.com>
# License: BSD New, see LICENSE for details.
import pytest
from smonad.actions import ftry
from smonad.decorators import failsafe
from smonad.exceptions import ExtractError
from smonad.types import Try, Failure, Success
test_range = range(-100, 100)
unit = Try.unit
error = Failure('Error')
def add_1(n):
if isinstance(n, int):
return unit(n + 1)
else:
return error
def double(n):
if isinstance(n, int):
return unit(n * 2)
else:
return error
def fail(n):
return error
def test_local_helper_function_add_one():
for n in test_range:
assert add_1(n) == unit(n + 1)
assert add_1('1') is error
def test_local_helper_function_double():
for n in test_range:
assert double(n) == unit(n * 2)
assert double('1') is error
def test_local_helper_function_fail():
for n in test_range:
assert fail(n) is error
def test_fmap_functor_laws():
identity = lambda a: a
f = lambda a: a + 1
g = lambda a: a * 2
f_g = lambda n: f(g(n))
for n in test_range:
ft = unit(n)
# fmap id == id
assert ft.fmap(identity) == identity(ft)
# fmap (f . g) == fmap f . fmap g
assert ft.fmap(f_g) == ft.fmap(g).fmap(f)
value = 42
l = Failure('Something wrong.')
r = Success(value)
assert l.fmap(f) is l
assert r.fmap(f) == Success(f(42))
def test_unit():
assert type(unit(42)) is Success
def test_ftry_is_abstract():
with pytest.raises(NotImplementedError):
Try(42)
def test_compare():
for n in test_range:
assert Failure(n) == Failure(n)
assert Success(n) == Success(n)
assert Failure(n) != Success(n)
def test_ordering():
with pytest.raises(TypeError):
Failure(1) < 1
with pytest.raises(TypeError):
Success(1) < 1
for n in test_range:
assert (Failure(n) < Failure(n)) is False
assert Failure(n) > Failure(n - 1)
assert Failure(n) < Failure(n + 1)
assert (Success(n) < Success(n)) is False
assert Success(n) > Success(n - 1)
assert Success(n) < Success(n + 1)
assert Failure(n) < Success(n)
def test_as_context_manager():
for n in test_range:
with pytest.raises(ExtractError):
with unit(n) >> double >> fail >> double as result:
assert False
assert result
with pytest.raises(ExtractError):
with error as n:
assert False
with pytest.raises(ExtractError):
with double(n) as result:
with error as n:
assert False
with pytest.raises(ExtractError):
with double(n) as result, error as n:
assert False
def test_bool():
assert bool(Failure(True)) is False
assert bool(Success(False)) is True
for n in test_range:
assert bool(Failure(n)) is False
assert bool(Success(n)) is True
assert bool(unit(n)) is True
def test_bind():
assert error.bind(add_1) is error
for n in test_range:
m = unit(n)
assert m.bind(fail) is error
def test_bind_operator():
for n in test_range:
m = unit(n)
assert m >> fail is error
assert fail(n) >> add_1 is error
def test_reversed_bind_operator():
for n in test_range:
m = unit(n)
assert fail << m is error
assert add_1 << fail(n) is error
def test_chain_bind_operator():
for n in test_range:
m = unit(n)
assert m >> fail >> add_1 == error
assert m >> add_1 >> fail == error
assert m >> fail >> double == error
assert m >> double >> fail == error
def test_monad_law_left_identity():
for n in test_range:
# unit n >>= f == f n
f = fail
assert unit(n) >> f == f(n)
def test_monad_law_right_identity():
for n in test_range:
# m >>= unit == m
assert error >> unit == error
def test_monad_law_associativity():
for n in test_range:
# m >>= (\x -> k x >>= h) == (m >>= k) >>= h
m = unit(n)
k = add_1
h = fail
assert m >> (lambda x: k(x) >> h) == (m >> k) >> h
k = fail
h = double
assert m >> (lambda x: k(x) >> h) == (m >> k) >> h
k = fail
h = fail
assert m >> (lambda x: k(x) >> h) == (m >> k) >> h
def test_ftry_action():
inc = lambda n: n + 1
dec = lambda n: n - 1
act = ftry(inc)
assert act(Failure(1)) == 2
assert act(Success(1)) == 1
act = ftry(failure_handler=inc, success_handler=dec)
assert act(Failure(1)) == 2
assert act(Success(1)) == 0
def test_ftry_action_with_incompatible_type():
inc = lambda n: n + 1
act = ftry(inc)
assert act(Failure(1)) == 2
with pytest.raises(TypeError):
act(1)
def test_failsafe_decorator():
@failsafe
def div(a, b):
return a / b
assert div(42, 21) == unit(2)
assert isinstance(div(42, 0), Failure)
def test_failsafe_decorator_catch_extract_error():
@failsafe(failure_on_exception=None)
def wrong():
with fail(1) as result:
assert result is False # should not reach here
assert wrong() == error
@failsafe(failure_on_exception=None)
def wrong():
raise ExtractError('not a left')
assert isinstance(wrong(), Failure)
def test_failsafe_decorator_with_predicate():
@failsafe(predicate=bool)
def truth(x):
return x
assert truth(42) == unit(42)
assert truth(None) == Failure(None)
assert add_1(0) >> truth == unit(1)
assert add_1(-1) >> truth == Failure(0)
assert truth(False) >> double == Failure(False)
assert double([]) >> truth == error
def test_failsafe_decorator_with_value():
@failsafe(failure_on_value=None)
def truth(x):
return x
assert truth(42) == unit(42)
assert truth('') == unit('')
assert truth(0) == unit(0)
assert truth(False) == unit(False)
assert truth(None) == Failure(None)
def test_failsafe_decorator_combined():
@failsafe(predicate=bool, failure_on_value=42)
def wrap(x):
return x
assert wrap(True) == Success(True)
assert wrap(False) == Failure(False)
assert wrap('something') == Success('something')
assert wrap('') == Failure('')
assert wrap([False]) == Success([False])
assert wrap([]) == Failure([])
assert wrap(1) == Success(1)
assert wrap(0) == Failure(0)
assert wrap(None) == Failure(None)
assert wrap(42) == Failure(42)
def test_failsafe_decorator_none_exception():
@failsafe(failure_on_exception=None)
def div(a, b):
return a / b
with pytest.raises(ZeroDivisionError):
div(42, 0)
def test_failsafe_decorator_empty_seq_exception():
for empty in ([], tuple(), set()):
@failsafe(failure_on_exception=empty)
def div(a, b):
return a / b
with pytest.raises(ZeroDivisionError):
div(42, 0)
def test_failsafe_decorator_specific_exception():
@failsafe(failure_on_exception=ZeroDivisionError)
def div(a, b):
return a / b
assert isinstance(div(42, 0), Failure)
def test_failsafe_decorator_specific_exception_tuple():
@failsafe(failure_on_exception=(IOError, ZeroDivisionError))
def div(a, b):
if a < 0:
raise IOError
return a / b
assert isinstance(div(42, 0), Failure)
assert isinstance(div(-42, 2), Failure)
def test_match_failure_only():
assert Failure(0).match(lambda v: v + 1) == 1
assert Success(10).match(lambda v: v + 1) == 10
def test_match_failure_and_success():
assert Failure(0).match(lambda v: v + 1, lambda v: v / 2) == 1
assert Success(10).match(lambda v: v + 1, lambda v: v / 2) == 5
def test_recover():
result = Failure(0).recover(lambda v: v + 1)
assert result == 1
result = Success(10).recover(lambda v: v + 1)
assert isinstance(result, Success)
assert result.value == 10
| 23.864307 | 67 | 0.598517 |
70d0cd34f3341c11ec868aefc8033929bb2428ea | 1,690 | py | Python | pcdet/datasets/nuscenes/test.py | charlesyz/PCDet | 1eb6b1dc5a3d563d7532b1c8ee3be007cbeafc80 | ["Apache-2.0"] | null | null | null | pcdet/datasets/nuscenes/test.py | charlesyz/PCDet | 1eb6b1dc5a3d563d7532b1c8ee3be007cbeafc80 | ["Apache-2.0"] | null | null | null | pcdet/datasets/nuscenes/test.py | charlesyz/PCDet | 1eb6b1dc5a3d563d7532b1c8ee3be007cbeafc80 | ["Apache-2.0"] | null | null | null |
import os
import yaml
from pcdet.datasets.nuscenes import nuscenes_calibration
import numpy as np
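# Quick manual check: push a few hard-coded LiDAR points through the calibration's
# lidar_to_rect, rect_to_lidar and lidar_to_img transforms and print the results.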
calib = nuscenes_calibration.Calibration('/home/cyz/cadc_tracker/PCDet/data/cadcd/2018_03_06/calib')
print(calib.lidar_to_rect(np.array([[-17.99813181674196, 32.52045143245261, -0.9221325934825185],
[33.96512040277093, 134.84462147046216,-1.348917510897253],
[6.39555783362782, 61.08589524957631, -1.5948109728734963],
[6.319156313564423, 53.583858332617396,-1.5548908689921164],
[-34.18142897685132, 24.84890557590814, -0.7616092760460385]])))
print(calib.rect_to_lidar(
calib.lidar_to_rect(np.array([[-17.99813181674196, 32.52045143245261, -0.9221325934825185],
[33.96512040277093, 134.84462147046216,-1.348917510897253],
[6.39555783362782, 61.08589524957631, -1.5948109728734963],
[6.319156313564423, 53.583858332617396,-1.5548908689921164],
[-34.18142897685132, 24.84890557590814, -0.7616092760460385]]))))
print(calib.lidar_to_img(np.array([[-17.99813181674196, 32.52045143245261, -0.9221325934825185],
[33.96512040277093, 134.84462147046216,-1.348917510897253],
[6.39555783362782, 61.08589524957631, -1.5948109728734963],
[6.319156313564423, 53.583858332617396,-1.5548908689921164],
[-34.18142897685132, 24.84890557590814, -0.7616092760460385]])))
| 73.478261 | 101 | 0.595266 |
b55834051108e3be652ff6930d1804f16bdd9c33 | 813 | py | Python | src/webserver/backend/app/app/backend_pre_start.py | badarsebard/OpenSOAR | f0169261ef6d1ce4528236708e53d82de894ec56 | ["Apache-2.0"] | 1 | 2022-01-12T02:00:22.000Z | 2022-01-12T02:00:22.000Z | src/webserver/backend/app/app/backend_pre_start.py | badarsebard/OpenSOAR | f0169261ef6d1ce4528236708e53d82de894ec56 | ["Apache-2.0"] | null | null | null | src/webserver/backend/app/app/backend_pre_start.py | badarsebard/OpenSOAR | f0169261ef6d1ce4528236708e53d82de894ec56 | ["Apache-2.0"] | 1 | 2022-01-19T20:56:14.000Z | 2022-01-19T20:56:14.000Z |
import logging
from tenacity import after_log, before_log, retry, stop_after_attempt, wait_fixed
from app.database import SessionLocal
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
max_tries = 60 * 5 # 5 minutes
wait_seconds = 1
@retry(
stop=stop_after_attempt(max_tries),
wait=wait_fixed(wait_seconds),
before=before_log(logger, logging.INFO),
after=after_log(logger, logging.WARN),
)
def init() -> None:
try:
db = SessionLocal()
# Try to create session to check if DB is awake
db.execute("SELECT 1")
except Exception as e:
logger.error(e)
raise e
def main() -> None:
logger.info("Initializing service")
init()
logger.info("Service finished initializing")
if __name__ == "__main__":
main()
| 21.394737 | 81 | 0.686347 |
5f851c993a303cbf5eb915fbc608e2db6f98c374 | 6,666 | py | Python | grapheme/api.py | rlaneyjr/grapheme | 4e7816c4ec41cd3032b6b9faa44addedf3071b30 | ["MIT"] | 78 | 2017-08-06T23:44:55.000Z | 2022-03-19T01:20:25.000Z | grapheme/api.py | rlaneyjr/grapheme | 4e7816c4ec41cd3032b6b9faa44addedf3071b30 | ["MIT"] | 17 | 2017-07-24T08:39:45.000Z | 2022-03-19T01:13:54.000Z | grapheme/api.py | rlaneyjr/grapheme | 4e7816c4ec41cd3032b6b9faa44addedf3071b30 | ["MIT"] | 8 | 2017-08-06T12:31:55.000Z | 2022-03-17T22:32:44.000Z |
# -*- coding: utf-8 -*-
from grapheme.finder import GraphemeIterator, get_last_certain_break_index
UNICODE_VERSION = "13.0.0"
def graphemes(string):
"""
Returns an iterator of all graphemes of given string.
>>> rainbow_flag = "🏳️🌈"
>>> [codepoint for codepoint in rainbow_flag]
['🏳', '️', '\u200d', '🌈']
>>> list(grapheme.graphemes("multi codepoint grapheme: " + rainbow_flag))
['m', 'u', 'l', 't', 'i', ' ', 'c', 'o', 'd', 'e', 'p', 'o', 'i', 'n', 't', ' ', 'g', 'r', 'a', 'p', 'h', 'e', 'm', 'e', ':', ' ', '🏳️🌈']
"""
return iter(GraphemeIterator(string))
def length(string, until=None):
"""
Returns the number of graphemes in the string.
Note that this functions needs to traverse the full string to calculate the length,
unlike `len(string)` and it's time consumption is linear to the length of the string
(up to the `until` value).
Only counts up to the `until` argument, if given. This is useful when testing
the length of a string against some limit and the excess length is not interesting.
>>> rainbow_flag = "🏳️🌈"
>>> len(rainbow_flag)
4
    >>> grapheme.length(rainbow_flag)
1
>>> graphemes.length("".join(str(i) for i in range(100)), 30)
30
"""
if until is None:
return sum(1 for _ in GraphemeIterator(string))
iterator = graphemes(string)
count = 0
while True:
try:
if count >= until:
break
next(iterator)
except StopIteration:
break
else:
count += 1
return count
# todo: should probably use an optimized iterator that only deals with code point counts (optimization)
def grapheme_lengths(string):
"""
Returns an iterator of number of code points in each grapheme of the string.
"""
return iter(len(g) for g in graphemes(string))
def slice(string, start=None, end=None):
"""
Returns a substring of the given string, counting graphemes instead of codepoints.
Negative indices is currently not supported.
>>> string = "tamil நி (ni)"
>>> string[:7]
'tamil ந'
>>> grapheme.slice(string, end=7)
'tamil நி'
>>> string[7:]
'ி (ni)'
>>> grapheme.slice(string, 7)
' (ni)'
"""
if start is None:
start = 0
if end is not None and start >= end:
return ""
if start < 0:
raise NotImplementedError("Negative indexing is currently not supported.")
sum_ = 0
start_index = None
for grapheme_index, grapheme_length in enumerate(grapheme_lengths(string)):
if grapheme_index == start:
start_index = sum_
elif grapheme_index == end:
return string[start_index:sum_]
sum_ += grapheme_length
if start_index is not None:
return string[start_index:]
return ""
def contains(string, substring):
"""
Returns true if the sequence of graphemes in substring is also present in string.
This differs from the normal python `in` operator, since the python operator will return
true if the sequence of codepoints are withing the other string without considering
grapheme boundaries.
Performance notes: Very fast if `substring not in string`, since that also means that
the same graphemes can not be in the two strings. Otherwise this function has linear time
complexity in relation to the string length. It will traverse the sequence of graphemes until
a match is found, so it will generally perform better for grapheme sequences that match early.
>>> "🇸🇪" in "🇪🇸🇪🇪"
True
>>> grapheme.contains("🇪🇸🇪🇪", "🇸🇪")
False
"""
if substring not in string:
return False
substr_graphemes = list(graphemes(substring))
if len(substr_graphemes) == 0:
return True
elif len(substr_graphemes) == 1:
return substr_graphemes[0] in graphemes(string)
else:
str_iter = graphemes(string)
str_sub_part = []
for _ in range(len(substr_graphemes)):
try:
str_sub_part.append(next(str_iter))
except StopIteration:
return False
for g in str_iter:
if str_sub_part == substr_graphemes:
return True
str_sub_part.append(g)
str_sub_part.pop(0)
return str_sub_part == substr_graphemes
def startswith(string, prefix):
"""
Like str.startswith, but also checks that the string starts with the given prefixes sequence of graphemes.
str.startswith may return true for a prefix that is not visually represented as a prefix if a grapheme cluster
is continued after the prefix ends.
>>> grapheme.startswith("✊🏾", "✊")
False
>>> "✊🏾".startswith("✊")
True
"""
return string.startswith(prefix) and safe_split_index(string, len(prefix)) == len(prefix)
def endswith(string, suffix):
"""
Like str.endswith, but also checks that the string ends with the given prefixes sequence of graphemes.
str.endswith may return true for a suffix that is not visually represented as a suffix if a grapheme cluster
is initiated before the suffix starts.
>>> grapheme.endswith("🏳️🌈", "🌈")
False
>>> "🏳️🌈".endswith("🌈")
True
"""
expected_index = len(string) - len(suffix)
return string.endswith(suffix) and safe_split_index(string, expected_index) == expected_index
def safe_split_index(string, max_len):
"""
Returns the highest index up to `max_len` at which the given string can be sliced, without breaking a grapheme.
This is useful for when you want to split or take a substring from a string, and don't really care about
the exact grapheme length, but don't want to risk breaking existing graphemes.
This function does normally not traverse the full grapheme sequence up to the given length, so it can be used
for arbitrarily long strings and high `max_len`s. However, some grapheme boundaries depend on the previous state,
so the worst case performance is O(n). In practice, it's only very long non-broken sequences of country flags
(represented as Regional Indicators) that will perform badly.
The return value will always be between `0` and `len(string)`.
>>> string = "tamil நி (ni)"
>>> i = grapheme.safe_split_index(string, 7)
>>> i
6
>>> string[:i]
'tamil '
>>> string[i:]
'நி (ni)'
"""
last_index = get_last_certain_break_index(string, max_len)
for l in grapheme_lengths(string[last_index:]):
if last_index + l > max_len:
break
last_index += l
return last_index
| 31.742857 | 142 | 0.640564 |
06982205f4581e67a9e3374984a2ec68d015a05e | 26,293 | py | Python | py/selenium/webdriver/remote/webelement.py | Sindhura8/selenium | 2d47c4283de2900abc598f608d273bcb04f4c8c1 | ["Apache-2.0"] | 1 | 2021-01-06T07:16:19.000Z | 2021-01-06T07:16:19.000Z | py/selenium/webdriver/remote/webelement.py | Sindhura8/selenium | 2d47c4283de2900abc598f608d273bcb04f4c8c1 | ["Apache-2.0"] | null | null | null | py/selenium/webdriver/remote/webelement.py | Sindhura8/selenium | 2d47c4283de2900abc598f608d273bcb04f4c8c1 | ["Apache-2.0"] | 2 | 2020-09-14T07:53:33.000Z | 2020-09-15T06:42:18.000Z |
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from base64 import b64decode
from hashlib import md5 as md5_hash
import pkgutil
import warnings
import zipfile
from abc import ABCMeta
from io import BytesIO
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.utils import keys_to_typing
from .command import Command
from six import add_metaclass
# Python 3 imports
try:
str = basestring
except NameError:
pass
try:
from base64 import encodebytes
except ImportError: # Python 2
from base64 import encodestring as encodebytes
# TODO: when dropping Python 2.7, use built in importlib_resources.files
# not relying on __package__ here as it can be `None` in some situations (see #4558)
_pkg = '.'.join(__name__.split('.')[:-1])
getAttribute_js = pkgutil.get_data(_pkg, 'getAttribute.js').decode('utf8')
isDisplayed_js = pkgutil.get_data(_pkg, 'isDisplayed.js').decode('utf8')
@add_metaclass(ABCMeta)
class BaseWebElement(object):
"""
Abstract Base Class for WebElement.
ABC's will allow custom types to be registered as a WebElement to pass type checks.
"""
pass
class WebElement(BaseWebElement):
"""Represents a DOM element.
Generally, all interesting operations that interact with a document will be
performed through this interface.
All method calls will do a freshness check to ensure that the element
reference is still valid. This essentially determines whether or not the
element is still attached to the DOM. If this test fails, then an
``StaleElementReferenceException`` is thrown, and all future calls to this
instance will fail."""
def __init__(self, parent, id_, w3c=False):
self._parent = parent
self._id = id_
self._w3c = w3c
def __repr__(self):
return '<{0.__module__}.{0.__name__} (session="{1}", element="{2}")>'.format(
type(self), self._parent.session_id, self._id)
@property
def tag_name(self):
"""This element's ``tagName`` property."""
return self._execute(Command.GET_ELEMENT_TAG_NAME)['value']
@property
def text(self):
"""The text of the element."""
return self._execute(Command.GET_ELEMENT_TEXT)['value']
def click(self):
"""Clicks the element."""
self._execute(Command.CLICK_ELEMENT)
def submit(self):
"""Submits a form."""
if self._w3c:
form = self.find_element(By.XPATH, "./ancestor-or-self::form")
self._parent.execute_script(
"var e = arguments[0].ownerDocument.createEvent('Event');"
"e.initEvent('submit', true, true);"
"if (arguments[0].dispatchEvent(e)) { arguments[0].submit() }", form)
else:
self._execute(Command.SUBMIT_ELEMENT)
def clear(self):
"""Clears the text if it's a text entry element."""
self._execute(Command.CLEAR_ELEMENT)
def get_property(self, name):
"""
Gets the given property of the element.
:Args:
- name - Name of the property to retrieve.
:Usage:
::
text_length = target_element.get_property("text_length")
"""
try:
return self._execute(Command.GET_ELEMENT_PROPERTY, {"name": name})["value"]
except WebDriverException:
# if we hit an end point that doesnt understand getElementProperty lets fake it
return self.parent.execute_script('return arguments[0][arguments[1]]', self, name)
def get_attribute(self, name):
"""Gets the given attribute or property of the element.
This method will first try to return the value of a property with the
given name. If a property with that name doesn't exist, it returns the
value of the attribute with the same name. If there's no attribute with
that name, ``None`` is returned.
Values which are considered truthy, that is equals "true" or "false",
are returned as booleans. All other non-``None`` values are returned
as strings. For attributes or properties which do not exist, ``None``
is returned.
:Args:
- name - Name of the attribute/property to retrieve.
Example::
# Check if the "active" CSS class is applied to an element.
is_active = "active" in target_element.get_attribute("class")
"""
attribute_value = ''
if self._w3c:
attribute_value = self.parent.execute_script(
"return (%s).apply(null, arguments);" % getAttribute_js,
self, name)
else:
resp = self._execute(Command.GET_ELEMENT_ATTRIBUTE, {'name': name})
attribute_value = resp.get('value')
if attribute_value:
if name != 'value' and attribute_value.lower() in ('true', 'false'):
attribute_value = attribute_value.lower()
return attribute_value
def is_selected(self):
"""Returns whether the element is selected.
Can be used to check if a checkbox or radio button is selected.
"""
return self._execute(Command.IS_ELEMENT_SELECTED)['value']
def is_enabled(self):
"""Returns whether the element is enabled."""
return self._execute(Command.IS_ELEMENT_ENABLED)['value']
def find_element_by_id(self, id_):
"""Finds element within this element's children by ID.
:Args:
- id\\_ - ID of child element to locate.
:Returns:
- WebElement - the element if it was found
:Raises:
- NoSuchElementException - if the element wasn't found
:Usage:
::
foo_element = element.find_element_by_id('foo')
"""
warnings.warn("find_element_by_* commands are deprecated. Please use find_element() instead")
return self.find_element(by=By.ID, value=id_)
def find_elements_by_id(self, id_):
"""Finds a list of elements within this element's children by ID.
Will return a list of webelements if found, or an empty list if not.
:Args:
- id\\_ - Id of child element to find.
:Returns:
- list of WebElement - a list with elements if any was found. An
empty list if not
:Usage:
::
elements = element.find_elements_by_id('foo')
"""
warnings.warn("find_elements_by_* commands are deprecated. Please use find_elements() instead")
return self.find_elements(by=By.ID, value=id_)
def find_element_by_name(self, name):
"""Finds element within this element's children by name.
:Args:
- name - name property of the element to find.
:Returns:
- WebElement - the element if it was found
:Raises:
- NoSuchElementException - if the element wasn't found
:Usage:
::
element = element.find_element_by_name('foo')
"""
warnings.warn("find_element_by_* commands are deprecated. Please use find_element() instead")
return self.find_element(by=By.NAME, value=name)
def find_elements_by_name(self, name):
"""Finds a list of elements within this element's children by name.
:Args:
- name - name property to search for.
:Returns:
- list of webelement - a list with elements if any was found. an
empty list if not
:Usage:
::
elements = element.find_elements_by_name('foo')
"""
warnings.warn("find_elements_by_* commands are deprecated. Please use find_elements() instead")
return self.find_elements(by=By.NAME, value=name)
def find_element_by_link_text(self, link_text):
"""Finds element within this element's children by visible link text.
:Args:
- link_text - Link text string to search for.
:Returns:
- WebElement - the element if it was found
:Raises:
- NoSuchElementException - if the element wasn't found
:Usage:
::
element = element.find_element_by_link_text('Sign In')
"""
warnings.warn("find_element_by_* commands are deprecated. Please use find_element() instead")
return self.find_element(by=By.LINK_TEXT, value=link_text)
def find_elements_by_link_text(self, link_text):
"""Finds a list of elements within this element's children by visible link text.
:Args:
- link_text - Link text string to search for.
:Returns:
- list of webelement - a list with elements if any was found. an
empty list if not
:Usage:
::
elements = element.find_elements_by_link_text('Sign In')
"""
warnings.warn("find_elements_by_* commands are deprecated. Please use find_elements() instead")
return self.find_elements(by=By.LINK_TEXT, value=link_text)
def find_element_by_partial_link_text(self, link_text):
"""Finds element within this element's children by partially visible link text.
:Args:
- link_text: The text of the element to partially match on.
:Returns:
- WebElement - the element if it was found
:Raises:
- NoSuchElementException - if the element wasn't found
:Usage:
::
element = element.find_element_by_partial_link_text('Sign')
"""
warnings.warn("find_element_by_* commands are deprecated. Please use find_element() instead")
return self.find_element(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_elements_by_partial_link_text(self, link_text):
"""Finds a list of elements within this element's children by link text.
:Args:
- link_text: The text of the element to partial match on.
:Returns:
- list of webelement - a list with elements if any was found. an
empty list if not
:Usage:
::
elements = element.find_elements_by_partial_link_text('Sign')
"""
warnings.warn("find_elements_by_* commands are deprecated. Please use find_elements() instead")
return self.find_elements(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_element_by_tag_name(self, name):
"""Finds element within this element's children by tag name.
:Args:
- name - name of html tag (eg: h1, a, span)
:Returns:
- WebElement - the element if it was found
:Raises:
- NoSuchElementException - if the element wasn't found
:Usage:
::
element = element.find_element_by_tag_name('h1')
"""
warnings.warn("find_element_by_* commands are deprecated. Please use find_element() instead")
return self.find_element(by=By.TAG_NAME, value=name)
def find_elements_by_tag_name(self, name):
"""Finds a list of elements within this element's children by tag name.
:Args:
- name - name of html tag (eg: h1, a, span)
:Returns:
- list of WebElement - a list with elements if any was found. An
empty list if not
:Usage:
::
elements = element.find_elements_by_tag_name('h1')
"""
warnings.warn("find_elements_by_* commands are deprecated. Please use find_elements() instead")
return self.find_elements(by=By.TAG_NAME, value=name)
def find_element_by_xpath(self, xpath):
"""Finds element by xpath.
:Args:
- xpath - xpath of element to locate. "//input[@class='myelement']"
Note: The base path will be relative to this element's location.
This will select the first link under this element.
::
myelement.find_element_by_xpath(".//a")
However, this will select the first link on the page.
::
myelement.find_element_by_xpath("//a")
:Returns:
- WebElement - the element if it was found
:Raises:
- NoSuchElementException - if the element wasn't found
:Usage:
::
element = element.find_element_by_xpath('//div/td[1]')
"""
warnings.warn("find_element_by_* commands are deprecated. Please use find_element() instead")
return self.find_element(by=By.XPATH, value=xpath)
def find_elements_by_xpath(self, xpath):
"""Finds elements within the element by xpath.
:Args:
- xpath - xpath locator string.
Note: The base path will be relative to this element's location.
This will select all links under this element.
::
myelement.find_elements_by_xpath(".//a")
However, this will select all links in the page itself.
::
myelement.find_elements_by_xpath("//a")
:Returns:
- list of WebElement - a list with elements if any was found. An
empty list if not
:Usage:
::
elements = element.find_elements_by_xpath("//div[contains(@class, 'foo')]")
"""
warnings.warn("find_elements_by_* commands are deprecated. Please use find_elements() instead")
return self.find_elements(by=By.XPATH, value=xpath)
def find_element_by_class_name(self, name):
"""Finds element within this element's children by class name.
:Args:
- name: The class name of the element to find.
:Returns:
- WebElement - the element if it was found
:Raises:
- NoSuchElementException - if the element wasn't found
:Usage:
::
element = element.find_element_by_class_name('foo')
"""
warnings.warn("find_element_by_* commands are deprecated. Please use find_element() instead")
return self.find_element(by=By.CLASS_NAME, value=name)
def find_elements_by_class_name(self, name):
"""Finds a list of elements within this element's children by class name.
:Args:
- name: The class name of the elements to find.
:Returns:
- list of WebElement - a list with elements if any was found. An
empty list if not
:Usage:
::
elements = element.find_elements_by_class_name('foo')
"""
warnings.warn("find_elements_by_* commands are deprecated. Please use find_elements() instead")
return self.find_elements(by=By.CLASS_NAME, value=name)
def find_element_by_css_selector(self, css_selector):
"""Finds element within this element's children by CSS selector.
:Args:
- css_selector - CSS selector string, ex: 'a.nav#home'
:Returns:
- WebElement - the element if it was found
:Raises:
- NoSuchElementException - if the element wasn't found
:Usage:
::
element = element.find_element_by_css_selector('#foo')
"""
warnings.warn("find_element_by_* commands are deprecated. Please use find_element() instead")
return self.find_element(by=By.CSS_SELECTOR, value=css_selector)
def find_elements_by_css_selector(self, css_selector):
"""Finds a list of elements within this element's children by CSS selector.
:Args:
- css_selector - CSS selector string, ex: 'a.nav#home'
:Returns:
- list of WebElement - a list with elements if any was found. An
empty list if not
:Usage:
::
elements = element.find_elements_by_css_selector('.foo')
"""
warnings.warn("find_elements_by_* commands are deprecated. Please use find_elements() instead")
return self.find_elements(by=By.CSS_SELECTOR, value=css_selector)
def send_keys(self, *value):
"""Simulates typing into the element.
:Args:
- value - A string for typing, or setting form fields. For setting
file inputs, this could be a local file path.
Use this to send simple key events or to fill out form fields::
form_textfield = driver.find_element(By.NAME, 'username')
form_textfield.send_keys("admin")
This can also be used to set file inputs.
::
file_input = driver.find_element(By.NAME, 'profilePic')
file_input.send_keys("path/to/profilepic.gif")
# Generally it's better to wrap the file path in one of the methods
# in os.path to return the actual path to support cross OS testing.
# file_input.send_keys(os.path.abspath("path/to/profilepic.gif"))
"""
# transfer file to another machine only if remote driver is used
# the same behaviour as for java binding
if self.parent._is_remote:
local_files = list(map(lambda keys_to_send:
self.parent.file_detector.is_local_file(keys_to_send),
''.join(value).split('\n')))
if None not in local_files:
remote_files = []
for file in local_files:
remote_files.append(self._upload(file))
value = '\n'.join(remote_files)
self._execute(Command.SEND_KEYS_TO_ELEMENT,
{'text': "".join(keys_to_typing(value)),
'value': keys_to_typing(value)})
# RenderedWebElement Items
def is_displayed(self):
"""Whether the element is visible to a user."""
# Only go into this conditional for browsers that don't use the atom themselves
if self._w3c:
return self.parent.execute_script(
"return (%s).apply(null, arguments);" % isDisplayed_js,
self)
else:
return self._execute(Command.IS_ELEMENT_DISPLAYED)['value']
@property
def location_once_scrolled_into_view(self):
"""THIS PROPERTY MAY CHANGE WITHOUT WARNING. Use this to discover
where on the screen an element is so that we can click it. This method
should cause the element to be scrolled into view.
Returns the top lefthand corner location on the screen, or ``None`` if
the element is not visible.
"""
if self._w3c:
old_loc = self._execute(Command.W3C_EXECUTE_SCRIPT, {
'script': "arguments[0].scrollIntoView(true); return arguments[0].getBoundingClientRect()",
'args': [self]})['value']
return {"x": round(old_loc['x']),
"y": round(old_loc['y'])}
else:
return self._execute(Command.GET_ELEMENT_LOCATION_ONCE_SCROLLED_INTO_VIEW)['value']
@property
def size(self):
"""The size of the element."""
size = {}
if self._w3c:
size = self._execute(Command.GET_ELEMENT_RECT)['value']
else:
size = self._execute(Command.GET_ELEMENT_SIZE)['value']
new_size = {"height": size["height"],
"width": size["width"]}
return new_size
def value_of_css_property(self, property_name):
"""The value of a CSS property."""
return self._execute(Command.GET_ELEMENT_VALUE_OF_CSS_PROPERTY, {
'propertyName': property_name})['value']
@property
def location(self):
"""The location of the element in the renderable canvas."""
if self._w3c:
old_loc = self._execute(Command.GET_ELEMENT_RECT)['value']
else:
old_loc = self._execute(Command.GET_ELEMENT_LOCATION)['value']
new_loc = {"x": round(old_loc['x']),
"y": round(old_loc['y'])}
return new_loc
@property
def rect(self):
"""A dictionary with the size and location of the element."""
if self._w3c:
return self._execute(Command.GET_ELEMENT_RECT)['value']
else:
rect = self.size.copy()
rect.update(self.location)
return rect
@property
def screenshot_as_base64(self):
"""
Gets the screenshot of the current element as a base64 encoded string.
:Usage:
::
img_b64 = element.screenshot_as_base64
"""
return self._execute(Command.ELEMENT_SCREENSHOT)['value']
@property
def screenshot_as_png(self):
"""
Gets the screenshot of the current element as a binary data.
:Usage:
::
element_png = element.screenshot_as_png
"""
return b64decode(self.screenshot_as_base64.encode('ascii'))
def screenshot(self, filename):
"""
Saves a screenshot of the current element to a PNG image file. Returns
False if there is any IOError, else returns True. Use full paths in
your filename.
:Args:
- filename: The full path you wish to save your screenshot to. This
should end with a `.png` extension.
:Usage:
::
element.screenshot('/Screenshots/foo.png')
"""
if not filename.lower().endswith('.png'):
warnings.warn("name used for saved screenshot does not match file "
"type. It should end with a `.png` extension", UserWarning)
png = self.screenshot_as_png
try:
with open(filename, 'wb') as f:
f.write(png)
except IOError:
return False
finally:
del png
return True
@property
def parent(self):
"""Internal reference to the WebDriver instance this element was found from."""
return self._parent
@property
def id(self):
"""Internal ID used by selenium.
This is mainly for internal use. Simple use cases such as checking if 2
webelements refer to the same element, can be done using ``==``::
if element1 == element2:
print("These 2 are equal")
"""
return self._id
def __eq__(self, element):
return hasattr(element, 'id') and self._id == element.id
def __ne__(self, element):
return not self.__eq__(element)
# Private Methods
def _execute(self, command, params=None):
"""Executes a command against the underlying HTML element.
Args:
command: The name of the command to _execute as a string.
params: A dictionary of named parameters to send with the command.
Returns:
The command's JSON response loaded into a dictionary object.
"""
if not params:
params = {}
params['id'] = self._id
return self._parent.execute(command, params)
def find_element(self, by=By.ID, value=None):
"""
Find an element given a By strategy and locator.
:Usage:
::
element = element.find_element(By.ID, 'foo')
:rtype: WebElement
"""
if self._w3c:
if by == By.ID:
by = By.CSS_SELECTOR
value = '[id="%s"]' % value
elif by == By.TAG_NAME:
by = By.CSS_SELECTOR
elif by == By.CLASS_NAME:
by = By.CSS_SELECTOR
value = ".%s" % value
elif by == By.NAME:
by = By.CSS_SELECTOR
value = '[name="%s"]' % value
return self._execute(Command.FIND_CHILD_ELEMENT,
{"using": by, "value": value})['value']
def find_elements(self, by=By.ID, value=None):
"""
Find elements given a By strategy and locator.
:Usage:
::
element = element.find_elements(By.CLASS_NAME, 'foo')
:rtype: list of WebElement
"""
if self._w3c:
if by == By.ID:
by = By.CSS_SELECTOR
value = '[id="%s"]' % value
elif by == By.TAG_NAME:
by = By.CSS_SELECTOR
elif by == By.CLASS_NAME:
by = By.CSS_SELECTOR
value = ".%s" % value
elif by == By.NAME:
by = By.CSS_SELECTOR
value = '[name="%s"]' % value
return self._execute(Command.FIND_CHILD_ELEMENTS,
{"using": by, "value": value})['value']
def __hash__(self):
return int(md5_hash(self._id.encode('utf-8')).hexdigest(), 16)
def _upload(self, filename):
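        # Zip the file in memory, base64-encode it and push it to the remote end;
        # fall back to returning the local path when the server does not support
        # the file upload command.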
fp = BytesIO()
zipped = zipfile.ZipFile(fp, 'w', zipfile.ZIP_DEFLATED)
zipped.write(filename, os.path.split(filename)[1])
zipped.close()
content = encodebytes(fp.getvalue())
if not isinstance(content, str):
content = content.decode('utf-8')
try:
return self._execute(Command.UPLOAD_FILE, {'file': content})['value']
except WebDriverException as e:
if "Unrecognized command: POST" in e.__str__():
return filename
elif "Command not found: POST " in e.__str__():
return filename
elif '{"status":405,"value":["GET","HEAD","DELETE"]}' in e.__str__():
return filename
else:
raise e
| 33.451654 | 107 | 0.604534 |
12e71de0961a847145dbc4fb2127edc8b86d8b7d | 3,437 | py | Python | controllers/customer_book_controller.py | allen-garvey/gae-library | e66210f2345c92c09e46b9d402a9e1c26bb46539 | ["MIT"] | null | null | null | controllers/customer_book_controller.py | allen-garvey/gae-library | e66210f2345c92c09e46b9d402a9e1c26bb46539 | ["MIT"] | null | null | null | controllers/customer_book_controller.py | allen-garvey/gae-library | e66210f2345c92c09e46b9d402a9e1c26bb46539 | ["MIT"] | null | null | null |
from google.appengine.ext import ndb
import webapp2
import json
from controllers.base_controller import BaseController
from models.book import Book
from models.customer import Customer
# functionality getting all of a customer's checked out books
# adding a book to a customer (checking it out) and removing a book
# from a customer (returning the book)
class CustomerBookController(BaseController):
#returns an array of all books for a customer
#or data about a single checked out book
def get(self, customer_id, book_id=None):
#if customer not found will cause an error
#or if key of non-customer object (such as a book) will also cause error
try:
customer = ndb.Key(urlsafe=customer_id).get()
#get a single book
if book_id:
book = ndb.Key(urlsafe=book_id).get()
#check to make sure customer checked out this book
if book.key not in customer.checked_out:
#not found
self.response.set_status(404)
return
response_data = book.to_json()
#get all of customer's checked out books
else:
books = map(lambda book_key: book_key.get(), customer.checked_out)
response_data = Book.all_to_json(books)
except:
#error on customer not found, or book_id was invalid
self.response.set_status(404)
return
self.write_json(response_data)
#checkout a book to a customer
def put(self, customer_id, book_id):
#if customer or not found will cause an error
#or if key of non-customer object or non-book object will also cause error
try:
customer = ndb.Key(urlsafe=customer_id).get()
book = ndb.Key(urlsafe=book_id).get()
#check if book is already checked out
if book.checkedIn == False:
#bad request
self.response.set_status(400)
return
#set book to checked out and save
book.checkedIn = False
book.put()
#add book to customer's checked out books and save
customer.checked_out.append(book.key)
customer.put()
except:
#error on customer not found
self.response.set_status(404)
return
#HTTP created
self.response.set_status(201)
#checkin a book from a customer
def delete(self, customer_id, book_id):
#if customer or not found will cause an error
#or if key of non-customer object or non-book object will also cause error
try:
customer = ndb.Key(urlsafe=customer_id).get()
book = ndb.Key(urlsafe=book_id).get()
#see if customer has checked out this book
if book.key not in customer.checked_out:
#bad request
self.response.set_status(400)
return
#set book to checked in and save
book.checkedIn = True
book.put()
#remove book from customer's checked out books and save
customer.checked_out.remove(book.key)
customer.put()
except:
#error on customer not found
self.response.set_status(404)
return
#HTTP ok
self.response.set_status(200)
| 38.188889 | 82 | 0.598487 |
bd44f58317f4a14c9585d9d918f1d45c1a91f470 | 4,332 | py | Python | edith/app/admin/forms.py | BAM-PFA/edith | a9bca397a7878e76fd6ac148aa122f410751b32c | ["BSD-2-Clause"] | 8 | 2018-10-18T19:14:55.000Z | 2020-07-29T08:10:46.000Z | edith/app/admin/forms.py | BAM-PFA/edith | a9bca397a7878e76fd6ac148aa122f410751b32c | ["BSD-2-Clause"] | 27 | 2018-10-06T22:50:06.000Z | 2019-07-08T20:12:27.000Z | edith/app/admin/forms.py | BAM-PFA/resourcespace | 40aeb1f40f9283d2e452e75cb98d41ea951d33a6 | ["BSD-2-Clause"] | 1 | 2018-11-16T18:52:41.000Z | 2018-11-16T18:52:41.000Z |
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, PasswordField, BooleanField, ValidationError
from wtforms.validators import DataRequired, Email, EqualTo
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from ..models import Department, User, Data_Source, Metadata_Field
class DepartmentForm(FlaskForm):
"""
Form for admin to add or edit a department
"""
deptname = StringField('Department name', validators=[DataRequired()])
description = StringField('Description', validators=[DataRequired()])
submit = SubmitField('Submit')
class AddUserForm(FlaskForm):
"""
Form for admin to create users
"""
department_id = QuerySelectField(
query_factory=lambda: Department.query.all(),
get_pk=lambda x: x.id,
get_label="deptname",
allow_blank=True,
blank_text=''
)
email = StringField('email address', validators=[DataRequired(),Email()])
username = StringField('username', validators=[DataRequired()])
first_name = StringField('First name', validators=[DataRequired()])
last_name = StringField('Last name', validators=[DataRequired()])
RSusername = StringField('ResourceSpace username', validators=[DataRequired()])
RSkey = StringField('ResourceSpace API key', validators=[DataRequired()])
is_admin = BooleanField('Admin?')
password = PasswordField('Password', validators=[EqualTo('confirm_password')])
confirm_password = PasswordField('Confirm Password')
submit = SubmitField('Submit')
def validate_email(self, field):
if User.query.filter_by(email=field.data).first():
raise ValidationError('Email is already in use.')
class EditUserForm(FlaskForm):
"""
Form for admin to create or edit users
Had to create a separate form to avoid the validate_email method above,
otherwise editing an existing user will fail.
There's a better way to do it I know but I;m in a hurry. :/
"""
department_id = QuerySelectField(
query_factory=lambda: Department.query.all(),
get_pk=lambda x: x.id,
get_label="deptname",
allow_blank=True,
blank_text=u'Select a department'
)
email = StringField('email address', validators=[DataRequired(),Email()])
username = StringField('username', validators=[DataRequired()])
first_name = StringField('First name', validators=[DataRequired()])
last_name = StringField('Last name', validators=[DataRequired()])
RSusername = StringField('ResourceSpace username', validators=[DataRequired()])
RSkey = StringField('ResourceSpace API key', validators=[DataRequired()])
is_admin = BooleanField('Admin?')
password = PasswordField('Password', validators=[EqualTo('confirm_password')])
confirm_password = PasswordField('Confirm Password')
submit = SubmitField('Submit')
class DataSourceForm(FlaskForm):
"""
Form for admin to create or edit metadata source
"""
dbName = StringField('Database name', validators=[DataRequired()])
fmpLayout = StringField('FileMaker Layout name')
IPaddress = StringField('IP Address', validators=[DataRequired()])
namespace = StringField('Namespace for XML/XPATH queries')
username = StringField("Database user's username", validators=[DataRequired()])
credentials = StringField('Database user password', validators=[DataRequired()])
description = StringField('Database description')
primaryAssetID = StringField('Field name for primary ID of an asset')
secondaryAssetID = StringField('Field name for secondary ID of an asset')
tertiaryAssetID = StringField('Field name for tertiary ID of an asset')
submit = SubmitField('Submit')
class MetadataFieldForm(FlaskForm):
"""
Form for admin to create or edit metadata field
"""
fieldName = StringField('Field display name', validators=[DataRequired()])
fieldUniqueName = StringField('Unique name for system', validators=[DataRequired()])
fieldSourceName = StringField('Name of field in source as applicable')
fieldCategory = StringField(
'Category field belongs to. Please choose '\
'"Event","Communications", "Film Collection", or "General".')
dataSource_id = QuerySelectField(
query_factory=lambda: Data_Source.query.all(),
get_pk=lambda x: x.id,
get_label="dbName",
allow_blank=True,
blank_text=u'Select a data source'
)
rsFieldID = StringField("ResourceSpace reference ID for field",validators=[DataRequired()])
description = StringField('Database description')
submit = SubmitField('Submit')
| 40.485981 | 92 | 0.758079 |
2a1867ca7c93eb14c3b376ba9f3c58e35486439a | 2,255 | py | Python | miners/ethminer.py | thokaka92/EthMonitoringLinux | dd94eb17bef8e7e702e567a580e1f73d1388d559 | ["MIT"] | 28 | 2017-07-25T05:55:01.000Z | 2021-09-11T01:47:06.000Z | miners/ethminer.py | thokaka92/EthMonitoringLinux | dd94eb17bef8e7e702e567a580e1f73d1388d559 | ["MIT"] | 7 | 2018-01-15T11:21:02.000Z | 2019-01-31T04:46:10.000Z | miners/ethminer.py | thokaka92/EthMonitoringLinux | dd94eb17bef8e7e702e567a580e1f73d1388d559 | ["MIT"] | 14 | 2017-10-22T14:59:29.000Z | 2019-06-18T20:36:08.000Z |
#!/usr/bin/python
import json
import socket
import sys
from stats import Stats
from pprint import pprint
class Ethminer(object):
def __init__(self, host, port):
self.host = host
self.port = port
self.connected = 0
def getData(self):
try:
# Create a TCP/IP socket
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect the socket to the port where the server is listening
server_address = (self.host, self.port)
self.sock.connect(server_address)
self.connected = 1
command = "{\"id\":0,\"jsonrpc\":\"2.0\",\"method\":\"miner_getstat1\"}\n"
# Send data
            self.sock.sendall(command.encode())  # sockets expect bytes on Python 3
# receive data
data = self.sock.recv(12000)
# Close socket
self.sock.close()
return data
except socket.error as msg:
print("Socket error: {0}".format(msg))
def getStats(self):
data = Stats()
data.type = 15
try:
summary_data = self.getData()
result = json.loads(summary_data)
summary_response = result["result"]
miner_stats = summary_response[2].split(";")
data.version = summary_response[0]
data.uptime = summary_response[1]
if len(miner_stats) > 0:
data.total_hashrate = miner_stats[0]
data.accepted = miner_stats[1]
data.rejected = miner_stats[2]
dual_stats = summary_response[4].split(";")
if len(dual_stats) > 0:
data.total_dual_hashrate = dual_stats[0]
data.dual_accepted = dual_stats[1]
data.dual_rejected = dual_stats[2]
            data.hashrates = summary_response[3].split(';')  # ETH hashrates
# Temps and fan speeds
temp = summary_response[6].split(';')
i = 0
while i < len(temp) - 1:
data.temps.append(temp[i])
data.fan_speeds.append(temp[i + 1])
i += 2
data.online = self.connected
        except Exception as e:
print("Parsing error: " + str(e))
return data.toJSON()
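# Illustrative sketch (not part of the original file): querying a locally
# running ethminer instance. The host and port here are assumptions -- 3333 is
# a commonly used API port, but it depends on how the miner was started.
if __name__ == "__main__":
    miner = Ethminer("127.0.0.1", 3333)
    # getStats() returns the parsed statistics serialized as JSON.
    print(miner.getStats())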
| 28.1875
| 86
| 0.54235
|
06c0dc91a94ae0d8e6dd1f871d2f5d95fe390802
| 660
|
py
|
Python
|
partI_basics/Chapter11_testing_your_code/11.1/test_name_fucntion.py
|
hao-beixi/PythonCrashCourse
|
194736bac3c22976d7e3fbdc8ea1f13fd30e9879
|
[
"MIT"
] | null | null | null |
partI_basics/Chapter11_testing_your_code/11.1/test_name_fucntion.py
|
hao-beixi/PythonCrashCourse
|
194736bac3c22976d7e3fbdc8ea1f13fd30e9879
|
[
"MIT"
] | null | null | null |
partI_basics/Chapter11_testing_your_code/11.1/test_name_fucntion.py
|
hao-beixi/PythonCrashCourse
|
194736bac3c22976d7e3fbdc8ea1f13fd30e9879
|
[
"MIT"
] | null | null | null |
import unittest
from name_function import get_formatted_name
class NamesTestCase(unittest.TestCase):
"""Tests for 'name_function.py'"""
def test_first_last_name(self):
"""Do names like 'Janis Joplin' work?"""
formatted_name = get_formatted_name('janis', 'joplin')
self.assertEqual(formatted_name, 'Janis Joplin')
def test_first_last_middle_name(self):
"""Do names like 'Wolfgang Amadeus Mozart' work?"""
formatted_name = get_formatted_name('wolfgang', 'mozart', 'amadeus')
self.assertEqual(formatted_name, 'Wolfgang Amadeus Mozart')
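# Illustrative sketch (assumption): name_function.py itself is not included in
# this record. Based on the assertions above, get_formatted_name presumably
# behaves like this reference version; the helper name below is hypothetical.
def _reference_get_formatted_name(first, last, middle=''):
    """Return a neatly formatted, title-cased full name."""
    if middle:
        full_name = f"{first} {middle} {last}"
    else:
        full_name = f"{first} {last}"
    return full_name.title()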
if __name__ == '__main__':
unittest.main()
| 36.666667
| 77
| 0.683333
|
b11d74836b68e6ba4cb443e77b4ee3fa7d9c9632
| 130,594
|
py
|
Python
|
pandas/io/stata.py
|
tehunter/pandas
|
c57f883e24405fb4ee561ded1612acf4f4f2bdef
|
[
"BSD-3-Clause"
] | 1
|
2022-01-07T12:43:15.000Z
|
2022-01-07T12:43:15.000Z
|
pandas/io/stata.py
|
oetochi/pandas
|
fa3dfdb41f0a75c937e85059a5983da5e5d5aac6
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
pandas/io/stata.py
|
oetochi/pandas
|
fa3dfdb41f0a75c937e85059a5983da5e5d5aac6
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
"""
Module contains tools for processing Stata files into DataFrames
The StataReader below was originally written by Joe Presbrey as part of PyDTA.
It has been extended and improved by Skipper Seabold from the Statsmodels
project who also developed the StataWriter and was finally added to pandas in
a once again improved version.
You can find more information on http://presbrey.mit.edu/PyDTA and
https://www.statsmodels.org/devel/
"""
from __future__ import annotations
from collections import abc
import datetime
from io import BytesIO
import os
import struct
import sys
from typing import (
IO,
TYPE_CHECKING,
Any,
AnyStr,
Hashable,
Sequence,
cast,
)
import warnings
from dateutil.relativedelta import relativedelta
import numpy as np
from pandas._libs.lib import infer_dtype
from pandas._libs.writers import max_len_string_array
from pandas._typing import (
CompressionOptions,
FilePath,
ReadBuffer,
StorageOptions,
WriteBuffer,
)
from pandas.util._decorators import (
Appender,
doc,
)
from pandas.core.dtypes.common import (
ensure_object,
is_categorical_dtype,
is_datetime64_dtype,
is_numeric_dtype,
)
from pandas import (
Categorical,
DatetimeIndex,
NaT,
Timestamp,
isna,
to_datetime,
to_timedelta,
)
from pandas.core.arrays.boolean import BooleanDtype
from pandas.core.arrays.integer import _IntegerDtype
from pandas.core.frame import DataFrame
from pandas.core.indexes.base import Index
from pandas.core.series import Series
from pandas.core.shared_docs import _shared_docs
from pandas.io.common import get_handle
if TYPE_CHECKING:
from typing import Literal
_version_error = (
"Version of given Stata file is {version}. pandas supports importing "
"versions 105, 108, 111 (Stata 7SE), 113 (Stata 8/9), "
"114 (Stata 10/11), 115 (Stata 12), 117 (Stata 13), 118 (Stata 14/15/16),"
"and 119 (Stata 15/16, over 32,767 variables)."
)
_statafile_processing_params1 = """\
convert_dates : bool, default True
Convert date variables to DataFrame time values.
convert_categoricals : bool, default True
Read value labels and convert columns to Categorical/Factor variables."""
_statafile_processing_params2 = """\
index_col : str, optional
Column to set as index.
convert_missing : bool, default False
Flag indicating whether to convert missing values to their Stata
representations. If False, missing values are replaced with nan.
If True, columns containing missing values are returned with
object data types and missing values are represented by
StataMissingValue objects.
preserve_dtypes : bool, default True
Preserve Stata datatypes. If False, numeric data are upcast to pandas
default types for foreign data (float64 or int64).
columns : list or None
Columns to retain. Columns will be returned in the given order. None
returns all columns.
order_categoricals : bool, default True
Flag indicating whether converted categorical data are ordered."""
_chunksize_params = """\
chunksize : int, default None
Return StataReader object for iterations, returns chunks with
given number of lines."""
_iterator_params = """\
iterator : bool, default False
Return StataReader object."""
_reader_notes = """\
Notes
-----
Categorical variables read through an iterator may not have the same
categories and dtype. This occurs when a variable stored in a DTA
file is associated to an incomplete set of value labels that only
label a strict subset of the values."""
_read_stata_doc = f"""
Read Stata file into DataFrame.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be: ``file://localhost/path/to/table.dta``.
If you want to pass in a path object, pandas accepts any ``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method,
such as a file handle (e.g. via builtin ``open`` function)
or ``StringIO``.
{_statafile_processing_params1}
{_statafile_processing_params2}
{_chunksize_params}
{_iterator_params}
{_shared_docs["decompression_options"]}
{_shared_docs["storage_options"]}
Returns
-------
DataFrame or StataReader
See Also
--------
io.stata.StataReader : Low-level reader for Stata data files.
DataFrame.to_stata: Export Stata data files.
{_reader_notes}
Examples
--------
Creating a dummy Stata file for this example
>>> df = pd.DataFrame({{'animal': ['falcon', 'parrot', 'falcon',
... 'parrot'],
... 'speed': [350, 18, 361, 15]}}) # doctest: +SKIP
>>> df.to_stata('animals.dta') # doctest: +SKIP
Read a Stata dta file:
>>> df = pd.read_stata('animals.dta') # doctest: +SKIP
Read a Stata dta file in 10,000 line chunks:
>>> values = np.random.randint(0, 10, size=(20_000, 1), dtype="uint8") # doctest: +SKIP
>>> df = pd.DataFrame(values, columns=["i"]) # doctest: +SKIP
>>> df.to_stata('filename.dta') # doctest: +SKIP
>>> itr = pd.read_stata('filename.dta', chunksize=10000) # doctest: +SKIP
>>> for chunk in itr:
... # Operate on a single chunk, e.g., chunk.mean()
... pass # doctest: +SKIP
"""
_read_method_doc = f"""\
Reads observations from Stata file, converting them into a dataframe
Parameters
----------
nrows : int
Number of lines to read from data file, if None read whole file.
{_statafile_processing_params1}
{_statafile_processing_params2}
Returns
-------
DataFrame
"""
_stata_reader_doc = f"""\
Class for reading Stata dta files.
Parameters
----------
path_or_buf : path (string), buffer or path object
string, path object (pathlib.Path or py._path.local.LocalPath) or object
    implementing a binary read() function.
{_statafile_processing_params1}
{_statafile_processing_params2}
{_chunksize_params}
{_shared_docs["decompression_options"]}
{_shared_docs["storage_options"]}
{_reader_notes}
"""
_date_formats = ["%tc", "%tC", "%td", "%d", "%tw", "%tm", "%tq", "%th", "%ty"]
stata_epoch = datetime.datetime(1960, 1, 1)
# TODO: Add typing. As of January 2020 it is not possible to type this function since
# mypy doesn't understand that a Series and an int can be combined using mathematical
# operations. (+, -).
def _stata_elapsed_date_to_datetime_vec(dates, fmt) -> Series:
"""
Convert from SIF to datetime. https://www.stata.com/help.cgi?datetime
Parameters
----------
dates : Series
The Stata Internal Format date to convert to datetime according to fmt
fmt : str
The format to convert to. Can be, tc, td, tw, tm, tq, th, ty
    Returns
-------
converted : Series
The converted dates
Examples
--------
>>> dates = pd.Series([52])
    >>> _stata_elapsed_date_to_datetime_vec(dates, "%tw")
0 1961-01-01
dtype: datetime64[ns]
Notes
-----
datetime/c - tc
milliseconds since 01jan1960 00:00:00.000, assuming 86,400 s/day
datetime/C - tC - NOT IMPLEMENTED
milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds
date - td
days since 01jan1960 (01jan1960 = 0)
weekly date - tw
weeks since 1960w1
This assumes 52 weeks in a year, then adds 7 * remainder of the weeks.
The datetime value is the start of the week in terms of days in the
year, not ISO calendar weeks.
monthly date - tm
months since 1960m1
quarterly date - tq
quarters since 1960q1
half-yearly date - th
half-years since 1960h1 yearly
date - ty
years since 0000
"""
MIN_YEAR, MAX_YEAR = Timestamp.min.year, Timestamp.max.year
MAX_DAY_DELTA = (Timestamp.max - datetime.datetime(1960, 1, 1)).days
MIN_DAY_DELTA = (Timestamp.min - datetime.datetime(1960, 1, 1)).days
MIN_MS_DELTA = MIN_DAY_DELTA * 24 * 3600 * 1000
MAX_MS_DELTA = MAX_DAY_DELTA * 24 * 3600 * 1000
def convert_year_month_safe(year, month) -> Series:
"""
Convert year and month to datetimes, using pandas vectorized versions
when the date range falls within the range supported by pandas.
Otherwise it falls back to a slower but more robust method
using datetime.
"""
if year.max() < MAX_YEAR and year.min() > MIN_YEAR:
return to_datetime(100 * year + month, format="%Y%m")
else:
index = getattr(year, "index", None)
return Series(
[datetime.datetime(y, m, 1) for y, m in zip(year, month)], index=index
)
def convert_year_days_safe(year, days) -> Series:
"""
Converts year (e.g. 1999) and days since the start of the year to a
datetime or datetime64 Series
"""
if year.max() < (MAX_YEAR - 1) and year.min() > MIN_YEAR:
return to_datetime(year, format="%Y") + to_timedelta(days, unit="d")
else:
index = getattr(year, "index", None)
value = [
datetime.datetime(y, 1, 1) + relativedelta(days=int(d))
for y, d in zip(year, days)
]
return Series(value, index=index)
def convert_delta_safe(base, deltas, unit) -> Series:
"""
Convert base dates and deltas to datetimes, using pandas vectorized
versions if the deltas satisfy restrictions required to be expressed
as dates in pandas.
"""
index = getattr(deltas, "index", None)
if unit == "d":
if deltas.max() > MAX_DAY_DELTA or deltas.min() < MIN_DAY_DELTA:
values = [base + relativedelta(days=int(d)) for d in deltas]
return Series(values, index=index)
elif unit == "ms":
if deltas.max() > MAX_MS_DELTA or deltas.min() < MIN_MS_DELTA:
values = [
base + relativedelta(microseconds=(int(d) * 1000)) for d in deltas
]
return Series(values, index=index)
else:
raise ValueError("format not understood")
base = to_datetime(base)
deltas = to_timedelta(deltas, unit=unit)
return base + deltas
# TODO(non-nano): If/when pandas supports more than datetime64[ns], this
# should be improved to use correct range, e.g. datetime[Y] for yearly
bad_locs = np.isnan(dates)
has_bad_values = False
if bad_locs.any():
has_bad_values = True
data_col = Series(dates)
data_col[bad_locs] = 1.0 # Replace with NaT
dates = dates.astype(np.int64)
if fmt.startswith(("%tc", "tc")): # Delta ms relative to base
base = stata_epoch
ms = dates
conv_dates = convert_delta_safe(base, ms, "ms")
elif fmt.startswith(("%tC", "tC")):
warnings.warn("Encountered %tC format. Leaving in Stata Internal Format.")
conv_dates = Series(dates, dtype=object)
if has_bad_values:
conv_dates[bad_locs] = NaT
return conv_dates
# Delta days relative to base
elif fmt.startswith(("%td", "td", "%d", "d")):
base = stata_epoch
days = dates
conv_dates = convert_delta_safe(base, days, "d")
# does not count leap days - 7 days is a week.
# 52nd week may have more than 7 days
elif fmt.startswith(("%tw", "tw")):
year = stata_epoch.year + dates // 52
days = (dates % 52) * 7
conv_dates = convert_year_days_safe(year, days)
elif fmt.startswith(("%tm", "tm")): # Delta months relative to base
year = stata_epoch.year + dates // 12
month = (dates % 12) + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt.startswith(("%tq", "tq")): # Delta quarters relative to base
year = stata_epoch.year + dates // 4
quarter_month = (dates % 4) * 3 + 1
conv_dates = convert_year_month_safe(year, quarter_month)
elif fmt.startswith(("%th", "th")): # Delta half-years relative to base
year = stata_epoch.year + dates // 2
month = (dates % 2) * 6 + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt.startswith(("%ty", "ty")): # Years -- not delta
year = dates
first_month = np.ones_like(dates)
conv_dates = convert_year_month_safe(year, first_month)
else:
raise ValueError(f"Date fmt {fmt} not understood")
if has_bad_values: # Restore NaT for bad values
conv_dates[bad_locs] = NaT
return conv_dates
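# Illustrative sketch (not part of pandas): worked conversions for the SIF
# formats documented above. 300 in %tm is 300 months after 1960m1 (1985-01-01);
# 52 in %tw is the start of 1961 under the 52-weeks-per-year rule.
def _example_sif_to_datetime() -> DataFrame:
    monthly = _stata_elapsed_date_to_datetime_vec(Series([0, 300]), "%tm")
    weekly = _stata_elapsed_date_to_datetime_vec(Series([0, 52]), "%tw")
    return DataFrame({"monthly": monthly, "weekly": weekly})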
def _datetime_to_stata_elapsed_vec(dates: Series, fmt: str) -> Series:
"""
Convert from datetime to SIF. https://www.stata.com/help.cgi?datetime
Parameters
----------
dates : Series
Series or array containing datetime.datetime or datetime64[ns] to
convert to the Stata Internal Format given by fmt
fmt : str
The format to convert to. Can be, tc, td, tw, tm, tq, th, ty
"""
index = dates.index
NS_PER_DAY = 24 * 3600 * 1000 * 1000 * 1000
US_PER_DAY = NS_PER_DAY / 1000
def parse_dates_safe(dates, delta=False, year=False, days=False):
d = {}
if is_datetime64_dtype(dates.dtype):
if delta:
time_delta = dates - stata_epoch
d["delta"] = time_delta._values.view(np.int64) // 1000 # microseconds
if days or year:
date_index = DatetimeIndex(dates)
d["year"] = date_index._data.year
d["month"] = date_index._data.month
if days:
days_in_ns = dates.view(np.int64) - to_datetime(
d["year"], format="%Y"
).view(np.int64)
d["days"] = days_in_ns // NS_PER_DAY
elif infer_dtype(dates, skipna=False) == "datetime":
if delta:
delta = dates._values - stata_epoch
def f(x: datetime.timedelta) -> float:
return US_PER_DAY * x.days + 1000000 * x.seconds + x.microseconds
v = np.vectorize(f)
d["delta"] = v(delta)
if year:
year_month = dates.apply(lambda x: 100 * x.year + x.month)
d["year"] = year_month._values // 100
d["month"] = year_month._values - d["year"] * 100
if days:
def g(x: datetime.datetime) -> int:
return (x - datetime.datetime(x.year, 1, 1)).days
v = np.vectorize(g)
d["days"] = v(dates)
else:
raise ValueError(
"Columns containing dates must contain either "
"datetime64, datetime.datetime or null values."
)
return DataFrame(d, index=index)
bad_loc = isna(dates)
index = dates.index
if bad_loc.any():
dates = Series(dates)
if is_datetime64_dtype(dates):
dates[bad_loc] = to_datetime(stata_epoch)
else:
dates[bad_loc] = stata_epoch
if fmt in ["%tc", "tc"]:
d = parse_dates_safe(dates, delta=True)
conv_dates = d.delta / 1000
elif fmt in ["%tC", "tC"]:
warnings.warn("Stata Internal Format tC not supported.")
conv_dates = dates
elif fmt in ["%td", "td"]:
d = parse_dates_safe(dates, delta=True)
conv_dates = d.delta // US_PER_DAY
elif fmt in ["%tw", "tw"]:
d = parse_dates_safe(dates, year=True, days=True)
conv_dates = 52 * (d.year - stata_epoch.year) + d.days // 7
elif fmt in ["%tm", "tm"]:
d = parse_dates_safe(dates, year=True)
conv_dates = 12 * (d.year - stata_epoch.year) + d.month - 1
elif fmt in ["%tq", "tq"]:
d = parse_dates_safe(dates, year=True)
conv_dates = 4 * (d.year - stata_epoch.year) + (d.month - 1) // 3
elif fmt in ["%th", "th"]:
d = parse_dates_safe(dates, year=True)
conv_dates = 2 * (d.year - stata_epoch.year) + (d.month > 6).astype(int)
elif fmt in ["%ty", "ty"]:
d = parse_dates_safe(dates, year=True)
conv_dates = d.year
else:
raise ValueError(f"Format {fmt} is not a known Stata date format")
conv_dates = Series(conv_dates, dtype=np.float64)
missing_value = struct.unpack("<d", b"\x00\x00\x00\x00\x00\x00\xe0\x7f")[0]
conv_dates[bad_loc] = missing_value
return Series(conv_dates, index=index)
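# Illustrative sketch (not part of pandas): round-tripping datetimes through
# the %td encoding (days since 01jan1960). 2020-01-01 encodes to 21915.0.
def _example_td_roundtrip() -> Series:
    dates = Series([Timestamp("1960-01-01"), Timestamp("2020-01-01")])
    elapsed = _datetime_to_stata_elapsed_vec(dates, "%td")
    return _stata_elapsed_date_to_datetime_vec(elapsed, "%td")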
excessive_string_length_error = """
Fixed width strings in Stata .dta files are limited to 244 (or fewer)
characters. Column '{0}' does not satisfy this restriction. Use the
'version=117' parameter to write the newer (Stata 13 and later) format.
"""
class PossiblePrecisionLoss(Warning):
pass
precision_loss_doc = """
Column converted from {0} to {1}, and some data are outside of the lossless
conversion range. This may result in a loss of precision in the saved data.
"""
class ValueLabelTypeMismatch(Warning):
pass
value_label_mismatch_doc = """
Stata value labels (pandas categories) must be strings. Column {0} contains
non-string labels which will be converted to strings. Please check that the
Stata data file created has not lost information due to duplicate labels.
"""
class InvalidColumnName(Warning):
pass
invalid_name_doc = """
Not all pandas column names were valid Stata variable names.
The following replacements have been made:
{0}
If this is not what you expect, please make sure you have Stata-compliant
column names in your DataFrame (strings only, max 32 characters, only
alphanumerics and underscores, no Stata reserved words)
"""
class CategoricalConversionWarning(Warning):
pass
categorical_conversion_warning = """
One or more series with value labels are not fully labeled. Reading this
dataset with an iterator results in categorical variables with differing
categories. This occurs because it is not possible to know all possible values
until the entire dataset has been read. To avoid this warning, either read the
dataset without an iterator, or convert the categorical data manually by setting
``convert_categoricals`` to False and then accessing the value labels through
the value_labels method of the reader.
"""
def _cast_to_stata_types(data: DataFrame) -> DataFrame:
"""
Checks the dtypes of the columns of a pandas DataFrame for
compatibility with the data types and ranges supported by Stata, and
converts if necessary.
Parameters
----------
data : DataFrame
The DataFrame to check and convert
Notes
-----
Numeric columns in Stata must be one of int8, int16, int32, float32 or
float64, with some additional value restrictions. int8 and int16 columns
are checked for violations of the value restrictions and upcast if needed.
    int64 data is not usable in Stata, and so it is downcast to int32 whenever
    the values are in the int32 range, and sidecast to float64 when larger than
this range. If the int64 values are outside of the range of those
perfectly representable as float64 values, a warning is raised.
bool columns are cast to int8. uint columns are converted to int of the
same size if there is no loss in precision, otherwise are upcast to a
    larger type. uint64 is currently not supported since it is converted to
object in a DataFrame.
"""
ws = ""
# original, if small, if large
conversion_data = (
(np.bool_, np.int8, np.int8),
(np.uint8, np.int8, np.int16),
(np.uint16, np.int16, np.int32),
(np.uint32, np.int32, np.int64),
(np.uint64, np.int64, np.float64),
)
float32_max = struct.unpack("<f", b"\xff\xff\xff\x7e")[0]
float64_max = struct.unpack("<d", b"\xff\xff\xff\xff\xff\xff\xdf\x7f")[0]
for col in data:
# Cast from unsupported types to supported types
is_nullable_int = isinstance(data[col].dtype, (_IntegerDtype, BooleanDtype))
orig = data[col]
# We need to find orig_missing before altering data below
orig_missing = orig.isna()
if is_nullable_int:
missing_loc = data[col].isna()
if missing_loc.any():
# Replace with always safe value
data.loc[missing_loc, col] = 0
# Replace with NumPy-compatible column
data[col] = data[col].astype(data[col].dtype.numpy_dtype)
dtype = data[col].dtype
for c_data in conversion_data:
if dtype == c_data[0]:
if data[col].max() <= np.iinfo(c_data[1]).max:
dtype = c_data[1]
else:
dtype = c_data[2]
if c_data[2] == np.int64: # Warn if necessary
if data[col].max() >= 2 ** 53:
ws = precision_loss_doc.format("uint64", "float64")
data[col] = data[col].astype(dtype)
# Check values and upcast if necessary
if dtype == np.int8:
if data[col].max() > 100 or data[col].min() < -127:
data[col] = data[col].astype(np.int16)
elif dtype == np.int16:
if data[col].max() > 32740 or data[col].min() < -32767:
data[col] = data[col].astype(np.int32)
elif dtype == np.int64:
if data[col].max() <= 2147483620 and data[col].min() >= -2147483647:
data[col] = data[col].astype(np.int32)
else:
data[col] = data[col].astype(np.float64)
if data[col].max() >= 2 ** 53 or data[col].min() <= -(2 ** 53):
ws = precision_loss_doc.format("int64", "float64")
elif dtype in (np.float32, np.float64):
value = data[col].max()
if np.isinf(value):
raise ValueError(
f"Column {col} has a maximum value of infinity which is outside "
"the range supported by Stata."
)
if dtype == np.float32 and value > float32_max:
data[col] = data[col].astype(np.float64)
elif dtype == np.float64:
if value > float64_max:
raise ValueError(
f"Column {col} has a maximum value ({value}) outside the range "
f"supported by Stata ({float64_max})"
)
if is_nullable_int:
if orig_missing.any():
# Replace missing by Stata sentinel value
sentinel = StataMissingValue.BASE_MISSING_VALUES[data[col].dtype.name]
data.loc[orig_missing, col] = sentinel
if ws:
warnings.warn(ws, PossiblePrecisionLoss)
return data
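# Illustrative sketch (not part of pandas): the casting rules above in action.
# bool columns become int8, uint16 values above the int16 limit are upcast to
# int32, and int64 columns are downcast to int32 when every value fits.
def _example_cast_to_stata_types() -> DataFrame:
    frame = DataFrame(
        {
            "flag": np.array([True, False]),
            "counts": np.array([1, 60000], dtype=np.uint16),
            "ids": np.array([1, 2], dtype=np.int64),
        }
    )
    return _cast_to_stata_types(frame)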
class StataValueLabel:
"""
Parse a categorical column and prepare formatted output
Parameters
----------
catarray : Series
Categorical Series to encode
encoding : {"latin-1", "utf-8"}
Encoding to use for value labels.
"""
def __init__(self, catarray: Series, encoding: str = "latin-1"):
if encoding not in ("latin-1", "utf-8"):
raise ValueError("Only latin-1 and utf-8 are supported.")
self.labname = catarray.name
self._encoding = encoding
categories = catarray.cat.categories
self.value_labels: list[tuple[int | float, str]] = list(
zip(np.arange(len(categories)), categories)
)
self.value_labels.sort(key=lambda x: x[0])
self._prepare_value_labels()
def _prepare_value_labels(self):
"""Encode value labels."""
self.text_len = 0
self.txt: list[bytes] = []
self.n = 0
# Offsets (length of categories), converted to int32
self.off = np.array([], dtype=np.int32)
# Values, converted to int32
self.val = np.array([], dtype=np.int32)
self.len = 0
# Compute lengths and setup lists of offsets and labels
offsets: list[int] = []
values: list[int | float] = []
for vl in self.value_labels:
category: str | bytes = vl[1]
if not isinstance(category, str):
category = str(category)
warnings.warn(
value_label_mismatch_doc.format(self.labname),
ValueLabelTypeMismatch,
)
category = category.encode(self._encoding)
offsets.append(self.text_len)
self.text_len += len(category) + 1 # +1 for the padding
values.append(vl[0])
self.txt.append(category)
self.n += 1
if self.text_len > 32000:
raise ValueError(
"Stata value labels for a single variable must "
"have a combined length less than 32,000 characters."
)
# Ensure int32
self.off = np.array(offsets, dtype=np.int32)
self.val = np.array(values, dtype=np.int32)
# Total length
self.len = 4 + 4 + 4 * self.n + 4 * self.n + self.text_len
def generate_value_label(self, byteorder: str) -> bytes:
"""
Generate the binary representation of the value labels.
Parameters
----------
byteorder : str
Byte order of the output
Returns
-------
value_label : bytes
Bytes containing the formatted value label
"""
encoding = self._encoding
bio = BytesIO()
null_byte = b"\x00"
# len
bio.write(struct.pack(byteorder + "i", self.len))
# labname
labname = str(self.labname)[:32].encode(encoding)
lab_len = 32 if encoding not in ("utf-8", "utf8") else 128
labname = _pad_bytes(labname, lab_len + 1)
bio.write(labname)
# padding - 3 bytes
for i in range(3):
bio.write(struct.pack("c", null_byte))
# value_label_table
# n - int32
bio.write(struct.pack(byteorder + "i", self.n))
# textlen - int32
bio.write(struct.pack(byteorder + "i", self.text_len))
# off - int32 array (n elements)
for offset in self.off:
bio.write(struct.pack(byteorder + "i", offset))
# val - int32 array (n elements)
for value in self.val:
bio.write(struct.pack(byteorder + "i", value))
# txt - Text labels, null terminated
for text in self.txt:
bio.write(text + null_byte)
return bio.getvalue()
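# Illustrative sketch (not part of pandas): encoding the value labels of a small
# categorical column into the binary table layout produced above. "<" selects
# little-endian packing.
def _example_value_label_bytes() -> bytes:
    grades = Series(["low", "high", "low"], name="grade", dtype="category")
    return StataValueLabel(grades, encoding="latin-1").generate_value_label("<")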
class StataNonCatValueLabel(StataValueLabel):
"""
Prepare formatted version of value labels
Parameters
----------
labname : str
Value label name
value_labels: Dictionary
Mapping of values to labels
encoding : {"latin-1", "utf-8"}
Encoding to use for value labels.
"""
def __init__(
self,
labname: str,
value_labels: dict[float | int, str],
encoding: Literal["latin-1", "utf-8"] = "latin-1",
):
if encoding not in ("latin-1", "utf-8"):
raise ValueError("Only latin-1 and utf-8 are supported.")
self.labname = labname
self._encoding = encoding
self.value_labels: list[tuple[int | float, str]] = sorted(
value_labels.items(), key=lambda x: x[0]
)
self._prepare_value_labels()
class StataMissingValue:
"""
An observation's missing value.
Parameters
----------
value : {int, float}
The Stata missing value code
Notes
-----
More information: <https://www.stata.com/help.cgi?missing>
    Integer missing values map the codes '.', '.a', ..., '.z' to the ranges
101 ... 127 (for int8), 32741 ... 32767 (for int16) and 2147483621 ...
2147483647 (for int32). Missing values for floating point data types are
more complex but the pattern is simple to discern from the following table.
np.float32 missing values (float in Stata)
0000007f .
0008007f .a
0010007f .b
...
00c0007f .x
00c8007f .y
00d0007f .z
np.float64 missing values (double in Stata)
000000000000e07f .
000000000001e07f .a
000000000002e07f .b
...
000000000018e07f .x
000000000019e07f .y
00000000001ae07f .z
"""
# Construct a dictionary of missing values
MISSING_VALUES: dict[float, str] = {}
bases = (101, 32741, 2147483621)
for b in bases:
# Conversion to long to avoid hash issues on 32 bit platforms #8968
MISSING_VALUES[b] = "."
for i in range(1, 27):
MISSING_VALUES[i + b] = "." + chr(96 + i)
float32_base = b"\x00\x00\x00\x7f"
increment = struct.unpack("<i", b"\x00\x08\x00\x00")[0]
for i in range(27):
key = struct.unpack("<f", float32_base)[0]
MISSING_VALUES[key] = "."
if i > 0:
MISSING_VALUES[key] += chr(96 + i)
int_value = struct.unpack("<i", struct.pack("<f", key))[0] + increment
float32_base = struct.pack("<i", int_value)
float64_base = b"\x00\x00\x00\x00\x00\x00\xe0\x7f"
increment = struct.unpack("q", b"\x00\x00\x00\x00\x00\x01\x00\x00")[0]
for i in range(27):
key = struct.unpack("<d", float64_base)[0]
MISSING_VALUES[key] = "."
if i > 0:
MISSING_VALUES[key] += chr(96 + i)
int_value = struct.unpack("q", struct.pack("<d", key))[0] + increment
float64_base = struct.pack("q", int_value)
BASE_MISSING_VALUES = {
"int8": 101,
"int16": 32741,
"int32": 2147483621,
"float32": struct.unpack("<f", float32_base)[0],
"float64": struct.unpack("<d", float64_base)[0],
}
def __init__(self, value: int | float):
self._value = value
# Conversion to int to avoid hash issues on 32 bit platforms #8968
value = int(value) if value < 2147483648 else float(value)
self._str = self.MISSING_VALUES[value]
@property
def string(self) -> str:
"""
The Stata representation of the missing value: '.', '.a'..'.z'
Returns
-------
str
The representation of the missing value.
"""
return self._str
@property
def value(self) -> int | float:
"""
The binary representation of the missing value.
Returns
-------
{int, float}
The binary representation of the missing value.
"""
return self._value
def __str__(self) -> str:
return self.string
def __repr__(self) -> str:
return f"{type(self)}({self})"
def __eq__(self, other: Any) -> bool:
return (
isinstance(other, type(self))
and self.string == other.string
and self.value == other.value
)
@classmethod
def get_base_missing_value(cls, dtype: np.dtype) -> int | float:
if dtype.type is np.int8:
value = cls.BASE_MISSING_VALUES["int8"]
elif dtype.type is np.int16:
value = cls.BASE_MISSING_VALUES["int16"]
elif dtype.type is np.int32:
value = cls.BASE_MISSING_VALUES["int32"]
elif dtype.type is np.float32:
value = cls.BASE_MISSING_VALUES["float32"]
elif dtype.type is np.float64:
value = cls.BASE_MISSING_VALUES["float64"]
else:
raise ValueError("Unsupported dtype")
return value
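# Illustrative sketch (not part of pandas): integer sentinel codes and their
# Stata spellings. 101 is '.' for int8 columns, 102 is '.a', 32741 is '.' for int16.
def _example_missing_value_strings() -> dict[int, str]:
    return {code: StataMissingValue(code).string for code in (101, 102, 32741)}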
class StataParser:
def __init__(self):
# type code.
# --------------------
# str1 1 = 0x01
# str2 2 = 0x02
# ...
# str244 244 = 0xf4
# byte 251 = 0xfb (sic)
# int 252 = 0xfc
# long 253 = 0xfd
# float 254 = 0xfe
# double 255 = 0xff
# --------------------
# NOTE: the byte type seems to be reserved for categorical variables
# with a label, but the underlying variable is -127 to 100
# we're going to drop the label and cast to int
self.DTYPE_MAP = dict(
list(zip(range(1, 245), [np.dtype("a" + str(i)) for i in range(1, 245)]))
+ [
(251, np.dtype(np.int8)),
(252, np.dtype(np.int16)),
(253, np.dtype(np.int32)),
(254, np.dtype(np.float32)),
(255, np.dtype(np.float64)),
]
)
self.DTYPE_MAP_XML = {
32768: np.dtype(np.uint8), # Keys to GSO
65526: np.dtype(np.float64),
65527: np.dtype(np.float32),
65528: np.dtype(np.int32),
65529: np.dtype(np.int16),
65530: np.dtype(np.int8),
}
# error: Argument 1 to "list" has incompatible type "str";
# expected "Iterable[int]" [arg-type]
self.TYPE_MAP = list(range(251)) + list("bhlfd") # type: ignore[arg-type]
self.TYPE_MAP_XML = {
# Not really a Q, unclear how to handle byteswap
32768: "Q",
65526: "d",
65527: "f",
65528: "l",
65529: "h",
65530: "b",
}
# NOTE: technically, some of these are wrong. there are more numbers
# that can be represented. it's the 27 ABOVE and BELOW the max listed
# numeric data type in [U] 12.2.2 of the 11.2 manual
float32_min = b"\xff\xff\xff\xfe"
float32_max = b"\xff\xff\xff\x7e"
float64_min = b"\xff\xff\xff\xff\xff\xff\xef\xff"
float64_max = b"\xff\xff\xff\xff\xff\xff\xdf\x7f"
self.VALID_RANGE = {
"b": (-127, 100),
"h": (-32767, 32740),
"l": (-2147483647, 2147483620),
"f": (
np.float32(struct.unpack("<f", float32_min)[0]),
np.float32(struct.unpack("<f", float32_max)[0]),
),
"d": (
np.float64(struct.unpack("<d", float64_min)[0]),
np.float64(struct.unpack("<d", float64_max)[0]),
),
}
self.OLD_TYPE_MAPPING = {
98: 251, # byte
105: 252, # int
108: 253, # long
102: 254, # float
100: 255, # double
}
# These missing values are the generic '.' in Stata, and are used
# to replace nans
self.MISSING_VALUES = {
"b": 101,
"h": 32741,
"l": 2147483621,
"f": np.float32(struct.unpack("<f", b"\x00\x00\x00\x7f")[0]),
"d": np.float64(
struct.unpack("<d", b"\x00\x00\x00\x00\x00\x00\xe0\x7f")[0]
),
}
self.NUMPY_TYPE_MAP = {
"b": "i1",
"h": "i2",
"l": "i4",
"f": "f4",
"d": "f8",
"Q": "u8",
}
# Reserved words cannot be used as variable names
self.RESERVED_WORDS = (
"aggregate",
"array",
"boolean",
"break",
"byte",
"case",
"catch",
"class",
"colvector",
"complex",
"const",
"continue",
"default",
"delegate",
"delete",
"do",
"double",
"else",
"eltypedef",
"end",
"enum",
"explicit",
"export",
"external",
"float",
"for",
"friend",
"function",
"global",
"goto",
"if",
"inline",
"int",
"local",
"long",
"NULL",
"pragma",
"protected",
"quad",
"rowvector",
"short",
"typedef",
"typename",
"virtual",
"_all",
"_N",
"_skip",
"_b",
"_pi",
"str#",
"in",
"_pred",
"strL",
"_coef",
"_rc",
"using",
"_cons",
"_se",
"with",
"_n",
)
class StataReader(StataParser, abc.Iterator):
__doc__ = _stata_reader_doc
def __init__(
self,
path_or_buf: FilePath | ReadBuffer[bytes],
convert_dates: bool = True,
convert_categoricals: bool = True,
index_col: str | None = None,
convert_missing: bool = False,
preserve_dtypes: bool = True,
columns: Sequence[str] | None = None,
order_categoricals: bool = True,
chunksize: int | None = None,
compression: CompressionOptions = "infer",
storage_options: StorageOptions = None,
):
super().__init__()
self.col_sizes: list[int] = []
# Arguments to the reader (can be temporarily overridden in
# calls to read).
self._convert_dates = convert_dates
self._convert_categoricals = convert_categoricals
self._index_col = index_col
self._convert_missing = convert_missing
self._preserve_dtypes = preserve_dtypes
self._columns = columns
self._order_categoricals = order_categoricals
self._encoding = ""
self._chunksize = chunksize
self._using_iterator = False
if self._chunksize is None:
self._chunksize = 1
elif not isinstance(chunksize, int) or chunksize <= 0:
raise ValueError("chunksize must be a positive integer when set.")
# State variables for the file
self._has_string_data = False
self._missing_values = False
self._can_read_value_labels = False
self._column_selector_set = False
self._value_labels_read = False
self._data_read = False
self._dtype: np.dtype | None = None
self._lines_read = 0
self._native_byteorder = _set_endianness(sys.byteorder)
with get_handle(
path_or_buf,
"rb",
storage_options=storage_options,
is_text=False,
compression=compression,
) as handles:
# Copy to BytesIO, and ensure no encoding
self.path_or_buf = BytesIO(handles.handle.read())
self._read_header()
self._setup_dtype()
def __enter__(self) -> StataReader:
"""enter context manager"""
return self
def __exit__(self, exc_type, exc_value, traceback) -> None:
"""exit context manager"""
self.close()
def close(self) -> None:
"""close the handle if its open"""
self.path_or_buf.close()
def _set_encoding(self) -> None:
"""
Set string encoding which depends on file version
"""
if self.format_version < 118:
self._encoding = "latin-1"
else:
self._encoding = "utf-8"
def _read_header(self) -> None:
first_char = self.path_or_buf.read(1)
if struct.unpack("c", first_char)[0] == b"<":
self._read_new_header()
else:
self._read_old_header(first_char)
self.has_string_data = len([x for x in self.typlist if type(x) is int]) > 0
# calculate size of a data record
self.col_sizes = [self._calcsize(typ) for typ in self.typlist]
def _read_new_header(self) -> None:
# The first part of the header is common to 117 - 119.
self.path_or_buf.read(27) # stata_dta><header><release>
self.format_version = int(self.path_or_buf.read(3))
if self.format_version not in [117, 118, 119]:
raise ValueError(_version_error.format(version=self.format_version))
self._set_encoding()
self.path_or_buf.read(21) # </release><byteorder>
self.byteorder = self.path_or_buf.read(3) == b"MSF" and ">" or "<"
self.path_or_buf.read(15) # </byteorder><K>
nvar_type = "H" if self.format_version <= 118 else "I"
nvar_size = 2 if self.format_version <= 118 else 4
self.nvar = struct.unpack(
self.byteorder + nvar_type, self.path_or_buf.read(nvar_size)
)[0]
self.path_or_buf.read(7) # </K><N>
self.nobs = self._get_nobs()
self.path_or_buf.read(11) # </N><label>
self._data_label = self._get_data_label()
self.path_or_buf.read(19) # </label><timestamp>
self.time_stamp = self._get_time_stamp()
self.path_or_buf.read(26) # </timestamp></header><map>
self.path_or_buf.read(8) # 0x0000000000000000
self.path_or_buf.read(8) # position of <map>
self._seek_vartypes = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 16
)
self._seek_varnames = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 10
)
self._seek_sortlist = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 10
)
self._seek_formats = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 9
)
self._seek_value_label_names = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 19
)
# Requires version-specific treatment
self._seek_variable_labels = self._get_seek_variable_labels()
self.path_or_buf.read(8) # <characteristics>
self.data_location = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 6
)
self.seek_strls = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 7
)
self.seek_value_labels = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 14
)
self.typlist, self.dtyplist = self._get_dtypes(self._seek_vartypes)
self.path_or_buf.seek(self._seek_varnames)
self.varlist = self._get_varlist()
self.path_or_buf.seek(self._seek_sortlist)
self.srtlist = struct.unpack(
self.byteorder + ("h" * (self.nvar + 1)),
self.path_or_buf.read(2 * (self.nvar + 1)),
)[:-1]
self.path_or_buf.seek(self._seek_formats)
self.fmtlist = self._get_fmtlist()
self.path_or_buf.seek(self._seek_value_label_names)
self.lbllist = self._get_lbllist()
self.path_or_buf.seek(self._seek_variable_labels)
self._variable_labels = self._get_variable_labels()
# Get data type information, works for versions 117-119.
def _get_dtypes(
self, seek_vartypes: int
) -> tuple[list[int | str], list[str | np.dtype]]:
self.path_or_buf.seek(seek_vartypes)
raw_typlist = [
struct.unpack(self.byteorder + "H", self.path_or_buf.read(2))[0]
for _ in range(self.nvar)
]
def f(typ: int) -> int | str:
if typ <= 2045:
return typ
try:
return self.TYPE_MAP_XML[typ]
except KeyError as err:
raise ValueError(f"cannot convert stata types [{typ}]") from err
typlist = [f(x) for x in raw_typlist]
def g(typ: int) -> str | np.dtype:
if typ <= 2045:
return str(typ)
try:
# error: Incompatible return value type (got "Type[number]", expected
# "Union[str, dtype]")
return self.DTYPE_MAP_XML[typ] # type: ignore[return-value]
except KeyError as err:
raise ValueError(f"cannot convert stata dtype [{typ}]") from err
dtyplist = [g(x) for x in raw_typlist]
return typlist, dtyplist
def _get_varlist(self) -> list[str]:
        # 33 in older formats, 129 in formats 118 and 119
b = 33 if self.format_version < 118 else 129
return [self._decode(self.path_or_buf.read(b)) for _ in range(self.nvar)]
# Returns the format list
def _get_fmtlist(self) -> list[str]:
if self.format_version >= 118:
b = 57
elif self.format_version > 113:
b = 49
elif self.format_version > 104:
b = 12
else:
b = 7
return [self._decode(self.path_or_buf.read(b)) for _ in range(self.nvar)]
# Returns the label list
def _get_lbllist(self) -> list[str]:
if self.format_version >= 118:
b = 129
elif self.format_version > 108:
b = 33
else:
b = 9
return [self._decode(self.path_or_buf.read(b)) for _ in range(self.nvar)]
def _get_variable_labels(self) -> list[str]:
if self.format_version >= 118:
vlblist = [
self._decode(self.path_or_buf.read(321)) for _ in range(self.nvar)
]
elif self.format_version > 105:
vlblist = [
self._decode(self.path_or_buf.read(81)) for _ in range(self.nvar)
]
else:
vlblist = [
self._decode(self.path_or_buf.read(32)) for _ in range(self.nvar)
]
return vlblist
def _get_nobs(self) -> int:
if self.format_version >= 118:
return struct.unpack(self.byteorder + "Q", self.path_or_buf.read(8))[0]
else:
return struct.unpack(self.byteorder + "I", self.path_or_buf.read(4))[0]
def _get_data_label(self) -> str:
if self.format_version >= 118:
strlen = struct.unpack(self.byteorder + "H", self.path_or_buf.read(2))[0]
return self._decode(self.path_or_buf.read(strlen))
elif self.format_version == 117:
strlen = struct.unpack("b", self.path_or_buf.read(1))[0]
return self._decode(self.path_or_buf.read(strlen))
elif self.format_version > 105:
return self._decode(self.path_or_buf.read(81))
else:
return self._decode(self.path_or_buf.read(32))
def _get_time_stamp(self) -> str:
if self.format_version >= 118:
strlen = struct.unpack("b", self.path_or_buf.read(1))[0]
return self.path_or_buf.read(strlen).decode("utf-8")
elif self.format_version == 117:
strlen = struct.unpack("b", self.path_or_buf.read(1))[0]
return self._decode(self.path_or_buf.read(strlen))
elif self.format_version > 104:
return self._decode(self.path_or_buf.read(18))
else:
raise ValueError()
def _get_seek_variable_labels(self) -> int:
if self.format_version == 117:
self.path_or_buf.read(8) # <variable_labels>, throw away
# Stata 117 data files do not follow the described format. This is
# a work around that uses the previous label, 33 bytes for each
# variable, 20 for the closing tag and 17 for the opening tag
return self._seek_value_label_names + (33 * self.nvar) + 20 + 17
elif self.format_version >= 118:
return struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 17
else:
raise ValueError()
def _read_old_header(self, first_char: bytes) -> None:
self.format_version = struct.unpack("b", first_char)[0]
if self.format_version not in [104, 105, 108, 111, 113, 114, 115]:
raise ValueError(_version_error.format(version=self.format_version))
self._set_encoding()
self.byteorder = (
struct.unpack("b", self.path_or_buf.read(1))[0] == 0x1 and ">" or "<"
)
self.filetype = struct.unpack("b", self.path_or_buf.read(1))[0]
self.path_or_buf.read(1) # unused
self.nvar = struct.unpack(self.byteorder + "H", self.path_or_buf.read(2))[0]
self.nobs = self._get_nobs()
self._data_label = self._get_data_label()
self.time_stamp = self._get_time_stamp()
# descriptors
if self.format_version > 108:
typlist = [ord(self.path_or_buf.read(1)) for _ in range(self.nvar)]
else:
buf = self.path_or_buf.read(self.nvar)
typlistb = np.frombuffer(buf, dtype=np.uint8)
typlist = []
for tp in typlistb:
if tp in self.OLD_TYPE_MAPPING:
typlist.append(self.OLD_TYPE_MAPPING[tp])
else:
typlist.append(tp - 127) # bytes
try:
self.typlist = [self.TYPE_MAP[typ] for typ in typlist]
except ValueError as err:
invalid_types = ",".join([str(x) for x in typlist])
raise ValueError(f"cannot convert stata types [{invalid_types}]") from err
try:
self.dtyplist = [self.DTYPE_MAP[typ] for typ in typlist]
except ValueError as err:
invalid_dtypes = ",".join([str(x) for x in typlist])
raise ValueError(f"cannot convert stata dtypes [{invalid_dtypes}]") from err
if self.format_version > 108:
self.varlist = [
self._decode(self.path_or_buf.read(33)) for _ in range(self.nvar)
]
else:
self.varlist = [
self._decode(self.path_or_buf.read(9)) for _ in range(self.nvar)
]
self.srtlist = struct.unpack(
self.byteorder + ("h" * (self.nvar + 1)),
self.path_or_buf.read(2 * (self.nvar + 1)),
)[:-1]
self.fmtlist = self._get_fmtlist()
self.lbllist = self._get_lbllist()
self._variable_labels = self._get_variable_labels()
# ignore expansion fields (Format 105 and later)
# When reading, read five bytes; the last four bytes now tell you
# the size of the next read, which you discard. You then continue
# like this until you read 5 bytes of zeros.
if self.format_version > 104:
while True:
data_type = struct.unpack(
self.byteorder + "b", self.path_or_buf.read(1)
)[0]
if self.format_version > 108:
data_len = struct.unpack(
self.byteorder + "i", self.path_or_buf.read(4)
)[0]
else:
data_len = struct.unpack(
self.byteorder + "h", self.path_or_buf.read(2)
)[0]
if data_type == 0:
break
self.path_or_buf.read(data_len)
# necessary data to continue parsing
self.data_location = self.path_or_buf.tell()
def _setup_dtype(self) -> np.dtype:
"""Map between numpy and state dtypes"""
if self._dtype is not None:
return self._dtype
dtypes = [] # Convert struct data types to numpy data type
for i, typ in enumerate(self.typlist):
if typ in self.NUMPY_TYPE_MAP:
typ = cast(str, typ) # only strs in NUMPY_TYPE_MAP
dtypes.append(("s" + str(i), self.byteorder + self.NUMPY_TYPE_MAP[typ]))
else:
dtypes.append(("s" + str(i), "S" + str(typ)))
self._dtype = np.dtype(dtypes)
return self._dtype
def _calcsize(self, fmt: int | str) -> int:
if isinstance(fmt, int):
return fmt
return struct.calcsize(self.byteorder + fmt)
def _decode(self, s: bytes) -> str:
# have bytes not strings, so must decode
s = s.partition(b"\0")[0]
try:
return s.decode(self._encoding)
except UnicodeDecodeError:
# GH 25960, fallback to handle incorrect format produced when 117
# files are converted to 118 files in Stata
encoding = self._encoding
msg = f"""
One or more strings in the dta file could not be decoded using {encoding}, and
so the fallback encoding of latin-1 is being used. This can happen when a file
has been incorrectly encoded by Stata or some other software. You should verify
the string values returned are correct."""
warnings.warn(msg, UnicodeWarning)
return s.decode("latin-1")
def _read_value_labels(self) -> None:
if self._value_labels_read:
# Don't read twice
return
if self.format_version <= 108:
# Value labels are not supported in version 108 and earlier.
self._value_labels_read = True
self.value_label_dict: dict[str, dict[float | int, str]] = {}
return
if self.format_version >= 117:
self.path_or_buf.seek(self.seek_value_labels)
else:
assert self._dtype is not None
offset = self.nobs * self._dtype.itemsize
self.path_or_buf.seek(self.data_location + offset)
self._value_labels_read = True
self.value_label_dict = {}
while True:
if self.format_version >= 117:
if self.path_or_buf.read(5) == b"</val": # <lbl>
break # end of value label table
slength = self.path_or_buf.read(4)
if not slength:
break # end of value label table (format < 117)
if self.format_version <= 117:
labname = self._decode(self.path_or_buf.read(33))
else:
labname = self._decode(self.path_or_buf.read(129))
self.path_or_buf.read(3) # padding
n = struct.unpack(self.byteorder + "I", self.path_or_buf.read(4))[0]
txtlen = struct.unpack(self.byteorder + "I", self.path_or_buf.read(4))[0]
off = np.frombuffer(
self.path_or_buf.read(4 * n), dtype=self.byteorder + "i4", count=n
)
val = np.frombuffer(
self.path_or_buf.read(4 * n), dtype=self.byteorder + "i4", count=n
)
ii = np.argsort(off)
off = off[ii]
val = val[ii]
txt = self.path_or_buf.read(txtlen)
self.value_label_dict[labname] = {}
for i in range(n):
end = off[i + 1] if i < n - 1 else txtlen
self.value_label_dict[labname][val[i]] = self._decode(txt[off[i] : end])
if self.format_version >= 117:
self.path_or_buf.read(6) # </lbl>
self._value_labels_read = True
def _read_strls(self) -> None:
self.path_or_buf.seek(self.seek_strls)
# Wrap v_o in a string to allow uint64 values as keys on 32bit OS
self.GSO = {"0": ""}
while True:
if self.path_or_buf.read(3) != b"GSO":
break
if self.format_version == 117:
v_o = struct.unpack(self.byteorder + "Q", self.path_or_buf.read(8))[0]
else:
buf = self.path_or_buf.read(12)
# Only tested on little endian file on little endian machine.
v_size = 2 if self.format_version == 118 else 3
if self.byteorder == "<":
buf = buf[0:v_size] + buf[4 : (12 - v_size)]
else:
# This path may not be correct, impossible to test
buf = buf[0:v_size] + buf[(4 + v_size) :]
v_o = struct.unpack("Q", buf)[0]
typ = struct.unpack("B", self.path_or_buf.read(1))[0]
length = struct.unpack(self.byteorder + "I", self.path_or_buf.read(4))[0]
va = self.path_or_buf.read(length)
if typ == 130:
decoded_va = va[0:-1].decode(self._encoding)
else:
# Stata says typ 129 can be binary, so use str
decoded_va = str(va)
# Wrap v_o in a string to allow uint64 values as keys on 32bit OS
self.GSO[str(v_o)] = decoded_va
def __next__(self) -> DataFrame:
self._using_iterator = True
return self.read(nrows=self._chunksize)
def get_chunk(self, size: int | None = None) -> DataFrame:
"""
Reads lines from Stata file and returns as dataframe
Parameters
----------
size : int, defaults to None
Number of lines to read. If None, reads whole file.
Returns
-------
DataFrame
"""
if size is None:
size = self._chunksize
return self.read(nrows=size)
@Appender(_read_method_doc)
def read(
self,
nrows: int | None = None,
convert_dates: bool | None = None,
convert_categoricals: bool | None = None,
index_col: str | None = None,
convert_missing: bool | None = None,
preserve_dtypes: bool | None = None,
columns: Sequence[str] | None = None,
order_categoricals: bool | None = None,
) -> DataFrame:
# Handle empty file or chunk. If reading incrementally raise
# StopIteration. If reading the whole thing return an empty
# data frame.
if (self.nobs == 0) and (nrows is None):
self._can_read_value_labels = True
self._data_read = True
self.close()
return DataFrame(columns=self.varlist)
# Handle options
if convert_dates is None:
convert_dates = self._convert_dates
if convert_categoricals is None:
convert_categoricals = self._convert_categoricals
if convert_missing is None:
convert_missing = self._convert_missing
if preserve_dtypes is None:
preserve_dtypes = self._preserve_dtypes
if columns is None:
columns = self._columns
if order_categoricals is None:
order_categoricals = self._order_categoricals
if index_col is None:
index_col = self._index_col
if nrows is None:
nrows = self.nobs
if (self.format_version >= 117) and (not self._value_labels_read):
self._can_read_value_labels = True
self._read_strls()
# Read data
assert self._dtype is not None
dtype = self._dtype
max_read_len = (self.nobs - self._lines_read) * dtype.itemsize
read_len = nrows * dtype.itemsize
read_len = min(read_len, max_read_len)
if read_len <= 0:
# Iterator has finished, should never be here unless
# we are reading the file incrementally
if convert_categoricals:
self._read_value_labels()
self.close()
raise StopIteration
offset = self._lines_read * dtype.itemsize
self.path_or_buf.seek(self.data_location + offset)
read_lines = min(nrows, self.nobs - self._lines_read)
raw_data = np.frombuffer(
self.path_or_buf.read(read_len), dtype=dtype, count=read_lines
)
self._lines_read += read_lines
if self._lines_read == self.nobs:
self._can_read_value_labels = True
self._data_read = True
# if necessary, swap the byte order to native here
if self.byteorder != self._native_byteorder:
raw_data = raw_data.byteswap().newbyteorder()
if convert_categoricals:
self._read_value_labels()
if len(raw_data) == 0:
data = DataFrame(columns=self.varlist)
else:
data = DataFrame.from_records(raw_data)
data.columns = Index(self.varlist)
# If index is not specified, use actual row number rather than
# restarting at 0 for each chunk.
if index_col is None:
rng = np.arange(self._lines_read - read_lines, self._lines_read)
data.index = Index(rng) # set attr instead of set_index to avoid copy
if columns is not None:
try:
data = self._do_select_columns(data, columns)
except ValueError:
self.close()
raise
# Decode strings
for col, typ in zip(data, self.typlist):
if type(typ) is int:
data[col] = data[col].apply(self._decode, convert_dtype=True)
data = self._insert_strls(data)
cols_ = np.where([dtyp is not None for dtyp in self.dtyplist])[0]
# Convert columns (if needed) to match input type
ix = data.index
requires_type_conversion = False
data_formatted = []
for i in cols_:
if self.dtyplist[i] is not None:
col = data.columns[i]
dtype = data[col].dtype
if dtype != np.dtype(object) and dtype != self.dtyplist[i]:
requires_type_conversion = True
data_formatted.append(
(col, Series(data[col], ix, self.dtyplist[i]))
)
else:
data_formatted.append((col, data[col]))
if requires_type_conversion:
data = DataFrame.from_dict(dict(data_formatted))
del data_formatted
data = self._do_convert_missing(data, convert_missing)
if convert_dates:
def any_startswith(x: str) -> bool:
return any(x.startswith(fmt) for fmt in _date_formats)
cols = np.where([any_startswith(x) for x in self.fmtlist])[0]
for i in cols:
col = data.columns[i]
try:
data[col] = _stata_elapsed_date_to_datetime_vec(
data[col], self.fmtlist[i]
)
except ValueError:
self.close()
raise
if convert_categoricals and self.format_version > 108:
data = self._do_convert_categoricals(
data, self.value_label_dict, self.lbllist, order_categoricals
)
if not preserve_dtypes:
retyped_data = []
convert = False
for col in data:
dtype = data[col].dtype
if dtype in (np.dtype(np.float16), np.dtype(np.float32)):
dtype = np.dtype(np.float64)
convert = True
elif dtype in (
np.dtype(np.int8),
np.dtype(np.int16),
np.dtype(np.int32),
):
dtype = np.dtype(np.int64)
convert = True
retyped_data.append((col, data[col].astype(dtype)))
if convert:
data = DataFrame.from_dict(dict(retyped_data))
if index_col is not None:
data = data.set_index(data.pop(index_col))
return data
def _do_convert_missing(self, data: DataFrame, convert_missing: bool) -> DataFrame:
# Check for missing values, and replace if found
replacements = {}
for i, colname in enumerate(data):
fmt = self.typlist[i]
if fmt not in self.VALID_RANGE:
continue
fmt = cast(str, fmt) # only strs in VALID_RANGE
nmin, nmax = self.VALID_RANGE[fmt]
series = data[colname]
# appreciably faster to do this with ndarray instead of Series
svals = series._values
missing = (svals < nmin) | (svals > nmax)
if not missing.any():
continue
if convert_missing: # Replacement follows Stata notation
missing_loc = np.nonzero(np.asarray(missing))[0]
umissing, umissing_loc = np.unique(series[missing], return_inverse=True)
replacement = Series(series, dtype=object)
for j, um in enumerate(umissing):
missing_value = StataMissingValue(um)
loc = missing_loc[umissing_loc == j]
replacement.iloc[loc] = missing_value
else: # All replacements are identical
dtype = series.dtype
if dtype not in (np.float32, np.float64):
dtype = np.float64
replacement = Series(series, dtype=dtype)
if not replacement._values.flags["WRITEABLE"]:
# only relevant for ArrayManager; construction
# path for BlockManager ensures writeability
replacement = replacement.copy()
# Note: operating on ._values is much faster than directly
# TODO: can we fix that?
replacement._values[missing] = np.nan
replacements[colname] = replacement
if replacements:
for col in replacements:
data[col] = replacements[col]
return data
def _insert_strls(self, data: DataFrame) -> DataFrame:
if not hasattr(self, "GSO") or len(self.GSO) == 0:
return data
for i, typ in enumerate(self.typlist):
if typ != "Q":
continue
# Wrap v_o in a string to allow uint64 values as keys on 32bit OS
data.iloc[:, i] = [self.GSO[str(k)] for k in data.iloc[:, i]]
return data
def _do_select_columns(self, data: DataFrame, columns: Sequence[str]) -> DataFrame:
if not self._column_selector_set:
column_set = set(columns)
if len(column_set) != len(columns):
raise ValueError("columns contains duplicate entries")
unmatched = column_set.difference(data.columns)
if unmatched:
joined = ", ".join(list(unmatched))
raise ValueError(
"The following columns were not "
f"found in the Stata data set: {joined}"
)
# Copy information for retained columns for later processing
dtyplist = []
typlist = []
fmtlist = []
lbllist = []
for col in columns:
i = data.columns.get_loc(col)
dtyplist.append(self.dtyplist[i])
typlist.append(self.typlist[i])
fmtlist.append(self.fmtlist[i])
lbllist.append(self.lbllist[i])
self.dtyplist = dtyplist
self.typlist = typlist
self.fmtlist = fmtlist
self.lbllist = lbllist
self._column_selector_set = True
return data[columns]
def _do_convert_categoricals(
self,
data: DataFrame,
value_label_dict: dict[str, dict[float | int, str]],
lbllist: Sequence[str],
order_categoricals: bool,
) -> DataFrame:
"""
Converts categorical columns to Categorical type.
"""
value_labels = list(value_label_dict.keys())
cat_converted_data = []
for col, label in zip(data, lbllist):
if label in value_labels:
# Explicit call with ordered=True
vl = value_label_dict[label]
keys = np.array(list(vl.keys()))
column = data[col]
key_matches = column.isin(keys)
if self._using_iterator and key_matches.all():
initial_categories: np.ndarray | None = keys
# If all categories are in the keys and we are iterating,
# use the same keys for all chunks. If some are missing
# value labels, then we will fall back to the categories
# varying across chunks.
else:
if self._using_iterator:
# warn is using an iterator
warnings.warn(
categorical_conversion_warning, CategoricalConversionWarning
)
initial_categories = None
cat_data = Categorical(
column, categories=initial_categories, ordered=order_categoricals
)
if initial_categories is None:
# If None here, then we need to match the cats in the Categorical
categories = []
for category in cat_data.categories:
if category in vl:
categories.append(vl[category])
else:
categories.append(category)
else:
# If all cats are matched, we can use the values
categories = list(vl.values())
try:
# Try to catch duplicate categories
cat_data.categories = categories
except ValueError as err:
vc = Series(categories).value_counts()
repeated_cats = list(vc.index[vc > 1])
repeats = "-" * 80 + "\n" + "\n".join(repeated_cats)
# GH 25772
msg = f"""
Value labels for column {col} are not unique. These cannot be converted to
pandas categoricals.
Either read the file with `convert_categoricals` set to False or use the
low level interface in `StataReader` to separately read the values and the
value_labels.
The repeated labels are:
{repeats}
"""
raise ValueError(msg) from err
# TODO: is the next line needed above in the data(...) method?
cat_series = Series(cat_data, index=data.index)
cat_converted_data.append((col, cat_series))
else:
cat_converted_data.append((col, data[col]))
data = DataFrame(dict(cat_converted_data), copy=False)
return data
@property
def data_label(self) -> str:
"""
Return data label of Stata file.
"""
return self._data_label
def variable_labels(self) -> dict[str, str]:
"""
        Return variable labels as a dict, associating each variable name
        with its corresponding label.
Returns
-------
dict
"""
return dict(zip(self.varlist, self._variable_labels))
def value_labels(self) -> dict[str, dict[float | int, str]]:
"""
        Return a dict, associating each variable name with a dict that
        associates each value with its corresponding label.
Returns
-------
dict
"""
if not self._value_labels_read:
self._read_value_labels()
return self.value_label_dict
@Appender(_read_stata_doc)
def read_stata(
filepath_or_buffer: FilePath | ReadBuffer[bytes],
convert_dates: bool = True,
convert_categoricals: bool = True,
index_col: str | None = None,
convert_missing: bool = False,
preserve_dtypes: bool = True,
columns: Sequence[str] | None = None,
order_categoricals: bool = True,
chunksize: int | None = None,
iterator: bool = False,
compression: CompressionOptions = "infer",
storage_options: StorageOptions = None,
) -> DataFrame | StataReader:
reader = StataReader(
filepath_or_buffer,
convert_dates=convert_dates,
convert_categoricals=convert_categoricals,
index_col=index_col,
convert_missing=convert_missing,
preserve_dtypes=preserve_dtypes,
columns=columns,
order_categoricals=order_categoricals,
chunksize=chunksize,
storage_options=storage_options,
compression=compression,
)
if iterator or chunksize:
return reader
with reader:
return reader.read()
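# --- Illustrative sketch (not part of the original module) -------------------
# A hedged usage example of the chunked-reading path above: when `chunksize`
# is given, the StataReader itself is returned and can be iterated inside a
# context manager. The file name "example.dta" and the chunk size are
# hypothetical.
def _example_read_stata_in_chunks(path: str = "example.dta") -> int:
    """Count rows by streaming a dta file in 10,000-row chunks."""
    total = 0
    with read_stata(path, chunksize=10_000) as reader:
        for chunk in reader:
            total += len(chunk)
    return total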
def _set_endianness(endianness: str) -> str:
if endianness.lower() in ["<", "little"]:
return "<"
elif endianness.lower() in [">", "big"]:
return ">"
else: # pragma : no cover
raise ValueError(f"Endianness {endianness} not understood")
def _pad_bytes(name: AnyStr, length: int) -> AnyStr:
"""
    Take a char string and pad it with null bytes until it is `length` chars long.
"""
if isinstance(name, bytes):
return name + b"\x00" * (length - len(name))
return name + "\x00" * (length - len(name))
def _convert_datetime_to_stata_type(fmt: str) -> np.dtype:
"""
Convert from one of the stata date formats to a type in TYPE_MAP.
"""
if fmt in [
"tc",
"%tc",
"td",
"%td",
"tw",
"%tw",
"tm",
"%tm",
"tq",
"%tq",
"th",
"%th",
"ty",
"%ty",
]:
return np.dtype(np.float64) # Stata expects doubles for SIFs
else:
raise NotImplementedError(f"Format {fmt} not implemented")
def _maybe_convert_to_int_keys(convert_dates: dict, varlist: list[Hashable]) -> dict:
new_dict = {}
for key in convert_dates:
if not convert_dates[key].startswith("%"): # make sure proper fmts
convert_dates[key] = "%" + convert_dates[key]
if key in varlist:
new_dict.update({varlist.index(key): convert_dates[key]})
else:
if not isinstance(key, int):
raise ValueError("convert_dates key must be a column or an integer")
new_dict.update({key: convert_dates[key]})
return new_dict
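# Illustrative sketch (not part of the original module): name-keyed entries in
# convert_dates are normalised to column positions and formats gain a leading
# "%". The column names used here are hypothetical.
def _example_convert_dates_keys() -> dict:
    out = _maybe_convert_to_int_keys({"when": "td"}, ["id", "when"])
    assert out == {1: "%td"}
    return out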
def _dtype_to_stata_type(dtype: np.dtype, column: Series) -> int:
"""
Convert dtype types to stata types. Returns the byte of the given ordinal.
See TYPE_MAP and comments for an explanation. This is also explained in
the dta spec.
1 - 244 are strings of this length
              Pandas    Stata
    251 - for int8       byte
    252 - for int16      int
    253 - for int32      long
    254 - for float32    float
    255 - for double     double
If there are dates to convert, then dtype will already have the correct
type inserted.
"""
# TODO: expand to handle datetime to integer conversion
if dtype.type is np.object_: # try to coerce it to the biggest string
# not memory efficient, what else could we
# do?
itemsize = max_len_string_array(ensure_object(column._values))
return max(itemsize, 1)
elif dtype.type is np.float64:
return 255
elif dtype.type is np.float32:
return 254
elif dtype.type is np.int32:
return 253
elif dtype.type is np.int16:
return 252
elif dtype.type is np.int8:
return 251
else: # pragma : no cover
raise NotImplementedError(f"Data type {dtype} not supported.")
def _dtype_to_default_stata_fmt(
dtype, column: Series, dta_version: int = 114, force_strl: bool = False
) -> str:
"""
Map numpy dtype to stata's default format for this type. Not terribly
important since users can change this in Stata. Semantics are
object -> "%DDs" where DD is the length of the string. If not a string,
raise ValueError
float64 -> "%10.0g"
float32 -> "%9.0g"
int64 -> "%9.0g"
int32 -> "%12.0g"
int16 -> "%8.0g"
int8 -> "%8.0g"
strl -> "%9s"
"""
# TODO: Refactor to combine type with format
# TODO: expand this to handle a default datetime format?
if dta_version < 117:
max_str_len = 244
else:
max_str_len = 2045
if force_strl:
return "%9s"
if dtype.type is np.object_:
itemsize = max_len_string_array(ensure_object(column._values))
if itemsize > max_str_len:
if dta_version >= 117:
return "%9s"
else:
raise ValueError(excessive_string_length_error.format(column.name))
return "%" + str(max(itemsize, 1)) + "s"
elif dtype == np.float64:
return "%10.0g"
elif dtype == np.float32:
return "%9.0g"
elif dtype == np.int32:
return "%12.0g"
elif dtype == np.int8 or dtype == np.int16:
return "%8.0g"
else: # pragma : no cover
raise NotImplementedError(f"Data type {dtype} not supported.")
@doc(
storage_options=_shared_docs["storage_options"],
compression_options=_shared_docs["compression_options"] % "fname",
)
class StataWriter(StataParser):
"""
A class for writing Stata binary dta files
Parameters
----------
fname : path (string), buffer or path object
string, path object (pathlib.Path or py._path.local.LocalPath) or
object implementing a binary write() functions. If using a buffer
then the buffer will not be automatically closed after the file
is written.
data : DataFrame
Input to save
convert_dates : dict
Dictionary mapping columns containing datetime types to stata internal
format to use when writing the dates. Options are 'tc', 'td', 'tm',
'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name.
Datetime columns that do not have a conversion type specified will be
converted to 'tc'. Raises NotImplementedError if a datetime column has
timezone information
write_index : bool
Write the index to Stata dataset.
byteorder : str
Can be ">", "<", "little", or "big". default is `sys.byteorder`
time_stamp : datetime
A datetime to use as file creation date. Default is the current time
data_label : str
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as values.
Each label must be 80 characters or smaller.
{compression_options}
.. versionadded:: 1.1.0
.. versionchanged:: 1.4.0 Zstandard support.
{storage_options}
.. versionadded:: 1.2.0
value_labels : dict of dicts
Dictionary containing columns as keys and dictionaries of column value
to labels as values. The combined length of all labels for a single
variable must be 32,000 characters or smaller.
.. versionadded:: 1.4.0
Returns
-------
writer : StataWriter instance
The StataWriter instance has a write_file method, which will
write the file to the given `fname`.
Raises
------
NotImplementedError
* If datetimes contain timezone information
ValueError
* Columns listed in convert_dates are neither datetime64[ns]
          nor datetime.datetime
* Column dtype is not representable in Stata
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
Examples
--------
>>> data = pd.DataFrame([[1.0, 1]], columns=['a', 'b'])
>>> writer = StataWriter('./data_file.dta', data)
>>> writer.write_file()
Directly write a zip file
>>> compression = {{"method": "zip", "archive_name": "data_file.dta"}}
>>> writer = StataWriter('./data_file.zip', data, compression=compression)
>>> writer.write_file()
Save a DataFrame with dates
>>> from datetime import datetime
>>> data = pd.DataFrame([[datetime(2000,1,1)]], columns=['date'])
>>> writer = StataWriter('./date_data_file.dta', data, {{'date' : 'tw'}})
>>> writer.write_file()
"""
_max_string_length = 244
_encoding = "latin-1"
def __init__(
self,
fname: FilePath | WriteBuffer[bytes],
data: DataFrame,
convert_dates: dict[Hashable, str] | None = None,
write_index: bool = True,
byteorder: str | None = None,
time_stamp: datetime.datetime | None = None,
data_label: str | None = None,
variable_labels: dict[Hashable, str] | None = None,
compression: CompressionOptions = "infer",
storage_options: StorageOptions = None,
*,
value_labels: dict[Hashable, dict[float | int, str]] | None = None,
):
super().__init__()
self.data = data
self._convert_dates = {} if convert_dates is None else convert_dates
self._write_index = write_index
self._time_stamp = time_stamp
self._data_label = data_label
self._variable_labels = variable_labels
self._non_cat_value_labels = value_labels
self._value_labels: list[StataValueLabel] = []
self._has_value_labels = np.array([], dtype=bool)
self._compression = compression
self._output_file: IO[bytes] | None = None
self._converted_names: dict[Hashable, str] = {}
# attach nobs, nvars, data, varlist, typlist
self._prepare_pandas(data)
self.storage_options = storage_options
if byteorder is None:
byteorder = sys.byteorder
self._byteorder = _set_endianness(byteorder)
self._fname = fname
self.type_converters = {253: np.int32, 252: np.int16, 251: np.int8}
def _write(self, to_write: str) -> None:
"""
Helper to call encode before writing to file for Python 3 compat.
"""
self.handles.handle.write(to_write.encode(self._encoding))
def _write_bytes(self, value: bytes) -> None:
"""
Helper to assert file is open before writing.
"""
self.handles.handle.write(value)
def _prepare_non_cat_value_labels(
self, data: DataFrame
) -> list[StataNonCatValueLabel]:
"""
        Check for value labels provided for non-categorical columns. Value
        labels are stored for later writing to the dta file.
"""
non_cat_value_labels: list[StataNonCatValueLabel] = []
if self._non_cat_value_labels is None:
return non_cat_value_labels
for labname, labels in self._non_cat_value_labels.items():
if labname in self._converted_names:
colname = self._converted_names[labname]
elif labname in data.columns:
colname = str(labname)
else:
raise KeyError(
f"Can't create value labels for {labname}, it wasn't "
"found in the dataset."
)
if not is_numeric_dtype(data[colname].dtype):
# Labels should not be passed explicitly for categorical
# columns that will be converted to int
raise ValueError(
f"Can't create value labels for {labname}, value labels "
"can only be applied to numeric columns."
)
svl = StataNonCatValueLabel(colname, labels)
non_cat_value_labels.append(svl)
return non_cat_value_labels
def _prepare_categoricals(self, data: DataFrame) -> DataFrame:
"""
Check for categorical columns, retain categorical information for
Stata file and convert categorical data to int
"""
is_cat = [is_categorical_dtype(data[col].dtype) for col in data]
if not any(is_cat):
return data
self._has_value_labels |= np.array(is_cat)
get_base_missing_value = StataMissingValue.get_base_missing_value
data_formatted = []
for col, col_is_cat in zip(data, is_cat):
if col_is_cat:
svl = StataValueLabel(data[col], encoding=self._encoding)
self._value_labels.append(svl)
dtype = data[col].cat.codes.dtype
if dtype == np.int64:
raise ValueError(
"It is not possible to export "
"int64-based categorical data to Stata."
)
values = data[col].cat.codes._values.copy()
# Upcast if needed so that correct missing values can be set
if values.max() >= get_base_missing_value(dtype):
if dtype == np.int8:
dtype = np.dtype(np.int16)
elif dtype == np.int16:
dtype = np.dtype(np.int32)
else:
dtype = np.dtype(np.float64)
values = np.array(values, dtype=dtype)
# Replace missing values with Stata missing value for type
values[values == -1] = get_base_missing_value(dtype)
data_formatted.append((col, values))
else:
data_formatted.append((col, data[col]))
return DataFrame.from_dict(dict(data_formatted))
def _replace_nans(self, data: DataFrame) -> DataFrame:
        """
        Check floating point data columns for nans, and replace these with
        the generic Stata missing value (.)
"""
for c in data:
dtype = data[c].dtype
if dtype in (np.float32, np.float64):
if dtype == np.float32:
replacement = self.MISSING_VALUES["f"]
else:
replacement = self.MISSING_VALUES["d"]
data[c] = data[c].fillna(replacement)
return data
def _update_strl_names(self) -> None:
"""No-op, forward compatibility"""
pass
def _validate_variable_name(self, name: str) -> str:
"""
Validate variable names for Stata export.
Parameters
----------
name : str
Variable name
Returns
-------
str
The validated name with invalid characters replaced with
underscores.
Notes
-----
Stata 114 and 117 support ascii characters in a-z, A-Z, 0-9
and _.
"""
for c in name:
if (
(c < "A" or c > "Z")
and (c < "a" or c > "z")
and (c < "0" or c > "9")
and c != "_"
):
name = name.replace(c, "_")
return name
def _check_column_names(self, data: DataFrame) -> DataFrame:
"""
Checks column names to ensure that they are valid Stata column names.
This includes checks for:
* Non-string names
* Stata keywords
* Variables that start with numbers
* Variables with names that are too long
When an illegal variable name is detected, it is converted, and if
dates are exported, the variable name is propagated to the date
conversion dictionary
"""
converted_names: dict[Hashable, str] = {}
columns = list(data.columns)
original_columns = columns[:]
duplicate_var_id = 0
for j, name in enumerate(columns):
orig_name = name
if not isinstance(name, str):
name = str(name)
name = self._validate_variable_name(name)
# Variable name must not be a reserved word
if name in self.RESERVED_WORDS:
name = "_" + name
# Variable name may not start with a number
if "0" <= name[0] <= "9":
name = "_" + name
name = name[: min(len(name), 32)]
if not name == orig_name:
# check for duplicates
while columns.count(name) > 0:
# prepend ascending number to avoid duplicates
name = "_" + str(duplicate_var_id) + name
name = name[: min(len(name), 32)]
duplicate_var_id += 1
converted_names[orig_name] = name
columns[j] = name
data.columns = Index(columns)
# Check date conversion, and fix key if needed
if self._convert_dates:
for c, o in zip(columns, original_columns):
if c != o:
self._convert_dates[c] = self._convert_dates[o]
del self._convert_dates[o]
if converted_names:
conversion_warning = []
for orig_name, name in converted_names.items():
msg = f"{orig_name} -> {name}"
conversion_warning.append(msg)
ws = invalid_name_doc.format("\n ".join(conversion_warning))
warnings.warn(ws, InvalidColumnName)
self._converted_names = converted_names
self._update_strl_names()
return data
def _set_formats_and_types(self, dtypes: Series) -> None:
self.fmtlist: list[str] = []
self.typlist: list[int] = []
for col, dtype in dtypes.items():
self.fmtlist.append(_dtype_to_default_stata_fmt(dtype, self.data[col]))
self.typlist.append(_dtype_to_stata_type(dtype, self.data[col]))
def _prepare_pandas(self, data: DataFrame) -> None:
# NOTE: we might need a different API / class for pandas objects so
# we can set different semantics - handle this with a PR to pandas.io
data = data.copy()
if self._write_index:
temp = data.reset_index()
if isinstance(temp, DataFrame):
data = temp
# Ensure column names are strings
data = self._check_column_names(data)
# Check columns for compatibility with stata, upcast if necessary
# Raise if outside the supported range
data = _cast_to_stata_types(data)
# Replace NaNs with Stata missing values
data = self._replace_nans(data)
# Set all columns to initially unlabelled
self._has_value_labels = np.repeat(False, data.shape[1])
# Create value labels for non-categorical data
non_cat_value_labels = self._prepare_non_cat_value_labels(data)
non_cat_columns = [svl.labname for svl in non_cat_value_labels]
has_non_cat_val_labels = data.columns.isin(non_cat_columns)
self._has_value_labels |= has_non_cat_val_labels
self._value_labels.extend(non_cat_value_labels)
# Convert categoricals to int data, and strip labels
data = self._prepare_categoricals(data)
self.nobs, self.nvar = data.shape
self.data = data
self.varlist = data.columns.tolist()
dtypes = data.dtypes
# Ensure all date columns are converted
for col in data:
if col in self._convert_dates:
continue
if is_datetime64_dtype(data[col]):
self._convert_dates[col] = "tc"
self._convert_dates = _maybe_convert_to_int_keys(
self._convert_dates, self.varlist
)
for key in self._convert_dates:
new_type = _convert_datetime_to_stata_type(self._convert_dates[key])
dtypes[key] = np.dtype(new_type)
# Verify object arrays are strings and encode to bytes
self._encode_strings()
self._set_formats_and_types(dtypes)
# set the given format for the datetime cols
if self._convert_dates is not None:
for key in self._convert_dates:
if isinstance(key, int):
self.fmtlist[key] = self._convert_dates[key]
def _encode_strings(self) -> None:
"""
Encode strings in dta-specific encoding
Do not encode columns marked for date conversion or for strL
conversion. The strL converter independently handles conversion and
also accepts empty string arrays.
"""
convert_dates = self._convert_dates
# _convert_strl is not available in dta 114
convert_strl = getattr(self, "_convert_strl", [])
for i, col in enumerate(self.data):
# Skip columns marked for date conversion or strl conversion
if i in convert_dates or col in convert_strl:
continue
column = self.data[col]
dtype = column.dtype
if dtype.type is np.object_:
inferred_dtype = infer_dtype(column, skipna=True)
if not ((inferred_dtype == "string") or len(column) == 0):
col = column.name
raise ValueError(
f"""\
Column `{col}` cannot be exported.\n\nOnly string-like object arrays
containing all strings or a mix of strings and None can be exported.
Object arrays containing only null values are prohibited. Other object
types cannot be exported and must first be converted to one of the
supported types."""
)
encoded = self.data[col].str.encode(self._encoding)
# If larger than _max_string_length do nothing
if (
max_len_string_array(ensure_object(encoded._values))
<= self._max_string_length
):
self.data[col] = encoded
def write_file(self) -> None:
"""
Export DataFrame object to Stata dta format.
"""
with get_handle(
self._fname,
"wb",
compression=self._compression,
is_text=False,
storage_options=self.storage_options,
) as self.handles:
if self.handles.compression["method"] is not None:
# ZipFile creates a file (with the same name) for each write call.
# Write it first into a buffer and then write the buffer to the ZipFile.
self._output_file, self.handles.handle = self.handles.handle, BytesIO()
self.handles.created_handles.append(self.handles.handle)
try:
self._write_header(
data_label=self._data_label, time_stamp=self._time_stamp
)
self._write_map()
self._write_variable_types()
self._write_varnames()
self._write_sortlist()
self._write_formats()
self._write_value_label_names()
self._write_variable_labels()
self._write_expansion_fields()
self._write_characteristics()
records = self._prepare_data()
self._write_data(records)
self._write_strls()
self._write_value_labels()
self._write_file_close_tag()
self._write_map()
self._close()
except Exception as exc:
self.handles.close()
if isinstance(self._fname, (str, os.PathLike)) and os.path.isfile(
self._fname
):
try:
os.unlink(self._fname)
except OSError:
warnings.warn(
f"This save was not successful but {self._fname} could not "
"be deleted. This file is not valid.",
ResourceWarning,
)
raise exc
def _close(self) -> None:
"""
Close the file if it was created by the writer.
If a buffer or file-like object was passed in, for example a GzipFile,
then leave this file open for the caller to close.
"""
# write compression
if self._output_file is not None:
assert isinstance(self.handles.handle, BytesIO)
bio, self.handles.handle = self.handles.handle, self._output_file
self.handles.handle.write(bio.getvalue())
def _write_map(self) -> None:
"""No-op, future compatibility"""
pass
def _write_file_close_tag(self) -> None:
"""No-op, future compatibility"""
pass
def _write_characteristics(self) -> None:
"""No-op, future compatibility"""
pass
def _write_strls(self) -> None:
"""No-op, future compatibility"""
pass
def _write_expansion_fields(self) -> None:
"""Write 5 zeros for expansion fields"""
self._write(_pad_bytes("", 5))
def _write_value_labels(self) -> None:
for vl in self._value_labels:
self._write_bytes(vl.generate_value_label(self._byteorder))
def _write_header(
self,
data_label: str | None = None,
time_stamp: datetime.datetime | None = None,
) -> None:
byteorder = self._byteorder
# ds_format - just use 114
self._write_bytes(struct.pack("b", 114))
# byteorder
self._write(byteorder == ">" and "\x01" or "\x02")
# filetype
self._write("\x01")
# unused
self._write("\x00")
# number of vars, 2 bytes
self._write_bytes(struct.pack(byteorder + "h", self.nvar)[:2])
# number of obs, 4 bytes
self._write_bytes(struct.pack(byteorder + "i", self.nobs)[:4])
# data label 81 bytes, char, null terminated
if data_label is None:
self._write_bytes(self._null_terminate_bytes(_pad_bytes("", 80)))
else:
self._write_bytes(
self._null_terminate_bytes(_pad_bytes(data_label[:80], 80))
)
# time stamp, 18 bytes, char, null terminated
# format dd Mon yyyy hh:mm
if time_stamp is None:
time_stamp = datetime.datetime.now()
elif not isinstance(time_stamp, datetime.datetime):
raise ValueError("time_stamp should be datetime type")
# GH #13856
# Avoid locale-specific month conversion
months = [
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
]
month_lookup = {i + 1: month for i, month in enumerate(months)}
ts = (
time_stamp.strftime("%d ")
+ month_lookup[time_stamp.month]
+ time_stamp.strftime(" %Y %H:%M")
)
self._write_bytes(self._null_terminate_bytes(ts))
def _write_variable_types(self) -> None:
for typ in self.typlist:
self._write_bytes(struct.pack("B", typ))
def _write_varnames(self) -> None:
# varlist names are checked by _check_column_names
# varlist, requires null terminated
for name in self.varlist:
name = self._null_terminate_str(name)
name = _pad_bytes(name[:32], 33)
self._write(name)
def _write_sortlist(self) -> None:
# srtlist, 2*(nvar+1), int array, encoded by byteorder
srtlist = _pad_bytes("", 2 * (self.nvar + 1))
self._write(srtlist)
def _write_formats(self) -> None:
# fmtlist, 49*nvar, char array
for fmt in self.fmtlist:
self._write(_pad_bytes(fmt, 49))
def _write_value_label_names(self) -> None:
# lbllist, 33*nvar, char array
for i in range(self.nvar):
# Use variable name when categorical
if self._has_value_labels[i]:
name = self.varlist[i]
name = self._null_terminate_str(name)
name = _pad_bytes(name[:32], 33)
self._write(name)
else: # Default is empty label
self._write(_pad_bytes("", 33))
def _write_variable_labels(self) -> None:
# Missing labels are 80 blank characters plus null termination
blank = _pad_bytes("", 81)
if self._variable_labels is None:
for i in range(self.nvar):
self._write(blank)
return
for col in self.data:
if col in self._variable_labels:
label = self._variable_labels[col]
if len(label) > 80:
raise ValueError("Variable labels must be 80 characters or fewer")
is_latin1 = all(ord(c) < 256 for c in label)
if not is_latin1:
raise ValueError(
"Variable labels must contain only characters that "
"can be encoded in Latin-1"
)
self._write(_pad_bytes(label, 81))
else:
self._write(blank)
def _convert_strls(self, data: DataFrame) -> DataFrame:
"""No-op, future compatibility"""
return data
def _prepare_data(self) -> np.recarray:
data = self.data
typlist = self.typlist
convert_dates = self._convert_dates
# 1. Convert dates
if self._convert_dates is not None:
for i, col in enumerate(data):
if i in convert_dates:
data[col] = _datetime_to_stata_elapsed_vec(
data[col], self.fmtlist[i]
)
# 2. Convert strls
data = self._convert_strls(data)
# 3. Convert bad string data to '' and pad to correct length
dtypes = {}
native_byteorder = self._byteorder == _set_endianness(sys.byteorder)
for i, col in enumerate(data):
typ = typlist[i]
if typ <= self._max_string_length:
data[col] = data[col].fillna("").apply(_pad_bytes, args=(typ,))
stype = f"S{typ}"
dtypes[col] = stype
data[col] = data[col].astype(stype)
else:
dtype = data[col].dtype
if not native_byteorder:
dtype = dtype.newbyteorder(self._byteorder)
dtypes[col] = dtype
return data.to_records(index=False, column_dtypes=dtypes)
def _write_data(self, records: np.recarray) -> None:
self._write_bytes(records.tobytes())
@staticmethod
def _null_terminate_str(s: str) -> str:
s += "\x00"
return s
def _null_terminate_bytes(self, s: str) -> bytes:
return self._null_terminate_str(s).encode(self._encoding)
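# --- Illustrative sketch (not part of the original module) -------------------
# A hedged example of writing a tiny frame to an in-memory buffer with a
# variable label and explicit non-categorical value labels (the 1.4.0
# `value_labels` argument documented above). The column name, label text and
# values are hypothetical.
def _example_write_with_labels() -> bytes:
    buf = BytesIO()
    df = DataFrame({"answer": [1, 2, 1]})
    writer = StataWriter(
        buf,
        df,
        write_index=False,
        variable_labels={"answer": "Example variable label"},
        value_labels={"answer": {1: "yes", 2: "no"}},
    )
    writer.write_file()
    return buf.getvalue()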
def _dtype_to_stata_type_117(dtype: np.dtype, column: Series, force_strl: bool) -> int:
"""
Converts dtype types to stata types. Returns the byte of the given ordinal.
See TYPE_MAP and comments for an explanation. This is also explained in
the dta spec.
1 - 2045 are strings of this length
                Pandas    Stata
    32768 - for object    strL
    65526 - for float64   double
    65527 - for float32   float
    65528 - for int32     long
    65529 - for int16     int
    65530 - for int8      byte
If there are dates to convert, then dtype will already have the correct
type inserted.
"""
# TODO: expand to handle datetime to integer conversion
if force_strl:
return 32768
if dtype.type is np.object_: # try to coerce it to the biggest string
# not memory efficient, what else could we
# do?
itemsize = max_len_string_array(ensure_object(column._values))
itemsize = max(itemsize, 1)
if itemsize <= 2045:
return itemsize
return 32768
elif dtype.type is np.float64:
return 65526
elif dtype.type is np.float32:
return 65527
elif dtype.type is np.int32:
return 65528
elif dtype.type is np.int16:
return 65529
elif dtype.type is np.int8:
return 65530
else: # pragma : no cover
raise NotImplementedError(f"Data type {dtype} not supported.")
def _pad_bytes_new(name: str | bytes, length: int) -> bytes:
"""
    Take a str or bytes instance and pad it with null bytes until it is `length` bytes long.
"""
if isinstance(name, str):
name = bytes(name, "utf-8")
return name + b"\x00" * (length - len(name))
class StataStrLWriter:
"""
Converter for Stata StrLs
Stata StrLs map 8 byte values to strings which are stored using a
dictionary-like format where strings are keyed to two values.
Parameters
----------
df : DataFrame
DataFrame to convert
columns : Sequence[str]
List of columns names to convert to StrL
version : int, optional
dta version. Currently supports 117, 118 and 119
byteorder : str, optional
Can be ">", "<", "little", or "big". default is `sys.byteorder`
Notes
-----
Supports creation of the StrL block of a dta file for dta versions
117, 118 and 119. These differ in how the GSO is stored. 118 and
119 store the GSO lookup value as a uint32 and a uint64, while 117
    uses two uint32s. 118 and 119 also encode all strings as unicode,
    which is required by the format. 117 uses 'latin-1', a fixed-width
encoding that extends the 7-bit ascii table with an additional 128
characters.
"""
def __init__(
self,
df: DataFrame,
columns: Sequence[str],
version: int = 117,
byteorder: str | None = None,
):
if version not in (117, 118, 119):
raise ValueError("Only dta versions 117, 118 and 119 supported")
self._dta_ver = version
self.df = df
self.columns = columns
self._gso_table = {"": (0, 0)}
if byteorder is None:
byteorder = sys.byteorder
self._byteorder = _set_endianness(byteorder)
gso_v_type = "I" # uint32
gso_o_type = "Q" # uint64
self._encoding = "utf-8"
if version == 117:
o_size = 4
gso_o_type = "I" # 117 used uint32
self._encoding = "latin-1"
elif version == 118:
o_size = 6
else: # version == 119
o_size = 5
        self._o_offset = 2 ** (8 * (8 - o_size))
self._gso_o_type = gso_o_type
self._gso_v_type = gso_v_type
def _convert_key(self, key: tuple[int, int]) -> int:
v, o = key
        return v + self._o_offset * o
def generate_table(self) -> tuple[dict[str, tuple[int, int]], DataFrame]:
"""
Generates the GSO lookup table for the DataFrame
Returns
-------
gso_table : dict
Ordered dictionary using the string found as keys
and their lookup position (v,o) as values
gso_df : DataFrame
DataFrame where strl columns have been converted to
(v,o) values
Notes
-----
Modifies the DataFrame in-place.
The DataFrame returned encodes the (v,o) values as uint64s. The
encoding depends on the dta version, and can be expressed as
        enc = v + o * 2 ** ((8 - o_size) * 8)
so that v is stored in the lower bits and o is in the upper
bits. o_size is
* 117: 4
* 118: 6
* 119: 5
"""
gso_table = self._gso_table
gso_df = self.df
columns = list(gso_df.columns)
selected = gso_df[self.columns]
col_index = [(col, columns.index(col)) for col in self.columns]
keys = np.empty(selected.shape, dtype=np.uint64)
for o, (idx, row) in enumerate(selected.iterrows()):
for j, (col, v) in enumerate(col_index):
val = row[col]
# Allow columns with mixed str and None (GH 23633)
val = "" if val is None else val
key = gso_table.get(val, None)
if key is None:
# Stata prefers human numbers
key = (v + 1, o + 1)
gso_table[val] = key
keys[o, j] = self._convert_key(key)
for i, col in enumerate(self.columns):
gso_df[col] = keys[:, i]
return gso_table, gso_df
def generate_blob(self, gso_table: dict[str, tuple[int, int]]) -> bytes:
"""
Generates the binary blob of GSOs that is written to the dta file.
Parameters
----------
gso_table : dict
Ordered dictionary (str, vo)
Returns
-------
gso : bytes
Binary content of dta file to be placed between strl tags
Notes
-----
Output format depends on dta version. 117 uses two uint32s to
express v and o while 118+ uses a uint32 for v and a uint64 for o.
"""
# Format information
# Length includes null term
# 117
# GSOvvvvooootllllxxxxxxxxxxxxxxx...x
# 3 u4 u4 u1 u4 string + null term
#
# 118, 119
# GSOvvvvooooooootllllxxxxxxxxxxxxxxx...x
# 3 u4 u8 u1 u4 string + null term
bio = BytesIO()
gso = bytes("GSO", "ascii")
gso_type = struct.pack(self._byteorder + "B", 130)
null = struct.pack(self._byteorder + "B", 0)
v_type = self._byteorder + self._gso_v_type
o_type = self._byteorder + self._gso_o_type
len_type = self._byteorder + "I"
for strl, vo in gso_table.items():
if vo == (0, 0):
continue
v, o = vo
# GSO
bio.write(gso)
# vvvv
bio.write(struct.pack(v_type, v))
# oooo / oooooooo
bio.write(struct.pack(o_type, o))
# t
bio.write(gso_type)
# llll
utf8_string = bytes(strl, "utf-8")
bio.write(struct.pack(len_type, len(utf8_string) + 1))
# xxx...xxx
bio.write(utf8_string)
bio.write(null)
return bio.getvalue()
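# Illustrative sketch (not part of the original module): building the GSO
# lookup table and binary blob for a single hypothetical strL column, using
# the dta 117 layout described above.
def _example_strl_blob() -> bytes:
    df = DataFrame({"strls": ["apple", "banana", ""]})
    ssw = StataStrLWriter(df, ["strls"], version=117)
    gso_table, _converted = ssw.generate_table()
    return ssw.generate_blob(gso_table)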
class StataWriter117(StataWriter):
"""
A class for writing Stata binary dta files in Stata 13 format (117)
Parameters
----------
fname : path (string), buffer or path object
string, path object (pathlib.Path or py._path.local.LocalPath) or
object implementing a binary write() functions. If using a buffer
then the buffer will not be automatically closed after the file
is written.
data : DataFrame
Input to save
convert_dates : dict
Dictionary mapping columns containing datetime types to stata internal
format to use when writing the dates. Options are 'tc', 'td', 'tm',
'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name.
Datetime columns that do not have a conversion type specified will be
converted to 'tc'. Raises NotImplementedError if a datetime column has
timezone information
write_index : bool
Write the index to Stata dataset.
byteorder : str
Can be ">", "<", "little", or "big". default is `sys.byteorder`
time_stamp : datetime
A datetime to use as file creation date. Default is the current time
data_label : str
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as values.
Each label must be 80 characters or smaller.
convert_strl : list
List of columns names to convert to Stata StrL format. Columns with
more than 2045 characters are automatically written as StrL.
Smaller columns can be converted by including the column name. Using
StrLs can reduce output file size when strings are longer than 8
characters, and either frequently repeated or sparse.
{compression_options}
.. versionadded:: 1.1.0
.. versionchanged:: 1.4.0 Zstandard support.
value_labels : dict of dicts
Dictionary containing columns as keys and dictionaries of column value
to labels as values. The combined length of all labels for a single
variable must be 32,000 characters or smaller.
.. versionadded:: 1.4.0
Returns
-------
writer : StataWriter117 instance
The StataWriter117 instance has a write_file method, which will
write the file to the given `fname`.
Raises
------
NotImplementedError
* If datetimes contain timezone information
ValueError
* Columns listed in convert_dates are neither datetime64[ns]
          nor datetime.datetime
* Column dtype is not representable in Stata
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
Examples
--------
>>> from pandas.io.stata import StataWriter117
>>> data = pd.DataFrame([[1.0, 1, 'a']], columns=['a', 'b', 'c'])
>>> writer = StataWriter117('./data_file.dta', data)
>>> writer.write_file()
Directly write a zip file
>>> compression = {"method": "zip", "archive_name": "data_file.dta"}
>>> writer = StataWriter117('./data_file.zip', data, compression=compression)
>>> writer.write_file()
Or with long strings stored in strl format
>>> data = pd.DataFrame([['A relatively long string'], [''], ['']],
... columns=['strls'])
>>> writer = StataWriter117('./data_file_with_long_strings.dta', data,
... convert_strl=['strls'])
>>> writer.write_file()
"""
_max_string_length = 2045
_dta_version = 117
def __init__(
self,
fname: FilePath | WriteBuffer[bytes],
data: DataFrame,
convert_dates: dict[Hashable, str] | None = None,
write_index: bool = True,
byteorder: str | None = None,
time_stamp: datetime.datetime | None = None,
data_label: str | None = None,
variable_labels: dict[Hashable, str] | None = None,
convert_strl: Sequence[Hashable] | None = None,
compression: CompressionOptions = "infer",
storage_options: StorageOptions = None,
*,
value_labels: dict[Hashable, dict[float | int, str]] | None = None,
):
# Copy to new list since convert_strl might be modified later
self._convert_strl: list[Hashable] = []
if convert_strl is not None:
self._convert_strl.extend(convert_strl)
super().__init__(
fname,
data,
convert_dates,
write_index,
byteorder=byteorder,
time_stamp=time_stamp,
data_label=data_label,
variable_labels=variable_labels,
value_labels=value_labels,
compression=compression,
storage_options=storage_options,
)
self._map: dict[str, int] = {}
self._strl_blob = b""
@staticmethod
def _tag(val: str | bytes, tag: str) -> bytes:
"""Surround val with <tag></tag>"""
if isinstance(val, str):
val = bytes(val, "utf-8")
return bytes("<" + tag + ">", "utf-8") + val + bytes("</" + tag + ">", "utf-8")
def _update_map(self, tag: str) -> None:
"""Update map location for tag with file position"""
assert self.handles.handle is not None
self._map[tag] = self.handles.handle.tell()
def _write_header(
self,
data_label: str | None = None,
time_stamp: datetime.datetime | None = None,
) -> None:
"""Write the file header"""
byteorder = self._byteorder
self._write_bytes(bytes("<stata_dta>", "utf-8"))
bio = BytesIO()
# ds_format - 117
bio.write(self._tag(bytes(str(self._dta_version), "utf-8"), "release"))
# byteorder
bio.write(self._tag(byteorder == ">" and "MSF" or "LSF", "byteorder"))
        # number of vars, 2 bytes in 117 and 118, 4 bytes in 119
nvar_type = "H" if self._dta_version <= 118 else "I"
bio.write(self._tag(struct.pack(byteorder + nvar_type, self.nvar), "K"))
# 117 uses 4 bytes, 118 uses 8
nobs_size = "I" if self._dta_version == 117 else "Q"
bio.write(self._tag(struct.pack(byteorder + nobs_size, self.nobs), "N"))
# data label 81 bytes, char, null terminated
label = data_label[:80] if data_label is not None else ""
encoded_label = label.encode(self._encoding)
label_size = "B" if self._dta_version == 117 else "H"
label_len = struct.pack(byteorder + label_size, len(encoded_label))
encoded_label = label_len + encoded_label
bio.write(self._tag(encoded_label, "label"))
# time stamp, 18 bytes, char, null terminated
# format dd Mon yyyy hh:mm
if time_stamp is None:
time_stamp = datetime.datetime.now()
elif not isinstance(time_stamp, datetime.datetime):
raise ValueError("time_stamp should be datetime type")
# Avoid locale-specific month conversion
months = [
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
]
month_lookup = {i + 1: month for i, month in enumerate(months)}
ts = (
time_stamp.strftime("%d ")
+ month_lookup[time_stamp.month]
+ time_stamp.strftime(" %Y %H:%M")
)
# '\x11' added due to inspection of Stata file
stata_ts = b"\x11" + bytes(ts, "utf-8")
bio.write(self._tag(stata_ts, "timestamp"))
self._write_bytes(self._tag(bio.getvalue(), "header"))
def _write_map(self) -> None:
"""
Called twice during file write. The first populates the values in
the map with 0s. The second call writes the final map locations when
all blocks have been written.
"""
if not self._map:
self._map = {
"stata_data": 0,
"map": self.handles.handle.tell(),
"variable_types": 0,
"varnames": 0,
"sortlist": 0,
"formats": 0,
"value_label_names": 0,
"variable_labels": 0,
"characteristics": 0,
"data": 0,
"strls": 0,
"value_labels": 0,
"stata_data_close": 0,
"end-of-file": 0,
}
# Move to start of map
self.handles.handle.seek(self._map["map"])
bio = BytesIO()
for val in self._map.values():
bio.write(struct.pack(self._byteorder + "Q", val))
self._write_bytes(self._tag(bio.getvalue(), "map"))
def _write_variable_types(self) -> None:
self._update_map("variable_types")
bio = BytesIO()
for typ in self.typlist:
bio.write(struct.pack(self._byteorder + "H", typ))
self._write_bytes(self._tag(bio.getvalue(), "variable_types"))
def _write_varnames(self) -> None:
self._update_map("varnames")
bio = BytesIO()
# 118 scales by 4 to accommodate utf-8 data worst case encoding
vn_len = 32 if self._dta_version == 117 else 128
for name in self.varlist:
name = self._null_terminate_str(name)
name = _pad_bytes_new(name[:32].encode(self._encoding), vn_len + 1)
bio.write(name)
self._write_bytes(self._tag(bio.getvalue(), "varnames"))
def _write_sortlist(self) -> None:
self._update_map("sortlist")
sort_size = 2 if self._dta_version < 119 else 4
self._write_bytes(self._tag(b"\x00" * sort_size * (self.nvar + 1), "sortlist"))
def _write_formats(self) -> None:
self._update_map("formats")
bio = BytesIO()
fmt_len = 49 if self._dta_version == 117 else 57
for fmt in self.fmtlist:
bio.write(_pad_bytes_new(fmt.encode(self._encoding), fmt_len))
self._write_bytes(self._tag(bio.getvalue(), "formats"))
def _write_value_label_names(self) -> None:
self._update_map("value_label_names")
bio = BytesIO()
# 118 scales by 4 to accommodate utf-8 data worst case encoding
vl_len = 32 if self._dta_version == 117 else 128
for i in range(self.nvar):
# Use variable name when categorical
name = "" # default name
if self._has_value_labels[i]:
name = self.varlist[i]
name = self._null_terminate_str(name)
encoded_name = _pad_bytes_new(name[:32].encode(self._encoding), vl_len + 1)
bio.write(encoded_name)
self._write_bytes(self._tag(bio.getvalue(), "value_label_names"))
def _write_variable_labels(self) -> None:
# Missing labels are 80 blank characters plus null termination
self._update_map("variable_labels")
bio = BytesIO()
# 118 scales by 4 to accommodate utf-8 data worst case encoding
vl_len = 80 if self._dta_version == 117 else 320
blank = _pad_bytes_new("", vl_len + 1)
if self._variable_labels is None:
for _ in range(self.nvar):
bio.write(blank)
self._write_bytes(self._tag(bio.getvalue(), "variable_labels"))
return
for col in self.data:
if col in self._variable_labels:
label = self._variable_labels[col]
if len(label) > 80:
raise ValueError("Variable labels must be 80 characters or fewer")
try:
encoded = label.encode(self._encoding)
except UnicodeEncodeError as err:
raise ValueError(
"Variable labels must contain only characters that "
f"can be encoded in {self._encoding}"
) from err
bio.write(_pad_bytes_new(encoded, vl_len + 1))
else:
bio.write(blank)
self._write_bytes(self._tag(bio.getvalue(), "variable_labels"))
def _write_characteristics(self) -> None:
self._update_map("characteristics")
self._write_bytes(self._tag(b"", "characteristics"))
def _write_data(self, records) -> None:
self._update_map("data")
self._write_bytes(b"<data>")
self._write_bytes(records.tobytes())
self._write_bytes(b"</data>")
def _write_strls(self) -> None:
self._update_map("strls")
self._write_bytes(self._tag(self._strl_blob, "strls"))
def _write_expansion_fields(self) -> None:
"""No-op in dta 117+"""
pass
def _write_value_labels(self) -> None:
self._update_map("value_labels")
bio = BytesIO()
for vl in self._value_labels:
lab = vl.generate_value_label(self._byteorder)
lab = self._tag(lab, "lbl")
bio.write(lab)
self._write_bytes(self._tag(bio.getvalue(), "value_labels"))
def _write_file_close_tag(self) -> None:
self._update_map("stata_data_close")
self._write_bytes(bytes("</stata_dta>", "utf-8"))
self._update_map("end-of-file")
def _update_strl_names(self) -> None:
"""
Update column names for conversion to strl if they might have been
changed to comply with Stata naming rules
"""
# Update convert_strl if names changed
for orig, new in self._converted_names.items():
if orig in self._convert_strl:
idx = self._convert_strl.index(orig)
self._convert_strl[idx] = new
def _convert_strls(self, data: DataFrame) -> DataFrame:
"""
Convert columns to StrLs if either very large or in the
convert_strl variable
"""
convert_cols = [
col
for i, col in enumerate(data)
if self.typlist[i] == 32768 or col in self._convert_strl
]
if convert_cols:
ssw = StataStrLWriter(data, convert_cols, version=self._dta_version)
tab, new_data = ssw.generate_table()
data = new_data
self._strl_blob = ssw.generate_blob(tab)
return data
def _set_formats_and_types(self, dtypes: Series) -> None:
self.typlist = []
self.fmtlist = []
for col, dtype in dtypes.items():
force_strl = col in self._convert_strl
fmt = _dtype_to_default_stata_fmt(
dtype,
self.data[col],
dta_version=self._dta_version,
force_strl=force_strl,
)
self.fmtlist.append(fmt)
self.typlist.append(
_dtype_to_stata_type_117(dtype, self.data[col], force_strl)
)
class StataWriterUTF8(StataWriter117):
"""
Stata binary dta file writing in Stata 15 (118) and 16 (119) formats
DTA 118 and 119 format files support unicode string data (both fixed
and strL) format. Unicode is also supported in value labels, variable
labels and the dataset label. Format 119 is automatically used if the
file contains more than 32,767 variables.
.. versionadded:: 1.0.0
Parameters
----------
fname : path (string), buffer or path object
string, path object (pathlib.Path or py._path.local.LocalPath) or
object implementing a binary write() functions. If using a buffer
then the buffer will not be automatically closed after the file
is written.
data : DataFrame
Input to save
convert_dates : dict, default None
Dictionary mapping columns containing datetime types to stata internal
format to use when writing the dates. Options are 'tc', 'td', 'tm',
'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name.
Datetime columns that do not have a conversion type specified will be
converted to 'tc'. Raises NotImplementedError if a datetime column has
timezone information
write_index : bool, default True
Write the index to Stata dataset.
byteorder : str, default None
Can be ">", "<", "little", or "big". default is `sys.byteorder`
time_stamp : datetime, default None
A datetime to use as file creation date. Default is the current time
data_label : str, default None
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict, default None
Dictionary containing columns as keys and variable labels as values.
Each label must be 80 characters or smaller.
convert_strl : list, default None
List of columns names to convert to Stata StrL format. Columns with
more than 2045 characters are automatically written as StrL.
Smaller columns can be converted by including the column name. Using
StrLs can reduce output file size when strings are longer than 8
characters, and either frequently repeated or sparse.
version : int, default None
The dta version to use. By default, uses the size of data to determine
the version. 118 is used if data.shape[1] <= 32767, and 119 is used
for storing larger DataFrames.
{compression_options}
.. versionadded:: 1.1.0
.. versionchanged:: 1.4.0 Zstandard support.
value_labels : dict of dicts
Dictionary containing columns as keys and dictionaries of column value
to labels as values. The combined length of all labels for a single
variable must be 32,000 characters or smaller.
.. versionadded:: 1.4.0
Returns
-------
StataWriterUTF8
The instance has a write_file method, which will write the file to the
given `fname`.
Raises
------
NotImplementedError
* If datetimes contain timezone information
ValueError
* Columns listed in convert_dates are neither datetime64[ns]
          nor datetime.datetime
* Column dtype is not representable in Stata
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
Examples
--------
Using Unicode data and column names
>>> from pandas.io.stata import StataWriterUTF8
>>> data = pd.DataFrame([[1.0, 1, 'ᴬ']], columns=['a', 'β', 'ĉ'])
>>> writer = StataWriterUTF8('./data_file.dta', data)
>>> writer.write_file()
Directly write a zip file
>>> compression = {"method": "zip", "archive_name": "data_file.dta"}
>>> writer = StataWriterUTF8('./data_file.zip', data, compression=compression)
>>> writer.write_file()
Or with long strings stored in strl format
>>> data = pd.DataFrame([['ᴀ relatively long ŝtring'], [''], ['']],
... columns=['strls'])
>>> writer = StataWriterUTF8('./data_file_with_long_strings.dta', data,
... convert_strl=['strls'])
>>> writer.write_file()
"""
_encoding = "utf-8"
def __init__(
self,
fname: FilePath | WriteBuffer[bytes],
data: DataFrame,
convert_dates: dict[Hashable, str] | None = None,
write_index: bool = True,
byteorder: str | None = None,
time_stamp: datetime.datetime | None = None,
data_label: str | None = None,
variable_labels: dict[Hashable, str] | None = None,
convert_strl: Sequence[Hashable] | None = None,
version: int | None = None,
compression: CompressionOptions = "infer",
storage_options: StorageOptions = None,
*,
value_labels: dict[Hashable, dict[float | int, str]] | None = None,
):
if version is None:
version = 118 if data.shape[1] <= 32767 else 119
elif version not in (118, 119):
raise ValueError("version must be either 118 or 119.")
elif version == 118 and data.shape[1] > 32767:
raise ValueError(
"You must use version 119 for data sets containing more than"
"32,767 variables"
)
super().__init__(
fname,
data,
convert_dates=convert_dates,
write_index=write_index,
byteorder=byteorder,
time_stamp=time_stamp,
data_label=data_label,
variable_labels=variable_labels,
value_labels=value_labels,
convert_strl=convert_strl,
compression=compression,
storage_options=storage_options,
)
# Override version set in StataWriter117 init
self._dta_version = version
def _validate_variable_name(self, name: str) -> str:
"""
Validate variable names for Stata export.
Parameters
----------
name : str
Variable name
Returns
-------
str
The validated name with invalid characters replaced with
underscores.
Notes
-----
        Stata 118+ supports most unicode characters. The only limitation is in
        the ascii range, where the supported characters are a-z, A-Z, 0-9 and _.
"""
# High code points appear to be acceptable
for c in name:
if (
ord(c) < 128
and (c < "A" or c > "Z")
and (c < "a" or c > "z")
and (c < "0" or c > "9")
and c != "_"
) or 128 <= ord(c) < 256:
name = name.replace(c, "_")
return name
| 35.759584 | 88 | 0.578112 |
e1e9da9d25c5563a41a62520c842c6f5153f6558 | 1,138 | py | Python | Spell Compendium/scr/Spell1077 - Cloud of Bewilderment.py | Sagenlicht/ToEE_Mods | a4b07f300df6067f834e09fcbc4c788f1f4e417b | ["MIT"] | 1 | 2021-04-26T08:03:56.000Z | 2021-04-26T08:03:56.000Z | Spell Compendium/scr/Spell1077 - Cloud of Bewilderment.py | Sagenlicht/ToEE_Mods | a4b07f300df6067f834e09fcbc4c788f1f4e417b | ["MIT"] | 2 | 2021-06-11T05:55:01.000Z | 2021-08-03T23:41:02.000Z | Spell Compendium/scr/Spell1077 - Cloud of Bewilderment.py | Sagenlicht/ToEE_Mods | a4b07f300df6067f834e09fcbc4c788f1f4e417b | ["MIT"] | 1 | 2021-05-17T15:37:58.000Z | 2021-05-17T15:37:58.000Z |
from toee import *
def OnBeginSpellCast(spell):
print "Cloud of Bewilderment OnBeginSpellCast"
print "spell.target_list=", spell.target_list
print "spell.caster=", spell.caster, " caster.level= ", spell.caster_level
def OnSpellEffect(spell):
print "Cloud of Bewilderment OnSpellEffect"
targetsToRemove = []
spell.duration = 1 * spell.caster_level # 1 round/cl
for spellTarget in spell.target_list:
targetsToRemove.append(spellTarget.obj)
cloudOfBewildermentObject = game.obj_create(OBJECT_SPELL_GENERIC, spell.target_loc)
casterInitiative = spell.caster.get_initiative()
cloudOfBewildermentObject.d20_status_init()
cloudOfBewildermentObject.set_initiative(casterInitiative)
cloudOfBewildermentObject.condition_add_with_args('sp-Cloud of Bewilderment', spell.id, spell.duration, spell.dc)
spell.target_list.remove_list(targetsToRemove)
#spell.spell_end(spell.id) #Spell end handled by cloudOfBewildermentObject
def OnBeginRound(spell):
print "Cloud of Bewilderment OnBeginRound"
def OnEndSpellCast(spell):
print "Cloud of Bewilderment OnEndSpellCast"
| 35.5625 | 117 | 0.769772 |
b7ed01940d1f73518fb1c9b324a8d177525333cb | 5,080 | py | Python | statarb/src/python/lib/data_handlers/yearn.py | mikimaus78/ml_monorepo | b2c2627ff0e86e27f6829170d0dac168d8e5783b | ["BSD-3-Clause"] | 51 | 2019-02-01T19:43:37.000Z | 2022-03-16T09:07:03.000Z | statarb/src/python/lib/data_handlers/yearn.py | mikimaus78/ml_monorepo | b2c2627ff0e86e27f6829170d0dac168d8e5783b | ["BSD-3-Clause"] | 2 | 2019-02-23T18:54:22.000Z | 2019-11-09T01:30:32.000Z | statarb/src/python/lib/data_handlers/yearn.py | mikimaus78/ml_monorepo | b2c2627ff0e86e27f6829170d0dac168d8e5783b | ["BSD-3-Clause"] | 35 | 2019-02-08T02:00:31.000Z | 2022-03-01T23:17:00.000Z |
import datetime
import os
import os.path
import util
import config
import datafiles
import newdb
import newdb.xrefsolve
database = newdb.get_db()
SOURCE = "yahoo"
def _parseFile(filepath):
#this should only happen when we process the first file ever
if filepath is None:
return {},None,None,None
info = datafiles.read_info_file(filepath)
if os.path.basename(filepath).startswith("yearn_archive.txt"):
backfill = 1
archive = True
elif info['date_last_absent'] is None:
timestamp = util.convert_date_to_millis(info['date_modified'])
backfill = 1
archive = False
else:
timestamp = util.convert_date_to_millis(info['date_first_present'])
backfill = 0
archive = False
file = open(filepath, "r")
data={}
for line in file:
line = line.rstrip("\n")
# Parse date
# XXX all dates need to be in UTC based on exchange of stock
annDate, name, ticker, value, time = line.split("\t")
if time == 'Time Not Supplied':
exactAnnDate = annDate + ' 00:00 UTC'
elif time == 'Before Market Open':
exactAnnDate = annDate + ' 08:00 EST'
elif time == 'After Market Close':
exactAnnDate = annDate + ' 17:00 EST'
else:
exactAnnDate = annDate +" "+ time.replace("ET", "EST")
#annDate to millis
try:
exactAnnDate = util.convert_date_to_millis(exactAnnDate)
except:
util.warning("Failed to parse {}".format(exactAnnDate))
print "Failed to parse {}".format(exactAnnDate)
continue
if archive:
timestamp = util.convert_date_to_millis(annDate) - util.convert_date_to_millis(datetime.timedelta(days=30))
secid = database.getSecidFromXref("TIC", ticker, timestamp, "compustat_idhist", newdb.xrefsolve.preferUS)
if secid is None:
util.warning("Failed to map ticker {}".format(ticker))
continue
coid, issueid = database.getCsidFromSecid(secid)
assert coid is not None
data[(coid,exactAnnDate,backfill)]=annDate
#data.append((coid,exactAnnDate,backfill,timestamp))
file.close()
#get the file start date from the filename
if not archive:
startDate=os.path.normpath(filepath).split("/")[-1][0:8] #split the filepath, take last token and its first 8 chars
else:
startDate="20060101"
return (data,archive,startDate,timestamp)
#A yearn file
def _getDeltas(filepath, source):
localDir=config.load_source_config(source)["local_dir"]
lastFilepath=database.getLastProcessedFile(source)
if lastFilepath is not None:
lastFilepath="/".join((os.environ["DATA_DIR"],localDir,lastFilepath))
(lastData,lastArchive,lastStartDate,lastBornMillis)=_parseFile(lastFilepath)
currentData,currentArchive,currentStartDate,currentBornMillis=_parseFile(filepath)
assert (lastArchive is None and currentArchive is True) or currentArchive is False
assert (currentStartDate>=lastStartDate)
#get the data that need to be killed. these are data that were in the previous file, but not in
#the current. The death time can be the timestamp in any item in currentData, since, except for
#the very first archive file, all data should have the same timestamp
remove={}
for entry,annDate in lastData.iteritems():
if annDate<currentStartDate: #entry[1] is the annMillis
continue
if entry not in currentData:
remove[entry]=currentBornMillis
#get the data that need be inserted. similar to above
insert={}
for entry,annDate in currentData.iteritems():
if entry not in lastData:
insert[entry]=currentBornMillis
return insert,remove
def process(filepath, source):
insert,remove=_getDeltas(filepath, source)
database.setAttributeAutoCreate(True)
for k,v in remove.iteritems():
coid=k[0]
annMillis=k[1]
died=v
database.deleteAttribute("co", "d", coid, annMillis, source, "FUTURE_ANN_DATE", died)
for k,v in insert.iteritems():
coid=k[0]
annMillis=k[1]
backfill=k[2]
born=v
database.insertAttribute("co", "d", coid, annMillis, source, "FUTURE_ANN_DATE", annMillis, born, None,backfill)
if __name__ == "__main__":
newdb.init_db()
database = newdb.get_db()
try:
database.start_transaction()
process("/apps/logs/ase/data/yahoo/yearn/2009/01/01/yearn_archive.txt.9aaa0838", "yearn")
database.addProcessedFiles("yearn", "2009/01/01/yearn_archive.txt.9aaa0838", None)
process("/apps/logs/ase/data/yahoo/yearn/2009/02/19/20090219.txt.f2b89c95", "yearn")
database.addProcessedFiles("yearn","2009/02/19/20090219.txt.f2b89c95",None)
process("/apps/logs/ase/data/yahoo/yearn/2009/02/20/20090220.txt.b7027c6c", "yearn")
finally:
database.rollback()
| 36.028369 | 123 | 0.651378 |
dcba72a3a010c5052b8857a507d8ae1258bb7d77 | 3,090 | py | Python | nova/api/openstack/compute/plugins/v3/multinic.py | bopopescu/nova-master | 58809056f3a219c6ea3667003f906eeaf581fa95 | ["Apache-2.0"] | 7 | 2017-06-19T19:37:00.000Z | 2019-06-16T02:06:14.000Z | nova/api/openstack/compute/plugins/v3/multinic.py | bopopescu/nova-master | 58809056f3a219c6ea3667003f906eeaf581fa95 | ["Apache-2.0"] | null | null | null | nova/api/openstack/compute/plugins/v3/multinic.py | bopopescu/nova-master | 58809056f3a219c6ea3667003f906eeaf581fa95 | ["Apache-2.0"] | 6 | 2015-06-20T16:07:28.000Z | 2020-08-19T14:57:59.000Z |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The multinic extension."""
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute.schemas.v3 import multinic
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import compute
from nova import exception
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
ALIAS = "os-multinic"
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
class MultinicController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(MultinicController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
@wsgi.action('add_fixed_ip')
@validation.schema(multinic.add_fixed_ip)
def _add_fixed_ip(self, req, id, body):
"""Adds an IP on a given network to an instance."""
context = req.environ['nova.context']
authorize(context)
instance = common.get_instance(self.compute_api, context, id,
want_objects=True)
network_id = body['add_fixed_ip']['network_id']
self.compute_api.add_fixed_ip(context, instance, network_id)
return webob.Response(status_int=202)
@wsgi.action('remove_fixed_ip')
@validation.schema(multinic.remove_fixed_ip)
def _remove_fixed_ip(self, req, id, body):
"""Removes an IP from an instance."""
context = req.environ['nova.context']
authorize(context)
instance = common.get_instance(self.compute_api, context, id,
want_objects=True)
address = body['remove_fixed_ip']['address']
try:
self.compute_api.remove_fixed_ip(context, instance, address)
except exception.FixedIpNotFoundForSpecificInstance as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
return webob.Response(status_int=202)
# Note: The class name is as it has to be for this to be loaded as an
# extension--only first character capitalized.
class Multinic(extensions.V3APIExtensionBase):
"""Multiple network support."""
name = "Multinic"
alias = ALIAS
version = 1
def get_controller_extensions(self):
controller = MultinicController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
def get_resources(self):
return []
| 34.719101
| 79
| 0.694175
|
c3093ad40de216c9d0c67b479f4ade319671ab1e
| 449
|
py
|
Python
|
class/product.py
|
selimppc/python-learning
|
4bf906b4a8773b6605bc30c4270f6eff3cca7fe7
|
[
"MIT"
] | null | null | null |
class/product.py
|
selimppc/python-learning
|
4bf906b4a8773b6605bc30c4270f6eff3cca7fe7
|
[
"MIT"
] | null | null | null |
class/product.py
|
selimppc/python-learning
|
4bf906b4a8773b6605bc30c4270f6eff3cca7fe7
|
[
"MIT"
] | null | null | null |
class Product:
def __init__(self, price):
        # behaves as a regular attribute; assignment here goes through the price setter below
self.price = price
    # the property decorator exposes price through this getter
@property
def price(self):
return self.__price
# price setter
@price.setter
def price(self, value):
if value < 0:
raise ValueError("Price can not be negative.")
self.__price = value
product = Product(50)
print(product.price)
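# Illustrative follow-up (not in the original snippet): a negative value is
# rejected by the price setter defined above.
try:
    product.price = -10
except ValueError as error:
    print(error)  # -> Price can not be negative.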
| 20.409091
| 58
| 0.616927
|
fcccc58e663977de19f2456fc7b76c2253b2b6d4
| 1,891
|
py
|
Python
|
python/ntp_code/scapy_wrapper.py
|
ntpdrop/ieeesp2021
|
084ac380774351cb032e9c1f48c5c6f7b58372fa
|
[
"MIT"
] | null | null | null |
python/ntp_code/scapy_wrapper.py
|
ntpdrop/ieeesp2021
|
084ac380774351cb032e9c1f48c5c6f7b58372fa
|
[
"MIT"
] | null | null | null |
python/ntp_code/scapy_wrapper.py
|
ntpdrop/ieeesp2021
|
084ac380774351cb032e9c1f48c5c6f7b58372fa
|
[
"MIT"
] | null | null | null |
from scapy.layers.inet import IP, UDP
from scapy.layers.ntp import NTP
from scapy.packet import Packet
from scapy.sendrecv import sniff, send, sr1
class ScapyWrapper:
"""
A wrapper class for scapy functionality.
"""
def next_ntp_packet(self, sniff_interface) -> Packet:
"""
        Sniffs for the next incoming NTP packet. This method is blocking.
        :return: the sniffed packet (with OSI layers 3 and 4 still attached).
"""
results = sniff(filter='udp and port 123', count=1, iface=sniff_interface)
pck = (results[0])
return pck
def next_ntp_packet_for_target(self, sniff_interface: str, target_ip_addr: str) -> Packet:
"""
        Sniffs for the next incoming NTP packet that was sent to the specified IP address. This method is blocking.
        :return: the sniffed packet (with OSI layers 3 and 4 still attached).
"""
results = sniff(filter='udp and dst port 123 and dst ' + str(target_ip_addr), count=1, iface=sniff_interface)
pck = (results[0])
return pck
def send(self, pck: Packet):
"""
Sends the given Scapy Packet without waiting for a response.
:param pck:
:return:
"""
send(pck)
def get_upstream_ntp(self, server_addr: str = 'pool.ntp.org') -> Packet:
request = IP(dst=server_addr) / UDP() / NTP()
response = sr1(request, timeout=2)
return response
def restore_ntp_mitm_pck(self, pck: Packet, sport: int, dst_ip: str):
"""
        Prepares an IP()/UDP() packet that was changed by a MITM to be sent back to the original sender.
"""
pck = IP(src=pck[IP].dst, dst=dst_ip) / UDP(dport=sport, sport=123) / pck[NTP]
return pck
# pck[UDP].dport = sport
# pck[UDP].sport = 123
# pck[IP].src = pck[IP].dst
# pck[IP].dst = dst_ip
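# --- Illustrative usage sketch (not part of the original module) -------------
# A minimal flow under assumed conditions: the interface name and addresses are
# placeholders, and sniffing requires sufficient (root) privileges.
if __name__ == "__main__":
    wrapper = ScapyWrapper()
    # Wait for an NTP request addressed to the (hypothetical) server 192.0.2.10.
    client_pck = wrapper.next_ntp_packet_for_target("eth0", "192.0.2.10")
    # Fetch a genuine answer from an upstream pool server.
    upstream = wrapper.get_upstream_ntp()
    if upstream is not None:
        # Readdress the upstream answer to the original client and send it out.
        reply = wrapper.restore_ntp_mitm_pck(
            upstream, sport=client_pck[UDP].sport, dst_ip=client_pck[IP].src)
        wrapper.send(reply)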
| 35.679245
| 117
| 0.619778
|
515b5ee66d18565f1e177d8dae67fc4605459961
| 6,188
|
py
|
Python
|
plugins/modules/oci_waas_captchas_facts.py
|
A7rMtWE57x/oci-ansible-collection
|
80548243a085cd53fd5dddaa8135b5cb43612c66
|
[
"Apache-2.0"
] | null | null | null |
plugins/modules/oci_waas_captchas_facts.py
|
A7rMtWE57x/oci-ansible-collection
|
80548243a085cd53fd5dddaa8135b5cb43612c66
|
[
"Apache-2.0"
] | null | null | null |
plugins/modules/oci_waas_captchas_facts.py
|
A7rMtWE57x/oci-ansible-collection
|
80548243a085cd53fd5dddaa8135b5cb43612c66
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# Copyright (c) 2017, 2020 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_waas_captchas_facts
short_description: Fetches details about one or multiple Captchas resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple Captchas resources in Oracle Cloud Infrastructure
- Gets the list of currently configured CAPTCHA challenges in the Web
Application Firewall configuration of a WAAS policy.
- The order of the CAPTCHA challenges is important. The URL for each
CAPTCHA will be checked in the order they are created.
version_added: "2.9"
author: Oracle (@oracle)
options:
waas_policy_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the WAAS policy.
type: str
required: true
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: List captchas
oci_waas_captchas_facts:
waas_policy_id: ocid1.waaspolicy.oc1..xxxxxxEXAMPLExxxxxx
"""
RETURN = """
captchas:
description:
- List of Captchas resources
returned: on success
type: complex
contains:
url:
description:
- The unique URL path at which to show the CAPTCHA challenge.
returned: on success
type: string
sample: url_example
session_expiration_in_seconds:
description:
- The amount of time before the CAPTCHA expires, in seconds. If unspecified, defaults to `300`.
returned: on success
type: int
sample: 56
title:
description:
- The title used when displaying a CAPTCHA challenge. If unspecified, defaults to `Are you human?`
returned: on success
type: string
sample: title_example
header_text:
description:
- The text to show in the header when showing a CAPTCHA challenge. If unspecified, defaults to 'We have detected an increased number of attempts
to access this website. To help us keep this site secure, please let us know that you are not a robot by entering the text from the image
below.'
returned: on success
type: string
sample: header_text_example
footer_text:
description:
- The text to show in the footer when showing a CAPTCHA challenge. If unspecified, defaults to 'Enter the letters and numbers as they are shown
in the image above.'
returned: on success
type: string
sample: footer_text_example
failure_message:
description:
- The text to show when incorrect CAPTCHA text is entered. If unspecified, defaults to `The CAPTCHA was incorrect. Try again.`
returned: on success
type: string
sample: failure_message_example
submit_label:
description:
- The text to show on the label of the CAPTCHA challenge submit button. If unspecified, defaults to `Yes, I am human`.
returned: on success
type: string
sample: submit_label_example
sample: [{
"url": "url_example",
"session_expiration_in_seconds": 56,
"title": "title_example",
"header_text": "header_text_example",
"footer_text": "footer_text_example",
"failure_message": "failure_message_example",
"submit_label": "submit_label_example"
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.waas import WaasClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class CaptchasFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: list"""
def get_required_params_for_list(self):
return [
"waas_policy_id",
]
def list_resources(self):
optional_list_method_params = []
optional_kwargs = dict(
(param, self.module.params[param])
for param in optional_list_method_params
if self.module.params.get(param) is not None
)
return oci_common_utils.list_all_resources(
self.client.list_captchas,
waas_policy_id=self.module.params.get("waas_policy_id"),
**optional_kwargs
)
CaptchasFactsHelperCustom = get_custom_class("CaptchasFactsHelperCustom")
class ResourceFactsHelper(CaptchasFactsHelperCustom, CaptchasFactsHelperGen):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(dict(waas_policy_id=dict(type="str", required=True),))
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="captchas",
service_client_class=WaasClient,
namespace="waas",
)
result = []
if resource_facts_helper.is_get():
result = [resource_facts_helper.get()]
elif resource_facts_helper.is_list():
result = resource_facts_helper.list()
else:
resource_facts_helper.fail()
module.exit_json(captchas=result)
if __name__ == "__main__":
main()
| 33.448649
| 160
| 0.666613
|
acd731ca79dcf6e3169b9f8777339343d8b265a7
| 813
|
py
|
Python
|
movies/movies/urls.py
|
Mpreyzner/django-movies
|
b578285ee7717c39383f6a07b5b0b46a4d66c617
|
[
"MIT"
] | null | null | null |
movies/movies/urls.py
|
Mpreyzner/django-movies
|
b578285ee7717c39383f6a07b5b0b46a4d66c617
|
[
"MIT"
] | null | null | null |
movies/movies/urls.py
|
Mpreyzner/django-movies
|
b578285ee7717c39383f6a07b5b0b46a4d66c617
|
[
"MIT"
] | null | null | null |
"""movies URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
# url(r'^admin/', include(admin.site.urls)),
url(r'^api/', include('api.urls')),
]
| 35.347826
| 77
| 0.698647
|
ac0f8f3fee6a526e1c440612498bff13e374dbb7
| 10,261
|
py
|
Python
|
tests/handling/daemons/test_daemon_termination.py
|
bradfair/kopf
|
06d00944be947bf63a56b10fd4fdc3269d48feb5
|
[
"MIT"
] | 855
|
2020-08-19T09:40:38.000Z
|
2022-03-31T19:13:29.000Z
|
tests/handling/daemons/test_daemon_termination.py
|
bradfair/kopf
|
06d00944be947bf63a56b10fd4fdc3269d48feb5
|
[
"MIT"
] | 715
|
2019-12-23T14:17:35.000Z
|
2022-03-30T20:54:45.000Z
|
tests/handling/daemons/test_daemon_termination.py
|
bradfair/kopf
|
06d00944be947bf63a56b10fd4fdc3269d48feb5
|
[
"MIT"
] | 97
|
2019-04-25T09:32:54.000Z
|
2022-03-30T10:15:30.000Z
|
import asyncio
import contextlib
import logging
import pytest
import kopf
async def test_daemon_exits_gracefully_and_instantly_on_resource_deletion(
settings, resource, dummy, simulate_cycle,
caplog, assert_logs, k8s_mocked, frozen_time, mocker, timer):
caplog.set_level(logging.DEBUG)
# A daemon-under-test.
@kopf.daemon(*resource, id='fn')
async def fn(**kwargs):
dummy.kwargs = kwargs
dummy.steps['called'].set()
await kwargs['stopped'].wait()
# 0th cycle: trigger spawning and wait until ready. Assume the finalizers are already added.
finalizer = settings.persistence.finalizer
event_object = {'metadata': {'finalizers': [finalizer]}}
await simulate_cycle(event_object)
await dummy.steps['called'].wait()
# 1st stage: trigger termination due to resource deletion.
mocker.resetall()
event_object.setdefault('metadata', {}).update({'deletionTimestamp': '...'})
await simulate_cycle(event_object)
# Check that the daemon has exited near-instantly, with no delays.
with timer:
await dummy.wait_for_daemon_done()
assert timer.seconds < 0.01 # near-instantly
assert k8s_mocked.sleep.call_count == 0
assert k8s_mocked.patch.call_count == 1
assert k8s_mocked.patch.call_args_list[0][1]['payload']['metadata']['finalizers'] == []
async def test_daemon_exits_gracefully_and_instantly_on_operator_exiting(
settings, resource, dummy, simulate_cycle, background_daemon_killer,
caplog, assert_logs, k8s_mocked, frozen_time, mocker, timer):
caplog.set_level(logging.DEBUG)
# A daemon-under-test.
@kopf.daemon(*resource, id='fn')
async def fn(**kwargs):
dummy.kwargs = kwargs
dummy.steps['called'].set()
await kwargs['stopped'].wait()
# 0th cycle: trigger spawning and wait until ready. Assume the finalizers are already added.
finalizer = settings.persistence.finalizer
event_object = {'metadata': {'finalizers': [finalizer]}}
await simulate_cycle(event_object)
await dummy.steps['called'].wait()
# 1st stage: trigger termination due to operator exiting.
mocker.resetall()
background_daemon_killer.cancel()
# Check that the daemon has exited near-instantly, with no delays.
with timer:
await dummy.wait_for_daemon_done()
assert timer.seconds < 0.01 # near-instantly
assert k8s_mocked.sleep.call_count == 0
assert k8s_mocked.patch.call_count == 0
# To prevent double-cancelling of the scheduler's system tasks in the fixture, let them finish:
with contextlib.suppress(asyncio.CancelledError):
await background_daemon_killer
@pytest.mark.usefixtures('background_daemon_killer')
async def test_daemon_exits_gracefully_and_instantly_on_operator_pausing(
settings, memories, resource, dummy, simulate_cycle, conflicts_found,
caplog, assert_logs, k8s_mocked, frozen_time, mocker, timer):
caplog.set_level(logging.DEBUG)
# A daemon-under-test.
@kopf.daemon(*resource, id='fn')
async def fn(**kwargs):
dummy.kwargs = kwargs
dummy.steps['called'].set()
await kwargs['stopped'].wait()
# 0th cycle: trigger spawning and wait until ready. Assume the finalizers are already added.
finalizer = settings.persistence.finalizer
event_object = {'metadata': {'finalizers': [finalizer]}}
await simulate_cycle(event_object)
await dummy.steps['called'].wait()
# 1st stage: trigger termination due to the operator's pause.
mocker.resetall()
await conflicts_found.turn_to(True)
# Check that the daemon has exited near-instantly, with no delays.
with timer:
await dummy.wait_for_daemon_done()
assert timer.seconds < 0.01 # near-instantly
# There is no way to test for re-spawning here: it is done by watch-events,
# which are tested by the paused operators elsewhere (test_daemon_spawning.py).
# We only test that it is capable for respawning (not forever-stopped):
memory = await memories.recall(event_object)
assert not memory.daemons_memory.forever_stopped
async def test_daemon_exits_instantly_via_cancellation_with_backoff(
settings, resource, dummy, simulate_cycle,
caplog, assert_logs, k8s_mocked, frozen_time, mocker):
caplog.set_level(logging.DEBUG)
dummy.steps['finish'].set()
# A daemon-under-test.
@kopf.daemon(*resource, id='fn', cancellation_backoff=5, cancellation_timeout=10)
async def fn(**kwargs):
dummy.kwargs = kwargs
dummy.steps['called'].set()
try:
await asyncio.Event().wait() # this one is cancelled.
except asyncio.CancelledError:
await dummy.steps['finish'].wait() # simulated slow (non-instant) exiting.
# Trigger spawning and wait until ready. Assume the finalizers are already added.
finalizer = settings.persistence.finalizer
event_object = {'metadata': {'finalizers': [finalizer]}}
await simulate_cycle(event_object)
await dummy.steps['called'].wait()
# 1st stage: trigger termination due to resource deletion. Wait for backoff.
mocker.resetall()
event_object.setdefault('metadata', {}).update({'deletionTimestamp': '...'})
await simulate_cycle(event_object)
assert k8s_mocked.sleep.call_count == 1
assert k8s_mocked.sleep.call_args_list[0][0][0] == 5.0
assert k8s_mocked.patch.call_count == 1
assert k8s_mocked.patch.call_args_list[0][1]['payload']['status']['kopf']['dummy']
# 2nd cycle: cancelling after the backoff is reached. Wait for cancellation timeout.
mocker.resetall()
frozen_time.tick(5) # backoff time or slightly above it
await simulate_cycle(event_object)
assert k8s_mocked.sleep.call_count == 0
assert k8s_mocked.patch.call_count == 1
assert k8s_mocked.patch.call_args_list[0][1]['payload']['metadata']['finalizers'] == []
# Cleanup.
await dummy.wait_for_daemon_done()
async def test_daemon_exits_slowly_via_cancellation_with_backoff(
settings, resource, dummy, simulate_cycle,
caplog, assert_logs, k8s_mocked, frozen_time, mocker):
caplog.set_level(logging.DEBUG)
# A daemon-under-test.
@kopf.daemon(*resource, id='fn', cancellation_backoff=5, cancellation_timeout=10)
async def fn(**kwargs):
dummy.kwargs = kwargs
dummy.steps['called'].set()
try:
await asyncio.Event().wait() # this one is cancelled.
except asyncio.CancelledError:
await dummy.steps['finish'].wait() # simulated slow (non-instant) exiting.
# Trigger spawning and wait until ready. Assume the finalizers are already added.
finalizer = settings.persistence.finalizer
event_object = {'metadata': {'finalizers': [finalizer]}}
await simulate_cycle(event_object)
await dummy.steps['called'].wait()
# 1st stage: trigger termination due to resource deletion. Wait for backoff.
mocker.resetall()
event_object.setdefault('metadata', {}).update({'deletionTimestamp': '...'})
await simulate_cycle(event_object)
assert k8s_mocked.sleep.call_count == 1
assert k8s_mocked.sleep.call_args_list[0][0][0] == 5.0
assert k8s_mocked.patch.call_count == 1
assert k8s_mocked.patch.call_args_list[0][1]['payload']['status']['kopf']['dummy']
# 2nd cycle: cancelling after the backoff is reached. Wait for cancellation timeout.
mocker.resetall()
frozen_time.tick(5) # backoff time or slightly above it
await simulate_cycle(event_object)
assert k8s_mocked.sleep.call_count == 1
assert k8s_mocked.sleep.call_args_list[0][0][0] == 10.0
assert k8s_mocked.patch.call_count == 1
assert k8s_mocked.patch.call_args_list[0][1]['payload']['status']['kopf']['dummy']
# 3rd cycle: the daemon has exited, the resource should be unblocked from actual deletion.
mocker.resetall()
frozen_time.tick(1) # any time below timeout
dummy.steps['finish'].set()
await asyncio.sleep(0)
await simulate_cycle(event_object)
await dummy.wait_for_daemon_done()
assert k8s_mocked.sleep.call_count == 0
assert k8s_mocked.patch.call_count == 1
assert k8s_mocked.patch.call_args_list[0][1]['payload']['metadata']['finalizers'] == []
async def test_daemon_is_abandoned_due_to_cancellation_timeout_reached(
settings, resource, dummy, simulate_cycle,
caplog, assert_logs, k8s_mocked, frozen_time, mocker):
caplog.set_level(logging.DEBUG)
# A daemon-under-test.
@kopf.daemon(*resource, id='fn', cancellation_timeout=10)
async def fn(**kwargs):
dummy.kwargs = kwargs
dummy.steps['called'].set()
try:
await dummy.steps['finish'].wait() # this one is cancelled.
except asyncio.CancelledError:
await dummy.steps['finish'].wait() # simulated disobedience to be cancelled.
    # 0th cycle: trigger spawning and wait until ready. Assume the finalizers are already added.
finalizer = settings.persistence.finalizer
event_object = {'metadata': {'finalizers': [finalizer]}}
await simulate_cycle(event_object)
await dummy.steps['called'].wait()
# 1st stage: trigger termination due to resource deletion. Wait for backoff.
mocker.resetall()
event_object.setdefault('metadata', {}).update({'deletionTimestamp': '...'})
await simulate_cycle(event_object)
assert k8s_mocked.sleep.call_count == 1
assert k8s_mocked.sleep.call_args_list[0][0][0] == 10.0
assert k8s_mocked.patch.call_count == 1
assert k8s_mocked.patch.call_args_list[0][1]['payload']['status']['kopf']['dummy']
    # 2nd cycle: the daemon has exited, the resource should be unblocked from actual deletion.
mocker.resetall()
frozen_time.tick(50)
with pytest.warns(ResourceWarning, match=r"Daemon .+ did not exit in time"):
await simulate_cycle(event_object)
assert k8s_mocked.sleep.call_count == 0
assert k8s_mocked.patch.call_count == 1
assert k8s_mocked.patch.call_args_list[0][1]['payload']['metadata']['finalizers'] == []
assert_logs(["Daemon 'fn' did not exit in time. Leaving it orphaned."])
# Cleanup.
dummy.steps['finish'].set()
await dummy.wait_for_daemon_done()
| 39.92607
| 99
| 0.704025
|
478e2b82f6b7001f0bbc41ed94838719a3d91096
| 3,218
|
py
|
Python
|
sandbox/apps/python/stencil/jacobi2D/app_tuner.py
|
rcodin/polymage
|
653487be125dec4950d1c65da4f736fa05fb938f
|
[
"Apache-2.0"
] | 10
|
2016-07-22T06:53:11.000Z
|
2021-02-19T06:22:00.000Z
|
sandbox/apps/python/stencil/jacobi2D/app_tuner.py
|
rcodin/polymage
|
653487be125dec4950d1c65da4f736fa05fb938f
|
[
"Apache-2.0"
] | null | null | null |
sandbox/apps/python/stencil/jacobi2D/app_tuner.py
|
rcodin/polymage
|
653487be125dec4950d1c65da4f736fa05fb938f
|
[
"Apache-2.0"
] | 2
|
2017-11-21T20:29:36.000Z
|
2021-05-21T01:52:05.000Z
|
from __init__ import *
import sys
sys.path.insert(0, ROOT+"/apps/python/")
from cpp_compiler import *
from polymage_jacobi import stencil_jacobi
from exec_pipe import custom_exec_jacobi
from constructs import *
from compiler import *
import tuner
def auto_tune(app_data):
pipe_data = app_data['pipe_data']
app_name = app_data['app']
stencil = stencil_jacobi(app_data)
live_outs = [stencil]
N = pipe_data['N']
param_estimates = [(N, app_data['N'])]
param_constraints = [ Condition(N, '==', app_data['N']) ]
dst_path = "/tmp"
group_size_configs = [2, 4, 6, 8]
tile_size_configs = []
tile_size_configs.append([8, 32])
tile_size_configs.append([8, 64])
tile_size_configs.append([8, 128])
tile_size_configs.append([8, 256])
tile_size_configs.append([8, 512])
tile_size_configs.append([16, 64])
tile_size_configs.append([16, 128])
tile_size_configs.append([16, 256])
tile_size_configs.append([16, 512])
tile_size_configs.append([32, 64])
tile_size_configs.append([32, 128])
tile_size_configs.append([32, 256])
tile_size_configs.append([32, 512])
tile_size_configs.append([64, 128])
tile_size_configs.append([64, 256])
# relative path to root directory from app dir
ROOT = app_data['ROOT']
opts = []
if app_data['early_free']:
opts += ['early_free']
if app_data['optimize_storage']:
opts += ['optimize_storage']
if app_data['pool_alloc']:
opts += ['pool_alloc']
if app_data['multipar']:
opts += ['multipar']
gen_compile_string(app_data)
cxx_string = app_data['cxx_string']
# Generate Variants for Tuning
# ============================
gen_config = {"_tuner_app_name": app_name,
"_tuner_live_outs": live_outs,
"_tuner_param_constraints": param_constraints, #optional
"_tuner_param_estimates": param_estimates, #optional
"_tuner_tile_size_configs": tile_size_configs, #optional
"_tuner_group_size_configs": group_size_configs, #optional
"_tuner_opts": opts, #optional
"_tuner_dst_path" : dst_path, # optional
"_tuner_cxx_string" : cxx_string, # optional
"_tuner_root_path" : ROOT, # needed if pool_alloc is set
"_tuner_debug_flag": True, # optional
"_tuner_opt_datadict": app_data
}
_tuner_src_path, _tuner_configs_count, _tuner_pipe = \
tuner.generate(gen_config)
# Execute the generated variants
# ==============================
exec_config = {"_tuner_app_name": app_name,
"_tuner_pipe": _tuner_pipe,
"_tuner_src_path": _tuner_src_path, # optional
"_tuner_configs_count": _tuner_configs_count, # optional
"_tuner_omp_threads": 4, # optional
"_tuner_nruns": 1, # optional
"_tuner_debug_flag": True, # optional
"_tuner_custom_executor": custom_exec_jacobi,
"_tuner_app_data": app_data
}
tuner.execute(exec_config)
| 32.505051
| 76
| 0.610938
|
9b43144bf2bfddba615215c1bc3dba435056647f
| 914
|
py
|
Python
|
kernel/blog/migrations/0008_auto_20180605_2110.py
|
sageteam/behpack
|
3b8afb81dc7da70807308af4c8a2d2ab92b1a133
|
[
"MIT"
] | null | null | null |
kernel/blog/migrations/0008_auto_20180605_2110.py
|
sageteam/behpack
|
3b8afb81dc7da70807308af4c8a2d2ab92b1a133
|
[
"MIT"
] | null | null | null |
kernel/blog/migrations/0008_auto_20180605_2110.py
|
sageteam/behpack
|
3b8afb81dc7da70807308af4c8a2d2ab92b1a133
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.6 on 2018-06-05 21:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0007_auto_20180605_2105'),
]
operations = [
migrations.AlterField(
model_name='news',
name='sku',
field=models.CharField(default='Qbwv-rsmLnQ', help_text='Unique code for refrence to supervisors', max_length=15),
),
migrations.AlterField(
model_name='newsmovies',
name='sku',
field=models.CharField(default='tnOIclcIgNI', help_text='Unique code for refrence to supervisors', max_length=15),
),
migrations.AlterField(
model_name='newsphotos',
name='sku',
field=models.CharField(default='QNjsuMgrQOU', help_text='Unique code for refrence to supervisors', max_length=15),
),
]
| 31.517241
| 126
| 0.615974
|
f2f97eff60a83c945a682155d09d706b652b68a5
| 734
|
py
|
Python
|
hummingbot/strategy/__utils__/trailing_indicators/historical_volatility.py
|
BGTCapital/hummingbot
|
2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242
|
[
"Apache-2.0"
] | 3,027
|
2019-04-04T18:52:17.000Z
|
2022-03-30T09:38:34.000Z
|
hummingbot/strategy/__utils__/trailing_indicators/historical_volatility.py
|
BGTCapital/hummingbot
|
2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242
|
[
"Apache-2.0"
] | 4,080
|
2019-04-04T19:51:11.000Z
|
2022-03-31T23:45:21.000Z
|
hummingbot/strategy/__utils__/trailing_indicators/historical_volatility.py
|
BGTCapital/hummingbot
|
2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242
|
[
"Apache-2.0"
] | 1,342
|
2019-04-04T20:50:53.000Z
|
2022-03-31T15:22:36.000Z
|
from .base_trailing_indicator import BaseTrailingIndicator
import numpy as np
class HistoricalVolatilityIndicator(BaseTrailingIndicator):
def __init__(self, sampling_length: int = 30, processing_length: int = 15):
super().__init__(sampling_length, processing_length)
def _indicator_calculation(self) -> float:
prices = self._sampling_buffer.get_as_numpy_array()
if prices.size > 0:
log_returns = np.diff(np.log(prices))
return np.var(log_returns)
def _processing_calculation(self) -> float:
processing_array = self._processing_buffer.get_as_numpy_array()
if processing_array.size > 0:
return np.sqrt(np.mean(np.nan_to_num(processing_array)))
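# --- Illustrative sketch (not part of the original module) -------------------
# Reproduces the math above with plain numpy: each sampling pass stores the
# variance of log returns, and the processing pass reports the square root of
# the mean of the stored variances.
if __name__ == "__main__":
    prices = np.array([100.0, 100.5, 101.2, 100.8, 101.5])
    log_returns = np.diff(np.log(prices))
    sampled_variance = np.var(log_returns)             # _indicator_calculation
    volatility = np.sqrt(np.mean([sampled_variance]))  # _processing_calculation
    print(volatility)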
| 38.631579
| 79
| 0.717984
|
5d57940dc33e5253f9b78389558de900624c042a
| 1,860
|
py
|
Python
|
pytorch-frontend/caffe2/python/operator_test/trigonometric_op_test.py
|
AndreasKaratzas/stonne
|
2915fcc46cc94196303d81abbd1d79a56d6dd4a9
|
[
"MIT"
] | 40
|
2021-06-01T07:37:59.000Z
|
2022-03-25T01:42:09.000Z
|
pytorch-frontend/caffe2/python/operator_test/trigonometric_op_test.py
|
AndreasKaratzas/stonne
|
2915fcc46cc94196303d81abbd1d79a56d6dd4a9
|
[
"MIT"
] | 14
|
2021-06-01T11:52:46.000Z
|
2022-03-25T02:13:08.000Z
|
pytorch-frontend/caffe2/python/operator_test/trigonometric_op_test.py
|
AndreasKaratzas/stonne
|
2915fcc46cc94196303d81abbd1d79a56d6dd4a9
|
[
"MIT"
] | 7
|
2021-07-20T19:34:26.000Z
|
2022-03-13T21:07:36.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import numpy as np
import unittest
class TestTrigonometricOp(serial.SerializedTestCase):
@given(
X=hu.tensor(elements=hu.floats(min_value=-0.7, max_value=0.7)),
**hu.gcs)
@settings(deadline=None, max_examples=50)
def test_acos(self, X, gc, dc):
self.assertTrigonometricChecks("Acos", X, lambda x: (np.arccos(X),), gc, dc)
@given(
X=hu.tensor(elements=hu.floats(min_value=-0.7, max_value=0.7)),
**hu.gcs)
@settings(deadline=None, max_examples=50)
def test_asin(self, X, gc, dc):
self.assertTrigonometricChecks("Asin", X, lambda x: (np.arcsin(X),), gc, dc)
@given(
X=hu.tensor(elements=hu.floats(min_value=-100, max_value=100)),
**hu.gcs)
@settings(deadline=None, max_examples=50)
def test_atan(self, X, gc, dc):
self.assertTrigonometricChecks("Atan", X, lambda x: (np.arctan(X),), gc, dc)
@given(
X=hu.tensor(elements=hu.floats(min_value=-0.5, max_value=0.5)),
**hu.gcs)
@settings(deadline=None, max_examples=50)
def test_tan(self, X, gc, dc):
self.assertTrigonometricChecks("Tan", X, lambda x: (np.tan(X),), gc, dc)
def assertTrigonometricChecks(self, op_name, input, reference, gc, dc):
op = core.CreateOperator(op_name, ["X"], ["Y"])
self.assertReferenceChecks(gc, op, [input], reference)
self.assertDeviceChecks(dc, op, [input], [0])
self.assertGradientChecks(gc, op, [input], 0, [0])
if __name__ == "__main__":
unittest.main()
| 35.09434
| 84
| 0.676882
|
98b8653e5ebf2e409799bcf8d7310e8127c4a76e
| 2,668
|
py
|
Python
|
tutorial-contents/302_classification.py
|
violaciao/PyTorch-Tutorial
|
b2922fd3f3c8d49be3cae275375a88e844278d2a
|
[
"MIT"
] | null | null | null |
tutorial-contents/302_classification.py
|
violaciao/PyTorch-Tutorial
|
b2922fd3f3c8d49be3cae275375a88e844278d2a
|
[
"MIT"
] | null | null | null |
tutorial-contents/302_classification.py
|
violaciao/PyTorch-Tutorial
|
b2922fd3f3c8d49be3cae275375a88e844278d2a
|
[
"MIT"
] | 1
|
2019-07-15T03:10:29.000Z
|
2019-07-15T03:10:29.000Z
|
"""
View more, visit my tutorial page: https://morvanzhou.github.io/tutorials/
My Youtube Channel: https://www.youtube.com/user/MorvanZhou
Dependencies:
torch: 0.1.11
matplotlib
"""
import torch
from torch.autograd import Variable
import torch.nn.functional as F
import matplotlib.pyplot as plt
torch.manual_seed(1) # reproducible
# make fake data
n_data = torch.ones(100, 2)
x0 = torch.normal(2*n_data, 1) # class0 x data (tensor), shape=(100, 2)
y0 = torch.zeros(100)               # class0 y data (tensor), shape=(100,)
x1 = torch.normal(-2*n_data, 1) # class1 x data (tensor), shape=(100, 2)
y1 = torch.ones(100)                # class1 y data (tensor), shape=(100,)
x = torch.cat((x0, x1), 0).type(torch.FloatTensor) # shape (200, 2) FloatTensor = 32-bit floating
y = torch.cat((y0, y1), ).type(torch.LongTensor) # shape (200,) LongTensor = 64-bit integer
# torch can only train on Variable, so convert them to Variable
x, y = Variable(x), Variable(y)
# plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=y.data.numpy(), s=100, lw=0, cmap='RdYlGn')
# plt.show()
class Net(torch.nn.Module):
def __init__(self, n_feature, n_hidden, n_output):
super(Net, self).__init__()
self.hidden = torch.nn.Linear(n_feature, n_hidden) # hidden layer
self.out = torch.nn.Linear(n_hidden, n_output) # output layer
def forward(self, x):
x = F.relu(self.hidden(x)) # activation function for hidden layer
x = self.out(x)
return x
net = Net(n_feature=2, n_hidden=10, n_output=2) # define the network
print(net) # net architecture
optimizer = torch.optim.SGD(net.parameters(), lr=0.02)
loss_func = torch.nn.CrossEntropyLoss()  # the target label is NOT one-hot encoded
plt.ion() # something about plotting
for t in range(100):
out = net(x) # input x and predict based on x
loss = loss_func(out, y) # must be (1. nn output, 2. target), the target label is NOT one-hotted
optimizer.zero_grad() # clear gradients for next train
loss.backward() # backpropagation, compute gradients
optimizer.step() # apply gradients
if t % 2 == 0:
# plot and show learning process
plt.cla()
prediction = torch.max(F.softmax(out), 1)[1]
pred_y = prediction.data.numpy().squeeze()
target_y = y.data.numpy()
plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=pred_y, s=100, lw=0, cmap='RdYlGn')
accuracy = sum(pred_y == target_y)/200
plt.text(1.5, -4, 'Accuracy=%.2f' % accuracy, fontdict={'size': 20, 'color': 'red'})
plt.pause(0.1)
plt.ioff()
plt.show()
| 37.577465
| 104
| 0.636807
|
a7da7e2e1e20960405db827c80352a66550e0abd
| 3,981
|
py
|
Python
|
src/command_modules/azure-cli-lab/azure/cli/command_modules/lab/sdk/devtestlabs/models/disk.py
|
saurabsa/azure-cli-old
|
f77477a98c9aa9cb55daf5b0d2f410d1455a9225
|
[
"MIT"
] | null | null | null |
src/command_modules/azure-cli-lab/azure/cli/command_modules/lab/sdk/devtestlabs/models/disk.py
|
saurabsa/azure-cli-old
|
f77477a98c9aa9cb55daf5b0d2f410d1455a9225
|
[
"MIT"
] | 2
|
2021-03-25T21:38:56.000Z
|
2021-11-15T17:46:45.000Z
|
src/command_modules/azure-cli-lab/azure/cli/command_modules/lab/sdk/devtestlabs/models/disk.py
|
Visual-Studio-China/azure-cli-int
|
48c7c7f371a0ecc4ebfd4dcfdc72764beddf5c31
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# coding: utf-8
# pylint: skip-file
from msrest.serialization import Model
class Disk(Model):
"""A Disk.
:param disk_type: The storage type for the disk (i.e. Standard, Premium).
Possible values include: 'Standard', 'Premium'
:type disk_type: str or :class:`StorageType
<azure.mgmt.devtestlabs.models.StorageType>`
:param disk_size_gi_b: The size of the disk in GibiBytes.
:type disk_size_gi_b: int
:param leased_by_lab_vm_id: The resource ID of the VM to which this disk
is leased.
:type leased_by_lab_vm_id: str
:param disk_blob_name: When backed by a blob, the name of the VHD blob
without extension.
:type disk_blob_name: str
:param disk_uri: When backed by a blob, the URI of underlying blob.
:type disk_uri: str
:param created_date: The creation date of the disk.
:type created_date: datetime
:param host_caching: The host caching policy of the disk (i.e. None,
ReadOnly, ReadWrite).
:type host_caching: str
:param managed_disk_id: When backed by managed disk, this is the ID of the
compute disk resource.
:type managed_disk_id: str
:param provisioning_state: The provisioning status of the resource.
:type provisioning_state: str
:param unique_identifier: The unique immutable identifier of a resource
(Guid).
:type unique_identifier: str
:param id: The identifier of the resource.
:type id: str
:param name: The name of the resource.
:type name: str
:param type: The type of the resource.
:type type: str
:param location: The location of the resource.
:type location: str
:param tags: The tags of the resource.
:type tags: dict
"""
_attribute_map = {
'disk_type': {'key': 'properties.diskType', 'type': 'str'},
'disk_size_gi_b': {'key': 'properties.diskSizeGiB', 'type': 'int'},
'leased_by_lab_vm_id': {'key': 'properties.leasedByLabVmId', 'type': 'str'},
'disk_blob_name': {'key': 'properties.diskBlobName', 'type': 'str'},
'disk_uri': {'key': 'properties.diskUri', 'type': 'str'},
'created_date': {'key': 'properties.createdDate', 'type': 'iso-8601'},
'host_caching': {'key': 'properties.hostCaching', 'type': 'str'},
'managed_disk_id': {'key': 'properties.managedDiskId', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'unique_identifier': {'key': 'properties.uniqueIdentifier', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(self, disk_type=None, disk_size_gi_b=None, leased_by_lab_vm_id=None, disk_blob_name=None, disk_uri=None, created_date=None, host_caching=None, managed_disk_id=None, provisioning_state=None, unique_identifier=None, id=None, name=None, type=None, location=None, tags=None):
self.disk_type = disk_type
self.disk_size_gi_b = disk_size_gi_b
self.leased_by_lab_vm_id = leased_by_lab_vm_id
self.disk_blob_name = disk_blob_name
self.disk_uri = disk_uri
self.created_date = created_date
self.host_caching = host_caching
self.managed_disk_id = managed_disk_id
self.provisioning_state = provisioning_state
self.unique_identifier = unique_identifier
self.id = id
self.name = name
self.type = type
self.location = location
self.tags = tags
| 45.758621
| 288
| 0.632002
|
ea2bb504948534f70dfd8c64176136a0d48e6120
| 400
|
py
|
Python
|
50down/titleToNumber.py
|
NeroCube/leetcode-python-practice
|
c173add1bd66c385de70f19bc005a635c6584f20
|
[
"MIT"
] | null | null | null |
50down/titleToNumber.py
|
NeroCube/leetcode-python-practice
|
c173add1bd66c385de70f19bc005a635c6584f20
|
[
"MIT"
] | null | null | null |
50down/titleToNumber.py
|
NeroCube/leetcode-python-practice
|
c173add1bd66c385de70f19bc005a635c6584f20
|
[
"MIT"
] | null | null | null |
'''
Excel Sheet Column Number
Problem:
Convert an Excel sheet column title into its decimal column number.
Idea:
- In Excel, Z represents 26, AA represents 27, AB represents 28
- So this is a base-26 to base-10 conversion (remainder arithmetic)
- ord('A') = 65
Time: O(n)
Space: O(1)
'''
class Solution(object):
def titleToNumber(self, s):
"""
:type s: str
:rtype: int
"""
result = 0
for char in s:
result = result*26 + ord(char) - 64
return result
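# Worked example (illustrative): "AB" -> 1*26 + 2 = 28 and "ZY" -> 26*26 + 25 = 701.
if __name__ == "__main__":
    solution = Solution()
    assert solution.titleToNumber("A") == 1
    assert solution.titleToNumber("Z") == 26
    assert solution.titleToNumber("AB") == 28
    assert solution.titleToNumber("ZY") == 701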
| 16
| 47
| 0.54
|
6def40abd0ce21874cd9f4c2047f01a4e10a3fb3
| 7,608
|
py
|
Python
|
core/polyaxon/utils/date_utils.py
|
admariner/polyaxon
|
ba355c38166047eb11e60de4cee4d7c3b48db323
|
[
"Apache-2.0"
] | 3,200
|
2017-05-09T11:35:31.000Z
|
2022-03-28T05:43:22.000Z
|
core/polyaxon/utils/date_utils.py
|
admariner/polyaxon
|
ba355c38166047eb11e60de4cee4d7c3b48db323
|
[
"Apache-2.0"
] | 1,324
|
2017-06-29T07:21:27.000Z
|
2022-03-27T12:41:10.000Z
|
core/polyaxon/utils/date_utils.py
|
admariner/polyaxon
|
ba355c38166047eb11e60de4cee4d7c3b48db323
|
[
"Apache-2.0"
] | 341
|
2017-01-10T23:06:53.000Z
|
2022-03-10T08:15:18.000Z
|
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytz
from datetime import date, datetime, timedelta
from dateutil import parser as dt_parser
from polyaxon.exceptions import PolyaxonDateTimeFormatterException
epoch = datetime(1970, 1, 1, tzinfo=pytz.utc)
def parse_datetime(value):
if isinstance(value, str):
return dt_parser.parse(value)
return value
def to_timestamp(value):
"""
Convert a time zone aware datetime to a POSIX timestamp (with fractional component.)
"""
value = parse_datetime(value)
return (value - epoch).total_seconds()
def to_datetime(value):
"""
Convert a POSIX timestamp to a time zone aware datetime.
    The timestamp value must be a numeric type (either an integer or a float,
since it may contain a fractional component.)
"""
return epoch + timedelta(seconds=value)
def path_last_modified(filepath: str) -> datetime:
return to_datetime(os.stat(filepath).st_mtime)
def file_modified_since(filepath: str, last_time: datetime) -> bool:
if not last_time:
return True
return path_last_modified(filepath) > last_time
class DateTimeFormatter:
"""
The `DateTimeFormatter` class implements a utility used to create
timestamps from strings and vice-versa.
"""
DATE_FORMAT = "%Y-%m-%d"
DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
DATETIME_HOUR_FORMAT = "%Y-%m-%d %H:%M"
@classmethod
def format_date(cls, timestamp):
"""
Creates a string representing the date information provided by the
given `timestamp` object.
"""
if not timestamp:
raise PolyaxonDateTimeFormatterException(
"timestamp must a valid string {}".format(timestamp)
)
return timestamp.strftime(cls.DATE_FORMAT)
@classmethod
def format_datetime(cls, timestamp):
"""
Creates a string representing the date and time information provided by
the given `timestamp` object.
"""
if not timestamp:
raise PolyaxonDateTimeFormatterException(
"timestamp must a valid string {}".format(timestamp)
)
return timestamp.strftime(cls.DATETIME_FORMAT)
@classmethod
def extract_date(cls, date_str, timezone):
"""
Tries to extract a `datetime` object from the given string, expecting
date information only.
Raises `PolyaxonDateTimeFormatterException` if the extraction fails.
"""
if not date_str:
raise PolyaxonDateTimeFormatterException(
"date_str must a valid string {}.".format(date_str)
)
if not timezone:
raise PolyaxonDateTimeFormatterException(
"timezone is required, received {}".format(timezone)
)
try:
return cls.extract_iso_timestamp(date_str, timezone=timezone)
except (TypeError, ValueError, AttributeError):
pass
try:
return cls.extract_timestamp(date_str, cls.DATE_FORMAT, timezone=timezone)
except (TypeError, ValueError):
raise PolyaxonDateTimeFormatterException(
"Invalid date string {}.".format(date_str)
)
@classmethod
def extract_datetime(cls, datetime_str, timezone):
"""
Tries to extract a `datetime` object from the given string, including
time information.
Raises `PolyaxonDateTimeFormatterException` if the extraction fails.
"""
if not datetime_str:
raise PolyaxonDateTimeFormatterException("datetime_str must a valid string")
if not timezone:
raise PolyaxonDateTimeFormatterException(
"timezone is required, received {}".format(timezone)
)
try:
return cls.extract_iso_timestamp(datetime_str, timezone=timezone)
except (TypeError, ValueError, AttributeError):
pass
try:
return cls.extract_timestamp(
datetime_str, cls.DATETIME_FORMAT, timezone=timezone
)
except (TypeError, ValueError):
raise PolyaxonDateTimeFormatterException(
"Invalid datetime string {}.".format(datetime_str)
)
@classmethod
def extract_datetime_hour(cls, datetime_str, timezone):
"""
Tries to extract a `datetime` object from the given string, including only hours.
Raises `PolyaxonDateTimeFormatterException` if the extraction fails.
"""
if not datetime_str:
raise PolyaxonDateTimeFormatterException("datetime_str must a valid string")
if not timezone:
raise PolyaxonDateTimeFormatterException(
"timezone is required, received {}".format(timezone)
)
try:
return cls.extract_iso_timestamp(datetime_str, timezone=timezone)
except (TypeError, ValueError, AttributeError):
pass
try:
return cls.extract_timestamp(
datetime_str, cls.DATETIME_HOUR_FORMAT, timezone=timezone
)
except (TypeError, ValueError):
raise PolyaxonDateTimeFormatterException(
"Invalid datetime string {}.".format(datetime_str)
)
@classmethod
def extract(cls, timestamp_str, timezone):
"""
Tries to extract a `datetime` object from the given string. First the
datetime format is tried, if it fails, the date format is used for
extraction.
Raises `PolyaxonDateTimeFormatterException` if the extraction fails.
"""
if not timestamp_str:
raise PolyaxonDateTimeFormatterException(
"timestamp_str must a valid string, received {}".format(timestamp_str)
)
if not timezone:
raise PolyaxonDateTimeFormatterException(
"timezone is required, received {}".format(timezone)
)
if isinstance(timestamp_str, (date, datetime)):
return timestamp_str
try:
return cls.extract_datetime(timestamp_str, timezone=timezone)
except PolyaxonDateTimeFormatterException:
pass
try:
return cls.extract_datetime_hour(timestamp_str, timezone=timezone)
except PolyaxonDateTimeFormatterException:
pass
# We leave it to raise
return cls.extract_date(timestamp_str, timezone=timezone)
@staticmethod
def extract_iso_timestamp(timestamp_str, timezone):
timestamp = datetime.fromisoformat(timestamp_str)
if not timestamp.tzinfo and timezone:
timestamp = timestamp.replace(tzinfo=pytz.timezone(timezone))
return timestamp
@staticmethod
def extract_timestamp(timestamp_str, dt_format, timezone):
timestamp = datetime.strptime(timestamp_str, dt_format)
timestamp = timestamp.replace(tzinfo=pytz.timezone(timezone))
return timestamp
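# --- Illustrative usage sketch (not part of the original module) -------------
# The timestamp values are made up; they only show how the helpers fit together.
if __name__ == "__main__":
    aware = datetime(2021, 6, 1, 12, 30, tzinfo=pytz.utc)
    assert to_datetime(to_timestamp(aware)) == aware
    parsed = DateTimeFormatter.extract("2021-06-01 12:30:00", timezone="UTC")
    print(DateTimeFormatter.format_datetime(parsed))  # -> 2021-06-01 12:30:00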
| 32.512821
| 89
| 0.65234
|
637163b866d80d5aef1e3c2f9a2ba34caca86ba0
| 23
|
py
|
Python
|
donation_play/games/gothic_2/__init__.py
|
tmarenko/donation-play
|
548a87af48245c8fde7daf95915af606ba6c456e
|
[
"MIT"
] | null | null | null |
donation_play/games/gothic_2/__init__.py
|
tmarenko/donation-play
|
548a87af48245c8fde7daf95915af606ba6c456e
|
[
"MIT"
] | null | null | null |
donation_play/games/gothic_2/__init__.py
|
tmarenko/donation-play
|
548a87af48245c8fde7daf95915af606ba6c456e
|
[
"MIT"
] | null | null | null |
from .cheater import *
| 11.5
| 22
| 0.73913
|
d95b7573a3f7d66827a6e71825f2259eacf4bcdb
| 10,239
|
py
|
Python
|
mmdnn/conversion/caffe/mapper.py
|
trishitapingolia/MMdnn
|
80f7a46774c2b76dc157030c52a86a0be7595739
|
[
"MIT"
] | 1
|
2021-12-07T01:27:04.000Z
|
2021-12-07T01:27:04.000Z
|
mmdnn/conversion/caffe/mapper.py
|
Ontheroad123/MMdnn
|
80f7a46774c2b76dc157030c52a86a0be7595739
|
[
"MIT"
] | null | null | null |
mmdnn/conversion/caffe/mapper.py
|
Ontheroad123/MMdnn
|
80f7a46774c2b76dc157030c52a86a0be7595739
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
import numpy as np
from mmdnn.conversion.caffe.errors import ConversionError
from mmdnn.conversion.caffe.common_graph import Node
from mmdnn.conversion.caffe.network import DEFAULT_PADDING
from mmdnn.conversion.caffe.utils import get_lower_case
from mmdnn.conversion.common.IR.graph_pb2 import TensorShape
def get_handler_name(node_kind):
return node_kind.lower() if len(node_kind) <= 4 else get_lower_case(node_kind)
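# For example (illustrative): get_handler_name('ReLU') -> 'relu', and, assuming
# get_lower_case does the usual CamelCase-to-snake_case conversion,
# get_handler_name('InnerProduct') -> 'inner_product', which is how node kinds
# line up with the NodeMapper.map_* methods below.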
class NodeMapper(object):
@classmethod
def _convert_output_shape(cls, kwargs, node):
shape = TensorShape()
dim = shape.dim.add()
dim.size = -1
if len(node.output_shape) > 2:
for i in node.output_shape[2:]:
dim = shape.dim.add()
dim.size = i
dim = shape.dim.add()
dim.size = node.output_shape.channels
else:
dim = shape.dim.add()
dim.size = node.output_shape[1]
kwargs['_output_shapes'] = [shape]
@classmethod
def get_kernel_params(cls, node, input_shape):
kwargs = {}
if node.kernel_parameters.global_pooling:
kwargs['kernel_shape'] = [1, input_shape.height, input_shape.width, 1]
kwargs['pads'] = [0] * 8
else:
from mmdnn.conversion.caffe.graph import NodeKind
if node.kind == NodeKind.Pooling:
kwargs['kernel_shape'] = [1, node.kernel_parameters.k_h, node.kernel_parameters.k_w, 1]
elif node.kind in [NodeKind.Convolution, NodeKind.Deconvolution]:
pass
else:
raise ValueError
dilation = node.parameters.dilation[0] if hasattr(node.parameters, 'dilation') and node.parameters.dilation else 1
o_h_caffe = node.output_shape.height
o_w_caffe = node.output_shape.width
ko_h = dilation * (int(node.kernel_parameters.k_h) - 1) + 1
ko_w = dilation * (int(node.kernel_parameters.k_w) - 1) + 1
if node.kind == NodeKind.Deconvolution:
o_h_tf = int(node.kernel_parameters.s_h) * (input_shape.height - 1) + ko_h - 2 * int(node.kernel_parameters.p_h)
o_w_tf = int(node.kernel_parameters.s_w) * (input_shape.width - 1) + ko_w - 2 * int(node.kernel_parameters.p_w)
else:
o_h_tf = (input_shape.height + node.kernel_parameters.p_h * 2 - ko_h + 1) // node.kernel_parameters.s_h
o_w_tf = (input_shape.width + node.kernel_parameters.p_w * 2 - ko_w + 1) // node.kernel_parameters.s_w
kwargs['pads'] = [0, node.kernel_parameters.p_h, node.kernel_parameters.p_w, 0] + \
[0, node.kernel_parameters.p_h + o_h_caffe - o_h_tf, node.kernel_parameters.p_w + o_w_caffe - o_w_tf, 0]
kwargs['strides'] = [1, node.kernel_parameters.s_h, node.kernel_parameters.s_w, 1]
cls._convert_output_shape(kwargs, node)
return kwargs
@classmethod
def map_data(cls, node):
# TODO: We need to identify whether this is 4D image data, otherwise we shouldn't change the dimension order
shape = TensorShape()
dim = shape.dim.add()
dim.size = -1
for i in node.output_shape[2:]:
dim = shape.dim.add()
dim.size = i
dim = shape.dim.add()
dim.size = node.output_shape.channels
kwargs = {'shape': shape} # Ignore the dimension of batch size
cls._convert_output_shape(kwargs, node)
return Node.create('DataInput', **kwargs)
@classmethod
def map_input(cls, node):
return cls.map_data(node)
@classmethod
def map_convolution(cls, node):
parent, _ = node.get_only_parent()
kwargs = cls.get_kernel_params(node, parent.output_shape)
kwargs['kernel_shape'] = [node.kernel_parameters.k_h, node.kernel_parameters.k_w, parent.output_shape.channels, node.parameters.num_output]
kwargs['use_bias'] = node.parameters.bias_term
if node.parameters.dilation:
dilation = node.parameters.dilation[0]
if dilation != 1:
kwargs['dilations'] = [1, dilation, dilation, 1]
kwargs['group'] = node.parameters.group
return Node.create('Conv', **kwargs)
@classmethod
def map_deconvolution(cls, node):
parent, _ = node.get_only_parent()
kwargs = cls.get_kernel_params(node, parent.output_shape)
kwargs['kernel_shape'] = [node.kernel_parameters.k_h, node.kernel_parameters.k_w, node.parameters.num_output, parent.output_shape.channels]
kwargs['use_bias'] = node.parameters.bias_term
if node.parameters.dilation:
dilation = node.parameters.dilation[0]
if dilation != 1:
kwargs['dilations'] = [1, dilation, dilation, 1]
kwargs['group'] = node.parameters.group
return Node.create('ConvTranspose', **kwargs)
@classmethod
def map_crop(cls, node):
kwargs = {}
cls._convert_output_shape(kwargs, node)
offset = node.parameters.offset
if offset:
if len(offset) == 1:
kwargs['border'] = [offset[0], offset[0], 0, 0]
else:
kwargs['border'] = [offset[0], offset[1], 0, 0]
return Node.create('Crop', **kwargs)
@classmethod
def map_relu(cls, node):
kwargs = {}
cls._convert_output_shape(kwargs, node)
return Node.create('Relu', **kwargs)
@classmethod
def map_p_re_lu(cls, node):
# print(node.parameters)
# assert False
try:
scale_value = float(node.parameters.filler.value)
kwargs = {'gamma' : scale_value}
except ConversionError:
kwargs = {'gamma' : 0.25}
cls._convert_output_shape(kwargs, node)
return Node.create('PRelu', **kwargs)
@classmethod
def map_pooling(cls, node):
parent, _ = node.get_only_parent()
kwargs = cls.get_kernel_params(node, parent.output_shape)
if node.parameters.pool == 0:
kwargs['pooling_type'] = 'MAX'
elif node.parameters.pool == 1:
kwargs['pooling_type'] = 'AVG'
else:
# Stochastic pooling, for instance.
raise ConversionError('Unsupported pooling type.')
cls._convert_output_shape(kwargs, node)
return Node.create('Pool', **kwargs)
@classmethod
def _add_flatten_layer(cls, node):
shape = TensorShape()
dim = shape.dim.add()
dim.size = -1
dim = shape.dim.add()
dim.size = 1
for i in node.output_shape[1:]:
dim.size *= i
kwargs = {'_output_shapes' : [shape]}
return Node.create('Flatten', **kwargs)
@classmethod
def map_inner_product(cls, node):
#TODO: Axis
assert node.parameters.axis == 1
#TODO: Unbiased
shape = TensorShape()
dim = shape.dim.add()
dim.size = -1
dim = shape.dim.add()
dim.size = 1
for i in node.output_shape[1:]:
dim.size *= i
kwargs = {'use_bias' : node.parameters.bias_term, 'units' : node.parameters.num_output,
'_output_shapes': [shape]}
        # check whether a Flatten layer is needed
parent, _ = node.get_only_parent()
ret = []
# if parent.output_shape.height > 1 or parent.output_shape.width > 1:
ret.append(cls._add_flatten_layer(parent))
ret.append(Node.create('FullyConnected', **kwargs))
return ret
@classmethod
def map_softmax(cls, node):
kwargs = {}
cls._convert_output_shape(kwargs, node)
return Node.create('Softmax', **kwargs)
@classmethod
def map_lrn(cls, node):
params = node.parameters
assert params.local_size % 2 == 1
kwargs = {'size': int((params.local_size + 1) / 2), 'alpha': params.alpha, 'beta': params.beta, 'k' : params.k}
cls._convert_output_shape(kwargs, node)
return Node.create('LRN', **kwargs)
@classmethod
def map_concat(cls, node):
kwargs = {'axis': (2, 3, 1, 0)[node.parameters.axis]}
cls._convert_output_shape(kwargs, node)
return Node.create('Concat', **kwargs)
@classmethod
def map_dropout(cls, node):
kwargs = {'keep_prob': node.parameters.dropout_ratio}
cls._convert_output_shape(kwargs, node)
return Node.create('Dropout', **kwargs)
@classmethod
def map_batch_norm(cls, node):
kwargs = {'scale' : len(node.data) >= 3, 'bias' : len(node.data) == 4}
epsilon = node.parameters.eps
kwargs['epsilon'] = epsilon
cls._convert_output_shape(kwargs, node)
return Node.create('BatchNorm', **kwargs)
@classmethod
def map_scale(cls, node):
raise NotImplementedError
# TODO: The gamma parameter has to be set (in node.data?) and this should work.
# Also, mean should be set to 0, and var to 1, just to be safe.
scale_value = float(node.parameters.filler.value)
kwargs = {'scale' : True, 'bias' : False, 'gamma' : scale_value, 'epsilon': 0}
return Node.create('BatchNorm', **kwargs)
@classmethod
def map_eltwise(cls, node):
operations = {0: 'Mul', 1: 'Add', 2: 'Max'}
op_code = node.parameters.operation
try:
return Node.create(operations[op_code])
except KeyError:
raise ConversionError('Unknown elementwise operation: {}'.format(op_code))
@classmethod
def map_abs_val(cls, node):
return Node.create('Abs')
@classmethod
def map_tanh(cls, node):
return Node.create('Tanh')
@classmethod
def map_sigmoid(cls, node):
return Node.create('Sigmoid')
@classmethod
def map_reshape(cls, node):
kwargs = {'shape' : [dim for dim in node.output_shape]}
cls._convert_output_shape(kwargs, node)
return Node.create('Reshape', **kwargs)
@classmethod
def map_flatten(cls, node):
return cls._add_flatten_layer(node)
@classmethod
def map_split(cls, node):
# skip the split node
return
| 35.552083
| 147
| 0.611974
|
d51fbb6bdc30da844ea224f9dc34f5465ff4843f
| 17,224
|
py
|
Python
|
adafruit_lsm9ds1.py
|
Flodip/Adafruit_CircuitPython_LSM9DS1
|
e5ca448cb2009aa82eb557c7a73dc1317cf9b657
|
[
"MIT"
] | null | null | null |
adafruit_lsm9ds1.py
|
Flodip/Adafruit_CircuitPython_LSM9DS1
|
e5ca448cb2009aa82eb557c7a73dc1317cf9b657
|
[
"MIT"
] | null | null | null |
adafruit_lsm9ds1.py
|
Flodip/Adafruit_CircuitPython_LSM9DS1
|
e5ca448cb2009aa82eb557c7a73dc1317cf9b657
|
[
"MIT"
] | null | null | null |
# The MIT License (MIT)
#
# Copyright (c) 2017 Tony DiCola for Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_lsm9ds1`
====================================================
CircuitPython module for the LSM9DS1 accelerometer, magnetometer, gyroscope.
Based on the driver from:
https://github.com/adafruit/Adafruit_LSM9DS1
See examples/simpletest.py for a demo of the usage.
* Author(s): Tony DiCola
Implementation Notes
--------------------
**Hardware:**
* Adafruit `9-DOF Accel/Mag/Gyro+Temp Breakout Board - LSM9DS1
<https://www.adafruit.com/product/3387>`_ (Product ID: 3387)
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the ESP8622 and M0-based boards:
https://github.com/adafruit/circuitpython/releases
* Adafruit's Bus Device library: https://github.com/adafruit/Adafruit_CircuitPython_BusDevice
"""
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_LSM9DS1.git"
import time
try:
import struct
except ImportError:
import ustruct as struct
import adafruit_bus_device.i2c_device as i2c_device
import adafruit_bus_device.spi_device as spi_device
from micropython import const
# Internal constants and register values:
_LSM9DS1_ADDRESS_ACCELGYRO = const(0x6B)
_LSM9DS1_ADDRESS_MAG = const(0x1E)
_LSM9DS1_XG_ID = const(0b01101000)
_LSM9DS1_MAG_ID = const(0b00111101)
_LSM9DS1_ACCEL_MG_LSB_2G = 0.061
_LSM9DS1_ACCEL_MG_LSB_4G = 0.122
_LSM9DS1_ACCEL_MG_LSB_8G = 0.244
_LSM9DS1_ACCEL_MG_LSB_16G = 0.732
_LSM9DS1_MAG_MGAUSS_4GAUSS = 0.14
_LSM9DS1_MAG_MGAUSS_8GAUSS = 0.29
_LSM9DS1_MAG_MGAUSS_12GAUSS = 0.43
_LSM9DS1_MAG_MGAUSS_16GAUSS = 0.58
_LSM9DS1_GYRO_DPS_DIGIT_245DPS = 0.00875
_LSM9DS1_GYRO_DPS_DIGIT_500DPS = 0.01750
_LSM9DS1_GYRO_DPS_DIGIT_2000DPS = 0.07000
_LSM9DS1_TEMP_LSB_DEGREE_CELSIUS = 8 # 1C = 8, 25C = 200, etc.
_LSM9DS1_REGISTER_WHO_AM_I_XG = const(0x0F)
_LSM9DS1_REGISTER_CTRL_REG1_G = const(0x10)
_LSM9DS1_REGISTER_CTRL_REG2_G = const(0x11)
_LSM9DS1_REGISTER_CTRL_REG3_G = const(0x12)
_LSM9DS1_REGISTER_TEMP_OUT_L = const(0x15)
_LSM9DS1_REGISTER_TEMP_OUT_H = const(0x16)
_LSM9DS1_REGISTER_STATUS_REG = const(0x17)
_LSM9DS1_REGISTER_OUT_X_L_G = const(0x18)
_LSM9DS1_REGISTER_OUT_X_H_G = const(0x19)
_LSM9DS1_REGISTER_OUT_Y_L_G = const(0x1A)
_LSM9DS1_REGISTER_OUT_Y_H_G = const(0x1B)
_LSM9DS1_REGISTER_OUT_Z_L_G = const(0x1C)
_LSM9DS1_REGISTER_OUT_Z_H_G = const(0x1D)
_LSM9DS1_REGISTER_CTRL_REG4 = const(0x1E)
_LSM9DS1_REGISTER_CTRL_REG5_XL = const(0x1F)
_LSM9DS1_REGISTER_CTRL_REG6_XL = const(0x20)
_LSM9DS1_REGISTER_CTRL_REG7_XL = const(0x21)
_LSM9DS1_REGISTER_CTRL_REG8 = const(0x22)
_LSM9DS1_REGISTER_CTRL_REG9 = const(0x23)
_LSM9DS1_REGISTER_CTRL_REG10 = const(0x24)
_LSM9DS1_REGISTER_OUT_X_L_XL = const(0x28)
_LSM9DS1_REGISTER_OUT_X_H_XL = const(0x29)
_LSM9DS1_REGISTER_OUT_Y_L_XL = const(0x2A)
_LSM9DS1_REGISTER_OUT_Y_H_XL = const(0x2B)
_LSM9DS1_REGISTER_OUT_Z_L_XL = const(0x2C)
_LSM9DS1_REGISTER_OUT_Z_H_XL = const(0x2D)
_LSM9DS1_REGISTER_WHO_AM_I_M = const(0x0F)
_LSM9DS1_REGISTER_CTRL_REG1_M = const(0x20)
_LSM9DS1_REGISTER_CTRL_REG2_M = const(0x21)
_LSM9DS1_REGISTER_CTRL_REG3_M = const(0x22)
_LSM9DS1_REGISTER_CTRL_REG4_M = const(0x23)
_LSM9DS1_REGISTER_CTRL_REG5_M = const(0x24)
_LSM9DS1_REGISTER_STATUS_REG_M = const(0x27)
_LSM9DS1_REGISTER_OUT_X_L_M = const(0x28)
_LSM9DS1_REGISTER_OUT_X_H_M = const(0x29)
_LSM9DS1_REGISTER_OUT_Y_L_M = const(0x2A)
_LSM9DS1_REGISTER_OUT_Y_H_M = const(0x2B)
_LSM9DS1_REGISTER_OUT_Z_L_M = const(0x2C)
_LSM9DS1_REGISTER_OUT_Z_H_M = const(0x2D)
_LSM9DS1_REGISTER_CFG_M = const(0x30)
_LSM9DS1_REGISTER_INT_SRC_M = const(0x31)
_MAGTYPE = True
_XGTYPE = False
_SENSORS_GRAVITY_STANDARD = 9.80665
# User facing constants/module globals.
ACCELRANGE_2G = 0b00 << 3
ACCELRANGE_16G = 0b01 << 3
ACCELRANGE_4G = 0b10 << 3
ACCELRANGE_8G = 0b11 << 3
MAGGAIN_4GAUSS = 0b00 << 5 # +/- 4 gauss
MAGGAIN_8GAUSS = 0b01 << 5 # +/- 8 gauss
MAGGAIN_12GAUSS = 0b10 << 5 # +/- 12 gauss
MAGGAIN_16GAUSS = 0b11 << 5 # +/- 16 gauss
GYROSCALE_245DPS = 0b00 << 3 # +/- 245 degrees/s rotation
GYROSCALE_500DPS = 0b01 << 3 # +/- 500 degrees/s rotation
GYROSCALE_2000DPS = 0b11 << 3 # +/- 2000 degrees/s rotation
def _twos_comp(val, bits):
    # Convert an unsigned integer in 2's complement form of the specified bit
# length to its signed integer value and return it.
if val & (1 << (bits - 1)) != 0:
return val - (1 << bits)
return val
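# Added illustrative check (not part of the original driver): a quick sanity
# example of the two's-complement decoding used by read_temp_raw below.
assert _twos_comp(0x7FF, 12) == 2047
assert _twos_comp(0xFFF, 12) == -1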
class LSM9DS1:
"""Driver for the LSM9DS1 accelerometer, magnetometer, gyroscope."""
# Class-level buffer for reading and writing data with the sensor.
# This reduces memory allocations but means the code is not re-entrant or
# thread safe!
_BUFFER = bytearray(6)
def __init__(self):
# soft reset & reboot accel/gyro
self._write_u8(_XGTYPE, _LSM9DS1_REGISTER_CTRL_REG8, 0x05)
# soft reset & reboot magnetometer
self._write_u8(_MAGTYPE, _LSM9DS1_REGISTER_CTRL_REG2_M, 0x0C)
time.sleep(0.01)
# Check ID registers.
if (
self._read_u8(_XGTYPE, _LSM9DS1_REGISTER_WHO_AM_I_XG) != _LSM9DS1_XG_ID
or self._read_u8(_MAGTYPE, _LSM9DS1_REGISTER_WHO_AM_I_M) != _LSM9DS1_MAG_ID
):
raise RuntimeError("Could not find LSM9DS1, check wiring!")
# enable gyro continuous
self._write_u8(_XGTYPE, _LSM9DS1_REGISTER_CTRL_REG1_G, 0xC0) # on XYZ
        # Enable the accelerometer continuous
self._write_u8(_XGTYPE, _LSM9DS1_REGISTER_CTRL_REG5_XL, 0x38)
self._write_u8(_XGTYPE, _LSM9DS1_REGISTER_CTRL_REG6_XL, 0xC0)
# enable mag continuous
self._write_u8(_MAGTYPE, _LSM9DS1_REGISTER_CTRL_REG3_M, 0x00)
# Set default ranges for the various sensors
self._accel_mg_lsb = None
self._mag_mgauss_lsb = None
self._gyro_dps_digit = None
self.accel_range = ACCELRANGE_2G
self.mag_gain = MAGGAIN_4GAUSS
self.gyro_scale = GYROSCALE_245DPS
def set_property_mag(self, address, val):
self._write_u8(_MAGTYPE, address, val)
def set_property_accel(self, address, val):
self._write_u8(_XGTYPE, address, val)
@property
def accel_range(self):
"""The accelerometer range. Must be a value of:
- ACCELRANGE_2G
- ACCELRANGE_4G
- ACCELRANGE_8G
- ACCELRANGE_16G
"""
reg = self._read_u8(_XGTYPE, _LSM9DS1_REGISTER_CTRL_REG6_XL)
return (reg & 0b00011000) & 0xFF
@accel_range.setter
def accel_range(self, val):
assert val in (ACCELRANGE_2G, ACCELRANGE_4G, ACCELRANGE_8G, ACCELRANGE_16G)
reg = self._read_u8(_XGTYPE, _LSM9DS1_REGISTER_CTRL_REG6_XL)
reg = (reg & ~(0b00011000)) & 0xFF
reg |= val
self._write_u8(_XGTYPE, _LSM9DS1_REGISTER_CTRL_REG6_XL, reg)
if val == ACCELRANGE_2G:
self._accel_mg_lsb = _LSM9DS1_ACCEL_MG_LSB_2G
elif val == ACCELRANGE_4G:
self._accel_mg_lsb = _LSM9DS1_ACCEL_MG_LSB_4G
elif val == ACCELRANGE_8G:
self._accel_mg_lsb = _LSM9DS1_ACCEL_MG_LSB_8G
elif val == ACCELRANGE_16G:
self._accel_mg_lsb = _LSM9DS1_ACCEL_MG_LSB_16G
@property
def mag_gain(self):
"""The magnetometer gain. Must be a value of:
- MAGGAIN_4GAUSS
- MAGGAIN_8GAUSS
- MAGGAIN_12GAUSS
- MAGGAIN_16GAUSS
"""
reg = self._read_u8(_MAGTYPE, _LSM9DS1_REGISTER_CTRL_REG2_M)
return (reg & 0b01100000) & 0xFF
@mag_gain.setter
def mag_gain(self, val):
assert val in (MAGGAIN_4GAUSS, MAGGAIN_8GAUSS, MAGGAIN_12GAUSS, MAGGAIN_16GAUSS)
reg = self._read_u8(_MAGTYPE, _LSM9DS1_REGISTER_CTRL_REG2_M)
reg = (reg & ~(0b01100000)) & 0xFF
reg |= val
self._write_u8(_MAGTYPE, _LSM9DS1_REGISTER_CTRL_REG2_M, reg)
if val == MAGGAIN_4GAUSS:
self._mag_mgauss_lsb = _LSM9DS1_MAG_MGAUSS_4GAUSS
elif val == MAGGAIN_8GAUSS:
self._mag_mgauss_lsb = _LSM9DS1_MAG_MGAUSS_8GAUSS
elif val == MAGGAIN_12GAUSS:
self._mag_mgauss_lsb = _LSM9DS1_MAG_MGAUSS_12GAUSS
elif val == MAGGAIN_16GAUSS:
self._mag_mgauss_lsb = _LSM9DS1_MAG_MGAUSS_16GAUSS
@property
def gyro_scale(self):
"""The gyroscope scale. Must be a value of:
- GYROSCALE_245DPS
- GYROSCALE_500DPS
- GYROSCALE_2000DPS
"""
reg = self._read_u8(_XGTYPE, _LSM9DS1_REGISTER_CTRL_REG1_G)
return (reg & 0b00011000) & 0xFF
@gyro_scale.setter
def gyro_scale(self, val):
assert val in (GYROSCALE_245DPS, GYROSCALE_500DPS, GYROSCALE_2000DPS)
reg = self._read_u8(_XGTYPE, _LSM9DS1_REGISTER_CTRL_REG1_G)
reg = (reg & ~(0b00011000)) & 0xFF
reg |= val
self._write_u8(_XGTYPE, _LSM9DS1_REGISTER_CTRL_REG1_G, reg)
if val == GYROSCALE_245DPS:
self._gyro_dps_digit = _LSM9DS1_GYRO_DPS_DIGIT_245DPS
elif val == GYROSCALE_500DPS:
self._gyro_dps_digit = _LSM9DS1_GYRO_DPS_DIGIT_500DPS
elif val == GYROSCALE_2000DPS:
self._gyro_dps_digit = _LSM9DS1_GYRO_DPS_DIGIT_2000DPS
def read_accel_raw(self):
"""Read the raw accelerometer sensor values and return it as a
        3-tuple of X, Y, Z axis values that are 16-bit signed values. If you
        want the acceleration in nice units you probably want to use the
        acceleration property!
"""
# Read the accelerometer
self._read_bytes(_XGTYPE, 0x80 | _LSM9DS1_REGISTER_OUT_X_L_XL, 6, self._BUFFER)
raw_x, raw_y, raw_z = struct.unpack_from("<hhh", self._BUFFER[0:6])
return (raw_x, raw_y, raw_z)
@property
def acceleration(self):
"""The accelerometer X, Y, Z axis values as a 3-tuple of
m/s^2 values.
"""
raw = self.read_accel_raw()
return map(
lambda x: x * self._accel_mg_lsb / 1000.0 * _SENSORS_GRAVITY_STANDARD, raw
)
def read_mag_raw(self):
"""Read the raw magnetometer sensor values and return it as a
        3-tuple of X, Y, Z axis values that are 16-bit signed values. If you
        want the magnetic field in nice units you probably want to use the
        magnetic property!
"""
# Read the magnetometer
self._read_bytes(_MAGTYPE, 0x80 | _LSM9DS1_REGISTER_OUT_X_L_M, 6, self._BUFFER)
raw_x, raw_y, raw_z = struct.unpack_from("<hhh", self._BUFFER[0:6])
return (raw_x, raw_y, raw_z)
@property
def magnetic(self):
"""The magnetometer X, Y, Z axis values as a 3-tuple of
gauss values.
"""
raw = self.read_mag_raw()
return map(lambda x: x * self._mag_mgauss_lsb / 1000.0, raw)
def read_gyro_raw(self):
"""Read the raw gyroscope sensor values and return it as a
        3-tuple of X, Y, Z axis values that are 16-bit signed values. If you
        want the rotation rate in nice units you probably want to use the
        gyro property!
"""
# Read the gyroscope
self._read_bytes(_XGTYPE, 0x80 | _LSM9DS1_REGISTER_OUT_X_L_G, 6, self._BUFFER)
raw_x, raw_y, raw_z = struct.unpack_from("<hhh", self._BUFFER[0:6])
return (raw_x, raw_y, raw_z)
@property
def gyro(self):
"""The gyroscope X, Y, Z axis values as a 3-tuple of
degrees/second values.
"""
raw = self.read_gyro_raw()
return map(lambda x: x * self._gyro_dps_digit, raw)
def read_temp_raw(self):
"""Read the raw temperature sensor value and return it as a 12-bit
signed value. If you want the temperature in nice units you probably
want to use the temperature property!
"""
# Read temp sensor
self._read_bytes(_XGTYPE, 0x80 | _LSM9DS1_REGISTER_TEMP_OUT_L, 2, self._BUFFER)
temp = ((self._BUFFER[1] << 8) | self._BUFFER[0]) >> 4
return _twos_comp(temp, 12)
@property
def temperature(self):
"""The temperature of the sensor in degrees Celsius."""
        # This is just a guess since the starting point (27.5C here) isn't documented :(
# See discussion from:
# https://github.com/kriswiner/LSM9DS1/issues/3
temp = self.read_temp_raw()
temp = 27.5 + temp / 16
return temp
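    # Added worked example of the conversion above: a raw 12-bit reading of 40
    # would give 27.5 + 40 / 16 = 30.0 degrees Celsius.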
def _read_u8(self, sensor_type, address):
# Read an 8-bit unsigned value from the specified 8-bit address.
# The sensor_type boolean should be _MAGTYPE when talking to the
# magnetometer, or _XGTYPE when talking to the accel or gyro.
# MUST be implemented by subclasses!
raise NotImplementedError()
def _read_bytes(self, sensor_type, address, count, buf):
# Read a count number of bytes into buffer from the provided 8-bit
# register address. The sensor_type boolean should be _MAGTYPE when
# talking to the magnetometer, or _XGTYPE when talking to the accel or
# gyro. MUST be implemented by subclasses!
raise NotImplementedError()
def _write_u8(self, sensor_type, address, val):
# Write an 8-bit unsigned value to the specified 8-bit address.
# The sensor_type boolean should be _MAGTYPE when talking to the
# magnetometer, or _XGTYPE when talking to the accel or gyro.
# MUST be implemented by subclasses!
raise NotImplementedError()
class LSM9DS1_I2C(LSM9DS1):
"""Driver for the LSM9DS1 connect over I2C.
:param ~busio.I2C i2c: The I2C bus object used to connect to the LSM9DS1.
.. note:: This object should be shared among other driver classes that use the
same I2C bus (SDA & SCL pins) to connect to different I2C devices.
    :param int mag_address: An 8-bit integer that represents the i2c address of the
LSM9DS1's magnetometer. Options are limited to ``0x1C`` or ``0x1E``.
Defaults to ``0x1E``.
    :param int xg_address: An 8-bit integer that represents the i2c address of the
LSM9DS1's accelerometer and gyroscope. Options are limited to ``0x6A`` or ``0x6B``.
Defaults to ``0x6B``.
"""
def __init__(
self,
i2c,
mag_address=_LSM9DS1_ADDRESS_MAG,
xg_address=_LSM9DS1_ADDRESS_ACCELGYRO,
):
if mag_address in (0x1C, 0x1E) and xg_address in (0x6A, 0x6B):
self._mag_device = i2c_device.I2CDevice(i2c, mag_address)
self._xg_device = i2c_device.I2CDevice(i2c, xg_address)
super().__init__()
else:
raise ValueError(
"address parmeters are incorrect. Read the docs at "
"circuitpython.rtfd.io/projects/lsm9ds1/en/latest"
"/api.html#adafruit_lsm9ds1.LSM9DS1_I2C"
)
def _read_u8(self, sensor_type, address):
if sensor_type == _MAGTYPE:
device = self._mag_device
else:
device = self._xg_device
with device as i2c:
self._BUFFER[0] = address & 0xFF
i2c.write_then_readinto(
self._BUFFER, self._BUFFER, out_end=1, in_start=1, in_end=2
)
return self._BUFFER[1]
def _read_bytes(self, sensor_type, address, count, buf):
if sensor_type == _MAGTYPE:
device = self._mag_device
else:
device = self._xg_device
with device as i2c:
buf[0] = address & 0xFF
i2c.write_then_readinto(buf, buf, out_end=1, in_end=count)
def _write_u8(self, sensor_type, address, val):
if sensor_type == _MAGTYPE:
device = self._mag_device
else:
device = self._xg_device
with device as i2c:
self._BUFFER[0] = address & 0xFF
self._BUFFER[1] = val & 0xFF
i2c.write(self._BUFFER, end=2)
class LSM9DS1_SPI(LSM9DS1):
"""Driver for the LSM9DS1 connect over SPI.
:param ~busio.SPI spi: The SPI bus object used to connect to the LSM9DS1.
.. note:: This object should be shared among other driver classes that use the
same SPI bus (SCK, MISO, MOSI pins) to connect to different SPI devices.
:param ~digitalio.DigitalInOut mcs: The digital output pin connected to the
LSM9DS1's CSM (Chip Select Magnetometer) pin.
:param ~digitalio.DigitalInOut xgcs: The digital output pin connected to the
LSM9DS1's CSAG (Chip Select Accelerometer/Gyroscope) pin.
"""
# pylint: disable=no-member
def __init__(self, spi, xgcs, mcs):
self._mag_device = spi_device.SPIDevice(
spi, mcs, baudrate=200000, phase=1, polarity=1
)
self._xg_device = spi_device.SPIDevice(
spi, xgcs, baudrate=200000, phase=1, polarity=1
)
super().__init__()
def _read_u8(self, sensor_type, address):
if sensor_type == _MAGTYPE:
device = self._mag_device
else:
device = self._xg_device
with device as spi:
self._BUFFER[0] = (address | 0x80) & 0xFF
spi.write(self._BUFFER, end=1)
spi.readinto(self._BUFFER, end=1)
return self._BUFFER[0]
def _read_bytes(self, sensor_type, address, count, buf):
if sensor_type == _MAGTYPE:
device = self._mag_device
else:
device = self._xg_device
with device as spi:
buf[0] = (address | 0x80) & 0xFF
spi.write(buf, end=1)
spi.readinto(buf, end=count)
def _write_u8(self, sensor_type, address, val):
if sensor_type == _MAGTYPE:
device = self._mag_device
else:
device = self._xg_device
with device as spi:
self._BUFFER[0] = (address & 0x7F) & 0xFF
self._BUFFER[1] = val & 0xFF
spi.write(self._BUFFER, end=2)
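# ----------------------------------------------------------------------------
# Added usage sketch (not part of the upstream driver): reading the sensor over
# I2C. The `board` import, the availability of `board.I2C()`, and the wiring
# are assumptions about the target CircuitPython hardware.
if __name__ == "__main__":
    import board  # assumed to exist on the target CircuitPython board

    _i2c = board.I2C()  # uses the board's default SCL/SDA pins
    _sensor = LSM9DS1_I2C(_i2c)
    while True:
        _ax, _ay, _az = _sensor.acceleration  # m/s^2
        _mx, _my, _mz = _sensor.magnetic      # gauss
        _gx, _gy, _gz = _sensor.gyro          # degrees/s
        print("accel:", _ax, _ay, _az)
        print("mag:", _mx, _my, _mz)
        print("gyro:", _gx, _gy, _gz)
        print("temp:", _sensor.temperature, "C")
        time.sleep(1.0)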
| 34.866397
| 93
| 0.751045
|
87189ebb9fc567311ff59b9fa6af34bdadf6e9bd
| 229
|
py
|
Python
|
Regex/Metacharacters/dolar.py
|
beatrizflorenccio/Projects-Python
|
fc584167a2816dc89f22baef0fa0f780af796c98
|
[
"MIT"
] | 1
|
2021-10-10T08:18:45.000Z
|
2021-10-10T08:18:45.000Z
|
Regex/Metacharacters/dolar.py
|
beatrizflorenccio/Projects-Python
|
fc584167a2816dc89f22baef0fa0f780af796c98
|
[
"MIT"
] | null | null | null |
Regex/Metacharacters/dolar.py
|
beatrizflorenccio/Projects-Python
|
fc584167a2816dc89f22baef0fa0f780af796c98
|
[
"MIT"
] | null | null | null |
# Metacharacter dollar
# Used to check for a match at the end of the expression
import re
txt = 'robocup'
x = re.search('p$', txt)
if x is not None:
    print('match found')
else:
    print('no match')
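# Added sketch for contrast (illustrative only): inside a character class the
# dollar sign is a literal character, so '[p$]' matches a 'p' or '$' anywhere,
# while 'p$' only matches a 'p' at the end of the string.
print(re.search('p$', 'robocup') is not None)    # True  - ends with 'p'
print(re.search('p$', 'python3') is not None)    # False - ends with '3'
print(re.search('[p$]', 'python3') is not None)  # True  - literal 'p' anywhere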
| 16.357143
| 60
| 0.69869
|
63da2aac4d0f4304dc4d35435e16c5405922fda3
| 1,113
|
py
|
Python
|
server/model_utils.py
|
Herr-Whit/covid_nlp
|
0b84829f6b2d64978807d1c18e8298d8990d780b
|
[
"MIT"
] | null | null | null |
server/model_utils.py
|
Herr-Whit/covid_nlp
|
0b84829f6b2d64978807d1c18e8298d8990d780b
|
[
"MIT"
] | null | null | null |
server/model_utils.py
|
Herr-Whit/covid_nlp
|
0b84829f6b2d64978807d1c18e8298d8990d780b
|
[
"MIT"
] | null | null | null |
import pathlib
from glob import glob
import pickle
from sklearn.pipeline import Pipeline
from spacy.lang.en import English
from spacy.lang.en.stop_words import STOP_WORDS
from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction.text import TfidfVectorizer
# nlp = English()
#
#
# def featurize_text(text):
# text = nlp(text)
# filtered_text = list()
# for word in text:
# if (word.is_stop == False) and (word.is_punct == False):
# filtered_text.append(word.lemma)
# return filtered_text
def load_models(directory):
models = list()
print('.' + directory + '*.pkl')
print(glob('.' + directory + '*.pkl'))
for file in glob('.' + directory + '*.pkl'):
with open(file, 'rb') as f:
pipeline = pickle.load(f)
if type(pipeline) == Pipeline:
models.append(pipeline)
else:
print(f'{file} could not be loaded as a model as it is not a sklearn.pipeline.Pipeline type.')
return models
int_to_label_dict = {
0: 'negative',
1: 'neutral',
2: 'positive'
}
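# Added usage sketch (the directory path, the input text, and the assumption
# that each pickled Pipeline accepts raw strings are illustrative, not taken
# from this repo):
if __name__ == '__main__':
    loaded_models = load_models('/models/')  # globs './models/*.pkl' internally
    for loaded_model in loaded_models:
        predicted = loaded_model.predict(['The new guidelines seem reasonable'])[0]
        print(int_to_label_dict.get(int(predicted), 'unknown'))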
| 28.538462
| 110
| 0.63522
|
d693f7998cd119d66af6095b4f984a0707d4c19f
| 95,669
|
py
|
Python
|
src/toil_vg/vg_construct.py
|
xchang1/toil-vg
|
15eb6cd679590f3a326c6bf6194ff63aad32f108
|
[
"Apache-2.0"
] | null | null | null |
src/toil_vg/vg_construct.py
|
xchang1/toil-vg
|
15eb6cd679590f3a326c6bf6194ff63aad32f108
|
[
"Apache-2.0"
] | null | null | null |
src/toil_vg/vg_construct.py
|
xchang1/toil-vg
|
15eb6cd679590f3a326c6bf6194ff63aad32f108
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""
vg_construct.py: construct a graph from a vcf and fasta
"""
import argparse, sys, os, os.path, errno, random, subprocess, shutil, itertools, glob, tarfile
import doctest, re, json, collections, time, timeit
import logging, logging.handlers, struct, socket, threading
import string
import getpass
import pdb
import logging
import os
from math import ceil
from subprocess import Popen, PIPE
from toil.common import Toil
from toil.job import Job
from toil.realtimeLogger import RealtimeLogger
from toil_vg.vg_common import *
from toil_vg.context import Context, run_write_info_to_outstore
from toil_vg.vg_index import run_xg_indexing, run_indexing, run_bwa_index, index_parse_args, index_toggle_parse_args, validate_shared_index_options
from toil_vg.vg_msga import run_msga, msga_parse_args
logger = logging.getLogger(__name__)
# from ftp://ftp-trace.ncbi.nlm.nih.gov/giab/ftp/data/NA12878/analysis/Illumina_PlatinumGenomes_NA12877_NA12878_09162015/IlluminaPlatinumGenomes-user-guide.pdf
CEPH_SAMPLES="NA12889 NA12890 NA12891 NA12892 NA12877 NA12878 NA12879 NA12880 NA12881 NA12882 NA12883 NA12884 NA12885 NA12886 NA12887 NA12888 NA12893".split()
def construct_subparser(parser):
"""
Create a subparser for construction. Should pass in results of subparsers.add_parser()
"""
# Add the Toil options so the job store is the first argument
Job.Runner.addToilOptions(parser)
# General options
parser.add_argument("out_store",
help="output store. All output written here. Path specified using same syntax as toil jobStore")
parser.add_argument("--fasta", default=[], type=make_url, nargs='+',
help="Reference sequence in fasta or fasta.gz (single fasta or 1/region in same order as --regions)")
parser.add_argument("--vcf", default=[], nargs='+',
help="Variants to make graph from (single vcf or 1/region IN SAME ORDER AS REGIONS (as passed in by --regions"
" or scanned in from --fasta_regions or --regions_file)). "
"VCFs separated by commas will be merged together and treated as one using bcftools merge "
"(so must be over distinct sample sets).")
parser.add_argument("--regions", default=[], nargs='+',
help="1-based inclusive VCF coordinates in the form of SEQ or SEQ:START-END")
parser.add_argument("--regions_file", default=None,
help="List of regions (replaces --regions). Only first column of each line considered (so .fai acceptable)")
parser.add_argument("--fasta_regions", action="store_true",
help="Infer regions from fasta file. If multiple vcfs specified, any regions found that are not in --regions will be added without variants (useful for decoy sequences)")
parser.add_argument("--regions_regex", default=[], nargs='+',
help="Ignore sequence names not fully matching (union of) given regexes when using --fasta_regions or --regions_file"
" (ex: --regions_regex \'chr[1-9,M,X,Y,EBV][0-9]{0,1}\' \'chr.*decoy\' to keep only chroms and decoys from hs38d1)")
parser.add_argument("--alt_regions_bed", type=make_url,
help="BED file mapping alt regions (cols 1-3) to sequence names (col 4) from the FASTA. "
"Alt regions will be aligned to the graph using vg msga")
parser.add_argument("--max_node_size", type=int, default=32,
help="Maximum node length")
parser.add_argument("--alt_paths", action="store_true",
help="Save paths for alts with variant ID")
parser.add_argument("--flat_alts", action="store_true",
help="flat alts")
parser.add_argument("--handle_svs", action="store_true",
help="pass --handle-sv to vg construct to parse symbolic SV alts")
parser.add_argument("--construct_cores", type=int,
help="Number of cores for vg construct")
parser.add_argument("--out_name", default='graph',
help="Name used for output graphs and indexes")
parser.add_argument("--merge_graphs", action="store_true",
help="Merge all regions into one graph")
parser.add_argument("--normalize", action="store_true",
help="Normalize the graphs")
parser.add_argument("--validate", action="store_true",
help="Run vg validate on constructed graphs")
# useful for mixing, say, UCSC references with 1KG VCFs
parser.add_argument("--add_chr_prefix", action="store_true",
help="add \"chr\" prefix to chromosome names if not already present")
parser.add_argument("--remove_chr_prefix", action="store_true",
help="remove \"chr\" prefix from chromosome names")
parser.add_argument("--keep_vcfs", action="store_true",
help="write the VCFs created to make the filtered and control graphs to the output store")
# Toggles for the different types of graph(s) that can be made. Indexing and above options
# will be applied to each one. The output names will be prefixed with out_name.
parser.add_argument("--primary", action="store_true",
help="Make the primary graph (no variants) using just the FASTA")
parser.add_argument("--pangenome", action="store_true",
help="Make the pangenome graph using the input VCF(s)")
parser.add_argument("--pos_control", type=str,
help="Make a positive control (ref path plus sample variants) using this sample")
parser.add_argument("--neg_control", type=str,
help="Make a negative control (exclude all sample variants) using this sample")
parser.add_argument("--sample_graph", type=str,
help="Make a sample graph (only contains sample haplotypes) using this sample. Only "
" phased variants will be included. Will also make a _withref version that includes reference")
parser.add_argument("--haplo_sample", type=str,
help="Make two haplotype thread graphs (for simulating from) for this sample. Phasing"
" information required in the input vcf.")
parser.add_argument("--filter_ceph", action="store_true",
help="Make a graph where all variants private to the CEPH pedigree, which includes "
"NA12878 are excluded")
parser.add_argument("--filter_samples", nargs='+',
help="Make a graph where all variants private to the the listed smaples are excluded")
parser.add_argument("--min_af", type=float, default=[], nargs='+',
help="Create a graph including only variants with given minium allele frequency."
" If multiple frequencies given, a graph will be made for each one")
parser.add_argument("--bwa_reference", type=make_url,
help="Make a BWA reference (set of indexes) from the given FASTA (not the --fasta FASTAs).")
parser.add_argument("--pre_min_af", type=float, default=None,
help="Run minimum allele frequency filter as preprocessing step on each input VCF. "
"Unlike --min_af, this will be applied before merging and any control graph construction")
parser.add_argument("--mask_ambiguous", action="store_true",
help="Convert IUPAC ambiguous characters in FASTA to Ns")
# Add common indexing options shared with vg_index
index_toggle_parse_args(parser)
index_parse_args(parser)
# Add common msga options shared with vg_msga
msga_parse_args(parser)
# Add common options shared with everybody
add_common_vg_parse_args(parser)
# Add common docker options
add_container_tool_parse_args(parser)
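# Added example invocation (illustrative; the job store, output store, and
# input paths below are hypothetical, while the flags themselves are defined
# above and in index_toggle_parse_args):
#
#   toil-vg construct ./my-jobstore ./my-outstore \
#       --fasta hs37d5.fa.gz --vcf 1kg_chr21.vcf.gz --regions 21 \
#       --pangenome --xg_index --out_name snp1kg-chr21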
def re_fullmatch(regex, string, flags=0):
"""Emulate python-3.4 re.fullmatch().
https://stackoverflow.com/questions/30212413/backport-python-3-4s-regular-expression-fullmatch-to-python-2
"""
return re.match("(?:" + regex + r")\Z", string, flags=flags)
def validate_construct_options(options):
"""
Throw an error if an invalid combination of options has been selected.
"""
require(options.regions or options.fasta_regions or options.regions_file,
'--regions or --fasta_regions required')
require(not options.regions_file or not (options.fasta_regions or options.regions),
'--regions_file cannot be used with --regions or --fasta_regions')
require(not options.regions_regex or (options.fasta_regions or options.regions_file),
'--regions_regex can only be used with --fasta_regions or --regions_file')
require(not options.add_chr_prefix or not options.remove_chr_prefix,
'--add_chr_prefix cannot be used with --remove_chr_prefix')
require(options.vcf == [] or len(options.vcf) == 1 or not options.regions or
len(options.vcf) <= len(options.regions),
'if many vcfs specified, cannot have more vcfs than --regions')
require(len(options.fasta) == 1 or len(options.fasta) == len(options.regions),
'if many fastas specified, must be same number as --regions')
require(len(options.fasta) == 1 or not options.fasta_regions,
'--fasta_regions currently only works when single fasta specified with --fasta')
require(len(options.fasta) > 0 or options.bwa_reference,
'either --fasta or --bwa_reference must be set to give something to construct')
require('gbwt' not in options.indexes or 'xg' in options.indexes,
'--xg_index required with --gbwt_index')
# TODO: It seems like some of this code is designed to run multiple regions
# in parallel, but the indexing code always indexes them together.
require('gbwt' not in options.indexes or (not options.pangenome and not options.pos_control and
not options.neg_control and not options.sample_graph and not options.haplo_sample and
not options.min_af) or len(options.vcf) >= 1,
'--gbwt_index with any graph other than --primary requires --vcf')
require(not options.sample_graph or (options.regions or options.fasta_regions),
'--regions or --fasta_regions required with --sample_graph')
require(options.primary or options.pangenome or options.pos_control or options.neg_control or
options.sample_graph or options.haplo_sample or options.filter_ceph or options.filter_samples or
options.min_af or options.bwa_reference,
'At least one kind of graph or reference must be specified for construction')
require(not options.vcf or options.pangenome or options.pos_control or options.neg_control or
options.sample_graph or options.haplo_sample or options.filter_ceph or options.filter_samples or
options.min_af,
'At least one kind of non-primary graph must be specified for construction with --vcf')
require(options.vcf or not (options.pangenome or options.pos_control or options.neg_control or
options.sample_graph or options.haplo_sample or options.filter_ceph or
options.filter_samples or options.min_af),
'--vcf required for construction of non-primary graph')
# TODO: support new, more general CLI properly
require(options.pos_control is None or options.neg_control is None or
options.pos_control == options.neg_control,
'--pos_control_sample and --neg_control_sample must be the same')
require(not options.haplo_sample or not options.sample_graph or
(options.haplo_sample == options.sample_graph),
'--haplo_sample and --sample_graph must be the same')
validate_shared_index_options(options)
def chr_name_map(to_ucsc, max_chrom=22):
"""
    Return a name map for chromosome name conversion as a dict, along with
    a TSV string version of the same mapping.
Will contain mappings for chromosomes 1 to max_chrom, inclusive.
"""
name_map = {}
name_str = ''
# TODO: Should we do something with MT <==> chrM ?
for i in list(range(1, max_chrom + 1)) + ['X', 'Y']:
if to_ucsc:
name_str += '{}\tchr{}\n'.format(i, i)
name_map[str(i)] = 'chr{}'.format(i)
else:
name_str += 'chr{}\t{}\n'.format(i, i)
name_map['chr{}'.format(i)] = str(i)
return name_map, name_str
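# Added illustrative example: chr_name_map(to_ucsc=True, max_chrom=2)[0]
# returns {'1': 'chr1', '2': 'chr2', 'X': 'chrX', 'Y': 'chrY'}, and the second
# element is the same mapping rendered as tab-separated lines for bcftools.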
def run_merge_all_vcfs(job, context, vcf_file_ids_list, vcf_names_list, tbi_file_ids_list):
"""
    takes a list of lists of input vcfs and makes a child merge job for each list
"""
out_vcf_ids_list = []
out_names_list = []
out_tbi_ids_list = []
for vcf_file_ids, vcf_names, tbi_file_ids in zip(vcf_file_ids_list, vcf_names_list, tbi_file_ids_list):
if len(vcf_file_ids) > 1:
merge_job = job.addChildJobFn(run_merge_vcfs, context, vcf_file_ids, vcf_names, tbi_file_ids,
cores=context.config.construct_cores,
memory=context.config.construct_mem,
disk=context.config.construct_disk)
out_vcf_ids_list.append(merge_job.rv(0))
out_names_list.append(merge_job.rv(1))
out_tbi_ids_list.append(merge_job.rv(2))
else:
out_vcf_ids_list.append(vcf_file_ids[0])
out_names_list.append(vcf_names[0])
out_tbi_ids_list.append(tbi_file_ids[0])
return out_vcf_ids_list, out_names_list, out_tbi_ids_list
def run_merge_vcfs(job, context, vcf_file_ids, vcf_names, tbi_file_ids):
"""
run bctools merge on a list of vcfs and return just one. note that
bcftools merge expectes non-overlapping sample sets
"""
assert len(vcf_file_ids) == len(vcf_names) == len(tbi_file_ids)
if len(vcf_file_ids) == 1:
return vcf_file_ids[0], vcf_names[0], tbi_file_ids[0]
work_dir = job.fileStore.getLocalTempDir()
names = []
for vcf_id, vcf_name, tbi_id in zip(vcf_file_ids, vcf_names, tbi_file_ids):
job.fileStore.readGlobalFile(vcf_id, os.path.join(work_dir, vcf_name))
job.fileStore.readGlobalFile(tbi_id, os.path.join(work_dir, vcf_name) + '.tbi')
names.append(remove_ext(remove_ext(vcf_name, '.gz'), '.vcf'))
if len(names) != len(set(names)):
raise RuntimeError('vcf merging expects unique filenames')
merged_name = '_'.join(names) + '.vcf.gz'
with open(os.path.join(work_dir, merged_name), 'wb') as merged_file:
cmd = [['bcftools', 'merge', '--missing-to-ref', '--force-samples'] + vcf_names]
# phase the ref/ref calls added by --missing-to-ref
        cmd.append(['sed', '-e', r's/0\/0/0\|0/g'])
cmd.append(['bcftools', 'view', '-', '--output-type', 'z'])
context.runner.call(job, cmd, work_dir = work_dir, outfile = merged_file)
context.runner.call(job, ['tabix', '--preset', 'vcf', merged_name], work_dir = work_dir)
return (context.write_intermediate_file(job, os.path.join(work_dir, merged_name)),
merged_name,
context.write_intermediate_file(job, os.path.join(work_dir, merged_name) + '.tbi'))
def run_unzip_fasta(job, context, fasta_id, fasta_name):
"""
    vg construct doesn't work with zipped fasta, so we run this on input fastas that end in .gz
"""
work_dir = job.fileStore.getLocalTempDir()
# Download input files
fasta_file = os.path.join(work_dir, os.path.basename(fasta_name))
job.fileStore.readGlobalFile(fasta_id, fasta_file, mutable=True)
context.runner.call(job, ['bgzip', '-d', os.path.basename(fasta_file)], work_dir=work_dir)
return context.write_intermediate_file(job, fasta_file[:-3])
def run_mask_ambiguous(job, context, fasta_id, fasta_name):
"""
Replace IUPAC characters (of any case) with Ns. That's how they end up in the XG anyway,
and it will prevent some errors in vg construct if the VCF has N's but the fasta has something else.
(todo: would need to apply same thing to the VCF to be more robust, but will hold off until having
a use case)
"""
work_dir = job.fileStore.getLocalTempDir()
fasta_file = os.path.join(work_dir, os.path.basename(fasta_name))
mask_file = os.path.splitext(fasta_file)[0] + '-mask.fa'
job.fileStore.readGlobalFile(fasta_id, fasta_file, mutable=True)
fa_mask_cmd = ['awk', 'BEGIN{FS=\" \"}{if(!/>/){ gsub(/[YRWSKMDVHBXyrwskmdvhbx]/,"N"); print }else{print $1}}',
os.path.basename(fasta_file)]
with open(mask_file, 'wb') as mf:
context.runner.call(job, fa_mask_cmd, outfile=mf, work_dir=work_dir)
return context.write_intermediate_file(job, mask_file), os.path.basename(mask_file)
def run_scan_fasta_sequence_names(job, context, fasta_id, fasta_name, regions = None, regions_regex = None):
"""
scrape regions out of the (uncompressed) fasta, appending them to given regions list if provided
"""
work_dir = job.fileStore.getLocalTempDir()
# Download input files
fasta_file = os.path.join(work_dir, os.path.basename(fasta_name))
job.fileStore.readGlobalFile(fasta_id, fasta_file)
# reluctant to use slow python library, so just running grep instead
cmd = ['grep', '>', os.path.basename(fasta_file)]
grep_output = context.runner.call(job, cmd, work_dir = work_dir,
check_output = True, tool_name='bgzip')
# just taking first whitespace-separated token. that's what corresponds to hs37d5 vcf
seq_names = [] if not regions else regions
for line in grep_output.decode().split('\n'):
if len(line) > 1:
name = line.split()[0]
if name.startswith('>') and (not regions or name[1:] not in regions) and \
(not regions_regex or re_fullmatch(regions_regex, name[1:])):
seq_names.append(name[1:])
return seq_names
def run_scan_regions_file(job, context, regions_id, regions_regex = None):
"""
Read a list of regions
"""
work_dir = job.fileStore.getLocalTempDir()
regions_path = os.path.join(work_dir, 'regions.tsv')
job.fileStore.readGlobalFile(regions_id, regions_path)
out_regions = []
with open(regions_path) as regions_file:
for line in regions_file:
region_name = line.strip().split()[0]
if len(region_name) > 0 and (not regions_regex or re_fullmatch(regions_regex, region_name)):
out_regions.append(region_name)
return out_regions
def run_fix_chrom_names(job, context, to_ucsc, regions, fasta_ids, fasta_names,
vcf_ids_list, vcf_names_list, tbi_ids_list, alt_regions_id):
"""
Apply name mappings to regions list, fasta files and vcf files. if to_ucsc is true we convert
1 -> chr1 etc. otherwise, we go the other way.
"""
work_dir = job.fileStore.getLocalTempDir()
# How many chromosomes should we generate name mappings for?
# No more than there are regions certainly.
# But also never less than the 22 we expect in humans.
max_chrom = max(22, len(regions))
name_map, name_str = chr_name_map(to_ucsc, max_chrom)
out_regions = []
something_to_rename = False
# map the regions
for region in regions:
region_name = region.split(':')[0]
if region_name in name_map:
something_to_rename = True
out_regions.append(name_map[region_name] + region[len(region_name):])
else:
something_to_rename = something_to_rename or region_name in list(name_map.values())
out_regions.append(region)
# map the vcf
out_vcf_ids = []
out_vcf_names = []
out_tbi_ids = []
if something_to_rename:
# make our name mapping file
name_map_path = os.path.join(work_dir, 'name_map.tsv')
with open(name_map_path, 'w') as name_map_file:
name_map_file.write(name_str)
name_map_id = context.write_intermediate_file(job, name_map_path)
for vcf_ids, vcf_names, tbi_ids in zip(vcf_ids_list, vcf_names_list, tbi_ids_list):
out_vcf_ids.append([])
out_vcf_names.append([])
out_tbi_ids.append([])
for vcf_id, vcf_name, tbi_id in zip(vcf_ids, vcf_names, tbi_ids):
vcf_rename_job = job.addChildJobFn(run_fix_vcf_chrom_names, context, vcf_id, vcf_name, tbi_id, name_map_id,
cores=context.config.construct_cores,
memory=context.config.construct_mem,
disk=context.config.construct_disk)
out_vcf_ids[-1].append(vcf_rename_job.rv(0))
out_vcf_names[-1].append(vcf_rename_job.rv(1))
out_tbi_ids[-1].append(vcf_rename_job.rv(2))
else:
out_vcf_ids = vcf_ids_list
out_vcf_names = vcf_names_list
out_tbi_ids = tbi_ids_list
# map the fasta
out_fasta_ids = []
out_fasta_names = []
if something_to_rename:
for fasta_id, fasta_name in zip(fasta_ids, fasta_names):
assert not fasta_name.endswith('.gz')
in_fasta_name = os.path.basename(fasta_name)
job.fileStore.readGlobalFile(fasta_id, os.path.join(work_dir, in_fasta_name))
out_fasta_name = os.path.splitext(fasta_name)[0] + '-renamed' + os.path.splitext(fasta_name)[1]
with open(os.path.join(work_dir, out_fasta_name), 'w') as out_fasta_file, \
open(os.path.join(work_dir, in_fasta_name)) as in_fasta_file:
# TODO: is this too slow in python?
for line in in_fasta_file:
if line.startswith('>'):
region_name = line[1:].split()[0]
if region_name in name_map:
out_fasta_file.write('>{}\n'.format(name_map[region_name]))
else:
out_fasta_file.write(line)
else:
out_fasta_file.write(line)
out_fasta_ids.append(context.write_intermediate_file(job, os.path.join(work_dir, out_fasta_name)))
out_fasta_names.append(out_fasta_name)
else:
out_fasta_ids = fasta_ids
out_fasta_names = fasta_names
# map the alt regions
if alt_regions_id:
alt_regions_path = os.path.join(work_dir, 'alt-regions.bed')
alt_regions_out_path = os.path.join(work_dir, 'alt-regions-fix.bed')
job.fileStore.readGlobalFile(alt_regions_id, alt_regions_path)
with open(alt_regions_path) as in_regions, open(alt_regions_out_path, 'w') as out_alt_regions:
for line in in_regions:
toks = line.strip().split('\t')
if len(toks) >= 4 and toks[0] != '#':
if toks[0] in name_map:
out_alt_regions.write('{}\t{}\t{}\t{}\n'.format(name_map[toks[0]], toks[1], toks[2], toks[3]))
else:
out_alt_regions.write(line)
out_alt_regions_id = context.write_intermediate_file(job, alt_regions_out_path)
else:
out_alt_regions_id = None
return out_regions, out_fasta_ids, out_fasta_names, out_vcf_ids, out_vcf_names, out_tbi_ids, out_alt_regions_id
def run_fix_vcf_chrom_names(job, context, vcf_id, vcf_name, tbi_id, name_file_id):
"""
use bcftools annotate to rename chromosomes in a vcf
"""
work_dir = job.fileStore.getLocalTempDir()
name_map_path = os.path.join(work_dir, 'name_map.tsv')
job.fileStore.readGlobalFile(name_file_id, name_map_path)
assert vcf_name.endswith('.vcf.gz')
in_vcf_name = os.path.basename(vcf_name)
job.fileStore.readGlobalFile(vcf_id, os.path.join(work_dir, in_vcf_name))
job.fileStore.readGlobalFile(tbi_id, os.path.join(work_dir, in_vcf_name + '.tbi'))
out_vcf_name = in_vcf_name[:-7] + '-renamed.vcf.gz'
context.runner.call(job, ['bcftools', 'annotate', '--rename-chrs', os.path.basename(name_map_path),
'--output-type', 'z', '--output', out_vcf_name, os.path.basename(in_vcf_name)],
work_dir = work_dir)
context.runner.call(job, ['tabix', '--force', '--preset', 'vcf', out_vcf_name], work_dir = work_dir)
return (context.write_intermediate_file(job, os.path.join(work_dir, out_vcf_name)),
out_vcf_name,
context.write_intermediate_file(job, os.path.join(work_dir, out_vcf_name + '.tbi')))
def run_subtract_alt_regions(job, context, alt_regions_id, regions):
"""
make sure that alt contigs don't wind up in our regions names, as we want them
to get aligned into chromosomes rather than form their own components
"""
work_dir = job.fileStore.getLocalTempDir()
alt_regions_path = os.path.join(work_dir, 'alt-regions.bed')
job.fileStore.readGlobalFile(alt_regions_id, alt_regions_path)
alt_regions = set()
with open(alt_regions_path) as in_regions:
for line in in_regions:
toks = line.strip().split('\t')
if len(toks) >= 4 and toks[0] != '#':
alt_regions.add(toks[3])
return [region for region in regions if region not in alt_regions], list(alt_regions)
def run_generate_input_vcfs(job, context, vcf_ids, vcf_names, tbi_ids,
regions, output_name,
do_primary = False,
do_pan = False,
pos_control_sample = None,
neg_control_sample = None,
sample_graph = None,
haplo_sample = None,
filter_samples = [],
min_afs = [],
vcf_subdir = None):
"""
Preprocessing step to make a bunch of vcfs if wanted:
- positive control
- negative control
- family filter
- primary
- thresholded by a given minimum allele frequency
returns a dictionary of name -> (vcf_id, vcf_name, tbi_id, merge_name, region_names) tuples
where name can be used to, ex, tell the controls apart
if vcf_subdir is specified, the various created vcfs will be stored in a subfolder of that
name in the output store. if it's not specified, then these intermediate vcfs will not be saved
"""
output = dict()
# primary graph, containing just the input fasta
if do_primary:
if regions:
primary_region_names = ['primary' + '_' + c.replace(':','-') for c in regions]
else:
primary_region_names = None
primary_output_name = 'primary.vg' if '_' not in output_name else 'primary' + output_name[output_name.find('_')+1:]
output['primary'] = [[], [], [], primary_output_name, primary_region_names]
# a straight-up pangenome graph from the input vcf
if do_pan:
output[output_name] = [vcf_ids, vcf_names, tbi_ids, output_name,
[output_name + '_' + c.replace(':','-') for c in regions] if regions else None]
# our positive control consists of the reference path and any variant in the sample
if pos_control_sample or neg_control_sample:
control_sample = pos_control_sample if pos_control_sample else neg_control_sample
assert not neg_control_sample or neg_control_sample == control_sample
assert not pos_control_sample or pos_control_sample == control_sample
pos_control_vcf_ids, pos_control_tbi_ids = [], []
neg_control_vcf_ids, neg_control_tbi_ids = [], []
pos_control_vcf_names, neg_control_vcf_names = [], []
for vcf_id, vcf_name, tbi_id in zip(vcf_ids, vcf_names, tbi_ids):
make_controls = job.addChildJobFn(run_make_control_vcfs, context, vcf_id, vcf_name, tbi_id,
control_sample,
pos_only = not neg_control_sample,
vcf_subdir = vcf_subdir,
cores=context.config.construct_cores,
memory=context.config.construct_mem,
disk=context.config.construct_disk)
pos_control_vcf_ids.append(make_controls.rv(0))
pos_control_tbi_ids.append(make_controls.rv(1))
neg_control_vcf_ids.append(make_controls.rv(2))
neg_control_tbi_ids.append(make_controls.rv(3))
vcf_base = os.path.basename(remove_ext(remove_ext(vcf_name, '.gz'), '.vcf'))
pos_control_vcf_names.append('{}_{}.vcf.gz'.format(vcf_base, control_sample))
neg_control_vcf_names.append('{}_minus_{}.vcf.gz'.format(vcf_base, control_sample))
if regions:
pos_region_names = [output_name + '_{}'.format(control_sample) + '_' + c.replace(':','-') for c in regions]
neg_region_names = [output_name + '_minus_{}'.format(control_sample) + '_' + c.replace(':','-') for c in regions]
else:
pos_region_names = None
neg_region_names = None
pos_output_name = remove_ext(output_name, '.vg') + '_{}.vg'.format(control_sample)
neg_output_name = remove_ext(output_name, '.vg') + '_minus_{}.vg'.format(control_sample)
if pos_control_sample:
output['pos-control'] = [pos_control_vcf_ids, pos_control_vcf_names, pos_control_tbi_ids,
pos_output_name, pos_region_names]
if neg_control_sample:
output['neg-control'] = [neg_control_vcf_ids, neg_control_vcf_names, neg_control_tbi_ids,
neg_output_name, neg_region_names]
    # For our sample graph, we're going to need to start by making something like the positive control, but
# filtering for phased variants. Note that making the actual graphs from these vcfs is a two step
# process, where a graph is constructed then haplotypes extracted.
if sample_graph:
sample_graph_vcf_ids, sample_graph_tbi_ids = [], []
sample_graph_vcf_names = []
for vcf_id, vcf_name, tbi_id in zip(vcf_ids, vcf_names, tbi_ids):
make_sample = job.addChildJobFn(run_make_control_vcfs, context, vcf_id, vcf_name, tbi_id, sample_graph,
pos_only = True,
vcf_subdir = vcf_subdir,
cores=context.config.construct_cores,
memory=context.config.construct_mem,
disk=context.config.construct_disk)
sample_graph_vcf_ids.append(make_sample.rv(0))
sample_graph_tbi_ids.append(make_sample.rv(1))
vcf_base = os.path.basename(remove_ext(remove_ext(vcf_name, '.gz'), '.vcf'))
sample_graph_vcf_names.append('{}_{}_sample_withref.vcf.gz'.format(vcf_base, sample_graph))
if regions:
sample_graph_region_names = [output_name + '_{}_sample_withref'.format(sample_graph) + '_' + c.replace(':','-') for c in regions]
else:
sample_graph_region_names = None
sample_graph_output_name = remove_ext(output_name, '.vg') + '_{}_sample_withref.vg'.format(sample_graph)
output['sample-graph'] = [sample_graph_vcf_ids, sample_graph_vcf_names, sample_graph_tbi_ids,
sample_graph_output_name, sample_graph_region_names]
# we want a vcf to make a gbwt out of for making haplo graphs
if haplo_sample:
hap_control_vcf_ids, hap_control_tbi_ids = [], []
hap_control_vcf_names = []
for vcf_id, vcf_name, tbi_id in zip(vcf_ids, vcf_names, tbi_ids):
make_controls = job.addChildJobFn(run_make_control_vcfs, context, vcf_id, vcf_name, tbi_id, haplo_sample,
pos_only = True,
vcf_subdir = vcf_subdir,
cores=context.config.construct_cores,
memory=context.config.construct_mem,
disk=context.config.construct_disk)
hap_control_vcf_ids.append(make_controls.rv(0))
hap_control_tbi_ids.append(make_controls.rv(1))
vcf_base = os.path.basename(remove_ext(remove_ext(vcf_name, '.gz'), '.vcf'))
hap_control_vcf_names.append('{}_{}_haplo.vcf.gz'.format(vcf_base, haplo_sample))
if regions:
hap_region_names = [output_name + '_{}_haplo'.format(haplo_sample) + '_' + c.replace(':','-') for c in regions]
else:
hap_region_names = None
hap_output_name = remove_ext(output_name, '.vg') + '_{}_haplo.vg'.format(haplo_sample)
output['haplo'] = [hap_control_vcf_ids, hap_control_vcf_names, hap_control_tbi_ids,
hap_output_name, hap_region_names]
# our family filter
if filter_samples:
filter_vcf_ids, filter_tbi_ids = [], []
filter_vcf_names = []
for vcf_id, vcf_name, tbi_id in zip(vcf_ids, vcf_names, tbi_ids):
filter_job = job.addChildJobFn(run_filter_vcf_samples, context, vcf_id, vcf_name, tbi_id,
filter_samples,
vcf_subdir = vcf_subdir,
cores=context.config.construct_cores,
memory=context.config.construct_mem,
disk=context.config.construct_disk)
filter_vcf_ids.append(filter_job.rv(0))
filter_tbi_ids.append(filter_job.rv(1))
vcf_base = os.path.basename(remove_ext(remove_ext(vcf_name, '.gz'), '.vcf'))
filter_vcf_names.append('{}_filter.vcf.gz'.format(vcf_base))
if regions:
filter_region_names = [output_name + '_filter' + '_' + c.replace(':','-') for c in regions]
else:
filter_region_names = None
filter_output_name = remove_ext(output_name, '.vg') + '_filter.vg'
output['filter'] = [filter_vcf_ids, filter_vcf_names, filter_tbi_ids,
filter_output_name, filter_region_names]
# and one for each minimum allele frequency filter
for min_af in min_afs:
af_vcf_ids, af_tbi_ids = [], []
af_vcf_names = []
for vcf_id, vcf_name, tbi_id in zip(vcf_ids, vcf_names, tbi_ids):
af_job = job.addChildJobFn(run_min_allele_filter_vcf_samples, context, vcf_id, vcf_name, tbi_id,
min_af,
vcf_subdir = vcf_subdir,
cores=context.config.construct_cores,
memory=context.config.construct_mem,
disk=context.config.construct_disk)
af_vcf_ids.append(af_job.rv(0))
af_tbi_ids.append(af_job.rv(1))
vcf_base = os.path.basename(remove_ext(remove_ext(vcf_name, '.gz'), '.vcf'))
af_vcf_names.append('{}_minaf_{}.vcf.gz'.format(vcf_base, min_af))
if regions:
af_region_names = [output_name + '_minaf_{}'.format(min_af) + '_' + c.replace(':','-') for c in regions]
else:
af_region_names = None
af_output_name = remove_ext(output_name, '.vg') + '_minaf_{}.vg'.format(min_af)
output['minaf-{}'.format(min_af)] = [af_vcf_ids, af_vcf_names, af_tbi_ids,
af_output_name, af_region_names]
# pad out vcf lists with nones so they are the same size as regions
    # since we allow fasta regions that don't have a corresponding vcf
    # note: the single vcf, multiple regions case is not handled here as it's
# treated below (the same vcf is given to each region)
if regions and len(regions) > len(vcf_ids) and len(vcf_ids) != 1:
padding = [None] * (len(regions) - len(vcf_ids))
for key, val in list(output.items()):
val[0] += padding
val[1] += padding
val[2] += padding
return output
def run_construct_all(job, context, fasta_ids, fasta_names, vcf_inputs,
max_node_size, alt_paths, flat_alts, handle_svs, regions,
merge_graphs = False, sort_ids = False, join_ids = False,
wanted_indexes = set(),
haplo_extraction_sample = None, haplotypes = [0,1], gbwt_prune = False,
normalize = False, validate = False, alt_regions_id = None,
alt_regions = []):
"""
construct many graphs in parallel, optionally doing indexing too. vcf_inputs
is a list of tuples as created by run_generate_input_vcfs
Returns a list of tuples of the form (vg_ids, vg_names, indexes), where
indexes is the index dict from index type to file ID.
"""
output = []
for name, (vcf_ids, vcf_names, tbi_ids, output_name, region_names) in list(vcf_inputs.items()):
merge_output_name = output_name if merge_graphs or not regions or len(regions) < 2 else None
output_name_base = remove_ext(output_name, '.vg')
# special case that need thread indexes no matter what
haplo_extraction = name in ['haplo', 'sample-graph']
construct_job = job.addChildJobFn(run_construct_genome_graph, context, fasta_ids,
fasta_names, vcf_ids, vcf_names, tbi_ids,
max_node_size, ('gbwt' in wanted_indexes) or haplo_extraction or alt_paths,
flat_alts, handle_svs, regions,
region_names, sort_ids, join_ids, name, merge_output_name,
normalize and name != 'haplo', validate, alt_regions_id)
mapping_id = construct_job.rv('mapping')
# Find the joined VG files, which always exist
joined_vg_ids = construct_job.rv('joined')
# And give them names
joined_vg_names = [remove_ext(i, '.vg') + '.vg' for i in region_names]
if merge_graphs or not regions or len(regions) < 2:
# Sometimes we will have a single VG file also
single_vg_id = construct_job.rv('merged')
single_vg_name = remove_ext(merge_output_name, '.vg') + '.vg'
else:
# But sometimes not
single_vg_id = None
single_vg_name = None
# Now the graphs are ready
if not regions:
chroms = []
gbwt_regions = []
else:
# We have regions specified to restrict to.
# Get the chromosome names
chroms = [p.split(':')[0] for p in regions]
# Make sure we have no more than 1 region per chromosome.
# Otherwise GBWT region restriction will mess things up.
assert(len(chroms) == len(set(chroms)))
# Get the regions that are restrictions smaller than a whole chromosome to hint the GBWT.
# Otherwise running a small region of a big VCF means a very slow GBWT construction step.
gbwt_regions = [p for p in regions if ':' in p]
# strip nones out of vcf list
input_vcf_ids = []
input_tbi_ids = []
if haplo_extraction or ('gbwt' in wanted_indexes):
for vcf_id, tbi_id in zip(vcf_ids, tbi_ids):
if vcf_id and tbi_id:
input_vcf_ids.append(vcf_id)
input_tbi_ids.append(tbi_id)
else:
assert vcf_id == None and tbi_id == None
index_prev_job = construct_job
if haplo_extraction:
haplo_index_job = construct_job.addFollowOnJobFn(run_make_haplo_indexes, context,
input_vcf_ids, input_tbi_ids,
vcf_names, joined_vg_ids, joined_vg_names,
output_name_base, regions, haplo_extraction_sample,
intermediate=merge_graphs)
haplo_xg_ids = haplo_index_job.rv(0)
gbwt_ids = haplo_index_job.rv(1)
if name == 'sample-graph':
# ugly hack to distinguish the graphs with reference and our extracted sample graph
sample_name_base = output_name_base.replace('_withref', '')
sample_merge_output_name = merge_output_name.replace('_withref', '') if merge_output_name else None
region_names = [r.replace('_withref', '') for r in region_names]
# Extract out our real sample graph
sample_job = haplo_index_job.addFollowOnJobFn(run_make_sample_graphs, context,
joined_vg_ids, joined_vg_names,
haplo_xg_ids, sample_name_base, regions,
haplo_extraction_sample, gbwt_ids)
# Put them back together again with a no-op join, producing
# many graphs again and maybe a single merged graph with the
# reference removed.
join_job = sample_job.addFollowOnJobFn(run_join_graphs, context, sample_job.rv(),
False, region_names, name, sample_merge_output_name,
cores=context.config.construct_cores,
memory=context.config.construct_mem,
disk=context.config.construct_disk)
# Want to keep a whole-genome withref xg index around for mapeval purposes
if len(regions) > 1 and ('xg' in wanted_indexes):
                    wanted = {'xg'}
construct_job.addFollowOnJobFn(run_indexing, context, joined_vg_ids,
joined_vg_names, output_name_base, chroms, [], [],
wanted=wanted)
index_prev_job = join_job
# In the indexing step below, we want to index our haplo-extracted sample graph
# So replace the withref graph IDs and names with these
# Find the joined VG files, which always exist
joined_vg_ids = join_job.rv('joined')
# And give them names
joined_vg_names = [n.replace('_withref', '') for n in joined_vg_names]
if sample_merge_output_name:
# We expect a single output graph too
single_vg_id = join_job.rv('merged')
single_vg_name = remove_ext(sample_merge_output_name, '.vg') + '.vg'
else:
# No single merged graph
single_vg_id = None
single_vg_name = None
output_name_base = sample_name_base
elif name == 'haplo':
assert haplo_extraction_sample is not None
haplo_job = haplo_index_job.addFollowOnJobFn(run_make_haplo_graphs, context,
joined_vg_ids, joined_vg_names, haplo_xg_ids,
output_name_base, regions,
haplo_extraction_sample, haplotypes, gbwt_ids,
intermediate = merge_graphs)
# we want an xg index from our thread graphs to pass to vg sim for each haplotype
for haplotype in haplotypes:
haplo_xg_job = haplo_job.addFollowOnJobFn(run_xg_indexing, context, haplo_job.rv(haplotype),
joined_vg_names,
output_name_base + '_thread_{}'.format(haplotype),
include_alt_paths = 'xg_alts' in wanted_indexes,
cores=context.config.xg_index_cores,
memory=context.config.xg_index_mem,
disk=context.config.xg_index_disk)
# some indexes should never get built for haplo/sample graphs.
# So work out what indexes to build.
wanted = set(wanted_indexes)
if name == 'haplo':
wanted.discard('gcsa')
if haplo_extraction:
wanted.discard('snarls')
wanted.discard('trivial_snarls')
wanted.discard('gbwt')
indexing_job = index_prev_job.addFollowOnJobFn(run_indexing, context, joined_vg_ids,
joined_vg_names, output_name_base, chroms,
input_vcf_ids if ('gbwt' in wanted) else [],
input_tbi_ids if ('gbwt' in wanted) else [],
node_mapping_id=mapping_id,
wanted=wanted,
gbwt_prune=gbwt_prune and 'gbwt' in wanted,
gbwt_regions=gbwt_regions,
dont_restore_paths=alt_regions)
indexes = indexing_job.rv()
output.append((joined_vg_ids, joined_vg_names, indexes))
return output
def run_construct_genome_graph(job, context, fasta_ids, fasta_names, vcf_ids, vcf_names, tbi_ids,
max_node_size, alt_paths, flat_alts, handle_svs, regions, region_names,
sort_ids, join_ids, name, merge_output_name, normalize, validate, alt_regions_id):
"""
Construct graphs from one or more FASTA files and zero or more VCFs.
If regions and region_names are set, constructs only for the specified
regions, and constructs one graph per region. Otherwise, constructs one
graph overall on a single default region.
If merge_output_name is set, merges all constructed graphs together and
outputs them under that name. Otherwise, outputs each graph constructed
under its own name, but in a unified ID space.
Returns a dict containing:
'joined': a list of the unmerged, id-joined graph file IDs for each region.
'merged': the merged graph file ID, if merge_output_name is set, or the
only graph, if there is only one. None otherwise.
'mapping': the file ID of the .mapping file produced by `vg ids --join`, if
id joining had to happen. None otherwise.
"""
# encapsulate follow-on
child_job = Job()
job.addChild(child_job)
work_dir = job.fileStore.getLocalTempDir()
if not regions:
regions, region_names = [None], ['genome']
region_graph_ids = []
for i, (region, region_name) in enumerate(zip(regions, region_names)):
if not vcf_ids or (len(vcf_ids) > 1 and i >= len(vcf_ids)):
# no vcf for region
vcf_id = None
tbi_id = None
vcf_name = None
elif len(vcf_ids) == 1:
# special case: 1 vcf given, so assumed for all regions
vcf_id = vcf_ids[0]
tbi_id = tbi_ids[0]
vcf_name = vcf_names[0]
else:
# one vcf per region
vcf_id = vcf_ids[i]
tbi_id = tbi_ids[i]
vcf_name = vcf_names[i]
fasta_id = fasta_ids[0] if len(fasta_ids) == 1 else fasta_ids[i]
fasta_name = fasta_names[0] if len(fasta_names) == 1 else fasta_names[i]
construct_region_job = child_job.addChildJobFn(run_construct_region_graph, context,
fasta_id, fasta_name,
vcf_id, vcf_name, tbi_id, region, region_name,
max_node_size, alt_paths, flat_alts, handle_svs,
# todo: bump as command line option?
# also, needed if we update vg docker image?
is_chrom=not region or ':' not in region,
sort_ids=sort_ids,
normalize=normalize,
validate=validate,
cores=context.config.construct_cores,
memory=context.config.construct_mem,
disk=context.config.construct_disk)
if alt_regions_id:
region_graph_ids.append(construct_region_job.addFollowOnJobFn(run_msga, context, region_name + '.vg',
construct_region_job.rv(),
fasta_id,
alt_regions_id,
region,
normalize=normalize,
max_node_size=max_node_size,
validate=validate,
cores=context.config.alignment_cores,
memory=context.config.alignment_mem,
disk=context.config.alignment_disk).rv())
else:
region_graph_ids.append(construct_region_job.rv())
return child_job.addFollowOnJobFn(run_join_graphs, context, region_graph_ids, join_ids,
region_names, name, merge_output_name,
cores=context.config.construct_cores,
memory=context.config.construct_mem,
disk=context.config.construct_disk).rv()
def run_join_graphs(job, context, region_graph_ids, join_ids, region_names, name, merge_output_name = None):
"""
Join the ids of some graphs. If a merge_output_name is given, merge the
graph files all together as well.
Saves the unmerged, id-joined graphs, or the single merged graph if its
name is given, to the output store. Also saves the node mapping file,
produced from the `vg ids --join` call, to the output store.
Skips doing any joining or merging if there is only one input graph.
If join_ids is false, assumes the input graphs are already id-joined, and
passes them through, merging if requested.
Returns a dict containing:
'joined': a list of the unmerged, id-joined graph file IDs (or the input
graph(s) re-uploaded as output files if no joining occurred)
'merged': the merged graph file ID, if merging occurred, or the only input
graph ID, if there was only one. None otherwise.
'mapping': the file ID of the .mapping file produced by `vg ids --join`, if
run. None otherwise.
"""
work_dir = job.fileStore.getLocalTempDir()
# Download graph for each region.
# To keep command line lengths short we name the files by numbers.
region_files = []
for number, (region_graph_id, region_name) in enumerate(zip(region_graph_ids, region_names)):
region_file = '{}.vg'.format(number)
job.fileStore.readGlobalFile(region_graph_id, os.path.join(work_dir, region_file), mutable=True)
region_files.append(region_file)
if merge_output_name:
merge_output_name = remove_ext(merge_output_name, '.vg') + '.vg'
# This is our return value. Initialize it as empty but with all the keys
# set to make asking for things with .rv() easier.
to_return = {
'joined': [],
'merged': None,
'mapping': None
}
if join_ids and len(region_files) != 1:
# The graphs aren't pre-joined, and we have more than one.
# Do the actual joining
mapping_file = merge_output_name[:-3] if merge_output_name else name
mapping_file = os.path.join(work_dir, mapping_file + '.mapping')
# join the ids
cmd = ['vg', 'ids', '--join', '--mapping', os.path.basename(mapping_file)] + region_files
context.runner.call(job, cmd, work_dir=work_dir)
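# (Added note) vg ids --join rewrites the node IDs of the listed graphs in
# place so they share one non-overlapping ID space, and records the
# translation in the .mapping file saved below.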
# save the mapping file
to_return['mapping'] = context.write_intermediate_file(job, mapping_file)
if merge_output_name is not None:
# We want a single merged output file, so merge the graphs that we now know are in a joined ID space.
# Make sure we aren't writing to an input file
assert merge_output_name not in region_files
# Run vg to combine into that file
cmd = ['vg', 'combine'] + region_files
with open(os.path.join(work_dir, merge_output_name), 'wb') as merge_file:
context.runner.call(job, cmd, work_dir=work_dir, outfile = merge_file)
# And write the merged graph as an output file
to_return['merged'] = context.write_output_file(job, os.path.join(work_dir, merge_output_name))
if join_ids and len(region_files) != 1:
# If we do all the merging, and we made new joined graphs, write the joined graphs as intermediates
to_return['joined'] = [context.write_intermediate_file(job, os.path.join(work_dir, f)) for f in region_files]
else:
# We can just pass through the existing intermediate files without re-uploading
to_return['joined'] = region_graph_ids
else:
# No merging happened, so the id-joined files need to be output files.
# We assume they came in as intermediate files, even if we didn't join them.
# So we definitely have to write them.
to_return['joined'] = [context.write_output_file(job, os.path.join(work_dir, f)) for f in region_files]
return to_return
def run_construct_region_graph(job, context, fasta_id, fasta_name, vcf_id, vcf_name, tbi_id,
region, region_name, max_node_size, alt_paths, flat_alts, handle_svs,
is_chrom = False, sort_ids = True, normalize = False, validate = False):
"""
Construct a graph from the vcf for a given region and return its file id.
If is_chrom is set, pass along that fact to the constructor so it doesn't
try to pass a region out of the chromosome name.
If sort_ids is set (the default), do a sort pass after construction to make
sure the IDs come in topological order.
If normalize is set, try to normalize the graph and merge splits that
produce identical sequences.
If validate is true, subject the graph to a `vg validate` pass after
construct. This is off by default because vg currently does internal
validation during construction.
"""
work_dir = job.fileStore.getLocalTempDir()
# Download input files
fasta_file = os.path.join(work_dir, os.path.basename(fasta_name))
job.fileStore.readGlobalFile(fasta_id, fasta_file)
if vcf_id:
vcf_file = os.path.join(work_dir, os.path.basename(vcf_name))
job.fileStore.readGlobalFile(vcf_id, vcf_file)
job.fileStore.readGlobalFile(tbi_id, vcf_file + '.tbi')
cmd = ['vg', 'construct', '--reference', os.path.basename(fasta_file)]
if vcf_id:
cmd += ['--vcf', os.path.basename(vcf_file)]
if region:
cmd += ['--region', region]
if is_chrom:
cmd += ['--region-is-chrom']
if max_node_size:
cmd += ['--node-max', max_node_size]
if alt_paths:
cmd += ['--alt-paths']
if flat_alts:
cmd += ['--flat-alts']
if handle_svs:
cmd += ['--handle-sv']
if job.cores:
cmd += ['--threads', job.cores]
if normalize or sort_ids:
cmd = [cmd]
if normalize:
cmd.append(['vg', 'mod', '--until-normal', str(context.config.normalize_iterations), '-'])
# could be done in a single mod command, but wary of being sensitive to the order of operations
cmd.append(['vg', 'mod', '--chop', str(max_node_size), '-'])
if sort_ids:
cmd.append(['vg', 'ids', '--sort', '-'])
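# (Added note, a sketch of what happens here) when normalize or sort_ids is
# set, the construct command becomes a pipeline (list of lists): vg mod and
# vg ids read from stdin, and only the final graph in the chain is written
# to vg_path below.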
vg_path = os.path.join(work_dir, region_name)
try:
with open(vg_path, 'wb') as vg_file:
context.runner.call(job, cmd, work_dir = work_dir, outfile = vg_file)
except:
# Dump everything we need to replicate the construction
logging.error("Construction failed. Dumping files.")
context.write_output_file(job, fasta_file)
if vcf_id:
context.write_output_file(job, vcf_file)
context.write_output_file(job, vcf_file + '.tbi')
raise
if validate:
# Check the constructed and possibly modified graph for errors
context.runner.call(job, ['vg', 'validate', os.path.basename(vg_path)], work_dir = work_dir)
return context.write_intermediate_file(job, vg_path)
def run_filter_vcf_samples(job, context, vcf_id, vcf_name, tbi_id, samples, vcf_subdir = None):
"""
Use bcftools to remove all variants specific to a set of samples.
Keep all the sample data in the VCF except that for the sample that was removed.
"""
if not samples:
# We can exclude nothing with a no-op
return vcf_id, tbi_id
work_dir = job.fileStore.getLocalTempDir()
# Download the original VCF
vcf_file = os.path.join(work_dir, os.path.basename(vcf_name))
job.fileStore.readGlobalFile(vcf_id, vcf_file)
job.fileStore.readGlobalFile(tbi_id, vcf_file + '.tbi')
vcf_base = os.path.basename(remove_ext(remove_ext(vcf_name, '.gz'), '.vcf'))
# Where will the final filtered VCF go?
filter_vcf_name = '{}_filter.vcf.gz'.format(vcf_base)
# What intermediate VCF will we use for variants to drop?
private_vcf_name = '{}_private.vcf.gz'.format(vcf_base)
# Make a VCF with only the variants for the sample we want gone
# TODO: if none of the samples listed are present, we get *all* variants instead of no variants.
# Then we proceed to remove all the variants in the isec step.
# Can we detect/avoid this?
cmd = ['bcftools', 'view', os.path.basename(vcf_file), '--private',
'--samples', ','.join(samples), '--force-samples', '--output-type', 'z']
with open(os.path.join(work_dir, private_vcf_name), 'wb') as out_file:
context.runner.call(job, cmd, work_dir = work_dir, outfile = out_file)
# bcftools isec demands indexed input, so index the intermediate file.
context.runner.call(job, ['tabix', '-f', '-p', 'vcf', private_vcf_name],
work_dir=work_dir)
# Now make a VCF that excludes those variants and also excludes the filtered-out samples.
# We subtract the private variants from the original VCF, and then remove the samples we're excluding.
cmd = [['bcftools', 'isec', '--complement', os.path.basename(vcf_file), os.path.basename(private_vcf_name),
'--write', '1'],
['bcftools', 'view', '-', '--samples', '^' + (','.join(samples)), '--trim-alt-alleles',
'--force-samples', '--output-type', 'z']]
with open(os.path.join(work_dir, filter_vcf_name), 'wb') as out_file:
context.runner.call(job, cmd, work_dir = work_dir, outfile = out_file)
if vcf_subdir:
write_fn = lambda x: context.write_output_file(job, x, out_store_path = os.path.join(vcf_subdir, os.path.basename(x)))
else:
write_fn = lambda x: context.write_intermediate_file(job, x)
# Upload the final VCF
out_vcf_id = write_fn(os.path.join(work_dir, filter_vcf_name))
# Index it
context.runner.call(job, ['tabix', '-f', '-p', 'vcf', filter_vcf_name],
work_dir=work_dir)
# And upload the index
out_tbi_id = write_fn(os.path.join(work_dir, filter_vcf_name) + '.tbi')
return out_vcf_id, out_tbi_id
def run_make_control_vcfs(job, context, vcf_id, vcf_name, tbi_id, sample, pos_only = False,
vcf_subdir = None, no_filter_if_sample_not_found = False):
""" make a positive and negative control vcf
The positive control has only variants in the sample, the negative
control has only variants not in the sample
"""
assert sample is not None
work_dir = job.fileStore.getLocalTempDir()
vcf_file = os.path.join(work_dir, os.path.basename(vcf_name))
job.fileStore.readGlobalFile(vcf_id, vcf_file)
job.fileStore.readGlobalFile(tbi_id, vcf_file + '.tbi')
# In some cases, our sample may be missing from a chromosome (ex NA12878 from Y in 1000 Genomes)
# bcftools -s won't work so we handle here as a special case, assuming no sample means no variants
cmd = ['bcftools', 'query', '--list-samples', os.path.basename(vcf_file)]
found_samples = context.runner.call(job, cmd, work_dir=work_dir, check_output=True)
found_sample = sample in found_samples.decode().strip().split('\n')
# Hacky interface to not do anything if we can't find the sample.
# By default, we'd return an empty VCF in this case
if not found_sample and no_filter_if_sample_not_found:
return vcf_id, tbi_id, vcf_id, tbi_id
# filter down to sample in question
cmd = [['bcftools', 'view', os.path.basename(vcf_file), '--samples', sample, '--trim-alt-alleles']]
if found_sample:
# remove anything that's not alt (probably cleaner way to do this)
gfilter = 'GT="0" || GT="0|0" || GT="0/0"'
gfilter += ' || GT="." || GT=".|." || GT="./."'
gfilter += ' || GT=".|0" || GT="0/."'
gfilter += ' || GT="0|." || GT="./0"'
cmd.append(['bcftools', 'view', '-', '--output-type', 'z', '--exclude', gfilter])
else:
# if the sample isn't in the vcf, then there are no variants of interest, so
# we report a header field without any samples
cmd[0] += ['--force-samples', '--header-only', '--output-type', 'z']
out_pos_name = remove_ext(remove_ext(os.path.basename(vcf_name), '.gz'), '.vcf')
out_neg_name = out_pos_name + '_minus_{}.vcf.gz'.format(sample)
out_pos_name += '_{}.vcf.gz'.format(sample)
with open(os.path.join(work_dir, out_pos_name), 'wb') as out_file:
context.runner.call(job, cmd, work_dir=work_dir, outfile = out_file)
context.runner.call(job, ['tabix', '--force', '--preset', 'vcf', out_pos_name], work_dir=work_dir)
# we don't write vcfs to the output store unless we have a subdir to dump them in
if vcf_subdir:
def write_fn(local_path, out_store_path = None):
os_name = os.path.basename(local_path) if not out_store_path else os.path.basename(out_store_path)
return context.write_output_file(job, local_path, os.path.join(vcf_subdir, os_name))
else:
def write_fn(local_path, out_store_path = None):
return context.write_intermediate_file(job, local_path)
pos_control_vcf_id = write_fn(os.path.join(work_dir, out_pos_name))
pos_control_tbi_id = write_fn(os.path.join(work_dir, out_pos_name + '.tbi'))
if pos_only:
return pos_control_vcf_id, pos_control_tbi_id, None, None
# subtract the positive control to make the negative control
cmd = ['bcftools', 'isec', os.path.basename(vcf_file), out_pos_name, '-p', 'isec', '-O', 'z']
context.runner.call(job, cmd, work_dir=work_dir)
context.runner.call(job, ['tabix', '--force', '--preset', 'vcf', 'isec/0000.vcf.gz'], work_dir=work_dir)
neg_control_vcf_id = write_fn(os.path.join(work_dir, 'isec', '0000.vcf.gz'), out_store_path = out_neg_name)
neg_control_tbi_id = write_fn(os.path.join(work_dir, 'isec', '0000.vcf.gz.tbi'), out_store_path = out_neg_name + '.tbi')
return pos_control_vcf_id, pos_control_tbi_id, neg_control_vcf_id, neg_control_tbi_id
def run_min_allele_filter_vcf_samples(job, context, vcf_id, vcf_name, tbi_id, min_af, vcf_subdir = None):
"""
filter a vcf by allele frequency using bcftools --min-af
"""
if not min_af:
return vcf_id, tbi_id
work_dir = job.fileStore.getLocalTempDir()
vcf_file = os.path.join(work_dir, os.path.basename(vcf_name))
job.fileStore.readGlobalFile(vcf_id, vcf_file)
job.fileStore.readGlobalFile(tbi_id, vcf_file + '.tbi')
vcf_base = os.path.basename(remove_ext(remove_ext(vcf_name, '.gz'), '.vcf'))
af_vcf_name = '{}_minaf_{}.vcf.gz'.format(vcf_base, min_af)
cmd = ['bcftools', 'view', '--min-af', min_af, '-O', 'z', os.path.basename(vcf_file)]
with open(os.path.join(work_dir, af_vcf_name), 'wb') as out_file:
context.runner.call(job, cmd, work_dir = work_dir, outfile=out_file)
if vcf_subdir:
write_fn = lambda x: context.write_output_file(job, x, out_store_path = os.path.join(vcf_subdir, os.path.basename(x)))
else:
write_fn = lambda x: context.write_intermediate_file(job, x)
out_vcf_id = write_fn(os.path.join(work_dir, af_vcf_name))
context.runner.call(job, ['tabix', '-f', '-p', 'vcf', af_vcf_name],
work_dir=work_dir)
out_tbi_id = write_fn(os.path.join(work_dir, af_vcf_name) + '.tbi')
return out_vcf_id, out_tbi_id
def run_make_haplo_indexes(job, context, vcf_ids, tbi_ids, vcf_names, vg_ids, vg_names,
output_name, regions, sample, intermediate = False):
"""
return xg/gbwt for each chromosome for extracting haplotype thread graphs
(to simulate from) or sample graphs (as positive control)
"""
assert(sample is not None)
# make sure we're only dealing with chrom names (should probably be an error otherwise)
chroms = [region[0:region.find(':')] if ':' in region else region for region in regions]
# Drop Nones from the VCF names; for some reason it is getting padded with Nones.
# TODO: Work out where/why that is happening and stop it.
vcf_names = [v for v in vcf_names if v is not None]
logger.debug('Making gbwt for {} vgs, {} chroms, {} vcfs, {} tbis, and {} vcf names'.format(
len(vg_ids), len(chroms), len(vcf_ids), len(tbi_ids), len(vcf_names)))
# validate options should enforce this but check to be sure assumptions met to avoid
# returning nonsense
assert len(vg_ids) == len(regions)
assert len(vcf_ids) == 1 or len(vcf_ids) <= len(regions)
assert len(tbi_ids) == len(vcf_ids)
assert len(vcf_names) == len(vcf_ids)
logger.info('Making gbwt for chromosomes {}'.format(chroms))
xg_ids = []
gbwt_ids = []
for i, (vg_id, vg_name, region) in enumerate(zip(vg_ids, vg_names, chroms)):
if len(vcf_names) == 1:
# One VCF for all contigs
vcf_name = vcf_names[0]
vcf_id = vcf_ids[0]
tbi_id = tbi_ids[0]
elif i < len(vcf_names):
# One VCF for this contig
vcf_name = vcf_names[i]
vcf_id = vcf_ids[i]
tbi_id = tbi_ids[i]
else:
# No VCF for this contig
vcf_name = None
vcf_id = None
tbi_id = None
# index the graph and vcf to make the gbwt
xg_name = remove_ext(vg_name, '.vg')
xg_job = job.addChildJobFn(run_xg_indexing, context, [vg_id], [vg_name],
xg_name, vcf_id, tbi_id,
make_gbwt=True,
intermediate=intermediate,
cores=context.config.xg_index_cores,
memory=context.config.xg_index_mem,
disk=context.config.xg_index_disk)
xg_ids.append(xg_job.rv(0))
gbwt_ids.append(xg_job.rv(1))
return xg_ids, gbwt_ids
def run_make_haplo_graphs(job, context, vg_ids, vg_names, xg_ids,
output_name, regions, sample, haplotypes, gbwt_ids,
intermediate = False):
"""
Make some haplotype graphs for threads in a gbwt. regions must be defined
since we use the chromosome name to get the threads. Also, gbwt_ids must be
specified (one genome gbwt or one per region).
Returns a list of haplotypes, where each haplotype is a list of vg graphs subset to that haplotype.
"""
assert(sample is not None)
# ith element will be a list of graphs (1 list / region) for haplotype i
thread_vg_ids = []
for h in haplotypes:
thread_vg_ids.append([])
# make sure we're only dealing with chrom names (should probably be an error otherwise)
chroms = [region[0:region.find(':')] if ':' in region else region for region in regions]
# validate options should enforce this but check to be sure assumptions met to avoid
# returning nonsense
assert len(vg_ids) == len(regions)
logger.info('Making haplo graphs for chromosomes {}'.format(chroms))
for i, (vg_id, vg_name, region, xg_id) in enumerate(zip(vg_ids, vg_names, chroms, xg_ids)):
# make a thread graph from the xg
assert not gbwt_ids or len(gbwt_ids) in [1, len(xg_ids)]
# support whole-genome or chromosome gbwts
gbwt_id = None if not gbwt_ids else gbwt_ids[0] if len(gbwt_ids) == 1 else gbwt_ids[i]
hap_job = job.addChildJobFn(run_make_haplo_thread_graphs, context, vg_id, vg_name,
output_name, [region], xg_id, sample, haplotypes, gbwt_id,
intermediate = intermediate,
cores=context.config.construct_cores,
memory=context.config.construct_mem,
disk=context.config.construct_disk)
for j in range(len(haplotypes)):
thread_vg_ids[j].append(hap_job.rv(j))
return thread_vg_ids
def run_make_haplo_thread_graphs(job, context, vg_id, vg_name, output_name, chroms, xg_id,
sample, haplotypes, gbwt_id, intermediate = False):
"""
make some haplotype graphs for threads in a gbwt
If there are no haplotypes in the gbwt, passes through the portion of the input graph covered by paths.
"""
work_dir = job.fileStore.getLocalTempDir()
xg_path = os.path.join(work_dir, vg_name[:-3] + '.xg')
job.fileStore.readGlobalFile(xg_id, xg_path)
vg_path = os.path.join(work_dir, vg_name)
job.fileStore.readGlobalFile(vg_id, vg_path)
if gbwt_id:
gbwt_path = os.path.join(work_dir, vg_name[:-3] + '.gbwt')
job.fileStore.readGlobalFile(gbwt_id, gbwt_path)
# Check if there are any threads in the index
# TODO: Won't be useful if the index covers multiple contigs because we aren't indexing one contig graph at a time.
try:
thread_count = int(context.runner.call(job,
[['vg', 'paths', '--list', '--gbwt', os.path.basename(gbwt_path)],
['wc', '-l']], work_dir = work_dir, check_output = True))
except:
# TODO: vg paths really needs to be fixed to be able to check for 0 threads without failing
RealtimeLogger.warning("No GBWT threads found in {}. Using reference path for haplotype extraction".format(
os.path.basename(gbwt_path)))
thread_count = 0
else:
# No gbwt means no threads
thread_count = 0
thread_vg_ids = []
for hap in haplotypes:
# This can't work if the sample is None and we want any haplotypes
assert(sample is not None)
try:
# Work out a tag for this graph, depending on whether it belongs to one chromosome or not
tag = '_{}'.format(chroms[0]) if len(chroms) == 1 else ''
if thread_count == 0:
# We have no haplotype data on this contig. This is something
# like chrM, and we want to pass through the ref version.
vg_with_thread_as_path_path = vg_path
else:
# We know we have haplotype data on this contig.
# Pull out the graph with just the haplotype thread as the only path to vg_with_thread_as_path_path
# To accomplish this we now need to make sure to use vg combine
# to combine the path-only vg Protobuf and the actual graph. So
# first get them in different files.
base_graph_filename = '{}{}_thread_{}_base.vg'.format(output_name, tag, hap)
# strip paths from our original graph
cmd = ['vg', 'paths', '-d', '-v', os.path.basename(vg_path)]
with open(os.path.join(work_dir, base_graph_filename), 'wb') as out_file:
context.runner.call(job, cmd, work_dir = work_dir, outfile = out_file)
path_graph_filename = '{}{}_thread_{}_path.vg'.format(output_name, tag, hap)
# get haplotype thread paths from the gbwt
cmd = ['vg', 'paths', '--gbwt', os.path.basename(gbwt_path), '--extract-vg', '-x', os.path.basename(xg_path)]
for chrom in chroms:
cmd += ['-q', '_thread_{}_{}_{}'.format(sample, chrom, hap)]
with open(os.path.join(work_dir, path_graph_filename), 'wb') as out_file:
context.runner.call(job, cmd, work_dir = work_dir, outfile = out_file)
# Now combine the two files, adding the paths to the graph
vg_with_thread_as_path_path = os.path.join(work_dir, '{}{}_thread_{}_merge.vg'.format(output_name, tag, hap))
logger.info('Creating thread graph {}'.format(vg_with_thread_as_path_path))
cmd = ['vg', 'combine', base_graph_filename, path_graph_filename]
with open(vg_with_thread_as_path_path, 'wb') as out_file:
context.runner.call(job, cmd, work_dir = work_dir, outfile = out_file)
# Now delete the intermediates
os.unlink(os.path.join(work_dir, base_graph_filename))
os.unlink(os.path.join(work_dir, path_graph_filename))
# Now trim the graph vg_with_thread_as_path_path into vg_trimmed_path, dropping anything not covered by a path
vg_trimmed_path = os.path.join(work_dir, '{}{}_thread_{}.vg'.format(output_name, tag, hap))
logger.info('Creating trimmed thread graph {}'.format(vg_trimmed_path))
with open(vg_trimmed_path, 'wb') as trimmed_file:
# Then we trim out anything other than our thread path
cmd = [['vg', 'mod', '-N', os.path.basename(vg_with_thread_as_path_path)]]
# And get rid of our thread paths since they take up lots of space when re-indexing
filter_cmd = ['vg', 'paths', '-v', '-']
for chrom in chroms:
filter_cmd += ['--retain-paths', chrom]
cmd.append(filter_cmd)
context.runner.call(job, cmd, work_dir = work_dir, outfile = trimmed_file)
write_fn = context.write_intermediate_file if intermediate else context.write_output_file
thread_vg_ids.append(write_fn(job, vg_trimmed_path))
except:
# Dump everything we need to replicate the thread extraction
logging.error("Thread extraction failed. Dumping files.")
context.write_output_file(job, vg_path)
context.write_output_file(job, xg_path)
if gbwt_id:
context.write_output_file(job, gbwt_path)
raise
logger.info('Got {} thread file IDs'.format(len(thread_vg_ids)))
return thread_vg_ids
def run_make_sample_graphs(job, context, vg_ids, vg_names, xg_ids,
output_name, regions, sample, gbwt_ids):
"""
Make some sample graphs for threads in a gbwt. regions must be defined
since we use the chromosome name to get the threads. Also, gbwt_ids must be
specified (one genome gbwt or one per region).
"""
assert(sample is not None)
# ith element will be a sample graph for region i
sample_vg_ids = []
# make sure we're only dealing with chrom names (should probably be an error otherwise)
chroms = [region[0:region.find(':')] if ':' in region else region for region in regions]
# validate options should enforce this but check to be sure assumptions met to avoid
# returning nonsense
assert len(vg_ids) == len(regions)
logger.info('Making sample graphs for chromosomes {}'.format(chroms))
for i, (vg_id, vg_name, region, xg_id) in enumerate(zip(vg_ids, vg_names, chroms, xg_ids)):
# make a thread graph from the xg
assert not gbwt_ids or len(gbwt_ids) in [1, len(xg_ids)]
# support whole-genome or chromosome gbwts
gbwt_id = gbwt_ids[0] if len(gbwt_ids) == 1 else gbwt_ids[i]
hap_job = job.addChildJobFn(run_make_sample_region_graph, context, vg_id, vg_name,
output_name, region, xg_id, sample, [0,1], gbwt_id,
cores=context.config.construct_cores,
memory=context.config.construct_mem,
disk=context.config.construct_disk)
sample_vg_ids.append(hap_job.rv())
return sample_vg_ids
def run_make_sample_region_graph(job, context, vg_id, vg_name, output_name, chrom, xg_id,
sample, haplotypes, gbwt_id, leave_thread_paths=True, validate=True):
"""
make a sample graph using the gbwt.
Extract the subgraph visited by threads for the requested sample, if it is nonempty.
Otherwise (for cases like chrM where there are no variant calls and no threads) pass through
the primary path of the graph.
If validate is True (the default), makes sure the final graph passes
`vg validate` before sending it on.
"""
# This can't work if the sample is None and we want any haplotypes
assert(sample is not None)
work_dir = job.fileStore.getLocalTempDir()
xg_path = os.path.join(work_dir, vg_name[:-3] + '.xg')
job.fileStore.readGlobalFile(xg_id, xg_path)
vg_path = os.path.join(work_dir, vg_name)
job.fileStore.readGlobalFile(vg_id, vg_path)
gbwt_path = os.path.join(work_dir, vg_name[:-3] + '.gbwt')
if gbwt_id:
job.fileStore.readGlobalFile(gbwt_id, gbwt_path)
# Check if there are any threads in the index
assert(gbwt_id)
thread_count = int(context.runner.call(job,
[['vg', 'paths', '--threads', '--list', '--gbwt', os.path.basename(gbwt_path), '-x', os.path.basename(xg_path)],
['wc', '-l']], work_dir = work_dir, check_output = True))
if thread_count == 0:
# There are no threads in our GBWT index (it is empty).
# This means that we have no haplotype data for this graph.
# This means the graph's contigs probably should be included,
# at least in their reference versions, in all graphs.
# Use the whole graph as our "extracted" graph, which we
# will then pare down to the part covered by paths (i.e. the primary path)
extract_graph_path = vg_path
else:
# We have actual thread data for the graph. Go extract the relevant threads.
extract_graph_path = os.path.join(work_dir, '{}_{}_extract.vg'.format(output_name, chrom))
logger.info('Creating sample extraction graph {}'.format(extract_graph_path))
with open(extract_graph_path, 'wb') as extract_graph_file:
# strip paths from our original graph
cmd = ['vg', 'paths', '-d', '-v', os.path.basename(vg_path)]
context.runner.call(job, cmd, work_dir = work_dir, outfile = extract_graph_file)
for hap in haplotypes:
# get haplotype thread paths from the index
if gbwt_id:
cmd = ['vg', 'paths', '--gbwt', os.path.basename(gbwt_path), '--extract-vg']
else:
cmd = ['vg', 'find']
cmd += ['-x', os.path.basename(xg_path)]
cmd += ['-q', '_thread_{}_{}_{}'.format(sample, chrom, hap)]
context.runner.call(job, cmd, work_dir = work_dir, outfile = extract_graph_file)
sample_graph_path = os.path.join(work_dir, '{}_{}.vg'.format(output_name, chrom))
logger.info('Creating sample graph {}'.format(sample_graph_path))
with open(sample_graph_path, 'wb') as sample_graph_file:
# Then we trim out anything other than our thread paths
cmd = [['vg', 'mod', '-N', os.path.basename(extract_graph_path)]]
if not leave_thread_paths:
cmd.append(['vg', 'paths', '-v', '-', '-d'])
context.runner.call(job, cmd, work_dir = work_dir, outfile = sample_graph_file)
if validate:
# Make sure that the resulting graph passes validation before returning it.
# This is another whole graph load and so will take a while.
context.runner.call(job, ['vg', 'validate', os.path.basename(sample_graph_path)], work_dir = work_dir)
sample_vg_id = context.write_intermediate_file(job, sample_graph_path)
return sample_vg_id
def construct_main(context, options):
"""
Wrapper for vg constructing.
"""
# check some options
validate_construct_options(options)
# How long did it take to run the entire pipeline, in seconds?
run_time_pipeline = None
# Mark when we start the pipeline
start_time_pipeline = timeit.default_timer()
# Merge up all filter samples
filter_samples = []
if options.filter_samples:
filter_samples += options.filter_samples
if options.filter_ceph:
filter_samples += CEPH_SAMPLES
filter_samples = list(set(filter_samples))
with context.get_toil(options.jobStore) as toil:
if not toil.options.restart:
importer = AsyncImporter(toil)
# Upload local files to the remote IO Store
inputFastaFileIDs = [importer.load(fasta) for fasta in options.fasta]
inputFastaNames = [os.path.basename(fasta) for fasta in options.fasta]
inputVCFFileIDs = []
inputVCFNames = []
inputTBIFileIDs = []
for vcf_batch in options.vcf:
inputVCFFileIDs.append([importer.load(make_url(vcf)) for vcf in vcf_batch.split(',')])
inputVCFNames.append([os.path.basename(vcf) for vcf in vcf_batch.split(',')])
inputTBIFileIDs.append([importer.load(make_url(vcf + '.tbi'), wait_on = inputVCFFileIDs[-1][i]) \
for i, vcf in enumerate(vcf_batch.split(','))])
inputBWAFastaID=None
if options.bwa_reference:
inputBWAFastaID = importer.load(options.bwa_reference)
inputRegionsFileID = None
if options.regions_file:
inputRegionsFileID = importer.load(options.regions_file)
alt_regions_id = importer.load(options.alt_regions_bed) if options.alt_regions_bed else None
importer.wait()
inputFastaFileIDs = importer.resolve(inputFastaFileIDs)
inputVCFFileIDs = importer.resolve(inputVCFFileIDs)
inputTBIFileIDs = importer.resolve(inputTBIFileIDs)
inputBWAFastaID = importer.resolve(inputBWAFastaID)
inputRegionsFileID = importer.resolve(inputRegionsFileID)
alt_regions_id = importer.resolve(alt_regions_id)
# We only support one haplotype extraction sample (enforced by validate) despite what the CLI implies
haplo_extraction_sample = options.haplo_sample if options.haplo_sample else options.sample_graph
# Init the outstore
init_job = Job.wrapJobFn(run_write_info_to_outstore, context, sys.argv,
memory=context.config.misc_mem,
disk=context.config.misc_disk)
# Current job in follow-on chain
cur_job = init_job
# Unzip the fasta
for i, fasta in enumerate(options.fasta):
if fasta.endswith('.gz'):
inputFastaFileIDs[i] = init_job.addChildJobFn(run_unzip_fasta, context, inputFastaFileIDs[i],
os.path.basename(fasta),
disk=context.config.construct_disk).rv()
inputFastaNames[i] = inputFastaNames[i][:-3]
# Mask out ambiguous bases
if options.mask_ambiguous:
mask_root = Job()
cur_job.addFollowOn(mask_root)
cur_job = mask_root
for i, (fasta_id, fasta_name) in enumerate(zip(inputFastaFileIDs, inputFastaNames)):
inputFastaFileIDs[i] = mask_root.addChildJobFn(run_mask_ambiguous, context, inputFastaFileIDs[i], inputFastaNames[i],
disk=context.config.construct_disk).rv(0)
# do minimum allele frequency filter as preprocessing step
if options.pre_min_af:
min_af_job = Job()
cur_job.addFollowOn(min_af_job)
cur_job = min_af_job
af_vcf_ids_list, af_tbi_ids_list = [], []
for vcf_ids, vcf_names, tbi_ids in zip(inputVCFFileIDs, inputVCFNames, inputTBIFileIDs):
af_vcf_ids, af_tbi_ids = [], []
for vcf_id, vcf_name, tbi_id in zip(vcf_ids, vcf_names, tbi_ids):
af_job = min_af_job.addChildJobFn(run_min_allele_filter_vcf_samples, context, vcf_id,
vcf_name, tbi_id, options.pre_min_af,
cores=context.config.construct_cores,
memory=context.config.construct_mem,
disk=context.config.construct_disk)
af_vcf_ids.append(af_job.rv(0))
af_tbi_ids.append(af_job.rv(1))
af_vcf_ids_list.append(af_vcf_ids)
af_tbi_ids_list.append(af_tbi_ids)
inputVCFFileIDs, inputTBIFileIDs = af_vcf_ids_list, af_tbi_ids_list
regions_regex = None if not options.regions_regex else '|'.join(options.regions_regex)
# Parse the regions from file
if options.regions_file:
cur_job = cur_job.addFollowOnJobFn(run_scan_regions_file, context, inputRegionsFileID, regions_regex,
memory=context.config.misc_mem,
disk=context.config.misc_disk)
regions = cur_job.rv()
elif options.fasta_regions:
# Extract fasta sequence names and append them to regions
# Make sure we have a plausible amount of disk for downloading it
cur_job = cur_job.addFollowOnJobFn(run_scan_fasta_sequence_names, context,
inputFastaFileIDs[0],
inputFastaNames[0],
options.regions,
regions_regex,
memory=context.config.misc_mem,
disk=context.config.construct_disk)
regions = cur_job.rv()
else:
regions = options.regions
# Preprocess chromosome names everywhere to be consistent,
# either mapping from 1-->chr1 etc, or going the other way
if options.add_chr_prefix or options.remove_chr_prefix:
cur_job = cur_job.addFollowOnJobFn(run_fix_chrom_names, context,
options.add_chr_prefix,
regions,
inputFastaFileIDs,
inputFastaNames,
inputVCFFileIDs,
inputVCFNames,
inputTBIFileIDs,
alt_regions_id,
cores=context.config.construct_cores,
memory=context.config.construct_mem,
disk=context.config.construct_disk)
regions = cur_job.rv(0)
inputFastaFileIDs, inputFastaFileNames = cur_job.rv(1), cur_job.rv(2)
inputVCFFileIDs, inputTBIFileIDs = cur_job.rv(3), cur_job.rv(5)
# Make sure that we don't have any alt sequences in our regions. alt sequences
# are inferred from the --target_regions bed file
if alt_regions_id:
cur_job = cur_job.addFollowOnJobFn(run_subtract_alt_regions,
context,
alt_regions_id,
regions)
regions, alt_regions = cur_job.rv(0), cur_job.rv(1)
else:
alt_regions=[]
# Merge up comma-separated vcfs with bcftools merge
cur_job = cur_job.addFollowOnJobFn(run_merge_all_vcfs, context,
inputVCFFileIDs, inputVCFNames, inputTBIFileIDs)
inputVCFFileIDs = cur_job.rv(0)
inputVCFNames = cur_job.rv(1)
inputTBIFileIDs = cur_job.rv(2)
# Automatically make and name a bunch of vcfs
vcf_job = cur_job.addFollowOnJobFn(run_generate_input_vcfs, context,
inputVCFFileIDs, inputVCFNames, inputTBIFileIDs,
regions,
options.out_name,
do_primary = options.primary,
do_pan = options.pangenome,
pos_control_sample = options.pos_control,
neg_control_sample = options.neg_control,
sample_graph = options.sample_graph,
haplo_sample = options.haplo_sample,
filter_samples = filter_samples,
min_afs = options.min_af,
vcf_subdir = '{}-vcfs'.format(options.out_name) if options.keep_vcfs else None)
# Construct graphs
vcf_job.addFollowOnJobFn(run_construct_all, context, inputFastaFileIDs,
inputFastaNames, vcf_job.rv(),
options.max_node_size, options.alt_paths or 'alt-gam' in options.indexes,
options.flat_alts, options.handle_svs, regions,
merge_graphs = options.merge_graphs,
sort_ids = True, join_ids = True,
wanted_indexes = options.indexes,
haplo_extraction_sample = haplo_extraction_sample,
gbwt_prune = options.gbwt_prune,
normalize = options.normalize,
validate = options.validate,
alt_regions_id = alt_regions_id,
alt_regions = alt_regions)
if inputBWAFastaID:
# If we need to make a BWA index too, do that in parallel with everything else
init_job.addFollowOnJobFn(run_bwa_index, context, inputBWAFastaID,
copy_fasta=True,
cores=context.config.bwa_index_cores, memory=context.config.bwa_index_mem,
disk=context.config.bwa_index_disk)
# Run the workflow
toil.start(init_job)
else:
toil.restart()
end_time_pipeline = timeit.default_timer()
run_time_pipeline = end_time_pipeline - start_time_pipeline
logger.info("All jobs completed successfully. Pipeline took {} seconds.".format(run_time_pipeline))
| 51.853117
| 195
| 0.592825
|
c7b306efcfcf03db8f2d309fea41d1e0f4abcb0e
| 6,026
|
py
|
Python
|
src/m8_still_more_mutation.py
|
jacobsme1/16-SequencesAndMutation
|
5f748d8f9573b0741e998ffc0b72c8f34c0c73af
|
[
"MIT"
] | null | null | null |
src/m8_still_more_mutation.py
|
jacobsme1/16-SequencesAndMutation
|
5f748d8f9573b0741e998ffc0b72c8f34c0c73af
|
[
"MIT"
] | null | null | null |
src/m8_still_more_mutation.py
|
jacobsme1/16-SequencesAndMutation
|
5f748d8f9573b0741e998ffc0b72c8f34c0c73af
|
[
"MIT"
] | null | null | null |
"""
This module lets you practice MUTATION of lists.
In this module, you mutate by DELETING elements of a list.
Authors: David Mutchler, Amanda Stouder, Chandan Rupakheti, Katie Dion,
Claude Anderson, Delvin Defoe, Curt Clifton, their colleagues,
and Max Jacobs.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
import m6_mutation
def main():
run_test_RETURN_delete_negatives()
run_test_MUTATE_delete_negatives()
def run_test_RETURN_delete_negatives():
""" Tests the RETURN_delete_negatives function. """
print()
print('--------------------------------')
print('Testing RETURN_delete_negatives:')
print('--------------------------------')
# ------------------------------------------------------------------
# Test 1:
# ------------------------------------------------------------------
run_test_number = 1
original_argument = [-30.2, 50, 12.5, -1, -5, 8, 0]
correct_argument_value_after_function_call = original_argument.copy()
correct_returned_value = [50, 12.5, 8, 0]
m6_mutation.run_test(RETURN_delete_negatives,
original_argument,
run_test_number,
correct_returned_value,
correct_argument_value_after_function_call)
# ------------------------------------------------------------------
# Test 2:
# ------------------------------------------------------------------
run_test_number = 2
original_argument = [2, 0, -9, 1, -30]
correct_argument_value_after_function_call = original_argument.copy()
correct_returned_value = [2, 0, 1]
m6_mutation.run_test(RETURN_delete_negatives,
original_argument,
run_test_number,
correct_returned_value,
correct_argument_value_after_function_call)
def RETURN_delete_negatives(numbers):
"""
Returns a NEW list that is the same as the given list of numbers,
but with each negative number in the list DELETED from the list.
For example, if the given list is [-30.2, 50, 12.5, -1, -5, 8, 0],
then the returned list is the NEW list [50, 12.5, 8, 0].
This function must NOT mutate the given list.
Precondition:
:type numbers: list
where the list is a list of numbers.
"""
# TODO: 2. First, READ THE ABOVE TEST CODE.
# Make sure that you understand it.
# In particular, note how it calls the run_test function
# from the module m6_mutation by using the notation:
# m6_mutation.run_test(...)
# Then, IMPLEMENT and test THIS FUNCTION
# (using the above code for testing).
boi = []
for k in range(len(numbers)):
if numbers[k] < 0:
pass
else:
boi.append(numbers[k])
return boi
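# A more idiomatic sketch of the same behavior (not part of the exercise):
#     return [n for n in numbers if n >= 0]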
def run_test_MUTATE_delete_negatives():
""" Tests the MUTATE_delete_negatives function. """
print()
print('--------------------------------')
print('Testing MUTATE_delete_negatives:')
print('--------------------------------')
# ------------------------------------------------------------------
# Test 1:
# ------------------------------------------------------------------
run_test_number = 1
original_argument = [-30.2, 50, 12.5, -1, -5, 8, 0]
correct_argument_value_after_function_call = [50, 12.5, 8, 0]
correct_returned_value = None
m6_mutation.run_test(MUTATE_delete_negatives,
original_argument,
run_test_number,
correct_returned_value,
correct_argument_value_after_function_call)
# ------------------------------------------------------------------
# Test 2:
# ------------------------------------------------------------------
run_test_number = 2
original_argument = [2, 0, -9, 1, -30]
correct_argument_value_after_function_call = [2, 0, 1]
correct_returned_value = None
m6_mutation.run_test(MUTATE_delete_negatives,
original_argument,
run_test_number,
correct_returned_value,
correct_argument_value_after_function_call)
def MUTATE_delete_negatives(numbers):
"""
MUTATES the given list of numbers so that each negative number
in the list is DELETED from the list.
For example, if the given list is [-30.2, 50, 12.5, -1, -5, 8, 0],
then that list is MUTATED to become [50, 12.5, 8, 0].
This function MAY use ONE additional list beyond the given list
(but see if you can solve the problem WITHOUT any additional lists).
The function must NOT return anything (other than the default None).
Precondition: The argument is a list of numbers.
"""
# DONE: 3. First, READ THE ABOVE TEST CODE.
# Make sure that you understand it.
# In particular, note how it calls the run_test function
# from the module m6_mutation by using the notation:
# m6_mutation.run_test(...)
# Then, IMPLEMENT and test THIS FUNCTION
# (using the above code for testing).
#
# HINT: This problem is MUCH harder than it would appear,
# for various quite-subtle reasons.
# Take a stab at this problem,
# then ask for help as needed.
# HINT #2: Why might it be wise to start at the end and
# work backwards through the list to the beginning?
for k in range(len(numbers)-1,-1,-1):
if numbers[k] < 0:
del numbers[k]
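# Iterating from the end avoids the classic skip bug: deleting numbers[k]
# shifts the later elements left, and a forward loop would then miss them.
# An in-place alternative (a sketch, uses one helper list) would be:
#     numbers[:] = [n for n in numbers if n >= 0]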
# ----------------------------------------------------------------------
# If this module is running at the top level (as opposed to being
# imported by another module), then call the 'main' function.
# ----------------------------------------------------------------------
if __name__ == '__main__':
main()
| 38.139241
| 73
| 0.531862
|
a9acdf5c030c66303a762ca94c23574ee4c701fd
| 2,050
|
py
|
Python
|
src/Components/qfed/qfed/planck.py
|
GEOS-ESM/AeroApps
|
874dad6f34420c014d98eccbe81a061bdc0110cf
|
[
"NASA-1.3",
"ECL-2.0",
"Apache-2.0"
] | 2
|
2020-12-02T14:23:30.000Z
|
2021-12-31T15:39:30.000Z
|
src/Components/qfed/qfed/planck.py
|
GEOS-ESM/AeroApps
|
874dad6f34420c014d98eccbe81a061bdc0110cf
|
[
"NASA-1.3",
"ECL-2.0",
"Apache-2.0"
] | 9
|
2020-04-15T16:22:14.000Z
|
2022-03-24T13:59:25.000Z
|
src/Components/qfed/qfed/planck.py
|
GEOS-ESM/AeroApps
|
874dad6f34420c014d98eccbe81a061bdc0110cf
|
[
"NASA-1.3",
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
"""
Planck function and its inverse, derived from J. Joiner IDL code.
Uses GLA TOVS consistent constants.
This software is hereby placed in the public domain.
Arlindo.daSilva@nasa.gov
"""
from numpy import *
from pylab import normpdf
# Constants consistent with GLATOVS
C = (1.19104e-5, 1.43833)
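# (Added note) C[0] and C[1] are the first and second radiation constants,
# roughly 2*h*c^2 in mW/(m^2 sr cm^-4) and h*c/k_B in cm*K, so that
#   B(k, T) = C[0]*k**3 / (exp(C[1]*k/T) - 1)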
def planck(k,T):
"""
Planck function.
Returns radiances in [mw/(sr m^2 cm^-1)] given
k wave number [cm-1]
T temperature [K]
Shortcut:
If k<0, then abs(k) is assumed to be wave length in microns.
"""
if k<0:
k = - 1.0e4 / k
if not isscalar(T):
k = float(k) * ones(T.shape)
return C[0]*pow(k,3.0)/(exp(C[1]*k/T)-1.0)
def iplanck(k,I):
"""
Inverse of the Planck function.
Returns Brightness Temperature [K] given
k wave number [cm-1]
I radiance [mw/(sr m^2 cm^-1])
Shortcut:
If k<0, then abs(k) is assumed to be wave length in microns.
"""
if k<0:
k = - 1.0e4 / k
k = float(k) * ones(I.shape)
return C[1]*k/log(1.0+C[0]*pow(k,3.0)/I)
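# Hedged usage sketch (not from the original module): iplanck is the exact
# inverse of planck for array-valued radiances (it relies on I.shape), e.g.
#   iplanck(-11.0, planck(-11.0, array([300.0])))  # ~ array([ 300.])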
def nplanck(k,T,sig):
"""Planck function convolved with a normal pdf; on input
sig is the temperature stdv.
"""
N = 128
n = 4.
L = zeros(T.size)
for i in range(T.size):
T_ = linspace(T[i]-n*sig,T[i]+n*sig,N)
p = normpdf(T_,T[i],sig)
L[i] = sum(p * planck(k,T_)) / sum(p)
return L
#............................................................
def B21(T):
return planck(-3.959,T) # MODIS Channel 21
def B31(T):
return planck(-11.03,T) # MODIS Channel 31
def B32(T):
return planck(-12.02,T) # MODIS Channel 32
def iB21(I):
return iplanck(-3.959,I) # MODIS Channel 21
def iB31(I):
return iplanck(-11.03,I) # MODIS Channel 31
def iB32(I):
return iplanck(-12.02,I) # MODIS Channel 32
def nB21(T,sig):
return nplanck(-3.959,T,sig) # MODIS Channel 21
def nB31(T,sig):
return nplanck(-11.03,T,sig) # MODIS Channel 31
#............................................................
| 23.295455
| 67
| 0.555122
|
00eeed46ea1dd2995e66e18b7ad77dde55cfa603
| 1,353
|
py
|
Python
|
tests/test.py
|
galuhsahid/mariantranslate
|
a1bf7335714a1bfb6dd6d30a2572baddbe4bdea8
|
[
"MIT"
] | 1
|
2021-12-04T04:36:01.000Z
|
2021-12-04T04:36:01.000Z
|
tests/test.py
|
galuhsahid/mariantranslate
|
a1bf7335714a1bfb6dd6d30a2572baddbe4bdea8
|
[
"MIT"
] | 3
|
2021-08-19T19:29:23.000Z
|
2021-08-19T19:30:32.000Z
|
tests/test.py
|
galuhsahid/mariantranslate
|
a1bf7335714a1bfb6dd6d30a2572baddbe4bdea8
|
[
"MIT"
] | null | null | null |
import pytest
from translator import Translator
class TestTranslator:
@pytest.fixture(autouse=True)
def setup(self):
self.en_id_translator = Translator("en", "id")
def test_single_translate(self):
text_en = "Due to the limited vegetation cover of the Faroe Islands, it is relatively easy to follow the history of geology."
expected = "Karena tumbuhan terbatas menutupi Kepulauan Faroe, relatif mudah untuk mengikuti sejarah geologi."
result = self.en_id_translator.translate(text_en)
assert expected == result
def test_batch_translate(self):
texts_en = [
"The middle basalt series consists of thin lava flows with a highly porous interlayer.",
"This means that shops and services are now relocating en masse from the villages into the centres",
"By road, the main islands are connected by bridges and tunnels.",
]
expected = [
"Seri basal tengah terdiri dari aliran lava tipis dengan interlayer yang sangat berpori.",
"Ini berarti bahwa toko-toko dan layanan sekarang relokasi massal dari desa-desa ke pusat-pusat",
"Melalui jalan, pulau-pulau utama terhubung oleh jembatan dan terowongan.",
]
result = self.en_id_translator.translate(texts_en)
assert expected == result
| 43.645161
| 133
| 0.691057
|
7b4bd33a8ed89adf170576fcd5a5a45bb5352cf7
| 947
|
py
|
Python
|
Packages/matplotlib-2.2.2/lib/mpl_examples/pyplots/whats_new_1_subplot3d.py
|
NightKirie/NCKU_NLP_2108_industry3
|
23ac13644b140587e23cfeffb114c7c6f46f17a2
|
[
"MIT"
] | 1
|
2018-06-11T07:36:04.000Z
|
2018-06-11T07:36:04.000Z
|
Packages/matplotlib-2.2.2/lib/mpl_examples/pyplots/whats_new_1_subplot3d.py
|
NightKirie/NCKU_NLP_2108_industry3
|
23ac13644b140587e23cfeffb114c7c6f46f17a2
|
[
"MIT"
] | null | null | null |
Packages/matplotlib-2.2.2/lib/mpl_examples/pyplots/whats_new_1_subplot3d.py
|
NightKirie/NCKU_NLP_2108_industry3
|
23ac13644b140587e23cfeffb114c7c6f46f17a2
|
[
"MIT"
] | 4
|
2018-05-19T11:31:20.000Z
|
2018-07-01T20:58:29.000Z
|
"""
=====================
Whats New 1 Subplot3d
=====================
"""
from mpl_toolkits.mplot3d.axes3d import Axes3D
from matplotlib import cm
#from matplotlib.ticker import LinearLocator, FixedLocator, FormatStrFormatter
import matplotlib.pyplot as plt
import numpy as np
fig = plt.figure()
ax = fig.add_subplot(1, 2, 1, projection='3d')
X = np.arange(-5, 5, 0.25)
Y = np.arange(-5, 5, 0.25)
X, Y = np.meshgrid(X, Y)
R = np.sqrt(X**2 + Y**2)
Z = np.sin(R)
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.jet,
linewidth=0, antialiased=False)
ax.set_zlim3d(-1.01, 1.01)
#ax.w_zaxis.set_major_locator(LinearLocator(10))
#ax.w_zaxis.set_major_formatter(FormatStrFormatter('%.03f'))
fig.colorbar(surf, shrink=0.5, aspect=5)
from mpl_toolkits.mplot3d.axes3d import get_test_data
ax = fig.add_subplot(1, 2, 2, projection='3d')
X, Y, Z = get_test_data(0.05)
ax.plot_wireframe(X, Y, Z, rstride=10, cstride=10)
plt.show()
| 25.594595
| 78
| 0.68321
|
e57088868684d3b1c8d941bf6329d3c7b680c413
| 9,473
|
py
|
Python
|
toontown/shtiker/DisguisePage.py
|
LittleNed/toontown-stride
|
1252a8f9a8816c1810106006d09c8bdfe6ad1e57
|
[
"Apache-2.0"
] | 1
|
2018-06-16T23:06:38.000Z
|
2018-06-16T23:06:38.000Z
|
toontown/shtiker/DisguisePage.py
|
NoraTT/Historical-Commits-Project-Altis-Source
|
fe88e6d07edf418f7de6ad5b3d9ecb3d0d285179
|
[
"Apache-2.0"
] | null | null | null |
toontown/shtiker/DisguisePage.py
|
NoraTT/Historical-Commits-Project-Altis-Source
|
fe88e6d07edf418f7de6ad5b3d9ecb3d0d285179
|
[
"Apache-2.0"
] | 4
|
2019-06-20T23:45:23.000Z
|
2020-10-14T20:30:15.000Z
|
from toontown.shtiker import ShtikerPage
from direct.gui.DirectGui import *
from panda3d.core import *
from panda3d.direct import *
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from toontown.suit import SuitDNA
from toontown.battle import SuitBattleGlobals
from toontown.minigame import MinigamePowerMeter
from toontown.coghq import CogDisguiseGlobals
DeptColors = (Vec4(0.647, 0.608, 0.596, 1.0),
Vec4(0.588, 0.635, 0.671, 1.0),
Vec4(0.596, 0.714, 0.659, 1.0),
Vec4(0.761, 0.678, 0.69, 1.0),
Vec4(0.5, 0.5, 0.5, 1.0))
PartNames = ('lUpleg', 'lLowleg', 'lShoe', 'rUpleg', 'rLowleg', 'rShoe', 'lShoulder', 'rShoulder', 'chest', 'waist', 'hip', 'lUparm', 'lLowarm', 'lHand', 'rUparm', 'rLowarm', 'rHand')
class DisguisePage(ShtikerPage.ShtikerPage):
meterColor = Vec4(0.87, 0.87, 0.827, 1.0)
meterActiveColor = Vec4(0.7, 0.3, 0.3, 1)
def __init__(self):
ShtikerPage.ShtikerPage.__init__(self)
self.activeTab = 0
self.progressTitle = None
def load(self):
ShtikerPage.ShtikerPage.load(self)
gui = loader.loadModel('phase_9/models/gui/cog_disguises')
icons = loader.loadModel('phase_3/models/gui/cog_icons')
self.frame = DirectFrame(parent=self, relief=None, scale=0.47, pos=(0.1, 1, 0))
self.bkgd = DirectFrame(parent=self.frame, geom=gui.find('**/base'), relief=None, scale=(0.98, 1, 1))
self.bkgd.setTextureOff(1)
self.buttons = []
self.pageFrame = DirectFrame(parent=self.frame, relief=None)
self.xOffset = 0.4
self.deptLabel = DirectLabel(parent=self.frame, text='', text_font=ToontownGlobals.getSuitFont(), text_style=3, text_fg=(1,1,1,1), text_scale=TTLocalizer.DPdeptLabel, text_pos=(-0.1, 0.8))
DirectFrame(parent=self.frame, relief=None, geom=gui.find('**/pipe_frame'))
self.tube = DirectFrame(parent=self.frame, relief=None, geom=gui.find('**/tube'))
DirectFrame(parent=self.frame, relief=None, geom=gui.find('**/robot/face'))
DirectLabel(parent=self.frame, relief=None, geom=gui.find('**/text_cog_disguises'), geom_pos=(0, 0.1, 0))
self.meritTitle = DirectLabel(parent=self.frame, relief=None, geom=gui.find('**/text_merit_progress'), geom_pos=(0, 0.1, 0))
self.meritTitle.hide()
self.cogbuckTitle = DirectLabel(parent=self.frame, relief=None, geom=gui.find('**/text_cashbuck_progress'), geom_pos=(0, 0.1, 0))
self.cogbuckTitle.hide()
self.juryNoticeTitle = DirectLabel(parent=self.frame, relief=None, geom=gui.find('**/text_jury_notice_progress'), geom_pos=(0, 0.1, 0))
self.juryNoticeTitle.hide()
self.stockOptionTitle = DirectLabel(parent=self.frame, relief=None, geom=gui.find('**/text_stock_option_progress'), geom_pos=(0, 0.1, 0))
self.stockOptionTitle.hide()
self.progressTitle = self.meritTitle
self.promotionTitle = DirectLabel(parent=self.frame, relief=None, geom=gui.find('**/text_ready4promotion'), geom_pos=(0, 0.1, 0))
self.cogName = DirectLabel(parent=self.frame, relief=None, text='', text_font=ToontownGlobals.getSuitFont(), text_scale=TTLocalizer.DPcogName, text_align=TextNode.ACenter, pos=(-0.948, 0, -1.15))
self.cogLevel = DirectLabel(parent=self.frame, relief=None, text='', text_font=ToontownGlobals.getSuitFont(), text_scale=0.09, text_align=TextNode.ACenter, pos=(-0.91, 0, -1.02))
self.partFrame = DirectFrame(parent=self.frame, relief=None)
self.parts = []
for partNum in xrange(0, 17):
self.parts.append(DirectFrame(parent=self.partFrame, relief=None, geom=gui.find('**/robot/' + PartNames[partNum])))
self.holes = []
for partNum in xrange(0, 17):
self.holes.append(DirectFrame(parent=self.partFrame, relief=None, geom=gui.find('**/robot_hole/' + PartNames[partNum])))
self.cogPartRatio = DirectLabel(parent=self.frame, relief=None, text='', text_font=ToontownGlobals.getSuitFont(), text_scale=0.08, text_align=TextNode.ACenter, pos=(-0.91, 0, -0.82))
self.cogMeritRatio = DirectLabel(parent=self.frame, relief=None, text='', text_font=ToontownGlobals.getSuitFont(), text_scale=0.08, text_align=TextNode.ACenter, pos=(0.45, 0, -0.36))
meterFace = gui.find('**/meter_face_whole')
meterFaceHalf = gui.find('**/meter_face_half')
self.meterFace = DirectLabel(parent=self.frame, relief=None, geom=meterFace, color=self.meterColor, pos=(0.455, 0.0, 0.04))
self.meterFaceHalf1 = DirectLabel(parent=self.frame, relief=None, geom=meterFaceHalf, color=self.meterActiveColor, pos=(0.455, 0.0, 0.04))
self.meterFaceHalf2 = DirectLabel(parent=self.frame, relief=None, geom=meterFaceHalf, color=self.meterColor, pos=(0.455, 0.0, 0.04))
for dept in xrange(len(SuitDNA.suitDepts)):
button = DirectButton(parent=self.frame, relief=None, pos=(-1 + self.xOffset * dept, 0, 1.05), image=icons.find(SuitDNA.suitDeptModelPaths[dept]), image_scale=0.25, image2_color=(1, 1, 1, 0.75), command = self.doTab, extraArgs=[dept])
self.buttons.append(button)
self.frame.hide()
self.activeTab = 3
self.updatePage()
def unload(self):
ShtikerPage.ShtikerPage.unload(self)
def enter(self):
self.frame.show()
ShtikerPage.ShtikerPage.enter(self)
def exit(self):
self.frame.hide()
ShtikerPage.ShtikerPage.exit(self)
def updatePage(self):
self.doTab(self.activeTab)
def updatePartsDisplay(self, index, numParts, numPartsRequired):
partBitmask = 1
groupingBitmask = CogDisguiseGlobals.PartsPerSuitBitmasks[index]
previousPart = 0
for part in self.parts:
groupingBit = groupingBitmask & partBitmask
if numParts & partBitmask & groupingBit:
part.show()
self.holes[self.parts.index(part)].hide()
if groupingBit:
previousPart = 1
elif not groupingBit and previousPart:
part.show()
self.holes[self.parts.index(part)].hide()
else:
self.holes[self.parts.index(part)].show()
part.hide()
previousPart = 0
partBitmask = partBitmask << 1
def updateMeritBar(self, dept):
merits = base.localAvatar.cogMerits[dept]
totalMerits = CogDisguiseGlobals.getTotalMerits(base.localAvatar, dept)
if totalMerits == 0:
progress = 1
else:
progress = min(merits / float(totalMerits), 1)
self.updateMeritDial(progress)
if base.localAvatar.readyForPromotion(dept):
self.cogMeritRatio['text'] = TTLocalizer.DisguisePageMeritFull
self.promotionTitle.show()
self.progressTitle.hide()
else:
self.cogMeritRatio['text'] = '%d/%d' % (merits, totalMerits)
self.promotionTitle.hide()
self.progressTitle.show()
def updateMeritDial(self, progress):
if progress == 0:
self.meterFaceHalf1.hide()
self.meterFaceHalf2.hide()
self.meterFace.setColor(self.meterColor)
elif progress == 1:
self.meterFaceHalf1.hide()
self.meterFaceHalf2.hide()
self.meterFace.setColor(self.meterActiveColor)
else:
self.meterFaceHalf1.show()
self.meterFaceHalf2.show()
self.meterFace.setColor(self.meterColor)
if progress < 0.5:
self.meterFaceHalf2.setColor(self.meterColor)
else:
self.meterFaceHalf2.setColor(self.meterActiveColor)
progress = progress - 0.5
self.meterFaceHalf2.setR(180 * (progress / 0.5))
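# (Added note) The meter appears to be a radial wipe built from two
# half-disc overlays: rotating the second half by up to 180 degrees
# progressively reveals the active color as progress sweeps through
# each half of the dial.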
def doTab(self, index):
self.activeTab = index
self.bkgd.setColor(DeptColors[index])
self.deptLabel['text'] = (SuitDNA.suitDeptFullnames[SuitDNA.suitDepts[index]],)
cogIndex = base.localAvatar.cogTypes[index] + SuitDNA.suitsPerDept * index
cog = SuitDNA.suitHeadTypes[cogIndex]
self.progressTitle.hide()
if SuitDNA.suitDepts[index] == 'm':
self.progressTitle = self.cogbuckTitle
elif SuitDNA.suitDepts[index] == 'l':
self.progressTitle = self.juryNoticeTitle
elif SuitDNA.suitDepts[index] == 'c':
self.progressTitle = self.stockOptionTitle
else:
self.progressTitle = self.meritTitle
self.progressTitle.show()
self.cogName['text'] = SuitBattleGlobals.SuitAttributes[cog]['name']
cogLevel = base.localAvatar.cogLevels[index]
if base.localAvatar.cogReviveLevels[self.activeTab] > -1:
cogLevel = base.localAvatar.cogReviveLevels[self.activeTab]
self.cogLevel['text_scale'] = 0.065
self.cogLevel['text'] = TTLocalizer.DisguisePageCogLevel % str(cogLevel + 1) + TTLocalizer.SkeleRevivePostFix
else:
self.cogLevel['text_scale'] = 0.09
self.cogLevel['text'] = TTLocalizer.DisguisePageCogLevel % str(cogLevel + 1)
numParts = base.localAvatar.cogParts[index]
numPartsRequired = CogDisguiseGlobals.PartsPerSuit[index]
self.updatePartsDisplay(index, numParts, numPartsRequired)
self.updateMeritBar(index)
self.cogPartRatio['text'] = '%d/%d' % (CogDisguiseGlobals.getTotalParts(numParts), numPartsRequired)
| 53.219101
| 246
| 0.653858
|
7cfbfc6ed8f52badecc9a3773082549a63a48ed7
| 4,512
|
py
|
Python
|
mnist/main.py
|
andreh7/pytorch-examples
|
2dca10404443ce3178343c07ba6e22af13efb006
|
[
"BSD-3-Clause"
] | 168
|
2017-02-28T20:07:08.000Z
|
2021-09-03T08:24:42.000Z
|
mnist/main.py
|
andreh7/pytorch-examples
|
2dca10404443ce3178343c07ba6e22af13efb006
|
[
"BSD-3-Clause"
] | 34
|
2017-04-30T21:30:17.000Z
|
2021-08-20T12:12:48.000Z
|
mnist/main.py
|
andreh7/pytorch-examples
|
2dca10404443ce3178343c07ba6e22af13efb006
|
[
"BSD-3-Clause"
] | 63
|
2017-02-03T07:12:51.000Z
|
2022-01-26T23:33:44.000Z
|
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
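# (0.1307,) and (0.3081,) above are the commonly used mean and std of the
# MNIST training images, so inputs are standardized before training.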
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x)
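# forward returns per-class log-probabilities, which is what F.nll_loss
# in train() and test() below expects.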
model = Net()
if args.cuda:
model.cuda()
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
def train(epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.data[0]))
def test():
model.eval()
test_loss = 0
correct = 0
for data, target in test_loader:
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data, volatile=True), Variable(target)
output = model(data)
test_loss += F.nll_loss(output, target, size_average=False).data[0] # sum up batch loss
pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.data.view_as(pred)).cpu().sum()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
for epoch in range(1, args.epochs + 1):
train(epoch)
test()
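# Illustrative invocation (assuming this script is saved as mnist_example.py; the
# flags map one-to-one onto the argparse arguments defined above):
#   python mnist_example.py --epochs 5 --lr 0.05 --batch-size 128 --no-cuda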
| 39.578947
| 95
| 0.615913
|
0fb187deac214d4a991ecf40d8a5d9fca1d17b1c
| 5,095
|
py
|
Python
|
examples/ExecutionPools/Remote/test_plan.py
|
YinjunZhu/testplan
|
8feeb92a15211454213c5ab279f1e4396a6d59a8
|
[
"Apache-2.0"
] | 1
|
2021-07-21T08:28:41.000Z
|
2021-07-21T08:28:41.000Z
|
examples/ExecutionPools/Remote/test_plan.py
|
YinjunZhu/testplan
|
8feeb92a15211454213c5ab279f1e4396a6d59a8
|
[
"Apache-2.0"
] | null | null | null |
examples/ExecutionPools/Remote/test_plan.py
|
YinjunZhu/testplan
|
8feeb92a15211454213c5ab279f1e4396a6d59a8
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# This plan contains tests that demonstrate failures as well.
"""
Parallel test execution in a remote pool.
"""
import os
import sys
import socket
import getpass
import shutil
import tempfile
from testplan import test_plan
from testplan import Task
from testplan.runners.pools import RemotePool
from testplan.common.utils.path import module_abspath, pwd
from testplan.parser import TestplanParser
from testplan.report.testing.styles import Style, StyleEnum
OUTPUT_STYLE = Style(StyleEnum.ASSERTION_DETAIL, StyleEnum.ASSERTION_DETAIL)
TEMP_DIR = None
class CustomParser(TestplanParser):
"""Inheriting base parser."""
def add_arguments(self, parser):
"""Defining custom arguments for this Testplan."""
parser.add_argument("--tasks-num", action="store", type=int, default=8)
parser.add_argument("--pool-size", action="store", type=int, default=4)
# Function that creates a file with some content
# to demonstrate custom file transferring.
def make_file(filename, dirname, content):
path = os.path.join(dirname, filename)
with open(path, "w") as fobj:
fobj.write(content)
return path
# Using a custom parser to support `--tasks-num` and `--pool-size` command
# line arguments so that users can experiment with remote pool test execution.
# Hard-coding `pdf_path`, 'stdout_style' and 'pdf_style' so that the
# downloadable example gives meaningful and presentable output.
# NOTE: this programmatic arguments passing approach will cause Testplan
# to ignore any command line arguments related to that functionality.
@test_plan(
name="RemotePoolExecution",
parser=CustomParser,
pdf_path=os.path.join(pwd(), "report.pdf"),
stdout_style=OUTPUT_STYLE,
pdf_style=OUTPUT_STYLE,
)
def main(plan):
"""
Testplan decorated main function to add and execute MultiTests.
:return: Testplan result object.
:rtype: ``testplan.base.TestplanResult``
"""
workspace = os.path.dirname(__file__)
# Create two temporary files locally. For demonstration, just write the
# filename as the content of each.
assert TEMP_DIR is not None
for filename in ("file1", "file2"):
make_file(filename, TEMP_DIR, content=filename)
# Explicitly specify the full paths to both the local source files just
# created and the destination filepaths on the remote host.
push_files = [
(os.path.join(TEMP_DIR, "file1"), "/tmp/remote_example/file1"),
(os.path.join(TEMP_DIR, "file2"), "/tmp/remote_example/file2"),
]
# Check if the remote host has been specified in the environment. Remote
# hosts can only be Linux systems. If none is specified when running on a
# Linux system we can default to using the localhost as our "remote"
# worker. Whichever remote host is used must be configured to accept SSH
# connections from the localhost.
remote_host = os.environ.get("TESTPLAN_REMOTE_HOST")
if not remote_host:
if os.name == "posix":
remote_host = socket.gethostname()
else:
raise RuntimeError(
"You must specify a remote host via the TESTPLAN_REMOTE_HOST "
"environment var on non-Linux systems."
)
# Add a remote pool test execution resource to the plan of given size.
pool = RemotePool(
name="MyPool",
# Create 3 workers on the same remote host.
hosts={remote_host: 3},
# Allow the remote port to be overridden by the
# environment. Default to 0, which will make testplan use
# the default SSH port for connections.
port=int(os.environ.get("TESTPLAN_REMOTE_PORT", 0)),
setup_script=["/bin/bash", "setup_script.ksh"],
env={"LOCAL_USER": getpass.getuser(), "LOCAL_WORKSPACE": workspace},
workspace_exclude=[".git/", ".cache/", "doc/", "test/"],
# We push local files to the remote worker using the
# explicit source and destination locations defined above.
push=push_files,
workspace=workspace,
)
plan.add_resource(pool)
# Add a given number of similar tests to the remote pool
# to be executed in parallel.
for idx in range(plan.args.tasks_num):
# All Task arguments need to be serializable.
task = Task(
target="make_multitest",
module="tasks",
# We specify the full paths to files as they will be found
# on the remote host.
kwargs={
"index": idx,
"files": [
"/tmp/remote_example/file1",
"/tmp/remote_example/file2",
],
},
)
plan.schedule(task, resource="MyPool")
if __name__ == "__main__":
# Create a new temporary directory for this test plan.
TEMP_DIR = tempfile.mkdtemp()
# Run the test plan.
res = main()
# Clean up all the temporary files used by this test plan.
shutil.rmtree(TEMP_DIR)
print("Exiting code: {}".format(res.exit_code))
sys.exit(res.exit_code)
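# Illustrative invocation (assumption: passwordless SSH from this machine to the
# chosen host is already set up, as RemotePool requires):
#   TESTPLAN_REMOTE_HOST=linuxhost.example.com python test_plan.py --tasks-num 16
# --tasks-num (and --pool-size) are handled by the CustomParser defined above.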
| 34.194631
| 79
| 0.669676
|
15e2b0829d5d388635fd97df68bad9657d79b076
| 6,556
|
py
|
Python
|
muspinsim/input/input.py
|
stur86/muspinsim
|
17c035475aa3fd4278f8c270c9a40cc214631b42
|
[
"MIT"
] | 2
|
2021-02-23T12:34:42.000Z
|
2021-04-28T09:33:33.000Z
|
muspinsim/input/input.py
|
stur86/muspinsim
|
17c035475aa3fd4278f8c270c9a40cc214631b42
|
[
"MIT"
] | 1
|
2021-02-02T14:27:43.000Z
|
2021-02-08T17:32:55.000Z
|
muspinsim/input/input.py
|
stur86/muspinsim
|
17c035475aa3fd4278f8c270c9a40cc214631b42
|
[
"MIT"
] | 1
|
2021-04-26T14:03:50.000Z
|
2021-04-26T14:03:50.000Z
|
"""input.py
Class to read in input files for the muspinsim script
"""
import re
from io import StringIO
import numpy as np
from collections import namedtuple
from muspinsim.input.keyword import (
InputKeywords,
MuSpinEvaluateKeyword,
MuSpinCouplingKeyword,
)
class MuSpinInputError(Exception):
pass
MuSpinInputValue = namedtuple("MuSpinInputValue", ["name", "args", "value"])
# Experiment defaults as .in files
_exp_defaults = {
"alc": """
polarization
longitudinal
y_axis
integral
x_axis
field
""",
"zero_field": """
field
0.0
polarization
transverse
x_axis
time
y_axis
asymmetry
""",
}
class MuSpinInput(object):
def __init__(self, fs=None):
"""Read in an input file
Read in an input file from an opened file stream
Arguments:
fs {TextIOBase} -- I/O stream (should be file, can be StringIO)
"""
self._keywords = {}
self._variables = {}
self._fitting_info = {"fit": False, "data": None, "method": None, "rtol": None}
if fs is not None:
lines = fs.readlines()
# Split lines in blocks
raw_blocks = {}
curr_block = None
indre = re.compile("(\\s+)[^\\s]")
indent = None
for l in lines:
if l.strip() == "" or l[0] == "#":
continue # It's a comment
m = indre.match(l)
if m:
if indent is None:
indent = m.groups()[0]
if m.groups()[0] != indent:
raise RuntimeError("Invalid indent in input file")
else:
try:
raw_blocks[curr_block].append(l.strip())
except KeyError:
raise RuntimeError("Badly formatted input file")
else:
curr_block = l.strip()
raw_blocks[curr_block] = []
indent = None # Reset for each block
# A special case: if there are fitting variables, we need to know
# right away
self._load_fitting_kw(raw_blocks)
# Another special case: if the "experiment" keyword is present,
# use it to set some defaults
try:
block = raw_blocks.pop("experiment")
kw = InputKeywords["experiment"](block)
exptype = kw.evaluate()[0]
if len(exptype) > 1:
raise MuSpinInputError(
"Can not define more than one experiment type"
)
elif len(exptype) == 1:
try:
mock_i = MuSpinInput(StringIO(_exp_defaults[exptype[0]]))
self._keywords.update(mock_i._keywords)
except KeyError:
raise MuSpinInputError("Invalid experiment type defined")
except KeyError:
pass
# Now parse
for header, block in raw_blocks.items():
hsplit = header.split()
name = hsplit[0]
args = hsplit[1:]
try:
KWClass = InputKeywords[name]
except KeyError:
raise MuSpinInputError(
"Invalid keyword " "{0} ".format(name) + "found in input file"
)
if issubclass(KWClass, MuSpinEvaluateKeyword):
kw = KWClass(block, args=args, variables=self._variables)
else:
kw = KWClass(block, args=args)
kwid = kw.id
if name != kwid:
self._keywords[name] = self._keywords.get(name, {})
self._keywords[name][kwid] = kw
else:
self._keywords[name] = kw
@property
def variables(self):
return {**self._variables}
@property
def fitting_info(self):
return {**self._fitting_info}
def evaluate(self, **variables):
"""Produce a full dictionary with a value for every input keyword,
interpreted given the variable values that have been passed."""
result = {"couplings": {}, "fitting_info": self.fitting_info}
for name, KWClass in InputKeywords.items():
if issubclass(KWClass, MuSpinCouplingKeyword):
if name in self._keywords:
for kwid, kw in self._keywords[name].items():
val = MuSpinInputValue(
name, kw.arguments, kw.evaluate(**variables)
)
result["couplings"][kwid] = val
else:
if name in self._keywords:
kw = self._keywords[name]
v = variables if issubclass(KWClass, MuSpinEvaluateKeyword) else {}
val = kw.evaluate(**v)
result[name] = MuSpinInputValue(name, kw.arguments, val)
elif KWClass.default is not None:
kw = KWClass()
val = np.array(kw.evaluate())
result[name] = MuSpinInputValue(name, kw.arguments, val)
return result
def _load_fitting_kw(self, raw_blocks):
"""Special case: handling of all the fitting related keywords and
information."""
try:
block = raw_blocks.pop("fitting_variables")
kw = InputKeywords["fitting_variables"](block)
self._variables = {v.name: v for v in kw.evaluate()}
except KeyError:
pass
if len(self._variables) == 0:
return
self._fitting_info["fit"] = True
try:
block = raw_blocks.pop("fitting_data")
kw = InputKeywords["fitting_data"](block)
self._fitting_info["data"] = np.array(kw.evaluate())
except KeyError:
raise MuSpinInputError(
"Fitting variables defined without defining" " a set of data to fit"
)
block = raw_blocks.pop("fitting_tolerance", [])
kw = InputKeywords["fitting_tolerance"](block)
self._fitting_info["rtol"] = float(kw.evaluate()[0][0])
block = raw_blocks.pop("fitting_method", [])
kw = InputKeywords["fitting_method"](block)
self._fitting_info["method"] = kw.evaluate()[0][0]
| 30.779343
| 87
| 0.515558
|
7014ebe37edc286896c3286bcc5a73c29d0f144a
| 3,279
|
py
|
Python
|
1_shinno/sample/attention.py
|
yfur/dl-chainer
|
c1917710c80fd6b3dc4cded81700b92bbc349302
|
[
"Apache-2.0"
] | null | null | null |
1_shinno/sample/attention.py
|
yfur/dl-chainer
|
c1917710c80fd6b3dc4cded81700b92bbc349302
|
[
"Apache-2.0"
] | null | null | null |
1_shinno/sample/attention.py
|
yfur/dl-chainer
|
c1917710c80fd6b3dc4cded81700b92bbc349302
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import chainer
from chainer import cuda, Function, gradient_check, Variable, \
optimizers, serializers, utils
from chainer import Link, Chain, ChainList
import chainer.functions as F
import chainer.links as L
jvocab = {}
jlines = open('jp.txt').read().split('\n')
for i in range(len(jlines)):
lt = jlines[i].split()
for w in lt:
if w not in jvocab:
jvocab[w] = len(jvocab)
jvocab['<eos>'] = len(jvocab)
jv = len(jvocab)
evocab = {}
id2wd = {}
elines = open('eng.txt').read().split('\n')
for i in range(len(elines)):
lt = elines[i].split()
for w in lt:
if w not in evocab:
id = len(evocab)
evocab[w] = id
id2wd[id] = w
id = len(evocab)
evocab['<eos>'] = id
id2wd[id] = '<eos>'
ev = len(evocab)
def mk_ct(gh, ht):
alp = []
s = 0.0
for i in range(len(gh)):
s += np.exp(ht.dot(gh[i]))
ct = np.zeros(100)
for i in range(len(gh)):
alpi = np.exp(ht.dot(gh[i]))/s
ct += alpi * gh[i]
ct = Variable(np.array([ct]).astype(np.float32))
return ct
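# mk_ct above is a plain global-attention step computed in NumPy outside the
# chainer graph: alp_i = softmax(ht . gh_i) over the encoder states gh, and
# ct = sum_i alp_i * gh_i, returned as a (1, 100) float32 Variable.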
class MyATT(chainer.Chain):
def __init__(self, jv, ev, k):
super(MyATT, self).__init__(
embedx = L.EmbedID(jv, k),
embedy = L.EmbedID(ev, k),
H = L.LSTM(k, k),
Wc1 = L.Linear(k, k),
Wc2 = L.Linear(k, k),
W = L.Linear(k, ev),
)
def __call__(self, jline, eline):
gh = []
for i in range(len(jline)):
wid = jvocab[jline[i]]
x_k = self.embedx(Variable(np.array([wid], dtype=np.int32)))
h = self.H(x_k)
gh.append(np.copy(h.data[0]))
x_k = self.embedx(Variable(np.array([jvocab['<eos>']], dtype=np.int32)))
tx = Variable(np.array([evocab[eline[0]]], dtype=np.int32))
h = self.H(x_k)
ct = mk_ct(gh, h.data[0])
h2 = F.tanh(self.Wc1(ct) + self.Wc2(h))
accum_loss = F.softmax_cross_entropy(self.W(h2), tx)
for i in range(len(eline)):
wid = evocab[eline[i]]
x_k = self.embedy(Variable(np.array([wid], dtype=np.int32)))
next_wid = evocab['<eos>'] if (i == len(eline) - 1) else evocab[eline[i+1]]
tx = Variable(np.array([next_wid], dtype=np.int32))
h = self.H(x_k)
ct = mk_ct(gh, h.data)
h2 = F.tanh(self.Wc1(ct) + self.Wc2(h))
loss = F.softmax_cross_entropy(self.W(h2), tx)
accum_loss += loss
return accum_loss
def reset_state(self):
self.H.reset_state()
demb = 100
model = MyATT(jv, ev, demb)
optimizer = optimizers.Adam()
optimizer.setup(model)
for epoch in range(100):
for i in range(len(jlines)-1):
jln = jlines[i].split()
jlnr = jln[::-1]
eln = elines[i].split()
model.reset_state()
model.zerograds()
loss = model(jlnr, eln)
loss.backward()
loss.unchain_backward() # truncate
optimizer.update()
        print(i, "finished")
outfile = "attention-" + str(epoch) + ".model"
serializers.save_npz(outfile, model)
| 30.361111
| 96
| 0.526685
|
944903400d2c3b58d0c1d8841806cd54b30d415c
| 9,490
|
py
|
Python
|
from_cpython/Lib/test/test_pkg.py
|
aisk/pyston
|
ac69cfef0621dbc8901175e84fa2b5cb5781a646
|
[
"BSD-2-Clause",
"Apache-2.0"
] | 1
|
2020-02-06T14:28:45.000Z
|
2020-02-06T14:28:45.000Z
|
from_cpython/Lib/test/test_pkg.py
|
aisk/pyston
|
ac69cfef0621dbc8901175e84fa2b5cb5781a646
|
[
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
from_cpython/Lib/test/test_pkg.py
|
aisk/pyston
|
ac69cfef0621dbc8901175e84fa2b5cb5781a646
|
[
"BSD-2-Clause",
"Apache-2.0"
] | 1
|
2020-02-06T14:29:00.000Z
|
2020-02-06T14:29:00.000Z
|
# expected: fail
# Test packages (dotted-name import)
import sys
import os
import tempfile
import textwrap
import unittest
from test import test_support
# Helpers to create and destroy hierarchies.
def cleanout(root):
names = os.listdir(root)
for name in names:
fullname = os.path.join(root, name)
if os.path.isdir(fullname) and not os.path.islink(fullname):
cleanout(fullname)
else:
os.remove(fullname)
os.rmdir(root)
def fixdir(lst):
if "__builtins__" in lst:
lst.remove("__builtins__")
return lst
# XXX Things to test
#
# import package without __init__
# import package with __init__
# __init__ importing submodule
# __init__ importing global module
# __init__ defining variables
# submodule importing other submodule
# submodule importing global module
# submodule import submodule via global name
# from package import submodule
# from package import subpackage
# from package import variable (defined in __init__)
# from package import * (defined in __init__)
class Test(unittest.TestCase):
def setUp(self):
self.root = None
self.pkgname = None
self.syspath = list(sys.path)
def tearDown(self):
sys.path[:] = self.syspath
if self.root: # Only clean if the test was actually run
cleanout(self.root)
# delete all modules concerning the tested hierarchy
if self.pkgname:
modules = [name for name in sys.modules
if self.pkgname in name.split('.')]
for name in modules:
del sys.modules[name]
def run_code(self, code):
exec(textwrap.dedent(code), globals(), {"self": self})
def mkhier(self, descr):
root = tempfile.mkdtemp()
sys.path.insert(0, root)
if not os.path.isdir(root):
os.mkdir(root)
for name, contents in descr:
comps = name.split()
fullname = root
for c in comps:
fullname = os.path.join(fullname, c)
if contents is None:
os.mkdir(fullname)
else:
f = open(fullname, "w")
f.write(contents)
if contents and contents[-1] != '\n':
f.write('\n')
f.close()
self.root = root
# package name is the name of the first item
self.pkgname = descr[0][0]
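    # Each descr entry is ("space separated path", contents): None creates a
    # directory, a string creates a file with that content, e.g.
    #   [("pkg", None), ("pkg __init__" + os.extsep + "py", "")]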
def test_1(self):
hier = [("t1", None), ("t1 __init__"+os.extsep+"py", "")]
self.mkhier(hier)
import t1
def test_2(self):
hier = [
("t2", None),
("t2 __init__"+os.extsep+"py", "'doc for t2'"),
("t2 sub", None),
("t2 sub __init__"+os.extsep+"py", ""),
("t2 sub subsub", None),
("t2 sub subsub __init__"+os.extsep+"py", "spam = 1"),
]
self.mkhier(hier)
import t2.sub
import t2.sub.subsub
self.assertEqual(t2.__name__, "t2")
self.assertEqual(t2.sub.__name__, "t2.sub")
self.assertEqual(t2.sub.subsub.__name__, "t2.sub.subsub")
# This exec crap is needed because Py3k forbids 'import *' outside
# of module-scope and __import__() is insufficient for what we need.
s = """
import t2
from t2 import *
self.assertEqual(dir(), ['self', 'sub', 't2'])
"""
self.run_code(s)
from t2 import sub
from t2.sub import subsub
from t2.sub.subsub import spam
self.assertEqual(sub.__name__, "t2.sub")
self.assertEqual(subsub.__name__, "t2.sub.subsub")
self.assertEqual(sub.subsub.__name__, "t2.sub.subsub")
for name in ['spam', 'sub', 'subsub', 't2']:
self.assertTrue(locals()["name"], "Failed to import %s" % name)
import t2.sub
import t2.sub.subsub
self.assertEqual(t2.__name__, "t2")
self.assertEqual(t2.sub.__name__, "t2.sub")
self.assertEqual(t2.sub.subsub.__name__, "t2.sub.subsub")
s = """
from t2 import *
self.assertTrue(dir(), ['self', 'sub'])
"""
self.run_code(s)
def test_3(self):
hier = [
("t3", None),
("t3 __init__"+os.extsep+"py", ""),
("t3 sub", None),
("t3 sub __init__"+os.extsep+"py", ""),
("t3 sub subsub", None),
("t3 sub subsub __init__"+os.extsep+"py", "spam = 1"),
]
self.mkhier(hier)
import t3.sub.subsub
self.assertEqual(t3.__name__, "t3")
self.assertEqual(t3.sub.__name__, "t3.sub")
self.assertEqual(t3.sub.subsub.__name__, "t3.sub.subsub")
def test_4(self):
hier = [
("t4.py", "raise RuntimeError('Shouldnt load t4.py')"),
("t4", None),
("t4 __init__"+os.extsep+"py", ""),
("t4 sub.py", "raise RuntimeError('Shouldnt load sub.py')"),
("t4 sub", None),
("t4 sub __init__"+os.extsep+"py", ""),
("t4 sub subsub"+os.extsep+"py",
"raise RuntimeError('Shouldnt load subsub.py')"),
("t4 sub subsub", None),
("t4 sub subsub __init__"+os.extsep+"py", "spam = 1"),
]
self.mkhier(hier)
s = """
from t4.sub.subsub import *
self.assertEqual(spam, 1)
"""
self.run_code(s)
def test_5(self):
hier = [
("t5", None),
("t5 __init__"+os.extsep+"py", "import t5.foo"),
("t5 string"+os.extsep+"py", "spam = 1"),
("t5 foo"+os.extsep+"py",
"from . import string; assert string.spam == 1"),
]
self.mkhier(hier)
import t5
s = """
from t5 import *
self.assertEqual(dir(), ['foo', 'self', 'string', 't5'])
"""
self.run_code(s)
import t5
self.assertEqual(fixdir(dir(t5)),
['__doc__', '__file__', '__name__',
'__package__', '__path__', 'foo', 'string', 't5'])
self.assertEqual(fixdir(dir(t5.foo)),
['__doc__', '__file__', '__name__', '__package__',
'string'])
self.assertEqual(fixdir(dir(t5.string)),
['__doc__', '__file__', '__name__','__package__',
'spam'])
def test_6(self):
hier = [
("t6", None),
("t6 __init__"+os.extsep+"py",
"__all__ = ['spam', 'ham', 'eggs']"),
("t6 spam"+os.extsep+"py", ""),
("t6 ham"+os.extsep+"py", ""),
("t6 eggs"+os.extsep+"py", ""),
]
self.mkhier(hier)
import t6
self.assertEqual(fixdir(dir(t6)),
['__all__', '__doc__', '__file__',
'__name__', '__package__', '__path__'])
s = """
import t6
from t6 import *
self.assertEqual(fixdir(dir(t6)),
['__all__', '__doc__', '__file__',
'__name__', '__package__', '__path__',
'eggs', 'ham', 'spam'])
self.assertEqual(dir(), ['eggs', 'ham', 'self', 'spam', 't6'])
"""
self.run_code(s)
def test_7(self):
hier = [
("t7", None),
("t7"+os.extsep+"py", ""),
("t7 __init__"+os.extsep+"py", ""),
("t7 sub"+os.extsep+"py",
"raise RuntimeError('Shouldnt load sub.py')"),
("t7 sub", None),
("t7 sub __init__"+os.extsep+"py", ""),
("t7 sub "+os.extsep+"py",
"raise RuntimeError('Shouldnt load subsub.py')"),
("t7 sub subsub", None),
("t7 sub subsub __init__"+os.extsep+"py",
"spam = 1"),
]
self.mkhier(hier)
t7, sub, subsub = None, None, None
import t7 as tas
self.assertEqual(fixdir(dir(tas)),
['__doc__', '__file__', '__name__',
'__package__', '__path__'])
self.assertFalse(t7)
from t7 import sub as subpar
self.assertEqual(fixdir(dir(subpar)),
['__doc__', '__file__', '__name__',
'__package__', '__path__'])
self.assertFalse(t7)
self.assertFalse(sub)
from t7.sub import subsub as subsubsub
self.assertEqual(fixdir(dir(subsubsub)),
['__doc__', '__file__', '__name__',
'__package__', '__path__', 'spam'])
self.assertFalse(t7)
self.assertFalse(sub)
self.assertFalse(subsub)
from t7.sub.subsub import spam as ham
self.assertEqual(ham, 1)
self.assertFalse(t7)
self.assertFalse(sub)
self.assertFalse(subsub)
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_8(self):
hier = [
("t8", None),
("t8 __init__"+os.extsep+"py", "'doc for t8'"),
]
self.mkhier(hier)
import t8
self.assertEqual(t8.__doc__, "doc for t8")
def test_main():
test_support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
| 32.278912
| 76
| 0.506639
|
b3341458234a098618417d531e2b1da304922ab3
| 1,727
|
py
|
Python
|
samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_entity_type_sync.py
|
lclc19/python-aiplatform
|
d8da2e365277441abadb04328943f23345d72b0e
|
[
"Apache-2.0"
] | 180
|
2020-09-23T17:21:15.000Z
|
2022-03-30T17:25:47.000Z
|
samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_entity_type_sync.py
|
lclc19/python-aiplatform
|
d8da2e365277441abadb04328943f23345d72b0e
|
[
"Apache-2.0"
] | 601
|
2020-09-23T16:23:44.000Z
|
2022-03-31T19:08:23.000Z
|
samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_entity_type_sync.py
|
lclc19/python-aiplatform
|
d8da2e365277441abadb04328943f23345d72b0e
|
[
"Apache-2.0"
] | 109
|
2020-09-23T16:22:04.000Z
|
2022-03-28T21:18:29.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CreateEntityType
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_generated_aiplatform_v1_FeaturestoreService_CreateEntityType_sync]
from google.cloud import aiplatform_v1
def sample_create_entity_type():
"""Snippet for create_entity_type"""
# Create a client
client = aiplatform_v1.FeaturestoreServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.CreateEntityTypeRequest(
parent="projects/{project}/locations/{location}/featurestores/{featurestore}",
entity_type_id="entity_type_id_value",
)
# Make the request
operation = client.create_entity_type(request=request)
print("Waiting for operation to complete...")
response = operation.result()
print(response)
# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_CreateEntityType_sync]
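# Illustrative values only (assumption: the referenced featurestore already exists):
#   parent="projects/my-project/locations/us-central1/featurestores/my_featurestore"
# The braces in the generated parent string above are placeholders that must be
# replaced with real resource IDs before the request will succeed.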
| 33.862745
| 86
| 0.76491
|
19174b7a72a8b56637398d87ee0861ea16f4a54b
| 4,498
|
py
|
Python
|
tests/test_n2v_utils.py
|
mbinfokyaw/n2v
|
480d94a4e87c08c26e4e8c301468b125682b67f3
|
[
"BSD-3-Clause"
] | 1
|
2021-12-06T14:37:48.000Z
|
2021-12-06T14:37:48.000Z
|
tests/test_n2v_utils.py
|
mbinfokyaw/n2v
|
480d94a4e87c08c26e4e8c301468b125682b67f3
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_n2v_utils.py
|
mbinfokyaw/n2v
|
480d94a4e87c08c26e4e8c301468b125682b67f3
|
[
"BSD-3-Clause"
] | 1
|
2021-04-18T22:10:33.000Z
|
2021-04-18T22:10:33.000Z
|
import numpy as np
from n2v.utils import n2v_utils
def test_get_subpatch():
patch = np.arange(100)
patch.shape = (10,10)
subpatch_target = np.array([[11, 12, 13, 14, 15],
[21, 22, 23, 24, 25],
[31, 32, 33, 34, 35],
[41, 42, 43, 44, 45],
[51, 52, 53, 54, 55]])
subpatch_test = n2v_utils.get_subpatch(patch, (3,3), 2)
assert np.sum(subpatch_target - subpatch_test) == 0
subpatch_test = n2v_utils.get_subpatch(patch, (3,3), 1)
assert np.sum(subpatch_target[1:-1, 1:-1] - subpatch_test) == 0
patch = np.arange(1000)
patch.shape = (10,10,10)
subpatch_target = np.array([[[31,32,33],
[41,42,43],
[51,52,53]],
[[131,132,133],
[141,142,143],
[151,152,153]],
[[231,232,233],
[241,242,243],
[251,252,253]]])
subpatch_test = n2v_utils.get_subpatch(patch, (1,4,2), 1)
assert np.sum(subpatch_target - subpatch_test) == 0
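# For reference: get_subpatch(patch, coord, radius) returns the window of width
# 2 * radius + 1 centred on `coord` along every axis, which is exactly what the
# hard-coded targets above encode.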
def test_random_neighbor():
coord = np.array([51,52,32])
shape = [128, 128, 128]
for i in range(1000):
coords = n2v_utils.random_neighbor(shape, coord)
assert np.all(coords != coord)
shape = [55, 53, 32]
for i in range(1000):
coords = n2v_utils.random_neighbor(shape, coord)
assert np.all(coords != coord)
def test_pm_normal_neighbor_withoutCP():
patch = np.arange(100)
patch.shape = (10,10)
coords = (np.array([2, 4]), np.array([1,3]))
sampler = n2v_utils.pm_normal_withoutCP(1)
for i in range(100):
val = sampler(patch, coords, len(patch.shape))
for v in val:
assert 0 <= v and v < 100
patch = np.arange(1000)
patch.shape = (10, 10, 10, 1)
coords = (np.array([2, 4, 6]), np.array([1,3,5]), np.array([3,5,1]))
for i in range(100):
val = sampler(patch, coords, len(patch.shape))
for v in val:
assert 0 <= v and v < 1000
def test_pm_uniform_withCP():
patch = np.arange(100)
patch.shape = (10, 10)
coords = (np.array([2, 4]), np.array([1, 3]))
sampler = n2v_utils.pm_uniform_withCP(3)
for i in range(100):
val = sampler(patch, coords, len(patch.shape))
for v in val:
assert 0 <= v and v < 100
patch = np.arange(1000)
patch.shape = (10, 10, 10)
coords = (np.array([2, 4, 6]), np.array([1, 3, 5]), np.array([3, 5, 1]))
for i in range(10):
val = sampler(patch, coords, len(patch.shape))
for v in val:
assert 0 <= v and v < 1000
def test_pm_normal_additive():
patch = np.arange(100)
patch.shape = (10, 10)
coords = (np.array([2, 4]), np.array([1, 3]))
sampler = n2v_utils.pm_normal_additive(0)
val = sampler(patch, coords, len(patch.shape))
for v, y, x in zip(val, *coords):
assert v == patch[y, x]
patch = np.arange(1000)
patch.shape = (10, 10, 10)
coords = (np.array([2, 4, 6]), np.array([1, 3, 5]), np.array([3, 5, 1]))
val = sampler(patch, coords, len(patch.shape))
for v, z, y, x in zip(val, *coords):
assert v == patch[z,y,x]
def test_pm_normal_fitted():
patch = np.arange(100)
patch.shape = (10, 10)
coords = (np.array([2, 4]), np.array([1, 3]))
sampler = n2v_utils.pm_normal_fitted(3)
val = sampler(patch, coords, len(patch.shape))
for v in val:
assert isinstance(v, float)
patch = np.arange(1000)
patch.shape = (10, 10, 10)
coords = (np.array([2, 4, 6]), np.array([1, 3, 5]), np.array([3, 5, 1]))
val = sampler(patch, coords, len(patch.shape))
for v in val:
assert isinstance(v, float)
def test_pm_identity():
patch = np.arange(100)
patch.shape = (10, 10)
coords = (np.array([2, 4]), np.array([1, 3]))
sampler = n2v_utils.pm_identity(1)
val = sampler(patch, coords, len(patch.shape))
for v, y, x in zip(val, *coords):
assert v == patch[y, x]
patch = np.arange(1000)
patch.shape = (10, 10, 10, 1)
coords = (np.array([2, 4, 6]), np.array([1, 3, 5]), np.array([3, 5, 1]))
val = sampler(patch, coords, len(patch.shape))
for v, z, y, x in zip(val, *coords):
assert v == patch[z, y, x]
| 27.260606
| 76
| 0.526901
|
20dcaee977e66054a76b67c68e422c844bffa372
| 286
|
py
|
Python
|
PManager/services/service_queue.py
|
srisankethu/opengift.io
|
fc490332bd0252610b55a68c1fff1c4f704fcbd4
|
[
"Apache-2.0"
] | 1
|
2020-08-30T23:12:08.000Z
|
2020-08-30T23:12:08.000Z
|
PManager/services/service_queue.py
|
lenarhus/opengift.io
|
db37494eac141e795c8d9d5b262d54cd6f20fb15
|
[
"Apache-2.0"
] | null | null | null |
PManager/services/service_queue.py
|
lenarhus/opengift.io
|
db37494eac141e795c8d9d5b262d54cd6f20fb15
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
__author__ = 'Rayleigh'
import redis
from tracker import settings
service_queue = redis.StrictRedis(
host=settings.ORDERS_REDIS_HOST,
port=settings.ORDERS_REDIS_PORT,
db=settings.ORDERS_REDIS_DB,
password=settings.ORDERS_REDIS_PASSWORD
).publish
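# Illustrative usage (hypothetical channel name and payload):
#   service_queue('orders', '{"order_id": 42}')
# i.e. a plain Redis PUBLISH on the configured orders database.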
| 23.833333
| 43
| 0.769231
|
24d886e0dbb1393e454efbff417109c03492290b
| 59
|
py
|
Python
|
01-HelloWorld/hello.py
|
SafinaM/boost-python-examples
|
27c61c7477bcdd69ce608d75b59b8cdad237b322
|
[
"BSL-1.0"
] | 794
|
2015-01-08T19:33:18.000Z
|
2022-03-31T13:15:01.000Z
|
01-HelloWorld/hello.py
|
SafinaM/boost-python-examples
|
27c61c7477bcdd69ce608d75b59b8cdad237b322
|
[
"BSL-1.0"
] | 36
|
2015-10-15T14:21:42.000Z
|
2022-03-28T07:06:43.000Z
|
01-HelloWorld/hello.py
|
SafinaM/boost-python-examples
|
27c61c7477bcdd69ce608d75b59b8cdad237b322
|
[
"BSL-1.0"
] | 266
|
2015-01-02T22:11:49.000Z
|
2022-03-03T07:35:39.000Z
|
#!/usr/bin/env python
import hello
print (hello.greet())
| 9.833333
| 21
| 0.694915
|
3c6132f8106c89451796753a4627cf99a4536a85
| 9,335
|
py
|
Python
|
week03/04.MusicLibrary/music_library.py
|
TsvetomirTsvetkov/Python-Course-101
|
1c5ea4631128c22effe3c4ee5a18c43f5e79d463
|
[
"MIT"
] | null | null | null |
week03/04.MusicLibrary/music_library.py
|
TsvetomirTsvetkov/Python-Course-101
|
1c5ea4631128c22effe3c4ee5a18c43f5e79d463
|
[
"MIT"
] | null | null | null |
week03/04.MusicLibrary/music_library.py
|
TsvetomirTsvetkov/Python-Course-101
|
1c5ea4631128c22effe3c4ee5a18c43f5e79d463
|
[
"MIT"
] | null | null | null |
# music_library.py
"""
For my solution, I've decided not to use the datetime library.
"""
from random import randint
import json
import os
def serialize_song(obj):
if isinstance(obj, Song):
info = obj.title + "-" + obj.artist + "-" + obj.album + "-" + str(obj._length)
return info
else:
raise TypeError ("Type not serializable")
class Duration:
# Constructor
def __init__(self, length):
self.is_valid_length(length)
list_len = length.split(':')
if len(list_len) == 2:
list_len.insert(0, '0')
self.hours = list_len[0]
self.minutes = list_len[1]
self.seconds = list_len[2]
# Dunders
def __str__(self):
if self.hours != '0':
return f'{self.hours}:{self.minutes}:{self.seconds}'
else:
return f'{self.minutes}:{self.seconds}'
def __eq__(self, other):
return self.hours == other.hours and\
self.minutes == other.minutes and\
self.seconds == other.seconds
# Static
@staticmethod
def is_valid_length(length):
if len(length) < 4:
raise Exception('Length cannot be less than 4 symbols.')
elif ':' not in length:
raise Exception('Length must have ":" to distinguish seconds, minutes and hours.')
elif length.count(':') > 2:
raise Exception('Length can only include seconds, minutes and hours.')
list_len = length.split(':')
if '' in list_len:
raise Exception('Error: Found ":" in an unexpected position.')
size = len(list_len)
shift = 0
if size == 3:
shift = 1
if int(list_len[0]) < 0:
raise Exception('Hours cannot be negative.')
if len(list_len[1]) != 2:
raise Exception("Two digits represent minutes, if duration is more than an hour.")
if int(list_len[0 + shift]) < 0:
raise Exception('Minutes cannot be negative.')
elif int(list_len[0 + shift]) > 59:
raise Exception('Minutes cannot be more than 59.')
if int(list_len[1 + shift]) < 0:
raise Exception('Seconds cannot be negative.')
elif int(list_len[1 + shift]) > 59:
raise Exception('Seconds cannot be more than 59.')
if len(list_len[1 + shift]) != 2 :
raise Exception('Two digits represent seconds.')
time = [int(elem) == 0 for elem in list_len]
if all(time):
raise Exception("Duration's length cannot be zero.")
class Song:
# Constructor
def __init__(self, title, artist, album, length):
self.validate_argument(title)
self.validate_argument(artist)
self.validate_argument(album)
self._length = Duration(length)
self.title = title
self.artist = artist
self.album = album
# Dunders
def __str__(self):
return f'{self.artist} - {self.title} from {self.album} - {self._length}'
def __eq__(self, other):
return self.artist == other.artist and\
self.title == other.title and\
self.album == other.album and\
self._length == other._length
def __hash__(self):
return int(getattr(self._length, 'seconds'))
# Public
def length(self, hours = None, minutes = None, seconds = None):
if hours == True and minutes == None and seconds == None:
result =getattr(self._length, 'hours')
elif minutes == True and hours == None and seconds == None:
result =int(getattr(self._length, 'hours')) * 60 +\
int(getattr(self._length, 'minutes'))
elif seconds == True and hours == None and minutes == None:
result =int(getattr(self._length, 'seconds')) +\
int(getattr(self._length, 'minutes')) * 60 +\
int(getattr(self._length, 'hours')) * 3600
elif hours == None and minutes == None and seconds == None:
return str(self._length)
else:
raise Exception('Argument mismatch in function call.')
return str(result)
# Static
@staticmethod
def validate_argument(argument):
if type(argument) is not str:
raise Exception('All arguments must be of "str" type.')
if argument == '':
raise Exception('Empty string cannot be an argument.')
class Playlist:
__played_songs = []
# Constructor
def __init__(self, name, repeat = False, shuffle = False):
self.validate_name(name)
self.validate_repeat(repeat)
self.validate_shuffle(shuffle)
self.name = name
self.repeat = repeat
self.shuffle = shuffle
self.songs = []
self.index = 0
# Dunders
def __eq__(self, other):
for elem in self.songs:
if elem not in other.songs:
return False
return True
# Public
def add_song(self, song):
if song in self.songs:
raise Exception('Song is already in the playlist.')
self.songs.append(song)
def remove_song(self, song):
if song in self.songs:
self.songs.remove(song)
else:
raise Exception(f'{song.title} is not in this playlist.')
def total_length(self):
if self.songs == []:
return '0:00'
total_seconds = 0
for elem in self.songs:
total_seconds += int(getattr(getattr(elem, '_length'), 'hours')) * 3600
total_seconds += int(getattr(getattr(elem, '_length'), 'minutes')) * 60
total_seconds += int(getattr(getattr(elem, '_length'), 'seconds'))
total_hours = total_seconds // 3600
total_seconds = total_seconds - (total_hours * 3600)
total_minutes = total_seconds // 60
total_seconds = total_seconds - (total_minutes * 60)
if total_hours == 0:
return f'{total_minutes}:{total_seconds}'
else:
if total_minutes < 10:
if total_seconds < 10:
return f'{total_hours}:0{total_minutes}:0{total_seconds}'
else:
return f'{total_hours}:0{total_minutes}:{total_seconds}'
else:
return f'{total_hours}:{total_minutes}:{total_seconds}'
def artists(self):
histogram = {}
for elem in self.songs:
if getattr(elem, 'artist') in histogram.keys():
histogram[getattr(elem, 'artist')] += 1
else:
histogram[getattr(elem, 'artist')] = 1
return histogram
def next_song(self):
if self.repeat == True and self.shuffle == False:
if self.index == len(self.songs) - 1:
self.index = 0
return self.songs[self.index]
else:
self.index += 1
return self.songs[self.index]
elif self.repeat == False and self.shuffle == False:
if self.index == len(self.songs) - 1:
return 'There are no more songs in your playlist.'
else:
self.index += 1
return self.songs[self.index]
elif self.repeat == False and self.shuffle == True:
if len(self.__played_songs) == len(self.songs):
return 'There are no more songs in your playlist.'
else:
while True:
random = randint(0, len(self.songs) - 1)
if self.songs[random] not in self.__played_songs:
self.index += 1
self.__played_songs.append(self.songs[random])
return self.songs[random]
else:
if len(self.__played_songs) == len(self.songs):
self.__played_songs = []
random = randint(0, len(self.songs) - 1)
self.__played_songs.append(self.songs[random])
return self.songs[random]
else:
while True:
random = randint(0, len(self.songs) - 1)
if self.songs[random] not in self.__played_songs:
self.__played_songs.append(self.songs[random])
return self.songs[random]
def save(self):
my_dir = '/home/sktuan/Documents/Python-Course-101/week03/04.MusicLibrary/playlist-data'
playlist_name = self.name.replace(' ', '-')
fname = os.path.join(my_dir, playlist_name)
with open(fname, 'w') as f:
json_data = json.dumps(self.__dict__, indent=4, default=serialize_song)
f.write(json_data)
# Static
@staticmethod
def load(path = '/home/sktuan/Documents/Python-Course-101/week03/04.MusicLibrary/playlist-data/Code.txt'):
my_dir = '/home/sktuan/Documents/Python-Course-101/week03/04.MusicLibrary/playlist-data/'
with open(my_dir + str(path), 'r') as f:
# Reads example.json file
my_read_object = json.load(f)
new_obj = Playlist( name = my_read_object['name'],\
repeat = my_read_object['repeat'],\
shuffle = my_read_object['shuffle'])
loaded_songs = my_read_object['songs']
for elem in loaded_songs:
to_add = elem.split('-')
new_song = Song(to_add[0], to_add[1], to_add[2], to_add[3])
new_obj.add_song(new_song)
return new_obj
@staticmethod
def validate_name(name):
if type(name) is not str:
raise Exception('Name must be of "str" type.')
if name == '':
raise Exception('Empty string cannot be a name.')
@staticmethod
def validate_repeat(repeat):
if type(repeat) is not bool:
raise Exception('Repeat must be of "bool" type.')
@staticmethod
def validate_shuffle(shuffle):
if type(shuffle) is not bool:
raise Exception('Shuffle must be of "bool" type.')
def main():
f = Song(title="Odin", artist="Manowar", album="The Sons of Odin", length="3:44")
s = Song(title="Empty", artist="Metric", album="Live It Out", length="5:55")
q = Song(title="Superman", artist="Eminem", album="The Eminem Show", length="5:50")
code_songs = Playlist(name="Code", repeat=True, shuffle=True)
code_songs.add_song(f)
code_songs.add_song(s)
code_songs.add_song(q)
print(f.length(seconds = True))
print(code_songs.artists())
print(code_songs.total_length())
print(code_songs.next_song())
print(code_songs.next_song())
print(code_songs.next_song())
print(code_songs.next_song())
print(code_songs.next_song())
print(code_songs.next_song())
print(code_songs.next_song())
print(code_songs.next_song())
print(code_songs.next_song())
code_songs.save()
code_songs2 = Playlist.load('Code')
print(code_songs == code_songs2)
if __name__ == '__main__':
main()
| 26.902017
| 107
| 0.676058
|
d54153586f1aec2c5747ecdfc0e4f2df9996755d
| 6,849
|
py
|
Python
|
bindings/python/ensmallen_graph/datasets/string/streptomycesspnrrls340.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/string/streptomycesspnrrls340.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/string/streptomycesspnrrls340.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
"""
This file offers the methods to automatically retrieve the graph Streptomyces sp. NRRLS340.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-03 23:10:41.151930
The undirected graph Streptomyces sp. NRRLS340 has 7198 nodes and 1184610
weighted edges, of which none are self-loops. The graph is dense as it
has a density of 0.04573 and has 30 connected components, where the component
with most nodes has 7135 nodes and the component with the least nodes has
2 nodes. The graph median node degree is 277, the mean node degree is 329.15,
and the node degree mode is 2. The top 5 most central nodes are 1463901.JOIY01000019_gene5093
(degree 3539), 1463901.JOIY01000032_gene3502 (degree 2306), 1463901.JOIY01000078_gene2615
(degree 2285), 1463901.JOIY01000017_gene6773 (degree 2232) and 1463901.JOIY01000036_gene6803
(degree 2171).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import StreptomycesSpNrrls340
# Then load the graph
graph = StreptomycesSpNrrls340()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
        # You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
        # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def StreptomycesSpNrrls340(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/string",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
"""Return new instance of the Streptomyces sp. NRRLS340 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False,
        Whether to load the graph as directed or undirected.
By default false.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache_path: str = "graphs",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
    Instance of Streptomyces sp. NRRLS340 graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-03 23:10:41.151930
The undirected graph Streptomyces sp. NRRLS340 has 7198 nodes and 1184610
weighted edges, of which none are self-loops. The graph is dense as it
has a density of 0.04573 and has 30 connected components, where the component
with most nodes has 7135 nodes and the component with the least nodes has
2 nodes. The graph median node degree is 277, the mean node degree is 329.15,
and the node degree mode is 2. The top 5 most central nodes are 1463901.JOIY01000019_gene5093
(degree 3539), 1463901.JOIY01000032_gene3502 (degree 2306), 1463901.JOIY01000078_gene2615
(degree 2285), 1463901.JOIY01000017_gene6773 (degree 2232) and 1463901.JOIY01000036_gene6803
(degree 2171).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import StreptomycesSpNrrls340
# Then load the graph
graph = StreptomycesSpNrrls340()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
        # You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
        # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
return AutomaticallyRetrievedGraph(
graph_name="StreptomycesSpNrrls340",
dataset="string",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| 35.858639
| 223
| 0.709155
|
e9063e26c831da7f3095ef07cee6ae2364e83752
| 1,043
|
py
|
Python
|
app/core/migrations/0004_recipe.py
|
achavesjimenez/recipe-app-api
|
9337ac6e249fdaad3b1a77d3c2e943a23c5ccef3
|
[
"MIT"
] | null | null | null |
app/core/migrations/0004_recipe.py
|
achavesjimenez/recipe-app-api
|
9337ac6e249fdaad3b1a77d3c2e943a23c5ccef3
|
[
"MIT"
] | null | null | null |
app/core/migrations/0004_recipe.py
|
achavesjimenez/recipe-app-api
|
9337ac6e249fdaad3b1a77d3c2e943a23c5ccef3
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.15 on 2020-07-30 16:17
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0003_ingredient'),
]
operations = [
migrations.CreateModel(
name='Recipe',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('time_minutes', models.IntegerField()),
('price', models.DecimalField(decimal_places=2, max_digits=5)),
('link', models.CharField(blank=True, max_length=255)),
('ingredients', models.ManyToManyField(to='core.Ingredient')),
('tags', models.ManyToManyField(to='core.Tag')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 35.965517
| 118
| 0.604027
|
b2f32d0cf36a337b59633c23da68b521509a528d
| 340
|
py
|
Python
|
code/sevenWonders.py
|
matthewReff/Kattis-Problems
|
848628af630c990fb91bde6256a77afad6a3f5f6
|
[
"MIT"
] | 8
|
2020-02-21T22:21:01.000Z
|
2022-02-16T05:30:54.000Z
|
code/sevenWonders.py
|
matthewReff/Kattis-Problems
|
848628af630c990fb91bde6256a77afad6a3f5f6
|
[
"MIT"
] | null | null | null |
code/sevenWonders.py
|
matthewReff/Kattis-Problems
|
848628af630c990fb91bde6256a77afad6a3f5f6
|
[
"MIT"
] | 3
|
2020-08-05T05:42:35.000Z
|
2021-08-30T05:39:51.000Z
|
def seven_wonders():
rawInput=raw_input()
cards=[0,0,0]
total = 0
for i in rawInput:
if i =="T":
cards[0]+=1
elif i == "C":
cards[1]+=1
else:
cards[2]+=1
for i in cards:
total+=i**2
cards.sort()
total+=cards[0]*7
print total
seven_wonders()
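# Scoring recap (Kattis "Seven Wonders"): each card type is worth count**2 points,
# and every complete set of one card of each type adds 7 points, which is the
# cards[0] * 7 term after the sort above.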
| 20
| 24
| 0.464706
|
8bcf567222ed17b748773d1dfaf2a07f83ea1776
| 1,747
|
py
|
Python
|
status.py
|
bUsernameIsUnavailable/Proiect-Inginerie-Software
|
4dae6a079bd16ad970eb63aa907159e87aa565b8
|
[
"MIT"
] | null | null | null |
status.py
|
bUsernameIsUnavailable/Proiect-Inginerie-Software
|
4dae6a079bd16ad970eb63aa907159e87aa565b8
|
[
"MIT"
] | 3
|
2022-01-31T18:18:03.000Z
|
2022-02-01T22:24:25.000Z
|
status.py
|
bUsernameIsUnavailable/Proiect-Inginerie-Software
|
4dae6a079bd16ad970eb63aa907159e87aa565b8
|
[
"MIT"
] | 3
|
2022-02-02T15:10:56.000Z
|
2022-02-03T16:49:08.000Z
|
from db import get_db
from weather import get_humidity_from_weather_api, get_precipitation_from_weather_api
def get_ideal_parameters(humidity, precipitation, soil_moisture, system_temperature):
ideal_temperature = 20
ideal_moisture = 50
if precipitation is None:
precipitation = 0
if soil_moisture is None or soil_moisture >= ideal_moisture or precipitation > 4:
return 0.0
water_quantity = (ideal_moisture - soil_moisture) * 100
if system_temperature and (system_temperature - ideal_temperature > 5):
water_quantity += 50
if humidity and humidity < 100:
water_quantity += 50
return water_quantity
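# Worked example with illustrative numbers: humidity=60, precipitation=0,
# soil_moisture=30, system_temperature=28 gives (50 - 30) * 100 = 2000,
# plus 50 for the temperature excess and 50 for humidity < 100, i.e. 2100.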
def get_status():
soil_moisture = get_db().execute(
'SELECT id, timestamp, value FROM soil_moisture ORDER BY timestamp DESC'
).fetchone()
if soil_moisture:
soil_moisture = soil_moisture['value']
system_temperature = get_db().execute(
'SELECT id, timestamp, value FROM temperature ORDER BY timestamp DESC'
).fetchone()
if system_temperature:
system_temperature = system_temperature['value']
humidity = get_humidity_from_weather_api()['data']['humidity']
precipitation = get_precipitation_from_weather_api()['data']['precipitation']
water_quantity = get_ideal_parameters(humidity, precipitation, soil_moisture, system_temperature)
if water_quantity:
db = get_db()
db.execute(
'INSERT INTO watering (water_quantity) VALUES (?)',
(water_quantity,)
)
db.commit()
else:
water_quantity = 0.0
return {
'status': 'The plant was successfully watered',
'data': {
'water_quantity': water_quantity
}
}
| 30.649123
| 101
| 0.68174
|
2734a4560639e3cdb5604cda00f594ff6e592fce
| 12,973
|
py
|
Python
|
tests/admin_views/test_autocomplete_view.py
|
imjvdn/scratch-game-1
|
5dffd79f17e0b66d3d2e57262749311aca28e850
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 7
|
2020-01-13T18:26:41.000Z
|
2021-04-20T04:22:26.000Z
|
tests/admin_views/test_autocomplete_view.py
|
imjvdn/scratch-game-1
|
5dffd79f17e0b66d3d2e57262749311aca28e850
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 7
|
2021-03-26T03:13:38.000Z
|
2022-03-12T00:42:03.000Z
|
tests/admin_views/test_autocomplete_view.py
|
imjvdn/scratch-game-1
|
5dffd79f17e0b66d3d2e57262749311aca28e850
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 11
|
2019-09-14T20:57:30.000Z
|
2022-01-19T17:59:26.000Z
|
import json
import time
from django.contrib import admin
from django.contrib.admin.tests import AdminSeleniumTestCase
from django.contrib.admin.views.autocomplete import AutocompleteJsonView
from django.contrib.auth.models import Permission, User
from django.contrib.contenttypes.models import ContentType
from django.http import Http404
from django.test import RequestFactory, override_settings
from django.urls import reverse, reverse_lazy
from .admin import AnswerAdmin, QuestionAdmin
from .models import Answer, Author, Authorship, Book, Question
from .tests import AdminViewBasicTestCase
PAGINATOR_SIZE = AutocompleteJsonView.paginate_by
class AuthorAdmin(admin.ModelAdmin):
ordering = ['id']
search_fields = ['id']
class AuthorshipInline(admin.TabularInline):
model = Authorship
autocomplete_fields = ['author']
class BookAdmin(admin.ModelAdmin):
inlines = [AuthorshipInline]
site = admin.AdminSite(name='autocomplete_admin')
site.register(Question, QuestionAdmin)
site.register(Answer, AnswerAdmin)
site.register(Author, AuthorAdmin)
site.register(Book, BookAdmin)
class AutocompleteJsonViewTests(AdminViewBasicTestCase):
as_view_args = {'model_admin': QuestionAdmin(Question, site)}
factory = RequestFactory()
url = reverse_lazy('autocomplete_admin:admin_views_question_autocomplete')
@classmethod
def setUpTestData(cls):
cls.user = User.objects.create_user(
username='user', password='secret',
email='user@example.com', is_staff=True,
)
super().setUpTestData()
def test_success(self):
q = Question.objects.create(question='Is this a question?')
request = self.factory.get(self.url, {'term': 'is'})
request.user = self.superuser
response = AutocompleteJsonView.as_view(**self.as_view_args)(request)
self.assertEqual(response.status_code, 200)
data = json.loads(response.content.decode('utf-8'))
self.assertEqual(data, {
'results': [{'id': str(q.pk), 'text': q.question}],
'pagination': {'more': False},
})
def test_must_be_logged_in(self):
response = self.client.get(self.url, {'term': ''})
self.assertEqual(response.status_code, 200)
self.client.logout()
response = self.client.get(self.url, {'term': ''})
self.assertEqual(response.status_code, 302)
def test_has_view_or_change_permission_required(self):
"""
        Users require the view or change permission for the related model to
        access the autocomplete view for it.
"""
request = self.factory.get(self.url, {'term': 'is'})
self.user.is_staff = True
self.user.save()
request.user = self.user
response = AutocompleteJsonView.as_view(**self.as_view_args)(request)
self.assertEqual(response.status_code, 403)
self.assertJSONEqual(response.content.decode('utf-8'), {'error': '403 Forbidden'})
for permission in ('view', 'change'):
with self.subTest(permission=permission):
self.user.user_permissions.clear()
p = Permission.objects.get(
content_type=ContentType.objects.get_for_model(Question),
codename='%s_question' % permission,
)
self.user.user_permissions.add(p)
request.user = User.objects.get(pk=self.user.pk)
response = AutocompleteJsonView.as_view(**self.as_view_args)(request)
self.assertEqual(response.status_code, 200)
def test_search_use_distinct(self):
"""
        Searching across model relations uses QuerySet.distinct() to avoid
duplicates.
"""
q1 = Question.objects.create(question='question 1')
q2 = Question.objects.create(question='question 2')
q2.related_questions.add(q1)
q3 = Question.objects.create(question='question 3')
q3.related_questions.add(q1)
request = self.factory.get(self.url, {'term': 'question'})
request.user = self.superuser
class DistinctQuestionAdmin(QuestionAdmin):
search_fields = ['related_questions__question', 'question']
model_admin = DistinctQuestionAdmin(Question, site)
response = AutocompleteJsonView.as_view(model_admin=model_admin)(request)
self.assertEqual(response.status_code, 200)
data = json.loads(response.content.decode('utf-8'))
self.assertEqual(len(data['results']), 3)
def test_missing_search_fields(self):
class EmptySearchAdmin(QuestionAdmin):
search_fields = []
model_admin = EmptySearchAdmin(Question, site)
msg = 'EmptySearchAdmin must have search_fields for the autocomplete_view.'
with self.assertRaisesMessage(Http404, msg):
model_admin.autocomplete_view(self.factory.get(self.url))
def test_get_paginator(self):
"""Search results are paginated."""
Question.objects.bulk_create(Question(question=str(i)) for i in range(PAGINATOR_SIZE + 10))
model_admin = QuestionAdmin(Question, site)
model_admin.ordering = ['pk']
# The first page of results.
request = self.factory.get(self.url, {'term': ''})
request.user = self.superuser
response = AutocompleteJsonView.as_view(model_admin=model_admin)(request)
self.assertEqual(response.status_code, 200)
data = json.loads(response.content.decode('utf-8'))
self.assertEqual(data, {
'results': [{'id': str(q.pk), 'text': q.question} for q in Question.objects.all()[:PAGINATOR_SIZE]],
'pagination': {'more': True},
})
# The second page of results.
request = self.factory.get(self.url, {'term': '', 'page': '2'})
request.user = self.superuser
response = AutocompleteJsonView.as_view(model_admin=model_admin)(request)
self.assertEqual(response.status_code, 200)
data = json.loads(response.content.decode('utf-8'))
self.assertEqual(data, {
'results': [{'id': str(q.pk), 'text': q.question} for q in Question.objects.all()[PAGINATOR_SIZE:]],
'pagination': {'more': False},
})
@override_settings(ROOT_URLCONF='admin_views.urls')
class SeleniumTests(AdminSeleniumTestCase):
available_apps = ['admin_views'] + AdminSeleniumTestCase.available_apps
def setUp(self):
self.superuser = User.objects.create_superuser(
username='super', password='secret', email='super@example.com',
)
self.admin_login(username='super', password='secret', login_url=reverse('autocomplete_admin:index'))
def test_select(self):
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
self.selenium.get(self.live_server_url + reverse('autocomplete_admin:admin_views_answer_add'))
elem = self.selenium.find_element_by_css_selector('.select2-selection')
elem.click() # Open the autocomplete dropdown.
results = self.selenium.find_element_by_css_selector('.select2-results')
self.assertTrue(results.is_displayed())
option = self.selenium.find_element_by_css_selector('.select2-results__option')
self.assertEqual(option.text, 'No results found')
elem.click() # Close the autocomplete dropdown.
q1 = Question.objects.create(question='Who am I?')
Question.objects.bulk_create(Question(question=str(i)) for i in range(PAGINATOR_SIZE + 10))
elem.click() # Reopen the dropdown now that some objects exist.
result_container = self.selenium.find_element_by_css_selector('.select2-results')
self.assertTrue(result_container.is_displayed())
results = result_container.find_elements_by_css_selector('.select2-results__option')
# PAGINATOR_SIZE results and "Loading more results".
self.assertEqual(len(results), PAGINATOR_SIZE + 1)
search = self.selenium.find_element_by_css_selector('.select2-search__field')
# Load next page of results by scrolling to the bottom of the list.
for _ in range(len(results)):
search.send_keys(Keys.ARROW_DOWN)
results = result_container.find_elements_by_css_selector('.select2-results__option')
# All objects and "Loading more results".
self.assertEqual(len(results), PAGINATOR_SIZE + 11)
# Limit the results with the search field.
search.send_keys('Who')
# Ajax request is delayed.
self.assertTrue(result_container.is_displayed())
results = result_container.find_elements_by_css_selector('.select2-results__option')
self.assertEqual(len(results), PAGINATOR_SIZE + 12)
# Wait for ajax delay.
time.sleep(0.25)
self.assertTrue(result_container.is_displayed())
results = result_container.find_elements_by_css_selector('.select2-results__option')
self.assertEqual(len(results), 1)
# Select the result.
search.send_keys(Keys.RETURN)
select = Select(self.selenium.find_element_by_id('id_question'))
self.assertEqual(select.first_selected_option.get_attribute('value'), str(q1.pk))
def test_select_multiple(self):
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
self.selenium.get(self.live_server_url + reverse('autocomplete_admin:admin_views_question_add'))
elem = self.selenium.find_element_by_css_selector('.select2-selection')
elem.click() # Open the autocomplete dropdown.
results = self.selenium.find_element_by_css_selector('.select2-results')
self.assertTrue(results.is_displayed())
option = self.selenium.find_element_by_css_selector('.select2-results__option')
self.assertEqual(option.text, 'No results found')
elem.click() # Close the autocomplete dropdown.
Question.objects.create(question='Who am I?')
Question.objects.bulk_create(Question(question=str(i)) for i in range(PAGINATOR_SIZE + 10))
elem.click() # Reopen the dropdown now that some objects exist.
result_container = self.selenium.find_element_by_css_selector('.select2-results')
self.assertTrue(result_container.is_displayed())
results = result_container.find_elements_by_css_selector('.select2-results__option')
self.assertEqual(len(results), PAGINATOR_SIZE + 1)
search = self.selenium.find_element_by_css_selector('.select2-search__field')
# Load next page of results by scrolling to the bottom of the list.
for _ in range(len(results)):
search.send_keys(Keys.ARROW_DOWN)
results = result_container.find_elements_by_css_selector('.select2-results__option')
self.assertEqual(len(results), 31)
# Limit the results with the search field.
search.send_keys('Who')
# Ajax request is delayed.
self.assertTrue(result_container.is_displayed())
results = result_container.find_elements_by_css_selector('.select2-results__option')
self.assertEqual(len(results), 32)
# Wait for ajax delay.
time.sleep(0.25)
self.assertTrue(result_container.is_displayed())
results = result_container.find_elements_by_css_selector('.select2-results__option')
self.assertEqual(len(results), 1)
# Select the result.
search.send_keys(Keys.RETURN)
# Reopen the dropdown and add the first result to the selection.
elem.click()
search.send_keys(Keys.ARROW_DOWN)
search.send_keys(Keys.RETURN)
select = Select(self.selenium.find_element_by_id('id_related_questions'))
self.assertEqual(len(select.all_selected_options), 2)
def test_inline_add_another_widgets(self):
def assertNoResults(row):
elem = row.find_element_by_css_selector('.select2-selection')
elem.click() # Open the autocomplete dropdown.
results = self.selenium.find_element_by_css_selector('.select2-results')
self.assertTrue(results.is_displayed())
option = self.selenium.find_element_by_css_selector('.select2-results__option')
self.assertEqual(option.text, 'No results found')
# Autocomplete works in rows present when the page loads.
self.selenium.get(self.live_server_url + reverse('autocomplete_admin:admin_views_book_add'))
rows = self.selenium.find_elements_by_css_selector('.dynamic-authorship_set')
self.assertEqual(len(rows), 3)
assertNoResults(rows[0])
# Autocomplete works in rows added using the "Add another" button.
self.selenium.find_element_by_link_text('Add another Authorship').click()
rows = self.selenium.find_elements_by_css_selector('.dynamic-authorship_set')
self.assertEqual(len(rows), 4)
assertNoResults(rows[-1])
| 48.048148
| 112
| 0.68789
|
82ddfe123a19942b2678edb03597e8cce4de9c62
| 12,649
|
py
|
Python
|
colour/adaptation/cmccat2000.py
|
rift-labs-developer/colour
|
15112dbe824aab0f21447e0db4a046a28a06f43a
|
[
"BSD-3-Clause"
] | 1,380
|
2015-01-10T12:30:33.000Z
|
2022-03-30T10:19:57.000Z
|
colour/adaptation/cmccat2000.py
|
rift-labs-developer/colour
|
15112dbe824aab0f21447e0db4a046a28a06f43a
|
[
"BSD-3-Clause"
] | 638
|
2015-01-02T10:49:05.000Z
|
2022-03-29T10:16:22.000Z
|
colour/adaptation/cmccat2000.py
|
rift-labs-developer/colour
|
15112dbe824aab0f21447e0db4a046a28a06f43a
|
[
"BSD-3-Clause"
] | 250
|
2015-01-21T15:27:19.000Z
|
2022-03-30T10:23:58.000Z
|
# -*- coding: utf-8 -*-
"""
CMCCAT2000 Chromatic Adaptation Model
=====================================
Defines the *CMCCAT2000* chromatic adaptation model objects:
- :class:`colour.adaptation.InductionFactors_CMCCAT2000`
- :class:`colour.VIEWING_CONDITIONS_CMCCAT2000`
- :func:`colour.adaptation.chromatic_adaptation_forward_CMCCAT2000`
- :func:`colour.adaptation.chromatic_adaptation_inverse_CMCCAT2000`
- :func:`colour.adaptation.chromatic_adaptation_CMCCAT2000`
References
----------
- :cite:`Li2002a` : Li, C., Luo, M. R., Rigg, B., & Hunt, R. W. G. (2002).
CMC 2000 chromatic adaptation transform: CMCCAT2000. Color Research &
Application, 27(1), 49-58. doi:10.1002/col.10005
- :cite:`Westland2012k` : Westland, S., Ripamonti, C., & Cheung, V. (2012).
CMCCAT2000. In Computational Colour Science Using MATLAB (2nd ed., pp.
83-86). ISBN:978-0-470-66569-5
"""
import numpy as np
from collections import namedtuple
from colour.adaptation import CAT_CMCCAT2000
from colour.algebra import vector_dot
from colour.utilities import (CaseInsensitiveMapping, as_float_array,
from_range_100, to_domain_100, validate_method)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'
__all__ = [
'CAT_INVERSE_CMCCAT2000', 'InductionFactors_CMCCAT2000',
'VIEWING_CONDITIONS_CMCCAT2000', 'chromatic_adaptation_forward_CMCCAT2000',
'chromatic_adaptation_inverse_CMCCAT2000',
'chromatic_adaptation_CMCCAT2000'
]
CAT_INVERSE_CMCCAT2000 = np.linalg.inv(CAT_CMCCAT2000)
"""
Inverse *CMCCAT2000* chromatic adaptation transform.
CAT_INVERSE_CMCCAT2000 : array_like, (3, 3)
"""
class InductionFactors_CMCCAT2000(
namedtuple('InductionFactors_CMCCAT2000', ('F', ))):
"""
*CMCCAT2000* chromatic adaptation model induction factors.
Parameters
----------
F : numeric or array_like
:math:`F` surround condition.
References
----------
:cite:`Li2002a`, :cite:`Westland2012k`
"""
VIEWING_CONDITIONS_CMCCAT2000 = CaseInsensitiveMapping({
'Average': InductionFactors_CMCCAT2000(1),
'Dim': InductionFactors_CMCCAT2000(0.8),
'Dark': InductionFactors_CMCCAT2000(0.8)
})
VIEWING_CONDITIONS_CMCCAT2000.__doc__ = """
Reference *CMCCAT2000* chromatic adaptation model viewing conditions.
References
----------
:cite:`Li2002a`, :cite:`Westland2012k`
VIEWING_CONDITIONS_CMCCAT2000 : CaseInsensitiveMapping
('Average', 'Dim', 'Dark')
"""
def chromatic_adaptation_forward_CMCCAT2000(
XYZ,
XYZ_w,
XYZ_wr,
L_A1,
L_A2,
surround=VIEWING_CONDITIONS_CMCCAT2000['Average']):
"""
Adapts given stimulus *CIE XYZ* tristimulus values from test viewing
conditions to reference viewing conditions using *CMCCAT2000* forward
chromatic adaptation model.
Parameters
----------
XYZ : array_like
*CIE XYZ* tristimulus values of the stimulus to adapt.
XYZ_w : array_like
Test viewing condition *CIE XYZ* tristimulus values of the whitepoint.
XYZ_wr : array_like
Reference viewing condition *CIE XYZ* tristimulus values of the
whitepoint.
L_A1 : numeric or array_like
Luminance of test adapting field :math:`L_{A1}` in :math:`cd/m^2`.
L_A2 : numeric or array_like
Luminance of reference adapting field :math:`L_{A2}` in :math:`cd/m^2`.
surround : InductionFactors_CMCCAT2000, optional
Surround viewing conditions induction factors.
Returns
-------
ndarray
*CIE XYZ_c* tristimulus values of the stimulus corresponding colour.
Notes
-----
+------------+-----------------------+---------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``XYZ`` | [0, 100] | [0, 1] |
+------------+-----------------------+---------------+
| ``XYZ_w`` | [0, 100] | [0, 1] |
+------------+-----------------------+---------------+
| ``XYZ_wr`` | [0, 100] | [0, 1] |
+------------+-----------------------+---------------+
+------------+-----------------------+---------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``XYZ_c`` | [0, 100] | [0, 1] |
+------------+-----------------------+---------------+
References
----------
:cite:`Li2002a`, :cite:`Westland2012k`
Examples
--------
>>> XYZ = np.array([22.48, 22.74, 8.54])
>>> XYZ_w = np.array([111.15, 100.00, 35.20])
>>> XYZ_wr = np.array([94.81, 100.00, 107.30])
>>> L_A1 = 200
>>> L_A2 = 200
>>> chromatic_adaptation_forward_CMCCAT2000(XYZ, XYZ_w, XYZ_wr, L_A1, L_A2)
... # doctest: +ELLIPSIS
array([ 19.5269832..., 23.0683396..., 24.9717522...])
"""
XYZ = to_domain_100(XYZ)
XYZ_w = to_domain_100(XYZ_w)
XYZ_wr = to_domain_100(XYZ_wr)
L_A1 = as_float_array(L_A1)
L_A2 = as_float_array(L_A2)
RGB = vector_dot(CAT_CMCCAT2000, XYZ)
RGB_w = vector_dot(CAT_CMCCAT2000, XYZ_w)
RGB_wr = vector_dot(CAT_CMCCAT2000, XYZ_wr)
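    # Degree of adaptation D as defined by the CMCCAT2000 model (Li et al.,
    # 2002): it increases with the mean adapting luminance, is weighted by the
    # surround factor F, and is clipped to the [0, 1] range below.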
D = (surround.F * (0.08 * np.log10(0.5 * (L_A1 + L_A2)) + 0.76 - 0.45 *
(L_A1 - L_A2) / (L_A1 + L_A2)))
D = np.clip(D, 0, 1)
a = D * XYZ_w[..., 1] / XYZ_wr[..., 1]
RGB_c = (
RGB * (a[..., np.newaxis] * (RGB_wr / RGB_w) + 1 - D[..., np.newaxis]))
XYZ_c = vector_dot(CAT_INVERSE_CMCCAT2000, RGB_c)
return from_range_100(XYZ_c)
def chromatic_adaptation_inverse_CMCCAT2000(
XYZ_c,
XYZ_w,
XYZ_wr,
L_A1,
L_A2,
surround=VIEWING_CONDITIONS_CMCCAT2000['Average']):
"""
Adapts given stimulus corresponding colour *CIE XYZ* tristimulus values
from reference viewing conditions to test viewing conditions using
*CMCCAT2000* inverse chromatic adaptation model.
Parameters
----------
XYZ_c : array_like
*CIE XYZ* tristimulus values of the stimulus to adapt.
XYZ_w : array_like
Test viewing condition *CIE XYZ* tristimulus values of the whitepoint.
XYZ_wr : array_like
Reference viewing condition *CIE XYZ* tristimulus values of the
whitepoint.
L_A1 : numeric or array_like
Luminance of test adapting field :math:`L_{A1}` in :math:`cd/m^2`.
L_A2 : numeric or array_like
Luminance of reference adapting field :math:`L_{A2}` in :math:`cd/m^2`.
surround : InductionFactors_CMCCAT2000, optional
Surround viewing conditions induction factors.
Returns
-------
ndarray
*CIE XYZ_c* tristimulus values of the adapted stimulus.
Notes
-----
+------------+-----------------------+---------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``XYZ_c`` | [0, 100] | [0, 1] |
+------------+-----------------------+---------------+
| ``XYZ_w`` | [0, 100] | [0, 1] |
+------------+-----------------------+---------------+
| ``XYZ_wr`` | [0, 100] | [0, 1] |
+------------+-----------------------+---------------+
+------------+-----------------------+---------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``XYZ`` | [0, 100] | [0, 1] |
+------------+-----------------------+---------------+
References
----------
:cite:`Li2002a`, :cite:`Westland2012k`
Examples
--------
>>> XYZ_c = np.array([19.53, 23.07, 24.97])
>>> XYZ_w = np.array([111.15, 100.00, 35.20])
>>> XYZ_wr = np.array([94.81, 100.00, 107.30])
>>> L_A1 = 200
>>> L_A2 = 200
>>> chromatic_adaptation_inverse_CMCCAT2000(XYZ_c, XYZ_w, XYZ_wr, L_A1,
... L_A2)
... # doctest: +ELLIPSIS
array([ 22.4839876..., 22.7419485..., 8.5393392...])
"""
XYZ_c = to_domain_100(XYZ_c)
XYZ_w = to_domain_100(XYZ_w)
XYZ_wr = to_domain_100(XYZ_wr)
L_A1 = as_float_array(L_A1)
L_A2 = as_float_array(L_A2)
RGB_c = vector_dot(CAT_CMCCAT2000, XYZ_c)
RGB_w = vector_dot(CAT_CMCCAT2000, XYZ_w)
RGB_wr = vector_dot(CAT_CMCCAT2000, XYZ_wr)
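    # The degree of adaptation D is computed exactly as in the forward model,
    # so that the division below undoes the forward adaptation step.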
D = (surround.F * (0.08 * np.log10(0.5 * (L_A1 + L_A2)) + 0.76 - 0.45 *
(L_A1 - L_A2) / (L_A1 + L_A2)))
D = np.clip(D, 0, 1)
a = D * XYZ_w[..., 1] / XYZ_wr[..., 1]
RGB = (RGB_c / (a[..., np.newaxis] *
(RGB_wr / RGB_w) + 1 - D[..., np.newaxis]))
XYZ = vector_dot(CAT_INVERSE_CMCCAT2000, RGB)
return from_range_100(XYZ)
def chromatic_adaptation_CMCCAT2000(
XYZ,
XYZ_w,
XYZ_wr,
L_A1,
L_A2,
surround=VIEWING_CONDITIONS_CMCCAT2000['Average'],
direction='Forward'):
"""
Adapts given stimulus *CIE XYZ* tristimulus values using given viewing
conditions.
This definition is a convenient wrapper around
:func:`colour.adaptation.chromatic_adaptation_forward_CMCCAT2000` and
:func:`colour.adaptation.chromatic_adaptation_inverse_CMCCAT2000`.
Parameters
----------
XYZ : array_like
*CIE XYZ* tristimulus values of the stimulus to adapt.
XYZ_w : array_like
Source viewing condition *CIE XYZ* tristimulus values of the
whitepoint.
XYZ_wr : array_like
Target viewing condition *CIE XYZ* tristimulus values of the
whitepoint.
L_A1 : numeric or array_like
Luminance of test adapting field :math:`L_{A1}` in :math:`cd/m^2`.
L_A2 : numeric or array_like
Luminance of reference adapting field :math:`L_{A2}` in :math:`cd/m^2`.
surround : InductionFactors_CMCCAT2000, optional
Surround viewing conditions induction factors.
direction : unicode, optional
**{'Forward', 'Inverse'}**,
Chromatic adaptation direction.
Returns
-------
ndarray
Adapted stimulus *CIE XYZ* tristimulus values.
Notes
-----
+------------+-----------------------+---------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``XYZ`` | [0, 100] | [0, 1] |
+------------+-----------------------+---------------+
| ``XYZ_w`` | [0, 100] | [0, 1] |
+------------+-----------------------+---------------+
| ``XYZ_wr`` | [0, 100] | [0, 1] |
+------------+-----------------------+---------------+
+------------+-----------------------+---------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``XYZ`` | [0, 100] | [0, 1] |
+------------+-----------------------+---------------+
References
----------
:cite:`Li2002a`, :cite:`Westland2012k`
Examples
--------
>>> XYZ = np.array([22.48, 22.74, 8.54])
>>> XYZ_w = np.array([111.15, 100.00, 35.20])
>>> XYZ_wr = np.array([94.81, 100.00, 107.30])
>>> L_A1 = 200
>>> L_A2 = 200
>>> chromatic_adaptation_CMCCAT2000(
... XYZ, XYZ_w, XYZ_wr, L_A1, L_A2, direction='Forward')
... # doctest: +ELLIPSIS
array([ 19.5269832..., 23.0683396..., 24.9717522...])
Using the *CMCCAT2000* inverse model:
>>> XYZ = np.array([19.52698326, 23.06833960, 24.97175229])
>>> XYZ_w = np.array([111.15, 100.00, 35.20])
>>> XYZ_wr = np.array([94.81, 100.00, 107.30])
>>> L_A1 = 200
>>> L_A2 = 200
>>> chromatic_adaptation_CMCCAT2000(
... XYZ, XYZ_w, XYZ_wr, L_A1, L_A2, direction='Inverse')
... # doctest: +ELLIPSIS
array([ 22.48, 22.74, 8.54])
"""
direction = validate_method(
direction, ['Forward', 'Inverse'],
'"{0}" direction is invalid, it must be one of {1}!')
if direction == 'forward':
return chromatic_adaptation_forward_CMCCAT2000(XYZ, XYZ_w, XYZ_wr,
L_A1, L_A2, surround)
else:
return chromatic_adaptation_inverse_CMCCAT2000(XYZ, XYZ_w, XYZ_wr,
L_A1, L_A2, surround)
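# Minimal round-trip sketch (not part of the upstream module), reusing the
# doctest values above: forward adaptation followed by the inverse model
# should recover the original stimulus within numerical precision.
if __name__ == '__main__':
    _XYZ = np.array([22.48, 22.74, 8.54])
    _XYZ_w = np.array([111.15, 100.00, 35.20])
    _XYZ_wr = np.array([94.81, 100.00, 107.30])
    _XYZ_c = chromatic_adaptation_CMCCAT2000(_XYZ, _XYZ_w, _XYZ_wr, 200, 200,
                                             direction='Forward')
    _XYZ_back = chromatic_adaptation_CMCCAT2000(_XYZ_c, _XYZ_w, _XYZ_wr, 200, 200,
                                                direction='Inverse')
    print(_XYZ_c)     # ~[ 19.527  23.068  24.972]
    print(_XYZ_back)  # ~[ 22.48   22.74    8.54 ]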
| 34.279133
| 79
| 0.529923
|
8429a86cac92c0522eaeeafbb263a13adb796a18
| 2,003
|
py
|
Python
|
xpower/Algorithms/BuyEveryDay.py
|
UpSea/ZipLineMid
|
1e0cdcfa7974f412dbee32809cffdaf2de6b4971
|
[
"MIT"
] | null | null | null |
xpower/Algorithms/BuyEveryDay.py
|
UpSea/ZipLineMid
|
1e0cdcfa7974f412dbee32809cffdaf2de6b4971
|
[
"MIT"
] | null | null | null |
xpower/Algorithms/BuyEveryDay.py
|
UpSea/ZipLineMid
|
1e0cdcfa7974f412dbee32809cffdaf2de6b4971
|
[
"MIT"
] | 1
|
2021-04-10T06:05:05.000Z
|
2021-04-10T06:05:05.000Z
|
# -*- coding: utf-8 -*-
import zipline as zp
class BuyEveryDay(zp.TradingAlgorithm):
def __init__(self, *args, **kwargs):
super(BuyEveryDay, self).__init__(*args, **kwargs)
print('BuyEveryDay.__init__()')
def initialize(self):
print("<---BuyEveryDay.initialize() start")
print(self)
        self.sid = self.symbol('AAPL')  #mid the DataFrame passed to the algo must contain the column named by this symbol, i.e. an 'AAPL' column
self.amount = 100
self.data = []
print("--->BuEveryDay.initialize() end")
def handle_data(self,data):
print('----BuyEveryDay.handle_data().',data[0]['dt'])
self.data.append(data[0]['dt']) #mid collect all data
self.order(self.sid,self.amount) #mid open 1 long position.
self.record(AAPL=data[self.sid].price) #mid add one column named 'AAPL' to returns of Algorithm.run()
if __name__ == '__main__':
import zipline.utils.factory as zpf
from datetime import datetime
import matplotlib.pyplot as plt
data = zpf.load_from_yahoo(stocks=['AAPL'],
indexes={},
start=datetime(1997, 1, 1),
end=datetime(1998, 6, 1),
adjusted=True)
algo = BuyEveryDay(instant_fill=True,
capital_base=50000,
env=None,
                       sim_params = None,  # when this parameter is set, start and end must not also be set; otherwise they are redundant and an assert error is raised
#start = algo['start'],
#end = algo['end'],
data_frequency = 'daily')
    def dumpDict(dictStr):
        """Pretty-print a dict (or any JSON-serialisable object) as indented JSON."""
        import json
        jsonDumpsIndentStr = json.dumps(dictStr, indent=4, skipkeys=False, default=str, sort_keys=True)
        print(jsonDumpsIndentStr)
algo.dumpDict = dumpDict
result = algo.run(data)
result['pnl'].plot()
#result['portfolio_value'].plot()
plt.show()
| 44.511111
| 133
| 0.546181
|
16f8e123bda588bb780297f2d097cedb6c187974
| 30,183
|
py
|
Python
|
venv/Lib/site-packages/direct/p3d/HostInfo.py
|
ferris77/pacman
|
9d793146189630b4305af0bc7af65ce822b3998f
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/direct/p3d/HostInfo.py
|
ferris77/pacman
|
9d793146189630b4305af0bc7af65ce822b3998f
|
[
"MIT"
] | 20
|
2021-05-03T18:02:23.000Z
|
2022-03-12T12:01:04.000Z
|
Lib/site-packages/direct/p3d/HostInfo.py
|
fochoao/cpython
|
3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9
|
[
"bzip2-1.0.6",
"0BSD"
] | 1
|
2021-04-09T00:02:59.000Z
|
2021-04-09T00:02:59.000Z
|
"""
.. deprecated:: 1.10.0
The p3d packaging system has been replaced with the new setuptools-based
system. See the :ref:`distribution` manual section.
"""
__all__ = ["HostInfo"]
from panda3d.core import HashVal, Filename, PandaSystem, DocumentSpec, Ramfile
from panda3d.core import ConfigVariableInt
from panda3d import core
from direct.p3d.PackageInfo import PackageInfo
from direct.p3d.FileSpec import FileSpec
from direct.directnotify.DirectNotifyGlobal import directNotify
import time
class HostInfo:
""" This class represents a particular download host serving up
Panda3D packages. It is the Python equivalent of the P3DHost
class in the core API. """
notify = directNotify.newCategory("HostInfo")
def __init__(self, hostUrl, appRunner = None, hostDir = None,
rootDir = None, asMirror = False, perPlatform = None):
""" You must specify either an appRunner or a hostDir to the
HostInfo constructor.
If you pass asMirror = True, it means that this HostInfo
object is to be used to populate a "mirror" folder, a
duplicate (or subset) of the contents hosted by a server.
This means when you use this HostInfo to download packages, it
will only download the compressed archive file and leave it
there. At the moment, mirror folders do not download old
patch files from the server.
If you pass perPlatform = True, then files are unpacked into a
platform-specific directory, which is appropriate when you
might be downloading multiple platforms. The default is
perPlatform = False, which means all files are unpacked into
the host directory directly, without an intervening
platform-specific directory name. If asMirror is True, then
the default is perPlatform = True.
Note that perPlatform is also restricted by the individual
package's specification. """
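        # Illustrative call only (hypothetical URL and directory): a mirror
        # tree could be populated with something like
        # HostInfo('https://example.org/packages/', hostDir='mirror', asMirror=True).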
self.__setHostUrl(hostUrl)
self.appRunner = appRunner
self.rootDir = rootDir
if rootDir is None and appRunner:
self.rootDir = appRunner.rootDir
if hostDir and not isinstance(hostDir, Filename):
hostDir = Filename.fromOsSpecific(hostDir)
self.hostDir = hostDir
self.asMirror = asMirror
self.perPlatform = perPlatform
if perPlatform is None:
self.perPlatform = asMirror
# Initially false, this is set true when the contents file is
# successfully read.
self.hasContentsFile = False
# This is the time value at which the current contents file is
# no longer valid.
self.contentsExpiration = 0
# Contains the md5 hash of the original contents.xml file.
self.contentsSpec = FileSpec()
# descriptiveName will be filled in later, when the
# contents file is read.
self.descriptiveName = None
# A list of known mirrors for this host, all URL's guaranteed
# to end with a slash.
self.mirrors = []
# A map of keyword -> altHost URL's. An altHost is different
# than a mirror; an altHost is an alternate URL to download a
# different (e.g. testing) version of this host's contents.
# It is rarely used.
self.altHosts = {}
# This is a dictionary of packages by (name, version). It
# will be filled in when the contents file is read.
self.packages = {}
if self.appRunner and self.appRunner.verifyContents != self.appRunner.P3DVCForce:
# Attempt to pre-read the existing contents.xml; maybe it
# will be current enough for our purposes.
self.readContentsFile()
def __setHostUrl(self, hostUrl):
""" Assigns self.hostUrl, and related values. """
self.hostUrl = hostUrl
if not self.hostUrl:
# A special case: the URL will be set later.
self.hostUrlPrefix = None
self.downloadUrlPrefix = None
else:
# hostUrlPrefix is the host URL, but it is guaranteed to end
# with a slash.
self.hostUrlPrefix = hostUrl
if self.hostUrlPrefix[-1] != '/':
self.hostUrlPrefix += '/'
# downloadUrlPrefix is the URL prefix that should be used for
# everything other than the contents.xml file. It might be
# the same as hostUrlPrefix, but in the case of an
# https-protected hostUrl, it will be the cleartext channel.
self.downloadUrlPrefix = self.hostUrlPrefix
def freshenFile(self, http, fileSpec, localPathname):
""" Ensures that the localPathname is the most current version
of the file defined by fileSpec, as offered by host. If not,
it downloads a new version on-the-spot. Returns true on
success, false on failure. """
if fileSpec.quickVerify(pathname = localPathname):
# It's good, keep it.
return True
# It's stale, get a new one.
doc = None
if self.appRunner and self.appRunner.superMirrorUrl:
# Use the "super mirror" first.
url = core.URLSpec(self.appRunner.superMirrorUrl + fileSpec.filename)
self.notify.info("Freshening %s" % (url))
doc = http.getDocument(url)
if not doc or not doc.isValid():
# Failing the super mirror, contact the actual host.
url = core.URLSpec(self.hostUrlPrefix + fileSpec.filename)
self.notify.info("Freshening %s" % (url))
doc = http.getDocument(url)
if not doc.isValid():
return False
file = Filename.temporary('', 'p3d_')
if not doc.downloadToFile(file):
# Failed to download.
file.unlink()
return False
# Successfully downloaded!
localPathname.makeDir()
if not file.renameTo(localPathname):
# Couldn't move it into place.
file.unlink()
return False
if not fileSpec.fullVerify(pathname = localPathname, notify = self.notify):
# No good after download.
self.notify.info("%s is still no good after downloading." % (url))
return False
return True
def downloadContentsFile(self, http, redownload = False,
hashVal = None):
""" Downloads the contents.xml file for this particular host,
synchronously, and then reads it. Returns true on success,
false on failure. If hashVal is not None, it should be a
HashVal object, which will be filled with the hash from the
new contents.xml file."""
if self.hasCurrentContentsFile():
# We've already got one.
return True
if self.appRunner and self.appRunner.verifyContents == self.appRunner.P3DVCNever:
# Not allowed to.
return False
rf = None
if http:
if not redownload and self.appRunner and self.appRunner.superMirrorUrl:
# We start with the "super mirror", if it's defined.
url = self.appRunner.superMirrorUrl + 'contents.xml'
request = DocumentSpec(url)
self.notify.info("Downloading contents file %s" % (request))
rf = Ramfile()
channel = http.makeChannel(False)
channel.getDocument(request)
if not channel.downloadToRam(rf):
self.notify.warning("Unable to download %s" % (url))
rf = None
if not rf:
# Then go to the main host, if our super mirror let us
# down.
url = self.hostUrlPrefix + 'contents.xml'
# Append a uniquifying query string to the URL to force the
# download to go all the way through any caches. We use the
# time in seconds; that's unique enough.
url += '?' + str(int(time.time()))
# We might as well explicitly request the cache to be disabled
# too, since we have an interface for that via HTTPChannel.
request = DocumentSpec(url)
request.setCacheControl(DocumentSpec.CCNoCache)
self.notify.info("Downloading contents file %s" % (request))
statusCode = None
statusString = ''
for attempt in range(int(ConfigVariableInt('contents-xml-dl-attempts', 3))):
if attempt > 0:
self.notify.info("Retrying (%s)..."%(attempt,))
rf = Ramfile()
channel = http.makeChannel(False)
channel.getDocument(request)
if channel.downloadToRam(rf):
self.notify.info("Successfully downloaded %s" % (url,))
break
else:
rf = None
statusCode = channel.getStatusCode()
statusString = channel.getStatusString()
self.notify.warning("Could not contact download server at %s" % (url,))
self.notify.warning("Status code = %s %s" % (statusCode, statusString))
if not rf:
self.notify.warning("Unable to download %s" % (url,))
try:
# Something screwed up.
if statusCode == core.HTTPChannel.SCDownloadOpenError or \
statusCode == core.HTTPChannel.SCDownloadWriteError:
launcher.setPandaErrorCode(2)
elif statusCode == 404:
# 404 not found
launcher.setPandaErrorCode(5)
elif statusCode < 100:
# statusCode < 100 implies the connection attempt itself
# failed. This is usually due to firewall software
# interfering. Apparently some firewall software might
# allow the first connection and disallow subsequent
# connections; how strange.
launcher.setPandaErrorCode(4)
else:
# There are other kinds of failures, but these will
# generally have been caught already by the first test; so
# if we get here there may be some bigger problem. Just
# give the generic "big problem" message.
launcher.setPandaErrorCode(6)
except NameError as e:
# no launcher
pass
except AttributeError as e:
self.notify.warning("%s" % (str(e),))
pass
return False
tempFilename = Filename.temporary('', 'p3d_', '.xml')
if rf:
f = open(tempFilename.toOsSpecific(), 'wb')
f.write(rf.getData())
f.close()
if hashVal:
hashVal.hashString(rf.getData())
if not self.readContentsFile(tempFilename, freshDownload = True):
self.notify.warning("Failure reading %s" % (url))
tempFilename.unlink()
return False
tempFilename.unlink()
return True
# Couldn't download the file. Maybe we should look for a
# previously-downloaded copy already on disk?
return False
def redownloadContentsFile(self, http):
""" Downloads a new contents.xml file in case it has changed.
Returns true if the file has indeed changed, false if it has
not. """
assert self.hasContentsFile
if self.appRunner and self.appRunner.verifyContents == self.appRunner.P3DVCNever:
# Not allowed to.
return False
url = self.hostUrlPrefix + 'contents.xml'
self.notify.info("Redownloading %s" % (url))
# Get the hash of the original file.
assert self.hostDir
hv1 = HashVal()
if self.contentsSpec.hash:
hv1.setFromHex(self.contentsSpec.hash)
else:
filename = Filename(self.hostDir, 'contents.xml')
hv1.hashFile(filename)
# Now download it again.
self.hasContentsFile = False
hv2 = HashVal()
if not self.downloadContentsFile(http, redownload = True,
hashVal = hv2):
return False
if hv2 == HashVal():
self.notify.info("%s didn't actually redownload." % (url))
return False
elif hv1 != hv2:
self.notify.info("%s has changed." % (url))
return True
else:
self.notify.info("%s has not changed." % (url))
return False
def hasCurrentContentsFile(self):
""" Returns true if a contents.xml file has been successfully
read for this host and is still current, false otherwise. """
if not self.appRunner \
or self.appRunner.verifyContents == self.appRunner.P3DVCNone \
or self.appRunner.verifyContents == self.appRunner.P3DVCNever:
# If we're not asking to verify contents, then
# contents.xml files never expires.
return self.hasContentsFile
now = int(time.time())
return now < self.contentsExpiration and self.hasContentsFile
def readContentsFile(self, tempFilename = None, freshDownload = False):
""" Reads the contents.xml file for this particular host, once
it has been downloaded into the indicated temporary file.
Returns true on success, false if the contents file is not
already on disk or is unreadable.
If tempFilename is specified, it is the filename read, and it
is copied the file into the standard location if it's not
there already. If tempFilename is not specified, the standard
filename is read if it is known. """
if not hasattr(core, 'TiXmlDocument'):
return False
if not tempFilename:
if self.hostDir:
# If the filename is not specified, we can infer it
# if we already know our hostDir
hostDir = self.hostDir
else:
# Otherwise, we have to guess the hostDir.
hostDir = self.__determineHostDir(None, self.hostUrl)
tempFilename = Filename(hostDir, 'contents.xml')
doc = core.TiXmlDocument(tempFilename.toOsSpecific())
if not doc.LoadFile():
return False
xcontents = doc.FirstChildElement('contents')
if not xcontents:
return False
maxAge = xcontents.Attribute('max_age')
if maxAge:
try:
maxAge = int(maxAge)
except:
maxAge = None
if maxAge is None:
# Default max_age if unspecified (see p3d_plugin.h).
from direct.p3d.AppRunner import AppRunner
maxAge = AppRunner.P3D_CONTENTS_DEFAULT_MAX_AGE
# Get the latest possible expiration time, based on the max_age
# indication. Any expiration time later than this is in error.
now = int(time.time())
self.contentsExpiration = now + maxAge
if freshDownload:
self.contentsSpec.readHash(tempFilename)
# Update the XML with the new download information.
xorig = xcontents.FirstChildElement('orig')
while xorig:
xcontents.RemoveChild(xorig)
xorig = xcontents.FirstChildElement('orig')
xorig = core.TiXmlElement('orig')
self.contentsSpec.storeXml(xorig)
xorig.SetAttribute('expiration', str(self.contentsExpiration))
xcontents.InsertEndChild(xorig)
else:
# Read the download hash and expiration time from the XML.
expiration = None
xorig = xcontents.FirstChildElement('orig')
if xorig:
self.contentsSpec.loadXml(xorig)
expiration = xorig.Attribute('expiration')
if expiration:
try:
expiration = int(expiration)
except:
expiration = None
if not self.contentsSpec.hash:
self.contentsSpec.readHash(tempFilename)
if expiration is not None:
self.contentsExpiration = min(self.contentsExpiration, expiration)
# Look for our own entry in the hosts table.
if self.hostUrl:
self.__findHostXml(xcontents)
else:
assert self.hostDir
self.__findHostXmlForHostDir(xcontents)
if self.rootDir and not self.hostDir:
self.hostDir = self.__determineHostDir(None, self.hostUrl)
# Get the list of packages available for download and/or import.
xpackage = xcontents.FirstChildElement('package')
while xpackage:
name = xpackage.Attribute('name')
platform = xpackage.Attribute('platform')
version = xpackage.Attribute('version')
try:
solo = int(xpackage.Attribute('solo') or '')
except ValueError:
solo = False
try:
perPlatform = int(xpackage.Attribute('per_platform') or '')
except ValueError:
perPlatform = False
package = self.__makePackage(name, platform, version, solo, perPlatform)
package.descFile = FileSpec()
package.descFile.loadXml(xpackage)
package.setupFilenames()
package.importDescFile = None
ximport = xpackage.FirstChildElement('import')
if ximport:
package.importDescFile = FileSpec()
package.importDescFile.loadXml(ximport)
xpackage = xpackage.NextSiblingElement('package')
self.hasContentsFile = True
# Now save the contents.xml file into the standard location.
if self.appRunner and self.appRunner.verifyContents != self.appRunner.P3DVCNever:
assert self.hostDir
filename = Filename(self.hostDir, 'contents.xml')
filename.makeDir()
if freshDownload:
doc.SaveFile(filename.toOsSpecific())
else:
if filename != tempFilename:
tempFilename.copyTo(filename)
return True
def __findHostXml(self, xcontents):
""" Looks for the <host> or <alt_host> entry in the
contents.xml that corresponds to the URL that we actually
downloaded from. """
xhost = xcontents.FirstChildElement('host')
while xhost:
url = xhost.Attribute('url')
if url == self.hostUrl:
self.readHostXml(xhost)
return
xalthost = xhost.FirstChildElement('alt_host')
while xalthost:
url = xalthost.Attribute('url')
if url == self.hostUrl:
self.readHostXml(xalthost)
return
xalthost = xalthost.NextSiblingElement('alt_host')
xhost = xhost.NextSiblingElement('host')
def __findHostXmlForHostDir(self, xcontents):
""" Looks for the <host> or <alt_host> entry in the
contents.xml that corresponds to the host dir that we read the
contents.xml from. This is used when reading a contents.xml
file found on disk, as opposed to downloading it from a
site. """
xhost = xcontents.FirstChildElement('host')
while xhost:
url = xhost.Attribute('url')
hostDirBasename = xhost.Attribute('host_dir')
hostDir = self.__determineHostDir(hostDirBasename, url)
if hostDir == self.hostDir:
self.__setHostUrl(url)
self.readHostXml(xhost)
return
xalthost = xhost.FirstChildElement('alt_host')
while xalthost:
url = xalthost.Attribute('url')
hostDirBasename = xalthost.Attribute('host_dir')
hostDir = self.__determineHostDir(hostDirBasename, url)
if hostDir == self.hostDir:
self.__setHostUrl(url)
self.readHostXml(xalthost)
return
xalthost = xalthost.NextSiblingElement('alt_host')
xhost = xhost.NextSiblingElement('host')
def readHostXml(self, xhost):
""" Reads a <host> or <alt_host> entry and applies the data to
this object. """
descriptiveName = xhost.Attribute('descriptive_name')
if descriptiveName and not self.descriptiveName:
self.descriptiveName = descriptiveName
hostDirBasename = xhost.Attribute('host_dir')
if self.rootDir and not self.hostDir:
self.hostDir = self.__determineHostDir(hostDirBasename, self.hostUrl)
# Get the "download" URL, which is the source from which we
# download everything other than the contents.xml file.
downloadUrl = xhost.Attribute('download_url')
if downloadUrl:
self.downloadUrlPrefix = downloadUrl
if self.downloadUrlPrefix[-1] != '/':
self.downloadUrlPrefix += '/'
else:
self.downloadUrlPrefix = self.hostUrlPrefix
xmirror = xhost.FirstChildElement('mirror')
while xmirror:
url = xmirror.Attribute('url')
if url:
if url[-1] != '/':
url += '/'
if url not in self.mirrors:
self.mirrors.append(url)
xmirror = xmirror.NextSiblingElement('mirror')
xalthost = xhost.FirstChildElement('alt_host')
while xalthost:
keyword = xalthost.Attribute('keyword')
url = xalthost.Attribute('url')
if url and keyword:
self.altHosts[keyword] = url
xalthost = xalthost.NextSiblingElement('alt_host')
def __makePackage(self, name, platform, version, solo, perPlatform):
""" Creates a new PackageInfo entry for the given name,
version, and platform. If there is already a matching
PackageInfo, returns it. """
if not platform:
platform = None
platforms = self.packages.setdefault((name, version or ""), {})
package = platforms.get("", None)
if not package:
package = PackageInfo(self, name, version, platform = platform,
solo = solo, asMirror = self.asMirror,
perPlatform = perPlatform)
platforms[platform or ""] = package
return package
def getPackage(self, name, version, platform = None):
""" Returns a PackageInfo that matches the indicated name and
version and the indicated platform or the current runtime
platform, if one is provided by this host, or None if not. """
assert self.hasContentsFile
platforms = self.packages.get((name, version or ""), {})
if platform:
# In this case, we are looking for a specific platform
# only.
return platforms.get(platform, None)
# We are looking for one matching the current runtime
# platform. First, look for a package matching the current
# platform exactly.
package = platforms.get(PandaSystem.getPlatform(), None)
# If not found, look for one matching no particular platform.
if not package:
package = platforms.get("", None)
return package
def getPackages(self, name = None, platform = None):
""" Returns a list of PackageInfo objects that match the
indicated name and/or platform, with no particular regards to
version. If name is None, all packages are returned. """
assert self.hasContentsFile
packages = []
for (pn, version), platforms in self.packages.items():
if name and pn != name:
continue
if not platform:
for p2 in platforms:
package = self.getPackage(pn, version, platform = p2)
if package:
packages.append(package)
else:
package = self.getPackage(pn, version, platform = platform)
if package:
packages.append(package)
return packages
def getAllPackages(self, includeAllPlatforms = False):
""" Returns a list of all available packages provided by this
host. """
result = []
items = sorted(self.packages.items())
for key, platforms in items:
if self.perPlatform or includeAllPlatforms:
# If we maintain a different answer per platform,
# return all of them.
pitems = sorted(platforms.items())
for pkey, package in pitems:
result.append(package)
else:
# If we maintain a host for the current platform
# only (e.g. a client copy), then return only the
# current platform, or no particular platform.
package = platforms.get(PandaSystem.getPlatform(), None)
if not package:
package = platforms.get("", None)
if package:
result.append(package)
return result
def deletePackages(self, packages):
""" Removes all of the indicated packages from the disk,
uninstalling them and deleting all of their files. The
packages parameter must be a list of one or more PackageInfo
objects, for instance as returned by getPackage(). Returns
the list of packages that were NOT found. """
packages = packages[:]
for key, platforms in list(self.packages.items()):
for platform, package in list(platforms.items()):
if package in packages:
self.__deletePackageFiles(package)
del platforms[platform]
packages.remove(package)
if not platforms:
# If we've removed all the platforms for a given
# package, remove the key from the toplevel map.
del self.packages[key]
return packages
def __deletePackageFiles(self, package):
""" Called by deletePackage(), this actually removes the files
for the indicated package. """
if self.appRunner:
self.notify.info("Deleting package %s: %s" % (package.packageName, package.getPackageDir()))
self.appRunner.rmtree(package.getPackageDir())
self.appRunner.sendRequest('forget_package', self.hostUrl, package.packageName, package.packageVersion or '')
def __determineHostDir(self, hostDirBasename, hostUrl):
""" Hashes the host URL into a (mostly) unique directory
string, which will be the root of the host's install tree.
Returns the resulting path, as a Filename.
This code is duplicated in C++, in
P3DHost::determine_host_dir(). """
if hostDirBasename:
# If the contents.xml specified a host_dir parameter, use
# it.
hostDir = str(self.rootDir) + '/hosts'
for component in hostDirBasename.split('/'):
if component:
if component[0] == '.':
# Forbid ".foo" or "..".
component = 'x' + component
hostDir += '/'
hostDir += component
return Filename(hostDir)
hostDir = 'hosts/'
# Look for a server name in the URL. Including this string in the
# directory name makes it friendlier for people browsing the
# directory.
# We could use URLSpec, but we do it by hand instead, to make
# it more likely that our hash code will exactly match the
# similar logic in P3DHost.
p = hostUrl.find('://')
hostname = ''
if p != -1:
start = p + 3
end = hostUrl.find('/', start)
# Now start .. end is something like "username@host:port".
at = hostUrl.find('@', start)
if at != -1 and at < end:
start = at + 1
colon = hostUrl.find(':', start)
if colon != -1 and colon < end:
end = colon
# Now start .. end is just the hostname.
hostname = hostUrl[start : end]
# Now build a hash string of the whole URL. We'll use MD5 to
# get a pretty good hash, with a minimum chance of collision.
# Even if there is a hash collision, though, it's not the end
# of the world; it just means that both hosts will dump their
# packages into the same directory, and they'll fight over the
# toplevel contents.xml file. Assuming they use different
# version numbers (which should be safe since they have the
# same hostname), there will be minimal redownloading.
hashSize = 16
keepHash = hashSize
if hostname:
hostDir += hostname + '_'
# If we successfully got a hostname, we don't really need the
# full hash. We'll keep half of it.
keepHash = keepHash // 2
md = HashVal()
md.hashString(hostUrl)
hostDir += md.asHex()[:keepHash * 2]
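        # The resulting relative path is either 'hosts/<hostname>_' followed by
        # a 16-character prefix of the MD5 hex digest, or 'hosts/' plus the
        # full 32-character digest when no hostname could be parsed from the URL.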
hostDir = Filename(self.rootDir, hostDir)
return hostDir
| 39.871863
| 121
| 0.576318
|
8d260f4d7143d25534949ddbda8bdbacb79966a4
| 5,374
|
py
|
Python
|
src/packagedcode/cargo.py
|
doc22940/scancode-toolk
|
588b9a9411730e99d763d715ae9f38575744aaee
|
[
"Apache-2.0",
"CC0-1.0"
] | 1
|
2020-06-24T16:03:52.000Z
|
2020-06-24T16:03:52.000Z
|
src/packagedcode/cargo.py
|
doc22940/scancode-toolk
|
588b9a9411730e99d763d715ae9f38575744aaee
|
[
"Apache-2.0",
"CC0-1.0"
] | 1
|
2021-06-02T02:50:07.000Z
|
2021-06-02T02:50:07.000Z
|
src/packagedcode/cargo.py
|
hwpplayers/scancode-toolkit
|
72850bd57a1a841e5a6a6e4120223a00c4189046
|
[
"Apache-2.0",
"CC0-1.0"
] | null | null | null |
# Copyright (c) 2019 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from collections import OrderedDict
import logging
import re
import attr
import toml
from commoncode import filetype
from commoncode import fileutils
from packagedcode import models
"""
Handle Rust cargo crates
"""
TRACE = False
logger = logging.getLogger(__name__)
if TRACE:
import sys
logging.basicConfig(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
@attr.s()
class RustCargoCrate(models.Package):
metafiles = ('Cargo.toml',)
default_type = 'cargo'
default_primary_language = 'Rust'
default_web_baseurl = 'https://crates.io'
default_download_baseurl = 'https://crates.io/api/v1'
default_api_baseurl = 'https://crates.io/api/v1'
@classmethod
def recognize(cls, location):
yield parse(location)
@classmethod
def get_package_root(cls, manifest_resource, codebase):
return manifest_resource.parent(codebase)
def repository_homepage_url(self, baseurl=default_web_baseurl):
return '{}/crates/{}'.format(baseurl, self.name)
def repository_download_url(self, baseurl=default_download_baseurl):
return '{}/crates/{}/{}/download'.format(baseurl, self.name, self.version)
def api_data_url(self, baseurl=default_api_baseurl):
return '{}/crates/{}'.format(baseurl, self.name)
def is_cargo_toml(location):
return (filetype.is_file(location) and fileutils.file_name(location).lower() == 'cargo.toml')
def parse(location):
"""
Return a Package object from a Cargo.toml file or None.
"""
if not is_cargo_toml(location):
return
package_data = toml.load(location, _dict=OrderedDict)
return build_package(package_data)
def build_package(package_data):
"""
    Return a Package object from a package data mapping or None.
"""
core_package_data = package_data.get('package', {})
name = core_package_data.get('name')
version = core_package_data.get('version')
description = core_package_data.get('description')
if description:
description = description.strip()
authors = core_package_data.get('authors')
parties = list(party_mapper(authors, party_role='author'))
declared_license = core_package_data.get('license')
package = RustCargoCrate(
name=name,
version=version,
description=description,
parties=parties,
declared_license=declared_license
)
return package
def party_mapper(party, party_role):
"""
    Yield a Party object with the given `party_role` for each person in `party`.
https://doc.rust-lang.org/cargo/reference/manifest.html#the-authors-field-optional
"""
for person in party:
name, email = parse_person(person)
yield models.Party(
type=models.party_person,
name=name,
role=party_role,
email=email)
def parse_person(person):
"""
https://doc.rust-lang.org/cargo/reference/manifest.html#the-authors-field-optional
A "person" is an object with an optional "name" or "email" field.
A person can be in the form:
"author": "Isaac Z. Schlueter <i@izs.me>"
For example:
>>> p = parse_person('Barney Rubble <b@rubble.com>')
>>> assert p == ('Barney Rubble', 'b@rubble.com')
>>> p = parse_person('Barney Rubble')
>>> assert p == ('Barney Rubble', None)
>>> p = parse_person('<b@rubble.com>')
>>> assert p == (None, 'b@rubble.com')
"""
parsed = person_parser(person)
if not parsed:
name = None
parsed = person_parser_no_name(person)
else:
name = parsed.group('name')
email = parsed.group('email')
if name:
name = name.strip()
if email:
email = email.strip('<> ')
return name, email
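# person_parser matches the common "Name <email>" author form: the name group
# stops at the first '(' or '<' and the optional email group captures the
# angle-bracketed tail; person_parser_no_name handles bare "<email>" entries.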
person_parser = re.compile(
r'^(?P<name>[^\(<]+)'
r'\s?'
r'(?P<email><([^>]+)>)?'
).match
person_parser_no_name = re.compile(
r'(?P<email><([^>]+)>)?'
).match
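# Minimal usage sketch (not part of the upstream module): build_package() is
# exercised with an in-memory mapping mirroring a small Cargo.toml [package]
# table; the crate name and values below are purely hypothetical.
if __name__ == '__main__':
    _example_data = {
        'package': {
            'name': 'example-crate',  # hypothetical crate
            'version': '0.1.0',
            'description': 'An example crate.',
            'authors': ['Barney Rubble <b@rubble.com>'],
            'license': 'MIT',
        }
    }
    _pkg = build_package(_example_data)
    print(_pkg.name, _pkg.version, _pkg.declared_license)
    print(_pkg.repository_homepage_url())  # https://crates.io/crates/example-crate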
| 29.527473
| 97
| 0.692966
|
3d523674c10ce1d584e0b5039537da8875a2d45b
| 4,009
|
py
|
Python
|
options/train_options.py
|
DaBaiTuuu/SynthCP
|
42b6e6185f572dd1bf82bba2878014f5283dbe27
|
[
"MIT"
] | 54
|
2020-09-09T09:07:48.000Z
|
2022-03-15T12:19:56.000Z
|
options/train_options.py
|
DaBaiTuuu/SynthCP
|
42b6e6185f572dd1bf82bba2878014f5283dbe27
|
[
"MIT"
] | 12
|
2020-10-27T17:40:40.000Z
|
2021-09-06T21:10:32.000Z
|
options/train_options.py
|
DaBaiTuuu/SynthCP
|
42b6e6185f572dd1bf82bba2878014f5283dbe27
|
[
"MIT"
] | 8
|
2020-10-12T09:28:01.000Z
|
2021-04-21T14:43:28.000Z
|
"""
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
from .base_options import BaseOptions
class TrainOptions(BaseOptions):
def initialize(self, parser):
BaseOptions.initialize(self, parser)
# for displays
parser.add_argument('--display_freq', type=int, default=100, help='frequency of showing training results on screen')
parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console')
parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results')
parser.add_argument('--save_epoch_freq', type=int, default=20, help='frequency of saving checkpoints at the end of epochs')
parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')
parser.add_argument('--debug', action='store_true', help='only do one epoch and displays at each iteration')
parser.add_argument('--tf_log', action='store_true', help='if specified, use tensorboard logging. Requires tensorflow installed')
# for training
parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
        parser.add_argument('--niter', type=int, default=50, help='# of iter at starting learning rate. This is NOT the total #epochs. Total #epochs is niter + niter_decay')
parser.add_argument('--niter_decay', type=int, default=0, help='# of iter to linearly decay learning rate to zero')
parser.add_argument('--optimizer', type=str, default='adam')
parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')
parser.add_argument('--beta2', type=float, default=0.999, help='momentum term of adam')
parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')
parser.add_argument('--D_steps_per_G', type=int, default=1, help='number of discriminator iterations per generator iterations.')
# for discriminators
parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer')
parser.add_argument('--lambda_feat', type=float, default=10.0, help='weight for feature matching loss')
parser.add_argument('--lambda_vgg', type=float, default=10.0, help='weight for vgg loss')
parser.add_argument('--no_ganFeat_loss', action='store_true', help='if specified, do *not* use discriminator feature matching loss')
parser.add_argument('--no_vgg_loss', action='store_true', help='if specified, do *not* use VGG feature matching loss')
parser.add_argument('--gan_mode', type=str, default='hinge', help='(ls|original|hinge)')
parser.add_argument('--netD', type=str, default='multiscale', help='(n_layers|multiscale|image)')
        parser.add_argument('--no_TTUR', action='store_true', help='if specified, do *not* use the TTUR training scheme')
parser.add_argument('--lambda_kld', type=float, default=0.05)
# for FCN and joint training
parser.add_argument('--joint_train', action='store_true', help='specify to joint train seg and SPADE')
parser.add_argument('--fcn_pretrained', type=str, default='', help='pretrained_fcn_path')
        parser.add_argument('--init_name', type=str, default='', help='dir name of networks for loading weights')
parser.add_argument('--n_fold', type=int, default=0, help='n fold validation')
parser.add_argument('--fold', type=int, default=0, help='fold number')
parser.add_argument('--cross_validation_mode', type=str, default='train', help='cross validation mode')
self.isTrain = True
return parser
| 74.240741
| 173
| 0.703417
|
bd15c472f906d140ef00546b94634b33eb43240f
| 714
|
py
|
Python
|
topy/__init__.py
|
TarcisioLOliveira/topy
|
060da675e6494fee63fa5547befcb1f8ecc39fdc
|
[
"MIT"
] | 1
|
2021-01-25T00:13:34.000Z
|
2021-01-25T00:13:34.000Z
|
topy/__init__.py
|
TarcisioLOliveira/topy
|
060da675e6494fee63fa5547befcb1f8ecc39fdc
|
[
"MIT"
] | null | null | null |
topy/__init__.py
|
TarcisioLOliveira/topy
|
060da675e6494fee63fa5547befcb1f8ecc39fdc
|
[
"MIT"
] | null | null | null |
"""
# ==============================================================================
# ToPy -- Topology optimization with Python.
# Copyright (C) 2012, 2015, 2016, 2017 William Hunter.
# Copyright (C) 2020, 2021, Tarcísio L. de Oliveira
# ==============================================================================
"""
from .topology_trad import *
from .topology_gen import *
from .visualisation import *
from .elements import *
from .optimisation import *
from .pathfinding import *
__version__ = "1.0.0"
__author__ = "Tarcisio L. de Oliveira"
__all__ = (
topology_trad.__all__ +
topology_gen.__all__ +
visualisation.__all__ +
elements.__all__ +
optimisation.__all__ +
pathfinding.__all__
)
| 26.444444
| 80
| 0.560224
|
9a958077eb6190cdb4ab192230850e04169ee430
| 4,947
|
py
|
Python
|
user/tests/test_user_api.py
|
khaledzaki2017/Recipe-app-api
|
c525b2764866fc3b8501baf9d8e9c7cc0374080b
|
[
"MIT"
] | null | null | null |
user/tests/test_user_api.py
|
khaledzaki2017/Recipe-app-api
|
c525b2764866fc3b8501baf9d8e9c7cc0374080b
|
[
"MIT"
] | null | null | null |
user/tests/test_user_api.py
|
khaledzaki2017/Recipe-app-api
|
c525b2764866fc3b8501baf9d8e9c7cc0374080b
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
CREATE_USER_URL = reverse('user:create')
TOKEN_URL = reverse('user:token')
ME_URL = reverse('user:me')
def create_user(**params):
return get_user_model().objects.create_user(**params)
class PublicUserApiTests(TestCase):
"""Test the users API (public)"""
def setUp(self):
self.client = APIClient()
def test_create_valid_user_success(self):
"""Test creating user with valid payload is successful"""
payload = {
'email': 'test@gmail.com',
'password': 'testpass',
'name': 'Test name'
}
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
user = get_user_model().objects.get(**res.data)
self.assertTrue(user.check_password(payload['password']))
self.assertNotIn('password', res.data)
def test_user_exists(self):
"""Test creatinga user that already exists fails"""
payload = {
'email': 'test@gmail.com',
'password': 'testpass',
'name': 'Test',
}
create_user(**payload)
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_password_too_short(self):
"""Test that the password must be more than 5 characters"""
payload = {
'email': 'test@gmail.com',
'password': 'pw',
'name': 'Test',
}
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
user_exists = get_user_model().objects.filter(
email=payload['email']
).exists()
self.assertFalse(user_exists)
def test_create_token_for_user(self):
"""Test that a token is created for the user"""
payload = {'email': 'test@gmail.com', 'password': 'testpass'}
create_user(**payload)
res = self.client.post(TOKEN_URL, payload)
self.assertIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_200_OK)
def test_create_token_invalid_credentials(self):
"""Test that token is not created if invalid credentials are given"""
create_user(email='test@gmail.com', password="testpass")
payload = {'email': 'test@gmail.com', 'password': 'wrong'}
res = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_no_user(self):
"""Test that token is not created if user doesn't exist"""
payload = {'email': 'test@gmail.com', 'password': 'testpass'}
res = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_missing_field(self):
"""Test that email and password are required"""
res = self.client.post(TOKEN_URL, {'email': 'one', 'password': ''})
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_user_unauthorized(self):
"""Test that authentication is required for users"""
res = self.client.get(ME_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateUserApiTests(TestCase):
"""Test API requests that require authentication"""
def setUp(self):
self.user = create_user(
email='test@gmail.com',
password='testpass',
name='name'
)
self.client = APIClient()
self.client.force_authenticate(user=self.user)
def test_retrieve_profile_success(self):
"""Test retrieving profile for logged in used"""
res = self.client.get(ME_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, {
'name': self.user.name,
'email': self.user.email
})
def test_post_me_not_allowed(self):
"""Test that POST is not allowed on the me url"""
res = self.client.post(ME_URL, {})
self.assertEqual(res.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_update_user_profile(self):
"""Test updating the user profile for authenticated user"""
payload = {'name': 'new name', 'password': 'newpassword123'}
res = self.client.patch(ME_URL, payload)
self.user.refresh_from_db()
self.assertEqual(self.user.name, payload['name'])
self.assertTrue(self.user.check_password(payload['password']))
self.assertEqual(res.status_code, status.HTTP_200_OK)
| 34.594406
| 77
| 0.64625
|
2e7321b99fedbfce4eac11e701130afb05edb2c1
| 8,565
|
py
|
Python
|
TimeWrapper_JE/venv/Lib/site-packages/pygments/lexers/_lua_builtins.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
TimeWrapper_JE/venv/Lib/site-packages/pygments/lexers/_lua_builtins.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
TimeWrapper_JE/venv/Lib/site-packages/pygments/lexers/_lua_builtins.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
"""
pygments.lexers._lua_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This file contains the names and modules of lua functions
It is able to re-generate itself, but for adding new functions you
probably have to add some callbacks (see function module_callbacks).
Do not edit the MODULES dict by hand.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
MODULES = {'basic': ('_G',
'_VERSION',
'assert',
'collectgarbage',
'dofile',
'error',
'getmetatable',
'ipairs',
'load',
'loadfile',
'next',
'pairs',
'pcall',
'print',
'rawequal',
'rawget',
'rawlen',
'rawset',
'select',
'setmetatable',
'tonumber',
'tostring',
'type',
'xpcall'),
'bit32': ('bit32.arshift',
'bit32.band',
'bit32.bnot',
'bit32.bor',
'bit32.btest',
'bit32.bxor',
'bit32.extract',
'bit32.lrotate',
'bit32.lshift',
'bit32.replace',
'bit32.rrotate',
'bit32.rshift'),
'coroutine': ('coroutine.create',
'coroutine.isyieldable',
'coroutine.resume',
'coroutine.running',
'coroutine.status',
'coroutine.wrap',
'coroutine.yield'),
'debug': ('debug.debug',
'debug.gethook',
'debug.getinfo',
'debug.getlocal',
'debug.getmetatable',
'debug.getregistry',
'debug.getupvalue',
'debug.getuservalue',
'debug.sethook',
'debug.setlocal',
'debug.setmetatable',
'debug.setupvalue',
'debug.setuservalue',
'debug.traceback',
'debug.upvalueid',
'debug.upvaluejoin'),
'io': ('io.close',
'io.flush',
'io.input',
'io.lines',
'io.open',
'io.output',
'io.popen',
'io.read',
'io.stderr',
'io.stdin',
'io.stdout',
'io.tmpfile',
'io.type',
'io.write'),
'math': ('math.abs',
'math.acos',
'math.asin',
'math.atan',
'math.atan2',
'math.ceil',
'math.cos',
'math.cosh',
'math.deg',
'math.exp',
'math.floor',
'math.fmod',
'math.frexp',
'math.huge',
'math.ldexp',
'math.log',
'math.max',
'math.maxinteger',
'math.min',
'math.mininteger',
'math.modf',
'math.pi',
'math.pow',
'math.rad',
'math.random',
'math.randomseed',
'math.sin',
'math.sinh',
'math.sqrt',
'math.tan',
'math.tanh',
'math.tointeger',
'math.type',
'math.ult'),
'modules': ('package.config',
'package.cpath',
'package.loaded',
'package.loadlib',
'package.path',
'package.preload',
'package.searchers',
'package.searchpath',
'require'),
'os': ('os.clock',
'os.date',
'os.difftime',
'os.execute',
'os.exit',
'os.getenv',
'os.remove',
'os.rename',
'os.setlocale',
'os.time',
'os.tmpname'),
'string': ('string.byte',
'string.char',
'string.dump',
'string.find',
'string.format',
'string.gmatch',
'string.gsub',
'string.len',
'string.lower',
'string.match',
'string.pack',
'string.packsize',
'string.rep',
'string.reverse',
'string.sub',
'string.unpack',
'string.upper'),
'table': ('table.concat',
'table.insert',
'table.move',
'table.pack',
'table.remove',
'table.sort',
'table.unpack'),
'utf8': ('utf8.char',
'utf8.charpattern',
'utf8.codepoint',
'utf8.codes',
'utf8.len',
'utf8.offset')}
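# Illustrative helper (an assumption, not part of the generated data above):
# one way a consumer could map a single built-in name back to its module key,
# e.g. _example_module_of('math.floor') == 'math'.
def _example_module_of(name):
    """Return the MODULES key whose tuple contains `name`, or None."""
    for module, names in MODULES.items():
        if name in names:
            return module
    return None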
if __name__ == '__main__': # pragma: no cover
import re
import sys
# urllib ends up wanting to import a module called 'math' -- if
# pygments/lexers is in the path, this ends badly.
for i in range(len(sys.path)-1, -1, -1):
if sys.path[i].endswith('/lexers'):
del sys.path[i]
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
import pprint
# you can't generally find out what module a function belongs to if you
# have only its name. Because of this, here are some callback functions
    # that recognize if a given function belongs to a specific module
def module_callbacks():
def is_in_coroutine_module(name):
return name.startswith('coroutine.')
def is_in_modules_module(name):
if name in ['require', 'module'] or name.startswith('package'):
return True
else:
return False
def is_in_string_module(name):
return name.startswith('string.')
def is_in_table_module(name):
return name.startswith('table.')
def is_in_math_module(name):
return name.startswith('math')
def is_in_io_module(name):
return name.startswith('io.')
def is_in_os_module(name):
return name.startswith('os.')
def is_in_debug_module(name):
return name.startswith('debug.')
return {'coroutine': is_in_coroutine_module,
'modules': is_in_modules_module,
'string': is_in_string_module,
'table': is_in_table_module,
'math': is_in_math_module,
'io': is_in_io_module,
'os': is_in_os_module,
'debug': is_in_debug_module}
def get_newest_version():
f = urlopen('http://www.lua.org/manual/')
r = re.compile(r'^<A HREF="(\d\.\d)/">(Lua )?\1</A>')
for line in f:
m = r.match(line)
if m is not None:
return m.groups()[0]
def get_lua_functions(version):
f = urlopen('http://www.lua.org/manual/%s/' % version)
r = re.compile(r'^<A HREF="manual.html#pdf-(?!lua|LUA)([^:]+)">\1</A>')
functions = []
for line in f:
m = r.match(line)
if m is not None:
functions.append(m.groups()[0])
return functions
def get_function_module(name):
for mod, cb in module_callbacks().items():
if cb(name):
return mod
if '.' in name:
return name.split('.')[0]
else:
return 'basic'
def regenerate(filename, modules):
with open(filename) as fp:
content = fp.read()
header = content[:content.find('MODULES = {')]
footer = content[content.find("if __name__ == '__main__':"):]
with open(filename, 'w') as fp:
fp.write(header)
fp.write('MODULES = %s\n\n' % pprint.pformat(modules))
fp.write(footer)
def run():
version = get_newest_version()
functions = set()
for v in ('5.2', version):
print('> Downloading function index for Lua %s' % v)
f = get_lua_functions(v)
print('> %d functions found, %d new:' %
(len(f), len(set(f) - functions)))
functions |= set(f)
functions = sorted(functions)
modules = {}
for full_function_name in functions:
print('>> %s' % full_function_name)
m = get_function_module(full_function_name)
modules.setdefault(m, []).append(full_function_name)
modules = {k: tuple(v) for k, v in modules.items()}
regenerate(__file__, modules)
run()
| 29.232082
| 80
| 0.468301
|
5decec2368ff2f737b5e7fd0c748bc5098b941fe
| 208
|
py
|
Python
|
app/utils.py
|
DCBergantini/markd_table
|
4ce6fa880920bd0f4fcd73c4a787ccfcc971fbee
|
[
"MIT"
] | null | null | null |
app/utils.py
|
DCBergantini/markd_table
|
4ce6fa880920bd0f4fcd73c4a787ccfcc971fbee
|
[
"MIT"
] | null | null | null |
app/utils.py
|
DCBergantini/markd_table
|
4ce6fa880920bd0f4fcd73c4a787ccfcc971fbee
|
[
"MIT"
] | 2
|
2022-01-15T01:27:15.000Z
|
2022-01-18T18:57:30.000Z
|
def get_unique_values(values: list):
    unique_list = []
    for item in values:
        if item not in unique_list:
            unique_list.append(item)
    return unique_list
__name__=="__main__"
| 13.866667
| 36
| 0.615385
|
fff703f27d714fc09d38b8c75435e07a0cdd2a08
| 1,085
|
py
|
Python
|
azure-servicefabric/azure/servicefabric/models/entity_health_state_chunk_list.py
|
SUSE/azure-sdk-for-python
|
324f99d26dd6f4ee9793b9bf1d4d5f928e4b6c2f
|
[
"MIT"
] | 2
|
2020-07-29T14:22:17.000Z
|
2020-11-06T18:47:40.000Z
|
azure-servicefabric/azure/servicefabric/models/entity_health_state_chunk_list.py
|
SUSE/azure-sdk-for-python
|
324f99d26dd6f4ee9793b9bf1d4d5f928e4b6c2f
|
[
"MIT"
] | 1
|
2016-08-01T07:37:04.000Z
|
2016-08-01T07:37:04.000Z
|
azure-servicefabric/azure/servicefabric/models/entity_health_state_chunk_list.py
|
SUSE/azure-sdk-for-python
|
324f99d26dd6f4ee9793b9bf1d4d5f928e4b6c2f
|
[
"MIT"
] | 1
|
2020-12-12T21:04:41.000Z
|
2020-12-12T21:04:41.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class EntityHealthStateChunkList(Model):
"""A base type for the list of health state chunks found in the cluster. It
contains the total number of health states that match the input filters.
:param total_count: Total number of entity health state objects that
match the specified filters from the cluster health chunk query
description.
:type total_count: long
"""
_attribute_map = {
'total_count': {'key': 'TotalCount', 'type': 'long'},
}
def __init__(self, total_count=None):
self.total_count = total_count
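# Hedged usage sketch (not part of the generated SDK): the model is a plain
# value object holding the total count reported by a health chunk query.
if __name__ == "__main__":
    chunk_list = EntityHealthStateChunkList(total_count=3)
    print(chunk_list.total_count)  # expected: 3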
| 35
| 79
| 0.623041
|
ca32b4b5999e8abf6ec12d057a3a243cf2eda0aa
| 6,534
|
py
|
Python
|
src/sentry/integrations/github_enterprise/webhook.py
|
sigismund/sentry
|
421a512cd3b4a4c9ed660af536dc5bc4c12a287c
|
[
"BSD-3-Clause"
] | 1
|
2019-05-28T06:18:03.000Z
|
2019-05-28T06:18:03.000Z
|
src/sentry/integrations/github_enterprise/webhook.py
|
sigismund/sentry
|
421a512cd3b4a4c9ed660af536dc5bc4c12a287c
|
[
"BSD-3-Clause"
] | 6
|
2018-10-19T10:04:23.000Z
|
2019-12-09T20:29:12.000Z
|
src/sentry/integrations/github_enterprise/webhook.py
|
sigismund/sentry
|
421a512cd3b4a4c9ed660af536dc5bc4c12a287c
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import hashlib
import hmac
import logging
import six
from django.http import HttpResponse
from django.utils.crypto import constant_time_compare
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import View
from simplejson import JSONDecodeError
from sentry.models import Integration
from sentry.utils import json
from sentry.integrations.github.webhook import InstallationEventWebhook, InstallationRepositoryEventWebhook, PushEventWebhook, PullRequestEventWebhook
from .repository import GitHubEnterpriseRepositoryProvider
logger = logging.getLogger('sentry.webhooks')
def get_installation_metadata(event, host):
if not host:
return
try:
integration = Integration.objects.get(
external_id='{}:{}'.format(host, event['installation']['id']),
provider='github_enterprise')
except Integration.DoesNotExist:
return
return integration.metadata['installation']
class GitHubEnterpriseInstallationEventWebhook(InstallationEventWebhook):
provider = 'github_enterprise'
class GitHubEnterpriseInstallationRepositoryEventWebhook(InstallationRepositoryEventWebhook):
provider = 'github_enterprise'
# https://developer.github.com/v3/activity/events/types/#installationrepositoriesevent
def _handle(self, event, organization, repo):
pass
class GitHubEnterprisePushEventWebhook(PushEventWebhook):
provider = 'github_enterprise'
# https://developer.github.com/v3/activity/events/types/#pushevent
def is_anonymous_email(self, email):
return email[-25:] == '@users.noreply.github.com'
def get_external_id(self, username):
return 'github_enterprise:%s' % username
def get_idp_external_id(self, integration, host):
return '{}:{}'.format(host, integration.metadata['installation']['id'])
def should_ignore_commit(self, commit):
return GitHubEnterpriseRepositoryProvider.should_ignore_commit(commit['message'])
class GitHubEnterprisePullRequestEventWebhook(PullRequestEventWebhook):
provider = 'github_enterprise'
# https://developer.github.com/v3/activity/events/types/#pullrequestevent
def is_anonymous_email(self, email):
return email[-25:] == '@users.noreply.github.com'
def get_external_id(self, username):
return 'github_enterprise:%s' % username
def get_idp_external_id(self, integration, host):
return '{}:{}'.format(host, integration.metadata['installation']['id'])
class GitHubEnterpriseWebhookBase(View):
# https://developer.github.com/webhooks/
def get_handler(self, event_type):
return self._handlers.get(event_type)
def is_valid_signature(self, method, body, secret, signature):
if method == 'sha1':
mod = hashlib.sha1
else:
raise NotImplementedError('signature method %s is not supported' % (method, ))
expected = hmac.new(
key=secret.encode('utf-8'),
msg=body,
digestmod=mod,
).hexdigest()
return constant_time_compare(expected, signature)
@method_decorator(csrf_exempt)
def dispatch(self, request, *args, **kwargs):
if request.method != 'POST':
return HttpResponse(status=405)
return super(GitHubEnterpriseWebhookBase, self).dispatch(request, *args, **kwargs)
def get_logging_data(self):
pass
def get_secret(self, event, host):
metadata = get_installation_metadata(event, host)
if metadata:
return metadata.get('webhook_secret')
else:
return None
def handle(self, request):
body = six.binary_type(request.body)
if not body:
logger.error(
'github_enterprise.webhook.missing-body',
extra=self.get_logging_data(),
)
return HttpResponse(status=400)
try:
handler = self.get_handler(request.META['HTTP_X_GITHUB_EVENT'])
except KeyError:
logger.error(
'github_enterprise.webhook.missing-event',
extra=self.get_logging_data(),
)
return HttpResponse(status=400)
if not handler:
return HttpResponse(status=204)
try:
method, signature = request.META['HTTP_X_HUB_SIGNATURE'].split('=', 1)
except (KeyError, IndexError):
logger.error(
'github_enterprise.webhook.missing-signature',
extra=self.get_logging_data(),
)
return HttpResponse(status=400)
try:
event = json.loads(body.decode('utf-8'))
except JSONDecodeError:
logger.error(
'github_enterprise.webhook.invalid-json',
extra=self.get_logging_data(),
exc_info=True,
)
return HttpResponse(status=400)
try:
host = request.META['HTTP_X_GITHUB_ENTERPRISE_HOST']
except KeyError:
return HttpResponse(status=401)
secret = self.get_secret(event, host)
if secret is None:
logger.error(
'github_enterprise.webhook.missing-secret',
extra=self.get_logging_data(),
)
return HttpResponse(status=401)
if not self.is_valid_signature(method, body, self.get_secret(event, host), signature):
logger.error(
'github_enterprise.webhook.invalid-signature',
extra=self.get_logging_data(),
)
return HttpResponse(status=401)
handler()(event, host)
return HttpResponse(status=204)
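# Hedged sketch (illustrative, not part of this module): how a sender such as
# GitHub Enterprise computes the `X-Hub-Signature` header that
# `is_valid_signature` verifies above; `secret` is the webhook secret string
# and `body` the raw request bytes.
def _example_hub_signature(secret, body):
    digest = hmac.new(secret.encode('utf-8'), body, hashlib.sha1).hexdigest()
    return 'sha1=' + digest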
class GitHubEnterpriseWebhookEndpoint(GitHubEnterpriseWebhookBase):
_handlers = {
'push': GitHubEnterprisePushEventWebhook,
'pull_request': GitHubEnterprisePullRequestEventWebhook,
'installation': GitHubEnterpriseInstallationEventWebhook,
'installation_repositories': GitHubEnterpriseInstallationRepositoryEventWebhook,
}
@method_decorator(csrf_exempt)
def dispatch(self, request, *args, **kwargs):
if request.method != 'POST':
return HttpResponse(status=405)
return super(GitHubEnterpriseWebhookEndpoint, self).dispatch(request, *args, **kwargs)
@method_decorator(csrf_exempt)
def post(self, request):
return self.handle(request)
| 33.680412
| 150
| 0.667126
|
12aef745bf6a8b9faf2356fd833ade0e8f08d97f
| 2,037
|
py
|
Python
|
dogia/wallet/sign_coin_solutions.py
|
dogia-coin/dogia-blockchain
|
16ea0a09777354905150c216e1fab60141296473
|
[
"Apache-2.0"
] | null | null | null |
dogia/wallet/sign_coin_solutions.py
|
dogia-coin/dogia-blockchain
|
16ea0a09777354905150c216e1fab60141296473
|
[
"Apache-2.0"
] | null | null | null |
dogia/wallet/sign_coin_solutions.py
|
dogia-coin/dogia-blockchain
|
16ea0a09777354905150c216e1fab60141296473
|
[
"Apache-2.0"
] | null | null | null |
import inspect
from typing import List, Any
import blspy
from blspy import AugSchemeMPL
from dogia.types.coin_solution import CoinSolution
from dogia.types.spend_bundle import SpendBundle
from dogia.util.condition_tools import conditions_dict_for_solution, pkm_pairs_for_conditions_dict
async def sign_coin_solutions(
coin_solutions: List[CoinSolution],
secret_key_for_public_key_f: Any, # Potentially awaitable function from G1Element => Optional[PrivateKey]
additional_data: bytes,
max_cost: int,
) -> SpendBundle:
signatures: List[blspy.G2Element] = []
pk_list: List[blspy.G1Element] = []
msg_list: List[bytes] = []
for coin_solution in coin_solutions:
# Get AGG_SIG conditions
err, conditions_dict, cost = conditions_dict_for_solution(
coin_solution.puzzle_reveal, coin_solution.solution, max_cost
)
if err or conditions_dict is None:
error_msg = f"Sign transaction failed, con:{conditions_dict}, error: {err}"
raise ValueError(error_msg)
# Create signature
for pk, msg in pkm_pairs_for_conditions_dict(
conditions_dict, bytes(coin_solution.coin.name()), additional_data
):
pk_list.append(pk)
msg_list.append(msg)
if inspect.iscoroutinefunction(secret_key_for_public_key_f):
secret_key = await secret_key_for_public_key_f(pk)
else:
secret_key = secret_key_for_public_key_f(pk)
if secret_key is None:
e_msg = f"no secret key for {pk}"
raise ValueError(e_msg)
assert bytes(secret_key.get_g1()) == bytes(pk)
signature = AugSchemeMPL.sign(secret_key, msg)
assert AugSchemeMPL.verify(pk, msg, signature)
signatures.append(signature)
# Aggregate signatures
aggsig = AugSchemeMPL.aggregate(signatures)
assert AugSchemeMPL.aggregate_verify(pk_list, msg_list, aggsig)
return SpendBundle(coin_solutions, aggsig)
| 39.173077
| 110
| 0.689249
|
dd80015de7b1762c7dce04218da5261a69d29d0d
| 438
|
py
|
Python
|
basic_coding/Day1/perfect_number.py
|
pralhad88/samyakDemo
|
197ad256a70f969c1be06ca9f25fd45f16ed793d
|
[
"MIT"
] | 2
|
2020-04-14T08:31:39.000Z
|
2021-11-02T11:22:19.000Z
|
basic_coding/Day1/perfect_number.py
|
pralhad88/BasicPython
|
197ad256a70f969c1be06ca9f25fd45f16ed793d
|
[
"MIT"
] | null | null | null |
basic_coding/Day1/perfect_number.py
|
pralhad88/BasicPython
|
197ad256a70f969c1be06ca9f25fd45f16ed793d
|
[
"MIT"
] | null | null | null |
# If you want to check whether only one number is perfect or not
x = int(input("Enter number"))
sum1 = 0
for i in range(1,x):
if x % i == 0:
print(i)
sum1=sum1+i
if sum1 == x:
print(x,"perfect number hai")
# If you want to cheak multiple number is perfect number is or not.
x = int(input("Enter number"))
for i in range(1,x):
sum2 = 0
for j in range(1,i):
if i % j == 0:
sum2 = sum2 + j
if sum2 == i:
print(i,"Is Perfect Number")
| 20.857143
| 67
| 0.625571
|
f1ede6bb60c650777caa1a39ceab94dffef273d8
| 23,706
|
py
|
Python
|
python/ray/tune/ray_trial_executor.py
|
gaocegege/ray
|
03d05c8765bb6cfd30fdbbcd4577dc22c5dc5af7
|
[
"Apache-2.0"
] | 1
|
2019-08-21T08:31:10.000Z
|
2019-08-21T08:31:10.000Z
|
python/ray/tune/ray_trial_executor.py
|
GitAlanWong/ray
|
c852213b8349b6b9e9e7353573e2259a1b9ef925
|
[
"Apache-2.0"
] | null | null | null |
python/ray/tune/ray_trial_executor.py
|
GitAlanWong/ray
|
c852213b8349b6b9e9e7353573e2259a1b9ef925
|
[
"Apache-2.0"
] | 2
|
2019-09-04T13:27:51.000Z
|
2019-09-17T04:20:38.000Z
|
# coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import math
import os
import random
import time
import traceback
import ray
from ray.tune.error import AbortTrialExecution
from ray.tune.logger import NoopLogger
from ray.tune.trial import Trial, Checkpoint
from ray.tune.resources import Resources
from ray.tune.trial_executor import TrialExecutor
from ray.tune.util import warn_if_slow
logger = logging.getLogger(__name__)
RESOURCE_REFRESH_PERIOD = 0.5 # Refresh resources every 500 ms
BOTTLENECK_WARN_PERIOD_S = 60
NONTRIVIAL_WAIT_TIME_THRESHOLD_S = 1e-3
class _LocalWrapper(object):
def __init__(self, result):
self._result = result
def unwrap(self):
"""Returns the wrapped result."""
return self._result
class RayTrialExecutor(TrialExecutor):
"""An implemention of TrialExecutor based on Ray."""
def __init__(self,
queue_trials=False,
reuse_actors=False,
ray_auto_init=False,
refresh_period=RESOURCE_REFRESH_PERIOD):
super(RayTrialExecutor, self).__init__(queue_trials)
self._running = {}
# Since trial resume after paused should not run
# trial.train.remote(), thus no more new remote object id generated.
# We use self._paused to store paused trials here.
self._paused = {}
self._reuse_actors = reuse_actors
self._cached_actor = None
self._avail_resources = Resources(cpu=0, gpu=0)
self._committed_resources = Resources(cpu=0, gpu=0)
self._resources_initialized = False
self._refresh_period = refresh_period
self._last_resource_refresh = float("-inf")
self._last_nontrivial_wait = time.time()
if not ray.is_initialized() and ray_auto_init:
logger.info("Initializing Ray automatically."
"For cluster usage or custom Ray initialization, "
"call `ray.init(...)` before `tune.run`.")
ray.init(object_store_memory=int(1e8))
if ray.is_initialized():
self._update_avail_resources()
def _setup_runner(self, trial, reuse_allowed):
if (self._reuse_actors and reuse_allowed
and self._cached_actor is not None):
logger.debug("Reusing cached runner {} for {}".format(
self._cached_actor, trial.trial_id))
existing_runner = self._cached_actor
self._cached_actor = None
else:
if self._cached_actor:
logger.debug(
"Cannot reuse cached runner {} for new trial".format(
self._cached_actor))
self._cached_actor.stop.remote()
self._cached_actor.__ray_terminate__.remote()
self._cached_actor = None
existing_runner = None
cls = ray.remote(
num_cpus=trial.resources.cpu,
num_gpus=trial.resources.gpu,
resources=trial.resources.custom_resources)(
trial._get_trainable_cls())
trial.init_logger()
# We checkpoint metadata here to try mitigating logdir duplication
self.try_checkpoint_metadata(trial)
remote_logdir = trial.logdir
if existing_runner:
trial.runner = existing_runner
if not self.reset_trial(trial, trial.config, trial.experiment_tag):
raise AbortTrialExecution(
"Trainable runner reuse requires reset_config() to be "
"implemented and return True.")
return existing_runner
def logger_creator(config):
# Set the working dir in the remote process, for user file writes
if not os.path.exists(remote_logdir):
os.makedirs(remote_logdir)
if not ray.worker._mode() == ray.worker.LOCAL_MODE:
os.chdir(remote_logdir)
return NoopLogger(config, remote_logdir)
# Logging for trials is handled centrally by TrialRunner, so
# configure the remote runner to use a noop-logger.
return cls.remote(config=trial.config, logger_creator=logger_creator)
def _train(self, trial):
"""Start one iteration of training and save remote id."""
assert trial.status == Trial.RUNNING, trial.status
remote = trial.runner.train.remote()
# Local Mode
if isinstance(remote, dict):
remote = _LocalWrapper(remote)
self._running[remote] = trial
def _start_trial(self, trial, checkpoint=None):
"""Starts trial and restores last result if trial was paused.
Raises:
ValueError if restoring from checkpoint fails.
"""
prior_status = trial.status
self.set_status(trial, Trial.RUNNING)
trial.runner = self._setup_runner(
trial,
reuse_allowed=checkpoint is not None
or trial._checkpoint.value is not None)
if not self.restore(trial, checkpoint):
if trial.status == Trial.ERROR:
raise RuntimeError(
"Restore from checkpoint failed for Trial {}.".format(
str(trial)))
previous_run = self._find_item(self._paused, trial)
if (prior_status == Trial.PAUSED and previous_run):
# If Trial was in flight when paused, self._paused stores result.
self._paused.pop(previous_run[0])
self._running[previous_run[0]] = trial
else:
self._train(trial)
def _stop_trial(self, trial, error=False, error_msg=None,
stop_logger=True):
"""Stops this trial.
Stops this trial, releasing all allocating resources. If stopping the
trial fails, the run will be marked as terminated in error, but no
exception will be thrown.
Args:
error (bool): Whether to mark this trial as terminated in error.
error_msg (str): Optional error message.
stop_logger (bool): Whether to shut down the trial logger.
"""
if stop_logger:
trial.close_logger()
if error:
self.set_status(trial, Trial.ERROR)
else:
self.set_status(trial, Trial.TERMINATED)
try:
trial.write_error_log(error_msg)
if hasattr(trial, "runner") and trial.runner:
if (not error and self._reuse_actors
and self._cached_actor is None):
logger.debug("Reusing actor for {}".format(trial.runner))
self._cached_actor = trial.runner
else:
logger.debug(
"Destroying actor for trial {}.".format(trial))
trial.runner.stop.remote()
trial.runner.__ray_terminate__.remote()
except Exception:
logger.exception("Error stopping runner for Trial %s", str(trial))
self.set_status(trial, Trial.ERROR)
finally:
trial.runner = None
def start_trial(self, trial, checkpoint=None):
"""Starts the trial.
Will not return resources if trial repeatedly fails on start.
Args:
trial (Trial): Trial to be started.
checkpoint (Checkpoint): A Python object or path storing the state
of trial.
"""
self._commit_resources(trial.resources)
try:
self._start_trial(trial, checkpoint)
except Exception as e:
logger.exception("Error starting runner for Trial %s", str(trial))
error_msg = traceback.format_exc()
time.sleep(2)
self._stop_trial(trial, error=True, error_msg=error_msg)
if isinstance(e, AbortTrialExecution):
return # don't retry fatal Tune errors
try:
# This forces the trial to not start from checkpoint.
trial.clear_checkpoint()
logger.info(
"Trying to start runner for Trial %s without checkpoint.",
str(trial))
self._start_trial(trial)
except Exception:
logger.exception(
"Error starting runner for Trial %s, aborting!",
str(trial))
error_msg = traceback.format_exc()
self._stop_trial(trial, error=True, error_msg=error_msg)
# note that we don't return the resources, since they may
# have been lost
def _find_item(self, dictionary, item):
out = [rid for rid, t in dictionary.items() if t is item]
return out
def stop_trial(self, trial, error=False, error_msg=None, stop_logger=True):
"""Only returns resources if resources allocated."""
prior_status = trial.status
self._stop_trial(
trial, error=error, error_msg=error_msg, stop_logger=stop_logger)
if prior_status == Trial.RUNNING:
logger.debug("Returning resources for Trial %s.", str(trial))
self._return_resources(trial.resources)
out = self._find_item(self._running, trial)
for result_id in out:
self._running.pop(result_id)
def continue_training(self, trial):
"""Continues the training of this trial."""
self._train(trial)
def pause_trial(self, trial):
"""Pauses the trial.
If trial is in-flight, preserves return value in separate queue
before pausing, which is restored when Trial is resumed.
"""
trial_future = self._find_item(self._running, trial)
if trial_future:
self._paused[trial_future[0]] = trial
super(RayTrialExecutor, self).pause_trial(trial)
def reset_trial(self, trial, new_config, new_experiment_tag):
"""Tries to invoke `Trainable.reset_config()` to reset trial.
Args:
trial (Trial): Trial to be reset.
new_config (dict): New configuration for Trial
trainable.
new_experiment_tag (str): New experiment name
for trial.
Returns:
True if `reset_config` is successful else False.
"""
trial.experiment_tag = new_experiment_tag
trial.config = new_config
trainable = trial.runner
with warn_if_slow("reset_config"):
reset_val = ray.get(trainable.reset_config.remote(new_config))
return reset_val
def get_running_trials(self):
"""Returns the running trials."""
return list(self._running.values())
def get_alive_node_ips(self):
nodes = ray.state.nodes()
ip_addresses = set()
for node in nodes:
if node["alive"]:
ip_addresses.add(node["NodeManagerAddress"])
return ip_addresses
def get_current_trial_ips(self):
return {t.node_ip for t in self.get_running_trials()}
def get_next_available_trial(self):
if ray.worker._mode() != ray.worker.LOCAL_MODE:
live_cluster_ips = self.get_alive_node_ips()
if live_cluster_ips - self.get_current_trial_ips():
for trial in self.get_running_trials():
if trial.node_ip and trial.node_ip not in live_cluster_ips:
logger.warning(
"{} (ip: {}) detected as stale. This is likely "
"because the node was lost. Processing this "
"trial first.".format(trial, trial.node_ip))
return trial
shuffled_results = list(self._running.keys())
random.shuffle(shuffled_results)
# Note: We shuffle the results because `ray.wait` by default returns
# the first available result, and we want to guarantee that slower
# trials (i.e. trials that run remotely) also get fairly reported.
# See https://github.com/ray-project/ray/issues/4211 for details.
start = time.time()
[result_id], _ = ray.wait(shuffled_results)
wait_time = time.time() - start
if wait_time > NONTRIVIAL_WAIT_TIME_THRESHOLD_S:
self._last_nontrivial_wait = time.time()
if time.time() - self._last_nontrivial_wait > BOTTLENECK_WARN_PERIOD_S:
logger.warn(
"Over the last {} seconds, the Tune event loop has been "
"backlogged processing new results. Consider increasing your "
"period of result reporting to improve performance.".format(
BOTTLENECK_WARN_PERIOD_S))
self._last_nontrivial_wait = time.time()
return self._running[result_id]
def fetch_result(self, trial):
"""Fetches one result of the running trials.
Returns:
Result of the most recent trial training run."""
trial_future = self._find_item(self._running, trial)
if not trial_future:
raise ValueError("Trial was not running.")
self._running.pop(trial_future[0])
with warn_if_slow("fetch_result"):
result = ray.get(trial_future[0])
# For local mode
if isinstance(result, _LocalWrapper):
result = result.unwrap()
return result
def _commit_resources(self, resources):
committed = self._committed_resources
all_keys = set(resources.custom_resources).union(
set(committed.custom_resources))
custom_resources = {
k: committed.get(k) + resources.get_res_total(k)
for k in all_keys
}
self._committed_resources = Resources(
committed.cpu + resources.cpu_total(),
committed.gpu + resources.gpu_total(),
custom_resources=custom_resources)
def _return_resources(self, resources):
committed = self._committed_resources
all_keys = set(resources.custom_resources).union(
set(committed.custom_resources))
custom_resources = {
k: committed.get(k) - resources.get_res_total(k)
for k in all_keys
}
self._committed_resources = Resources(
committed.cpu - resources.cpu_total(),
committed.gpu - resources.gpu_total(),
custom_resources=custom_resources)
assert self._committed_resources.is_nonnegative(), (
"Resource invalid: {}".format(resources))
def _update_avail_resources(self, num_retries=5):
for i in range(num_retries):
try:
resources = ray.cluster_resources()
except Exception:
# TODO(rliaw): Remove this when local mode is fixed.
# https://github.com/ray-project/ray/issues/4147
logger.debug("Using resources for local machine.")
resources = ray.services.check_and_update_resources(
None, None, None)
if not resources:
logger.warning(
"Cluster resources not detected or are 0. Retrying...")
time.sleep(0.5)
if not resources:
# NOTE: This hides the possibility that Ray may be waiting for
# clients to connect.
resources.setdefault("CPU", 0)
resources.setdefault("GPU", 0)
logger.warning("Cluster resources cannot be detected or are 0. "
"You can resume this experiment by passing in "
"`resume=True` to `run`.")
resources = resources.copy()
num_cpus = resources.pop("CPU", 0)
num_gpus = resources.pop("GPU", 0)
custom_resources = resources
self._avail_resources = Resources(
int(num_cpus), int(num_gpus), custom_resources=custom_resources)
self._last_resource_refresh = time.time()
self._resources_initialized = True
def has_resources(self, resources):
"""Returns whether this runner has at least the specified resources.
This refreshes the Ray cluster resources if the time since last update
has exceeded self._refresh_period. This also assumes that the
cluster is not resizing very frequently.
"""
if time.time() - self._last_resource_refresh > self._refresh_period:
self._update_avail_resources()
currently_available = Resources.subtract(self._avail_resources,
self._committed_resources)
have_space = (
resources.cpu_total() <= currently_available.cpu
and resources.gpu_total() <= currently_available.gpu and all(
resources.get_res_total(res) <= currently_available.get(res)
for res in resources.custom_resources))
if have_space:
return True
can_overcommit = self._queue_trials
if (resources.cpu_total() > 0 and currently_available.cpu <= 0) or \
(resources.gpu_total() > 0 and currently_available.gpu <= 0) or \
any((resources.get_res_total(res_name) > 0
and currently_available.get(res_name) <= 0)
for res_name in resources.custom_resources):
can_overcommit = False # requested resource is already saturated
if can_overcommit:
logger.warning(
"Allowing trial to start even though the "
"cluster does not have enough free resources. Trial actors "
"may appear to hang until enough resources are added to the "
"cluster (e.g., via autoscaling). You can disable this "
"behavior by specifying `queue_trials=False` in "
"ray.tune.run().")
return True
return False
def debug_string(self):
"""Returns a human readable message for printing to the console."""
if self._resources_initialized:
status = "Resources requested: {}/{} CPUs, {}/{} GPUs".format(
self._committed_resources.cpu, self._avail_resources.cpu,
self._committed_resources.gpu, self._avail_resources.gpu)
customs = ", ".join([
"{}/{} {}".format(
self._committed_resources.get_res_total(name),
self._avail_resources.get_res_total(name), name)
for name in self._avail_resources.custom_resources
])
if customs:
status += " ({})".format(customs)
return status
else:
return "Resources requested: ?"
def resource_string(self):
"""Returns a string describing the total resources available."""
if self._resources_initialized:
res_str = "{} CPUs, {} GPUs".format(self._avail_resources.cpu,
self._avail_resources.gpu)
if self._avail_resources.custom_resources:
custom = ", ".join(
"{} {}".format(
self._avail_resources.get_res_total(name), name)
for name in self._avail_resources.custom_resources)
res_str += " ({})".format(custom)
return res_str
else:
return "? CPUs, ? GPUs"
def on_step_begin(self):
"""Before step() called, update the available resources."""
self._update_avail_resources()
def save(self, trial, storage=Checkpoint.DISK):
"""Saves the trial's state to a checkpoint."""
trial._checkpoint.storage = storage
trial._checkpoint.last_result = trial.last_result
if storage == Checkpoint.MEMORY:
trial._checkpoint.value = trial.runner.save_to_object.remote()
else:
# Keeps only highest performing checkpoints if enabled
if trial.keep_checkpoints_num:
try:
last_attr_val = trial.last_result[
trial.checkpoint_score_attr]
if (trial.compare_checkpoints(last_attr_val)
and not math.isnan(last_attr_val)):
trial.best_checkpoint_attr_value = last_attr_val
self._checkpoint_and_erase(trial)
except KeyError:
logger.warning(
"Result dict has no key: {}. keep"
"_checkpoints_num flag will not work".format(
trial.checkpoint_score_attr))
else:
with warn_if_slow("save_to_disk"):
trial._checkpoint.value = ray.get(
trial.runner.save.remote())
return trial._checkpoint.value
def _checkpoint_and_erase(self, trial):
"""Checkpoints the model and erases old checkpoints
if needed.
Parameters
----------
trial : trial to save
"""
with warn_if_slow("save_to_disk"):
trial._checkpoint.value = ray.get(trial.runner.save.remote())
if len(trial.history) >= trial.keep_checkpoints_num:
ray.get(trial.runner.delete_checkpoint.remote(trial.history[-1]))
trial.history.pop()
trial.history.insert(0, trial._checkpoint.value)
def restore(self, trial, checkpoint=None):
"""Restores training state from a given model checkpoint.
This will also sync the trial results to a new location
if restoring on a different node.
"""
if checkpoint is None or checkpoint.value is None:
checkpoint = trial._checkpoint
if checkpoint is None or checkpoint.value is None:
return True
if trial.runner is None:
logger.error("Unable to restore - no runner.")
self.set_status(trial, Trial.ERROR)
return False
try:
value = checkpoint.value
if checkpoint.storage == Checkpoint.MEMORY:
assert type(value) != Checkpoint, type(value)
trial.runner.restore_from_object.remote(value)
else:
# TODO: Somehow, the call to get the current IP on the
# remote actor can be very slow - a better fix would
# be to use an actor table to detect the IP of the Trainable
# and rsync the files there.
# See https://github.com/ray-project/ray/issues/5168
with warn_if_slow("get_current_ip"):
worker_ip = ray.get(trial.runner.current_ip.remote())
with warn_if_slow("sync_to_new_location"):
trial.sync_logger_to_new_location(worker_ip)
with warn_if_slow("restore_from_disk"):
ray.get(trial.runner.restore.remote(value))
trial.last_result = checkpoint.last_result
return True
except Exception:
logger.exception("Error restoring runner for Trial %s.", trial)
self.set_status(trial, Trial.ERROR)
return False
def export_trial_if_needed(self, trial):
"""Exports model of this trial based on trial.export_formats.
Return:
A dict that maps ExportFormats to successfully exported models.
"""
if trial.export_formats and len(trial.export_formats) > 0:
return ray.get(
trial.runner.export_model.remote(trial.export_formats))
return {}
| 40.043919
| 79
| 0.597486
|
7ca650ad105a0de0a548ba831dfc6672d4df339a
| 1,469
|
py
|
Python
|
Jarvan/Modules/admin.py
|
jabezborja/Jarvan-AI
|
fe3b1ede98cbc2213fcedb886c282e5236e97bfa
|
[
"MIT"
] | 1
|
2021-03-07T11:53:54.000Z
|
2021-03-07T11:53:54.000Z
|
Jarvan/Modules/admin.py
|
jabezborja/Jarvan
|
fe3b1ede98cbc2213fcedb886c282e5236e97bfa
|
[
"MIT"
] | null | null | null |
Jarvan/Modules/admin.py
|
jabezborja/Jarvan
|
fe3b1ede98cbc2213fcedb886c282e5236e97bfa
|
[
"MIT"
] | 2
|
2020-12-18T02:13:44.000Z
|
2021-05-04T00:00:20.000Z
|
from VA.speaker import speak
from VA.audio import recordAudio
import sqlite3
import os
class Admin:
"""docstring for Admin"""
def __init__(self, arg):
super(Admin, self).__init__()
self.arg = arg
def admin(self, response, conn, intCon):
os.system('cls')
speak("Setup mode activated")
print("To deactivate, say something that does not familiar in the setup mode or say exit.")
validate = recordAudio(intCon).lower()
if('exit' in validate):
speak("Setup mode deactivated")
return
if ('jokes' in validate):
self.jokes.addjokes(self.jokes, conn, intCon)
class jokes:
"""docstring for jokes"""
def __init__(self, arg):
super(jokes, self).__init__()
self.arg = arg
def countJokes(jokes):
count = 0
for joke in jokes:
count += 1
return count
def addjokes(self, conn, intCon):
speak("Setting up jokes")
cursor = conn.cursor()
cursor.execute("SELECT * FROM JOKES")
jokes = cursor.fetchall()
sqlite3_insert = "INSERT INTO JOKES (id, joke, answer) VALUES (?, ?, ?)"
idnum = self.countJokes(jokes) + 1
speak("What joke is it?")
joke = recordAudio(intCon).lower()
speak("Okay, what is the answer?")
answer = recordAudio(intCon).lower()
speak("Okay I am going to list it down")
data_tuple = (int(idnum), joke, answer)
cursor.execute(sqlite3_insert, data_tuple)
conn.commit()
speak("Joke added. That was lit.")
Admin.admin(Admin.admin, "none", conn, intCon)
| 22.257576
| 93
| 0.669163
|
14b66d2e6eeb022c640612636d30183177939c4e
| 541
|
py
|
Python
|
polling_stations/apps/data_importers/management/commands/import_calderdale.py
|
smsmith97/UK-Polling-Stations
|
ecbd98cb99e89e97354da3960b0063aa36181b11
|
[
"BSD-3-Clause"
] | 29
|
2015-03-10T08:41:34.000Z
|
2022-01-12T08:51:38.000Z
|
polling_stations/apps/data_importers/management/commands/import_calderdale.py
|
smsmith97/UK-Polling-Stations
|
ecbd98cb99e89e97354da3960b0063aa36181b11
|
[
"BSD-3-Clause"
] | 4,112
|
2015-04-01T21:27:38.000Z
|
2022-03-31T19:22:11.000Z
|
polling_stations/apps/data_importers/management/commands/import_calderdale.py
|
smsmith97/UK-Polling-Stations
|
ecbd98cb99e89e97354da3960b0063aa36181b11
|
[
"BSD-3-Clause"
] | 31
|
2015-03-18T14:52:50.000Z
|
2022-02-24T10:31:07.000Z
|
from data_importers.management.commands import BaseDemocracyCountsCsvImporter
class Command(BaseDemocracyCountsCsvImporter):
council_id = "CLD"
addresses_name = "2021-04-16T12:55:22.326313/CMBC Polling Districts.csv"
stations_name = "2021-04-16T12:55:22.326313/CMBC Polling Stations.csv"
elections = ["2021-05-06"]
def address_record_to_dict(self, record):
uprn = record.uprn.strip().lstrip("0")
if uprn == "200001826572":
return None
return super().address_record_to_dict(record)
| 33.8125
| 77
| 0.713494
|
cd8cb68eeb05e8bc7b7b4ffc8d08399a4fd68a68
| 1,104
|
py
|
Python
|
goal_prox/models.py
|
clvrai/goal_prox_il
|
7c809b2ee575a69a14997068db06f3c1f3c8bd08
|
[
"MIT"
] | 4
|
2021-11-17T20:19:34.000Z
|
2022-03-31T04:21:26.000Z
|
goal_prox/models.py
|
clvrai/goal_prox_il
|
7c809b2ee575a69a14997068db06f3c1f3c8bd08
|
[
"MIT"
] | null | null | null |
goal_prox/models.py
|
clvrai/goal_prox_il
|
7c809b2ee575a69a14997068db06f3c1f3c8bd08
|
[
"MIT"
] | null | null | null |
import torch.nn as nn
from rlf.rl.model import BaseNet, Flatten, MLPBase
class GwImgEncoder(BaseNet):
"""
Custom image encoder to support the Grid World environment with image
observations (rather than flattened observations). This is important for
large grids.
"""
def __init__(self, obs_shape, hidden_size=64):
super().__init__(False, hidden_size, hidden_size)
# Network architecture inspired by https://github.com/lcswillems/rl-starter-files/blob/master/model.py
n = obs_shape[1]
m = obs_shape[2]
image_embedding_size = ((n - 1) // 2 - 2) * ((m - 1) // 2 - 2) * 64
self.net = nn.Sequential(
nn.Conv2d(obs_shape[0], 16, (2, 2)),
nn.ReLU(),
nn.MaxPool2d((2, 2)),
nn.Conv2d(16, 32, (2, 2)),
nn.ReLU(),
nn.Conv2d(32, 64, (2, 2)),
nn.ReLU(),
Flatten(),
nn.Linear(image_embedding_size, hidden_size),
nn.ReLU(),
)
def forward(self, inputs, rnn_hxs, masks):
return self.net(inputs), rnn_hxs
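# Hedged usage sketch (not part of the original module): encode a batch of
# grid-world image observations. The (3, 9, 9) observation shape and the zero
# tensor are assumptions chosen only for illustration.
if __name__ == "__main__":
    import torch
    encoder = GwImgEncoder(obs_shape=(3, 9, 9), hidden_size=64)
    obs = torch.zeros(4, 3, 9, 9)
    features, _ = encoder(obs, rnn_hxs=None, masks=None)
    print(features.shape)  # expected: torch.Size([4, 64])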
| 31.542857
| 110
| 0.575181
|
4d91e927efd81b30e720f3516f6730132d7c9afa
| 2,589
|
py
|
Python
|
saq/modules/alerts.py
|
ace-ecosystem/ACE
|
d17b5ef4bccf923ec6be5115fabe40f0627dab2d
|
[
"Apache-2.0"
] | 24
|
2019-09-21T21:09:45.000Z
|
2022-03-15T19:48:13.000Z
|
saq/modules/alerts.py
|
ace-ecosystem/ACE
|
d17b5ef4bccf923ec6be5115fabe40f0627dab2d
|
[
"Apache-2.0"
] | 54
|
2019-09-16T20:06:30.000Z
|
2021-08-18T22:22:08.000Z
|
saq/modules/alerts.py
|
ace-ecosystem/ACE
|
d17b5ef4bccf923ec6be5115fabe40f0627dab2d
|
[
"Apache-2.0"
] | 9
|
2019-09-08T13:35:55.000Z
|
2021-01-03T15:23:37.000Z
|
# vim: sw=4:ts=4:et
import fcntl
import gc
import json
import logging
import os
import os.path
import saq
import saq.database
from saq.analysis import Analysis, Observable
from saq.constants import *
from saq.database import get_db_connection, use_db, ALERT, DatabaseSession
from saq.error import report_exception
from saq.modules import AnalysisModule
from sqlalchemy.orm.exc import NoResultFound
# DEPRECATED
class ACEAlertsAnalysis(Analysis):
"""What other alerts have we seen this in?"""
def initialize_details(self):
self.details = []
@property
def jinja_template_path(self):
return "analysis/related_alerts.html"
def generate_summary(self):
if self.details:
return "Related Alerts Analysis ({0} alerts)".format(len(self.details))
return None
class ACEAlertDispositionAnalyzer(AnalysisModule):
"""Cancels any further analysis if the disposition has been set by the analyst."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.target_mode = self.config['target_mode']
def execute_pre_analysis(self):
self.check_disposition()
def execute_threaded(self):
self.check_disposition()
@use_db
def check_disposition(self, db, c):
c.execute("SELECT disposition FROM alerts WHERE uuid = %s", (self.root.uuid,))
row = c.fetchone()
# did the alert vanish from the database?
if row is None:
logging.warning("alert {} seems to have vanished from the database".format(self.root.uuid))
self.engine.cancel_analysis()
disposition = row[0]
if disposition is not None:
logging.info("alert {} has been dispositioned - canceling analysis".format(self.root.uuid))
self.engine.cancel_analysis()
class ACEDetectionAnalyzer(AnalysisModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.target_mode = self.config['target_mode']
def execute_post_analysis(self):
# do not alert on a root that has been whitelisted
if not saq.FORCED_ALERTS and self.root.whitelisted:
logging.debug("{} has been whitelisted".format(self.root))
return True
if saq.FORCED_ALERTS or self.root.has_detections():
logging.info("{} has {} detection points - changing mode to {}".format(
self.root, len(self.root.all_detection_points), self.target_mode))
self.root.analysis_mode = self.target_mode
return True
| 31.962963
| 103
| 0.675937
|
b3e62fc8f8fbf158640d5856fba004f8390e6d5a
| 236
|
py
|
Python
|
wagtail_adminsortable/models.py
|
Lh4cKg/wagtail-admin-sortable
|
4a48e855773913eb3ee74b6f9479bc126d6c4ec4
|
[
"MIT"
] | 4
|
2019-11-20T19:02:49.000Z
|
2020-11-12T13:10:39.000Z
|
wagtail_adminsortable/models.py
|
Lh4cKg/wagtail-admin-sortable
|
4a48e855773913eb3ee74b6f9479bc126d6c4ec4
|
[
"MIT"
] | null | null | null |
wagtail_adminsortable/models.py
|
Lh4cKg/wagtail-admin-sortable
|
4a48e855773913eb3ee74b6f9479bc126d6c4ec4
|
[
"MIT"
] | 4
|
2021-03-22T08:05:32.000Z
|
2022-03-10T17:18:08.000Z
|
from django.db import models
class AdminSortable(models.Model):
order = models.IntegerField(null=True, blank=True, editable=False)
sortable_field = 'order'
class Meta:
abstract = True
ordering = ['order']
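# Illustrative subclass (an assumption, not shipped with this package):
# concrete models inherit the integer `order` field and the default ordering.
#
#     class Article(AdminSortable):
#         title = models.CharField(max_length=255)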
| 21.454545
| 70
| 0.673729
|
a533c2eff86901d3c745879949d1f6451e8ec84a
| 5,208
|
py
|
Python
|
Inference_Pretrained/models.py
|
majedelhelou/BUIFD
|
58ee380546e120195ce02f6e0829dd2e0f2667cf
|
[
"MIT"
] | 11
|
2020-04-26T22:27:22.000Z
|
2021-11-02T14:40:13.000Z
|
Inference_Pretrained/models.py
|
IVRL/BUIFD
|
167bc0319a290ada91f4b991c09d05449dc0685d
|
[
"MIT"
] | 1
|
2021-05-09T12:46:05.000Z
|
2021-05-24T16:35:25.000Z
|
Inference_Pretrained/models.py
|
majedelhelou/BUIFD
|
58ee380546e120195ce02f6e0829dd2e0f2667cf
|
[
"MIT"
] | 5
|
2020-05-02T13:40:14.000Z
|
2021-06-09T16:29:46.000Z
|
import torch
import torch.nn as nn
import os
class DnCNN_RL(nn.Module):
def __init__(self, channels, num_of_layers=17):
super(DnCNN_RL, self).__init__()
self.dncnn = DnCNN(channels=channels, num_of_layers=num_of_layers)
def forward(self, x):
noise = self.dncnn(x)
return noise
class BUIFD(nn.Module):
def __init__(self, channels, num_of_layers=17):
super(BUIFD, self).__init__()
self.dncnn = DnCNN(channels=channels, num_of_layers=num_of_layers)
self.noisecnn = NoiseCNN(channels=channels)
self.FinalFusionLayers = FinalFusionLayers(channels=channels)
def forward(self, x):
noisy_input = x
# PRIOR:
noise = self.dncnn(x)
prior = noisy_input - noise
# NOISE LVL:
noise_level = self.noisecnn(x)
# FUSION:
denoised_image = self.FinalFusionLayers(noisy_input, prior, noise_level)
noise_out = noisy_input - denoised_image
return noise_out, noise_level
class DnCNN(nn.Module):
def __init__(self, channels, num_of_layers=17):
super(DnCNN, self).__init__()
kernel_size = 3
padding = 1
features = 64
layers = []
layers.append(nn.Conv2d(in_channels=channels, out_channels=features, kernel_size=kernel_size, padding=padding, bias=False))
layers.append(nn.ReLU(inplace=True))
for _ in range(num_of_layers-2):
layers.append(nn.Conv2d(in_channels=features, out_channels=features, kernel_size=kernel_size, padding=padding, bias=False))
layers.append(nn.BatchNorm2d(features))
layers.append(nn.ReLU(inplace=True))
layers.append(nn.Conv2d(in_channels=features, out_channels=channels, kernel_size=kernel_size, padding=padding, bias=False))
self.dncnn = nn.Sequential(*layers)
def forward(self, x):
out = self.dncnn(x)
return out
class NoiseCNN(nn.Module):
def __init__(self, channels, num_of_layers=5):
super(NoiseCNN, self).__init__()
kernel_size = 5
padding = 2
features = 64
layers = []
layers.append(nn.Conv2d(in_channels=channels, out_channels=features, kernel_size=kernel_size, padding=padding, bias=False))
layers.append(nn.ReLU(inplace=True))
for _ in range(num_of_layers):
layers.append(nn.Conv2d(in_channels=features, out_channels=features, kernel_size=kernel_size, padding=padding, bias=False))
layers.append(nn.BatchNorm2d(features))
layers.append(nn.ReLU(inplace=True))
layers.append(nn.Conv2d(in_channels=features, out_channels=channels, kernel_size=kernel_size, padding=padding, bias=False))
self.noisecnn = nn.Sequential(*layers)
self.sigmoid_mapping = nn.Sigmoid()
def forward(self, x):
noise_level = self.noisecnn(x)
noise_level = self.sigmoid_mapping(noise_level)
return noise_level
class FinalFusionLayers(nn.Module):
def __init__(self, channels):
super(FinalFusionLayers, self).__init__()
kernel_size = 3
padding = 1
features = 16
dilation = 1
layers = []
layers.append(nn.Conv2d(in_channels=5*channels, out_channels=features, kernel_size=kernel_size, padding=padding, bias=False, dilation=dilation))
layers.append(nn.Conv2d(in_channels=features, out_channels=features, kernel_size=kernel_size, padding=padding, bias=False, dilation=dilation))
layers.append(nn.Conv2d(in_channels=features, out_channels=channels, kernel_size=kernel_size, padding=padding, bias=False, dilation=dilation))
self.fusion_layers = nn.Sequential(*layers)
def forward(self, a, b, c):
noisy_input = a
prior = b
noise_level = c
channel_0 = noisy_input
channel_1 = prior
channel_2 = noise_level
channel_3 = noisy_input * (1-noise_level)
channel_4 = prior * noise_level
x = torch.cat((channel_0, channel_1, channel_2, channel_3, channel_4), 1)
fused_out = self.fusion_layers(x)
return fused_out
class BUIFD_name2(nn.Module):
# this class is the same as BUIFD
# the only difference is the variable name FusionLayers instead of FinalFusionLayers
# this is fixed in the training codes, but not in the paper's pretrained model's naming
# we only keep it here to reuse the exact same pretrained models as in the paper
def __init__(self, channels, num_of_layers=17):
super(BUIFD_name2, self).__init__()
self.dncnn = DnCNN(channels=channels, num_of_layers=num_of_layers)
self.noisecnn = NoiseCNN(channels=channels)
self.FusionLayers = FinalFusionLayers(channels=channels)
def forward(self, x):
noisy_input = x
# PRIOR:
noise = self.dncnn(x)
prior = noisy_input - noise
# NOISE LVL:
noise_level = self.noisecnn(x)
# FUSION:
denoised_image = self.FusionLayers(noisy_input, prior, noise_level)
noise_out = noisy_input - denoised_image
return noise_out, noise_level
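# Hedged usage sketch (not part of the pretrained models): run a dummy noisy
# grayscale batch through the fusion model. The 64x64 input size is an
# assumption for illustration only.
if __name__ == "__main__":
    x = torch.zeros(1, 1, 64, 64)
    model = BUIFD(channels=1, num_of_layers=17)
    noise_out, noise_level = model(x)
    print(noise_out.shape, noise_level.shape)  # both torch.Size([1, 1, 64, 64])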
| 32.962025
| 152
| 0.660714
|
48847f2669865b79b9258a63732c27c179141462
| 1,575
|
py
|
Python
|
234_palindromeLinkedList.py
|
stuti-rastogi/leetcode-python-solutions
|
73593fe642a06a83cde974ba5e6de3a7b396ec84
|
[
"MIT"
] | 4
|
2018-07-24T08:36:42.000Z
|
2019-08-25T17:48:47.000Z
|
234_palindromeLinkedList.py
|
stuti-rastogi/leetcodesolutions
|
73593fe642a06a83cde974ba5e6de3a7b396ec84
|
[
"MIT"
] | null | null | null |
234_palindromeLinkedList.py
|
stuti-rastogi/leetcodesolutions
|
73593fe642a06a83cde974ba5e6de3a7b396ec84
|
[
"MIT"
] | null | null | null |
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def isPalindrome(self, head):
"""
:type head: ListNode
:rtype: bool
"""
# stack = []
# if (not head):
# return True
# curr = head
# while (curr):
# stack.append(curr.val)
# curr = curr.next
# curr = head
# while (curr):
# if (curr.val != stack.pop()):
# return False
# curr = curr.next
# return True
# O(1) space solution
if not head or not head.next:
return True
curr = head
count = 1
# length of list
while curr.next:
curr = curr.next
count = count + 1
# reversing first half of list
p = head
curr = head
half = count / 2
while half > 0:
tmp = p.next
if p != head:
p.next = curr
else:
p.next = None
curr = p
p = tmp
half -= 1
# pointer to beginning of second half
if count % 2 == 0:
secondHalf = p
else:
secondHalf = p.next
# curr was last element of first half
p = curr
while p:
if p.val != secondHalf.val:
return False
p = p.next
secondHalf = secondHalf.next
return True
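# Hedged demo (not part of the LeetCode stub above): build 1->2->2->1 and
# check it. A minimal ListNode is defined here because the real one is only
# given as a comment.
if __name__ == "__main__":
    class ListNode(object):
        def __init__(self, x):
            self.val = x
            self.next = None
    head = ListNode(1)
    head.next = ListNode(2)
    head.next.next = ListNode(2)
    head.next.next.next = ListNode(1)
    print(Solution().isPalindrome(head))  # expected: True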
| 24.230769
| 45
| 0.434921
|
85962af2028761d8f8f20279df4c5d0fda3c6d5a
| 3,020
|
py
|
Python
|
test/Auth_test.py
|
narhen/sensordata-api
|
6941c59c8d8e16b372f866f1f5c6946cdee5d871
|
[
"MIT"
] | null | null | null |
test/Auth_test.py
|
narhen/sensordata-api
|
6941c59c8d8e16b372f866f1f5c6946cdee5d871
|
[
"MIT"
] | null | null | null |
test/Auth_test.py
|
narhen/sensordata-api
|
6941c59c8d8e16b372f866f1f5c6946cdee5d871
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from flask.ext.bcrypt import Bcrypt
from common.db import Storage
import mock
import unittest
from auth import Auth, AuthenticatedUser
class AuthTestCase(unittest.TestCase):
def test_authenticate_success(self):
username, password = "user", "pass"
mock_storage = mock.create_autospec(Storage)
mock_bcrypt = mock.create_autospec(Bcrypt)
auth = Auth(mock_storage, mock_bcrypt)
db_result = {"id": 1, "name": "Name", "password": "pass"}
mock_storage.get_user_by_name_with_password.return_value = db_result
mock_bcrypt.check_password_hash.return_value = True
result = auth.authenticate(username, password)
mock_storage.get_user_by_name_with_password.assert_called_with(username)
mock_bcrypt.check_password_hash.assert_called_with(db_result["password"], password)
self.assertIsInstance(result, AuthenticatedUser)
def test_authenticate_failed_db_query(self):
username, password = "user", "pass"
mock_storage = mock.create_autospec(Storage)
mock_bcrypt = mock.create_autospec(Bcrypt)
auth = Auth(mock_storage, mock_bcrypt)
mock_storage.get_user_by_name_with_password.return_value = None
self.assertIsNone(auth.authenticate(username, password))
def test_authenticate_wrong_password(self):
username, password = "user", "incorrect password"
mock_storage = mock.create_autospec(Storage)
mock_bcrypt = mock.create_autospec(Bcrypt)
auth = Auth(mock_storage, mock_bcrypt)
db_result = {"id": 1, "name": "Name", "password": "correct password"}
mock_storage.get_user_by_name_with_password.return_value = db_result
mock_bcrypt.check_password_hash.return_value = False
result = auth.authenticate(username, password)
mock_storage.get_user_by_name_with_password.assert_called_with(username)
mock_bcrypt.check_password_hash.assert_called_with(db_result["password"], password)
self.assertIsNone(result)
def test_identity_success(self):
username, password = "user", "incorrect password"
mock_storage = mock.create_autospec(Storage)
mock_bcrypt = mock.create_autospec(Bcrypt)
auth = Auth(mock_storage, mock_bcrypt)
db_result = {"id": 1, "name": "Name"}
mock_storage.get_user_by_id.return_value = db_result
result = auth.identity({"identity": 1})
mock_storage.get_user_by_id.assert_called_with(1)
self.assertIsInstance(result, AuthenticatedUser)
def test_identity_failed_db_query(self):
username, password = "user", "incorrect password"
mock_storage = mock.create_autospec(Storage)
mock_bcrypt = mock.create_autospec(Bcrypt)
auth = Auth(mock_storage, mock_bcrypt)
mock_storage.get_user_by_id.return_value = None
result = auth.identity({"identity": 1})
mock_storage.get_user_by_id.assert_called_with(1)
self.assertIsNone(result)
| 39.736842
| 91
| 0.715894
|
61c3c93db87ee6ed0ada07a61363630fd29facf4
| 6,971
|
py
|
Python
|
kubernetes/client/models/v1beta1_daemon_set_list.py
|
pllsxyc/python
|
442ebc019056c2dc246be94f85cf61f1e1d26a88
|
[
"Apache-2.0"
] | 1
|
2019-10-07T13:54:36.000Z
|
2019-10-07T13:54:36.000Z
|
kubernetes/client/models/v1beta1_daemon_set_list.py
|
pllsxyc/python
|
442ebc019056c2dc246be94f85cf61f1e1d26a88
|
[
"Apache-2.0"
] | 8
|
2020-10-28T01:18:36.000Z
|
2021-06-11T01:06:15.000Z
|
kubernetes/client/models/v1beta1_daemon_set_list.py
|
pllsxyc/python
|
442ebc019056c2dc246be94f85cf61f1e1d26a88
|
[
"Apache-2.0"
] | 1
|
2021-03-16T16:05:33.000Z
|
2021-03-16T16:05:33.000Z
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.16
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1beta1DaemonSetList(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'items': 'list[V1beta1DaemonSet]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
"""V1beta1DaemonSetList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""Gets the api_version of this V1beta1DaemonSetList. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1beta1DaemonSetList. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1beta1DaemonSetList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1beta1DaemonSetList. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""Gets the items of this V1beta1DaemonSetList. # noqa: E501
A list of daemon sets. # noqa: E501
:return: The items of this V1beta1DaemonSetList. # noqa: E501
:rtype: list[V1beta1DaemonSet]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this V1beta1DaemonSetList.
A list of daemon sets. # noqa: E501
:param items: The items of this V1beta1DaemonSetList. # noqa: E501
:type: list[V1beta1DaemonSet]
"""
if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
self._items = items
@property
def kind(self):
"""Gets the kind of this V1beta1DaemonSetList. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1beta1DaemonSetList. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1beta1DaemonSetList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1beta1DaemonSetList. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1beta1DaemonSetList. # noqa: E501
:return: The metadata of this V1beta1DaemonSetList. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1beta1DaemonSetList.
:param metadata: The metadata of this V1beta1DaemonSetList. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1DaemonSetList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1DaemonSetList):
return True
return self.to_dict() != other.to_dict()
| 33.839806
| 312
| 0.628174
|
35e272419ca3d9cdb054c4ca3dc2574415ebc83c
| 6,607
|
py
|
Python
|
py3status/modules/spotify.py
|
joernnilsson/py3status
|
a5f5e9663fd510aacd017256728bb06c3b0b6abe
|
[
"BSD-3-Clause"
] | null | null | null |
py3status/modules/spotify.py
|
joernnilsson/py3status
|
a5f5e9663fd510aacd017256728bb06c3b0b6abe
|
[
"BSD-3-Clause"
] | null | null | null |
py3status/modules/spotify.py
|
joernnilsson/py3status
|
a5f5e9663fd510aacd017256728bb06c3b0b6abe
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Display song currently playing in Spotify.
Configuration parameters:
button_next: button to switch to next song (default None)
button_play_pause: button to toggle play/pause (default None)
button_previous: button to switch to previous song (default None)
cache_timeout: how often to update the bar (default 5)
format: see placeholders below (default '{artist} : {title}')
format_down: define output if spotify is not running
(default 'Spotify not running')
format_stopped: define output if spotify is not playing
(default 'Spotify stopped')
sanitize_titles: whether to remove meta data from album/track title
(default True)
sanitize_words: which meta data to remove
*(default ['bonus', 'demo', 'edit', 'explicit', 'extended',
'feat', 'mono', 'remaster', 'stereo', 'version'])*
Format placeholders:
{album} album name
    {artist} artist name (first one)
{time} time duration of the song
{title} name of the song
Color options:
color_offline: Spotify is not running, defaults to color_bad
color_paused: Song is stopped or paused, defaults to color_degraded
color_playing: Song is playing, defaults to color_good
Requires:
spotify: a proprietary music streaming service
Examples:
```
spotify {
button_next = 4
button_play_pause = 1
button_previous = 5
format = "{title} by {artist} -> {time}"
format_down = "no Spotify"
}
```
@author Pierre Guilbert, Jimmy Garpehäll, sondrele, Andrwe
SAMPLE OUTPUT
{'color': '#00FF00', 'full_text': 'Rick Astley : Never Gonna Give You Up'}
paused
{'color': '#FFFF00', 'full_text': 'Rick Astley : Never Gonna Give You Up'}
stopped
{'color': '#FF0000', 'full_text': 'Spotify stopped'}
"""
import dbus
import re
from datetime import timedelta
from time import sleep
SPOTIFY_CMD = """dbus-send --print-reply --dest=org.mpris.MediaPlayer2.spotify
/org/mpris/MediaPlayer2 org.mpris.MediaPlayer2.Player.{cmd}"""
class Py3status:
"""
"""
# available configuration parameters
button_next = None
button_play_pause = None
button_previous = None
cache_timeout = 5
format = "{artist} : {title}"
format_down = "Spotify not running"
format_stopped = "Spotify stopped"
sanitize_titles = True
sanitize_words = [
"bonus",
"demo",
"edit",
"explicit",
"extended",
"feat",
"mono",
"remaster",
"stereo",
"version",
]
def _spotify_cmd(self, action):
return SPOTIFY_CMD.format(cmd=action)
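        # Illustrative result: _spotify_cmd("PlayPause") returns the dbus-send
        # template above with {cmd} replaced by "PlayPause".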
def post_config_hook(self):
"""
"""
# Match string after hyphen, comma, semicolon or slash containing any metadata word
# examples:
# - Remastered 2012
# / Radio Edit
# ; Remastered
self.after_delimiter = self._compile_re(
r"([\-,;/])([^\-,;/])*(META_WORDS_HERE).*"
)
# Match brackets with their content containing any metadata word
# examples:
# (Remastered 2017)
# [Single]
# (Bonus Track)
self.inside_brackets = self._compile_re(
r"([\(\[][^)\]]*?(META_WORDS_HERE)[^)\]]*?[\)\]])"
)
def _compile_re(self, expression):
"""
Compile given regular expression for current sanitize words
"""
meta_words = "|".join(self.sanitize_words)
expression = expression.replace("META_WORDS_HERE", meta_words)
return re.compile(expression, re.IGNORECASE)
def _get_text(self):
"""
        Get the current song metadata (artist - title)
"""
bus = dbus.SessionBus()
try:
self.__bus = bus.get_object(
"org.mpris.MediaPlayer2.spotify", "/org/mpris/MediaPlayer2"
)
self.player = dbus.Interface(self.__bus, "org.freedesktop.DBus.Properties")
try:
metadata = self.player.Get("org.mpris.MediaPlayer2.Player", "Metadata")
album = metadata.get("xesam:album")
artist = metadata.get("xesam:artist")[0]
microtime = metadata.get("mpris:length")
rtime = str(timedelta(microseconds=microtime))[:-7]
title = metadata.get("xesam:title")
if self.sanitize_titles:
album = self._sanitize_title(album)
title = self._sanitize_title(title)
playback_status = self.player.Get(
"org.mpris.MediaPlayer2.Player", "PlaybackStatus"
)
if playback_status.strip() == "Playing":
color = self.py3.COLOR_PLAYING or self.py3.COLOR_GOOD
else:
color = self.py3.COLOR_PAUSED or self.py3.COLOR_DEGRADED
except Exception:
return (
self.format_stopped,
self.py3.COLOR_PAUSED or self.py3.COLOR_DEGRADED,
)
return (
self.py3.safe_format(
self.format,
dict(title=title, artist=artist, album=album, time=rtime),
),
color,
)
except Exception:
return (self.format_down, self.py3.COLOR_OFFLINE or self.py3.COLOR_BAD)
def _sanitize_title(self, title):
"""
        Remove redundant metadata from the title and return it
"""
title = re.sub(self.inside_brackets, "", title)
title = re.sub(self.after_delimiter, "", title)
return title.strip()
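        # Illustrative effect with the default sanitize_words (assumed inputs):
        #   "Never Gonna Give You Up (Remastered 2021)" -> "Never Gonna Give You Up"
        #   "Song Title - Radio Edit"                   -> "Song Title"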
def spotify(self):
"""
Get the current "artist - title" and return it.
"""
(text, color) = self._get_text()
response = {
"cached_until": self.py3.time_in(self.cache_timeout),
"color": color,
"full_text": text,
}
return response
def on_click(self, event):
"""
"""
button = event["button"]
if button == self.button_play_pause:
self.py3.command_run(self._spotify_cmd("PlayPause"))
sleep(0.1)
elif button == self.button_next:
self.py3.command_run(self._spotify_cmd("Next"))
sleep(0.1)
elif button == self.button_previous:
self.py3.command_run(self._spotify_cmd("Previous"))
sleep(0.1)
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
| 31.018779
| 91
| 0.584683
|
86e5c6ec7159d9946594bec8ef433899edf5a1da
| 16,671
|
py
|
Python
|
methlab/shop/models.py
|
LDO-CERT/methlab
|
cf6f30cd1c47727a596ebafba2b1213808bd552c
|
[
"MIT"
] | 4
|
2021-03-17T10:09:31.000Z
|
2021-12-20T22:50:59.000Z
|
methlab/shop/models.py
|
LDO-CERT/methlab
|
cf6f30cd1c47727a596ebafba2b1213808bd552c
|
[
"MIT"
] | 5
|
2021-03-19T13:54:43.000Z
|
2021-10-01T20:21:03.000Z
|
methlab/shop/models.py
|
LDO-CERT/methlab
|
cf6f30cd1c47727a596ebafba2b1213808bd552c
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.db.models import Q
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.db.models.fields.related import ForeignKey
from django.template.defaultfilters import truncatechars
from django.utils.translation import ugettext_lazy as _
from django.utils.text import slugify
# CUSTOM FIELDS
from djgeojson.fields import PointField
from colorfield.fields import ColorField
from django_better_admin_arrayfield.models.fields import ArrayField
# TAGS
from taggit.managers import TaggableManager
from taggit.models import TagBase, GenericTaggedItemBase
# POSTGRES SWEETERS
import django.contrib.postgres.search as pg_search
from django.contrib.postgres.indexes import GinIndex
from django.contrib.postgres.search import (
SearchQuery,
SearchRank,
SearchVector,
TrigramSimilarity,
)
RESPONSE = (
(0, "Unknown"),
(1, "SPAM"),
(2, "HAM"),
(3, "Phishing"),
(4, "Social Engineering"),
(5, "Reconnaissance"),
(6, "BlackMail"),
(7, "CEO SCAM"),
(10, "Safe"),
)
TAXONOMIES = (
(0, "none"),
(1, "info"),
(2, "safe"),
(3, "suspicious"),
(4, "malicious"),
)
class InternalInfo(models.Model):
name = models.CharField(max_length=200)
imap_server = models.CharField(max_length=200)
imap_username = models.CharField(max_length=200)
imap_password = models.CharField(max_length=200)
imap_folder = models.CharField(max_length=200)
cortex_url = models.CharField(max_length=200)
cortex_api = models.CharField(max_length=200)
misp_url = models.CharField(max_length=200, blank=True, null=True)
misp_api = models.CharField(max_length=200, blank=True, null=True)
vip_list = ArrayField(models.CharField(max_length=100), blank=True, null=True)
vip_domain = models.CharField(max_length=200)
mimetype_whitelist = ArrayField(
models.CharField(max_length=100), blank=True, null=True
)
security_emails = ArrayField(models.EmailField(), blank=True, null=True)
honeypot_emails = ArrayField(
models.CharField(max_length=200), blank=True, null=True
)
internal_domains = ArrayField(
models.CharField(max_length=100), blank=True, null=True
)
http_proxy = models.CharField(max_length=200, blank=True, null=True)
https_proxy = models.CharField(max_length=200, blank=True, null=True)
cortex_expiration_days = models.IntegerField(default=30)
whois_expiration_days = models.IntegerField(default=30)
def __str__(self):
return self.name
class Analyzer(models.Model):
PRIORITY = (
(1, "Low"),
(2, "Medium"),
(3, "High"),
)
name = models.CharField(max_length=200, blank=True, null=True)
disabled = models.BooleanField(default=False)
supported_types = ArrayField(models.CharField(max_length=10), blank=True, null=True)
priority = models.PositiveIntegerField(choices=PRIORITY, default=1)
onpremise = models.BooleanField(default=False)
def __str__(self):
return self.name
class Report(models.Model):
response = models.JSONField(blank=True, null=True)
analyzer = models.ForeignKey(
Analyzer, on_delete=models.CASCADE, blank=True, null=True
)
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey("content_type", "object_id")
taxonomies = ArrayField(models.CharField(max_length=50), blank=True, null=True)
success = models.BooleanField(default=False)
date = models.DateField(auto_now_add=True)
class Whois(models.Model):
response = models.JSONField(blank=True, null=True)
date = models.DateField(auto_now_add=True)
def __str__(self):
return self.response
class Whitelist(models.Model):
WL_TYPE = (
("address", "address"),
("domain", "domain"),
("url", "url"),
("ip", "ip"),
("md5", "md5"),
("sha256", "sha256"),
)
value = models.CharField(max_length=1000)
type = models.CharField(max_length=8, choices=WL_TYPE)
class Meta:
constraints = [
models.UniqueConstraint(fields=["value", "type"], name="duplicated_wl")
]
def __str__(self):
return "[{}] {}".format(self.type, self.value)
class Flag(TagBase):
color = ColorField(default="#30357B")
visible = models.BooleanField(default=True)
class Meta:
verbose_name = _("Tag")
verbose_name_plural = _("Tags")
def __str__(self):
return self.name
class CustomTag(GenericTaggedItemBase):
tag = models.ForeignKey(
Flag, related_name="%(app_label)s_%(class)ss", on_delete=models.CASCADE
)
class Attachment(models.Model):
binary = models.BooleanField()
charset = models.CharField(max_length=500, blank=True, null=True)
content_transfer_encoding = models.CharField(max_length=500, blank=True, null=True)
content_disposition = models.TextField(blank=True, null=True)
content_id = models.CharField(max_length=500, blank=True, null=True)
filename = ArrayField(models.CharField(max_length=500), blank=True, null=True)
filepath = models.CharField(max_length=500, blank=True, null=True)
mail_content_type = models.CharField(max_length=500, blank=True, null=True)
md5 = models.CharField(max_length=32, blank=True, null=True, unique=True)
sha1 = models.CharField(max_length=40, blank=True, null=True, unique=True)
sha256 = models.CharField(max_length=64, blank=True, null=True, unique=True)
reports = GenericRelation(Report, related_name="attachments")
tags = TaggableManager(through=CustomTag, blank=True)
whitelisted = models.BooleanField(default=False)
taxonomy = models.IntegerField(default=0, choices=TAXONOMIES)
def __str__(self):
return (
"{} {}".format(self.filename, self.md5)
if self.filename
else "{}".format(self.md5)
)
class Address(models.Model):
name = ArrayField(models.CharField(max_length=500), blank=True, null=True)
address = models.EmailField(unique=True)
domain = models.CharField(max_length=500)
mx_check = models.TextField(blank=True, null=True)
reports = GenericRelation(Report, related_name="addresses")
tags = TaggableManager(through=CustomTag, blank=True)
taxonomy = models.IntegerField(default=0, choices=TAXONOMIES)
class Meta:
verbose_name_plural = "addresses"
def __str__(self):
return self.address if self.address else ""
class Domain(models.Model):
domain = models.CharField(max_length=200)
dig = models.TextField(blank=True, null=True)
whois = ForeignKey(
Whois, related_name="domain", on_delete=models.CASCADE, null=True, blank=True
)
reports = GenericRelation(Report, related_name="domains")
tags = TaggableManager(through=CustomTag, blank=True)
whitelisted = models.BooleanField(default=False)
taxonomy = models.IntegerField(default=0, choices=TAXONOMIES)
def __str__(self):
return self.domain
class Ip(models.Model):
ip = models.GenericIPAddressField()
whois = ForeignKey(
Whois, related_name="ip", on_delete=models.CASCADE, null=True, blank=True
)
reports = GenericRelation(Report, related_name="ips")
tags = TaggableManager(through=CustomTag, blank=True)
whitelisted = models.BooleanField(default=False)
taxonomy = models.IntegerField(default=0, choices=TAXONOMIES)
def __str__(self):
return "{}".format(self.ip)
class Url(models.Model):
url = models.CharField(max_length=2000)
domain = models.ForeignKey(Domain, on_delete=models.CASCADE, null=True, blank=True)
reports = GenericRelation(Report, related_name="urls")
tags = TaggableManager(through=CustomTag, blank=True)
whitelisted = models.BooleanField(default=False)
taxonomy = models.IntegerField(default=0, choices=TAXONOMIES)
def __str__(self):
return self.url
class MailManager(models.Manager):
def get_queryset(self):
return (
super()
.get_queryset()
.exclude(tags__name__in=["SecInc"])
.exclude(subject__isnull=True)
.exclude(subject="")
)
def search(self, search_text):
search_vectors = (
SearchVector("text_plain", weight="A", config="english")
+ SearchVector("text_html", weight="A", config="english")
+ SearchVector("subject", weight="B", config="english")
)
search_query = SearchQuery(search_text)
search_rank = SearchRank(search_vectors, search_query)
subject_tr_si = TrigramSimilarity("subject", search_text)
qs = (
self.get_queryset()
.filter(search_vector=search_query)
.annotate(
rank=search_rank,
similarity=subject_tr_si,
)
.order_by("-rank")
)
return qs
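    # Illustrative usage (assumed): Mail.external_objects.search("invoice") returns
    # non-SecInc mails ranked by full-text relevance on subject/body, with a trigram
    # similarity score against the subject annotated as .similarity.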
class Mail(models.Model):
PROGRESS = (
(0, "new"),
(1, "processing"),
(2, "done"),
)
# WORKFLOW
progress = models.PositiveIntegerField(choices=PROGRESS, default=0)
official_response = models.PositiveIntegerField(choices=RESPONSE, default=0)
assignee = models.ForeignKey(
get_user_model(), on_delete=models.CASCADE, null=True, blank=True
)
# SUBMISSION INFO
parent = models.ForeignKey("self", blank=True, null=True, on_delete=models.CASCADE)
# to sort by submission_date :)
submission_date = models.DateTimeField(blank=True, null=True)
# MAIL INFO
message_id = models.CharField(max_length=1000)
subject = models.CharField(max_length=1000)
slug_subject = models.SlugField(max_length=1000, editable=False, default="")
date = models.DateTimeField(blank=True, null=True)
addresses = models.ManyToManyField(
Address, related_name="addresses", through="Mail_Addresses"
)
received = models.JSONField(blank=True, null=True)
headers = models.JSONField(blank=True, null=True)
text_plain = ArrayField(
models.TextField(blank=True, null=True), blank=True, null=True
)
text_html = ArrayField(
models.TextField(blank=True, null=True), blank=True, null=True
)
text_not_managed = ArrayField(
models.TextField(blank=True, null=True), blank=True, null=True
)
sender_ip_address = models.CharField(max_length=50, blank=True, null=True)
to_domains = ArrayField(models.CharField(max_length=500), blank=True, null=True)
# ADDITIONAL FIELDS
geom = PointField(blank=True, null=True)
dmark = models.TextField(blank=True, null=True)
dkim = models.TextField(blank=True, null=True)
spf = models.TextField(blank=True, null=True)
arc = models.JSONField(blank=True, null=True)
# IOC
ips = models.ManyToManyField(Ip, related_name="ips")
urls = models.ManyToManyField(Url, related_name="urls")
attachments = models.ManyToManyField(Attachment, related_name="attachments")
# TAGS
tags = TaggableManager(through=CustomTag, blank=True)
# STORAGE INFO
eml_path = models.CharField(max_length=500, blank=True, null=True)
attachments_path = models.CharField(max_length=500, blank=True, null=True)
# ATTACHED REPORT
reports = GenericRelation(Report, related_name="mails")
taxonomy = models.IntegerField(default=0, choices=TAXONOMIES)
# SEARCH FIELD
search_vector = pg_search.SearchVectorField(null=True)
objects = models.Manager()
external_objects = MailManager()
    # Search vectors are refreshed only when an existing row is saved (not on the initial insert)
def save(self, *args, **kwargs):
if self._state.adding is False:
self.search_vector = (
SearchVector("text_plain", weight="A", config="english")
+ SearchVector("text_html", weight="A", config="english")
+ SearchVector("subject", weight="B", config="english")
)
self.slug_subject = slugify(self.subject, allow_unicode=True)
super().save(*args, **kwargs)
class Meta:
indexes = [GinIndex(fields=["search_vector"])]
@property
def sender(self):
sender = next(iter(self.mail_addresses_set.from_addresses()), None)
if sender:
flags = Flag.objects.all()
suspicious_tags = [x for x in flags if x.name.find("suspicious") != -1]
malicious_tags = [x for x in flags if x.name.find("malicious") != -1]
return [
sender.address.tags.filter(name__in=suspicious_tags).count(),
sender.address.tags.filter(name__in=malicious_tags).count(),
sender.address,
]
return None
@property
def receivers(self):
try:
info = InternalInfo.objects.first()
except:
info = None
recvs = [
x.address.address
for x in self.mail_addresses_set.all()
if x.field in ["to", "cc", "bcc"]
]
cleaned_recvs = []
if info and info.internal_domains:
for x in recvs:
if "@{}".format(x.split("@")[1]) in info.internal_domains:
cleaned_recvs.append(x.split("@")[0])
else:
cleaned_recvs.append(x)
else:
cleaned_recvs = recvs
return [", ".join(recvs), ", ".join(cleaned_recvs)]
@property
def tos(self):
return self.mail_addresses_set.to_addresses()
@property
def ccs(self):
return self.mail_addresses_set.cc_addresses()
@property
def bccs(self):
return self.mail_addresses_set.bcc_addresses()
@property
def reply(self):
return self.mail_addresses_set.reply_to_addresses()
@property
def short_id(self):
return truncatechars(self.message_id, 15)
@property
def short_subject(self):
return truncatechars(self.subject, 80)
@property
def tag_list(self):
return u", ".join(x.name for x in self.tags.all())
@property
def count_iocs(self):
return self.ips.count() + self.urls.count() + self.attachments.count()
@property
def render_iocs(self):
ips = self.ips.all()
ips_level = max([ip.taxonomy for ip in ips] + [0])
urls = self.urls.all()
urls_level = max([url.taxonomy for url in urls] + [0])
attachments = self.attachments.all()
attachments_level = max(
[attachment.taxonomy for attachment in attachments] + [0]
)
ioc_class = {
0: "bg-light text-dark",
1: "bg-light text-dark",
2: "bg-success",
3: "bg-warning text-dark",
4: "bg-danger",
}
return [
ioc_class[ips_level],
ips.count(),
ioc_class[urls_level],
urls.count(),
ioc_class[attachments_level],
attachments.count(),
]
def __str__(self):
return truncatechars(self.subject, 80) if self.subject else ""
class AddressQueryset(models.QuerySet):
def from_addresses(self):
return self.filter(field="from")
def to_addresses(self):
return self.filter(field="to")
def bcc_addresses(self):
return self.filter(field="bcc")
def cc_addresses(self):
return self.filter(field="cc")
def reply_to_addresses(self):
return self.filter(field="reply_to")
class AddressManager(models.Manager):
def get_queryset(self):
return AddressQueryset(self.model, using=self._db)
def from_addresses(self):
return self.get_queryset().from_addresses()
def to_addresses(self):
return self.get_queryset().to_addresses()
def bcc_addresses(self):
return self.get_queryset().bcc_addresses()
def cc_addresses(self):
return self.get_queryset().cc_addresses()
def reply_to_addresses(self):
return self.get_queryset().reply_to_addresses()
class Mail_Addresses(models.Model):
FIELDS = (
("from", "from"),
("to", "to"),
("bcc", "bcc"),
("cc", "cc"),
("reply_to", "reply_to"),
)
mail = models.ForeignKey(Mail, on_delete=models.CASCADE)
address = models.ForeignKey(Address, on_delete=models.CASCADE)
field = models.CharField(max_length=10, choices=FIELDS)
objects = AddressManager()
def __str__(self):
return "{}".format(self.address.address)
| 31.936782
| 88
| 0.658029
|
d052213245c7080100fd281bfab462530319295c
| 14,101
|
py
|
Python
|
robocorp-python-ls-core/src/robocorp_ls_core/workspace.py
|
anton264/robotframework-lsp
|
6f8f89b88ec56b767f6d5e9cf0d3fb58847e5844
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
robocorp-python-ls-core/src/robocorp_ls_core/workspace.py
|
anton264/robotframework-lsp
|
6f8f89b88ec56b767f6d5e9cf0d3fb58847e5844
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
robocorp-python-ls-core/src/robocorp_ls_core/workspace.py
|
anton264/robotframework-lsp
|
6f8f89b88ec56b767f6d5e9cf0d3fb58847e5844
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Original work Copyright 2017 Palantir Technologies, Inc. (MIT)
# Original work Copyright 2020 Open Law Library. (Apache 2)
# See ThirdPartyNotices.txt in the project root for license information.
# All modifications Copyright (c) Robocorp Technologies Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
from typing import Optional, Dict, List
import robocorp_ls_core # noqa -- for typing.
from robocorp_ls_core import uris
from robocorp_ls_core.basic import implements
from robocorp_ls_core.protocols import IWorkspace, IDocument, IDocumentSelection
from robocorp_ls_core.robotframework_log import get_logger
from robocorp_ls_core.uris import uri_scheme, to_fs_path
import threading
log = get_logger(__name__)
class Workspace(object):
"""
Note: only a single thread can mutate the workspace, but multiple threads
may read from it.
"""
def __init__(self, root_uri, workspace_folders=None) -> None:
from robocorp_ls_core.lsp import WorkspaceFolder
self._main_thread = threading.current_thread()
self._root_uri = root_uri
self._root_uri_scheme = uri_scheme(self._root_uri)
self._root_path = to_fs_path(self._root_uri)
self._folders: Dict[str, WorkspaceFolder] = {}
# Contains the docs with files considered open.
self._docs: Dict[str, IDocument] = {}
# Contains the docs pointing to the filesystem.
self._filesystem_docs: Dict[str, IDocument] = {}
if workspace_folders is not None:
for folder in workspace_folders:
self.add_folder(folder)
if root_uri and root_uri not in self._folders:
as_fs_path = uris.to_fs_path(root_uri)
name = os.path.basename(as_fs_path)
self.add_folder(WorkspaceFolder(root_uri, name))
def _check_in_mutate_thread(self):
curr_thread = threading.current_thread()
if self._main_thread is not curr_thread:
raise AssertionError(
f"Mutating the workspace can only be done at the thread: {self._main_thread}. Current thread: {curr_thread}"
)
def _create_document(self, doc_uri, source=None, version=None):
return Document(doc_uri, source=source, version=version)
def add_folder(self, folder):
"""
:param WorkspaceFolder folder:
"""
self._check_in_mutate_thread()
folders = self._folders.copy()
folders[folder.uri] = folder
self._folders = folders
def remove_folder(self, folder_uri):
self._check_in_mutate_thread()
folders = self._folders.copy()
folders.pop(folder_uri, None)
self._folders = folders
@implements(IWorkspace.iter_documents)
def iter_documents(self):
self._check_in_mutate_thread() # i.e.: we don't really mutate here, but this is not thread safe.
return self._docs.values()
@implements(IWorkspace.iter_folders)
def iter_folders(self):
return (
self._folders.values()
) # Ok, thread-safe (folders are always set as a whole)
@implements(IWorkspace.get_folder_paths)
def get_folder_paths(self) -> List[str]:
folders = self._folders # Ok, thread-safe (folders are always set as a whole)
return [uris.to_fs_path(ws_folder.uri) for ws_folder in folders.values()]
@implements(IWorkspace.get_document)
def get_document(self, doc_uri: str, accept_from_file: bool) -> Optional[IDocument]:
# Ok, thread-safe (does not mutate the _docs dict -- contents in the _filesystem_docs
# may end up stale or we may have multiple loads when we wouldn't need,
# but that should be ok).
doc = self._docs.get(doc_uri)
if doc is not None:
return doc
if accept_from_file:
doc = self._filesystem_docs.get(doc_uri)
if doc is not None:
if not doc.is_source_in_sync():
self._filesystem_docs.pop(doc_uri, None)
doc = None
if doc is None:
doc = self._create_document(doc_uri)
try:
_source = doc.source # Force loading current contents
except:
# Unable to load contents: file does not exist.
doc = None
else:
self._filesystem_docs[doc_uri] = doc
return doc
def is_local(self):
# Thread-safe (only accesses immutable data).
return (
self._root_uri_scheme == "" or self._root_uri_scheme == "file"
) and os.path.exists(self._root_path)
@implements(IWorkspace.put_document)
def put_document(
self, text_document: "robocorp_ls_core.lsp.TextDocumentItem"
) -> IDocument:
self._check_in_mutate_thread()
doc_uri = text_document.uri
doc = self._docs[doc_uri] = self._create_document(
doc_uri, source=text_document.text, version=text_document.version
)
try:
# In case the initial text wasn't passed, try to load it from source.
# If it doesn't work, set the initial source as empty.
_source = doc.source
except:
doc.source = ""
self._filesystem_docs.pop(doc_uri, None)
return doc
@implements(IWorkspace.remove_document)
def remove_document(self, uri: str) -> None:
self._check_in_mutate_thread()
self._docs.pop(uri, None)
@property
def root_path(self):
# Thread-safe (only accesses immutable data).
return self._root_path
@property
def root_uri(self):
# Thread-safe (only accesses immutable data).
return self._root_uri
def update_document(self, text_doc, change):
"""
:param TextDocumentItem text_doc:
:param TextDocumentContentChangeEvent change:
"""
self._check_in_mutate_thread()
doc_uri = text_doc["uri"]
doc = self._docs[doc_uri]
# Note: don't mutate an existing doc, always create a new one based on it
        # (so, existing references won't hit race conditions).
new_doc = self._create_document(doc_uri, doc.source, text_doc["version"])
new_doc.apply_change(change)
self._docs[doc_uri] = new_doc
def __typecheckself__(self) -> None:
from robocorp_ls_core.protocols import check_implements
_: IWorkspace = check_implements(self)
class Document(object):
"""
Note: the doc isn't inherently thread-safe, so, the workspace should create
a new document instead of mutating the source.
Everything else (apart from changing the source) should be thread-safe
(even without locks -- sometimes we may end up calculating things more than
once, but that should not corrupt internal structures).
"""
def __init__(self, uri: str, source=None, version: Optional[str] = None):
self._main_thread = threading.current_thread()
self.uri = uri
self.version = version
self.path = uris.to_fs_path(uri) # Note: may be None.
self._source = source
self.__line_start_offsets = None
# Only set when the source is read from disk.
self._source_mtime = -1
def _check_in_mutate_thread(self):
curr_thread = threading.current_thread()
if self._main_thread is not curr_thread:
raise AssertionError(
f"Mutating the document can only be done at the thread: {self._main_thread}. Current thread: {curr_thread}"
)
def __str__(self):
return str(self.uri)
def __len__(self):
return len(self.source)
def __bool__(self):
return True
__nonzero__ = __bool__
def selection(self, line, col) -> IDocumentSelection:
from robocorp_ls_core.document_selection import DocumentSelection
return DocumentSelection(self, line, col)
@property
def _source(self) -> str:
return self.__source
@_source.setter
def _source(self, source: str) -> None:
# i.e.: when the source is set, reset the lines.
self._check_in_mutate_thread()
self.__source = source
self._clear_caches()
def _clear_caches(self):
self._check_in_mutate_thread()
self.__lines = None
self.__line_start_offsets = None
@property
def _lines(self):
lines = self.__lines
if lines is None:
lines = self.__lines = tuple(self.source.splitlines(True))
return lines
def get_internal_lines(self):
return self._lines
def iter_lines(self, keep_ends=True):
lines = self._lines
for line in lines:
if keep_ends:
yield line
else:
yield line.rstrip("\r\n")
# If the last line ends with a new line, yield the final empty string.
if line.endswith("\r") or line.endswith("\n"):
yield ""
def _compute_line_start_offsets(self):
line_start_offset_to_info = self.__line_start_offsets
if line_start_offset_to_info is None:
line_start_offset_to_info = []
offset = 0
for line in self.iter_lines():
line_start_offset_to_info.append(offset)
offset += len(line)
self.__line_start_offsets = line_start_offset_to_info
return line_start_offset_to_info
def offset_to_line_col(self, offset):
if offset < 0:
raise ValueError("Expected offset to be >0. Found: %s" % (offset,))
import bisect
line_start_offset_to_info = self._compute_line_start_offsets()
i_line = bisect.bisect_left(line_start_offset_to_info, offset)
if (
i_line >= len(line_start_offset_to_info)
or line_start_offset_to_info[i_line] > offset
):
i_line -= 1
line_start_offset = line_start_offset_to_info[i_line]
return (i_line, offset - line_start_offset)
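        # Worked example (illustrative): for a document whose source is "ab\ncd",
        # the line start offsets are [0, 3], so offset_to_line_col(3) -> (1, 0).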
def _load_source(self, mtime=None):
self._check_in_mutate_thread()
if mtime is None:
mtime = os.path.getmtime(self.path)
self._source_mtime = mtime
with io.open(self.path, "r", encoding="utf-8") as f:
self._source = f.read()
@implements(IDocument.is_source_in_sync)
def is_source_in_sync(self):
try:
mtime = os.path.getmtime(self.path)
return self._source_mtime == mtime
except Exception:
log.info("Unable to get mtime for: %s", self.path)
return False
@property
def source(self):
if self._source is None:
self._load_source()
return self._source
@source.setter
def source(self, source):
self._source = source
@implements(IDocument.get_line)
def get_line(self, line: int) -> str:
try:
return self._lines[line].rstrip("\r\n")
except IndexError:
return ""
def get_last_line(self):
try:
last_line = self._lines[-1]
if last_line.endswith("\r") or last_line.endswith("\n"):
return ""
return last_line
except IndexError:
return ""
def get_last_line_col(self):
lines = self._lines
if not lines:
return (0, 0)
else:
last_line = lines[-1]
if last_line.endswith("\r") or last_line.endswith("\n"):
return len(lines), 0
return len(lines) - 1, len(last_line)
def get_line_count(self):
lines = self._lines
return len(lines)
def apply_change(self, change):
"""Apply a change to the document."""
self._check_in_mutate_thread()
text = change["text"]
change_range = change.get("range")
self._apply_change(change_range, text)
def _apply_change(self, change_range, text):
self._check_in_mutate_thread()
if not change_range:
# The whole file has changed
self._source = text
return
start_line = change_range["start"]["line"]
start_col = change_range["start"]["character"]
end_line = change_range["end"]["line"]
end_col = change_range["end"]["character"]
# Check for an edit occurring at the very end of the file
if start_line == len(self._lines):
self._source = self.source + text
return
new = io.StringIO()
# Iterate over the existing document until we hit the edit range,
# at which point we write the new text, then loop until we hit
# the end of the range and continue writing.
for i, line in enumerate(self._lines):
if i < start_line:
new.write(line)
continue
if i > end_line:
new.write(line)
continue
if i == start_line:
new.write(line[:start_col])
new.write(text)
if i == end_line:
new.write(line[end_col:])
self._source = new.getvalue()
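        # Illustrative example (assumed LSP-style change): applying
        #   {"range": {"start": {"line": 0, "character": 1},
        #              "end": {"line": 0, "character": 2}}, "text": "X"}
        # to a document whose source is "abc" yields "aXc".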
def apply_text_edits(self, text_edits):
self._check_in_mutate_thread()
for text_edit in reversed(text_edits):
self._apply_change(text_edit["range"], text_edit["newText"])
def __typecheckself__(self) -> None:
from robocorp_ls_core.protocols import check_implements
_: IDocument = check_implements(self)
| 33.257075
| 124
| 0.628821
|
40b576cd90b3010b534cb969dd47fc94c27a6265
| 347
|
py
|
Python
|
sampi/sdes/util.py
|
Jonathan-Lindbloom/sampi
|
2953e19d60df77b617779daaf90712f4f1099ff8
|
[
"MIT"
] | null | null | null |
sampi/sdes/util.py
|
Jonathan-Lindbloom/sampi
|
2953e19d60df77b617779daaf90712f4f1099ff8
|
[
"MIT"
] | null | null | null |
sampi/sdes/util.py
|
Jonathan-Lindbloom/sampi
|
2953e19d60df77b617779daaf90712f4f1099ff8
|
[
"MIT"
] | null | null | null |
import numpy as np
def draw_brownian(t, nsamps=1):
"""Generates an array of sample paths of brownian motion over the given time index. Assumes t0 = 0.
"""
wt = np.random.randn(len(t), nsamps)
wt[0, :] = 0
wt[1:,:] *= np.sqrt(np.repeat(np.diff(t)[:, np.newaxis], nsamps, axis=1))
wt = wt.cumsum(axis=0)
return wt
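if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module); the grid and sample
    # count below are arbitrary illustrative values.
    t = np.linspace(0.0, 1.0, 101)
    paths = draw_brownian(t, nsamps=5)
    print(paths.shape)  # (101, 5): each column is one Brownian path starting at 0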
| 20.411765
| 103
| 0.608069
|
419f65f48f536c4001d7d49e4f9d3d7e38e05f77
| 571
|
py
|
Python
|
yanmark42/prog_in_py/ex_1/maxbw2numb.py
|
oss-python/Exercises-py
|
52694b6f87b7cf9bb817dfa8410332c24c51eda2
|
[
"MIT"
] | null | null | null |
yanmark42/prog_in_py/ex_1/maxbw2numb.py
|
oss-python/Exercises-py
|
52694b6f87b7cf9bb817dfa8410332c24c51eda2
|
[
"MIT"
] | 23
|
2020-04-29T10:47:34.000Z
|
2020-09-12T12:17:38.000Z
|
yanmark42/prog_in_py/ex_1/maxbw2numb.py
|
oss-python/exercises-py
|
52694b6f87b7cf9bb817dfa8410332c24c51eda2
|
[
"Unlicense"
] | 4
|
2020-04-04T09:18:06.000Z
|
2020-04-16T21:56:43.000Z
|
# Given two numbers, the program prints the greater one
# Input is validated with try/except so that only numbers are accepted, not letters or symbols
print('Input 2 numbers\n')
while True:
try:
a = int(input('The first: '))
break
except ValueError: print("Just numbers!")
while True:
try:
b = int(input('The second: '))
break
except ValueError: print("Just numbers!")
if (a > b):
print(a, 'is the highest number')
elif (a < b):
print (b, 'is the highest number')
else:
print ('The numbers are equal')
| 21.961538
| 90
| 0.611208
|
ebcd2c4bdd403681190e36f0637197ef977eedc9
| 661
|
py
|
Python
|
001132StepikITclassPy/Stepik001132ITclassPyсh06_loopWhile_p01st05TASK04_20210224_secrets.py
|
SafonovMikhail/python_000577
|
739f764e80f1ca354386f00b8e9db1df8c96531d
|
[
"Apache-2.0"
] | null | null | null |
001132StepikITclassPy/Stepik001132ITclassPyсh06_loopWhile_p01st05TASK04_20210224_secrets.py
|
SafonovMikhail/python_000577
|
739f764e80f1ca354386f00b8e9db1df8c96531d
|
[
"Apache-2.0"
] | null | null | null |
001132StepikITclassPy/Stepik001132ITclassPyсh06_loopWhile_p01st05TASK04_20210224_secrets.py
|
SafonovMikhail/python_000577
|
739f764e80f1ca354386f00b8e9db1df8c96531d
|
[
"Apache-2.0"
] | null | null | null |
'''
Write a program that asks for a password and does not move on to the main part until the correct password is entered. The main part prints "secret information" to the screen.
Sample Input 1:
1501
Sample Output 1:
Enter the password:
Password correct!
Secret information: I study in an IT class.
Sample Input 2:
0115
1501
Sample Output 2:
Enter the password:
Wrong password!
Enter the password:
Password correct!
Secret information: I study in an IT class.
'''
print('Enter the password:')
s = input()
while s != '1501':
    print('Wrong password!')
    print('Enter the password:')
    s = input()
print('Password correct!')
print('Secret information: I study in an IT class.')
| 20.65625
| 184
| 0.73525
|
341e21c4d05db72ffdc7bcd347d876032d63b87d
| 4,405
|
py
|
Python
|
city_scrapers/spiders/chi_police_retirement.py
|
sameerchandra/city-scrapers
|
c6e466a06f610e56fa876b6e93a53a347d732536
|
[
"MIT"
] | 1
|
2019-03-18T03:12:25.000Z
|
2019-03-18T03:12:25.000Z
|
city_scrapers/spiders/chi_police_retirement.py
|
sameerchandra/city-scrapers
|
c6e466a06f610e56fa876b6e93a53a347d732536
|
[
"MIT"
] | 1
|
2019-10-05T04:05:48.000Z
|
2019-10-05T04:05:48.000Z
|
city_scrapers/spiders/chi_police_retirement.py
|
firejava/city-scrapers
|
749f40bf1bd933726768d7d67e5211aef13af547
|
[
"MIT"
] | null | null | null |
import re
from datetime import datetime
from city_scrapers_core.constants import BOARD, COMMITTEE
from city_scrapers_core.items import Meeting
from city_scrapers_core.spiders import CityScrapersSpider
class ChiPoliceRetirementSpider(CityScrapersSpider):
name = "chi_police_retirement"
agency = "Policemen's Annuity and Benefit Fund of Chicago"
timezone = "America/Chicago"
allowed_domains = ["www.chipabf.org"]
start_urls = ["http://www.chipabf.org/ChicagoPolicePension/MonthlyMeetings.html"]
TAG_RE = re.compile(r'<[^>]+>')
def parse(self, response):
year = self._parse_year(response)
board_items = response.xpath('//*[@id="content0"]/div[3]/table').extract()[0].split('<tr>')
invest_items = response.xpath('//*[@id="content0"]/div[2]/table').extract()[0].split('<tr>')
date_items = board_items + invest_items
for date_item in date_items:
if 'table border' in date_item or 'NONE' in date_item:
continue
meeting = Meeting(
title=self._parse_title(date_item),
description='',
classification=self._parse_classification(date_item),
start=self._parse_start(date_item, year),
end=None,
time_notes="",
all_day=False,
location=self._parse_location(),
source=self._parse_source(response),
links=self._parse_links(date_item, response)
)
meeting["status"] = self._get_status(meeting)
meeting["id"] = self._get_id(meeting)
yield meeting
def _parse_title(self, date_item):
"""Parse or generate meeting title."""
if 'Board' in date_item:
return 'Retirement Board'
else:
return 'Investment Committee'
def _parse_classification(self, date_item):
"""Parse or generate classification from allowed options."""
if 'Board' in date_item:
return BOARD
else:
return COMMITTEE
def _parse_start(self, date_item, year):
start = self._get_date_string(date_item, year)
return datetime.strptime(start, '%B %d %Y %I%p')
# see here for address: http://www.chipabf.org/ChicagoPolicePension/aboutus.html
def _parse_location(self):
"""Parse or generate location."""
return {
"address": "221 North LaSalle Street, Suite 1626,"
" Chicago, Illinois 60601-1203",
"name": "Policemen's Annuity and Benefit Fund",
}
def _parse_source(self, response):
"""Parse or generate source."""
return response.url
def _parse_links(self, date_item, response):
links = []
raw_links = date_item.split('a href')
if len(raw_links) > 1:
raw_agenda = raw_links[1]
file_path = re.search(r'\"(.+?)\"', raw_agenda).group(1)
title = re.search(r'\>(.+?)\<', self._clean_escape_chars(raw_agenda)).group(1).strip()
agenda = {"href": response.urljoin(file_path), "title": title}
links.append(agenda)
if len(raw_links) > 2:
raw_minutes = raw_links[2]
file_path = re.search(r'\"(.+?)\"', raw_minutes).group(1)
title = re.search(r'\>(.+?)\<', raw_minutes).group(1).strip()
minutes = {"href": response.urljoin(file_path), "title": title}
links.append(minutes)
return links
def _parse_year(self, response):
return response.xpath('//*[@id="content0"]/div[3]/h2[1]/text()').extract()[0][:4]
def _clean_html_tags(self, date_item):
date_item = date_item.replace('<br>', ' ')
return self.TAG_RE.sub('', date_item).strip()
def _clean_escape_chars(self, s, space=False):
d_tab = s.replace('\t', '')
d_newl = d_tab.replace('\n', '')
if not space:
clean_s = d_newl.replace('\r', '')
else:
clean_s = d_newl.replace('\r', ' ')
return clean_s
def _get_date_string(self, date_item, year):
no_tags = self._clean_html_tags(date_item)
date_pieces = no_tags.split()[-5:]
date_pieces[1] = ''.join([num for num in date_pieces[1] if num.isdigit()])
date = ' '.join(date_pieces[0:2]) + ' ' + year + ' ' + date_pieces[2]
return date
| 37.974138
| 100
| 0.592054
|
29f6277105d71d2982c128b796f88e0bbe94322a
| 6,099
|
py
|
Python
|
src/dispatch/plugins/dispatch_google/groups/plugin.py
|
mclueppers/dispatch
|
b9e524ca10e5b2e95490b388db61c58e79e975e2
|
[
"Apache-2.0"
] | 1
|
2020-07-20T23:03:51.000Z
|
2020-07-20T23:03:51.000Z
|
src/dispatch/plugins/dispatch_google/groups/plugin.py
|
mclueppers/dispatch
|
b9e524ca10e5b2e95490b388db61c58e79e975e2
|
[
"Apache-2.0"
] | null | null | null |
src/dispatch/plugins/dispatch_google/groups/plugin.py
|
mclueppers/dispatch
|
b9e524ca10e5b2e95490b388db61c58e79e975e2
|
[
"Apache-2.0"
] | null | null | null |
"""
.. module: dispatch.plugins.dispatch_google_groups.plugin
:platform: Unix
:copyright: (c) 2019 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
"""
import logging
import time
from typing import Any, List
from googleapiclient.errors import HttpError
from tenacity import TryAgain, retry, retry_if_exception_type, stop_after_attempt, wait_exponential
from dispatch.decorators import apply, counter, timer
from dispatch.plugins.bases import ParticipantGroupPlugin
from dispatch.plugins.dispatch_google import groups as google_group_plugin
from dispatch.plugins.dispatch_google.common import get_service
from dispatch.plugins.dispatch_google.config import GOOGLE_USER_OVERRIDE, GOOGLE_DOMAIN
log = logging.getLogger(__name__)
@retry(
stop=stop_after_attempt(3),
retry=retry_if_exception_type(TryAgain),
wait=wait_exponential(multiplier=1, min=2, max=5),
)
def make_call(client: Any, func: Any, delay: int = None, propagate_errors: bool = False, **kwargs):
"""Make an google client api call."""
try:
data = getattr(client, func)(**kwargs).execute()
if delay:
time.sleep(delay)
return data
except HttpError as e:
if e.resp.status in [409]:
log.error(e.content.decode())
if propagate_errors:
raise e
raise TryAgain
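# Illustrative call (the group key is an assumed example value):
#   make_call(client.groups(), "get", groupKey="team@example.com")
# invokes client.groups().get(groupKey=...).execute(); unless propagate_errors is set,
# HttpErrors are converted to TryAgain and retried up to 3 times with exponential backoff.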
def expand_group(client: Any, group_key: str):
"""Determines if an email is really a dl."""
# NOTE: Google Groups does not support other DLs as Group owners
# https://stackoverflow.com/questions/31552146/group-as-owner-or-manager-fails-with-400-error
try:
response = list_members(client, group_key, propagate_errors=True)
if response.get("members"):
return [x["email"] for x in response.get("members", [])]
except HttpError as e:
if e.resp.status == 404:
pass
return []
def add_member(client: Any, group_key: str, email: str, role: str):
"""Adds a member to a google group."""
members = [email]
if role == "OWNER":
members = expand_group(client, group_key)
for m in members:
body = {"email": m, "role": role}
if GOOGLE_USER_OVERRIDE:
log.warning("GOOGLE_USER_OVERIDE set. Using override.")
body["email"] = GOOGLE_USER_OVERRIDE
try:
make_call(
client.members(), "insert", groupKey=group_key, body=body, propagate_errors=True
)
except HttpError as e:
# we are okay with duplication errors upon insert
if e.resp.status in [409]:
continue
log.debug(f"Error adding group member. GroupKey={group_key} Body={body} ")
def remove_member(client: Any, group_key: str, email: str):
"""Removes member from google group."""
return make_call(client.members(), "delete", groupKey=group_key, memberKey=email)
def list_members(client: Any, group_key: str, **kwargs):
"""Lists all members of google group."""
return make_call(client.members(), "list", groupKey=group_key, **kwargs)
def create_group(client: Any, name: str, email: str, description: str):
"""Creates a new google group."""
body = {"email": email, "name": name, "adminCreated": False, "description": description}
return make_call(client.groups(), "insert", body=body, delay=3)
def delete_group(client: Any, group_key: str, **kwargs):
"""Delete a google group."""
return make_call(client.groups(), "delete", groupKey=group_key, **kwargs)
def list_groups(client: Any, **kwargs):
"""Lists all google groups available."""
return make_call(client.groups(), "list", **kwargs)
@apply(timer, exclude=["__init__"])
@apply(counter, exclude=["__init__"])
class GoogleGroupParticipantGroupPlugin(ParticipantGroupPlugin):
title = "Google Group Plugin - Participant Group Management"
slug = "google-group-participant-group"
description = "Uses Google Groups to help manage participant membership."
version = google_group_plugin.__version__
author = "Netflix"
author_url = "https://github.com/netflix/dispatch.git"
_schema = None
def __init__(self):
self.scopes = [
"https://www.googleapis.com/auth/admin.directory.group",
"https://www.googleapis.com/auth/apps.groups.settings",
]
def create(
self, name: str, participants: List[str], description: str = None, role: str = "MEMBER"
):
"""Creates a new Google Group."""
client = get_service("admin", "directory_v1", self.scopes)
group_key = f"{name.lower()}@{GOOGLE_DOMAIN}"
if not description:
description = "Group automatically created by Dispatch."
group = create_group(client, name, group_key, description)
for p in participants:
add_member(client, group_key, p, role)
group.update(
{
"weblink": f"https://groups.google.com/a/{GOOGLE_DOMAIN}/forum/#!forum/{group['name']}"
}
)
return group
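    # Illustrative outcome (assuming GOOGLE_DOMAIN is "example.com"):
    # create("incident-123", ["alice@example.com"]) creates incident-123@example.com,
    # adds alice as a MEMBER, and returns the group dict with a "weblink" key pointing
    # at the corresponding Google Groups forum URL.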
def add(self, email: str, participants: List[str], role: str = "MEMBER"):
"""Adds participants to an existing Google Group."""
client = get_service("admin", "directory_v1", self.scopes)
for p in participants:
add_member(client, email, p, role)
def remove(self, email: str, participants: List[str]):
"""Removes participants from an existing Google Group."""
client = get_service("admin", "directory_v1", self.scopes)
for p in participants:
remove_member(client, email, p)
def list(self, email: str):
"""Lists members from an existing Google Group."""
client = get_service("admin", "directory_v1", self.scopes)
members = list_members(client, email)
return [m["email"] for m in members["members"]]
def delete(self, email: str):
"""Deletes an existing Google group."""
client = get_service("admin", "directory_v1", self.scopes)
delete_group(client, email)
| 34.851429
| 103
| 0.65355
|
f33d08dbfb5f9e6395dd92aa0d3d8a1576764f03
| 14,035
|
py
|
Python
|
Orio/orio/module/loop/submodule/opencl/opencl.py
|
HPCL/nametbd
|
1b588cd6ce94ab39a8ba6f89d9eb64e1d3726af5
|
[
"MIT"
] | null | null | null |
Orio/orio/module/loop/submodule/opencl/opencl.py
|
HPCL/nametbd
|
1b588cd6ce94ab39a8ba6f89d9eb64e1d3726af5
|
[
"MIT"
] | null | null | null |
Orio/orio/module/loop/submodule/opencl/opencl.py
|
HPCL/nametbd
|
1b588cd6ce94ab39a8ba6f89d9eb64e1d3726af5
|
[
"MIT"
] | null | null | null |
#
# Loop transformation submodule that implements OpenCL kernel generation
#
import os, ast
import orio.module.loop.submodule.submodule
import orio.main.util.globals as g
import transformation
OPENCL_DEVICE_QUERY_SKELET = r'''
#include <stdio.h>
#include <stdlib.h>
#ifdef __APPLE__
#include <OpenCL/opencl.h>
#else
#include <CL/cl.h>
#endif
#define BProp(dev, name) printBProp(dev, #name, name)
#define UIProp(dev, name) printUIProp(dev, #name, name)
#define ULProp(dev, name) printULProp(dev, #name, name)
#define SizeProp(dev, name) printSizeProp(dev, #name, name)
#define SProp(dev, name) printSProp(dev, #name, name)
void printBProp(cl_device_id dev, char * name, cl_device_info prop) {
cl_bool boolValue;
clGetDeviceInfo(dev, prop, sizeof(cl_bool), &boolValue, NULL);
const char * v = boolValue ? "True" : "False";
printf("'%s',%s\n", name, v);
}
void printUIProp(cl_device_id dev, char * name, cl_device_info prop) {
cl_uint uintValue;
clGetDeviceInfo(dev, prop, sizeof(cl_uint), &uintValue, NULL);
printf("'%s',%u\n", name, uintValue);
}
void printULProp(cl_device_id dev, char * name, cl_device_info prop) {
cl_ulong ulongValue;
clGetDeviceInfo(dev, prop, sizeof(cl_ulong), &ulongValue, NULL);
printf("'%s',%llu\n", name, ulongValue);
}
void printSizeProp(cl_device_id dev, char * name, cl_device_info prop) {
size_t sizeValue;
clGetDeviceInfo(dev, prop, sizeof(size_t), &sizeValue, NULL);
printf("'%s',%zu\n", name, sizeValue);
}
void printSProp(cl_device_id dev, char * name, cl_device_info prop) {
size_t valueSize;
char * charValue;
clGetDeviceInfo(dev, prop, 0, NULL, &valueSize);
charValue = (char*) malloc(valueSize);
clGetDeviceInfo(dev, prop, valueSize, charValue, NULL);
printf("'%s','%s'\n", name, charValue);
free(charValue);
}
int main() {
cl_uint i, j;
char* info;
size_t infoSize;
cl_uint platformCount;
cl_platform_id *platforms;
const char* attributeNames[5] = { "CL_PLATFORM_NAME", "CL_PLATFORM_VENDOR",
"CL_PLATFORM_VERSION", "CL_PLATFORM_PROFILE", "CL_PLATFORM_EXTENSIONS" };
const cl_platform_info attributeTypes[5] = { CL_PLATFORM_NAME, CL_PLATFORM_VENDOR,
CL_PLATFORM_VERSION, CL_PLATFORM_PROFILE, CL_PLATFORM_EXTENSIONS };
const size_t attributeCount = sizeof(attributeNames) / sizeof(char*);
clGetPlatformIDs(5, NULL, &platformCount);
platforms = (cl_platform_id*) malloc(sizeof(cl_platform_id) * platformCount);
clGetPlatformIDs(platformCount, platforms, NULL);
// Platforms
for (i = 0; i < platformCount; i++) {
printf("'PLATFORM',%d\n", i);
for (j = 0; j < attributeCount; j++) {
clGetPlatformInfo(platforms[i], attributeTypes[j], 0, NULL, &infoSize);
info = (char*) malloc(infoSize);
clGetPlatformInfo(platforms[i], attributeTypes[j], infoSize, info, NULL);
printf("'%s','%s'\n", attributeNames[j], info);
free(info);
}
// Devices
cl_uint deviceCount;
cl_device_id* devices;
clGetDeviceIDs(platforms[i], CL_DEVICE_TYPE_ALL, 0, NULL, &deviceCount);
devices = (cl_device_id *) malloc(sizeof(cl_device_id) * deviceCount);
clGetDeviceIDs(platforms[i], CL_DEVICE_TYPE_ALL, deviceCount, devices, NULL);
cl_uint d;
for(d = 0; d < deviceCount; d++) {
printf("'DEVICE','%d'\n", d);
SProp(devices[d], CL_DEVICE_NAME);
SProp(devices[d], CL_DEVICE_VERSION);
SProp(devices[d], CL_DEVICE_OPENCL_C_VERSION);
SProp(devices[d], CL_DRIVER_VERSION);
UIProp(devices[d], CL_DEVICE_ADDRESS_BITS);
BProp(devices[d], CL_DEVICE_AVAILABLE);
BProp(devices[d], CL_DEVICE_COMPILER_AVAILABLE);
BProp(devices[d], CL_DEVICE_ENDIAN_LITTLE);
BProp(devices[d], CL_DEVICE_ERROR_CORRECTION_SUPPORT);
SProp(devices[d], CL_DEVICE_EXTENSIONS);
ULProp(devices[d], CL_DEVICE_GLOBAL_MEM_CACHE_SIZE);
UIProp(devices[d], CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE);
ULProp(devices[d], CL_DEVICE_GLOBAL_MEM_SIZE);
BProp(devices[d], CL_DEVICE_IMAGE_SUPPORT);
SizeProp(devices[d], CL_DEVICE_IMAGE2D_MAX_HEIGHT);
SizeProp(devices[d], CL_DEVICE_IMAGE2D_MAX_WIDTH);
SizeProp(devices[d], CL_DEVICE_IMAGE3D_MAX_DEPTH);
SizeProp(devices[d], CL_DEVICE_IMAGE3D_MAX_HEIGHT);
SizeProp(devices[d], CL_DEVICE_IMAGE3D_MAX_WIDTH);
ULProp(devices[d], CL_DEVICE_LOCAL_MEM_SIZE);
UIProp(devices[d], CL_DEVICE_MAX_CLOCK_FREQUENCY);
UIProp(devices[d], CL_DEVICE_MAX_COMPUTE_UNITS);
UIProp(devices[d], CL_DEVICE_MAX_CONSTANT_ARGS);
ULProp(devices[d], CL_DEVICE_MAX_CONSTANT_BUFFER_SIZE);
ULProp(devices[d], CL_DEVICE_MAX_MEM_ALLOC_SIZE);
SizeProp(devices[d], CL_DEVICE_MAX_PARAMETER_SIZE);
UIProp(devices[d], CL_DEVICE_MAX_READ_IMAGE_ARGS);
UIProp(devices[d], CL_DEVICE_MAX_SAMPLERS);
SizeProp(devices[d], CL_DEVICE_MAX_WORK_GROUP_SIZE);
UIProp(devices[d], CL_DEVICE_MAX_WORK_ITEM_DIMENSIONS);
cl_uint maxDim;
clGetDeviceInfo(devices[d], CL_DEVICE_MAX_WORK_ITEM_DIMENSIONS, sizeof(cl_uint), &maxDim, NULL);
size_t * itemSizes = malloc(sizeof(size_t) * maxDim);
clGetDeviceInfo(devices[d], CL_DEVICE_MAX_WORK_ITEM_SIZES, sizeof(size_t)*maxDim, itemSizes, NULL);
printf("'CL_DEVICE_MAX_WORK_ITEM_SIZES',(");
size_t item;
for(item = 0; item < maxDim; ++item) {
printf("%zu,", itemSizes[item]);
}
printf(")\n");
free(itemSizes);
UIProp(devices[d], CL_DEVICE_MAX_WRITE_IMAGE_ARGS);
UIProp(devices[d], CL_DEVICE_MEM_BASE_ADDR_ALIGN);
UIProp(devices[d], CL_DEVICE_MIN_DATA_TYPE_ALIGN_SIZE);
UIProp(devices[d], CL_DEVICE_PREFERRED_VECTOR_WIDTH_CHAR);
UIProp(devices[d], CL_DEVICE_PREFERRED_VECTOR_WIDTH_SHORT);
UIProp(devices[d], CL_DEVICE_PREFERRED_VECTOR_WIDTH_INT);
UIProp(devices[d], CL_DEVICE_PREFERRED_VECTOR_WIDTH_LONG);
UIProp(devices[d], CL_DEVICE_PREFERRED_VECTOR_WIDTH_FLOAT);
UIProp(devices[d], CL_DEVICE_PREFERRED_VECTOR_WIDTH_DOUBLE);
SProp(devices[d], CL_DEVICE_PROFILE);
SizeProp(devices[d], CL_DEVICE_PROFILING_TIMER_RESOLUTION);
}
free(devices);
}
free(platforms);
return 0;
}
'''
#----------------------------------------------------------------------------------------------------------------------
dev_props = None
class OpenCL(orio.module.loop.submodule.submodule.SubModule):
'''The OpenCL transformation submodule.'''
def __init__(self, perf_params = None, transf_args = None, stmt = None, language='opencl', tinfo=None):
'''To instantiate the transformation submodule.'''
orio.module.loop.submodule.submodule.SubModule.__init__(self, perf_params, transf_args, stmt, language)
self.tinfo = tinfo
self.props = None
#------------------------------------------------------------------------------------------------------------------
def readTransfArgs(self, perf_params, transf_args):
'''Process the given transformation arguments'''
# expected argument names
PLATFORM = 'platform'
DEVICE = 'device'
WORKGROUPS = 'workGroups'
WORKITEMS = 'workItemsPerGroup'
CB = 'cacheBlocks'
STREAMCOUNT = 'streamCount'
UIF = 'unrollInner'
CLFLAGS = 'clFlags'
THREADCOUNT = 'threadCount'
BLOCKCOUNT = 'blockCount'
VECHINT = 'vecHint'
SIZEHINT = 'sizeHint'
# default argument values
platform = 0
device = 0
workGroups = None
workItemsPerGroup = None
cacheBlocks = False
streamCount = 1
unrollInner = None
clFlags = None
vecHint = 0
sizeHint = False
# iterate over all transformation arguments
errors = ''
for aname, rhs, line_no in transf_args:
# evaluate the RHS expression
try:
rhs = eval(rhs, perf_params)
except Exception, e:
g.err('orio.module.loop.submodule.opencl.opencl: %s: failed to evaluate the argument expression: %s\n --> %s: %s' % (line_no, rhs,e.__class__.__name__, e))
if aname == PLATFORM:
# TODO: validate
platform = rhs
elif aname == DEVICE:
# TODO: validate
device = rhs
elif aname == WORKGROUPS:
# TODO: validate
workGroups = rhs
elif aname == WORKITEMS:
# TODO: validate
workItemsPerGroup = rhs
elif aname == CB:
# TODO: validate
cacheBlocks = rhs
elif aname == STREAMCOUNT:
# TODO: validate
streamCount = rhs
elif aname == UIF:
# TODO: validate
unrollInner = rhs
elif aname == CLFLAGS:
clFlags = rhs
elif aname == THREADCOUNT:
g.warn("Interpreting CUDA threadCount as OpenCL workItemsPerGroup")
workItemsPerGroup = rhs
elif aname == BLOCKCOUNT:
g.warn("Interpreting CUDA blockCount as OpenCL workGroups")
workGroups = rhs
elif aname == VECHINT:
vecHint = rhs
elif aname == SIZEHINT:
sizeHint = rhs
else:
g.err('%s: %s: unrecognized transformation argument: "%s"' % (self.__class__, line_no, aname))
if not errors == '':
raise Exception, ('%s: errors evaluating transformation args:\n%s' % (self.__class__, errors))
# return evaluated transformation arguments
return {
PLATFORM:platform,
DEVICE:device,
WORKGROUPS:workGroups,
WORKITEMS:workItemsPerGroup,
CB:cacheBlocks,
STREAMCOUNT:streamCount,
UIF:unrollInner,
CLFLAGS:clFlags,
VECHINT:vecHint,
SIZEHINT:sizeHint,}
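        # Illustrative sketch only (argument names come from the table above; the
        # numeric values are hypothetical): a tuning spec along the lines of
        #   OpenCL(workGroups=14, workItemsPerGroup=64, cacheBlocks=True)
        # reaches this method as (aname, rhs, line_no) triples and is returned as
        #   {'platform': 0, 'device': 0, 'workGroups': 14, 'workItemsPerGroup': 64,
        #    'cacheBlocks': True, 'streamCount': 1, 'unrollInner': None,
        #    'clFlags': None, 'vecHint': 0, 'sizeHint': False}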
#------------------------------------------------------------------------------------------------------------------
def getDeviceProps(self):
'''Get device properties'''
# write the query code
qsrc = "enum_opencl_props.c"
qexec = qsrc + ".o"
qout = qexec + ".props"
try:
f = open(qsrc, 'w')
f.write(OPENCL_DEVICE_QUERY_SKELET)
f.close()
except:
g.err('%s: cannot open file for writing: %s' % (self.__class__, qsrc))
# compile the query
if self.tinfo is not None and self.tinfo.build_cmd is not None:
cmd = self.tinfo.build_cmd
else:
cmd = 'gcc -framework OpenCL'
cmd += ' -o %s %s' % (qexec, qsrc)
status = os.system(cmd)
if status:
g.err('%s: failed to compile OpenCL device query code: "%s"' % (self.__class__, cmd))
# execute the query
runcmd = './%s > ./%s' % (qexec, qout)
status = os.system(runcmd)
if status:
g.err('%s: failed to execute OpenCL device query code: "%s"' % (self.__class__, runcmd))
os.remove(qsrc)
os.remove(qexec)
# read device properties
platforms = []
try:
f = open(qout, 'r')
mostRecentWasDevice = False
for line in f:
eline = ast.literal_eval(line)
if eline[0] == 'PLATFORM':
mostRecentWasDevice = False
platforms.append({'devices':[]})
elif eline[0] == 'DEVICE':
mostRecentWasDevice = True
platforms[-1]['devices'].append({})
else:
if mostRecentWasDevice:
platforms[-1]['devices'][-1][eline[0]] = eline[1]
else:
platforms[-1][eline[0]] = eline[1]
f.close()
#print platforms
except:
g.err('%s: cannot open query output file for reading: %s' % (self.__class__, qout))
# return queried device props
return platforms
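        # Illustrative shape of the parsed result (property names and values are
        # hypothetical): one dict per platform, each holding its platform-level
        # properties plus a 'devices' list of per-device property dicts, e.g.
        #   [{'CL_PLATFORM_NAME': 'Example',
        #     'devices': [{'CL_DEVICE_MAX_COMPUTE_UNITS': 8,
        #                  'CL_DEVICE_MAX_WORK_GROUP_SIZE': 1024}]}]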
#------------------------------------------------------------------------------------------------------------------
def openclify(self, stmt, targs):
'''Apply OpenCL transformations'''
g.debug('orio.module.loop.submodule.opencl.opencl: starting OpenCL transformations')
# perform transformation
t = transformation.Transformation(stmt, self.props, targs, self.tinfo)
transformed_stmt = t.transform()
# return the transformed statement
return transformed_stmt
#------------------------------------------------------------------------------------------------------------------
def transform(self):
'''The implementation of the abstract transform method for OpenCL'''
# read device properties
global dev_props # initialize device properties only once
if dev_props is None:
dev_props = self.getDeviceProps()
if self.props is None:
self.props = dev_props
# read all transformation arguments
targs = self.readTransfArgs(self.perf_params, self.transf_args)
g.Globals().metadata.update(targs)
# perform the transformation of the statement
transformed_stmt = self.openclify(self.stmt, targs)
return transformed_stmt
| 38.452055
| 171
| 0.572711
|
766ab72121cc18bd9dcde763548c8ff995bf894c
| 860
|
py
|
Python
|
submissions/valid-sudoku/solution.py
|
Wattyyy/LeetCode
|
13a9be056d0a0c38c2f8c8222b11dc02cb25a935
|
[
"MIT"
] | null | null | null |
submissions/valid-sudoku/solution.py
|
Wattyyy/LeetCode
|
13a9be056d0a0c38c2f8c8222b11dc02cb25a935
|
[
"MIT"
] | 1
|
2022-03-04T20:24:32.000Z
|
2022-03-04T20:31:58.000Z
|
submissions/valid-sudoku/solution.py
|
Wattyyy/LeetCode
|
13a9be056d0a0c38c2f8c8222b11dc02cb25a935
|
[
"MIT"
] | null | null | null |
# https://leetcode.com/problems/valid-sudoku
from typing import List
class Solution:
def isValidSudoku(self, board: List[List[str]]) -> bool:
row = [set() for _ in range(9)]
col = [set() for _ in range(9)]
matrix = [[set() for _ in range(3)] for __ in range(3)]
for i in range(9):
for j in range(9):
char = board[i][j]
if char == ".":
continue
if char in row[i]:
return False
else:
row[i].add(char)
if char in col[j]:
return False
else:
col[j].add(char)
if char in matrix[i // 3][j // 3]:
return False
else:
matrix[i // 3][j // 3].add(char)
return True
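# Illustrative usage (a 9x9 `board` of single-character strings, with "." marking
# empty cells, is assumed and not defined here):
#   Solution().isValidSudoku(board)
# returns True only if no digit repeats within any row, column, or 3x3 sub-box.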
| 26.875
| 63
| 0.398837
|
c376286ccd3a9fbd714f15729b2949a883024f1f
| 3,633
|
py
|
Python
|
empirical-analyses/bin/plot-div-models.py
|
joaks1/msbayes-experiments
|
72fcf3c26f6d92bdcc39343372552f45d72d8f7f
|
[
"CC-BY-4.0"
] | null | null | null |
empirical-analyses/bin/plot-div-models.py
|
joaks1/msbayes-experiments
|
72fcf3c26f6d92bdcc39343372552f45d72d8f7f
|
[
"CC-BY-4.0"
] | null | null | null |
empirical-analyses/bin/plot-div-models.py
|
joaks1/msbayes-experiments
|
72fcf3c26f6d92bdcc39343372552f45d72d8f7f
|
[
"CC-BY-4.0"
] | null | null | null |
#! /usr/bin/env python
import os
import sys
from pymsbayes import plotting
from pymsbayes.utils import parsing
from pymsbayes.utils.messaging import get_logger
import project_util
_LOG = get_logger(__name__)
def get_div_model_result_path(dmc_sim_result, iteration_index):
path = (dmc_sim_result.get_result_path_prefix(1,1,1) +
'{0}-div-model-results.txt'.format(iteration_index))
return path
def get_posterior_path(dmc_sim_result, iteration_index):
path = (dmc_sim_result.get_result_path_prefix(1,1,1) +
'{0}-posterior-sample.txt.gz'.format(iteration_index))
return path
def get_div_model_plot_grid(
info_path,
iteration_index = 99,
ordered = False,
margin_top = 0.99,
padding_between_vertical = 0.8):
dmc = parsing.DMCSimulationResults(info_path)
div_model_path = get_div_model_result_path(dmc, iteration_index)
if ordered:
div_model_path = get_posterior_path(dmc, iteration_index)
p = plotting.UnorderedDivergenceModelPlotGrid(
div_model_results_path = div_model_path,
num_top_models = 10,
height = 10.0,
width = 8.0,
data_label_size = 10.0,
plot_label_schema = 'uppercase',
plot_label_offset = 0,
plot_label_size = 12.0,
y_title = 'Divergence time ($4N_C$ generations)',
y_title_size = 14.0,
y_tick_label_size = 10.0,
right_text_size = 10.0,
margin_left = 0.03,
margin_bottom = 0.0,
margin_right = 1,
margin_top = margin_top,
padding_between_vertical = padding_between_vertical)
return p.create_grid()
def main_cli():
pg = get_div_model_plot_grid(
info_path = project_util.PHILIPPINES_DPP_INFO,
iteration_index = 99)
path = os.path.join(project_util.PLOT_DIR, 'philippines-dpp-div-models.pdf')
pg.savefig(path)
pg = get_div_model_plot_grid(
info_path = project_util.PHILIPPINES_DPP_INFORM_INFO,
iteration_index = 99)
path = os.path.join(project_util.PLOT_DIR, 'philippines-dpp-inform-div-models.pdf')
pg.savefig(path)
pg = get_div_model_plot_grid(
info_path = project_util.PHILIPPINES_DPP_SIMPLE_INFO,
iteration_index = 99)
path = os.path.join(project_util.PLOT_DIR, 'philippines-dpp-simple-div-models.pdf')
pg.savefig(path)
pg = get_div_model_plot_grid(
info_path = project_util.PHILIPPINES_UNIFORM_INFO,
iteration_index = 99,
margin_top = 0.985,
padding_between_vertical = 0.9)
path = os.path.join(project_util.PLOT_DIR, 'philippines-uniform-div-models.pdf')
pg.savefig(path)
pg = get_div_model_plot_grid(
info_path = project_util.PHILIPPINES_OLD_INFO,
iteration_index = 99,
margin_top = 0.985,
padding_between_vertical = 0.9)
path = os.path.join(project_util.PLOT_DIR, 'philippines-old-div-models.pdf')
pg.savefig(path)
pg = get_div_model_plot_grid(
info_path = project_util.NP_DPP_UNORDERED_INFO,
iteration_index = 249)
path = os.path.join(project_util.PLOT_DIR, 'negros-panay-dpp-unordered-div-models.pdf')
pg.savefig(path)
pg = get_div_model_plot_grid(
info_path = project_util.NP_DPP_ORDERED_INFO,
iteration_index = 249,
ordered = True)
path = os.path.join(project_util.PLOT_DIR, 'negros-panay-dpp-ordered-div-models.pdf')
pg.savefig(path)
if __name__ == '__main__':
main_cli()
| 34.932692
| 91
| 0.661712
|
d9e0f54b724d3b44db158c6d57e7220d28cf7b8a
| 9,668
|
py
|
Python
|
tensorflow/contrib/slim/python/slim/evaluation_test.py
|
AlexChrisF/udacity
|
b7f85a74058fc63ccb7601c418450ab934ef5953
|
[
"Apache-2.0"
] | 522
|
2016-06-08T02:15:50.000Z
|
2022-03-02T05:30:36.000Z
|
tensorflow/contrib/slim/python/slim/evaluation_test.py
|
AlexChrisF/udacity
|
b7f85a74058fc63ccb7601c418450ab934ef5953
|
[
"Apache-2.0"
] | 48
|
2016-07-26T00:11:55.000Z
|
2022-02-23T13:36:33.000Z
|
tensorflow/contrib/slim/python/slim/evaluation_test.py
|
AlexChrisF/udacity
|
b7f85a74058fc63ccb7601c418450ab934ef5953
|
[
"Apache-2.0"
] | 108
|
2016-06-16T15:34:05.000Z
|
2022-03-12T13:23:11.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import time
import numpy as np
from tensorflow.contrib.framework.python.ops import variables as variables_lib
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.contrib.slim.python.slim import evaluation
from tensorflow.contrib.training.python.training import evaluation as evaluation_lib
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import flags
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary import summary_iterator
from tensorflow.python.training import input
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import session_run_hook
FLAGS = flags.FLAGS
def GenerateTestData(num_classes, batch_size):
inputs = np.random.rand(batch_size, num_classes)
np.random.seed(0)
labels = np.random.randint(low=0, high=num_classes, size=batch_size)
labels = labels.reshape((batch_size,))
return inputs, labels
def TestModel(inputs):
scale = variables.Variable(1.0, trainable=False)
  # Scaling the outputs won't change the result...
outputs = math_ops.multiply(inputs, scale)
return math_ops.argmax(outputs, 1), scale
def GroundTruthAccuracy(inputs, labels, batch_size):
predictions = np.argmax(inputs, 1)
num_correct = np.sum(predictions == labels)
return float(num_correct) / batch_size
class EvaluationTest(test.TestCase):
def setUp(self):
super(EvaluationTest, self).setUp()
num_classes = 8
batch_size = 16
inputs, labels = GenerateTestData(num_classes, batch_size)
self._expected_accuracy = GroundTruthAccuracy(inputs, labels, batch_size)
self._global_step = variables_lib.get_or_create_global_step()
self._inputs = constant_op.constant(inputs, dtype=dtypes.float32)
self._labels = constant_op.constant(labels, dtype=dtypes.int64)
self._predictions, self._scale = TestModel(self._inputs)
def testFinalOpsOnEvaluationLoop(self):
value_op, update_op = metric_ops.streaming_accuracy(self._predictions,
self._labels)
init_op = control_flow_ops.group(variables.global_variables_initializer(),
variables.local_variables_initializer())
# Create checkpoint and log directories:
chkpt_dir = os.path.join(self.get_temp_dir(), 'tmp_logs/')
gfile.MakeDirs(chkpt_dir)
logdir = os.path.join(self.get_temp_dir(), 'tmp_logs2/')
gfile.MakeDirs(logdir)
# Save initialized variables to a checkpoint directory:
saver = saver_lib.Saver()
with self.test_session() as sess:
init_op.run()
saver.save(sess, os.path.join(chkpt_dir, 'chkpt'))
class Object(object):
def __init__(self):
self.hook_was_run = False
obj = Object()
# Create a custom session run hook.
class CustomHook(session_run_hook.SessionRunHook):
def __init__(self, obj):
self.obj = obj
def end(self, session):
self.obj.hook_was_run = True
# Now, run the evaluation loop:
accuracy_value = evaluation.evaluation_loop(
'',
chkpt_dir,
logdir,
eval_op=update_op,
final_op=value_op,
hooks=[CustomHook(obj)],
max_number_of_evaluations=1)
self.assertAlmostEqual(accuracy_value, self._expected_accuracy)
# Validate that custom hook ran.
self.assertTrue(obj.hook_was_run)
def _create_names_to_metrics(self, predictions, labels):
accuracy0, update_op0 = metric_ops.streaming_accuracy(predictions, labels)
accuracy1, update_op1 = metric_ops.streaming_accuracy(predictions + 1,
labels)
names_to_values = {'Accuracy': accuracy0, 'Another_accuracy': accuracy1}
names_to_updates = {'Accuracy': update_op0, 'Another_accuracy': update_op1}
return names_to_values, names_to_updates
def _verify_summaries(self, output_dir, names_to_values):
"""Verifies that the given `names_to_values` are found in the summaries.
Args:
output_dir: An existing directory where summaries are found.
names_to_values: A dictionary of strings to values.
"""
# Check that the results were saved. The events file may have additional
# entries, e.g. the event version stamp, so have to parse things a bit.
output_filepath = glob.glob(os.path.join(output_dir, '*'))
self.assertEqual(len(output_filepath), 1)
events = summary_iterator.summary_iterator(output_filepath[0])
summaries = [e.summary for e in events if e.summary.value]
values = []
for summary in summaries:
for value in summary.value:
values.append(value)
saved_results = {v.tag: v.simple_value for v in values}
for name in names_to_values:
self.assertAlmostEqual(names_to_values[name], saved_results[name])
def testLatestCheckpointReturnsNoneAfterTimeout(self):
start = time.time()
ret = evaluation_lib.wait_for_new_checkpoint(
'/non-existent-dir', 'foo', timeout=1.0, seconds_to_sleep=0.5)
end = time.time()
self.assertIsNone(ret)
# We've waited one time.
self.assertGreater(end, start + 0.5)
# The timeout kicked in.
self.assertLess(end, start + 1.1)
def testMonitorCheckpointsLoopTimeout(self):
ret = list(
evaluation_lib.checkpoints_iterator(
'/non-existent-dir', timeout=0))
self.assertEqual(ret, [])
def testWithEpochLimit(self):
predictions_limited = input.limit_epochs(self._predictions, num_epochs=1)
labels_limited = input.limit_epochs(self._labels, num_epochs=1)
value_op, update_op = metric_ops.streaming_accuracy(
predictions_limited, labels_limited)
init_op = control_flow_ops.group(variables.global_variables_initializer(),
variables.local_variables_initializer())
# Create checkpoint and log directories:
chkpt_dir = os.path.join(self.get_temp_dir(), 'tmp_logs/')
gfile.MakeDirs(chkpt_dir)
logdir = os.path.join(self.get_temp_dir(), 'tmp_logs2/')
gfile.MakeDirs(logdir)
# Save initialized variables to a checkpoint directory:
saver = saver_lib.Saver()
with self.test_session() as sess:
init_op.run()
saver.save(sess, os.path.join(chkpt_dir, 'chkpt'))
# Now, run the evaluation loop:
accuracy_value = evaluation.evaluation_loop(
'', chkpt_dir, logdir, eval_op=update_op, final_op=value_op,
max_number_of_evaluations=1, num_evals=10000)
self.assertAlmostEqual(accuracy_value, self._expected_accuracy)
class SingleEvaluationTest(test.TestCase):
def setUp(self):
super(SingleEvaluationTest, self).setUp()
num_classes = 8
batch_size = 16
inputs, labels = GenerateTestData(num_classes, batch_size)
self._expected_accuracy = GroundTruthAccuracy(inputs, labels, batch_size)
self._global_step = variables_lib.get_or_create_global_step()
self._inputs = constant_op.constant(inputs, dtype=dtypes.float32)
self._labels = constant_op.constant(labels, dtype=dtypes.int64)
self._predictions, self._scale = TestModel(self._inputs)
def testErrorRaisedIfCheckpointDoesntExist(self):
checkpoint_path = os.path.join(self.get_temp_dir(),
'this_file_doesnt_exist')
log_dir = os.path.join(self.get_temp_dir(), 'error_raised')
with self.assertRaises(errors.NotFoundError):
evaluation.evaluate_once('', checkpoint_path, log_dir)
def testRestoredModelPerformance(self):
checkpoint_path = os.path.join(self.get_temp_dir(), 'model.ckpt')
log_dir = os.path.join(self.get_temp_dir(), 'log_dir1/')
# First, save out the current model to a checkpoint:
init_op = control_flow_ops.group(variables.global_variables_initializer(),
variables.local_variables_initializer())
saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V1)
with self.test_session() as sess:
sess.run(init_op)
saver.save(sess, checkpoint_path)
# Next, determine the metric to evaluate:
value_op, update_op = metric_ops.streaming_accuracy(self._predictions,
self._labels)
# Run the evaluation and verify the results:
accuracy_value = evaluation.evaluate_once(
'', checkpoint_path, log_dir, eval_op=update_op, final_op=value_op)
self.assertAlmostEqual(accuracy_value, self._expected_accuracy)
if __name__ == '__main__':
test.main()
| 37.618677
| 84
| 0.716798
|
9adc1a7df990d30f98fa1a596515b68594c12ed7
| 33
|
py
|
Python
|
misc/sys.py
|
NaaYaa-oops/py-change-mac-address
|
df5d50519ca42c3b269eb6795ef697acee8e14de
|
[
"MIT"
] | null | null | null |
misc/sys.py
|
NaaYaa-oops/py-change-mac-address
|
df5d50519ca42c3b269eb6795ef697acee8e14de
|
[
"MIT"
] | null | null | null |
misc/sys.py
|
NaaYaa-oops/py-change-mac-address
|
df5d50519ca42c3b269eb6795ef697acee8e14de
|
[
"MIT"
] | null | null | null |
import sys
sys.tracebacklimit = 0
| 16.5
| 22
| 0.818182
|
a71867e953311004a5f7695795e8dc130fcbfe10
| 2,887
|
py
|
Python
|
backend/kesaseteli/applications/migrations/0015_set_real_schools.py
|
City-of-Helsinki/kesaseteli
|
964f801c2dba72c4105b6e436b12b821b199d6d2
|
[
"MIT"
] | 2
|
2021-05-10T09:28:35.000Z
|
2021-05-17T12:15:34.000Z
|
backend/kesaseteli/applications/migrations/0015_set_real_schools.py
|
City-of-Helsinki/yjdh
|
1c07576b456d2be9c3171363450ed46de2c1bbcb
|
[
"MIT"
] | 931
|
2021-05-21T15:24:35.000Z
|
2022-03-31T20:07:40.000Z
|
backend/kesaseteli/applications/migrations/0015_set_real_schools.py
|
City-of-Helsinki/yjdh
|
1c07576b456d2be9c3171363450ed46de2c1bbcb
|
[
"MIT"
] | 6
|
2021-07-06T11:07:02.000Z
|
2022-02-07T12:42:21.000Z
|
from django.db import migrations, transaction
REAL_SCHOOL_LIST = [
"Aleksis Kiven peruskoulu",
"Apollon yhteiskoulu",
"Arabian peruskoulu",
"Aurinkolahden peruskoulu",
"Botby grundskola",
"Elias-koulu",
"Englantilainen koulu",
"Grundskolan Norsen",
"Haagan peruskoulu",
"Helsingin Juutalainen Yhteiskoulu",
"Helsingin Kristillinen koulu",
"Helsingin Montessori-koulu",
"Helsingin Rudolf Steiner -koulu",
"Helsingin Saksalainen koulu",
"Helsingin Suomalainen yhteiskoulu",
"Helsingin Uusi yhteiskoulu",
"Helsingin eurooppalainen koulu",
"Helsingin normaalilyseo",
"Helsingin ranskalais-suomalainen koulu",
"Helsingin yhteislyseo",
"Helsingin yliopiston Viikin normaalikoulu",
"Herttoniemen yhteiskoulu",
"Hiidenkiven peruskoulu",
"Hoplaxskolan",
"International School of Helsinki",
"Itäkeskuksen peruskoulu",
"Jätkäsaaren peruskoulu",
"Kalasataman peruskoulu",
"Kankarepuiston peruskoulu",
"Kannelmäen peruskoulu",
"Karviaistien koulu",
"Kruununhaan yläasteen koulu",
"Kruunuvuorenrannan peruskoulu",
"Kulosaaren yhteiskoulu",
"Käpylän peruskoulu",
"Laajasalon peruskoulu",
"Latokartanon peruskoulu",
"Lauttasaaren yhteiskoulu",
"Maatullin peruskoulu",
"Malmin peruskoulu",
"Marjatta-koulu",
"Maunulan yhteiskoulu",
"Meilahden yläasteen koulu",
"Merilahden peruskoulu",
"Minervaskolan",
"Munkkiniemen yhteiskoulu",
"Myllypuron peruskoulu",
"Naulakallion koulu",
"Oulunkylän yhteiskoulu",
"Outamon koulu",
"Pakilan yläasteen koulu",
"Pasilan peruskoulu",
"Pitäjänmäen peruskoulu",
"Pohjois-Haagan yhteiskoulu",
"Porolahden peruskoulu",
"Puistolan peruskoulu",
"Puistopolun peruskoulu",
"Pukinmäenkaaren peruskoulu",
"Ressu Comprehensive School",
"Ressun peruskoulu",
"Sakarinmäen peruskoulu",
"Solakallion koulu",
"Sophie Mannerheimin koulu",
"Suomalais-venäläinen koulu",
"Suutarinkylän peruskoulu",
"Taivallahden peruskoulu",
"Toivolan koulu",
"Torpparinmäen peruskoulu",
"Töölön yhteiskoulu",
"Valteri-koulu",
"Vartiokylän yläasteen koulu",
"Vesalan peruskoulu",
"Vuoniityn peruskoulu",
"Yhtenäiskoulu",
"Zacharias Topeliusskolan",
"Åshöjdens grundskola",
"Östersundom skola",
]
def set_real_schools(apps, schema_editor):
school_model = apps.get_model("applications", "School")
with transaction.atomic():
school_model.objects.all().delete()
for school in REAL_SCHOOL_LIST:
school_model.objects.create(name=school)
class Migration(migrations.Migration):
dependencies = [
("applications", "0014_set_default_schools"),
]
operations = [
migrations.RunPython(set_real_schools, migrations.RunPython.noop),
]
| 28.303922
| 74
| 0.692068
|
ea2b909b86a10ecd9b7601b658352748794199cc
| 1,564
|
py
|
Python
|
zwave_js_server/model/log_config.py
|
firstof9/zwave-js-server-python
|
728d1e44277b8c69fac06eba1f8362c281762bf7
|
[
"Apache-2.0"
] | 53
|
2021-01-09T18:47:34.000Z
|
2022-03-16T21:54:41.000Z
|
zwave_js_server/model/log_config.py
|
firstof9/zwave-js-server-python
|
728d1e44277b8c69fac06eba1f8362c281762bf7
|
[
"Apache-2.0"
] | 130
|
2021-01-06T21:34:46.000Z
|
2022-03-29T18:44:14.000Z
|
zwave_js_server/model/log_config.py
|
firstof9/zwave-js-server-python
|
728d1e44277b8c69fac06eba1f8362c281762bf7
|
[
"Apache-2.0"
] | 17
|
2021-01-07T21:55:29.000Z
|
2022-03-29T08:08:50.000Z
|
"""Provide a model for the log config."""
from dataclasses import dataclass
from typing import Optional, TypedDict, cast
from ..const import LogLevel
class LogConfigDataType(TypedDict, total=False):
"""Represent a log config data dict type."""
enabled: bool
level: int
logToFile: bool
filename: str
forceConsole: bool
@dataclass
class LogConfig:
"""Represent a log config dict type."""
# https://github.com/zwave-js/node-zwave-js/blob/master/packages/core/src/log/shared.ts#L85
# Must include at least one key
enabled: Optional[bool] = None
level: Optional[LogLevel] = None
log_to_file: Optional[bool] = None
filename: Optional[str] = None
force_console: Optional[bool] = None
def to_dict(self) -> LogConfigDataType:
"""Return LogConfigDataType dict from self."""
data = {
"enabled": self.enabled,
"level": self.level.value if self.level else None,
"logToFile": self.log_to_file,
"filename": self.filename,
"forceConsole": self.force_console,
}
return cast(LogConfigDataType, {k: v for k, v in data.items() if v is not None})
@classmethod
def from_dict(cls, data: LogConfigDataType) -> "LogConfig":
"""Return LogConfig from LogConfigDataType dict."""
return cls(
data.get("enabled"),
LogLevel(data["level"]) if "level" in data else None,
data.get("logToFile"),
data.get("filename"),
data.get("forceConsole"),
)
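# Illustrative round trip (a sketch; assumes LogLevel exposes a member such as
# LogLevel.ERROR):
#   cfg = LogConfig(enabled=True, level=LogLevel.ERROR, log_to_file=False)
#   data = cfg.to_dict()       # keys left as None are dropped from the dict
#   LogConfig.from_dict(data)  # rebuilds an equivalent LogConfig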
| 30.666667
| 95
| 0.629156
|
ccb8e9205e5f29123f69441b5cc34b69fa35a6bb
| 47,131
|
py
|
Python
|
seaborn/matrix.py
|
llzenoll/seaborn
|
56743dbf2663f30bf55949b3dfe984bf6177ba6a
|
[
"BSD-3-Clause"
] | 2
|
2019-01-13T19:21:05.000Z
|
2021-04-01T05:12:15.000Z
|
seaborn/matrix.py
|
llzenoll/seaborn
|
56743dbf2663f30bf55949b3dfe984bf6177ba6a
|
[
"BSD-3-Clause"
] | null | null | null |
seaborn/matrix.py
|
llzenoll/seaborn
|
56743dbf2663f30bf55949b3dfe984bf6177ba6a
|
[
"BSD-3-Clause"
] | null | null | null |
"""Functions to visualize matrices of data."""
from __future__ import division
import itertools
import warnings
import matplotlib as mpl
from matplotlib.collections import LineCollection
import matplotlib.pyplot as plt
from matplotlib import gridspec
import numpy as np
import pandas as pd
from scipy.cluster import hierarchy
from . import cm
from .axisgrid import Grid
from .utils import (despine, axis_ticklabels_overlap, relative_luminance,
to_utf8)
from .external.six import string_types
__all__ = ["heatmap", "clustermap"]
def _index_to_label(index):
"""Convert a pandas index or multiindex to an axis label."""
if isinstance(index, pd.MultiIndex):
return "-".join(map(to_utf8, index.names))
else:
return index.name
def _index_to_ticklabels(index):
"""Convert a pandas index or multiindex into ticklabels."""
if isinstance(index, pd.MultiIndex):
return ["-".join(map(to_utf8, i)) for i in index.values]
else:
return index.values
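# Illustrative example (assumes to_utf8 simply stringifies each level):
#   _index_to_ticklabels(pd.MultiIndex.from_tuples([("a", 1), ("b", 2)]))
# would yield ["a-1", "b-2"], while a flat index is returned unchanged as its values.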
def _convert_colors(colors):
"""Convert either a list of colors or nested lists of colors to RGB."""
to_rgb = mpl.colors.colorConverter.to_rgb
if isinstance(colors, pd.DataFrame):
# Convert dataframe
return pd.DataFrame({col: colors[col].map(to_rgb)
for col in colors})
elif isinstance(colors, pd.Series):
return colors.map(to_rgb)
else:
try:
to_rgb(colors[0])
# If this works, there is only one level of colors
return list(map(to_rgb, colors))
except ValueError:
# If we get here, we have nested lists
return [list(map(to_rgb, l)) for l in colors]
def _matrix_mask(data, mask):
"""Ensure that data and mask are compatabile and add missing values.
Values will be plotted for cells where ``mask`` is ``False``.
``data`` is expected to be a DataFrame; ``mask`` can be an array or
a DataFrame.
"""
if mask is None:
mask = np.zeros(data.shape, np.bool)
if isinstance(mask, np.ndarray):
# For array masks, ensure that shape matches data then convert
if mask.shape != data.shape:
raise ValueError("Mask must have the same shape as data.")
mask = pd.DataFrame(mask,
index=data.index,
columns=data.columns,
dtype=np.bool)
elif isinstance(mask, pd.DataFrame):
# For DataFrame masks, ensure that semantic labels match data
        if not mask.index.equals(data.index) \
                or not mask.columns.equals(data.columns):
err = "Mask must have the same index and columns as data."
raise ValueError(err)
# Add any cells with missing data to the mask
# This works around an issue where `plt.pcolormesh` doesn't represent
# missing data properly
mask = mask | pd.isnull(data)
return mask
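# Illustrative sketch of the masking contract (hypothetical 2x2 frame):
#   data = pd.DataFrame([[1.0, np.nan], [3.0, 4.0]])
#   _matrix_mask(data, np.array([[False, False], [True, False]]))
# returns a boolean DataFrame in which both the NaN cell and the explicitly
# masked cell are True, so neither is drawn by pcolormesh.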
class _HeatMapper(object):
"""Draw a heatmap plot of a matrix with nice labels and colormaps."""
def __init__(self, data, vmin, vmax, cmap, center, robust, annot, fmt,
annot_kws, cbar, cbar_kws,
xticklabels=True, yticklabels=True, mask=None):
"""Initialize the plotting object."""
# We always want to have a DataFrame with semantic information
# and an ndarray to pass to matplotlib
if isinstance(data, pd.DataFrame):
plot_data = data.values
else:
plot_data = np.asarray(data)
data = pd.DataFrame(plot_data)
        # Validate the mask and convert to DataFrame
mask = _matrix_mask(data, mask)
plot_data = np.ma.masked_where(np.asarray(mask), plot_data)
# Get good names for the rows and columns
xtickevery = 1
if isinstance(xticklabels, int):
xtickevery = xticklabels
xticklabels = _index_to_ticklabels(data.columns)
elif xticklabels is True:
xticklabels = _index_to_ticklabels(data.columns)
elif xticklabels is False:
xticklabels = []
ytickevery = 1
if isinstance(yticklabels, int):
ytickevery = yticklabels
yticklabels = _index_to_ticklabels(data.index)
elif yticklabels is True:
yticklabels = _index_to_ticklabels(data.index)
elif yticklabels is False:
yticklabels = []
# Get the positions and used label for the ticks
nx, ny = data.T.shape
if not len(xticklabels):
self.xticks = []
self.xticklabels = []
elif isinstance(xticklabels, string_types) and xticklabels == "auto":
self.xticks = "auto"
self.xticklabels = _index_to_ticklabels(data.columns)
else:
self.xticks, self.xticklabels = self._skip_ticks(xticklabels,
xtickevery)
if not len(yticklabels):
self.yticks = []
self.yticklabels = []
elif isinstance(yticklabels, string_types) and yticklabels == "auto":
self.yticks = "auto"
self.yticklabels = _index_to_ticklabels(data.index)
else:
self.yticks, self.yticklabels = self._skip_ticks(yticklabels,
ytickevery)
# Get good names for the axis labels
xlabel = _index_to_label(data.columns)
ylabel = _index_to_label(data.index)
self.xlabel = xlabel if xlabel is not None else ""
self.ylabel = ylabel if ylabel is not None else ""
# Determine good default values for the colormapping
self._determine_cmap_params(plot_data, vmin, vmax,
cmap, center, robust)
# Sort out the annotations
if annot is None:
annot = False
annot_data = None
elif isinstance(annot, bool):
if annot:
annot_data = plot_data
else:
annot_data = None
else:
try:
annot_data = annot.values
except AttributeError:
annot_data = annot
if annot.shape != plot_data.shape:
raise ValueError('Data supplied to "annot" must be the same '
'shape as the data to plot.')
annot = True
# Save other attributes to the object
self.data = data
self.plot_data = plot_data
self.annot = annot
self.annot_data = annot_data
self.fmt = fmt
self.annot_kws = {} if annot_kws is None else annot_kws
self.cbar = cbar
self.cbar_kws = {} if cbar_kws is None else cbar_kws
self.cbar_kws.setdefault('ticks', mpl.ticker.MaxNLocator(6))
def _determine_cmap_params(self, plot_data, vmin, vmax,
cmap, center, robust):
"""Use some heuristics to set good defaults for colorbar and range."""
calc_data = plot_data.data[~np.isnan(plot_data.data)]
if vmin is None:
vmin = np.percentile(calc_data, 2) if robust else calc_data.min()
if vmax is None:
vmax = np.percentile(calc_data, 98) if robust else calc_data.max()
self.vmin, self.vmax = vmin, vmax
# Choose default colormaps if not provided
if cmap is None:
if center is None:
self.cmap = cm.rocket
else:
self.cmap = cm.icefire
elif isinstance(cmap, string_types):
self.cmap = mpl.cm.get_cmap(cmap)
elif isinstance(cmap, list):
self.cmap = mpl.colors.ListedColormap(cmap)
else:
self.cmap = cmap
# Recenter a divergent colormap
if center is not None:
vrange = max(vmax - center, center - vmin)
normlize = mpl.colors.Normalize(center - vrange, center + vrange)
cmin, cmax = normlize([vmin, vmax])
cc = np.linspace(cmin, cmax, 256)
self.cmap = mpl.colors.ListedColormap(self.cmap(cc))
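            # Illustrative check of the recentering (hypothetical numbers): with
            # center=0, vmin=-1 and vmax=3 the symmetric range is [-3, 3], so the new
            # colormap is rebuilt from the slice of the original covering [-1, 3];
            # the data value `center` then falls on the original colormap's midpoint.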
def _annotate_heatmap(self, ax, mesh):
"""Add textual labels with the value in each cell."""
mesh.update_scalarmappable()
height, width = self.annot_data.shape
xpos, ypos = np.meshgrid(np.arange(width) + .5, np.arange(height) + .5)
for x, y, m, color, val in zip(xpos.flat, ypos.flat,
mesh.get_array(), mesh.get_facecolors(),
self.annot_data.flat):
if m is not np.ma.masked:
lum = relative_luminance(color)
text_color = ".15" if lum > .408 else "w"
annotation = ("{:" + self.fmt + "}").format(val)
text_kwargs = dict(color=text_color, ha="center", va="center")
text_kwargs.update(self.annot_kws)
ax.text(x, y, annotation, **text_kwargs)
def _skip_ticks(self, labels, tickevery):
"""Return ticks and labels at evenly spaced intervals."""
n = len(labels)
if tickevery == 0:
ticks, labels = [], []
elif tickevery == 1:
ticks, labels = np.arange(n) + .5, labels
else:
start, end, step = 0, n, tickevery
ticks = np.arange(start, end, step) + .5
labels = labels[start:end:step]
return ticks, labels
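        # Illustrative example: with six labels and tickevery=2 this returns
        #   ticks  -> array([0.5, 2.5, 4.5])
        #   labels -> labels[0], labels[2], labels[4]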
def _auto_ticks(self, ax, labels, axis):
"""Determine ticks and ticklabels that minimize overlap."""
transform = ax.figure.dpi_scale_trans.inverted()
bbox = ax.get_window_extent().transformed(transform)
size = [bbox.width, bbox.height][axis]
axis = [ax.xaxis, ax.yaxis][axis]
tick, = axis.set_ticks([0])
fontsize = tick.label.get_size()
max_ticks = int(size // (fontsize / 72))
if max_ticks < 1:
return [], []
tick_every = len(labels) // max_ticks + 1
tick_every = 1 if tick_every == 0 else tick_every
ticks, labels = self._skip_ticks(labels, tick_every)
return ticks, labels
def plot(self, ax, cax, kws):
"""Draw the heatmap on the provided Axes."""
# Remove all the Axes spines
despine(ax=ax, left=True, bottom=True)
# Draw the heatmap
mesh = ax.pcolormesh(self.plot_data, vmin=self.vmin, vmax=self.vmax,
cmap=self.cmap, **kws)
# Set the axis limits
ax.set(xlim=(0, self.data.shape[1]), ylim=(0, self.data.shape[0]))
# Invert the y axis to show the plot in matrix form
ax.invert_yaxis()
# Possibly add a colorbar
if self.cbar:
cb = ax.figure.colorbar(mesh, cax, ax, **self.cbar_kws)
cb.outline.set_linewidth(0)
# If rasterized is passed to pcolormesh, also rasterize the
# colorbar to avoid white lines on the PDF rendering
if kws.get('rasterized', False):
cb.solids.set_rasterized(True)
# Add row and column labels
if isinstance(self.xticks, string_types) and self.xticks == "auto":
xticks, xticklabels = self._auto_ticks(ax, self.xticklabels, 0)
else:
xticks, xticklabels = self.xticks, self.xticklabels
if isinstance(self.yticks, string_types) and self.yticks == "auto":
yticks, yticklabels = self._auto_ticks(ax, self.yticklabels, 1)
else:
yticks, yticklabels = self.yticks, self.yticklabels
ax.set(xticks=xticks, yticks=yticks)
xtl = ax.set_xticklabels(xticklabels)
ytl = ax.set_yticklabels(yticklabels, rotation="vertical")
# Possibly rotate them if they overlap
if hasattr(ax.figure.canvas, "get_renderer"):
ax.figure.draw(ax.figure.canvas.get_renderer())
if axis_ticklabels_overlap(xtl):
plt.setp(xtl, rotation="vertical")
if axis_ticklabels_overlap(ytl):
plt.setp(ytl, rotation="horizontal")
# Add the axis labels
ax.set(xlabel=self.xlabel, ylabel=self.ylabel)
# Annotate the cells with the formatted values
if self.annot:
self._annotate_heatmap(ax, mesh)
def heatmap(data, vmin=None, vmax=None, cmap=None, center=None, robust=False,
annot=None, fmt=".2g", annot_kws=None,
linewidths=0, linecolor="white",
cbar=True, cbar_kws=None, cbar_ax=None,
square=False, xticklabels="auto", yticklabels="auto",
mask=None, ax=None, **kwargs):
"""Plot rectangular data as a color-encoded matrix.
This is an Axes-level function and will draw the heatmap into the
currently-active Axes if none is provided to the ``ax`` argument. Part of
this Axes space will be taken and used to plot a colormap, unless ``cbar``
is False or a separate Axes is provided to ``cbar_ax``.
Parameters
----------
data : rectangular dataset
2D dataset that can be coerced into an ndarray. If a Pandas DataFrame
is provided, the index/column information will be used to label the
columns and rows.
vmin, vmax : floats, optional
Values to anchor the colormap, otherwise they are inferred from the
data and other keyword arguments.
cmap : matplotlib colormap name or object, or list of colors, optional
The mapping from data values to color space. If not provided, the
default will depend on whether ``center`` is set.
center : float, optional
        The value at which to center the colormap when plotting divergent data.
Using this parameter will change the default ``cmap`` if none is
specified.
robust : bool, optional
If True and ``vmin`` or ``vmax`` are absent, the colormap range is
computed with robust quantiles instead of the extreme values.
annot : bool or rectangular dataset, optional
If True, write the data value in each cell. If an array-like with the
same shape as ``data``, then use this to annotate the heatmap instead
of the raw data.
fmt : string, optional
String formatting code to use when adding annotations.
annot_kws : dict of key, value mappings, optional
Keyword arguments for ``ax.text`` when ``annot`` is True.
linewidths : float, optional
Width of the lines that will divide each cell.
linecolor : color, optional
Color of the lines that will divide each cell.
cbar : boolean, optional
Whether to draw a colorbar.
cbar_kws : dict of key, value mappings, optional
Keyword arguments for `fig.colorbar`.
cbar_ax : matplotlib Axes, optional
Axes in which to draw the colorbar, otherwise take space from the
main Axes.
square : boolean, optional
If True, set the Axes aspect to "equal" so each cell will be
square-shaped.
xticklabels, yticklabels : "auto", bool, list-like, or int, optional
If True, plot the column names of the dataframe. If False, don't plot
the column names. If list-like, plot these alternate labels as the
xticklabels. If an integer, use the column names but plot only every
        nth label. If "auto", try to densely plot non-overlapping labels.
mask : boolean array or DataFrame, optional
If passed, data will not be shown in cells where ``mask`` is True.
Cells with missing values are automatically masked.
ax : matplotlib Axes, optional
Axes in which to draw the plot, otherwise use the currently-active
Axes.
kwargs : other keyword arguments
All other keyword arguments are passed to ``ax.pcolormesh``.
Returns
-------
ax : matplotlib Axes
Axes object with the heatmap.
See also
--------
    clustermap : Plot a matrix using hierarchical clustering to arrange the
rows and columns.
Examples
--------
Plot a heatmap for a numpy array:
.. plot::
:context: close-figs
>>> import numpy as np; np.random.seed(0)
>>> import seaborn as sns; sns.set()
>>> uniform_data = np.random.rand(10, 12)
>>> ax = sns.heatmap(uniform_data)
Change the limits of the colormap:
.. plot::
:context: close-figs
>>> ax = sns.heatmap(uniform_data, vmin=0, vmax=1)
Plot a heatmap for data centered on 0 with a diverging colormap:
.. plot::
:context: close-figs
>>> normal_data = np.random.randn(10, 12)
>>> ax = sns.heatmap(normal_data, center=0)
Plot a dataframe with meaningful row and column labels:
.. plot::
:context: close-figs
>>> flights = sns.load_dataset("flights")
>>> flights = flights.pivot("month", "year", "passengers")
>>> ax = sns.heatmap(flights)
Annotate each cell with the numeric value using integer formatting:
.. plot::
:context: close-figs
>>> ax = sns.heatmap(flights, annot=True, fmt="d")
Add lines between each cell:
.. plot::
:context: close-figs
>>> ax = sns.heatmap(flights, linewidths=.5)
Use a different colormap:
.. plot::
:context: close-figs
>>> ax = sns.heatmap(flights, cmap="YlGnBu")
Center the colormap at a specific value:
.. plot::
:context: close-figs
>>> ax = sns.heatmap(flights, center=flights.loc["January", 1955])
Plot every other column label and don't plot row labels:
.. plot::
:context: close-figs
>>> data = np.random.randn(50, 20)
>>> ax = sns.heatmap(data, xticklabels=2, yticklabels=False)
Don't draw a colorbar:
.. plot::
:context: close-figs
>>> ax = sns.heatmap(flights, cbar=False)
Use different axes for the colorbar:
.. plot::
:context: close-figs
>>> grid_kws = {"height_ratios": (.9, .05), "hspace": .3}
>>> f, (ax, cbar_ax) = plt.subplots(2, gridspec_kw=grid_kws)
>>> ax = sns.heatmap(flights, ax=ax,
... cbar_ax=cbar_ax,
... cbar_kws={"orientation": "horizontal"})
Use a mask to plot only part of a matrix
.. plot::
:context: close-figs
>>> corr = np.corrcoef(np.random.randn(10, 200))
>>> mask = np.zeros_like(corr)
>>> mask[np.triu_indices_from(mask)] = True
>>> with sns.axes_style("white"):
... ax = sns.heatmap(corr, mask=mask, vmax=.3, square=True)
"""
# Initialize the plotter object
plotter = _HeatMapper(data, vmin, vmax, cmap, center, robust, annot, fmt,
annot_kws, cbar, cbar_kws, xticklabels,
yticklabels, mask)
# Add the pcolormesh kwargs here
kwargs["linewidths"] = linewidths
kwargs["edgecolor"] = linecolor
# Draw the plot and return the Axes
if ax is None:
ax = plt.gca()
if square:
ax.set_aspect("equal")
plotter.plot(ax, cbar_ax, kwargs)
return ax
class _DendrogramPlotter(object):
"""Object for drawing tree of similarities between data rows/columns"""
def __init__(self, data, linkage, metric, method, axis, label, rotate):
"""Plot a dendrogram of the relationships between the columns of data
Parameters
----------
data : pandas.DataFrame
Rectangular data
"""
self.axis = axis
if self.axis == 1:
data = data.T
if isinstance(data, pd.DataFrame):
array = data.values
else:
array = np.asarray(data)
data = pd.DataFrame(array)
self.array = array
self.data = data
self.shape = self.data.shape
self.metric = metric
self.method = method
self.axis = axis
self.label = label
self.rotate = rotate
if linkage is None:
self.linkage = self.calculated_linkage
else:
self.linkage = linkage
self.dendrogram = self.calculate_dendrogram()
# Dendrogram ends are always at multiples of 5, who knows why
ticks = 10 * np.arange(self.data.shape[0]) + 5
if self.label:
ticklabels = _index_to_ticklabels(self.data.index)
ticklabels = [ticklabels[i] for i in self.reordered_ind]
if self.rotate:
self.xticks = []
self.yticks = ticks
self.xticklabels = []
self.yticklabels = ticklabels
self.ylabel = _index_to_label(self.data.index)
self.xlabel = ''
else:
self.xticks = ticks
self.yticks = []
self.xticklabels = ticklabels
self.yticklabels = []
self.ylabel = ''
self.xlabel = _index_to_label(self.data.index)
else:
self.xticks, self.yticks = [], []
self.yticklabels, self.xticklabels = [], []
self.xlabel, self.ylabel = '', ''
self.dependent_coord = self.dendrogram['dcoord']
self.independent_coord = self.dendrogram['icoord']
def _calculate_linkage_scipy(self):
if np.product(self.shape) >= 10000:
            warnings.warn('This will be slow... (gentle suggestion: '
                          '"pip install fastcluster")', UserWarning)
linkage = hierarchy.linkage(self.array, method=self.method,
metric=self.metric)
return linkage
def _calculate_linkage_fastcluster(self):
import fastcluster
# Fastcluster has a memory-saving vectorized version, but only
# with certain linkage methods, and mostly with euclidean metric
# vector_methods = ('single', 'centroid', 'median', 'ward')
euclidean_methods = ('centroid', 'median', 'ward')
euclidean = self.metric == 'euclidean' and self.method in \
euclidean_methods
if euclidean or self.method == 'single':
return fastcluster.linkage_vector(self.array,
method=self.method,
metric=self.metric)
else:
linkage = fastcluster.linkage(self.array, method=self.method,
metric=self.metric)
return linkage
@property
def calculated_linkage(self):
try:
return self._calculate_linkage_fastcluster()
except ImportError:
return self._calculate_linkage_scipy()
def calculate_dendrogram(self):
"""Calculates a dendrogram based on the linkage matrix
        Made a separate function, not a property, because we don't want to
        recalculate the dendrogram every time it is accessed.
Returns
-------
dendrogram : dict
Dendrogram dictionary as returned by scipy.cluster.hierarchy
.dendrogram. The important key-value pairing is
"reordered_ind" which indicates the re-ordering of the matrix
"""
return hierarchy.dendrogram(self.linkage, no_plot=True,
color_threshold=-np.inf)
@property
def reordered_ind(self):
"""Indices of the matrix, reordered by the dendrogram"""
return self.dendrogram['leaves']
def plot(self, ax):
"""Plots a dendrogram of the similarities between data on the axes
Parameters
----------
ax : matplotlib.axes.Axes
Axes object upon which the dendrogram is plotted
"""
line_kwargs = dict(linewidths=.5, colors='k')
if self.rotate and self.axis == 0:
lines = LineCollection([list(zip(x, y))
for x, y in zip(self.dependent_coord,
self.independent_coord)],
**line_kwargs)
else:
lines = LineCollection([list(zip(x, y))
for x, y in zip(self.independent_coord,
self.dependent_coord)],
**line_kwargs)
ax.add_collection(lines)
number_of_leaves = len(self.reordered_ind)
max_dependent_coord = max(map(max, self.dependent_coord))
if self.rotate:
ax.yaxis.set_ticks_position('right')
# Constants 10 and 1.05 come from
# `scipy.cluster.hierarchy._plot_dendrogram`
ax.set_ylim(0, number_of_leaves * 10)
ax.set_xlim(0, max_dependent_coord * 1.05)
ax.invert_xaxis()
ax.invert_yaxis()
else:
# Constants 10 and 1.05 come from
# `scipy.cluster.hierarchy._plot_dendrogram`
ax.set_xlim(0, number_of_leaves * 10)
ax.set_ylim(0, max_dependent_coord * 1.05)
despine(ax=ax, bottom=True, left=True)
ax.set(xticks=self.xticks, yticks=self.yticks,
xlabel=self.xlabel, ylabel=self.ylabel)
xtl = ax.set_xticklabels(self.xticklabels)
ytl = ax.set_yticklabels(self.yticklabels, rotation='vertical')
# Force a draw of the plot to avoid matplotlib window error
if hasattr(ax.figure.canvas, "get_renderer"):
ax.figure.draw(ax.figure.canvas.get_renderer())
if len(ytl) > 0 and axis_ticklabels_overlap(ytl):
plt.setp(ytl, rotation="horizontal")
if len(xtl) > 0 and axis_ticklabels_overlap(xtl):
plt.setp(xtl, rotation="vertical")
return self
def dendrogram(data, linkage=None, axis=1, label=True, metric='euclidean',
method='average', rotate=False, ax=None):
"""Draw a tree diagram of relationships within a matrix
Parameters
----------
data : pandas.DataFrame
Rectangular data
linkage : numpy.array, optional
Linkage matrix
axis : int, optional
Which axis to use to calculate linkage. 0 is rows, 1 is columns.
label : bool, optional
If True, label the dendrogram at leaves with column or row names
metric : str, optional
Distance metric. Anything valid for scipy.spatial.distance.pdist
method : str, optional
Linkage method to use. Anything valid for
scipy.cluster.hierarchy.linkage
rotate : bool, optional
When plotting the matrix, whether to rotate it 90 degrees
counter-clockwise, so the leaves face right
ax : matplotlib axis, optional
Axis to plot on, otherwise uses current axis
Returns
-------
dendrogramplotter : _DendrogramPlotter
A Dendrogram plotter object.
Notes
-----
Access the reordered dendrogram indices with
dendrogramplotter.reordered_ind
"""
plotter = _DendrogramPlotter(data, linkage=linkage, axis=axis,
metric=metric, method=method,
label=label, rotate=rotate)
if ax is None:
ax = plt.gca()
return plotter.plot(ax=ax)
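# Minimal usage sketch (illustrative; `df` is assumed to be a small numeric DataFrame):
#   dp = dendrogram(df, axis=0, rotate=True)
#   row_order = dp.reordered_ind   # row indices of df in dendrogram leaf order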
class ClusterGrid(Grid):
def __init__(self, data, pivot_kws=None, z_score=None, standard_scale=None,
figsize=None, row_colors=None, col_colors=None, mask=None):
"""Grid object for organizing clustered heatmap input on to axes"""
if isinstance(data, pd.DataFrame):
self.data = data
else:
self.data = pd.DataFrame(data)
self.data2d = self.format_data(self.data, pivot_kws, z_score,
standard_scale)
self.mask = _matrix_mask(self.data2d, mask)
if figsize is None:
width, height = 10, 10
figsize = (width, height)
self.fig = plt.figure(figsize=figsize)
self.row_colors, self.row_color_labels = \
self._preprocess_colors(data, row_colors, axis=0)
self.col_colors, self.col_color_labels = \
self._preprocess_colors(data, col_colors, axis=1)
width_ratios = self.dim_ratios(self.row_colors,
figsize=figsize,
axis=1)
height_ratios = self.dim_ratios(self.col_colors,
figsize=figsize,
axis=0)
nrows = 3 if self.col_colors is None else 4
ncols = 3 if self.row_colors is None else 4
self.gs = gridspec.GridSpec(nrows, ncols, wspace=0.01, hspace=0.01,
width_ratios=width_ratios,
height_ratios=height_ratios)
self.ax_row_dendrogram = self.fig.add_subplot(self.gs[nrows - 1, 0:2])
self.ax_col_dendrogram = self.fig.add_subplot(self.gs[0:2, ncols - 1])
self.ax_row_dendrogram.set_axis_off()
self.ax_col_dendrogram.set_axis_off()
self.ax_row_colors = None
self.ax_col_colors = None
if self.row_colors is not None:
self.ax_row_colors = self.fig.add_subplot(
self.gs[nrows - 1, ncols - 2])
if self.col_colors is not None:
self.ax_col_colors = self.fig.add_subplot(
self.gs[nrows - 2, ncols - 1])
self.ax_heatmap = self.fig.add_subplot(self.gs[nrows - 1, ncols - 1])
# colorbar for scale to left corner
self.cax = self.fig.add_subplot(self.gs[0, 0])
self.dendrogram_row = None
self.dendrogram_col = None
def _preprocess_colors(self, data, colors, axis):
"""Preprocess {row/col}_colors to extract labels and convert colors."""
labels = None
if colors is not None:
if isinstance(colors, (pd.DataFrame, pd.Series)):
# Ensure colors match data indices
if axis == 0:
colors = colors.ix[data.index]
else:
colors = colors.ix[data.columns]
# Replace na's with background color
# TODO We should set these to transparent instead
colors = colors.fillna('white')
# Extract color values and labels from frame/series
if isinstance(colors, pd.DataFrame):
labels = list(colors.columns)
colors = colors.T.values
else:
if colors.name is None:
labels = [""]
else:
labels = [colors.name]
colors = colors.values
colors = _convert_colors(colors)
return colors, labels
def format_data(self, data, pivot_kws, z_score=None,
standard_scale=None):
"""Extract variables from data or use directly."""
# Either the data is already in 2d matrix format, or need to do a pivot
if pivot_kws is not None:
data2d = data.pivot(**pivot_kws)
else:
data2d = data
if z_score is not None and standard_scale is not None:
raise ValueError(
'Cannot perform both z-scoring and standard-scaling on data')
if z_score is not None:
data2d = self.z_score(data2d, z_score)
if standard_scale is not None:
data2d = self.standard_scale(data2d, standard_scale)
return data2d
@staticmethod
def z_score(data2d, axis=1):
"""Standarize the mean and variance of the data axis
Parameters
----------
data2d : pandas.DataFrame
Data to normalize
axis : int
Which axis to normalize across. If 0, normalize across rows, if 1,
normalize across columns.
Returns
-------
normalized : pandas.DataFrame
            Normalized data with a mean of 0 and variance of 1 across the
specified axis.
"""
if axis == 1:
z_scored = data2d
else:
z_scored = data2d.T
z_scored = (z_scored - z_scored.mean()) / z_scored.std()
if axis == 1:
return z_scored
else:
return z_scored.T
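        # Illustrative check (hypothetical frame): with axis=1, each column of
        #   pd.DataFrame({"a": [0.0, 2.0], "b": [1.0, 3.0]})
        # is shifted and scaled so its mean becomes 0 and its standard deviation 1;
        # with axis=0 the same normalization is applied to the rows instead.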
@staticmethod
def standard_scale(data2d, axis=1):
"""Divide the data by the difference between the max and min
Parameters
----------
data2d : pandas.DataFrame
Data to normalize
axis : int
Which axis to normalize across. If 0, normalize across rows, if 1,
normalize across columns.
        Returns
        -------
        standardized : pandas.DataFrame
            Standardized data, shifted by the minimum and divided by the range
            so that values span 0 to 1 across the specified axis.
"""
# Normalize these values to range from 0 to 1
if axis == 1:
standardized = data2d
else:
standardized = data2d.T
subtract = standardized.min()
standardized = (standardized - subtract) / (
standardized.max() - standardized.min())
if axis == 1:
return standardized
else:
return standardized.T
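        # Illustrative check (hypothetical frame): with axis=1 the column
        #   pd.DataFrame({"a": [0.0, 2.0, 4.0]})["a"]
        # becomes [0.0, 0.5, 1.0]: shifted by the column minimum and divided by the
        # column range, so every column ends up spanning [0, 1].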
def dim_ratios(self, side_colors, axis, figsize, side_colors_ratio=0.05):
"""Get the proportions of the figure taken up by each axes
"""
figdim = figsize[axis]
# Get resizing proportion of this figure for the dendrogram and
# colorbar, so only the heatmap gets bigger but the dendrogram stays
# the same size.
dendrogram = min(2. / figdim, .2)
# add the colorbar
colorbar_width = .8 * dendrogram
colorbar_height = .2 * dendrogram
if axis == 0:
ratios = [colorbar_width, colorbar_height]
else:
ratios = [colorbar_height, colorbar_width]
if side_colors is not None:
# Add room for the colors
ratios += [side_colors_ratio]
# Add the ratio for the heatmap itself
ratios += [.8]
return ratios
@staticmethod
def color_list_to_matrix_and_cmap(colors, ind, axis=0):
"""Turns a list of colors into a numpy matrix and matplotlib colormap
These arguments can now be plotted using heatmap(matrix, cmap)
and the provided colors will be plotted.
Parameters
----------
colors : list of matplotlib colors
Colors to label the rows or columns of a dataframe.
ind : list of ints
Ordering of the rows or columns, to reorder the original colors
by the clustered dendrogram order
axis : int
Which axis this is labeling
Returns
-------
matrix : numpy.array
A numpy array of integer values, where each corresponds to a color
from the originally provided list of colors
cmap : matplotlib.colors.ListedColormap
"""
# check for nested lists/color palettes.
# Will fail if matplotlib color is list not tuple
if any(issubclass(type(x), list) for x in colors):
all_colors = set(itertools.chain(*colors))
n = len(colors)
m = len(colors[0])
else:
all_colors = set(colors)
n = 1
m = len(colors)
colors = [colors]
color_to_value = dict((col, i) for i, col in enumerate(all_colors))
matrix = np.array([color_to_value[c]
for color in colors for c in color])
shape = (n, m)
matrix = matrix.reshape(shape)
matrix = matrix[:, ind]
if axis == 0:
# row-side:
matrix = matrix.T
cmap = mpl.colors.ListedColormap(all_colors)
return matrix, cmap
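        # Illustrative example (hypothetical input): colors = ["r", "b", "r"] with
        # ind = [2, 0, 1] produces an integer matrix whose entries index into a
        # ListedColormap built from the unique colors, reordered by `ind` (and
        # transposed when axis=0) so it lines up with the clustered heatmap.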
def savefig(self, *args, **kwargs):
if 'bbox_inches' not in kwargs:
kwargs['bbox_inches'] = 'tight'
self.fig.savefig(*args, **kwargs)
def plot_dendrograms(self, row_cluster, col_cluster, metric, method,
row_linkage, col_linkage):
# Plot the row dendrogram
if row_cluster:
self.dendrogram_row = dendrogram(
self.data2d, metric=metric, method=method, label=False, axis=0,
ax=self.ax_row_dendrogram, rotate=True, linkage=row_linkage)
else:
self.ax_row_dendrogram.set_xticks([])
self.ax_row_dendrogram.set_yticks([])
        # Plot the column dendrogram
if col_cluster:
self.dendrogram_col = dendrogram(
self.data2d, metric=metric, method=method, label=False,
axis=1, ax=self.ax_col_dendrogram, linkage=col_linkage)
else:
self.ax_col_dendrogram.set_xticks([])
self.ax_col_dendrogram.set_yticks([])
despine(ax=self.ax_row_dendrogram, bottom=True, left=True)
despine(ax=self.ax_col_dendrogram, bottom=True, left=True)
def plot_colors(self, xind, yind, **kws):
"""Plots color labels between the dendrogram and the heatmap
Parameters
----------
heatmap_kws : dict
Keyword arguments heatmap
"""
# Remove any custom colormap and centering
kws = kws.copy()
kws.pop('cmap', None)
kws.pop('center', None)
kws.pop('annot', None)
kws.pop('vmin', None)
kws.pop('vmax', None)
kws.pop('robust', None)
kws.pop('xticklabels', None)
kws.pop('yticklabels', None)
# Plot the row colors
if self.row_colors is not None:
matrix, cmap = self.color_list_to_matrix_and_cmap(
self.row_colors, yind, axis=0)
# Get row_color labels
if self.row_color_labels is not None:
row_color_labels = self.row_color_labels
else:
row_color_labels = False
heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,
xticklabels=row_color_labels, yticklabels=False, **kws)
# Adjust rotation of labels
if row_color_labels is not False:
plt.setp(self.ax_row_colors.get_xticklabels(), rotation=90)
else:
despine(self.ax_row_colors, left=True, bottom=True)
# Plot the column colors
if self.col_colors is not None:
matrix, cmap = self.color_list_to_matrix_and_cmap(
self.col_colors, xind, axis=1)
# Get col_color labels
if self.col_color_labels is not None:
col_color_labels = self.col_color_labels
else:
col_color_labels = False
heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,
xticklabels=False, yticklabels=col_color_labels, **kws)
# Adjust rotation of labels, place on right side
if col_color_labels is not False:
self.ax_col_colors.yaxis.tick_right()
plt.setp(self.ax_col_colors.get_yticklabels(), rotation=0)
else:
despine(self.ax_col_colors, left=True, bottom=True)
def plot_matrix(self, colorbar_kws, xind, yind, **kws):
self.data2d = self.data2d.iloc[yind, xind]
self.mask = self.mask.iloc[yind, xind]
# Try to reorganize specified tick labels, if provided
xtl = kws.pop("xticklabels", "auto")
try:
xtl = np.asarray(xtl)[xind]
except (TypeError, IndexError):
pass
ytl = kws.pop("yticklabels", "auto")
try:
ytl = np.asarray(ytl)[yind]
except (TypeError, IndexError):
pass
heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.cax,
cbar_kws=colorbar_kws, mask=self.mask,
xticklabels=xtl, yticklabels=ytl, **kws)
ytl = self.ax_heatmap.get_yticklabels()
ytl_rot = None if not ytl else ytl[0].get_rotation()
self.ax_heatmap.yaxis.set_ticks_position('right')
self.ax_heatmap.yaxis.set_label_position('right')
if ytl_rot is not None:
ytl = self.ax_heatmap.get_yticklabels()
plt.setp(ytl, rotation=ytl_rot)
def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,
row_linkage, col_linkage, **kws):
colorbar_kws = {} if colorbar_kws is None else colorbar_kws
self.plot_dendrograms(row_cluster, col_cluster, metric, method,
row_linkage=row_linkage, col_linkage=col_linkage)
try:
xind = self.dendrogram_col.reordered_ind
except AttributeError:
xind = np.arange(self.data2d.shape[1])
try:
yind = self.dendrogram_row.reordered_ind
except AttributeError:
yind = np.arange(self.data2d.shape[0])
self.plot_colors(xind, yind, **kws)
self.plot_matrix(colorbar_kws, xind, yind, **kws)
return self
def clustermap(data, pivot_kws=None, method='average', metric='euclidean',
z_score=None, standard_scale=None, figsize=None, cbar_kws=None,
row_cluster=True, col_cluster=True,
row_linkage=None, col_linkage=None,
row_colors=None, col_colors=None, mask=None, **kwargs):
"""Plot a matrix dataset as a hierarchically-clustered heatmap.
Parameters
----------
data: 2D array-like
Rectangular data for clustering. Cannot contain NAs.
pivot_kws : dict, optional
If `data` is a tidy dataframe, can provide keyword arguments for
pivot to create a rectangular dataframe.
method : str, optional
Linkage method to use for calculating clusters.
See scipy.cluster.hierarchy.linkage documentation for more information:
https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html
metric : str, optional
Distance metric to use for the data. See
scipy.spatial.distance.pdist documentation for more options
https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.pdist.html
To use different metrics (or methods) for rows and columns, you may
construct each linkage matrix yourself and provide them as
{row,col}_linkage.
z_score : int or None, optional
Either 0 (rows) or 1 (columns). Whether or not to calculate z-scores
for the rows or the columns. Z scores are: z = (x - mean)/std, so
values in each row (column) will get the mean of the row (column)
subtracted, then divided by the standard deviation of the row (column).
This ensures that each row (column) has mean of 0 and variance of 1.
standard_scale : int or None, optional
Either 0 (rows) or 1 (columns). Whether or not to standardize that
dimension, meaning for each row or column, subtract the minimum and
divide each by its maximum.
    figsize : tuple of two ints, optional
Size of the figure to create.
cbar_kws : dict, optional
Keyword arguments to pass to ``cbar_kws`` in ``heatmap``, e.g. to
add a label to the colorbar.
{row,col}_cluster : bool, optional
If True, cluster the {rows, columns}.
{row,col}_linkage : numpy.array, optional
Precomputed linkage matrix for the rows or columns. See
scipy.cluster.hierarchy.linkage for specific formats.
{row,col}_colors : list-like or pandas DataFrame/Series, optional
        List of colors for labeling either the rows or columns. Useful for
        evaluating whether samples within a group are clustered together. Can
        use nested lists or a DataFrame for multiple levels of color labeling.
If given as a DataFrame or Series, labels for the colors are extracted
from the DataFrames column names or from the name of the Series.
DataFrame/Series colors are also matched to the data by their
index, ensuring colors are drawn in the correct order.
mask : boolean array or DataFrame, optional
If passed, data will not be shown in cells where ``mask`` is True.
Cells with missing values are automatically masked. Only used for
visualizing, not for calculating.
kwargs : other keyword arguments
All other keyword arguments are passed to ``sns.heatmap``
Returns
-------
clustergrid : ClusterGrid
A ClusterGrid instance.
Notes
-----
The returned object has a ``savefig`` method that should be used if you
want to save the figure object without clipping the dendrograms.
To access the reordered row indices, use:
``clustergrid.dendrogram_row.reordered_ind``
    To access the reordered column indices, use:
``clustergrid.dendrogram_col.reordered_ind``
Examples
--------
Plot a clustered heatmap:
.. plot::
:context: close-figs
>>> import seaborn as sns; sns.set(color_codes=True)
>>> iris = sns.load_dataset("iris")
>>> species = iris.pop("species")
>>> g = sns.clustermap(iris)
Use a different similarity metric:
.. plot::
:context: close-figs
>>> g = sns.clustermap(iris, metric="correlation")
Use a different clustering method:
.. plot::
:context: close-figs
>>> g = sns.clustermap(iris, method="single")
Use a different colormap and ignore outliers in colormap limits:
.. plot::
:context: close-figs
>>> g = sns.clustermap(iris, cmap="mako", robust=True)
Change the size of the figure:
.. plot::
:context: close-figs
>>> g = sns.clustermap(iris, figsize=(6, 7))
Plot one of the axes in its original organization:
.. plot::
:context: close-figs
>>> g = sns.clustermap(iris, col_cluster=False)
Add colored labels:
.. plot::
:context: close-figs
>>> lut = dict(zip(species.unique(), "rbg"))
>>> row_colors = species.map(lut)
>>> g = sns.clustermap(iris, row_colors=row_colors)
Standardize the data within the columns:
.. plot::
:context: close-figs
>>> g = sns.clustermap(iris, standard_scale=1)
Normalize the data within the rows:
.. plot::
:context: close-figs
>>> g = sns.clustermap(iris, z_score=0)
"""
plotter = ClusterGrid(data, pivot_kws=pivot_kws, figsize=figsize,
row_colors=row_colors, col_colors=col_colors,
z_score=z_score, standard_scale=standard_scale,
mask=mask)
return plotter.plot(metric=metric, method=method,
colorbar_kws=cbar_kws,
row_cluster=row_cluster, col_cluster=col_cluster,
row_linkage=row_linkage, col_linkage=col_linkage,
**kwargs)
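A minimal usage sketch of the function above (assuming seaborn with its bundled "iris" example dataset; the output filename is arbitrary), illustrating the ``savefig`` method and the reordered indices mentioned in the Notes section:
import seaborn as sns
iris = sns.load_dataset("iris").drop(columns="species")  # keep numeric columns only
g = sns.clustermap(iris, z_score=0, cmap="mako")
row_order = g.dendrogram_row.reordered_ind  # row order after clustering
col_order = g.dendrogram_col.reordered_ind  # column order after clustering
g.savefig("iris_clustermap.png")  # saving via the grid avoids clipping the dendrograms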
| 36.198925
| 97
| 0.592179
|
c6e31c27fbb5c2121b5ac1d8cbc87c8f10872a92
| 584
|
py
|
Python
|
mining/compute_probabilities_of_finality.py
|
kevaundray/research
|
16f20848c614b580071fed3d2ff1dc69688fa4f4
|
[
"MIT"
] | 1,351
|
2015-09-22T08:17:10.000Z
|
2022-03-31T22:48:07.000Z
|
mining/compute_probabilities_of_finality.py
|
kevaundray/research
|
16f20848c614b580071fed3d2ff1dc69688fa4f4
|
[
"MIT"
] | 42
|
2016-08-31T14:43:29.000Z
|
2021-12-05T23:10:31.000Z
|
mining/compute_probabilities_of_finality.py
|
LaudateCorpus1/research
|
6e8b7b367e7f1b18b4b92151df01dfeaa0774a23
|
[
"MIT"
] | 334
|
2015-09-20T10:15:23.000Z
|
2022-03-28T17:46:57.000Z
|
import math
# Average block time in seconds and the attacker's share of hashpower.
BLKTIME = 17
X = 0.28
# Precompute factorials: faclog[n] = n!
faclog = [1]
for i in range(5000):
    faclog.append(faclog[-1] * len(faclog))
def fac(x):
    return faclog[x]
def poisson(expected, actual):
    # Poisson pmf, evaluated via logs to avoid overflowing the large factorials.
    if expected == 0:
        return 1 if actual == 0 else 0
    return 2.718281828 ** (-expected + actual * math.log(expected) - math.log(fac(actual)))
def p_we_win(k, x):
    # Gambler's-ruin probability that an attacker with hashpower share x
    # never catches up from k blocks behind.
    return 1 - (x / (1.0 - x)) ** k
def p_we_win_after(s):
    # Probability of finality after s seconds: average p_we_win over the
    # Poisson-distributed number of blocks produced in that window.
    p = 0
    for i in range(4000):
        p += poisson(s * 1.0 / BLKTIME, i) * p_we_win(i, X)
    return p
for i in range(0, 7200, 12):
    print(i, p_we_win_after(i))
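Read as an equation (an interpretation of the script above, not text from the original file), the value printed for each elapsed time s is
P_{\mathrm{win}}(s) = \sum_{i=0}^{\infty} \frac{(s/T)^{i}\, e^{-s/T}}{i!} \left( 1 - \left( \frac{X}{1 - X} \right)^{i} \right),
\qquad T = \mathrm{BLKTIME} = 17\ \text{s}, \quad X = 0.28,
where the script truncates the sum at i = 4000 and evaluates s in 12-second steps up to two hours.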
| 20.857143
| 91
| 0.590753
|
66409d417fd78fc45ab30d9be9a83009f9b61d51
| 11,333
|
py
|
Python
|
src/mp_api/core/client.py
|
jmmshn/api
|
5254a453f6ec749793639e4ec08bea14628c7dc3
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
src/mp_api/core/client.py
|
jmmshn/api
|
5254a453f6ec749793639e4ec08bea14628c7dc3
|
[
"BSD-3-Clause-LBNL"
] | 159
|
2020-11-16T16:02:31.000Z
|
2022-03-28T15:03:38.000Z
|
src/mp_api/core/client.py
|
jmmshn/api
|
5254a453f6ec749793639e4ec08bea14628c7dc3
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
# coding: utf-8
"""
This module provides classes to interface with the Materials Project REST
API v3 to enable the creation of data structures and pymatgen objects using
Materials Project data.
"""
import json
import platform
import sys
from json import JSONDecodeError
from typing import Dict, Optional, List, Union
from urllib.parse import urljoin
from os import environ
import warnings
import requests
from monty.json import MontyDecoder
from pymatgen import __version__ as pmg_version # type: ignore
from requests.exceptions import RequestException
from pydantic import BaseModel
# TODO: think about how to migrate from PMG_MAPI_KEY
DEFAULT_API_KEY = environ.get("MP_API_KEY", None)
DEFAULT_ENDPOINT = environ.get("MP_API_ENDPOINT", "https://api.materialsproject.org/")
class BaseRester:
"""
Base client class with core stubs
"""
suffix: Optional[str] = None
document_model: Optional[BaseModel] = None
def __init__(
self,
api_key=DEFAULT_API_KEY,
endpoint=DEFAULT_ENDPOINT,
debug=True,
version=None,
include_user_agent=True,
):
"""
Args:
api_key (str): A String API key for accessing the MaterialsProject
REST interface. Please obtain your API key at
https://www.materialsproject.org/dashboard. If this is None,
the code will check if there is a "PMG_MAPI_KEY" setting.
                If so, it will use that environment variable. This makes it
                easier for heavy users to simply add this environment variable to
                their setups so that MPRester can then be called without any arguments.
endpoint (str): Url of endpoint to access the MaterialsProject REST
interface. Defaults to the standard Materials Project REST
address at "https://api.materialsproject.org", but
can be changed to other urls implementing a similar interface.
version (Optional[str]): Specify the database snapshot to query.
include_user_agent (bool): If True, will include a user agent with the
HTTP request including information on pymatgen and system version
making the API request. This helps MP support pymatgen users, and
is similar to what most web browsers send with each page request.
Set to False to disable the user agent.
"""
self.api_key = api_key
self.endpoint = endpoint
self.debug = debug
self.version = version
if self.suffix:
self.endpoint = urljoin(self.endpoint, self.suffix)
if not self.endpoint.endswith("/"):
self.endpoint += "/"
self.session = requests.Session()
self.session.headers = {"x-api-key": self.api_key}
if include_user_agent:
pymatgen_info = "pymatgen/" + pmg_version
python_info = "Python/{}.{}.{}".format(
sys.version_info.major, sys.version_info.minor, sys.version_info.micro
)
platform_info = "{}/{}".format(platform.system(), platform.release())
self.session.headers["user-agent"] = "{} ({} {})".format(
pymatgen_info, python_info, platform_info
)
def __enter__(self):
"""
Support for "with" context.
"""
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Support for "with" context.
"""
self.session.close()
def _make_request(self, sub_url, monty_decode: bool = True):
"""
Helper function to make requests
Arguments:
sub_url: the URL to request
monty_decode: Decode the data using monty into python objects
"""
print("_make_request is going away", sub_url)
if not self.endpoint.endswith("/"):
self.endpoint += "/"
url = self.endpoint + sub_url
if self.debug:
print(f"URL: {url}")
try:
response = self.session.get(url, verify=True)
if response.status_code == 200:
if monty_decode:
data = json.loads(response.text, cls=MontyDecoder)
else:
data = json.loads(response.text)
return data
else:
try:
data = json.loads(response.text)["detail"]
except (JSONDecodeError, KeyError):
data = "Response {}".format(response.text)
if isinstance(data, str):
message = data
else:
try:
message = ", ".join(
"{} - {}".format(entry["loc"][1], entry["msg"])
for entry in data
)
except (KeyError, IndexError):
message = str(data)
raise MPRestError(
f"REST query returned with error status code {response.status_code} "
f"on URL {response.url} with message:\n{message}"
)
except RequestException as ex:
raise MPRestError(str(ex))
def _query_resource(
self,
criteria: Optional[Dict] = None,
fields: Optional[List[str]] = None,
monty_decode: bool = True,
suburl: Optional[str] = None,
):
"""
Query the endpoint for a Resource containing a list of documents
and meta information about pagination and total document count.
For the end-user, methods .query() and .count() are intended to be
easier to use.
Arguments:
criteria: dictionary of criteria to filter down
fields: list of fields to return
monty_decode: Decode the data using monty into python objects
suburl: make a request to a specified sub-url
Returns:
A Resource, a dict with two keys, "data" containing a list of documents, and
"meta" containing meta information, e.g. total number of documents
available.
"""
if criteria:
criteria = {k: v for k, v in criteria.items() if v is not None}
else:
criteria = {}
if fields:
criteria["fields"] = ",".join(fields)
try:
url = self.endpoint
if suburl:
url = urljoin(self.endpoint, suburl)
if not url.endswith("/"):
url += "/"
response = self.session.get(url, verify=True, params=criteria)
if response.status_code == 200:
if monty_decode:
data = json.loads(response.text, cls=MontyDecoder)
else:
data = json.loads(response.text)
if self.document_model:
data["data"] = [self.document_model.construct(**d) for d in data["data"]] # type: ignore
return data
else:
try:
data = json.loads(response.text)["detail"]
except (JSONDecodeError, KeyError):
data = "Response {}".format(response.text)
if isinstance(data, str):
message = data
else:
try:
message = ", ".join(
"{} - {}".format(entry["loc"][1], entry["msg"])
for entry in data
)
except (KeyError, IndexError):
message = str(data)
raise MPRestError(
f"REST query returned with error status code {response.status_code} "
f"on URL {response.url} with message:\n{message}"
)
except RequestException as ex:
raise MPRestError(str(ex))
def query(
self,
criteria: Optional[Dict] = None,
fields: Optional[List[str]] = None,
monty_decode: bool = True,
suburl: Optional[str] = None,
):
"""
Query the endpoint for a list of documents.
Arguments:
criteria: dictionary of criteria to filter down
fields: list of fields to return
monty_decode: Decode the data using monty into python objects
suburl: make a request to a specified sub-url
Returns:
A list of documents
"""
return self._query_resource(
criteria=criteria, fields=fields, monty_decode=monty_decode, suburl=suburl
).get("data")
def query_by_task_id(
self,
task_id,
fields: Optional[List[str]] = None,
monty_decode: bool = True,
version: Optional[str] = None,
):
"""
Query the endpoint for a single document.
Arguments:
task_id: a task_id key
fields: list of fields to return
monty_decode: Decode the data using monty into python objects
Returns:
A single document.
"""
if fields is None:
criteria = {"all_fields": True, "limit": 1} # type: dict
else:
criteria = {"limit": 1}
if version:
criteria["version"] = version
if isinstance(fields, str):
fields = (fields,)
results = self.query(
criteria=criteria, fields=fields, monty_decode=monty_decode, suburl=task_id
)
if not results:
warnings.warn(f"No result for record {task_id}.")
return
elif len(results) > 1:
raise ValueError(
f"Multiple records for {task_id}, this shouldn't happen. Please report as a bug."
)
else:
return results[0]
def count(self, criteria: Optional[Dict] = None) -> Union[int, str]:
"""
Return a count of total documents.
:param criteria: As in .query()
:return:
"""
try:
criteria = criteria or {}
            criteria["limit"] = 1  # we only need the meta information, so ask for a single document
results = self._query_resource(
criteria=criteria, monty_decode=False
) # do not waste cycles Monty decoding
return results["meta"]["total"]
except Exception:
return "unknown"
@property
def available_fields(self) -> List[str]:
if self.document_model is None:
return ["Unknown fields."]
return list(self.document_model().fields.keys()) # type: ignore
def __repr__(self):
return f"<{self.__class__.__name__} {self.endpoint}>"
def __str__(self):
if self.document_model is None:
return self.__repr__()
return (
f"{self.__class__.__name__} connected to {self.endpoint}\n\n"
f"Available fields: {', '.join(self.available_fields)}\n\n"
f"Available documents: {self.count()}"
)
class MPRestError(Exception):
"""
Raised when the query has problems, e.g., bad query format.
"""
| 33.430678
| 109
| 0.552016
|
a4cb52605fe8e8c60ad805fef6821b8c6f16b98e
| 846
|
py
|
Python
|
catalog/forms.py
|
T1mL3arn/MDN-Django-Tutorial
|
4738e7acce61a163f119817a35b22c5c3b98bf5a
|
[
"MIT"
] | 1
|
2019-01-10T10:45:48.000Z
|
2019-01-10T10:45:48.000Z
|
catalog/forms.py
|
T1mL3arn/MDN-Django-Tutorial
|
4738e7acce61a163f119817a35b22c5c3b98bf5a
|
[
"MIT"
] | 3
|
2020-02-12T03:10:44.000Z
|
2021-06-20T05:55:51.000Z
|
catalog/forms.py
|
T1mL3arn/MDN-Django-Tutorial
|
4738e7acce61a163f119817a35b22c5c3b98bf5a
|
[
"MIT"
] | null | null | null |
import datetime
from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
class RenewBookForm(forms.Form):
renewal_date = forms.DateField(help_text='Enter a date between now and 4 weeks (default 3).')
def clean_renewal_date(self):
data = self.cleaned_data['renewal_date']
today = datetime.date.today()
# Check if a date is not in the past.
if data < today:
raise ValidationError(_('Invalid date - renewal in past'))
# Check if a date is in the allowed range (+4 weeks from today).
if data > today + datetime.timedelta(weeks=4):
raise ValidationError(_('Invalid date - renewal more than 4 weeks ahead'))
# Remember to always return the cleaned data.
return data
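A brief sketch of how this form is typically handled in a view, following the standard Django form-handling pattern; the view name, template path, and redirect target are illustrative assumptions rather than code from this file:
from django.shortcuts import render, redirect
from catalog.forms import RenewBookForm
def renew_book_librarian(request, pk):
    if request.method == 'POST':
        form = RenewBookForm(request.POST)  # bind submitted data
        if form.is_valid():  # runs clean_renewal_date() among the other checks
            renewal_date = form.cleaned_data['renewal_date']
            # ... save renewal_date on the borrowed book instance here ...
            return redirect('all-borrowed')  # URL name is an assumption
    else:
        form = RenewBookForm()  # unbound form for the initial GET
    return render(request, 'catalog/book_renew_librarian.html', {'form': form})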
| 35.25
| 97
| 0.676123
|