| Column | Type | Range / values |
|---|---|---|
| hexsha | string | lengths 40–40 |
| size | int64 | 7–1.04M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 4–247 |
| max_stars_repo_name | string | lengths 4–125 |
| max_stars_repo_head_hexsha | string | lengths 40–78 |
| max_stars_repo_licenses | list | lengths 1–10 |
| max_stars_count | int64 | 1–368k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | lengths 24–24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | lengths 24–24 ⌀ |
| max_issues_repo_path | string | lengths 4–247 |
| max_issues_repo_name | string | lengths 4–125 |
| max_issues_repo_head_hexsha | string | lengths 40–78 |
| max_issues_repo_licenses | list | lengths 1–10 |
| max_issues_count | int64 | 1–116k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | lengths 24–24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | lengths 24–24 ⌀ |
| max_forks_repo_path | string | lengths 4–247 |
| max_forks_repo_name | string | lengths 4–125 |
| max_forks_repo_head_hexsha | string | lengths 40–78 |
| max_forks_repo_licenses | list | lengths 1–10 |
| max_forks_count | int64 | 1–105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | lengths 24–24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | lengths 24–24 ⌀ |
| content | string | lengths 1–1.04M |
| avg_line_length | float64 | 1.77–618k |
| max_line_length | int64 | 1–1.02M |
| alphanum_fraction | float64 | 0–1 |
| original_content | string | lengths 7–1.04M |
| filtered:remove_function_no_docstring | int64 | -102–942k |
| filtered:remove_class_no_docstring | int64 | -354–977k |
| filtered:remove_delete_markers | int64 | 0–60.1k |

⌀ marks columns that may contain null values.
hexsha: 9629e7a7a9bef340102ccfb4ccbdd868576c42f6 | size: 7,336 | ext: py | lang: Python
max_stars: path=cirq-core/cirq/sim/clifford/act_on_clifford_tableau_args.py, repo=livenson/Cirq, head=b2fa642895089fba385999d675ab65d57a53e0df, licenses=["Apache-2.0"], count=1, events=2022-02-02T07:13:54.000Z to 2022-02-02T07:13:54.000Z
max_issues: path=cirq-core/cirq/sim/clifford/act_on_clifford_tableau_args.py, repo=bradyb/Cirq, head=610b0d4ea3a7862169610797266734c844ddcc1f, licenses=["Apache-2.0"], count=null, events=null
max_forks: path=cirq-core/cirq/sim/clifford/act_on_clifford_tableau_args.py, repo=bradyb/Cirq, head=610b0d4ea3a7862169610797266734c844ddcc1f, licenses=["Apache-2.0"], count=null, events=null
content:
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A protocol for implementing high performance clifford tableau evolutions
for Clifford Simulator."""
from typing import Any, Dict, TYPE_CHECKING, List, Sequence
import numpy as np
from cirq.ops import common_gates
from cirq.ops import global_phase_op
from cirq.sim.clifford.act_on_stabilizer_args import ActOnStabilizerArgs
if TYPE_CHECKING:
import cirq
class ActOnCliffordTableauArgs(ActOnStabilizerArgs):
"""State and context for an operation acting on a clifford tableau."""
def __init__(
self,
tableau: 'cirq.CliffordTableau',
prng: np.random.RandomState,
log_of_measurement_results: Dict[str, Any],
qubits: Sequence['cirq.Qid'] = None,
):
"""Inits ActOnCliffordTableauArgs.
Args:
tableau: The CliffordTableau to act on. Operations are expected to
perform inplace edits of this object.
qubits: Determines the canonical ordering of the qubits. This
is often used in specifying the initial state, i.e. the
ordering of the computational basis states.
prng: The pseudo random number generator to use for probabilistic
effects.
log_of_measurement_results: A mutable object that measurements are
being recorded into.
"""
super().__init__(prng, qubits, log_of_measurement_results)
self.tableau = tableau
def _perform_measurement(self, qubits: Sequence['cirq.Qid']) -> List[int]:
"""Returns the measurement from the tableau."""
return [self.tableau._measure(self.qubit_map[q], self.prng) for q in qubits]
avg_line_length: 40.98324 | max_line_length: 91 | alphanum_fraction: 0.594193
original_content:
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A protocol for implementing high performance clifford tableau evolutions
for Clifford Simulator."""
from typing import Any, Dict, TYPE_CHECKING, List, Sequence
import numpy as np
from cirq.ops import common_gates
from cirq.ops import global_phase_op
from cirq.sim.clifford.act_on_stabilizer_args import ActOnStabilizerArgs
if TYPE_CHECKING:
import cirq
class ActOnCliffordTableauArgs(ActOnStabilizerArgs):
"""State and context for an operation acting on a clifford tableau."""
def __init__(
self,
tableau: 'cirq.CliffordTableau',
prng: np.random.RandomState,
log_of_measurement_results: Dict[str, Any],
qubits: Sequence['cirq.Qid'] = None,
):
"""Inits ActOnCliffordTableauArgs.
Args:
tableau: The CliffordTableau to act on. Operations are expected to
perform inplace edits of this object.
qubits: Determines the canonical ordering of the qubits. This
is often used in specifying the initial state, i.e. the
ordering of the computational basis states.
prng: The pseudo random number generator to use for probabilistic
effects.
log_of_measurement_results: A mutable object that measurements are
being recorded into.
"""
super().__init__(prng, qubits, log_of_measurement_results)
self.tableau = tableau
def _perform_measurement(self, qubits: Sequence['cirq.Qid']) -> List[int]:
"""Returns the measurement from the tableau."""
return [self.tableau._measure(self.qubit_map[q], self.prng) for q in qubits]
def _on_copy(self, target: 'ActOnCliffordTableauArgs', deep_copy_buffers: bool = True):
target.tableau = self.tableau.copy()
def sample(
self,
qubits: Sequence['cirq.Qid'],
repetitions: int = 1,
seed: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None,
) -> np.ndarray:
# Unnecessary for now but can be added later if there is a use case.
raise NotImplementedError()
def _x(self, g: common_gates.XPowGate, axis: int):
exponent = g.exponent
if exponent % 2 == 0:
return
if exponent % 0.5 != 0.0:
raise ValueError('X exponent must be half integer') # coverage: ignore
tableau = self.tableau
effective_exponent = exponent % 2
if effective_exponent == 0.5:
tableau.xs[:, axis] ^= tableau.zs[:, axis]
tableau.rs[:] ^= tableau.xs[:, axis] & tableau.zs[:, axis]
elif effective_exponent == 1:
tableau.rs[:] ^= tableau.zs[:, axis]
elif effective_exponent == 1.5:
tableau.rs[:] ^= tableau.xs[:, axis] & tableau.zs[:, axis]
tableau.xs[:, axis] ^= tableau.zs[:, axis]
def _y(self, g: common_gates.YPowGate, axis: int):
exponent = g.exponent
if exponent % 2 == 0:
return
if exponent % 0.5 != 0.0:
raise ValueError('Y exponent must be half integer') # coverage: ignore
tableau = self.tableau
effective_exponent = exponent % 2
if effective_exponent == 0.5:
tableau.rs[:] ^= tableau.xs[:, axis] & (~tableau.zs[:, axis])
(tableau.xs[:, axis], tableau.zs[:, axis]) = (
tableau.zs[:, axis].copy(),
tableau.xs[:, axis].copy(),
)
elif effective_exponent == 1:
tableau.rs[:] ^= tableau.xs[:, axis] ^ tableau.zs[:, axis]
elif effective_exponent == 1.5:
tableau.rs[:] ^= ~(tableau.xs[:, axis]) & tableau.zs[:, axis]
(tableau.xs[:, axis], tableau.zs[:, axis]) = (
tableau.zs[:, axis].copy(),
tableau.xs[:, axis].copy(),
)
def _z(self, g: common_gates.ZPowGate, axis: int):
exponent = g.exponent
if exponent % 2 == 0:
return
if exponent % 0.5 != 0.0:
raise ValueError('Z exponent must be half integer') # coverage: ignore
tableau = self.tableau
effective_exponent = exponent % 2
if effective_exponent == 0.5:
tableau.rs[:] ^= tableau.xs[:, axis] & tableau.zs[:, axis]
tableau.zs[:, axis] ^= tableau.xs[:, axis]
elif effective_exponent == 1:
tableau.rs[:] ^= tableau.xs[:, axis]
elif effective_exponent == 1.5:
tableau.rs[:] ^= tableau.xs[:, axis] & (~tableau.zs[:, axis])
tableau.zs[:, axis] ^= tableau.xs[:, axis]
def _h(self, g: common_gates.HPowGate, axis: int):
exponent = g.exponent
if exponent % 2 == 0:
return
if exponent % 1 != 0:
raise ValueError('H exponent must be integer') # coverage: ignore
self._y(common_gates.YPowGate(exponent=0.5), axis)
self._x(common_gates.XPowGate(), axis)
def _cz(self, g: common_gates.CZPowGate, control_axis: int, target_axis: int):
exponent = g.exponent
if exponent % 2 == 0:
return
if exponent % 1 != 0:
raise ValueError('CZ exponent must be integer') # coverage: ignore
tableau = self.tableau
(tableau.xs[:, target_axis], tableau.zs[:, target_axis]) = (
tableau.zs[:, target_axis].copy(),
tableau.xs[:, target_axis].copy(),
)
tableau.rs[:] ^= tableau.xs[:, target_axis] & tableau.zs[:, target_axis]
tableau.rs[:] ^= (
tableau.xs[:, control_axis]
& tableau.zs[:, target_axis]
& (~(tableau.xs[:, target_axis] ^ tableau.zs[:, control_axis]))
)
tableau.xs[:, target_axis] ^= tableau.xs[:, control_axis]
tableau.zs[:, control_axis] ^= tableau.zs[:, target_axis]
(tableau.xs[:, target_axis], tableau.zs[:, target_axis]) = (
tableau.zs[:, target_axis].copy(),
tableau.xs[:, target_axis].copy(),
)
tableau.rs[:] ^= tableau.xs[:, target_axis] & tableau.zs[:, target_axis]
def _cx(self, g: common_gates.CXPowGate, control_axis: int, target_axis: int):
exponent = g.exponent
if exponent % 2 == 0:
return
if exponent % 1 != 0:
raise ValueError('CX exponent must be integer') # coverage: ignore
tableau = self.tableau
tableau.rs[:] ^= (
tableau.xs[:, control_axis]
& tableau.zs[:, target_axis]
& (~(tableau.xs[:, target_axis] ^ tableau.zs[:, control_axis]))
)
tableau.xs[:, target_axis] ^= tableau.xs[:, control_axis]
tableau.zs[:, control_axis] ^= tableau.zs[:, target_axis]
def _global_phase(self, g: global_phase_op.GlobalPhaseGate):
pass
filtered:remove_function_no_docstring: 4,852 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 243
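As a side note on the tableau updates in `_x` above: for a full X gate (exponent 1) the only effect is to flip the sign of every stabilizer generator that has a Z on the target qubit. Below is a minimal NumPy sketch of just that branch, on hypothetical 2-qubit tableau arrays; the `apply_x` helper and the arrays are illustrative, not the Cirq API.

```python
import numpy as np

# Hypothetical 2-qubit tableau fragments: one row per stabilizer generator,
# one column per qubit; rs holds the sign bit of each generator.
xs = np.array([[1, 0], [0, 0]], dtype=bool)   # generator 0 = X0, generator 1 = Z1
zs = np.array([[0, 0], [0, 1]], dtype=bool)
rs = np.array([0, 0], dtype=bool)

def apply_x(rs, zs, axis):
    # X anticommutes with Z, so every generator with a Z on `axis` flips sign,
    # mirroring the `effective_exponent == 1` branch of _x above.
    rs ^= zs[:, axis]

apply_x(rs, zs, axis=1)
print(rs)  # [False  True] -> Z1 became -Z1, X0 is unchanged
```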
hexsha: b9ae65bc8f8cdccfa8ec57ea6f590f702a485148 | size: 5,875 | ext: py | lang: Python
max_stars: path=openGaussBase/testcase/CONNECTORS/PYTHON/PYOG/Opengauss_Function_Connect_Python_Case0005.py, repo=opengauss-mirror/Yat, head=aef107a8304b94e5d99b4f1f36eb46755eb8919e, licenses=["MulanPSL-1.0"], count=null, events=null
max_issues: path=openGaussBase/testcase/CONNECTORS/PYTHON/PYOG/Opengauss_Function_Connect_Python_Case0005.py, repo=opengauss-mirror/Yat, head=aef107a8304b94e5d99b4f1f36eb46755eb8919e, licenses=["MulanPSL-1.0"], count=null, events=null
max_forks: path=openGaussBase/testcase/CONNECTORS/PYTHON/PYOG/Opengauss_Function_Connect_Python_Case0005.py, repo=opengauss-mirror/Yat, head=aef107a8304b94e5d99b4f1f36eb46755eb8919e, licenses=["MulanPSL-1.0"], count=null, events=null
content:
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : python驱动pyog
Case Name : 普通模式连接数据库,连接信息错误
Description :
1.创建库、用户,用户密码不含特殊字符,并赋权
2.配置pg_hba入口
3.连接数据库
Expect :
1.执行成功
2.执行成功
3.连接失败,有相应提示信息
History :
"""
import os
import re
import unittest
import py_opengauss
from py_opengauss.exceptions import AuthenticationSpecificationError, \
ClientCannotConnectError, ConnectionRejectionError, ServerNotReadyError
from yat.test import Node
from yat.test import macro
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
avg_line_length: 37.660256 | max_line_length: 84 | alphanum_fraction: 0.621787
original_content:
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : python驱动pyog
Case Name : 普通模式连接数据库,连接信息错误
Description :
1.创建库、用户,用户密码不含特殊字符,并赋权
2.配置pg_hba入口
3.连接数据库
Expect :
1.执行成功
2.执行成功
3.连接失败,有相应提示信息
History :
"""
import os
import re
import unittest
import py_opengauss
from py_opengauss.exceptions import AuthenticationSpecificationError, \
ClientCannotConnectError, ConnectionRejectionError, ServerNotReadyError
from yat.test import Node
from yat.test import macro
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
class ConnPython5(unittest.TestCase):
def setUp(self):
self.pri_user = Node('PrimaryDbUser')
self.pri_sh = CommonSH('PrimaryDbUser')
self.constant = Constant()
self.LOG = Logger()
self.db_name = 'py_db'
self.db_user = 'py_user'
text = '----Opengauss_Function_Connect_Python_Case0005 start----'
self.LOG.info(text)
def test_conn(self):
text = '----step1: 创建库、用户,用户密码不含特殊字符,并赋权 expect: 成功----'
self.LOG.info(text)
sql_cmd = f"drop database if exists {self.db_name}; " \
f"create database {self.db_name}; " \
f"create user {self.db_user} with password " \
f"'{macro.PASSWD_INITIAL}'; " \
f"grant all privileges on database {self.db_name} " \
f"to {self.db_user};"
sql_res = self.pri_sh.execut_db_sql(sql_cmd)
self.LOG.info(sql_res)
expect = f'{self.constant.DROP_DATABASE_SUCCESS}(.*)' \
f'{self.constant.CREATE_DATABASE_SUCCESS}(.*)' \
f'{self.constant.CREATE_ROLE_SUCCESS_MSG}(.*)' \
f'{self.constant.GRANT_SUCCESS_MSG}'
regex_res = re.search(expect, sql_res, re.S)
self.assertIsNotNone(regex_res, text)
text = '----step2: 配置pg_hba入口 expect: 成功----'
self.LOG.info(text)
host_cmd = "ifconfig -a|grep inet6 -a2|" \
"grep broadcast|awk '{print $2}'"
self.host = os.popen(host_cmd).readlines()[0].strip()
guc_cmd = f'source {macro.DB_ENV_PATH}; ' \
f'gs_guc reload -D {macro.DB_INSTANCE_PATH} ' \
f'-h "host {self.db_name} {self.db_user} ' \
f'{self.host}/32 sha256"'
self.LOG.info(guc_cmd)
guc_res = self.pri_user.sh(guc_cmd).result()
self.LOG.info(guc_res)
self.assertIn(self.constant.GSGUC_SUCCESS_MSG, guc_res, text)
text = '----step3: 连接数据库 expect: 连接失败,有相应提示信息----'
self.LOG.info(text)
conn_info = f'pq://er_user:{macro.PASSWD_INITIAL}@' \
f'{self.pri_user.db_host}:{self.pri_user.db_port}/' \
f'{self.db_name}'
with self.assertRaises(
(ClientCannotConnectError, AuthenticationSpecificationError)):
py_opengauss.open(conn_info)
conn_info = f'pq://{self.db_user}:er_password@' \
f'{self.pri_user.db_host}:{self.pri_user.db_port}/' \
f'{self.db_name}'
with self.assertRaises(
(ClientCannotConnectError, AuthenticationSpecificationError)):
py_opengauss.open(conn_info)
conn_info = f'pq://{self.db_user}:{macro.PASSWD_INITIAL}@' \
f'10.10.10.10:{self.pri_user.db_port}/' \
f'{self.db_name}'
with self.assertRaises(
(ClientCannotConnectError, ConnectionRejectionError)):
py_opengauss.open(conn_info)
er_port = str(int(self.pri_user.db_port) + 1)
conn_info = f'pq://{self.db_user}:{macro.PASSWD_INITIAL}@' \
f'{self.pri_user.db_host}:{er_port}/' \
f'{self.db_name}'
with self.assertRaises(
(ClientCannotConnectError, ServerNotReadyError)):
py_opengauss.open(conn_info)
conn_info = f'pq://{self.db_user}:{macro.PASSWD_INITIAL}@' \
f'{self.pri_user.db_host}:{self.pri_user.db_port}/' \
f'er_db'
with self.assertRaises(
(ClientCannotConnectError, AuthenticationSpecificationError)):
py_opengauss.open(conn_info)
def tearDown(self):
text = '----run teardown----'
self.LOG.info(text)
text_1 = '----还原pg_hba入口 expect: 成功----'
self.LOG.info(text_1)
guc_cmd = f'source {macro.DB_ENV_PATH}; ' \
f'gs_guc reload -D {macro.DB_INSTANCE_PATH} ' \
f'-h "host {self.db_name} {self.db_user} ' \
f'{self.host}/32"'
self.LOG.info(guc_cmd)
guc_res = self.pri_user.sh(guc_cmd).result()
self.LOG.info(guc_res)
text_2 = '----删除库、用户,用户密码 expect: 成功----'
self.LOG.info(text_2)
sql_cmd = f'drop database if exists {self.db_name}; ' \
f'drop user if exists {self.db_user};'
sql_res = self.pri_sh.execut_db_sql(sql_cmd)
self.LOG.info(sql_res)
text = '----Opengauss_Function_Connect_Python_Case0005 end----'
self.LOG.info(text)
text = '----断言teardown执行成功----'
self.LOG.info(text)
self.assertIn(self.constant.GSGUC_SUCCESS_MSG, guc_res, text_1)
expect = f'{self.constant.DROP_DATABASE_SUCCESS}(.*)' \
f'{self.constant.DROP_ROLE_SUCCESS_MSG}'
regex_res = re.match(expect, sql_res, re.S)
self.assertIsNotNone(regex_res, text_2)
filtered:remove_function_no_docstring: 4,784 | filtered:remove_class_no_docstring: 16 | filtered:remove_delete_markers: 103
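The connection test above leans on `assertRaises` accepting a tuple of exception classes, so any one of several connection errors satisfies each check. A minimal self-contained sketch of that pattern follows; the exception types here are generic stand-ins, not the py_opengauss ones.

```python
import unittest

class TupleAssertRaisesDemo(unittest.TestCase):
    def test_any_of_several_errors(self):
        # The with-block passes if *any* of the listed exception types is raised.
        with self.assertRaises((ValueError, KeyError)):
            int('not a number')  # raises ValueError

if __name__ == '__main__':
    unittest.main()
```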
hexsha: 961d257aa7ee40fc569bdad4eceb6e9d5d1eb49c | size: 6,178 | ext: py | lang: Python
max_stars: path=main/environment.py, repo=zzong2006/space-filling-curve-with-RF-learning, head=30823745dae91240c0977185fb1831c9b4771a40, licenses=["MIT"], count=1, events=2021-05-15T23:27:58.000Z to 2021-05-15T23:27:58.000Z
max_issues: path=main/environment.py, repo=zzong2006/space-filling-curve-with-RF-learning, head=30823745dae91240c0977185fb1831c9b4771a40, licenses=["MIT"], count=null, events=null
max_forks: path=main/environment.py, repo=zzong2006/space-filling-curve-with-RF-learning, head=30823745dae91240c0977185fb1831c9b4771a40, licenses=["MIT"], count=null, events=null
content:
from itertools import combinations
import numpy as np
import matplotlib.pyplot as plt
from utils import *
import configparser
import torch
class CurveEnvironment:
"""
Curve 환경, 강화 학습 모델이 생성한 action을 기반으로 커브의 순서를 바꾸고, 바뀐 커브를 기반으로 reward를 측정
"""
def __init__(self, order=3, dim=2, data_size=10, init_curve='zig-zag', normalize=True, life=10, seed=1234):
"""
:param order: Curve iteration 개수
:param dim: 차원 수
:param data_size: 활성화 데이터 개수
:param init_curve: 초기 커브, 이 커브의 순서를 바꾸면서 최적의 커브를 찾음
:param normalize: 주어진 coordinate를 normalize 할것인지?
:param life: 한 episode 당 주어지는 목숨
:param seed: 활성화 데이터 생성 시드
"""
self.order = order
self.dim = dim
self.data_size = data_size
self.total_grid = 2 ** (order * dim)
self.side = int(np.sqrt(self.total_grid)) # grid 세로 또는 가로 개수
self.init_curve = init_curve
self.normalized = normalize
self.debug = dict() # 디버그용 정보가 담긴 dictionary. 주로, cost 정보를 담음
np.random.seed(seed)
# 임의의 데이터 분포 생성
self.data_index = np.random.choice(self.total_grid, size=data_size, replace=False)
self.data_coord = np.array(
list(map(lambda x: list([x // self.side, x % self.side]), self.data_index))) # 생성된 데이터의 좌표 구성
# episode 종료 기준
self.life = life # life 가 0에 도달하면 episode 종료
self.ori_life = life
# 커브 생성
self.curve_coord = self.reset()
# reward 측정용 기준
self.min_cost = self.get_l2_norm_locality()
self.prev_cost = self.min_cost
@staticmethod
def build_init_coords(self):
"""
초기 곡선 타입에 따른 n 차원 좌표 list를 만드는 함수, list 내 좌표 배치 순서는 곡선 타입을 따름
:return:
"""
coords = None
try:
if self.init_curve == 'zig-zag':
whole_index = np.arange(self.total_grid)
coords = np.array(list(map(lambda x: list([x // self.side, x % self.side]), whole_index)))
elif self.init_curve == 'hilbert':
coords = HilbertCurve(dimension=self.dim).getCoords(order=self.order)
elif self.init_curve == 'z':
coords = ZCurve(dimension=self.dim).getCoords(order=self.order)
else:
raise Exception('Curve type must be "zig-zag" or "hilbert" or "z".')
except Exception as e:
print(e)
finally:
return coords
def reset(self):
"""
n 차원 곡선 좌표 list를 생성하고, 해당 좌표의 활성화 데이터 여부를 표시하는 함수
또한 reward 측정을 위한 기준을 초기화함
:return:
"""
self.curve_coord = self.build_init_coords() # 곡선을 n 차원 좌표 list로 구성
avail = np.zeros(shape=(self.total_grid, 1), dtype=np.int)
# 이미 생성된 활성화 데이터의 좌표가 일치되는 곳을 활성화
for index in map(lambda x: np.where(np.all(self.curve_coord == x, axis=1)), self.data_coord):
avail[index] = 1 # 활성화 데이터 여부 표시
self.curve_coord = np.concatenate((avail, self.curve_coord), axis=1)
if self.normalized: # do feature scaling
self.curve_coord = CurveEnvironment.normalize_state(self.curve_coord)
self.min_cost = self.get_l2_norm_locality()
self.prev_cost = self.min_cost
self.life = self.ori_life
return self.curve_coord
def get_l2_norm_locality(self):
"""
l2 norm ratio locality 측정 함수
sum(1 - (l2 norm/ l1 norm)) 의 형태
:return:
"""
avail_data = []
for idx, point in enumerate(self.curve_coord):
if point[0] == 1: # 활성화 데이터인 경우
avail_data.append([point[1], point[2], idx])
cost = 0
# 활성화된 데이터만 모음, 결과는 (x, y, 데이터 순서)
for (x, y) in combinations(avail_data, 2):
dist_2d = np.sqrt((x[0] - y[0]) ** 2 + (x[1] - y[1]) ** 2)
dist_1d = np.abs(x[2] - y[2])
# Locality Ratio 가 1과 가까운지 측정
cost += np.abs(1 - (dist_1d / dist_2d))
return cost
def get_reward(self):
"""
보상 측정 함수, l2_norm_locality가 감소한 경우 positive reward를 부여한다. 그 외에는 0 또는 negative reward
:return:
"""
curr_cost = self.get_l2_norm_locality()
reward = 0
self.debug['cost'] = curr_cost
if self.min_cost < curr_cost: # 최소 cost 보다 작아지지 못할 경우
if self.prev_cost < curr_cost:
self.life -= 1
reward = -1
elif self.prev_cost > curr_cost: # 최소 cost 보다 작아지지 못했지만, 이전 커브 cost 보다는 작아졌을 경우
reward = 0
else:
reward = 0
elif self.prev_cost == curr_cost:
reward = 0
else:
reward = max(1, abs(curr_cost - self.min_cost))
self.min_cost = curr_cost # 최소 cost 갱신
self.prev_cost = curr_cost # 이전 cost 갱신
return reward
if '__main__' == __name__:
test_env = CurveEnvironment()
for curve_name in ['z', 'hilbert', 'zig-zag']:
test_env = CurveEnvironment(init_curve=curve_name)
print(test_env.get_l2_norm_locality())
print(test_env.get_reward())
avg_line_length: 33.215054 | max_line_length: 111 | alphanum_fraction: 0.570411
original_content:
from itertools import combinations
import numpy as np
import matplotlib.pyplot as plt
from utils import *
import configparser
import torch
class CurveEnvironment:
"""
Curve 환경, 강화 학습 모델이 생성한 action을 기반으로 커브의 순서를 바꾸고, 바뀐 커브를 기반으로 reward를 측정
"""
def __init__(self, order=3, dim=2, data_size=10, init_curve='zig-zag', normalize=True, life=10, seed=1234):
"""
:param order: Curve iteration 개수
:param dim: 차원 수
:param data_size: 활성화 데이터 개수
:param init_curve: 초기 커브, 이 커브의 순서를 바꾸면서 최적의 커브를 찾음
:param normalize: 주어진 coordinate를 normalize 할것인지?
:param life: 한 episode 당 주어지는 목숨
:param seed: 활성화 데이터 생성 시드
"""
self.order = order
self.dim = dim
self.data_size = data_size
self.total_grid = 2 ** (order * dim)
self.side = int(np.sqrt(self.total_grid)) # grid 세로 또는 가로 개수
self.init_curve = init_curve
self.normalized = normalize
self.debug = dict() # 디버그용 정보가 담긴 dictionary. 주로, cost 정보를 담음
np.random.seed(seed)
# 임의의 데이터 분포 생성
self.data_index = np.random.choice(self.total_grid, size=data_size, replace=False)
self.data_coord = np.array(
list(map(lambda x: list([x // self.side, x % self.side]), self.data_index))) # 생성된 데이터의 좌표 구성
# episode 종료 기준
self.life = life # life 가 0에 도달하면 episode 종료
self.ori_life = life
# 커브 생성
self.curve_coord = self.reset()
# reward 측정용 기준
self.min_cost = self.get_l2_norm_locality()
self.prev_cost = self.min_cost
@staticmethod
def normalize_state(state):
min_val = np.min(state, axis=0, keepdims=True)
max_val = np.max(state, axis=0, keepdims=True)
state = (state - min_val) / (max_val - min_val)
return state
def build_init_coords(self):
"""
초기 곡선 타입에 따른 n 차원 좌표 list를 만드는 함수, list 내 좌표 배치 순서는 곡선 타입을 따름
:return:
"""
coords = None
try:
if self.init_curve == 'zig-zag':
whole_index = np.arange(self.total_grid)
coords = np.array(list(map(lambda x: list([x // self.side, x % self.side]), whole_index)))
elif self.init_curve == 'hilbert':
coords = HilbertCurve(dimension=self.dim).getCoords(order=self.order)
elif self.init_curve == 'z':
coords = ZCurve(dimension=self.dim).getCoords(order=self.order)
else:
raise Exception('Curve type must be "zig-zag" or "hilbert" or "z".')
except Exception as e:
print(e)
finally:
return coords
def reset(self):
"""
n 차원 곡선 좌표 list를 생성하고, 해당 좌표의 활성화 데이터 여부를 표시하는 함수
또한 reward 측정을 위한 기준을 초기화함
:return:
"""
self.curve_coord = self.build_init_coords() # 곡선을 n 차원 좌표 list로 구성
avail = np.zeros(shape=(self.total_grid, 1), dtype=np.int)
# 이미 생성된 활성화 데이터의 좌표가 일치되는 곳을 활성화
for index in map(lambda x: np.where(np.all(self.curve_coord == x, axis=1)), self.data_coord):
avail[index] = 1 # 활성화 데이터 여부 표시
self.curve_coord = np.concatenate((avail, self.curve_coord), axis=1)
if self.normalized: # do feature scaling
self.curve_coord = CurveEnvironment.normalize_state(self.curve_coord)
self.min_cost = self.get_l2_norm_locality()
self.prev_cost = self.min_cost
self.life = self.ori_life
return self.curve_coord
def plot_curve(self, ):
fig, ax = plt.subplots(1, figsize=(10, 10))
show_points(self.data_coord, self.side, ax, index=False)
if self.init_curve == 'hilbert':
temp_curve = HilbertCurve(self.dim)
show_line_by_index_order(np.array(temp_curve.getCoords(self.order)), ax)
elif self.init_curve == 'zig-zag':
grid_index = np.arange(self.total_grid)
show_line_by_index_order(grid_index, ax)
plt.show(block=True)
def get_l2_norm_locality(self):
"""
l2 norm ratio locality 측정 함수
sum(1 - (l2 norm/ l1 norm)) 의 형태
:return:
"""
avail_data = []
for idx, point in enumerate(self.curve_coord):
if point[0] == 1: # 활성화 데이터인 경우
avail_data.append([point[1], point[2], idx])
cost = 0
# 활성화된 데이터만 모음, 결과는 (x, y, 데이터 순서)
for (x, y) in combinations(avail_data, 2):
dist_2d = np.sqrt((x[0] - y[0]) ** 2 + (x[1] - y[1]) ** 2)
dist_1d = np.abs(x[2] - y[2])
# Locality Ratio 가 1과 가까운지 측정
cost += np.abs(1 - (dist_1d / dist_2d))
return cost
def get_reward(self):
"""
보상 측정 함수, l2_norm_locality가 감소한 경우 positive reward를 부여한다. 그 외에는 0 또는 negative reward
:return:
"""
curr_cost = self.get_l2_norm_locality()
reward = 0
self.debug['cost'] = curr_cost
if self.min_cost < curr_cost: # 최소 cost 보다 작아지지 못할 경우
if self.prev_cost < curr_cost:
self.life -= 1
reward = -1
elif self.prev_cost > curr_cost: # 최소 cost 보다 작아지지 못했지만, 이전 커브 cost 보다는 작아졌을 경우
reward = 0
else:
reward = 0
elif self.prev_cost == curr_cost:
reward = 0
else:
reward = max(1, abs(curr_cost - self.min_cost))
self.min_cost = curr_cost # 최소 cost 갱신
self.prev_cost = curr_cost # 이전 cost 갱신
return reward
def step(self, action: tuple):
a, b = action
self.curve_coord[[a, b]] = self.curve_coord[[b, a]] # grid 순서 swap
reward = self.get_reward()
done = False
if self.life == 0:
done = True
return self.curve_coord, reward, done, self.debug
if '__main__' == __name__:
test_env = CurveEnvironment()
for curve_name in ['z', 'hilbert', 'zig-zag']:
test_env = CurveEnvironment(init_curve=curve_name)
print(test_env.get_l2_norm_locality())
print(test_env.get_reward())
filtered:remove_function_no_docstring: 947 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 80
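The `get_l2_norm_locality` method above sums, over every pair of active cells, how far the ratio of curve distance to Euclidean distance deviates from 1. Here is a minimal standalone sketch of that metric with made-up points (each tuple is x, y, position along the curve); it is illustrative only, not the class method.

```python
from itertools import combinations
import numpy as np

points = [(0, 0, 0), (1, 0, 3), (1, 1, 4)]  # hypothetical active cells

cost = 0.0
for a, b in combinations(points, 2):
    dist_2d = np.sqrt((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2)  # distance in the grid
    dist_1d = abs(a[2] - b[2])                                   # distance along the curve
    cost += abs(1 - dist_1d / dist_2d)                           # 0 when the ratio is exactly 1
print(round(cost, 3))  # ~3.828: the first two pairs are stretched, the last is perfect
```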
hexsha: 145d5556f43decc68e22fba23de0af8fd6b8a78f | size: 806 | ext: py | lang: Python
max_stars: path=template_engine/jinja2_filters.py, repo=samuelcouch/the-blue-alliance, head=8c7977071dbe9d9745ffdfbb03e37bd016792511, licenses=["MIT"], count=null, events=null
max_issues: path=template_engine/jinja2_filters.py, repo=samuelcouch/the-blue-alliance, head=8c7977071dbe9d9745ffdfbb03e37bd016792511, licenses=["MIT"], count=null, events=null
max_forks: path=template_engine/jinja2_filters.py, repo=samuelcouch/the-blue-alliance, head=8c7977071dbe9d9745ffdfbb03e37bd016792511, licenses=["MIT"], count=null, events=null
content:
from email import utils
import re
import time
import urllib
def strftime(datetime, formatstr):
"""
Uses Python's strftime with some tweaks
"""
return datetime.strftime(formatstr).lstrip("0").replace(" 0", " ")
# def slugify(s):
# """
# Use Django's slugify method
# """
# return defaultfilters.slugify(s)
avg_line_length: 17.148936 | max_line_length: 70 | alphanum_fraction: 0.611663
original_content:
from email import utils
import re
import time
import urllib
def digits(s):
if not s:
return ''
if type(s) is int:
return s
return re.sub('[^0-9]', '', s)
def floatformat(num, num_decimals):
return "%.{}f".format(num_decimals) % num
def strftime(datetime, formatstr):
"""
Uses Python's strftime with some tweaks
"""
return datetime.strftime(formatstr).lstrip("0").replace(" 0", " ")
def strip_frc(s):
if not s:
return ''
return s[3:]
def urlencode(s):
return urllib.quote(s.encode('utf8'))
def rfc2822(datetime):
tt = datetime.timetuple()
timestamp = time.mktime(tt)
return utils.formatdate(timestamp)
# def slugify(s):
# """
# Use Django's slugify method
# """
# return defaultfilters.slugify(s)
filtered:remove_function_no_docstring: 345 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 115
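The file above only defines the filter functions; how they get wired into a template engine is not shown. Below is a minimal sketch of registering one of them with Jinja2 (implied by the filename, but an assumption here), using a local copy of `digits`.

```python
import re
from jinja2 import Environment

def digits(s):
    # Same behaviour as the `digits` filter above: keep only 0-9 characters.
    if not s:
        return ''
    if isinstance(s, int):
        return s
    return re.sub('[^0-9]', '', s)

env = Environment()
env.filters['digits'] = digits  # filters are plain callables in the environment's dict
print(env.from_string('{{ "frc254" | digits }}').render())  # -> 254
```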
hexsha: 4d02e68b8ef0c65c77338fd1b55733f17d364293 | size: 12,812 | ext: py | lang: Python
max_stars: path=custom_components/openmediavault/omv_controller.py, repo=firstof9/homeassistant-openmediavault, head=3a0cd5ae34b045e90b06b15760329e987c87b5fd, licenses=["Apache-2.0"], count=41, events=2020-04-20T15:57:14.000Z to 2022-03-24T18:16:28.000Z
max_issues: path=custom_components/openmediavault/omv_controller.py, repo=firstof9/homeassistant-openmediavault, head=3a0cd5ae34b045e90b06b15760329e987c87b5fd, licenses=["Apache-2.0"], count=68, events=2020-04-27T08:36:47.000Z to 2022-03-31T07:04:47.000Z
max_forks: path=custom_components/openmediavault/omv_controller.py, repo=firstof9/homeassistant-openmediavault, head=3a0cd5ae34b045e90b06b15760329e987c87b5fd, licenses=["Apache-2.0"], count=13, events=2020-09-11T12:45:51.000Z to 2022-03-30T06:58:05.000Z
content:
"""OpenMediaVault Controller."""
import asyncio
import time
from datetime import timedelta
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_SSL,
CONF_USERNAME,
CONF_VERIFY_SSL,
)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.event import async_track_time_interval
from .const import DOMAIN
from .helper import parse_api
from .omv_api import OpenMediaVaultAPI
# ---------------------------
# OMVControllerData
# ---------------------------
class OMVControllerData(object):
"""OMVControllerData Class."""
def __init__(self, hass, config_entry):
"""Initialize OMVController."""
self.hass = hass
self.config_entry = config_entry
self.name = config_entry.data[CONF_NAME]
self.host = config_entry.data[CONF_HOST]
self.data = {
"hwinfo": {},
"disk": {},
"fs": {},
# "service": {},
}
self.listeners = []
self.lock = asyncio.Lock()
self.api = OpenMediaVaultAPI(
hass,
config_entry.data[CONF_HOST],
config_entry.data[CONF_USERNAME],
config_entry.data[CONF_PASSWORD],
config_entry.data[CONF_SSL],
config_entry.data[CONF_VERIFY_SSL],
)
self._force_update_callback = None
self._force_hwinfo_update_callback = None
# ---------------------------
# async_init
# ---------------------------
# ---------------------------
# signal_update
# ---------------------------
@property
def signal_update(self):
"""Event to signal new data."""
return f"{DOMAIN}-update-{self.name}"
# ---------------------------
# async_reset
# ---------------------------
async def async_reset(self):
"""Reset dispatchers."""
for unsub_dispatcher in self.listeners:
unsub_dispatcher()
self.listeners = []
return True
# ---------------------------
# connected
# ---------------------------
def connected(self):
"""Return connected state."""
return self.api.connected()
# ---------------------------
# force_hwinfo_update
# ---------------------------
@callback
async def force_hwinfo_update(self, _now=None):
"""Trigger update by timer."""
await self.async_hwinfo_update()
# ---------------------------
# async_hwinfo_update
# ---------------------------
async def async_hwinfo_update(self):
"""Update OpenMediaVault hardware info."""
try:
await asyncio.wait_for(self.lock.acquire(), timeout=30)
except:
return
await self.hass.async_add_executor_job(self.get_hwinfo)
if self.api.connected():
await self.hass.async_add_executor_job(self.get_disk)
self.lock.release()
# ---------------------------
# force_update
# ---------------------------
@callback
async def force_update(self, _now=None):
"""Trigger update by timer."""
await self.async_update()
# ---------------------------
# async_update
# ---------------------------
async def async_update(self):
"""Update OMV data."""
if self.api.has_reconnected():
await self.async_hwinfo_update()
try:
await asyncio.wait_for(self.lock.acquire(), timeout=10)
except:
return
await self.hass.async_add_executor_job(self.get_hwinfo)
if self.api.connected():
await self.hass.async_add_executor_job(self.get_fs)
if self.api.connected():
await self.hass.async_add_executor_job(self.get_smart)
# await self.hass.async_add_executor_job(self.get_service)
async_dispatcher_send(self.hass, self.signal_update)
self.lock.release()
# ---------------------------
# get_hwinfo
# ---------------------------
def get_hwinfo(self):
"""Get hardware info from OMV."""
self.data["hwinfo"] = parse_api(
data=self.data["hwinfo"],
source=self.api.query("System", "getInformation"),
vals=[
{"name": "hostname", "default": "unknown"},
{"name": "version", "default": "unknown"},
{"name": "cpuUsage", "default": 0},
{"name": "memTotal", "default": 0},
{"name": "memUsed", "default": 0},
{"name": "uptime", "default": "0 days 0 hours 0 minutes 0 seconds"},
{"name": "configDirty", "type": "bool", "default": False},
{"name": "rebootRequired", "type": "bool", "default": False},
{"name": "pkgUpdatesAvailable", "type": "bool", "default": False},
],
ensure_vals=[{"name": "memUsage", "default": 0}],
)
if int(self.data["hwinfo"]["version"].split(".")[0])>5:
tmp = self.data["hwinfo"]["uptime"]
pos = abs( int(tmp) )
day = pos / (3600*24)
rem = pos % (3600*24)
hour = rem / 3600
rem = rem % 3600
mins = rem / 60
secs = rem % 60
res = '%d days %02d hours %02d minutes %02d seconds' % (day, hour, mins, secs)
if int(tmp) < 0:
res = "-%s" % res
tmp = res.split(" ")
else:
tmp = self.data["hwinfo"]["uptime"].split(" ")
self.data["hwinfo"]["uptimeEpoch"] = int(tmp[0]) * 24 + int(tmp[2])
self.data["hwinfo"]["cpuUsage"] = round(self.data["hwinfo"]["cpuUsage"], 1)
if int(self.data["hwinfo"]["memTotal"]) > 0:
mem = (
int(self.data["hwinfo"]["memUsed"])
/ int(self.data["hwinfo"]["memTotal"])
) * 100
else:
mem = 0
self.data["hwinfo"]["memUsage"] = round(mem, 1)
# ---------------------------
# get_disk
# ---------------------------
def get_disk(self):
"""Get all filesystems from OMV."""
self.data["disk"] = parse_api(
data=self.data["disk"],
source=self.api.query("DiskMgmt", "enumerateDevices"),
key="devicename",
vals=[
{"name": "devicename"},
{"name": "canonicaldevicefile"},
{"name": "size", "default": "unknown"},
{"name": "israid", "type": "bool", "default": False},
{"name": "isroot", "type": "bool", "default": False},
],
ensure_vals=[
{"name": "devicemodel", "default": "unknown"},
{"name": "serialnumber", "default": "unknown"},
{"name": "firmwareversion", "default": "unknown"},
{"name": "sectorsize", "default": "unknown"},
{"name": "rotationrate", "default": "unknown"},
{"name": "writecacheis", "default": "unknown"},
{"name": "smartsupportis", "default": "unknown"},
{"name": "Raw_Read_Error_Rate", "default": "unknown"},
{"name": "Spin_Up_Time", "default": "unknown"},
{"name": "Start_Stop_Count", "default": "unknown"},
{"name": "Reallocated_Sector_Ct", "default": "unknown"},
{"name": "Seek_Error_Rate", "default": "unknown"},
{"name": "Load_Cycle_Count", "default": "unknown"},
{"name": "Temperature_Celsius", "default": "unknown"},
{"name": "UDMA_CRC_Error_Count", "default": "unknown"},
{"name": "Multi_Zone_Error_Rate", "default": "unknown"},
],
)
for uid in self.data["disk"]:
tmp_data = parse_api(
data={},
source=self.api.query(
"Smart",
"getInformation",
{"devicefile": self.data["disk"][uid]["canonicaldevicefile"]},
),
vals=[
{"name": "devicemodel", "default": "unknown"},
{"name": "serialnumber", "default": "unknown"},
{"name": "firmwareversion", "default": "unknown"},
{"name": "sectorsize", "default": "unknown"},
{"name": "rotationrate", "default": "unknown"},
{"name": "writecacheis", "type": "bool", "default": False},
{"name": "smartsupportis", "type": "bool", "default": False},
],
)
if not tmp_data:
continue
self.data["disk"][uid]["devicemodel"] = tmp_data["devicemodel"]
self.data["disk"][uid]["serialnumber"] = tmp_data["serialnumber"]
self.data["disk"][uid]["firmwareversion"] = tmp_data["firmwareversion"]
self.data["disk"][uid]["sectorsize"] = tmp_data["sectorsize"]
self.data["disk"][uid]["rotationrate"] = tmp_data["rotationrate"]
self.data["disk"][uid]["writecacheis"] = tmp_data["writecacheis"]
self.data["disk"][uid]["smartsupportis"] = tmp_data["smartsupportis"]
# ---------------------------
# get_smart
# ---------------------------
# ---------------------------
# get_fs
# ---------------------------
def get_fs(self):
"""Get all filesystems from OMV."""
self.data["fs"] = parse_api(
data=self.data["fs"],
source=self.api.query("FileSystemMgmt", "enumerateFilesystems"),
key="uuid",
vals=[
{"name": "uuid"},
{"name": "parentdevicefile", "default": "unknown"},
{"name": "label", "default": "unknown"},
{"name": "type", "default": "unknown"},
{"name": "mountpoint", "default": "unknown"},
{"name": "available", "default": "unknown"},
{"name": "size", "default": "unknown"},
{"name": "percentage", "default": "unknown"},
{"name": "_readonly", "type": "bool", "default": False},
{"name": "_used", "type": "bool", "default": False},
],
skip=[{"name": "type", "value": "swap"}],
)
for uid in self.data["fs"]:
self.data["fs"][uid]["size"] = round(
int(self.data["fs"][uid]["size"]) / 1073741824, 1
)
self.data["fs"][uid]["available"] = round(
int(self.data["fs"][uid]["available"]) / 1073741824, 1
)
# ---------------------------
# get_service
# ---------------------------
# def get_service(self):
# """Get OMV services status"""
# self.data["service"] = parse_api(
# data=self.data["service"],
# source=self.api.query("Services", "getStatus"),
# key="name",
# vals=[
# {"name": "name"},
# {"name": "title", "default": "unknown"},
# {"name": "enabled", "type": "bool", "default": False},
# {"name": "running", "type": "bool", "default": False},
# ],
# )
avg_line_length: 36.501425 | max_line_length: 90 | alphanum_fraction: 0.46956
original_content:
"""OpenMediaVault Controller."""
import asyncio
import time
from datetime import timedelta
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_SSL,
CONF_USERNAME,
CONF_VERIFY_SSL,
)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.event import async_track_time_interval
from .const import DOMAIN
from .helper import parse_api
from .omv_api import OpenMediaVaultAPI
# ---------------------------
# OMVControllerData
# ---------------------------
class OMVControllerData(object):
"""OMVControllerData Class."""
def __init__(self, hass, config_entry):
"""Initialize OMVController."""
self.hass = hass
self.config_entry = config_entry
self.name = config_entry.data[CONF_NAME]
self.host = config_entry.data[CONF_HOST]
self.data = {
"hwinfo": {},
"disk": {},
"fs": {},
# "service": {},
}
self.listeners = []
self.lock = asyncio.Lock()
self.api = OpenMediaVaultAPI(
hass,
config_entry.data[CONF_HOST],
config_entry.data[CONF_USERNAME],
config_entry.data[CONF_PASSWORD],
config_entry.data[CONF_SSL],
config_entry.data[CONF_VERIFY_SSL],
)
self._force_update_callback = None
self._force_hwinfo_update_callback = None
# ---------------------------
# async_init
# ---------------------------
async def async_init(self):
self._force_update_callback = async_track_time_interval(
self.hass, self.force_update, timedelta(seconds=60)
)
self._force_hwinfo_update_callback = async_track_time_interval(
self.hass, self.force_hwinfo_update, timedelta(seconds=3600)
)
# ---------------------------
# signal_update
# ---------------------------
@property
def signal_update(self):
"""Event to signal new data."""
return f"{DOMAIN}-update-{self.name}"
# ---------------------------
# async_reset
# ---------------------------
async def async_reset(self):
"""Reset dispatchers."""
for unsub_dispatcher in self.listeners:
unsub_dispatcher()
self.listeners = []
return True
# ---------------------------
# connected
# ---------------------------
def connected(self):
"""Return connected state."""
return self.api.connected()
# ---------------------------
# force_hwinfo_update
# ---------------------------
@callback
async def force_hwinfo_update(self, _now=None):
"""Trigger update by timer."""
await self.async_hwinfo_update()
# ---------------------------
# async_hwinfo_update
# ---------------------------
async def async_hwinfo_update(self):
"""Update OpenMediaVault hardware info."""
try:
await asyncio.wait_for(self.lock.acquire(), timeout=30)
except:
return
await self.hass.async_add_executor_job(self.get_hwinfo)
if self.api.connected():
await self.hass.async_add_executor_job(self.get_disk)
self.lock.release()
# ---------------------------
# force_update
# ---------------------------
@callback
async def force_update(self, _now=None):
"""Trigger update by timer."""
await self.async_update()
# ---------------------------
# async_update
# ---------------------------
async def async_update(self):
"""Update OMV data."""
if self.api.has_reconnected():
await self.async_hwinfo_update()
try:
await asyncio.wait_for(self.lock.acquire(), timeout=10)
except:
return
await self.hass.async_add_executor_job(self.get_hwinfo)
if self.api.connected():
await self.hass.async_add_executor_job(self.get_fs)
if self.api.connected():
await self.hass.async_add_executor_job(self.get_smart)
# await self.hass.async_add_executor_job(self.get_service)
async_dispatcher_send(self.hass, self.signal_update)
self.lock.release()
# ---------------------------
# get_hwinfo
# ---------------------------
def get_hwinfo(self):
"""Get hardware info from OMV."""
self.data["hwinfo"] = parse_api(
data=self.data["hwinfo"],
source=self.api.query("System", "getInformation"),
vals=[
{"name": "hostname", "default": "unknown"},
{"name": "version", "default": "unknown"},
{"name": "cpuUsage", "default": 0},
{"name": "memTotal", "default": 0},
{"name": "memUsed", "default": 0},
{"name": "uptime", "default": "0 days 0 hours 0 minutes 0 seconds"},
{"name": "configDirty", "type": "bool", "default": False},
{"name": "rebootRequired", "type": "bool", "default": False},
{"name": "pkgUpdatesAvailable", "type": "bool", "default": False},
],
ensure_vals=[{"name": "memUsage", "default": 0}],
)
if int(self.data["hwinfo"]["version"].split(".")[0])>5:
tmp = self.data["hwinfo"]["uptime"]
pos = abs( int(tmp) )
day = pos / (3600*24)
rem = pos % (3600*24)
hour = rem / 3600
rem = rem % 3600
mins = rem / 60
secs = rem % 60
res = '%d days %02d hours %02d minutes %02d seconds' % (day, hour, mins, secs)
if int(tmp) < 0:
res = "-%s" % res
tmp = res.split(" ")
else:
tmp = self.data["hwinfo"]["uptime"].split(" ")
self.data["hwinfo"]["uptimeEpoch"] = int(tmp[0]) * 24 + int(tmp[2])
self.data["hwinfo"]["cpuUsage"] = round(self.data["hwinfo"]["cpuUsage"], 1)
if int(self.data["hwinfo"]["memTotal"]) > 0:
mem = (
int(self.data["hwinfo"]["memUsed"])
/ int(self.data["hwinfo"]["memTotal"])
) * 100
else:
mem = 0
self.data["hwinfo"]["memUsage"] = round(mem, 1)
# ---------------------------
# get_disk
# ---------------------------
def get_disk(self):
"""Get all filesystems from OMV."""
self.data["disk"] = parse_api(
data=self.data["disk"],
source=self.api.query("DiskMgmt", "enumerateDevices"),
key="devicename",
vals=[
{"name": "devicename"},
{"name": "canonicaldevicefile"},
{"name": "size", "default": "unknown"},
{"name": "israid", "type": "bool", "default": False},
{"name": "isroot", "type": "bool", "default": False},
],
ensure_vals=[
{"name": "devicemodel", "default": "unknown"},
{"name": "serialnumber", "default": "unknown"},
{"name": "firmwareversion", "default": "unknown"},
{"name": "sectorsize", "default": "unknown"},
{"name": "rotationrate", "default": "unknown"},
{"name": "writecacheis", "default": "unknown"},
{"name": "smartsupportis", "default": "unknown"},
{"name": "Raw_Read_Error_Rate", "default": "unknown"},
{"name": "Spin_Up_Time", "default": "unknown"},
{"name": "Start_Stop_Count", "default": "unknown"},
{"name": "Reallocated_Sector_Ct", "default": "unknown"},
{"name": "Seek_Error_Rate", "default": "unknown"},
{"name": "Load_Cycle_Count", "default": "unknown"},
{"name": "Temperature_Celsius", "default": "unknown"},
{"name": "UDMA_CRC_Error_Count", "default": "unknown"},
{"name": "Multi_Zone_Error_Rate", "default": "unknown"},
],
)
for uid in self.data["disk"]:
tmp_data = parse_api(
data={},
source=self.api.query(
"Smart",
"getInformation",
{"devicefile": self.data["disk"][uid]["canonicaldevicefile"]},
),
vals=[
{"name": "devicemodel", "default": "unknown"},
{"name": "serialnumber", "default": "unknown"},
{"name": "firmwareversion", "default": "unknown"},
{"name": "sectorsize", "default": "unknown"},
{"name": "rotationrate", "default": "unknown"},
{"name": "writecacheis", "type": "bool", "default": False},
{"name": "smartsupportis", "type": "bool", "default": False},
],
)
if not tmp_data:
continue
self.data["disk"][uid]["devicemodel"] = tmp_data["devicemodel"]
self.data["disk"][uid]["serialnumber"] = tmp_data["serialnumber"]
self.data["disk"][uid]["firmwareversion"] = tmp_data["firmwareversion"]
self.data["disk"][uid]["sectorsize"] = tmp_data["sectorsize"]
self.data["disk"][uid]["rotationrate"] = tmp_data["rotationrate"]
self.data["disk"][uid]["writecacheis"] = tmp_data["writecacheis"]
self.data["disk"][uid]["smartsupportis"] = tmp_data["smartsupportis"]
# ---------------------------
# get_smart
# ---------------------------
def get_smart(self):
for uid in self.data["disk"]:
tmp_data = parse_api(
data={},
source=self.api.query(
"Smart",
"getAttributes",
{"devicefile": self.data["disk"][uid]["canonicaldevicefile"]},
),
key="attrname",
vals=[
{"name": "attrname"},
{"name": "threshold", "default": 0},
{"name": "rawvalue", "default": 0},
],
)
if not tmp_data:
continue
vals = [
"Raw_Read_Error_Rate",
"Spin_Up_Time",
"Start_Stop_Count",
"Reallocated_Sector_Ct",
"Seek_Error_Rate",
"Load_Cycle_Count",
"Temperature_Celsius",
"UDMA_CRC_Error_Count",
"Multi_Zone_Error_Rate",
]
for tmp_val in vals:
if tmp_val in tmp_data:
self.data["disk"][uid][tmp_val] = tmp_data[tmp_val]["rawvalue"]
# ---------------------------
# get_fs
# ---------------------------
def get_fs(self):
"""Get all filesystems from OMV."""
self.data["fs"] = parse_api(
data=self.data["fs"],
source=self.api.query("FileSystemMgmt", "enumerateFilesystems"),
key="uuid",
vals=[
{"name": "uuid"},
{"name": "parentdevicefile", "default": "unknown"},
{"name": "label", "default": "unknown"},
{"name": "type", "default": "unknown"},
{"name": "mountpoint", "default": "unknown"},
{"name": "available", "default": "unknown"},
{"name": "size", "default": "unknown"},
{"name": "percentage", "default": "unknown"},
{"name": "_readonly", "type": "bool", "default": False},
{"name": "_used", "type": "bool", "default": False},
],
skip=[{"name": "type", "value": "swap"}],
)
for uid in self.data["fs"]:
self.data["fs"][uid]["size"] = round(
int(self.data["fs"][uid]["size"]) / 1073741824, 1
)
self.data["fs"][uid]["available"] = round(
int(self.data["fs"][uid]["available"]) / 1073741824, 1
)
# ---------------------------
# get_service
# ---------------------------
# def get_service(self):
# """Get OMV services status"""
# self.data["service"] = parse_api(
# data=self.data["service"],
# source=self.api.query("Services", "getStatus"),
# key="name",
# vals=[
# {"name": "name"},
# {"name": "title", "default": "unknown"},
# {"name": "enabled", "type": "bool", "default": False},
# {"name": "running", "type": "bool", "default": False},
# ],
# )
filtered:remove_function_no_docstring: 1,433 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 52
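In `get_hwinfo` above, newer OMV versions report uptime as a number of seconds, which the code converts back into the "D days HH hours MM minutes SS seconds" string before splitting it. A small sketch of that arithmetic with `divmod` is shown below; `format_uptime` is a hypothetical helper, not part of the integration.

```python
def format_uptime(seconds) -> str:
    # Mirror the conversion in get_hwinfo: seconds -> 'D days HH hours MM minutes SS seconds'.
    sign = '-' if int(seconds) < 0 else ''
    rem = abs(int(seconds))
    days, rem = divmod(rem, 3600 * 24)
    hours, rem = divmod(rem, 3600)
    minutes, secs = divmod(rem, 60)
    return f'{sign}{days} days {hours:02d} hours {minutes:02d} minutes {secs:02d} seconds'

print(format_uptime(93784))  # -> 1 days 02 hours 03 minutes 04 seconds
```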
hexsha: 79540db7343cd37c04169f2c2a9534f0c0ea7d5c | size: 1,187 | ext: py | lang: Python
max_stars: path=code/math_examples.py, repo=rustam-fork/ml-course-uz, head=e1554d4c69bf0e421aa596d77aab65639df1ff73, licenses=["MIT"], count=21, events=2018-01-05T09:24:49.000Z to 2021-04-24T03:25:25.000Z
max_issues: path=code/math_examples.py, repo=rustam-fork/ml-course-uz, head=e1554d4c69bf0e421aa596d77aab65639df1ff73, licenses=["MIT"], count=1, events=2019-11-11T18:34:53.000Z to 2019-11-13T15:56:10.000Z
max_forks: path=code/math_examples.py, repo=rustam-fork/ml-course-uz, head=e1554d4c69bf0e421aa596d77aab65639df1ff73, licenses=["MIT"], count=13, events=2018-01-05T10:26:47.000Z to 2022-01-25T07:48:33.000Z
content:
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
avg_line_length: 27.604651 | max_line_length: 118 | alphanum_fraction: 0.57877
original_content:
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
def draw_parabola(steps=50):
x = np.linspace(-4, 4, steps)
plt.plot(x, x ** 2)
plt.axvline(x=0, color='b', linestyle='dashed')
def draw_paraboloid(steps=50):
fig = plt.figure(figsize=(10, 10))
ax = fig.gca(projection='3d')
x = np.linspace(-1, 1, steps)
y = np.linspace(-1, 1, steps)
X, Y = np.meshgrid(x, y)
Z = X ** 2 + Y ** 2
ax.plot_surface(X, Y, Z, cmap=cm.coolwarm)
def draw_mishra_bird():
fig = plt.figure(figsize=(14, 10))
x = np.arange(-10, 1, 0.1)
y = np.arange(-6, 0.5, 0.1)
X, Y = np.meshgrid(x, y)
ax = plt.gca(projection='3d')
Z = np.sin(Y) * np.exp((1 - np.cos(X)) ** 2) + np.cos(X) * np.cos(X) * np.exp((1 - np.sin(Y)) ** 2) + (X - Y) ** 2
ax.plot_surface(X, Y, Z, cmap=cm.coolwarm)
ax.view_init(20, -60)
def draw_hyperbolic_paraboloid():
fig = plt.figure(figsize=(10, 10))
ax = fig.gca(projection='3d')
x = np.linspace(-1, 1, 50)
y = np.linspace(-1, 1, 50)
X, Y = np.meshgrid(x, y)
Z = X ** 2 - Y ** 2
ax.plot_surface(X, Y, Z, cmap=cm.coolwarm)
filtered:remove_function_no_docstring: 975 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 92
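The plotting functions above request 3-D axes with `fig.gca(projection='3d')`, which recent Matplotlib releases no longer accept. A small sketch of the same paraboloid plot with the explicit subplot call, assuming a current Matplotlib:

```python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm

fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(projection='3d')   # replaces fig.gca(projection='3d')
x = np.linspace(-1, 1, 50)
y = np.linspace(-1, 1, 50)
X, Y = np.meshgrid(x, y)
ax.plot_surface(X, Y, X ** 2 + Y ** 2, cmap=cm.coolwarm)  # same surface as draw_paraboloid
plt.show()
```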
hexsha: 246e0749cdd1714c65c0961416623be200a020e0 | size: 3,849 | ext: py | lang: Python
max_stars: path=trees/trees.py, repo=patinousward/machine-learning, head=124b6c2e1a317f21238985ee4b74b638f5358b81, licenses=["MIT"], count=null, events=null
max_issues: path=trees/trees.py, repo=patinousward/machine-learning, head=124b6c2e1a317f21238985ee4b74b638f5358b81, licenses=["MIT"], count=null, events=null
max_forks: path=trees/trees.py, repo=patinousward/machine-learning, head=124b6c2e1a317f21238985ee4b74b638f5358b81, licenses=["MIT"], count=null, events=null
content:
from math import log
import operator
# 计算香农熵
# axis 表示第几列的纬度index ,value表示这个纬度的值作为分界
# 多数表决
# 创建树的函数代码
avg_line_length: 37.009615 | max_line_length: 97 | alphanum_fraction: 0.665887
original_content:
from math import log
import operator
# 计算香农熵
def calcShannonEnt(dataSet):
numEntries = len(dataSet)
labelCounts = {}
for featVec in dataSet:
currentLabel = featVec[-1] # 数据的最后一列
if currentLabel not in labelCounts.keys():
labelCounts[currentLabel] = 0 # 默认值为0
labelCounts[currentLabel] += 1
shannonEnt = 0.0
for key in labelCounts:
prob = float(labelCounts[key]) / numEntries #获取概率,也就是这个特征出现的次数/总样本
shannonEnt -= prob * log(prob,2) # 以2为底求对数,香农熵的公式
return shannonEnt
def createDataSet():
dataSet = [
[1,1,'yes'],
[1,1,'yes'],
[1,0,'no'],
[0,1,'no'],
[0,1,'no']
]
labels = ['no surfacing','flipppers']
return dataSet,labels
# axis 表示第几列的纬度index ,value表示这个纬度的值作为分界
def splitDataSet(dataSet,axis,value):
retDataSet = []
for featVec in dataSet:
if featVec[axis] == value:
reducedFeatVec = featVec[:axis] # 从开头到axis列的数据
reducedFeatVec.extend(featVec[axis + 1:])
# 上面两行代码意思是排除了当前行数据中的value值,意义为何要排除,因为要递归做决策树,下次运算不能再出现
retDataSet.append(reducedFeatVec)
return retDataSet
def chooseBestFeatureToSplit(dataSet):
numFeatures = len(dataSet[0]) -1 # 随便挑一行数据,-1是因为最后一列是结果
baseEntropy = calcShannonEnt(dataSet) # 计算当前的香农熵
bestInfoGain = 0.0;bestFeature = -1
for i in range(numFeatures): # i代表第i个特征
# 将dataSet中的数据按行依次放入example中,然后取得example中的example[i]元素,放入列表featList中
# example 是自定义的元素变量 可以拆开看 1. for example in dataSet 2. featList = [example[i]]
featList = [example[i] for example in dataSet] # 其实就是获取dataset的第i列的数据
uniqueVals = set(featList) # 去重 因为要判断以哪个value为分界
newEntropy = 0.0
for value in uniqueVals:
subDataSet = splitDataSet(dataSet,i,value)
# 这里有点类似权重,subDataSet中,每部分数据占的比重
prob = len(subDataSet)/float(len(dataSet))
newEntropy += prob * calcShannonEnt(subDataSet) # subDataSet每部分数据加起来
infoGain = baseEntropy - newEntropy
if(infoGain > bestInfoGain):
bestInfoGain = infoGain
bestFeature = i
return bestFeature
# 多数表决
def majorityCnt(classList):
classCount = {}
for vote in classList:
if vote not in classCount.keys():classCount[vote] = 0
classCount[vote] += 1
sortedClassCount = sorted(classCount,key = operator.itemgetter(1),reverse=True)
return sortedClassCount[0][0]
# 创建树的函数代码
def createTree(dataSet,labels):
classList = [example[-1] for example in dataSet] # 取dataSet/subDataset最后一列,就是是否是鱼的
if classList.count(classList[0]) == len(classList): # classList 列表里面都是同一个字符串,说明分类完全相同
return classList[0]
if len(dataSet[0]) == 1 : # dataSet剩下最后一列 example :[[x1],[x2]..]
return majorityCnt(classList)
bestFeat = chooseBestFeatureToSplit(dataSet)
bestFeatLabel = labels[bestFeat]
myTree = {bestFeatLabel:{}}
del(labels[bestFeat]) # 解除数组中的变量对数据的引用,del语句作用在变量上,而不是数据对象上,数组=删除这个数据
featValues = [example[bestFeat] for example in dataSet] # 取出当前best特征的列的值
uniqueVals = set(featValues)
for value in uniqueVals:
subLabels = labels[:] # 复制
# splitDataSet 的返回值就是参数dataSet中少一列(当前最优feature)的数据
# 子调用的返回结果会附着在父的上面形成树状
myTree[bestFeatLabel][value] = createTree(splitDataSet(dataSet,bestFeat,value),subLabels)
return myTree
# result {'no surfacing': {0: 'no', 1: {'flipppers': {0: 'no', 1: 'yes'}}}}
# no surfacing 就是是否上浮,flipppers是否有脚蹼
# 先找出最优的特征,就是是否上浮,然后value有2中,0和1
# 当为0时,通过分割数据集,发现样本分类一样,直接返回分类,就是'no'
# 当为1时,通过分割数据集,发现样本分类不一样,而且并没有剩下最后一列(这个案例,剩下最后一列就是分类)
# 继续选择最优的列,这里就只有是否有脚蹼了
# 当按是否有脚蹼为0分割子集的时候,分类都是否,所以直接返回‘no’,同理,为1分割的时候,分类都是是,所以直接返回‘yes’
# 这里隐藏了 if len(dataSet[0]) == 1 的情况,假设分割1的时候,分类不同,这时候就会走这行代码,分类不同,但是已经走完所有特征了,这里直接使用投票的方式解决
filtered:remove_function_no_docstring: 4,815 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 135
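As a quick sanity check on `calcShannonEnt` above: the sample dataset from `createDataSet` has 2 'yes' and 3 'no' labels, so its entropy is -(0.4·log2 0.4 + 0.6·log2 0.6) ≈ 0.971. A two-line verification:

```python
from math import log

probs = [2 / 5, 3 / 5]  # label frequencies of the 5-row sample dataset above
print(round(-sum(p * log(p, 2) for p in probs), 3))  # 0.971
```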
hexsha: 504a306dad3ee3153b338e1d454699034e9ee967 | size: 1,850 | ext: py | lang: Python
max_stars: path=code/Solution_0052_totalNQueens.py, repo=qizhenkang/myLeetCode, head=cb9edce69567eba9d96ce756507a5a7ac6e74293, licenses=["MIT"], count=null, events=null
max_issues: path=code/Solution_0052_totalNQueens.py, repo=qizhenkang/myLeetCode, head=cb9edce69567eba9d96ce756507a5a7ac6e74293, licenses=["MIT"], count=null, events=null
max_forks: path=code/Solution_0052_totalNQueens.py, repo=qizhenkang/myLeetCode, head=cb9edce69567eba9d96ce756507a5a7ac6e74293, licenses=["MIT"], count=null, events=null
content:
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 16 20:00:44 2021
@author: qizhe
"""
if __name__ == '__main__':
solu = Solution()
input_List = [[5,1,9,11],[2,4,8,10],[13,3,6,7],[15,14,12,16]]
input_List = [[0,1,2,3],[4,5,6,7],[8,9,10,11],[12,13,14,15]]
# input_List = 1
result = solu.solveNQueens(5)
output_Str = ' result = ' + str(result)
print(output_Str)
avg_line_length: 27.61194 | max_line_length: 114 | alphanum_fraction: 0.463784
original_content:
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 16 20:00:44 2021
@author: qizhe
"""
class Solution:
def solveNQueens(self, n: int):
"""
典型的回溯问题
读题目,他要的是,所有解,不是找到一个就可以
第三个思路对了,但性能很差,想办法剪枝一下,感觉主要问题在于valid 的恢复上面
答案:
核心思路:
1、答案搞了3个集合,也就是三个约束来构造,就很简单了,然后优化搞一个位运算
2、还有思路是每次都判断一下,感觉这个不太好
改进:
用第一个思路,就改了几行,性能突飞猛进
优化了一下位运算
"""
# 第三次尝试 N皇后问题,本质上是一个回溯问题,然后要有一个 valid数组记录就可以了
# 难点是,要找全,就意味着,所有情况都要考虑到
# 回溯的思路:
# 1、下一个可行位置,递归搜索,从而保证所有情况都考虑到,这是最关键的一步
# 2、感觉难点是,如何处理valid数组的复原问题
def dfs(n,current,invalid,i):
if len(current) == n:
# 找到新答案,加入集合
return 1
if i >= n:
return 0
# 只在这一行搜就可以了
result = 0
for y in range(n):
if not invalid[0] >> y & 0b1 and not invalid[1] >> y+i & 0b1 and not invalid[2] >> y-i + n & 0b1 :
# 更新valid数组 只需要更新的是 左斜下方,正下方,右斜下方
invalid[0] |= 0b1 << y
invalid[1] |= 0b1 << y+i
invalid[2] |= 0b1 << y-i + n
result += dfs(n,current+[i],invalid,i+1)
invalid[0] &= ~(0b1 << y)
invalid[1] &= ~(0b1 << y+i)
invalid[2] &= ~(0b1 << y-i + n)
return result
# 位运算改进
invalid = [0,0,0]
current = []
return dfs(n,current,invalid,0)
if __name__ == '__main__':
solu = Solution()
input_List = [[5,1,9,11],[2,4,8,10],[13,3,6,7],[15,14,12,16]]
input_List = [[0,1,2,3],[4,5,6,7],[8,9,10,11],[12,13,14,15]]
# input_List = 1
result = solu.solveNQueens(5)
output_Str = ' result = ' + str(result)
print(output_Str)
filtered:remove_function_no_docstring: 850 | filtered:remove_class_no_docstring: 1,233 | filtered:remove_delete_markers: 23
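The docstring above describes the key idea as three constraint collections (occupied columns and the two diagonal directions), which the solution packs into the three integers of `invalid` for bitwise checks. Below is a minimal set-based sketch of the same counting approach; `total_n_queens` is a hypothetical name, not the LeetCode entry point.

```python
def total_n_queens(n: int) -> int:
    cols, diag1, diag2 = set(), set(), set()  # occupied columns, '/' diagonals, '\' diagonals

    def place(row: int) -> int:
        if row == n:
            return 1
        count = 0
        for col in range(n):
            if col in cols or (row + col) in diag1 or (row - col) in diag2:
                continue  # square attacked by an earlier queen
            cols.add(col); diag1.add(row + col); diag2.add(row - col)
            count += place(row + 1)
            cols.remove(col); diag1.remove(row + col); diag2.remove(row - col)
        return count

    return place(0)

print(total_n_queens(5))  # 10, matching solu.solveNQueens(5) above
```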
hexsha: 5ebadfa1130eda16e18e9a569211fa885b32c271 | size: 1,835 | ext: py | lang: Python
max_stars: path=preprocess_hdf5_data.py, repo=trane293/multimodal_brain_synthesis, head=29291f091c15121707fd0fdcb1245d2ad8b944e2, licenses=["MIT"], count=null, events=null
max_issues: path=preprocess_hdf5_data.py, repo=trane293/multimodal_brain_synthesis, head=29291f091c15121707fd0fdcb1245d2ad8b944e2, licenses=["MIT"], count=null, events=null
max_forks: path=preprocess_hdf5_data.py, repo=trane293/multimodal_brain_synthesis, head=29291f091c15121707fd0fdcb1245d2ad8b944e2, licenses=["MIT"], count=null, events=null
content:
import h5py
import numpy as np
'''
if 't1.'
i = 0
seq_name = 't1'
elif 't2.' in imagefile:
i = 1
seq_name = 't2'
elif 't1ce.' in imagefile:
i = 2
seq_name = 't1ce'
elif 'flair.' in imagefile:
i = 3
seq_name = 'flair'
'''
import platform
# to make the code portable even on cedar,you need to add conditions here
node_name = platform.node()
if node_name == 'XPS15':
# this is my laptop, so the cedar-rm directory is at a different place
mount_path_prefix = '/home/anmol/'
hdf5_filepath = mount_path_prefix + 'BRATS_Combined.h5'
save_path = '/home/anmol/mounts/cedar-rm/scratch/asa224/Datasets/BRATS2018/mm_synthesis/validation_data/'
elif 'computecanada' in node_name: # we're in compute canada, maybe in an interactive node, or a scheduler node.
hdf5_filepath = '/scratch/asa224/asa224/Datasets/BRATS2018/HDF5_Datasets/' + 'BRATS2018.h5'
save_path = "/scratch/asa224/asa224/Datasets/BRATS2018/mm_synthesis/validation_data/"
hdf5_file = h5py.File(hdf5_filepath, 'r')
hf = hdf5_file['original_data']
hgg_data = hf['validation_data']
pat_names = hf['validation_data_pat_name'][:]
#save the patient names first
np.save(open(save_path + 'pat_names_validation.npz', 'wb'), pat_names)
t1 = hgg_data[:,0,...]
t1 = np.swapaxes(t1, 3, 2)
t1 = np.swapaxes(t1, 2, 1)
np.save(open(save_path + 'T1.npz', 'wb'), t1)
del t1
t2 = hgg_data[:,1,...]
t2 = np.swapaxes(t2, 3, 2)
t2 = np.swapaxes(t2, 2, 1)
np.save(open(save_path + 'T2.npz', 'wb'), t2)
del t2
t1ce = hgg_data[:,2,...]
t1ce = np.swapaxes(t1ce, 3, 2)
t1ce = np.swapaxes(t1ce, 2, 1)
np.save(open(save_path + 'T1CE.npz', 'wb'), t1ce)
del t1ce
t2flair = hgg_data[:,3,...]
t2flair = np.swapaxes(t2flair, 3, 2)
t2flair = np.swapaxes(t2flair, 2, 1)
np.save(open(save_path + 'T2FLAIR.npz', 'wb'), t2flair)
del t2flair
print('Done!')
avg_line_length: 28.230769 | max_line_length: 112 | alphanum_fraction: 0.683379
original_content:
import h5py
import numpy as np
'''
if 't1.' in imagefile:
i = 0
seq_name = 't1'
elif 't2.' in imagefile:
i = 1
seq_name = 't2'
elif 't1ce.' in imagefile:
i = 2
seq_name = 't1ce'
elif 'flair.' in imagefile:
i = 3
seq_name = 'flair'
'''
import platform
# to make the code portable even on cedar, you need to add conditions here
node_name = platform.node()
if node_name == 'XPS15':
# this is my laptop, so the cedar-rm directory is at a different place
mount_path_prefix = '/home/anmol/'
hdf5_filepath = mount_path_prefix + 'BRATS_Combined.h5'
save_path = '/home/anmol/mounts/cedar-rm/scratch/asa224/Datasets/BRATS2018/mm_synthesis/validation_data/'
elif 'computecanada' in node_name: # we're in compute canada, maybe in an interactive node, or a scheduler node.
hdf5_filepath = '/scratch/asa224/asa224/Datasets/BRATS2018/HDF5_Datasets/' + 'BRATS2018.h5'
save_path = "/scratch/asa224/asa224/Datasets/BRATS2018/mm_synthesis/validation_data/"
hdf5_file = h5py.File(hdf5_filepath, 'r')
hf = hdf5_file['original_data']
hgg_data = hf['validation_data']
pat_names = hf['validation_data_pat_name'][:]
#save the patient names first
np.save(open(save_path + 'pat_names_validation.npz', 'wb'), pat_names)
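# Each modality below is sliced out of hgg_data, its last axis moved to position 1
# (the two swapaxes calls are equivalent to np.transpose(x, (0, 3, 1, 2))), saved, and freed.
# Note: np.save writes .npy-format data even though the chosen filenames end in .npz.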
t1 = hgg_data[:,0,...]
t1 = np.swapaxes(t1, 3, 2)
t1 = np.swapaxes(t1, 2, 1)
np.save(open(save_path + 'T1.npz', 'wb'), t1)
del t1
t2 = hgg_data[:,1,...]
t2 = np.swapaxes(t2, 3, 2)
t2 = np.swapaxes(t2, 2, 1)
np.save(open(save_path + 'T2.npz', 'wb'), t2)
del t2
t1ce = hgg_data[:,2,...]
t1ce = np.swapaxes(t1ce, 3, 2)
t1ce = np.swapaxes(t1ce, 2, 1)
np.save(open(save_path + 'T1CE.npz', 'wb'), t1ce)
del t1ce
t2flair = hgg_data[:,3,...]
t2flair = np.swapaxes(t2flair, 3, 2)
t2flair = np.swapaxes(t2flair, 2, 1)
np.save(open(save_path + 'T2FLAIR.npz', 'wb'), t2flair)
del t2flair
print('Done!')
| 0
| 0
| 0
|
e81417a710f461b78ac0a327b034ccca776c6883
| 1,035
|
py
|
Python
|
tests/test_hessian.py
|
blevine37/pySpawn17
|
4fa65cfc3b4d399bcb586506782d00f86b453139
|
[
"MIT"
] | 18
|
2018-03-30T16:11:13.000Z
|
2021-08-22T18:57:12.000Z
|
tests/test_hessian.py
|
dfedorov1988/MCDMS
|
215e18327ad9f806d82eb54d101d657d0ac29bd6
|
[
"MIT"
] | 3
|
2018-03-30T17:26:51.000Z
|
2021-08-17T08:49:24.000Z
|
tests/test_hessian.py
|
dfedorov1988/MCDMS
|
215e18327ad9f806d82eb54d101d657d0ac29bd6
|
[
"MIT"
] | 6
|
2018-11-21T15:30:38.000Z
|
2021-07-05T05:37:15.000Z
|
import numpy as np
import pyspawn
pyspawn.import_methods.into_hessian(pyspawn.potential.terachem_cas)
hess = pyspawn.hessian()
ndims = 18
istate = 0
pos = np.asarray([ 0.000000000, 0.000000000, 0.101944554,
0.000000000, 0.000000000, 2.598055446,
0.000000000, 1.743557978, 3.672987826,
0.000000000, -1.743557978, 3.672987826,
0.000000000, 1.743557978, -0.972987826,
0.000000000, -1.743557978, -0.972987826])
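# 18 Cartesian coordinates: x, y, z for each of the 6 atoms listed below, matching ndims = 18.
# dr below is presumably the finite-difference displacement used by the semianalytical Hessian build.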
dr = 0.001
atoms = ['C', 'C', 'H', 'H', 'H', 'H']
tc_options = {
"method": 'hf',
"basis": '6-31g**',
"atoms": atoms,
"charge": 0,
"spinmult": 1,
"closed_shell": True,
"restricted": True,
"precision": "double",
"threall": 1.0e-20,
}
hess.set_numdims(ndims)
hess.set_istate(istate)
hess.set_positions(pos)
hess.set_tc_options(tc_options)
hess.build_hessian_hdf5_semianalytical(dr)
| 23
| 67
| 0.547826
|
import numpy as np
import pyspawn
pyspawn.import_methods.into_hessian(pyspawn.potential.terachem_cas)
hess = pyspawn.hessian()
ndims = 18
istate = 0
pos = np.asarray([ 0.000000000, 0.000000000, 0.101944554,
0.000000000, 0.000000000, 2.598055446,
0.000000000, 1.743557978, 3.672987826,
0.000000000, -1.743557978, 3.672987826,
0.000000000, 1.743557978, -0.972987826,
0.000000000, -1.743557978, -0.972987826])
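# 18 Cartesian coordinates: x, y, z for each of the 6 atoms listed below, matching ndims = 18.
# dr below is presumably the finite-difference displacement used by the semianalytical Hessian build.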
dr = 0.001
atoms = ['C', 'C', 'H', 'H', 'H', 'H']
tc_options = {
"method": 'hf',
"basis": '6-31g**',
"atoms": atoms,
"charge": 0,
"spinmult": 1,
"closed_shell": True,
"restricted": True,
"precision": "double",
"threall": 1.0e-20,
}
hess.set_numdims(ndims)
hess.set_istate(istate)
hess.set_positions(pos)
hess.set_tc_options(tc_options)
hess.build_hessian_hdf5_semianalytical(dr)
| 0
| 0
| 0
|
0037bdf517203ae654e04612f90ba818f7dbac6d
| 2,434
|
py
|
Python
|
setup.py
|
mmcauliffe/Conch
|
7668612d7a610d0f5ae3332f990e71b26c5e8b34
|
[
"MIT"
] | 33
|
2015-06-10T19:36:54.000Z
|
2017-09-18T23:57:46.000Z
|
setup.py
|
mmcauliffe/Conch
|
7668612d7a610d0f5ae3332f990e71b26c5e8b34
|
[
"MIT"
] | 5
|
2015-03-22T07:05:21.000Z
|
2017-08-16T03:48:19.000Z
|
setup.py
|
mmcauliffe/Conch
|
7668612d7a610d0f5ae3332f990e71b26c5e8b34
|
[
"MIT"
] | 10
|
2015-04-29T05:51:51.000Z
|
2017-07-12T18:52:18.000Z
|
import sys
import os
from setuptools import setup
from setuptools.command.test import test as TestCommand
import conch
if __name__ == '__main__':
setup(name='conch-sounds',
version=conch.__version__,
description='Analyze acoustic similarity in Python',
classifiers=[
'Development Status :: 3 - Alpha',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Operating System :: OS Independent',
'Topic :: Scientific/Engineering',
'Topic :: Text Processing :: Linguistic',
],
keywords='phonetics, acoustics similarity',
url='https://github.com/mmcauliffe/Conch',
download_url='https://github.com/mmcauliffe/Conch/tarball/{}'.format(
conch.__version__),
author='Michael McAuliffe',
author_email='michael.e.mcauliffe@gmail.com',
packages=['conch',
'conch.analysis',
'conch.analysis.amplitude_envelopes',
'conch.analysis.formants',
'conch.analysis.intensity',
'conch.analysis.mfcc',
'conch.analysis.pitch',
'conch.distance'],
package_data={'conch.analysis.pitch': ['*.praat'],
'conch.analysis.formants': ['*.praat'],
'conch.analysis.intensity': ['*.praat'],
'conch.analysis.mfcc': ['*.praat']},
install_requires=[
'numpy',
'scipy',
'praatio ~= 5.0',
'librosa',
'pyraat'
],
cmdclass={'test': PyTest},
extras_require={
'testing': ['pytest'],
}
)
| 34.28169
| 91
| 0.527116
|
import sys
import os
from setuptools import setup
from setuptools.command.test import test as TestCommand
import conch
def readme():
with open('README.md') as f:
return f.read()
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ['--strict', '--verbose', '-x', '--tb=long', 'tests']
if os.environ.get('TRAVIS', False):
self.test_args.insert(0, '--runslow')
self.test_suite = True
def run_tests(self):
if __name__ == '__main__': # Fix for multiprocessing infinite recursion on Windows
import pytest
errcode = pytest.main(self.test_args)
sys.exit(errcode)
if __name__ == '__main__':
setup(name='conch-sounds',
version=conch.__version__,
description='Analyze acoustic similarity in Python',
classifiers=[
'Development Status :: 3 - Alpha',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Operating System :: OS Independent',
'Topic :: Scientific/Engineering',
'Topic :: Text Processing :: Linguistic',
],
keywords='phonetics, acoustics similarity',
url='https://github.com/mmcauliffe/Conch',
download_url='https://github.com/mmcauliffe/Conch/tarball/{}'.format(
conch.__version__),
author='Michael McAuliffe',
author_email='michael.e.mcauliffe@gmail.com',
packages=['conch',
'conch.analysis',
'conch.analysis.amplitude_envelopes',
'conch.analysis.formants',
'conch.analysis.intensity',
'conch.analysis.mfcc',
'conch.analysis.pitch',
'conch.distance'],
package_data={'conch.analysis.pitch': ['*.praat'],
'conch.analysis.formants': ['*.praat'],
'conch.analysis.intensity': ['*.praat'],
'conch.analysis.mfcc': ['*.praat']},
install_requires=[
'numpy',
'scipy',
'praatio ~= 5.0',
'librosa',
'pyraat'
],
cmdclass={'test': PyTest},
extras_require={
'testing': ['pytest'],
}
)
| 499
| 5
| 99
|
0b85d81800350d0c5bcb9d6960d0c2633d6cec54
| 1,121
|
py
|
Python
|
pobx/aio/operators.py
|
nardi/pobx
|
91a0ce371def5ba8622c41faae5faa1199f16118
|
[
"MIT"
] | 2
|
2021-01-31T06:45:59.000Z
|
2021-02-01T01:20:10.000Z
|
pobx/aio/operators.py
|
nardi/pobx
|
91a0ce371def5ba8622c41faae5faa1199f16118
|
[
"MIT"
] | null | null | null |
pobx/aio/operators.py
|
nardi/pobx
|
91a0ce371def5ba8622c41faae5faa1199f16118
|
[
"MIT"
] | null | null | null |
import aioreactive as rx
from aioreactive import AsyncAnonymousObserver
from aioreactive.subject import AsyncSubject
from ..utils import dropargs, asyncinit
@asyncinit
| 29.5
| 95
| 0.67083
|
import aioreactive as rx
from aioreactive import AsyncAnonymousObserver
from aioreactive.subject import AsyncSubject
from ..utils import dropargs, asyncinit
@asyncinit
class BufferOperator():
async def __init__(self, boundaries):
self.emitter = AsyncSubject()
self.buffer = []
async def emit_buffer():
buffer = self.buffer
self.buffer = []
await self.emitter.asend(buffer)
self.subs = [
await boundaries.subscribe_async(AsyncAnonymousObserver(dropargs(emit_buffer)))
]
async def __call__(self, receiver):
async def add_to_buffer(x):
self.buffer.append(x)
self.subs.append(await receiver.subscribe_async(AsyncAnonymousObserver(add_to_buffer)))
return self
async def subscribe_async(self, *args, **kwargs):
return await self.emitter.subscribe_async(*args, **kwargs)
async def dispose_async(self):
for sub in self.subs:
await sub.dispose_async()
self.buffer.clear()
async def buffer(boundaries):
return await BufferOperator(boundaries)
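# Minimal usage sketch (stream and handler names are illustrative only):
#   op = await buffer(boundary_stream)       # emit_buffer fires on every boundary event
#   await op(source_stream)                  # start collecting source items into the buffer
#   await op.subscribe_async(AsyncAnonymousObserver(handle_batch))  # receive the batched lists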
| 798
| 2
| 152
|
2dc21830f816f4bcf684cb12098052219e1f3815
| 6,400
|
py
|
Python
|
router/run.py
|
Financial-Times/paasport
|
9f5ec95b99f03c06dc800907f6f0750f5eec7a35
|
[
"MIT"
] | 2
|
2015-04-21T10:50:15.000Z
|
2015-04-22T15:48:45.000Z
|
router/run.py
|
Financial-Times/paasport
|
9f5ec95b99f03c06dc800907f6f0750f5eec7a35
|
[
"MIT"
] | null | null | null |
router/run.py
|
Financial-Times/paasport
|
9f5ec95b99f03c06dc800907f6f0750f5eec7a35
|
[
"MIT"
] | 1
|
2020-07-30T14:50:38.000Z
|
2020-07-30T14:50:38.000Z
|
#!/usr/bin/python
# Layer 7 Router
#
from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
import json, re, os, subprocess
PORT_NUMBER = 8080
# Writes config for a given service
globaladdons = {
'example': [
'ec2-52-16-13-243.eu-west-1.compute.amazonaws.com',
],
}
for name, machines in globaladdons.iteritems():
data = {
'name': name,
'machines': machines,
}
writeservice('addon', data)
writedefaultvcl()
# This class handles any incoming request
# from the browser
try:
#Create a web server and define the handler to manage the
#incoming request
server = HTTPServer(('', PORT_NUMBER), myHandler)
print 'Started httpserver on port ' , PORT_NUMBER
server.serve_forever()
except KeyboardInterrupt:
print '^C received, shutting down the web server'
server.socket.close()
| 29.090909
| 175
| 0.66375
|
#!/usr/bin/python
# Layer 7 Router
#
from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
import json, re, os, subprocess
PORT_NUMBER = 8080
class UserError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def writedefaultvcl():
vcl = "vcl 4.0;\n\nimport directors;\n"
services = []
for filename in os.listdir('/etc/varnish/addon'):
vcl += "include \"addon/"+filename+"\";\n"
for filename in os.listdir('/etc/varnish/service'):
vcl += "include \"service/"+filename+"\";\n"
services.append(filename.split('.',1)[0])
vcl += "\n\nbackend default {\n\t.host = \"127.0.0.1\";\n\t.port = \""+str(PORT_NUMBER)+"\";\n}\n\nsub vcl_recv {\n"
for service in services:
vcl += "\tcall service_"+service+";\n"
vcl += "}\n\n"
# Write the VCL to disk
with open('/etc/varnish/default.vcl', 'w') as outfile:
outfile.write(vcl)
subprocess.check_call(["sudo", "service", "varnish", "reload"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Writes config for a given service
def writeservice(servicetype, data):
# Validate inputs
machines = data.setdefault('machines', [])
addons = data.setdefault('addons', [])
if not isinstance(machines, list):
raise UserError('The property "machines" must be a list')
if not isinstance(addons, list):
raise UserError('The property "addons" must be a list')
if len(machines) == 0:
raise UserError('Each service should have at least one machine to serve traffic from. Syntax: {"machines":["hostname-a"]}')
for addon in addons:
if addon not in globaladdons:
raise UserError('Can\'t find addon "'+addon+'"')
if servicetype == 'addon':
if 'name' not in data:
raise UserError('Addon has no name')
data['directorname'] = 'addon_'+ data['name']
else:
if 'hostname' not in data:
raise UserError('The property "hostname" is required. This is normally dervied from a variable in the URL.')
data['directorname'] = re.sub(r'\W+', '', data['hostname'])
# Put sanitised inputs back on the data dictionary
# (leave any other keys untouched)
data['machines'] = machines
data['addons'] = addons
# write the json to disk in case we need it later
with open('data/'+servicetype+'/'+data['directorname']+'.json', 'w') as outfile:
json.dump(data, outfile)
vcl = ""
backendnames = []
for backend in data['machines']:
if ':' in backend:
(backendhost, backendport) = backend.rsplit(':', 1)  # split off the port from the right
else:
backendhost = backend
backendport = "80"
backendname = data['directorname'] + re.sub(r'\W+', '', backend)
backendnames.append(backendname)
vcl += "backend "+backendname+" {\n\t.host = \""+backendhost+"\";\n\t.port = \""+backendport+"\";\n\t.probe = {\n\t\t.url = \"/__gtg\";\n\t}\n}\n\n"
vcl += "sub vcl_init {\n\tnew "+data['directorname']+" = directors.random();\n"
for backendname in backendnames:
vcl += "\t"+data['directorname']+".add_backend("+backendname+", 1);\n"
vcl += "}\n\n"
if servicetype == 'service':
vcl += "sub "+servicetype+"_"+data['directorname']+" {\n\tif (req.http.Host == \""+data['hostname']+"\") {\n\t\tset req.backend_hint = "+data['directorname']+".backend();\n"
firstaddon = True
for addon in data['addons']:
if firstaddon:
vcl += "\t\tif "
firstaddon = False
else:
vcl += " elseif "
vcl += "(req.http.paas_addons !~ \""+addon+"\") {\n\t\t\tset req.backend_hint = addon_"+addon+".backend();\n\t\t}"
vcl += "\n\t}\n}\n\n"
# Write the VCL to disk
with open('/etc/varnish/'+servicetype+'/'+data['directorname']+'.vcl', 'w') as outfile:
outfile.write(vcl)
globaladdons = {
'example': [
'ec2-52-16-13-243.eu-west-1.compute.amazonaws.com',
],
}
for name, machines in globaladdons.iteritems():
data = {
'name': name,
'machines': machines,
}
writeservice('addon', data)
writedefaultvcl()
# This class handles any incoming request
# from the browser
class myHandler(BaseHTTPRequestHandler):
servicematch = re.compile('/service/(?P<hostname>[^/]+)')
#Handler for the GET requests
def do_GET(self):
try:
service = self.servicematch.search(self.path)
if service is not None:
hostname = service.group('hostname')
try:
with open('data/service/'+re.sub(r'\W+', '', hostname)+'.json', 'r') as outfile:
output = outfile.read()
except IOError as e:
raise UserError("Service Not Found with hostname "+hostname)
self.send_response(200)
self.send_header('Content-type','text/json')
self.end_headers()
self.wfile.write(output)
return
else:
raise UserError("Not Found")
except UserError as e:
if "Not Found" in e.value:
self.send_response(404)
else:
self.send_response(400)
self.send_header('Content-type','text/plain')
self.end_headers()
self.wfile.write(e.value + "\n")
return
except Exception as e:
self.send_response(500)
self.send_header('Content-type','text/plain')
self.end_headers()
self.wfile.write("Internal Error: " + str(e) + "\n")
def do_PUT(self):
try:
service = self.servicematch.search(self.path)
if service is not None:
if not 'Content-Length' in self.headers:
raise UserError("No Content-Length Header")
varLen = int(self.headers['Content-Length'])
jsoninput = self.rfile.read(varLen)
try:
data = json.loads(jsoninput)
except:
raise UserError("Expected JSON")
data['hostname'] = service.group('hostname')
writeservice('service', data)
writedefaultvcl()
self.send_response(201)
self.send_header('Content-type','text/plain')
self.end_headers()
self.wfile.write("Service Set\n")
return
else:
raise UserError("Not Found")
except UserError as e:
if e.value == "Not Found":
self.send_response(404)
else:
self.send_response(400)
self.send_header('Content-type','text/plain')
self.end_headers()
self.wfile.write(e.value + "\n")
return
except Exception as e:
self.send_response(500)
self.send_header('Content-type','text/plain')
self.end_headers()
self.wfile.write("Internal Error: " + str(e) + "\n")
try:
#Create a web server and define the handler to manage the
#incoming request
server = HTTPServer(('', PORT_NUMBER), myHandler)
print 'Started httpserver on port ' , PORT_NUMBER
server.serve_forever()
except KeyboardInterrupt:
print '^C received, shutting down the web server'
server.socket.close()
| 5,288
| 163
| 136
|
4c771c595f5a2b799d9ea019a22c62e966df3bf9
| 1,849
|
py
|
Python
|
tests/unit/test_compact.py
|
armon/pypred
|
4f96575a9c05ab2829c0808737b146d8cc632cb7
|
[
"BSD-3-Clause"
] | 40
|
2015-03-16T02:15:51.000Z
|
2022-02-04T20:54:47.000Z
|
tests/unit/test_compact.py
|
armon/pypred
|
4f96575a9c05ab2829c0808737b146d8cc632cb7
|
[
"BSD-3-Clause"
] | 10
|
2015-01-02T16:23:37.000Z
|
2021-07-15T07:28:12.000Z
|
tests/unit/test_compact.py
|
armon/pypred
|
4f96575a9c05ab2829c0808737b146d8cc632cb7
|
[
"BSD-3-Clause"
] | 17
|
2015-06-11T14:25:09.000Z
|
2021-07-14T07:30:04.000Z
|
from pypred import ast, compact
| 31.87931
| 93
| 0.566252
|
from pypred import ast, compact
class TestCompact(object):
def test_compact(self):
l = ast.Literal('foo')
v = ast.Number(42)
gt = ast.CompareOperator('>', l, v)
l1 = ast.Literal('foo')
v1 = ast.Number(42)
lt = ast.CompareOperator('<', l1, v1)
n = ast.LogicalOperator('or', gt, lt)
compact.compact(n)
# Literal and number should be de-dupped
assert l is n.right.left
assert v is n.right.right
def test_names(self):
n1 = ast.Literal("foo")
assert ("Literal", "foo") == compact.node_name(n1)
n2 = ast.Number(12)
assert ("Number", 12) == compact.node_name(n2)
n3 = ast.Constant(True)
assert ("Constant", True) == compact.node_name(n3)
n4 = ast.Regex("^tubez$")
assert ("Regex", "^tubez$") == compact.node_name(n4)
n5 = ast.Undefined()
assert "Undefined" == compact.node_name(n5)
n6 = ast.Empty()
assert "Empty" == compact.node_name(n6)
n7 = ast.NegateOperator(n3)
assert ("NegateOperator", ("Constant", True)) == compact.node_name(n7)
n8 = ast.CompareOperator('=', n1, n2)
n8_name = compact.node_name(n8)
assert ("CompareOperator", "=", ("Literal", "foo"), ("Number", 12)) == n8_name
n9 = ast.MatchOperator(n1, n4)
n9_name = compact.node_name(n9)
assert ("MatchOperator", ("Literal", "foo"), ("Regex", "^tubez$")) == n9_name
n10 = ast.ContainsOperator(n1, n2)
n10_name = compact.node_name(n10)
assert ("ContainsOperator", ("Literal", "foo"), ("Number", 12.0)) == n10_name
n11 = ast.LogicalOperator('and', n1, n3)
n11_name = compact.node_name(n11)
assert ("LogicalOperator", "and", ("Literal", "foo"), ("Constant", True)) == n11_name
| 1,734
| 5
| 77
|
ba89dd4cb6362cf1ebc95ae0ce94f8263fa0dede
| 2,621
|
py
|
Python
|
docs/build_cli_docs.py
|
TedrosGitHub/TSA-yatsm
|
8e328f366c8fd94d5cc57cd2cc42080c43d1f391
|
[
"MIT"
] | 59
|
2015-02-03T19:56:17.000Z
|
2022-03-17T13:45:23.000Z
|
docs/build_cli_docs.py
|
TedrosGitHub/TSA-yatsm
|
8e328f366c8fd94d5cc57cd2cc42080c43d1f391
|
[
"MIT"
] | 97
|
2015-02-12T05:18:38.000Z
|
2020-06-09T16:10:38.000Z
|
docs/build_cli_docs.py
|
TedrosGitHub/TSA-yatsm
|
8e328f366c8fd94d5cc57cd2cc42080c43d1f391
|
[
"MIT"
] | 35
|
2015-02-27T19:43:23.000Z
|
2021-06-21T02:29:14.000Z
|
#!/usr/bin/env python
""" Build CLI help pages to RST for dynamic inclusion of help messages
This solves the problem of not being able to install YATSM on readthedocs
because of its complicated dependencies without the need to mock out
basically every import. Just run this script before pushing any new changes
to the documentation to make sure the ``yatsm [subcommand] --help`` usage
is up to date.
"""
from contextlib import contextmanager
import errno
import os
import subprocess
import sys
import click
import click_plugins
from yatsm.cli.main import cli as yatsm_cli
# Add YATSM to sys.path
here = os.path.dirname(__file__)
sys.path.insert(0, os.path.join(here, '..'))
@contextmanager
def redirect_stdout(stream):
""" Redirect stdout to file to capture click's printouts
NOTE:
Available as contextlib.redirect_stdout in Python 3.4, but
re-coded here for compatibility with Python 2.7.
See https://bugs.python.org/issue15805
"""
old_stream = sys.stdout
sys.stdout = stream
try:
yield
finally:
sys.stdout = old_stream
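# Minimal usage sketch (file name is illustrative):
#   with open('usage.txt', 'w') as fh, redirect_stdout(fh):
#       print('captured into usage.txt instead of the console')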
if __name__ == '__main__':
help_docs_dst = make_destination()
# CLICK COMMAND LINE
for cmd in [yatsm_cli] + yatsm_cli.commands.values():
if isinstance(cmd, click_plugins.core.BrokenCommand):
continue
name = 'yatsm {}'.format(cmd.name) if cmd.name != 'cli' else 'yatsm'
dst = os.path.join(help_docs_dst,
'{}.txt'.format(name.replace(' ', '_')))
cmd_help_to_rst(cmd, dst, name)
# SCRIPTS IN yatsm/scripts
script_dir = os.path.join(here, '..', 'scripts')
os.environ['PATH'] += '{sep}{path}'.format(sep=os.pathsep, path=script_dir)
for script in os.listdir(script_dir):
script_name = os.path.splitext(script)[0]
dst = os.path.join(help_docs_dst, '{}.txt'.format(script_name))
with open(dst, 'w') as fid:
fid.write('$ {} -h\n'.format(script))
fid.flush()
subprocess.Popen([script, '-h'], stdout=fid).communicate()
| 30.126437
| 79
| 0.638306
|
#!/usr/bin/env python
""" Build CLI help pages to RST for dynamic inclusion of help messages
This solves the problem of not being able to install YATSM on readthedocs
because of its complicated dependencies without the need to mock out
basically every import. Just run this script before pushing any new changes
to the documentation to make sure the ``yatsm [subcommand] --help`` usage
is up to date.
"""
from contextlib import contextmanager
import errno
import os
import subprocess
import sys
import click
import click_plugins
from yatsm.cli.main import cli as yatsm_cli
# Add YATSM to sys.path
here = os.path.dirname(__file__)
sys.path.insert(0, os.path.join(here, '..'))
def make_destination():
# Output directory
help_docs_dst = os.path.join(here, 'cli', 'usage')
try:
os.makedirs(help_docs_dst)
except OSError as e:
if e.errno != errno.EEXIST:
raise
return help_docs_dst
@contextmanager
def redirect_stdout(stream):
""" Redirect stdout to file to capture click's printouts
NOTE:
Available as contextlib.redirect_stdout in Python 3.4, but
re-coded here for compatibility with Python 2.7.
See https://bugs.python.org/issue15805
"""
old_stream = sys.stdout
sys.stdout = stream
try:
yield
finally:
sys.stdout = old_stream
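# Minimal usage sketch (file name is illustrative):
#   with open('usage.txt', 'w') as fh, redirect_stdout(fh):
#       print('captured into usage.txt instead of the console')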
def cmd_help_to_rst(cmd, dst, name):
with open(dst, 'w') as fid:
fid.write('$ {} --help\n'.format(name))
with redirect_stdout(fid):
try:
cmd.make_context(name, ['--help'])
except SystemExit:
# Success
pass
if __name__ == '__main__':
help_docs_dst = make_destination()
# CLICK COMMAND LINE
for cmd in [yatsm_cli] + yatsm_cli.commands.values():
if isinstance(cmd, click_plugins.core.BrokenCommand):
continue
name = 'yatsm {}'.format(cmd.name) if cmd.name != 'cli' else 'yatsm'
dst = os.path.join(help_docs_dst,
'{}.txt'.format(name.replace(' ', '_')))
cmd_help_to_rst(cmd, dst, name)
# SCRIPTS IN yatsm/scripts
script_dir = os.path.join(here, '..', 'scripts')
os.environ['PATH'] += '{sep}{path}'.format(sep=os.pathsep, path=script_dir)
for script in os.listdir(script_dir):
script_name = os.path.splitext(script)[0]
dst = os.path.join(help_docs_dst, '{}.txt'.format(script_name))
with open(dst, 'w') as fid:
fid.write('$ {} -h\n'.format(script))
fid.flush()
subprocess.Popen([script, '-h'], stdout=fid).communicate()
| 504
| 0
| 46
|
2e975b39468ca0ba983a654b851314f2a36a1b84
| 151
|
py
|
Python
|
Python3/DebuggingAndErrorHandling/try_except.py
|
norbertosanchezdichi/TIL
|
2e9719ddd288022f53b094a42679e849bdbcc625
|
[
"MIT"
] | null | null | null |
Python3/DebuggingAndErrorHandling/try_except.py
|
norbertosanchezdichi/TIL
|
2e9719ddd288022f53b094a42679e849bdbcc625
|
[
"MIT"
] | null | null | null |
Python3/DebuggingAndErrorHandling/try_except.py
|
norbertosanchezdichi/TIL
|
2e9719ddd288022f53b094a42679e849bdbcc625
|
[
"MIT"
] | null | null | null |
try:
foobar
except: #"catch all", highly discourage due to not being able to identify what went wrong
print("PROBLEM")
print("after the try")
| 30.2
| 91
| 0.708609
|
try:
foobar
except: #"catch all", highly discourage due to not being able to identify what went wrong
print("PROBLEM")
print("after the try")
| 0
| 0
| 0
|
2e644c0780b72db35e413e1216b0bdcb7a87ce73
| 3,895
|
py
|
Python
|
src/onnxruntime_numpy/einsum_helper.py
|
gf712/onnxruntime-numpy
|
752ecb90e97295384c96ff339165c461ba4caf87
|
[
"MIT"
] | 2
|
2021-04-24T07:50:31.000Z
|
2021-09-07T18:56:51.000Z
|
src/onnxruntime_numpy/einsum_helper.py
|
gf712/onnxruntime-numpy
|
752ecb90e97295384c96ff339165c461ba4caf87
|
[
"MIT"
] | null | null | null |
src/onnxruntime_numpy/einsum_helper.py
|
gf712/onnxruntime-numpy
|
752ecb90e97295384c96ff339165c461ba4caf87
|
[
"MIT"
] | null | null | null |
import re
import string
# obtained and modified from
# https://github.com/tensorflow/tensorflow/blob/590d6eef7e91a6a7392c8ffffb7b58f2e0c8bc6b/tensorflow/python/ops/special_math_ops.py#L311
def einsum_parse_and_resolve_equation(equation, input_shapes):
"""Helper for einsum() that splits/resolves inputs & outputs.
Args:
equation: Equation string given as argument to einsum().
input_shapes: List of the shapes of all inputs given to einsum()
Returns:
input_axis_labels, output_axis_labels where:
input_axis_labels: List of length len(input_shapes) of strings
representing the character label for each dimension of each given input,
resolving any broadcast (...) axes,
output_axis_labels: A string of character labels for each axis of the output
tensor, filling in missing output subscripts and broadcast axes.
Raises:
ValueError: If equation is in the incorrect format, incorrect number of
inputs given or broadcast axes "..." or output axes could not be resolved.
"""
equation = equation.replace(' ', '')
match = re.match('^([a-zA-Z,.]+)(->[a-zA-Z.]*)?$', equation)
if not match:
raise ValueError('Indices have incorrect format: %s' % equation)
input_axis_labels = match.group(1).split(',')
output_axis_labels = match.group(2)[2:] if match.group(2) else None
if len(input_shapes) != len(input_axis_labels):
raise ValueError('Got %d arguments for equation "%s", expecting %d' %
(len(input_shapes), equation, len(input_axis_labels)))
# Resolve Ellipsis
# Assign axes labels for unspecified dimensions in inputs. Labels taken
# from unused labels. Follow numpy einsum broadcasting conventions for
# tensors of different length and unlabeled output.
ellipsis_axes = ''
if '...' in equation:
unused = ''.join([c for c in string.ascii_letters
if c not in ''.join(input_axis_labels)])
for i, ax in enumerate(input_axis_labels):
if '...' in ax:
parts = ax.split('...')
if len(parts) != 2:
raise ValueError(
'Unable to resolve ellipsis. Excess number found.')
n = len(input_shapes[i]) - len(''.join(parts))
if n < 0:
raise ValueError('Ellipses lengths do not match.')
if len(unused) < n:
raise ValueError(
'Unable to resolve ellipsis, too many distinct labels.')
replace_axes = unused[-n:] if n > 0 else ''
input_axis_labels[i] = input_axis_labels[i].replace(
'...', replace_axes)
if len(replace_axes) > len(ellipsis_axes):
ellipsis_axes = replace_axes
if any(['.' in ax for ax in input_axis_labels]):
raise ValueError('period "." found outside of ellipsis')
if output_axis_labels is not None:
output_axis_labels = output_axis_labels.replace(
'...', ellipsis_axes)
if '.' in output_axis_labels:
raise ValueError('period "." found outside of ellipsis')
if output_axis_labels is None:
# infer the output subscripts if not given, assume alphabetical order,
# but always place ellipsis axes before given.
axis_labels = set(''.join(input_axis_labels)) - set(ellipsis_axes)
indices = ''.join(sorted(axis_labels))
counts = {ax: 0 for ax in indices}
for axes_ in input_axis_labels:
for ax in axes_:
if ax not in ellipsis_axes:
counts[ax] += 1
output_axis_labels = ellipsis_axes + ''.join(
sorted(ax for ax in axis_labels if counts[ax] == 1))
return input_axis_labels, output_axis_labels
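# Usage sketch (shapes here are only illustrative):
#   einsum_parse_and_resolve_equation('ij,jk->ik', [(2, 3), (3, 4)])  ->  (['ij', 'jk'], 'ik')
#   einsum_parse_and_resolve_equation('ij,jk', [(2, 3), (3, 4)])      ->  (['ij', 'jk'], 'ik')  # output inferred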
| 45.290698
| 135
| 0.618485
|
import re
import string
# obtained and modified from
# https://github.com/tensorflow/tensorflow/blob/590d6eef7e91a6a7392c8ffffb7b58f2e0c8bc6b/tensorflow/python/ops/special_math_ops.py#L311
def einsum_parse_and_resolve_equation(equation, input_shapes):
"""Helper for einsum() that splits/resolves inputs & outputs.
Args:
equation: Equation string given as argument to einsum().
input_shapes: List of the shapes of all inputs given to einsum()
Returns:
input_axis_labels, output_axis_labels where:
input_axis_labels: List of length len(input_shapes) of strings
representing the character label for each dimension of each given input,
resolving any broadcast (...) axes,
output_axis_labels: A string of character labels for each axis of the output
tensor, filling in missing output subscripts and broadcast axes.
Raises:
ValueError: If equation is in the incorrect format, incorrect number of
inputs given or broadcast axes "..." or output axes could not be resolved.
"""
equation = equation.replace(' ', '')
match = re.match('^([a-zA-Z,.]+)(->[a-zA-Z.]*)?$', equation)
if not match:
raise ValueError('Indices have incorrect format: %s' % equation)
input_axis_labels = match.group(1).split(',')
output_axis_labels = match.group(2)[2:] if match.group(2) else None
if len(input_shapes) != len(input_axis_labels):
raise ValueError('Got %d arguments for equation "%s", expecting %d' %
(len(input_shapes), equation, len(input_axis_labels)))
# Resolve Ellipsis
# Assign axes labels for unspecified dimensions in inputs. Labels taken
# from unused labels. Follow numpy einsum broadcasting conventions for
# tensors of different length and unlabeled output.
ellipsis_axes = ''
if '...' in equation:
unused = ''.join([c for c in string.ascii_letters
if c not in ''.join(input_axis_labels)])
for i, ax in enumerate(input_axis_labels):
if '...' in ax:
parts = ax.split('...')
if len(parts) != 2:
raise ValueError(
'Unable to resolve ellipsis. Excess number found.')
n = len(input_shapes[i]) - len(''.join(parts))
if n < 0:
raise ValueError('Ellipses lengths do not match.')
if len(unused) < n:
raise ValueError(
'Unable to resolve ellipsis, too many distinct labels.')
replace_axes = unused[-n:] if n > 0 else ''
input_axis_labels[i] = input_axis_labels[i].replace(
'...', replace_axes)
if len(replace_axes) > len(ellipsis_axes):
ellipsis_axes = replace_axes
if any(['.' in ax for ax in input_axis_labels]):
raise ValueError('period "." found outside of ellipsis')
if output_axis_labels is not None:
output_axis_labels = output_axis_labels.replace(
'...', ellipsis_axes)
if '.' in output_axis_labels:
raise ValueError('period "." found outside of ellipsis')
if output_axis_labels is None:
# infer the output subscripts if not given, assume alphabetical order,
# but always place ellipsis axes before given.
axis_labels = set(''.join(input_axis_labels)) - set(ellipsis_axes)
indices = ''.join(sorted(axis_labels))
counts = {ax: 0 for ax in indices}
for axes_ in input_axis_labels:
for ax in axes_:
if ax not in ellipsis_axes:
counts[ax] += 1
output_axis_labels = ellipsis_axes + ''.join(
sorted(ax for ax in axis_labels if counts[ax] == 1))
return input_axis_labels, output_axis_labels
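# Usage sketch (shapes here are only illustrative):
#   einsum_parse_and_resolve_equation('ij,jk->ik', [(2, 3), (3, 4)])  ->  (['ij', 'jk'], 'ik')
#   einsum_parse_and_resolve_equation('ij,jk', [(2, 3), (3, 4)])      ->  (['ij', 'jk'], 'ik')  # output inferred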
| 0
| 0
| 0
|
3a061df687e9af5bce3beb7af1dbd2d6be9f9a0c
| 2,837
|
py
|
Python
|
src/lib/gcode_preview.py
|
utitankaspk/SPKCAM
|
a1ac8121e93faaecd0132d16b3c3a5ae7fa84343
|
[
"Apache-2.0"
] | 1
|
2018-12-05T22:44:48.000Z
|
2018-12-05T22:44:48.000Z
|
src/lib/gcode_preview.py
|
utitankaspk/SPKCAM
|
a1ac8121e93faaecd0132d16b3c3a5ae7fa84343
|
[
"Apache-2.0"
] | null | null | null |
src/lib/gcode_preview.py
|
utitankaspk/SPKCAM
|
a1ac8121e93faaecd0132d16b3c3a5ae7fa84343
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import rhinoscriptsyntax as rs
import Rhino
import os
color_palette = {"cut":(153,204,255),"plunge":(254,184,0),"point":(153,204,255)}
LAYER_NAME = "vector_from_gcode"
if __name__=="__main__":
main()
| 31.175824
| 113
| 0.602749
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import rhinoscriptsyntax as rs
import Rhino
import os
color_palette = {"cut":(153,204,255),"plunge":(254,184,0),"point":(153,204,255)}
LAYER_NAME = "vector_from_gcode"
def parse_line(line):
line = line.lower().replace("\n","")
point = []
if line.startswith("(") or line.startswith("m"):
return point
try:
if line.find("x") != -1:
point.append(float(line.split("x")[1].split("y")[0]))
point.append(float(line.split("y")[1].split("z")[0]))
if line.find("f") != -1:
z_pt,feed = line.split("y")[1].split("z")[1].split("f")
point.append(float(z_pt))
point.append(int(feed))
else:
point.append(float(line.split("y")[1].split("z")[1]))
except:
return None
return point
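# Example (illustrative G-code move line; input is lowercased before parsing):
#   parse_line("X10.0Y20.0Z-1.0F800") -> [10.0, 20.0, -1.0, 800]
#   parse_line("(comment)")           -> []   (comments and M-codes are skipped)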
def vector_sum(lines,preview=False):
vector_list = []
total_time = 0
total_length = 0
last_feed = 0
for i in range(len(lines)):
point_a = lines[i][:3]
if i == len(lines)-1: break
point_b = lines[i+1][:3]
vector = rs.AddLine(point_a,point_b) if preview else rs.VectorCreate(point_a,point_b)
if preview:
rs.ObjectLayer(vector,LAYER_NAME)
rs.ObjectColor(vector,color_palette["cut"])
vector_list.append(vector)
if len(lines[i]) == 4:
feed = lines[i][-1]
last_feed = feed
vector_length = rs.CurveLength(vector) if preview else rs.VectorLength(vector)
total_length += vector_length
total_time += (vector_length)/last_feed
return vector_list,round(total_time,2),round(total_length,2)
def vectors_from_gcode(lines,preview=False,only_time=False):
parsed_lines = []
for line in lines:
parsed_line = parse_line(line)
if parsed_line: parsed_lines.append(parsed_line)
vector_list,total_time,total_length = vector_sum(parsed_lines,preview)
if only_time:
return total_time
else: return vector_list,total_time,total_length
def main():
print "Spkcam's vector from gcode"
print "selecciona tu archivo de codifo g:"
if not rs.IsLayer(LAYER_NAME):
rs.AddLayer(LAYER_NAME)
else:
rs.LayerLocked(LAYER_NAME, locked=False)
f_path = rs.OpenFileName(title="Selecciona CodigoG", filter=None, folder=None, filename=None, extension=None)
f = open(f_path)
gcode = f.readlines()
f.close()
vector_list,total_time,total_length = vectors_from_gcode(gcode,True)
print "Tiempo de corte: %s minutos" % total_time
print "Logitud total: %s mm" % total_length
rs.LayerLocked(LAYER_NAME, locked=True)
if __name__=="__main__":
main()
| 2,477
| 0
| 100
|
fae7b0157a18871e061420658fff8d1d8058b250
| 460
|
py
|
Python
|
dictionary_from_book5.py
|
AmitSuresh/learning-python
|
f1ea5b9f3659f21504b1b0e452c03239b03cde85
|
[
"MIT"
] | null | null | null |
dictionary_from_book5.py
|
AmitSuresh/learning-python
|
f1ea5b9f3659f21504b1b0e452c03239b03cde85
|
[
"MIT"
] | null | null | null |
dictionary_from_book5.py
|
AmitSuresh/learning-python
|
f1ea5b9f3659f21504b1b0e452c03239b03cde85
|
[
"MIT"
] | null | null | null |
#A dictionary in a Dictionary
users = {
'aeinstein':{
'first':'albert',
'last':'einstein',
'location':'princeton',
},
'mcurie':{
'first':'mary',
'last':'curie',
'location':'paris',
},
}
for username,user_info in users.items():
print(f'Username: {username}')
full_name=f"{user_info['first']} {user_info['last']}"
location=user_info['location']
print(f'Full name:{full_name.title()}')
print(f'Location:{location.title()}')
| 24.210526
| 55
| 0.621739
|
#A dictionary in a Dictionary
users = {
'aeinstein':{
'first':'albert',
'last':'einstein',
'location':'princeton',
},
'mcurie':{
'first':'mary',
'last':'curie',
'location':'paris',
},
}
for username,user_info in users.items():
print(f'Username: {username}')
full_name=f"{user_info['first']} {user_info['last']}"
location=user_info['location']
print(f'Full name:{full_name.title()}')
print(f'Location:{location.title()}')
| 0
| 0
| 0
|
9308ccd54291ef3ae5d3f45ec6555591f94d6baa
| 36,724
|
py
|
Python
|
adi_study_watch/nrf5_sdk_15.2.0/validation/common.py
|
ArrowElectronics/Vital-Signs-Monitoring
|
ba43fe9a116d94170561433910fd7bffba5726e7
|
[
"Unlicense"
] | 5
|
2021-06-13T17:11:19.000Z
|
2021-12-01T18:20:38.000Z
|
adi_study_watch/nrf5_sdk_15.2.0/validation/common.py
|
ArrowElectronics/Vital-Signs-Monitoring
|
ba43fe9a116d94170561433910fd7bffba5726e7
|
[
"Unlicense"
] | null | null | null |
adi_study_watch/nrf5_sdk_15.2.0/validation/common.py
|
ArrowElectronics/Vital-Signs-Monitoring
|
ba43fe9a116d94170561433910fd7bffba5726e7
|
[
"Unlicense"
] | 1
|
2022-01-08T15:01:44.000Z
|
2022-01-08T15:01:44.000Z
|
import os
import sys
try:
import tkinter
import time
import yaml
from tkinter import messagebox
from utils.serial_comm import SerialIface
from datetime import datetime
from utils.test_utils import util_logger
from matplotlib import pyplot as plt
from robot.libraries.BuiltIn import BuiltIn
import shutil
import inspect
import logging
import easygui
import subprocess
from utils import instr_lib
from utils.cli_map import CLIMap
from utils import cli_map
import threading
except Exception as e:
print("Import Exception! Details:", e)
# Adding CLI destination path to sys path in order to import the module
# curr_dir = os.path.join(os.path.abspath(__file__), '../')
# cli_dir = os.path.join(curr_dir, '../adi_study_watch/cli/m2m2/tools')
# sys.path.insert(0, cli_dir)
# import CLI
# from adi_study_watch_cli import CLI
# **********************************************************************
# Initializing TkInter for showing dialog pop ups
root = tkinter.Tk()
root.withdraw()
# **********************************************************************
# ********************** Test Variables ********************************
arduino_port = None # This variable will be updated from station config file [read_station_Cfg()]
watch_port = None # This variable will be updated from station config file [read_station_Cfg()]
watch_port_ble = None # This variable will be updated from station config file [read_station_Cfg()]
fg_instr_addr = None # This variable will be updated from station config file [read_station_Cfg()]
watch_port_type = None # This variable will be updated from station config file [read_station_Cfg()]
sm_instr_addr = None # This variable will be updated from station config file [read_station_Cfg()]
arduino = None
watch_shell = None
ts_mismatch_tolerance = None
fg, sm = None, None
matlab_eng = None
pcb_name_default = 'A1H1'
shared_drive_path = r'\\wilmnas4\Local Programs\FDWatch_TestData\Data_Testsetup\DVT1_Test_Results'
ecg_stream_file_name = 'ecg.csv'
bia_stream_file_name = "bia.csv"
ppg_stream_file_name = 'ppg.csv'
syncppg_stream_file_name = 'sync_ppg.csv'
adxl_stream_file_name = 'adxl.csv'
temperature_stream_file_name = 'temp.csv'
adpd_stream_file_name = 'adpd6.csv'
eda_stream_file_name = 'eda.csv'
volt_scale_range = (0, 5)
# The switch map dictionary maps the various switches to Arduino digital pins (see values below)
switch_map = {'SNOISE1': 22, 'SNOISE2': 23, 'ECG_NEGSEL': 24, 'ECG_POSSEL': 25}
close_plot_mode_global = True
test_report_dir = None
copy_results_to_shared_drive = True
save_plots = False
DVT_version = None
adpd_clk_calib = None
cm = None # CLI Map
ble_mac_addr = None
current_watch_mode = None
test_level_handeler = 0
# **********************************************************************
# ********************* Configure Logging ******************************
test_logger = logging.getLogger('test_logger')
logging_format = "[%(levelname)s] : %(message)s"
date_str = "%m/%d/%Y %I:%M:%S %p"
logger_formatter = logging.Formatter(logging_format, date_str)
test_stream_handler = logging.StreamHandler()
test_logger.setLevel(logging.INFO)
test_logger.addHandler(test_stream_handler)
# logging_format = "[%(asctime)s] [%(levelname)s]: %(message)s"
# logging.basicConfig(# filename='output.log',
# level=logging.INFO,
# # filemode='w',
# format=logging_format,
# datefmt=date_str)
# **********************************************************************
# ********************* Common Functions *******************************
class ConditionCheckFailure(RuntimeError):
"""
This class is used to raise failures from test cases so that
robot framework detects them as failures and continues to
next test case due to the below variable
"""
ROBOT_CONTINUE_ON_FAILURE = True
def update_robot_suite_doc(doc):
"""
:param doc:
:return:
"""
try:
BuiltIn().set_suite_documentation(doc)
except Exception as e:
test_logger.warn('Skipping robot documentation update!')
pass
def write_analysis_report(result_dict, report_file=None, header='Analysis Section', append_report=False):
"""
:param result_dict:
:param report_file:
:param header:
:param append_report:
:return:
"""
report_file = 'analysis_report.txt' if not report_file else report_file
file_mode = 'a' if append_report else 'w'
with open(report_file, file_mode) as f_ref:
f_ref.write('<<< {} >>>\n'.format(header))
for k, v in result_dict.items():
f_ref.write('{} = {}\n'.format(k, v))
f_ref.write('\n')
return os.path.abspath(report_file)
def analyze_wfm(file_path, mode='ecg', file_mode='cli', gen_filtered_ppg='1'):
"""
This function calls the BioSigProcess app built from LV vi and extracts results stored in yaml file
:param file_path: waveform *.csv file path
:param mode: 'ecg' | 'ppg'
:param file_mode: 'cli' | 'awt'
:return:
"""
results_dict = {}
if os.path.isfile(file_path):
subprocess.call(['utils/lv/builds/bio_sig_process/BioSigProcess.exe', mode, file_path, file_mode, gen_filtered_ppg])
time.sleep(2)
result_file_path = os.path.join(file_path, '../{}_extracted_results.txt'.format(mode))
with open(result_file_path, 'r') as f_ref:
line_list = f_ref.readlines()
for line in line_list:
result_list = line.split(' - ')
results_dict[result_list[0]] = result_list[1].strip()
else:
test_logger.warn('Input File not found! {}'.format(file_path))
results_dict = None
return results_dict
def quick_start_ecg(samp_freq_hz=100):
"""
:param samp_freq_hz:
:return:
"""
watch_shell.do_lcfg('w ecg 0:{}'.format(hex(samp_freq_hz)))
watch_shell.quick_start('ecg', 'ecg')
def quick_start_bia(samp_freq_hz=100):
"""
:param samp_freq_hz:
:return:
"""
watch_shell.do_lcfg('w bia 0:{}'.format(hex(samp_freq_hz)))
watch_shell.quick_start('bia', 'bia')
def set_ecg_stream_freq(samp_freq_hz=100):
"""
:param samp_freq_hz:
:return:
"""
watch_shell.do_lcfg('w ecg 0:{}'.format(hex(samp_freq_hz)))
def set_eda_stream_freq(samp_freq_hz=4):
"""
:param samp_freq_hz:
:return:
"""
watch_shell.do_lcfg('w eda 0:{}'.format(hex(samp_freq_hz)))
if samp_freq_hz <= 16:
watch_shell.do_lcfg("w eda 0x2:0x2")
else:
watch_shell.do_lcfg("w eda 0x2:0x1")
def quick_start_eda(samp_freq_hz=4):
"""
:param samp_freq_hz:
:return:
"""
if samp_freq_hz:
set_eda_stream_freq(samp_freq_hz)
watch_shell.quick_start('eda', 'eda')
def quick_start_eda_fs(samp_freq_hz=4):
"""
:param samp_freq_hz:
:return:
"""
if samp_freq_hz:
set_eda_stream_freq(samp_freq_hz)
watch_shell.quick_start("eda", "eda", fs=True)
watch_shell.do_start_logging("")
def quick_start_bia_fs(samp_freq_hz=4):
"""
:param samp_freq_hz:
:return:
"""
watch_shell.do_lcfg('w bia 0:{}'.format(hex(samp_freq_hz)))
watch_shell.quick_start("bia", "bia", fs=True)
watch_shell.do_start_logging("")
def quick_start_ecg_fs(samp_freq_hz=4):
"""
:param samp_freq_hz:
:return:
"""
watch_shell.do_lcfg('w ecg 0:{}'.format(hex(samp_freq_hz)))
watch_shell.quick_start("ecg", "ecg", fs=True)
watch_shell.do_start_logging("")
def quick_start_adpd_fs(samp_freq_hz=50, agc_state=0, led='G', skip_load_cfg=False):
"""
:param samp_freq_hz:
:param agc_state:
:param led:
:param skip_load_cfg:
:return:
"""
cfg_dict = {'G': {'adpd_cfg': '1', 'clk_calib': adpd_clk_calib, 'sub': '6', 'agc_ctrl_id': '1'},
'R': {'adpd_cfg': '2', 'clk_calib': adpd_clk_calib, 'sub': '7', 'agc_ctrl_id': '2'},
'IR': {'adpd_cfg': '3', 'clk_calib': adpd_clk_calib, 'sub': '8', 'agc_ctrl_id': '3'},
'B': {'adpd_cfg': '4', 'clk_calib': adpd_clk_calib, 'sub': '9', 'agc_ctrl_id': '4'},
'MWL': {'adpd_cfg': '5', 'clk_calib': adpd_clk_calib, 'sub': '10', 'agc_ctrl_id': '5'}}
led = led.upper()
if not skip_load_cfg:
watch_shell.do_load_adpd_cfg("1")
watch_shell.do_calibrate_clock(cfg_dict[led]['clk_calib'])
if agc_state:
watch_shell.do_enable_agc('{}'.format(cfg_dict[led]['agc_ctrl_id']))
else:
watch_shell.do_disable_agc('{}'.format(cfg_dict[led]['agc_ctrl_id']))
if samp_freq_hz == 50:
watch_shell.do_reg("w adpd 0xD:0x4e20")
elif samp_freq_hz == 100:
watch_shell.do_reg("w adpd 0xD:0x2710")
elif samp_freq_hz == 500:
watch_shell.do_reg("w adpd 0xD:0x07D0")
else:
raise RuntimeError("Sampling Frequency Not Supported!")
watch_shell.quick_start("adpd", "adpd{}".format(cfg_dict[led]['sub']), fs=True)
watch_shell.do_start_logging("")
def config_adpd_stream(samp_freq_hz=50, agc_state=0, led='G', skip_load_cfg=False):
"""
:param samp_freq_hz:
:param agc_state:
:param led:
:param skip_load_cfg:
:return:
"""
cfg_dict = {'G': {'adpd_cfg': '1', 'clk_calib': adpd_clk_calib, 'sub': '6', 'agc_ctrl_id': '1'},
'R': {'adpd_cfg': '2', 'clk_calib': adpd_clk_calib, 'sub': '7', 'agc_ctrl_id': '2'},
'IR': {'adpd_cfg': '3', 'clk_calib': adpd_clk_calib, 'sub': '8', 'agc_ctrl_id': '3'},
'B': {'adpd_cfg': '4', 'clk_calib': adpd_clk_calib, 'sub': '9', 'agc_ctrl_id': '4'},
'MWL': {'adpd_cfg': '5', 'clk_calib': adpd_clk_calib, 'sub': '10', 'agc_ctrl_id': '5'}}
led = led.upper()
if not skip_load_cfg:
watch_shell.do_load_adpd_cfg("1")
watch_shell.do_calibrate_clock(cfg_dict[led]['clk_calib'])
if agc_state:
watch_shell.do_enable_agc('{}'.format(cfg_dict[led]['agc_ctrl_id']))
else:
watch_shell.do_disable_agc('{}'.format(cfg_dict[led]['agc_ctrl_id']))
if samp_freq_hz == 50:
watch_shell.do_reg("w adpd 0xD:0x4e20")
elif samp_freq_hz == 100:
watch_shell.do_reg("w adpd 0xD:0x2710")
elif samp_freq_hz == 500:
watch_shell.do_reg("w adpd 0xD:0x07D0")
else:
raise RuntimeError("Sampling Frequency Not Supported!")
def quick_start_adxl(samp_freq_hz=100):
"""
Set ADXL sampling frequency and start capturing the data
:param samp_freq_hz:
:return:
"""
watch_shell.quick_start("adxl", "adxl")
if samp_freq_hz == 12.5:
watch_shell.do_reg("w adxl 0x2C:0x98")
elif samp_freq_hz == 25:
watch_shell.do_reg("w adxl 0x2C:0x99")
elif samp_freq_hz == 50:
watch_shell.do_reg("w adxl 0x2C:0x9A")
elif samp_freq_hz == 100:
watch_shell.do_reg("w adxl 0x2C:0x9B")
elif samp_freq_hz == 200:
watch_shell.do_reg("w adxl 0x2C:0x9C")
elif samp_freq_hz == 400:
watch_shell.do_reg("w adxl 0x2C:0x9F")
else:
raise RuntimeError("Sampling Frequency Not Supported!")
def set_adxl_stream_freq(samp_freq_hz=100):
"""
:param samp_freq_hz:
:return:
"""
if samp_freq_hz == 12.5:
watch_shell.do_reg("w adxl 0x2C:0x98")
elif samp_freq_hz == 25:
watch_shell.do_reg("w adxl 0x2C:0x99")
elif samp_freq_hz == 50:
watch_shell.do_reg("w adxl 0x2C:0x9A")
elif samp_freq_hz == 100:
watch_shell.do_reg("w adxl 0x2C:0x9B")
elif samp_freq_hz == 200:
watch_shell.do_reg("w adxl 0x2C:0x9C")
elif samp_freq_hz == 400:
watch_shell.do_reg("w adxl 0x2C:0x9F")
else:
raise RuntimeError("Sampling Frequency Not Supported!")
def quick_start_adpd(samp_freq_hz=50, agc_state=0, led='G', skip_load_cfg=False):
"""
:param samp_freq_hz:
:param agc_state: [0 | 1]
:param led: ['G' | 'R' | 'IR' | 'B']
:return: stream_file_name
"""
cfg_dict = {'G': {'adpd_cfg': '1', 'clk_calib': adpd_clk_calib, 'sub': '6', 'agc_ctrl_id': '1'},
'R': {'adpd_cfg': '2', 'clk_calib': adpd_clk_calib, 'sub': '7', 'agc_ctrl_id': '2'},
'IR': {'adpd_cfg': '3', 'clk_calib': adpd_clk_calib, 'sub': '8', 'agc_ctrl_id': '3'},
'B': {'adpd_cfg': '4', 'clk_calib': adpd_clk_calib, 'sub': '9', 'agc_ctrl_id': '4'}}
led = led.upper()
if not skip_load_cfg:
watch_shell.do_load_adpd_cfg("1")
watch_shell.do_calibrate_clock(cfg_dict[led]['clk_calib'])
if agc_state:
watch_shell.do_enable_agc('{}'.format(cfg_dict[led]['agc_ctrl_id']))
else:
watch_shell.do_disable_agc('{}'.format(cfg_dict[led]['agc_ctrl_id']))
if samp_freq_hz == 50:
watch_shell.do_reg("w adpd 0xD:0x4e20")
elif samp_freq_hz == 100:
watch_shell.do_reg("w adpd 0xD:0x2710")
elif samp_freq_hz == 500:
watch_shell.do_reg("w adpd 0xD:0x07D0")
elif samp_freq_hz is None:
pass
else:
raise RuntimeError("Sampling Frequency Not Supported!")
watch_shell.quick_start('adpd', "adpd{}".format(cfg_dict[led]['sub']))
stream_file_name = 'adpd{}.csv'.format(cfg_dict[led]['sub'])
return stream_file_name
def quick_stop_adpd(led='G'):
"""
:param led: ['G' | 'R' | 'IR' | 'B']
:return:
"""
cfg_dict = {'G': 'adpd6',
'R': 'adpd7',
'IR': 'adpd8',
'B': 'adpd9'}
led = led.upper()
watch_shell.quick_stop('adpd', cfg_dict[led])
def quick_start_ppg(samp_freq_hz=50, agc_state=0):
"""
:param samp_freq_hz:
:param agc_state:
:return:
"""
watch_shell.do_load_adpd_cfg("1")
watch_shell.do_calibrate_clock(adpd_clk_calib)
watch_shell.do_set_ppg_lcfg("5")
if samp_freq_hz == 50:
watch_shell.do_reg("w adpd 0xD:0x4e20")
elif samp_freq_hz == 100:
watch_shell.do_reg("w adpd 0xD:0x2710")
elif samp_freq_hz == 500:
watch_shell.do_reg("w adpd 0xD:0x07D0")
if agc_state:
watch_shell.do_lcfg("w ppg 0x4:0x1210")
else:
watch_shell.do_lcfg("w ppg 0x4:0x1010")
watch_shell.quick_start('ppg', 'ppg')
def set_adpd_stream_freq(samp_freq_hz=50):
"""
:param samp_freq_hz:
:return:
"""
if samp_freq_hz == 50:
watch_shell.do_reg("w adpd 0xD:0x4e20")
elif samp_freq_hz == 100:
watch_shell.do_reg("w adpd 0xD:0x2710")
elif samp_freq_hz == 500:
watch_shell.do_reg("w adpd 0xD:0x07D0")
def quick_stop_ppg(samp_freq_hz=50):
"""
:param samp_freq_hz:
:return:
"""
watch_shell.quick_stop('ppg', 'ppg')
# def set_ecg_samp_freq(samp_freq_hz=100):
# """
#
# :param samp_freq_hz:
# :return:
# """
# watch_shell.do_lcfg('w ecg 0:{}'.format(hex(samp_freq_hz)))
def dcb_cfg(mode='w', dev='adxl', file_name=''):
"""
:param mode: 'w'| 'r' | 'd'
:param dev: 'adpd' | 'adxl' | 'ecg' | 'eda'
:param file_name: '*_dcb_config.cfg'
:return:
"""
curr_dir = os.getcwd()
dcb_cfg_dir = os.path.join(curr_dir, 'dcb_cfg')
if not os.path.exists(dcb_cfg_dir):
os.mkdir(dcb_cfg_dir)
test_logger.warning("DCG Config Dir Not Found! Creating empty directory 'dcb_cfg'")
if mode == 'w':
if os.path.exists(os.path.join(dcb_cfg_dir, file_name)) and os.path.isfile(os.path.join(dcb_cfg_dir, file_name)):
pkt = watch_shell.do_write_dcb('{} {}'.format(dev, os.path.join(dcb_cfg_dir, file_name)))
if dev == "adpd":
for pkt_element in pkt:
err_stat = watch_shell.check_err_stat(pkt_element)
if err_stat == 1:
break
else:
err_stat = watch_shell.check_err_stat(pkt)
else:
err_stat = 1
test_logger.error("DCB Config file not found!")
raise RuntimeError("DCB Config file not found!\n{}".format(os.path.join(dcb_cfg_dir, file_name)))
elif mode == 'r':
pkt = watch_shell.do_read_dcb('{}'.format(dev))
if dev in ["ecg", "eda", "bia"]:
file_name = r".\dcb_cfg\{}_dcb_get.lcfg".format(dev)
else:
file_name = r".\dcb_cfg\{}_dcb_get.dcfg".format(dev)
with open(file_name, "w") as fs:
if dev == "adpd":
for pkt_element in pkt:
for index, data in enumerate(pkt_element["payload"]["data"]):
if index == 0 and type(data[0]) is int:
convert_2_hex = True
else:
convert_2_hex = False
if convert_2_hex:
data = [hex(data[0]), data[1]]
fs.write(" ".join(data))
fs.write("\n")
err_stat = watch_shell.check_err_stat(pkt_element)
else:
for index, data in enumerate(pkt["payload"]["data"]):
if index == 0 and type(data[0]) is int:
convert_2_hex = True
else:
convert_2_hex = False
if convert_2_hex:
data = [hex(data[0]), data[1]]
fs.write(" ".join(data))
fs.write("\n")
err_stat = watch_shell.check_err_stat(pkt)
test_logger.info('DCB File Name: dcb_cfg\{}_dcb_get.dcfg'.format(dev))
elif mode == 'd':
pkt = watch_shell.do_delete_dcb('{}'.format(dev))
err_stat = watch_shell.check_err_stat(pkt)
else:
err_stat = 1
return err_stat, dcb_cfg_dir
def plot_and_save_png(f_path, col_idx=1, row_offset=1):
"""
This function reads a csv file and plots the data and saves the plot into a png file
:param f_path:
:param col_idx:
:return: plot_path
"""
data_list = read_csv_col(f_path, col_idx, row_offset)
f_name = os.path.splitext(os.path.split(f_path)[-1])[0]
plot_path = os.path.join(os.path.split(f_path)[0], 'plots', f_name+'.png')
plt.plot(data_list)
plt.xlabel('time')
plt.ylabel('amplitude')
plt.savefig(plot_path)
plt.close()
return plot_path
def update_arduino(in_obj):
"""
This function updates the arduino global variable usually from an initialize function call
:param in_obj:
:return:
"""
global arduino
arduino = in_obj
def update_watch_shell(in_obj):
"""
This function updates the watch_shell global variable usually from an initialize function call
:param in_obj:
:return:
"""
global watch_shell
watch_shell = in_obj
def update_dvt_version():
"""
This function updates the DVT_version global variable usually from an initialize function call
:return:
"""
global DVT_version
err_stat, chip_id = watch_shell.get_chip_id("2") # ADPD chip ID index is 2
if chip_id == 0xc0:
test_logger.info("DVT1 Watch Connected")
DVT_version = 0
else:
test_logger.info("DVT2 Watch Connected")
DVT_version = 1
# else:
# raise RuntimeError("Unknown DVT Watch version ADPD Chip ID-{}".format(str(chip_id)))
def read_station_cfg():
"""
This function reads the station config yaml file and updates the global variables. If a file is not found, it will
create a file with the default values in it. The file location is <user>/AppData/Roaming/
:return:
"""
# Default values
cfg_dict = {'arduino_port': 'COM7', 'watch_port': 'COM13', 'watch_port_ble': 'COM7',
'fg_instr_addr': 'USB0::0x0957::0x2C07::MY52802639::0::INSTR',
'sm_instr_addr': 'GPIB0::23::INSTR', 'watch_port_type': 'USB', 'ble_mac': '6B-28-88-26-52-C3'}
station_cfg_path = os.path.join(os.getenv('APPDATA'), 'watch_test.yaml')
if os.path.exists(station_cfg_path) and os.path.isfile(station_cfg_path):
with open(station_cfg_path, 'r') as f_ref:
cfg_dict = yaml.load(f_ref, Loader=yaml.FullLoader)
else:
with open(station_cfg_path, 'w') as f_ref:
yaml.dump(cfg_dict, f_ref)
missing_keys = []
global arduino_port, watch_port, watch_port_ble, fg_instr_addr, sm_instr_addr, watch_port_type, ble_mac_addr
if 'watch_port_ble' not in cfg_dict.keys():
missing_keys.append("watch_port_ble")
watch_port_ble = ""
else:
watch_port_ble = cfg_dict['watch_port_ble']
if 'ble_mac' not in cfg_dict.keys():
missing_keys.append("ble_mac")
ble_mac_addr = ""
else:
ble_mac_addr = cfg_dict['ble_mac']
if len(missing_keys) != 0:
test_logger.warning("Please add the {} values in the {} file".format(" and ".join(missing_keys),
os.path.join(os.getenv('APPDATA'),
'watch_test.yaml')))
# raise ConditionCheckFailure("Please add the {} values in the {} file"
# "".format(" and ".join(missing_keys), os.path.join(os.getenv('APPDATA'),
# 'watch_test.yaml')))
arduino_port = cfg_dict['arduino_port']
watch_port = cfg_dict['watch_port']
fg_instr_addr = cfg_dict['fg_instr_addr']
sm_instr_addr = cfg_dict['sm_instr_addr']
if 'watch_port_type' in cfg_dict.keys():
watch_port_type = cfg_dict['watch_port_type']
else:
watch_port_type = 'USB'
def close_plot_after_run(plot_name_list, close_plot_mode=False):
"""
This function closes all open plot and cmd windows opened by the test.
This checks for a global mode variable or the local mode variable. Local variable defaults to False
:param plot_name_list: This is a list of string values of the plot window names
:param close_plot_mode: This is a boolean arg
:return:
"""
if close_plot_mode or close_plot_mode_global:
for plot_name in plot_name_list:
os.system('taskkill /fi "WINDOWTITLE eq {}"'.format(plot_name))
time.sleep(0.25)
os.system('taskkill /fi "WINDOWTITLE eq C:\WINDOWS\system32\cmd.exe"')
def init_matlab_engine():
"""
This function imports and initializes matlab engine
MATLAB package needs to be installed from <matlab_root>/extern/engine/python directory
Use the command "python setup.py install"
:return:
"""
global matlab_eng
try:
import matlab.engine
matlab_eng = matlab.engine.start_matlab()
except:
print("Error loading MATLAB Engine!")
if matlab_eng:
matlab_dir = os.path.join(os.path.abspath('.'), 'utils', 'matlab_utils')
matlab_eng.addpath(matlab_dir, nargout=0)
return matlab_eng
def initialize_setup(ts_tolerance=10, com_port="NA", mode="NA", ble_mac="NA", ble_com_port="NA"):
"""
This function runs necessary steps to initialize the test setup
- Connects to Arduino and initializes arduino global variable
:return:
"""
global fg, sm, cm
global test_report_dir
read_station_cfg()
# Instantiating watch shell
gui_signaller = cli_map.cli.QtSignaller()
# threading.Thread(target=cli_map.cli._init_gui, args=(gui_signaller,), daemon=True).start()
watch_shell_obj = CLIMap(gui_signaller, testing=True)
if com_port != "NA" and "COM" in com_port:
global watch_port
watch_port = com_port
if ble_com_port != "NA" and "COM" in ble_com_port:
global watch_port_ble
watch_port_ble = ble_com_port
if ble_mac != "NA":
global ble_mac_addr
ble_mac_addr = ble_mac
if mode != "NA":
global watch_port_type
watch_port_type = mode
if watch_port_type == 'USB':
watch_shell_obj.do_connect_usb('{}'.format(watch_port))
else:
watch_shell_obj.do_connect_ble('{} {}'.format(watch_port_ble, ble_mac_addr))
# cm = CLIMap(watch_shell_obj)
# Creating Test Rport Directory
err_stat, sys_info_dict = watch_shell_obj.get_system_info()
if err_stat:
raise RuntimeError('Unable to communicate with the watch!')
pcb_name = str(sys_info_dict['mac_addr'])
if not pcb_name:
pcb_name = easygui.enterbox('PCB Number:', 'Enter PCB Number')
test_report_dir = init_test_report_dir(pcb_name)
test_logger.info('Test Results Directory: {}'.format(test_report_dir))
err_stat, fw_ver_info_dict = watch_shell_obj.get_version_cli()
if not err_stat:
ver_info_str = 'Firmware Version: V{}.{}.{} | Build Info: {}'.format(fw_ver_info_dict['major'],
fw_ver_info_dict['minor'],
fw_ver_info_dict['patch'],
fw_ver_info_dict['build'])
update_robot_suite_doc(ver_info_str)
# Instantiating Arduino
#arduino_obj = SerialIface(port=arduino_port)
#arduino_obj.serial_write('!CfgIOMap\r')
# watch_shell_obj.do_toggleSaveCSV('')
#update_arduino(arduino_obj)
update_watch_shell(watch_shell_obj)
# TODO: Enable below code to configure instruments
#fg = instr_lib.KeysightFG33522B()
#fg.instr_connect(fg_instr_addr)
#sm = instr_lib.KeithleySM2400()
#sm.instr_connect(sm_instr_addr)
update_dvt_version()
update_adpd_clock_calibration_value()
update_ts_mismatch_tolerance(int(ts_tolerance))
def initialize_setup_nk(ts_tolerance=0, usb_com_port="NA", mode="NA",
ble_mac="NA", ble_com_port="NA", clear_flash=0, test_level=0, flash_reset=0):
"""
This function runs necessary steps to initialize the test setup
- Connects to Arduino and initializes arduino global variable
:return:
"""
global test_report_dir
read_station_cfg()
# Instantiating watch shell
gui_signaller = cli_map.cli.QtSignaller()
# threading.Thread(target=cli_map.cli._init_gui, args=(gui_signaller,), daemon=True).start()
watch_shell_obj = CLIMap(gui_signaller, testing=True)
update_watch_shell(watch_shell_obj)
if usb_com_port != "NA" and "COM" in usb_com_port:
global watch_port
watch_port = usb_com_port
if ble_com_port != "NA" and "COM" in ble_com_port:
global watch_port_ble
watch_port_ble = ble_com_port
if ble_mac != "NA":
global ble_mac_addr
ble_mac_addr = ble_mac
if mode != "NA":
global watch_port_type
watch_port_type = mode
connect(watch_port_type)
# Creating Test Report Directory
err_stat, sys_info_dict = watch_shell_obj.get_system_info()
if err_stat:
raise RuntimeError('Unable to communicate with the watch!')
pcb_name = str(sys_info_dict['mac_addr'])
if not pcb_name:
pcb_name = easygui.enterbox('PCB Number:', 'Enter PCB Number')
test_report_dir = init_test_report_dir(pcb_name)
test_logger.info('Test Results Directory: {}'.format(test_report_dir))
err_stat, fw_ver_info_dict = watch_shell_obj.get_version_cli()
if not err_stat:
ver_info_str = 'Firmware Version: V{}.{}.{} | Build Info: {}'.format(fw_ver_info_dict['major'],
fw_ver_info_dict['minor'],
fw_ver_info_dict['patch'],
fw_ver_info_dict['build'])
update_robot_suite_doc(ver_info_str)
update_dvt_version()
update_adpd_clock_calibration_value()
update_ts_mismatch_tolerance(int(ts_tolerance))
if clear_flash:
watch_shell.do_fs_format('')
if flash_reset:
watch_shell.do_flash_reset('')
test_level_update(test_level)
def init_test_report_dir(pcb_name):
"""
This function creates a directory for pcb test reports if not already present and
creates a folder inside it with the current date and time string for storing the test results
:param pcb_name:
:return:
"""
if copy_results_to_shared_drive:
if not pcb_name:
pcb_name = pcb_name_default
pcb_name = pcb_name.upper()
pcb_test_dir_path = os.path.join(shared_drive_path, pcb_name+'_test_result')
if not os.path.exists(shared_drive_path):
raise Exception('Unable to access shared drive path!')
if not os.path.exists(pcb_test_dir_path):
# Creating PCB test directory
os.mkdir(pcb_test_dir_path)
now = datetime.now()
dt_str = now.strftime("%m_%d_%y_%H_%M_%S")
# Creating time-stamped test directory
test_report_dir = os.path.join(pcb_test_dir_path, dt_str)
os.mkdir(test_report_dir)
with open('output_dir.tmp', 'w') as f_ref:
f_ref.write(test_report_dir)
else:
test_report_dir = ''
# Clearing plots folder
if os.path.isdir('plots'):
shutil.rmtree('plots')
time.sleep(1)
os.mkdir('plots')
return test_report_dir
@util_logger
def close_setup():
"""
This function runs necessary steps to close the test setup
:return:
"""
# common.watch_shell.do_exit('') # TODO: Need to enable this after exit command is fixed
update_arduino(None)
update_watch_shell(None)
close_plot_after_run(['ECG Data Plot'], True)
# TODO: Enable below code to configure function generator
# fg.close()
@util_logger
def set_switch(name, state):
"""
    This function extracts the io_id from the switch_map based on the input name and sets the state
:param name:
:param state:
:return:
"""
if name in switch_map:
io_id = switch_map[name]
arduino.serial_write('!SetIO {} {}\r'.format(io_id, state))
else:
raise Exception('Invalid switch name! Unable to find the provided switch name in the switch map')
def rename_stream_file(old_file_name, suffix='', row_offset=1, col_idx=1,
copy_to_shared_drive=copy_results_to_shared_drive, plot=save_plots):
"""
    This function renames the given stream file by appending a suffix to its name
:param old_file_name:
:param suffix:
:param row_offset: If there is header on row 0 of csv data, row_offset can be 1
:param col_idx: If the data is on column 2, col_idx will be 1
:param copy_to_shared_drive:
:param plot: True/False
:return:
"""
if os.path.exists(old_file_name): # Renaming stream file for each iteration
new_name = os.path.splitext(old_file_name)[0] + suffix
if os.path.exists(new_name) and os.path.isfile(new_name):
os.remove(new_name)
time.sleep(0.5)
os.rename(old_file_name, new_name)
else:
new_name = ''
if plot:
plot_path = plot_and_save_png(new_name, col_idx, row_offset)
if copy_to_shared_drive:
total_retry = 1
        for retry in range(total_retry):  # should allow multiple retries, but limited due to the copy delay
try:
test_group_name = inspect.getmodule(inspect.stack()[1][0]).__name__.split('.')[-1]
test_group_dir = os.path.join(test_report_dir, test_group_name)
if not os.path.exists(test_group_dir):
os.mkdir(test_group_dir)
file_name = os.path.split(new_name)[-1]
shutil.copyfile(new_name, os.path.join(test_group_dir, file_name))
if plot:
plot_dir = os.path.join(test_group_dir, 'plots')
if not os.path.exists(plot_dir):
os.mkdir(plot_dir)
plot_name = os.path.split(plot_path)[-1]
new_plot_path = os.path.join(plot_dir, plot_name)
shutil.copyfile(plot_path, new_plot_path)
break
except WindowsError:
test_logger.info("Trying to copy the file; Attempts remaining: {}".format(total_retry - retry - 1))
else:
test_logger.warning("*** File Copy Failed ***")
return new_name
def amp_volts_to_percentage(in_volt):
"""
This function takes in amplitude in volts and returns amplitude percentage for arduino
:param in_volt:
:return:
"""
full_scale_amp = float(volt_scale_range[1])/2
amp_percentage = (in_volt * 100) / full_scale_amp
if amp_percentage > 100:
amp_percentage = 100.0
return amp_percentage
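# A minimal worked example for amp_volts_to_percentage (hedged; the numbers assume the
# default volt_scale_range of (0, 5), i.e. a full-scale amplitude of 2.5 V):
#   amp_volts_to_percentage(1.25) -> (1.25 * 100) / 2.5 = 50.0
#   amp_volts_to_percentage(3.0)  -> 120.0, which is clamped to 100.0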
def read_csv_file(file_path, num_cols=2):
"""
This function can be used for reading and returning data from csv files
:param file_path:
:param num_cols:
:return:
"""
with open(file_path, 'r') as f_ref:
lines = f_ref.readlines()
lines.pop(0)
rows = [list(map(float, line.split(','))) for line in lines]
        cols = list(zip(*rows))
        return cols[:num_cols]
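# Hedged usage sketch for read_csv_file: the file name below is hypothetical and the call
# assumes a CSV with a single header row followed by numeric rows with at least two columns.
#   ts_col, data_col = read_csv_file('ecg_sample.csv', num_cols=2)
#   test_logger.info('Read {} samples'.format(len(data_col)))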
def read_csv_col(file_path, col_idx=0, row_offset=1):
"""
This function reads a csv file and returns the column data specified by the col_idx.
:param file_path:
:param col_idx:
:return:
"""
with open(file_path, 'r') as f_ref:
line_list = f_ref.readlines()
col_data_list = []
last_line = len(line_list) - 1
for i, line in enumerate(line_list):
if i >= row_offset and any(line):
            line = line.strip()
if last_line == i and not any(line.split(",")[col_idx]): # If the last row is empty
continue
if i <= last_line - 7:
                # If the last ADPD packet has partial data (for example, when streaming Slots F, G, H and I,
                # the last packet might contain only Slot H and I data), truncate the trailing rows.
try:
any(line.split(",")[col_idx])
except IndexError:
continue
col_data_list.append(float(line.split(",")[col_idx]))
return col_data_list
# **********************************************************************
| 35.176245
| 124
| 0.61856
|
import os
import sys
try:
import tkinter
import time
import yaml
from tkinter import messagebox
from utils.serial_comm import SerialIface
from datetime import datetime
from utils.test_utils import util_logger
from matplotlib import pyplot as plt
from robot.libraries.BuiltIn import BuiltIn
import shutil
import inspect
import logging
import easygui
import subprocess
from utils import instr_lib
from utils.cli_map import CLIMap
from utils import cli_map
import threading
except Exception as e:
print("Import Exception! Details:", e)
# Adding CLI destination path to sys path in order to import the module
# curr_dir = os.path.join(os.path.abspath(__file__), '../')
# cli_dir = os.path.join(curr_dir, '../adi_study_watch/cli/m2m2/tools')
# sys.path.insert(0, cli_dir)
# import CLI
# from adi_study_watch_cli import CLI
# **********************************************************************
# Initializing TkInter for showing dialog pop ups
root = tkinter.Tk()
root.withdraw()
# **********************************************************************
# ********************** Test Variables ********************************
arduino_port = None # This variable will be updated from station config file [read_station_Cfg()]
watch_port = None # This variable will be updated from station config file [read_station_Cfg()]
watch_port_ble = None # This variable will be updated from station config file [read_station_Cfg()]
fg_instr_addr = None # This variable will be updated from station config file [read_station_Cfg()]
watch_port_type = None # This variable will be updated from station config file [read_station_Cfg()]
sm_instr_addr = None # This variable will be updated from station config file [read_station_Cfg()]
arduino = None
watch_shell = None
ts_mismatch_tolerance = None
fg, sm = None, None
matlab_eng = None
pcb_name_default = 'A1H1'
shared_drive_path = r'\\wilmnas4\Local Programs\FDWatch_TestData\Data_Testsetup\DVT1_Test_Results'
ecg_stream_file_name = 'ecg.csv'
bia_stream_file_name = "bia.csv"
ppg_stream_file_name = 'ppg.csv'
syncppg_stream_file_name = 'sync_ppg.csv'
adxl_stream_file_name = 'adxl.csv'
temperature_stream_file_name = 'temp.csv'
adpd_stream_file_name = 'adpd6.csv'
eda_stream_file_name = 'eda.csv'
volt_scale_range = (0, 5)
# The switch map dictionary maps the various switches to the arduino digital pins (24-42)
switch_map = {'SNOISE1': 22, 'SNOISE2': 23, 'ECG_NEGSEL': 24, 'ECG_POSSEL': 25}
close_plot_mode_global = True
test_report_dir = None
copy_results_to_shared_drive = True
save_plots = False
DVT_version = None
adpd_clk_calib = None
cm = None # CLI Map
ble_mac_addr = None
current_watch_mode = None
test_level_handeler = 0
# **********************************************************************
# ********************* Configure Logging ******************************
test_logger = logging.getLogger('test_logger')
logging_format = "[%(levelname)s] : %(message)s"
date_str = "%m/%d/%Y %I:%M:%S %p"
logger_formatter = logging.Formatter(logging_format, date_str)
test_stream_handler = logging.StreamHandler()
test_logger.setLevel(logging.INFO)
test_logger.addHandler(test_stream_handler)
# logging_format = "[%(asctime)s] [%(levelname)s]: %(message)s"
# logging.basicConfig(# filename='output.log',
# level=logging.INFO,
# # filemode='w',
# format=logging_format,
# datefmt=date_str)
# **********************************************************************
# ********************* Common Functions *******************************
class ConditionCheckFailure(RuntimeError):
"""
This class is used to raise failures from test cases so that
robot framework detects them as failures and continues to
next test case due to the below variable
"""
ROBOT_CONTINUE_ON_FAILURE = True
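# Hedged usage sketch: a test keyword can raise ConditionCheckFailure so that Robot Framework
# logs the failure but still continues with the remaining test cases. The threshold and the
# measured value below are hypothetical.
#   if measured_freq_hz < expected_freq_hz * 0.9:
#       raise ConditionCheckFailure("Stream frequency too low: {} Hz".format(measured_freq_hz))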
def update_robot_suite_doc(doc):
"""
:param doc:
:return:
"""
try:
BuiltIn().set_suite_documentation(doc)
except Exception as e:
        test_logger.warning('Skipping robot documentation update! Details: {}'.format(e))
def write_analysis_report(result_dict, report_file=None, header='Analysis Section', append_report=False):
"""
:param result_dict:
:param report_file:
:param header:
:param append_report:
:return:
"""
report_file = 'analysis_report.txt' if not report_file else report_file
file_mode = 'a' if append_report else 'w'
with open(report_file, file_mode) as f_ref:
f_ref.write('<<< {} >>>\n'.format(header))
for k, v in result_dict.items():
f_ref.write('{} = {}\n'.format(k, v))
        f_ref.write('\n')
return os.path.abspath(report_file)
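# Hedged usage sketch for write_analysis_report; the result values below are hypothetical.
#   report_path = write_analysis_report({'mean_hr_bpm': 72, 'num_samples': 3000},
#                                       header='PPG Analysis', append_report=False)
#   test_logger.info('Analysis report written to {}'.format(report_path))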
def analyze_wfm(file_path, mode='ecg', file_mode='cli', gen_filtered_ppg='1'):
"""
This function calls the BioSigProcess app built from LV vi and extracts results stored in yaml file
:param file_path: waveform *.csv file path
:param mode: 'ecg' | 'ppg'
:param file_mode: 'cli' | 'awt'
:return:
"""
results_dict = {}
if os.path.isfile(file_path):
subprocess.call(['utils/lv/builds/bio_sig_process/BioSigProcess.exe', mode, file_path, file_mode, gen_filtered_ppg])
time.sleep(2)
result_file_path = os.path.join(file_path, '../{}_extracted_results.txt'.format(mode))
with open(result_file_path, 'r') as f_ref:
line_list = f_ref.readlines()
for line in line_list:
result_list = line.split(' - ')
results_dict[result_list[0]] = result_list[1].strip()
else:
        test_logger.warning('Input file not found! {}'.format(file_path))
results_dict = None
return results_dict
def quick_start_ecg(samp_freq_hz=100):
"""
:param samp_freq_hz:
:return:
"""
watch_shell.do_lcfg('w ecg 0:{}'.format(hex(samp_freq_hz)))
watch_shell.quick_start('ecg', 'ecg')
def quick_start_bia(samp_freq_hz=100):
"""
:param samp_freq_hz:
:return:
"""
watch_shell.do_lcfg('w bia 0:{}'.format(hex(samp_freq_hz)))
watch_shell.quick_start('bia', 'bia')
def set_ecg_stream_freq(samp_freq_hz=100):
"""
:param samp_freq_hz:
:return:
"""
watch_shell.do_lcfg('w ecg 0:{}'.format(hex(samp_freq_hz)))
def set_eda_stream_freq(samp_freq_hz=4):
"""
:param samp_freq_hz:
:return:
"""
watch_shell.do_lcfg('w eda 0:{}'.format(hex(samp_freq_hz)))
if samp_freq_hz <= 16:
watch_shell.do_lcfg("w eda 0x2:0x2")
else:
watch_shell.do_lcfg("w eda 0x2:0x1")
def quick_start_eda(samp_freq_hz=4):
"""
:param samp_freq_hz:
:return:
"""
if samp_freq_hz:
set_eda_stream_freq(samp_freq_hz)
watch_shell.quick_start('eda', 'eda')
def quick_start_eda_fs(samp_freq_hz=4):
"""
:param samp_freq_hz:
:return:
"""
if samp_freq_hz:
set_eda_stream_freq(samp_freq_hz)
watch_shell.quick_start("eda", "eda", fs=True)
watch_shell.do_start_logging("")
def quick_stop_eda_fs():
watch_shell.quick_stop("eda", "eda", fs=True)
watch_shell.do_stop_logging("")
def quick_start_bia_fs(samp_freq_hz=4):
"""
:param samp_freq_hz:
:return:
"""
watch_shell.do_lcfg('w bia 0:{}'.format(hex(samp_freq_hz)))
watch_shell.quick_start("bia", "bia", fs=True)
watch_shell.do_start_logging("")
def quick_stop_bia_fs():
watch_shell.quick_stop("bia", "bia", fs=True)
watch_shell.do_stop_logging("")
def quick_start_ecg_fs(samp_freq_hz=4):
"""
:param samp_freq_hz:
:return:
"""
watch_shell.do_lcfg('w ecg 0:{}'.format(hex(samp_freq_hz)))
watch_shell.quick_start("ecg", "ecg", fs=True)
watch_shell.do_start_logging("")
def quick_stop_ecg_fs():
watch_shell.quick_stop("ecg", "ecg", fs=True)
watch_shell.do_stop_logging("")
def quick_start_adpd_fs(samp_freq_hz=50, agc_state=0, led='G', skip_load_cfg=False):
"""
:param samp_freq_hz:
:param agc_state:
:param led:
:param skip_load_cfg:
:return:
"""
cfg_dict = {'G': {'adpd_cfg': '1', 'clk_calib': adpd_clk_calib, 'sub': '6', 'agc_ctrl_id': '1'},
'R': {'adpd_cfg': '2', 'clk_calib': adpd_clk_calib, 'sub': '7', 'agc_ctrl_id': '2'},
'IR': {'adpd_cfg': '3', 'clk_calib': adpd_clk_calib, 'sub': '8', 'agc_ctrl_id': '3'},
'B': {'adpd_cfg': '4', 'clk_calib': adpd_clk_calib, 'sub': '9', 'agc_ctrl_id': '4'},
'MWL': {'adpd_cfg': '5', 'clk_calib': adpd_clk_calib, 'sub': '10', 'agc_ctrl_id': '5'}}
led = led.upper()
if not skip_load_cfg:
watch_shell.do_load_adpd_cfg("1")
watch_shell.do_calibrate_clock(cfg_dict[led]['clk_calib'])
if agc_state:
watch_shell.do_enable_agc('{}'.format(cfg_dict[led]['agc_ctrl_id']))
else:
watch_shell.do_disable_agc('{}'.format(cfg_dict[led]['agc_ctrl_id']))
if samp_freq_hz == 50:
watch_shell.do_reg("w adpd 0xD:0x4e20")
elif samp_freq_hz == 100:
watch_shell.do_reg("w adpd 0xD:0x2710")
elif samp_freq_hz == 500:
watch_shell.do_reg("w adpd 0xD:0x07D0")
else:
raise RuntimeError("Sampling Frequency Not Supported!")
watch_shell.quick_start("adpd", "adpd{}".format(cfg_dict[led]['sub']), fs=True)
watch_shell.do_start_logging("")
def quick_stop_adpd_fs(led='G'):
cfg_dict = {'G': {'adpd_cfg': '1', 'clk_calib': adpd_clk_calib, 'sub': '6', 'agc_ctrl_id': '1'},
'R': {'adpd_cfg': '2', 'clk_calib': adpd_clk_calib, 'sub': '7', 'agc_ctrl_id': '2'},
'IR': {'adpd_cfg': '3', 'clk_calib': adpd_clk_calib, 'sub': '8', 'agc_ctrl_id': '3'},
'B': {'adpd_cfg': '4', 'clk_calib': adpd_clk_calib, 'sub': '9', 'agc_ctrl_id': '4'},
'MWL': {'adpd_cfg': '5', 'clk_calib': adpd_clk_calib, 'sub': '10', 'agc_ctrl_id': '5'}}
watch_shell.quick_stop("adpd", "adpd{}".format(cfg_dict[led]['sub']), fs=True)
watch_shell.do_stop_logging("")
def config_adpd_stream(samp_freq_hz=50, agc_state=0, led='G', skip_load_cfg=False):
"""
:param samp_freq_hz:
:param agc_state:
:param led:
:param skip_load_cfg:
:return:
"""
cfg_dict = {'G': {'adpd_cfg': '1', 'clk_calib': adpd_clk_calib, 'sub': '6', 'agc_ctrl_id': '1'},
'R': {'adpd_cfg': '2', 'clk_calib': adpd_clk_calib, 'sub': '7', 'agc_ctrl_id': '2'},
'IR': {'adpd_cfg': '3', 'clk_calib': adpd_clk_calib, 'sub': '8', 'agc_ctrl_id': '3'},
'B': {'adpd_cfg': '4', 'clk_calib': adpd_clk_calib, 'sub': '9', 'agc_ctrl_id': '4'},
'MWL': {'adpd_cfg': '5', 'clk_calib': adpd_clk_calib, 'sub': '10', 'agc_ctrl_id': '5'}}
led = led.upper()
if not skip_load_cfg:
watch_shell.do_load_adpd_cfg("1")
watch_shell.do_calibrate_clock(cfg_dict[led]['clk_calib'])
if agc_state:
watch_shell.do_enable_agc('{}'.format(cfg_dict[led]['agc_ctrl_id']))
else:
watch_shell.do_disable_agc('{}'.format(cfg_dict[led]['agc_ctrl_id']))
if samp_freq_hz == 50:
watch_shell.do_reg("w adpd 0xD:0x4e20")
elif samp_freq_hz == 100:
watch_shell.do_reg("w adpd 0xD:0x2710")
elif samp_freq_hz == 500:
watch_shell.do_reg("w adpd 0xD:0x07D0")
else:
raise RuntimeError("Sampling Frequency Not Supported!")
def quick_start_adxl(samp_freq_hz=100):
"""
Set ADXL sampling frequency and start capturing the data
:param samp_freq_hz:
:return:
"""
watch_shell.quick_start("adxl", "adxl")
if samp_freq_hz == 12.5:
watch_shell.do_reg("w adxl 0x2C:0x98")
elif samp_freq_hz == 25:
watch_shell.do_reg("w adxl 0x2C:0x99")
elif samp_freq_hz == 50:
watch_shell.do_reg("w adxl 0x2C:0x9A")
elif samp_freq_hz == 100:
watch_shell.do_reg("w adxl 0x2C:0x9B")
elif samp_freq_hz == 200:
watch_shell.do_reg("w adxl 0x2C:0x9C")
elif samp_freq_hz == 400:
watch_shell.do_reg("w adxl 0x2C:0x9F")
else:
raise RuntimeError("Sampling Frequency Not Supported!")
def set_adxl_stream_freq(samp_freq_hz=100):
"""
:param samp_freq_hz:
:return:
"""
if samp_freq_hz == 12.5:
watch_shell.do_reg("w adxl 0x2C:0x98")
elif samp_freq_hz == 25:
watch_shell.do_reg("w adxl 0x2C:0x99")
elif samp_freq_hz == 50:
watch_shell.do_reg("w adxl 0x2C:0x9A")
elif samp_freq_hz == 100:
watch_shell.do_reg("w adxl 0x2C:0x9B")
elif samp_freq_hz == 200:
watch_shell.do_reg("w adxl 0x2C:0x9C")
elif samp_freq_hz == 400:
watch_shell.do_reg("w adxl 0x2C:0x9F")
else:
raise RuntimeError("Sampling Frequency Not Supported!")
def quick_start_adpd(samp_freq_hz=50, agc_state=0, led='G', skip_load_cfg=False):
"""
:param samp_freq_hz:
:param agc_state: [0 | 1]
:param led: ['G' | 'R' | 'IR' | 'B']
:return: stream_file_name
"""
cfg_dict = {'G': {'adpd_cfg': '1', 'clk_calib': adpd_clk_calib, 'sub': '6', 'agc_ctrl_id': '1'},
'R': {'adpd_cfg': '2', 'clk_calib': adpd_clk_calib, 'sub': '7', 'agc_ctrl_id': '2'},
'IR': {'adpd_cfg': '3', 'clk_calib': adpd_clk_calib, 'sub': '8', 'agc_ctrl_id': '3'},
'B': {'adpd_cfg': '4', 'clk_calib': adpd_clk_calib, 'sub': '9', 'agc_ctrl_id': '4'}}
led = led.upper()
if not skip_load_cfg:
watch_shell.do_load_adpd_cfg("1")
watch_shell.do_calibrate_clock(cfg_dict[led]['clk_calib'])
if agc_state:
watch_shell.do_enable_agc('{}'.format(cfg_dict[led]['agc_ctrl_id']))
else:
watch_shell.do_disable_agc('{}'.format(cfg_dict[led]['agc_ctrl_id']))
if samp_freq_hz == 50:
watch_shell.do_reg("w adpd 0xD:0x4e20")
elif samp_freq_hz == 100:
watch_shell.do_reg("w adpd 0xD:0x2710")
elif samp_freq_hz == 500:
watch_shell.do_reg("w adpd 0xD:0x07D0")
elif samp_freq_hz is None:
pass
else:
raise RuntimeError("Sampling Frequency Not Supported!")
watch_shell.quick_start('adpd', "adpd{}".format(cfg_dict[led]['sub']))
stream_file_name = 'adpd{}.csv'.format(cfg_dict[led]['sub'])
return stream_file_name
def quick_start_temperature():
watch_shell.quick_start('temp', 'temp')
def quick_stop_adpd(led='G'):
"""
:param led: ['G' | 'R' | 'IR' | 'B']
:return:
"""
cfg_dict = {'G': 'adpd6',
'R': 'adpd7',
'IR': 'adpd8',
'B': 'adpd9'}
led = led.upper()
watch_shell.quick_stop('adpd', cfg_dict[led])
def quick_start_ppg(samp_freq_hz=50, agc_state=0):
"""
:param samp_freq_hz:
:param agc_state:
:return:
"""
watch_shell.do_load_adpd_cfg("1")
watch_shell.do_calibrate_clock(adpd_clk_calib)
watch_shell.do_set_ppg_lcfg("5")
if samp_freq_hz == 50:
watch_shell.do_reg("w adpd 0xD:0x4e20")
elif samp_freq_hz == 100:
watch_shell.do_reg("w adpd 0xD:0x2710")
elif samp_freq_hz == 500:
watch_shell.do_reg("w adpd 0xD:0x07D0")
if agc_state:
watch_shell.do_lcfg("w ppg 0x4:0x1210")
else:
watch_shell.do_lcfg("w ppg 0x4:0x1010")
watch_shell.quick_start('ppg', 'ppg')
def set_adpd_stream_freq(samp_freq_hz=50):
"""
:param samp_freq_hz:
:return:
"""
if samp_freq_hz == 50:
watch_shell.do_reg("w adpd 0xD:0x4e20")
elif samp_freq_hz == 100:
watch_shell.do_reg("w adpd 0xD:0x2710")
elif samp_freq_hz == 500:
watch_shell.do_reg("w adpd 0xD:0x07D0")
def quick_stop_ppg(samp_freq_hz=50):
"""
:param samp_freq_hz:
:return:
"""
watch_shell.quick_stop('ppg', 'ppg')
# def set_ecg_samp_freq(samp_freq_hz=100):
# """
#
# :param samp_freq_hz:
# :return:
# """
# watch_shell.do_lcfg('w ecg 0:{}'.format(hex(samp_freq_hz)))
def dcb_cfg(mode='w', dev='adxl', file_name=''):
"""
:param mode: 'w'| 'r' | 'd'
:param dev: 'adpd' | 'adxl' | 'ecg' | 'eda'
:param file_name: '*_dcb_config.cfg'
:return:
"""
curr_dir = os.getcwd()
dcb_cfg_dir = os.path.join(curr_dir, 'dcb_cfg')
if not os.path.exists(dcb_cfg_dir):
os.mkdir(dcb_cfg_dir)
test_logger.warning("DCG Config Dir Not Found! Creating empty directory 'dcb_cfg'")
if mode == 'w':
if os.path.exists(os.path.join(dcb_cfg_dir, file_name)) and os.path.isfile(os.path.join(dcb_cfg_dir, file_name)):
pkt = watch_shell.do_write_dcb('{} {}'.format(dev, os.path.join(dcb_cfg_dir, file_name)))
if dev == "adpd":
for pkt_element in pkt:
err_stat = watch_shell.check_err_stat(pkt_element)
if err_stat == 1:
break
else:
err_stat = watch_shell.check_err_stat(pkt)
else:
err_stat = 1
test_logger.error("DCB Config file not found!")
raise RuntimeError("DCB Config file not found!\n{}".format(os.path.join(dcb_cfg_dir, file_name)))
elif mode == 'r':
pkt = watch_shell.do_read_dcb('{}'.format(dev))
if dev in ["ecg", "eda", "bia"]:
file_name = r".\dcb_cfg\{}_dcb_get.lcfg".format(dev)
else:
file_name = r".\dcb_cfg\{}_dcb_get.dcfg".format(dev)
with open(file_name, "w") as fs:
if dev == "adpd":
for pkt_element in pkt:
for index, data in enumerate(pkt_element["payload"]["data"]):
if index == 0 and type(data[0]) is int:
convert_2_hex = True
else:
convert_2_hex = False
if convert_2_hex:
data = [hex(data[0]), data[1]]
fs.write(" ".join(data))
fs.write("\n")
err_stat = watch_shell.check_err_stat(pkt_element)
else:
for index, data in enumerate(pkt["payload"]["data"]):
if index == 0 and type(data[0]) is int:
convert_2_hex = True
else:
convert_2_hex = False
if convert_2_hex:
data = [hex(data[0]), data[1]]
fs.write(" ".join(data))
fs.write("\n")
err_stat = watch_shell.check_err_stat(pkt)
        test_logger.info('DCB File Name: {}'.format(file_name))
elif mode == 'd':
pkt = watch_shell.do_delete_dcb('{}'.format(dev))
err_stat = watch_shell.check_err_stat(pkt)
else:
err_stat = 1
return err_stat, dcb_cfg_dir
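# Hedged usage sketch for dcb_cfg: write an ADXL DCB block from a config file in the local
# 'dcb_cfg' directory, read it back, then delete it. The config file name is hypothetical.
#   err, cfg_dir = dcb_cfg('w', 'adxl', 'adxl_dcb_config.cfg')
#   err, cfg_dir = dcb_cfg('r', 'adxl')
#   err, cfg_dir = dcb_cfg('d', 'adxl')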
def plot_and_save_png(f_path, col_idx=1, row_offset=1):
"""
This function reads a csv file and plots the data and saves the plot into a png file
:param f_path:
:param col_idx:
:return: plot_path
"""
data_list = read_csv_col(f_path, col_idx, row_offset)
f_name = os.path.splitext(os.path.split(f_path)[-1])[0]
plot_path = os.path.join(os.path.split(f_path)[0], 'plots', f_name+'.png')
plt.plot(data_list)
plt.xlabel('time')
plt.ylabel('amplitude')
plt.savefig(plot_path)
plt.close()
return plot_path
def update_arduino(in_obj):
"""
This function updates the arduino global variable usually from an initialize function call
:param in_obj:
:return:
"""
global arduino
arduino = in_obj
def update_watch_shell(in_obj):
"""
This function updates the watch_shell global variable usually from an initialize function call
:param in_obj:
:return:
"""
global watch_shell
watch_shell = in_obj
def update_dvt_version():
"""
This function updates the DVT_version global variable usually from an initialize function call
:return:
"""
global DVT_version
err_stat, chip_id = watch_shell.get_chip_id("2") # ADPD chip ID index is 2
if chip_id == 0xc0:
test_logger.info("DVT1 Watch Connected")
DVT_version = 0
else:
test_logger.info("DVT2 Watch Connected")
DVT_version = 1
# else:
# raise RuntimeError("Unknown DVT Watch version ADPD Chip ID-{}".format(str(chip_id)))
def update_adpd_clock_calibration_value():
global adpd_clk_calib
adpd_clk_calib = ["6", "2"][DVT_version]
def update_ts_mismatch_tolerance(tolerance=0):
global ts_mismatch_tolerance
ts_mismatch_tolerance = tolerance
def read_station_cfg():
"""
This function reads the station config yaml file and updates the global variables. If a file is not found, it will
create a file with the default values in it. The file location is <user>/AppData/Roaming/
:return:
"""
# Default values
cfg_dict = {'arduino_port': 'COM7', 'watch_port': 'COM13', 'watch_port_ble': 'COM7',
'fg_instr_addr': 'USB0::0x0957::0x2C07::MY52802639::0::INSTR',
'sm_instr_addr': 'GPIB0::23::INSTR', 'watch_port_type': 'USB', 'ble_mac': '6B-28-88-26-52-C3'}
station_cfg_path = os.path.join(os.getenv('APPDATA'), 'watch_test.yaml')
if os.path.exists(station_cfg_path) and os.path.isfile(station_cfg_path):
with open(station_cfg_path, 'r') as f_ref:
cfg_dict = yaml.load(f_ref, Loader=yaml.FullLoader)
else:
with open(station_cfg_path, 'w') as f_ref:
yaml.dump(cfg_dict, f_ref)
missing_keys = []
global arduino_port, watch_port, watch_port_ble, fg_instr_addr, sm_instr_addr, watch_port_type, ble_mac_addr
if 'watch_port_ble' not in cfg_dict.keys():
missing_keys.append("watch_port_ble")
watch_port_ble = ""
else:
watch_port_ble = cfg_dict['watch_port_ble']
if 'ble_mac' not in cfg_dict.keys():
missing_keys.append("ble_mac")
ble_mac_addr = ""
else:
ble_mac_addr = cfg_dict['ble_mac']
if len(missing_keys) != 0:
test_logger.warning("Please add the {} values in the {} file".format(" and ".join(missing_keys),
os.path.join(os.getenv('APPDATA'),
'watch_test.yaml')))
# raise ConditionCheckFailure("Please add the {} values in the {} file"
# "".format(" and ".join(missing_keys), os.path.join(os.getenv('APPDATA'),
# 'watch_test.yaml')))
arduino_port = cfg_dict['arduino_port']
watch_port = cfg_dict['watch_port']
fg_instr_addr = cfg_dict['fg_instr_addr']
sm_instr_addr = cfg_dict['sm_instr_addr']
if 'watch_port_type' in cfg_dict.keys():
watch_port_type = cfg_dict['watch_port_type']
else:
watch_port_type = 'USB'
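# For reference, the %APPDATA%\watch_test.yaml station config produced from the defaults
# above looks roughly like this (values differ per test station):
#   arduino_port: COM7
#   watch_port: COM13
#   watch_port_ble: COM7
#   watch_port_type: USB
#   ble_mac: 6B-28-88-26-52-C3
#   fg_instr_addr: USB0::0x0957::0x2C07::MY52802639::0::INSTR
#   sm_instr_addr: GPIB0::23::INSTR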
def close_plot_after_run(plot_name_list, close_plot_mode=False):
"""
This function closes all open plot and cmd windows opened by the test.
This checks for a global mode variable or the local mode variable. Local variable defaults to False
:param plot_name_list: This is a list of string values of the plot window names
:param close_plot_mode: This is a boolean arg
:return:
"""
if close_plot_mode or close_plot_mode_global:
for plot_name in plot_name_list:
os.system('taskkill /fi "WINDOWTITLE eq {}"'.format(plot_name))
time.sleep(0.25)
os.system('taskkill /fi "WINDOWTITLE eq C:\WINDOWS\system32\cmd.exe"')
def init_matlab_engine():
"""
This function imports and initializes matlab engine
MATLAB package needs to be installed from <matlab_root>/extern/engine/python directory
Use the command "python setup.py install"
:return:
"""
global matlab_eng
try:
import matlab.engine
matlab_eng = matlab.engine.start_matlab()
    except Exception as e:
        test_logger.error("Error loading MATLAB Engine! Details: {}".format(e))
if matlab_eng:
matlab_dir = os.path.join(os.path.abspath('.'), 'utils', 'matlab_utils')
matlab_eng.addpath(matlab_dir, nargout=0)
return matlab_eng
def initialize_setup(ts_tolerance=10, com_port="NA", mode="NA", ble_mac="NA", ble_com_port="NA"):
"""
This function runs necessary steps to initialize the test setup
- Connects to Arduino and initializes arduino global variable
:return:
"""
global fg, sm, cm
global test_report_dir
read_station_cfg()
# Instantiating watch shell
gui_signaller = cli_map.cli.QtSignaller()
# threading.Thread(target=cli_map.cli._init_gui, args=(gui_signaller,), daemon=True).start()
watch_shell_obj = CLIMap(gui_signaller, testing=True)
if com_port != "NA" and "COM" in com_port:
global watch_port
watch_port = com_port
if ble_com_port != "NA" and "COM" in ble_com_port:
global watch_port_ble
watch_port_ble = ble_com_port
if ble_mac != "NA":
global ble_mac_addr
ble_mac_addr = ble_mac
if mode != "NA":
global watch_port_type
watch_port_type = mode
if watch_port_type == 'USB':
watch_shell_obj.do_connect_usb('{}'.format(watch_port))
else:
watch_shell_obj.do_connect_ble('{} {}'.format(watch_port_ble, ble_mac_addr))
# cm = CLIMap(watch_shell_obj)
    # Creating Test Report Directory
err_stat, sys_info_dict = watch_shell_obj.get_system_info()
if err_stat:
raise RuntimeError('Unable to communicate with the watch!')
pcb_name = str(sys_info_dict['mac_addr'])
if not pcb_name:
pcb_name = easygui.enterbox('PCB Number:', 'Enter PCB Number')
test_report_dir = init_test_report_dir(pcb_name)
test_logger.info('Test Results Directory: {}'.format(test_report_dir))
err_stat, fw_ver_info_dict = watch_shell_obj.get_version_cli()
if not err_stat:
ver_info_str = 'Firmware Version: V{}.{}.{} | Build Info: {}'.format(fw_ver_info_dict['major'],
fw_ver_info_dict['minor'],
fw_ver_info_dict['patch'],
fw_ver_info_dict['build'])
update_robot_suite_doc(ver_info_str)
# Instantiating Arduino
#arduino_obj = SerialIface(port=arduino_port)
#arduino_obj.serial_write('!CfgIOMap\r')
# watch_shell_obj.do_toggleSaveCSV('')
#update_arduino(arduino_obj)
update_watch_shell(watch_shell_obj)
# TODO: Enable below code to configure instruments
#fg = instr_lib.KeysightFG33522B()
#fg.instr_connect(fg_instr_addr)
#sm = instr_lib.KeithleySM2400()
#sm.instr_connect(sm_instr_addr)
update_dvt_version()
update_adpd_clock_calibration_value()
update_ts_mismatch_tolerance(int(ts_tolerance))
def initialize_setup_nk(ts_tolerance=0, usb_com_port="NA", mode="NA",
ble_mac="NA", ble_com_port="NA", clear_flash=0, test_level=0, flash_reset=0):
"""
This function runs necessary steps to initialize the test setup
- Connects to Arduino and initializes arduino global variable
:return:
"""
global test_report_dir
read_station_cfg()
# Instantiating watch shell
gui_signaller = cli_map.cli.QtSignaller()
# threading.Thread(target=cli_map.cli._init_gui, args=(gui_signaller,), daemon=True).start()
watch_shell_obj = CLIMap(gui_signaller, testing=True)
update_watch_shell(watch_shell_obj)
if usb_com_port != "NA" and "COM" in usb_com_port:
global watch_port
watch_port = usb_com_port
if ble_com_port != "NA" and "COM" in ble_com_port:
global watch_port_ble
watch_port_ble = ble_com_port
if ble_mac != "NA":
global ble_mac_addr
ble_mac_addr = ble_mac
if mode != "NA":
global watch_port_type
watch_port_type = mode
connect(watch_port_type)
# Creating Test Report Directory
err_stat, sys_info_dict = watch_shell_obj.get_system_info()
if err_stat:
raise RuntimeError('Unable to communicate with the watch!')
pcb_name = str(sys_info_dict['mac_addr'])
if not pcb_name:
pcb_name = easygui.enterbox('PCB Number:', 'Enter PCB Number')
test_report_dir = init_test_report_dir(pcb_name)
test_logger.info('Test Results Directory: {}'.format(test_report_dir))
err_stat, fw_ver_info_dict = watch_shell_obj.get_version_cli()
if not err_stat:
ver_info_str = 'Firmware Version: V{}.{}.{} | Build Info: {}'.format(fw_ver_info_dict['major'],
fw_ver_info_dict['minor'],
fw_ver_info_dict['patch'],
fw_ver_info_dict['build'])
update_robot_suite_doc(ver_info_str)
update_dvt_version()
update_adpd_clock_calibration_value()
update_ts_mismatch_tolerance(int(ts_tolerance))
if clear_flash:
watch_shell.do_fs_format('')
if flash_reset:
watch_shell.do_flash_reset('')
test_level_update(test_level)
def connect_with_mode(mode="USB", retry=False):
if current_watch_mode != mode.upper():
disconnect()
connect(mode, retry)
def connect(mode="USB", retry=False):
global current_watch_mode
if mode.upper() == 'USB':
pkt = watch_shell.do_connect_usb('{}'.format(watch_port))
else:
pkt = watch_shell.do_connect_ble('{} {}'.format(watch_port_ble, ble_mac_addr))
if pkt["payload"]["status"].value[0] == -1:
if not retry:
test_logger.warning("***{} connect Not responding. Attempting Retry!!***".format(mode))
disconnect()
time.sleep(2)
connect(mode, True)
else:
test_logger.error("***{} connect Not responding***".format(mode))
raise ConditionCheckFailure("***{} connect Not responding***".format(mode))
current_watch_mode = mode.upper()
def disconnect():
watch_shell.do_quit("")
def test_level_update(level=0):
global test_level_handeler
test_level_handeler = level
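# Hedged usage sketch: switching an already initialized setup between USB and BLE. This
# assumes read_station_cfg()/initialize_setup_nk() have populated the port globals.
#   connect_with_mode('BLE')   # disconnects first if the current mode differs
#   connect_with_mode('USB')   # connect() retries once if the first attempt does not respond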
def init_test_report_dir(pcb_name):
"""
This function creates a directory for pcb test reports if not already present and
creates a folder inside it with the current date and time string for storing the test results
:param pcb_name:
:return:
"""
if copy_results_to_shared_drive:
if not pcb_name:
pcb_name = pcb_name_default
pcb_name = pcb_name.upper()
pcb_test_dir_path = os.path.join(shared_drive_path, pcb_name+'_test_result')
if not os.path.exists(shared_drive_path):
raise Exception('Unable to access shared drive path!')
if not os.path.exists(pcb_test_dir_path):
# Creating PCB test directory
os.mkdir(pcb_test_dir_path)
now = datetime.now()
dt_str = now.strftime("%m_%d_%y_%H_%M_%S")
# Creating time-stamped test directory
test_report_dir = os.path.join(pcb_test_dir_path, dt_str)
os.mkdir(test_report_dir)
with open('output_dir.tmp', 'w') as f_ref:
f_ref.write(test_report_dir)
else:
test_report_dir = ''
# Clearing plots folder
if os.path.isdir('plots'):
shutil.rmtree('plots')
time.sleep(1)
os.mkdir('plots')
return test_report_dir
@util_logger
def close_setup():
"""
This function runs necessary steps to close the test setup
:return:
"""
# common.watch_shell.do_exit('') # TODO: Need to enable this after exit command is fixed
update_arduino(None)
update_watch_shell(None)
close_plot_after_run(['ECG Data Plot'], True)
# TODO: Enable below code to configure function generator
# fg.close()
@util_logger
def set_switch(name, state):
"""
    This function extracts the io_id from the switch_map based on the input name and sets the state
:param name:
:param state:
:return:
"""
if name in switch_map:
io_id = switch_map[name]
arduino.serial_write('!SetIO {} {}\r'.format(io_id, state))
else:
raise Exception('Invalid switch name! Unable to find the provided switch name in the switch map')
def rename_stream_file(old_file_name, suffix='', row_offset=1, col_idx=1,
copy_to_shared_drive=copy_results_to_shared_drive, plot=save_plots):
"""
    This function renames the given stream file by appending a suffix to its name
:param old_file_name:
:param suffix:
:param row_offset: If there is header on row 0 of csv data, row_offset can be 1
:param col_idx: If the data is on column 2, col_idx will be 1
:param copy_to_shared_drive:
:param plot: True/False
:return:
"""
if os.path.exists(old_file_name): # Renaming stream file for each iteration
new_name = os.path.splitext(old_file_name)[0] + suffix
if os.path.exists(new_name) and os.path.isfile(new_name):
os.remove(new_name)
time.sleep(0.5)
os.rename(old_file_name, new_name)
else:
new_name = ''
if plot:
plot_path = plot_and_save_png(new_name, col_idx, row_offset)
if copy_to_shared_drive:
total_retry = 1
        for retry in range(total_retry):  # should allow multiple retries, but limited due to the copy delay
try:
test_group_name = inspect.getmodule(inspect.stack()[1][0]).__name__.split('.')[-1]
test_group_dir = os.path.join(test_report_dir, test_group_name)
if not os.path.exists(test_group_dir):
os.mkdir(test_group_dir)
file_name = os.path.split(new_name)[-1]
shutil.copyfile(new_name, os.path.join(test_group_dir, file_name))
if plot:
plot_dir = os.path.join(test_group_dir, 'plots')
if not os.path.exists(plot_dir):
os.mkdir(plot_dir)
plot_name = os.path.split(plot_path)[-1]
new_plot_path = os.path.join(plot_dir, plot_name)
shutil.copyfile(plot_path, new_plot_path)
break
except WindowsError:
test_logger.info("Trying to copy the file; Attempts remaining: {}".format(total_retry - retry - 1))
else:
test_logger.warning("*** File Copy Failed ***")
return new_name
def amp_volts_to_percentage(in_volt):
"""
This function takes in amplitude in volts and returns amplitude percentage for arduino
:param in_volt:
:return:
"""
full_scale_amp = float(volt_scale_range[1])/2
amp_percentage = (in_volt * 100) / full_scale_amp
if amp_percentage > 100:
amp_percentage = 100.0
return amp_percentage
def read_csv_file(file_path, num_cols=2):
"""
This function can be used for reading and returning data from csv files
:param file_path:
:param num_cols:
:return:
"""
with open(file_path, 'r') as f_ref:
lines = f_ref.readlines()
lines.pop(0)
rows = [list(map(float, line.split(','))) for line in lines]
        cols = list(zip(*rows))
        return cols[:num_cols]
def read_csv_col(file_path, col_idx=0, row_offset=1):
"""
This function reads a csv file and returns the column data specified by the col_idx.
:param file_path:
:param col_idx:
:return:
"""
with open(file_path, 'r') as f_ref:
line_list = f_ref.readlines()
col_data_list = []
last_line = len(line_list) - 1
for i, line in enumerate(line_list):
if i >= row_offset and any(line):
            line = line.strip()
if last_line == i and not any(line.split(",")[col_idx]): # If the last row is empty
continue
if i <= last_line - 7:
                # If the last ADPD packet has partial data (for example, when streaming Slots F, G, H and I,
                # the last packet might contain only Slot H and I data), truncate the trailing rows.
try:
any(line.split(",")[col_idx])
except IndexError:
continue
col_data_list.append(float(line.split(",")[col_idx]))
return col_data_list
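# Hedged usage sketch for read_csv_col; the file name is hypothetical and the call assumes a
# stream CSV with a header row (row_offset=1) and the sample data in column index 1.
#   samples = read_csv_col('ecg_quick_start.csv', col_idx=1, row_offset=1)
#   test_logger.info('Collected {} samples'.format(len(samples)))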
# **********************************************************************
def test_func():
test_logger.info('Logging has started!')
| 2,115
| 0
| 275
|
14901fa8b5972d2f082360e0f570d6b3d0cb7fd3
| 3,826
|
py
|
Python
|
robustcode/models/modules/iterators.py
|
david-maine/robust-code
|
971b7336813bb856fafab368320be47a16919e4c
|
[
"Apache-2.0"
] | 8
|
2020-08-16T23:26:37.000Z
|
2021-11-03T06:52:56.000Z
|
robustcode/models/modules/iterators.py
|
david-maine/robust-code
|
971b7336813bb856fafab368320be47a16919e4c
|
[
"Apache-2.0"
] | 2
|
2020-08-16T23:11:46.000Z
|
2021-03-30T02:12:23.000Z
|
robustcode/models/modules/iterators.py
|
eth-sri/robust-code
|
971b7336813bb856fafab368320be47a16919e4c
|
[
"Apache-2.0"
] | 2
|
2021-03-31T04:17:50.000Z
|
2021-10-21T20:49:27.000Z
|
import collections
import copy
from typing import Iterable
import dgl
import torch
import torchtext
| 32.423729
| 108
| 0.553581
|
import collections
import copy
from typing import Iterable
import dgl
import torch
import torchtext
class MiniBatch(
collections.namedtuple(
"MiniBatch", ["X", "Y", "lengths", "masks", "P", "data", "ids"]
)
):
__slots__ = ()
def clone(self):
assert self.P is None, "clone not implemented for field P."
if isinstance(self.X, list):
X = [x.detach().clone() for x in self.X]
M = {key: value.detach().clone() for key, value in self.masks.items()}
        elif isinstance(self.X, torch.Tensor):
            X = self.X.detach().clone()
            M = {key: value.detach().clone() for key, value in self.masks.items()}
elif isinstance(self.X, dgl.DGLGraph):
X = dgl.batch(dgl.unbatch(self.X))
M = {mask: X.ndata[mask] for mask in self.masks.keys()}
else:
assert False, "unhandled type to clone: {}".format(type(self.X))
return MiniBatch(
X,
self.Y.detach().clone(), # tensor
copy.copy(self.lengths), # list
M, # dict of tensors
None, # ??
copy.deepcopy(self.data) if self.data is not None else None, # dist
copy.copy(self.ids) if self.ids is not None else None, # list
)
class SequenceIterator:
def __init__(
self,
it: torchtext.data.BucketIterator,
input_fields: Iterable[str],
target_field: str,
mask_fields: Iterable[str] = None,
):
"""
Args:
it: iterator to wrap around
            input_fields: list of input field names to wrap as X
target_field: target field name to wrap as Y
mask_fields: list of mask fields to wrap as masks
"""
self.it = it
self.input_fields = input_fields
self.target_field = target_field
self.mask_fields = mask_fields if mask_fields is not None else []
def init_epoch(self):
self.it.init_epoch()
def __len__(self):
return len(self.it)
def __iter__(self):
for batch in self.it:
"""
            assumes at least one of the fields was created with include_lengths=True, e.g.:
torchtext.data.Field(sequential=True, include_lengths=True, eos_token='</s>', init_token='<s>')
"""
L = None
Y = getattr(batch, self.target_field)
if isinstance(Y, tuple):
Y = Y[0]
X = []
for field in self.input_fields:
data = getattr(batch, field)
"""
in case field was created with lengths=True it will be a tuple (values, lengths)
since we already have the length from target, we drop the lengths here
"""
X.append(data[0] if isinstance(data, tuple) else data)
if L is None and isinstance(data, tuple):
L = data[1]
M = {
mask_name: getattr(batch, mask_name).bool()
for mask_name in self.mask_fields
}
yield MiniBatch(X, Y, L.tolist(), M, None, None, batch.id.tolist())
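# Hedged usage sketch (not part of the original module): wrapping a torchtext BucketIterator
# whose dataset defines 'types', 'values', 'target', 'mask_valid' and 'id' fields. All field
# names and the model call below are assumptions for illustration only.
#   it = torchtext.data.BucketIterator(dataset, batch_size=32, sort_key=lambda x: len(x.types))
#   seq_it = SequenceIterator(it, input_fields=['types', 'values'],
#                             target_field='target', mask_fields=['mask_valid'])
#   for batch in seq_it:
#       model(batch.X, batch.lengths, batch.masks)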
class BPTTIterator:
def __init__(self, x: torch.Tensor, bptt_len: int, batch_first: bool = False):
"""
Iterates over x in chunks of bptt_len
Args:
            x: tensor of values of shape B x * if batch_first is True or N x B x * if False
bptt_len: size of chunks to generate
"""
self.x = x
self.bptt_len = bptt_len
self.batch_first = batch_first
def __iter__(self):
length_dim = 1 if self.batch_first else 0
size = self.x.size(length_dim)
for i in range(0, size, self.bptt_len):
chunk_len = min(self.bptt_len, size - i)
chunk = self.x.narrow(length_dim, i, chunk_len)
yield chunk
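# Hedged usage sketch (not part of the original module): chunking a token tensor of shape
# (seq_len, batch) into BPTT windows of 35 steps.
#   x = torch.zeros(100, 8, dtype=torch.long)
#   for chunk in BPTTIterator(x, bptt_len=35):
#       pass  # chunk shapes: (35, 8), (35, 8), (30, 8)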
| 2,368
| 1,285
| 69
|
f5950857c677b000860033c3cad22749b7050775
| 181,473
|
py
|
Python
|
chesstab/tools/chesstab-4-1-1_castling-option-correction.py
|
RogerMarsh/chesstab
|
01d375dc6bf025b621612a84513e55c4640a78ad
|
[
"BSD-3-Clause"
] | null | null | null |
chesstab/tools/chesstab-4-1-1_castling-option-correction.py
|
RogerMarsh/chesstab
|
01d375dc6bf025b621612a84513e55c4640a78ad
|
[
"BSD-3-Clause"
] | null | null | null |
chesstab/tools/chesstab-4-1-1_castling-option-correction.py
|
RogerMarsh/chesstab
|
01d375dc6bf025b621612a84513e55c4640a78ad
|
[
"BSD-3-Clause"
] | null | null | null |
# chesstab-4-1-1_castling-option-correction.py
# Copyright 2020 Roger Marsh
# Licence: See LICENCE (BSD licence)
"""Read games from a ChessTab database and report games with FENs where the
castling options are not consistent with the piece placement, and attempt
correction on request.
The database must be compatible with ChessTab-4.1.
"""
import tkinter
import tkinter.ttk
import tkinter.filedialog
import tkinter.messagebox
import os
from ast import literal_eval
import time
# This module must have the PGN class from pgn-read-1.3.1 and the PGNUpdate
# class from ChessTab-4.1 so both are copied here, rather than imported, as
# PGN131 along with PGNError131.
# The fitting pgn_read constants module is copied too.
# The two chessql constants are declared here too.
# All docstrings removed from the copied classes and modules.
# The names are modified to indicate their reliance on pgn-read-1.3.1.
# One constant is copied from chesstab.core.chessrecord.
# A regular expession is copied from chesstab.core.querystatement.
# The PGN class from pgn-read-1.3.2 is used to verify any corrected FENs are
# valid so it is copied here, rather than imported, as PGN132 along with
# PGNError132.
# PGN131 and PGN132 use the same version of .constants module
import re
from solentware_base import modulequery
from solentware_base.core.record import KeyData, Value, Record
from pgn_read.core import parser
from .. import (
APPLICATION_DATABASE_MODULE,
FULL_POSITION_MODULE,
ANALYSIS_MODULE,
)
from ..core.chessrecord import ChessDBrecordGameUpdate, ChessDBrecordAnalysis
# These have to be same at both versions of ChessTab so use the current ones.
from ..core.filespec import (
FileSpec,
POSITIONS_FIELD_DEF,
SOURCE_FIELD_DEF,
PIECESQUAREMOVE_FIELD_DEF,
PIECEMOVE_FIELD_DEF,
SQUAREMOVE_FIELD_DEF,
GAMES_FILE_DEF,
REPERTOIRE_FILE_DEF,
OPENING_ERROR_FIELD_DEF,
PGN_DATE_FIELD_DEF,
VARIATION_FIELD_DEF,
ENGINE_FIELD_DEF,
PARTIALPOSITION_NAME_FIELD_DEF,
RULE_FIELD_DEF,
COMMAND_FIELD_DEF,
ANALYSIS_FILE_DEF,
)
# start of attributes copied from pgn_read.core.constants at version 1.3.1
# pgn specification values
TAG_EVENT = "Event"
TAG_SITE = "Site"
TAG_DATE = "Date"
TAG_ROUND = "Round"
TAG_WHITE = "White"
TAG_BLACK = "Black"
TAG_RESULT = "Result"
TAG_FEN = "FEN"
SEVEN_TAG_ROSTER = {
TAG_EVENT: "?",
TAG_SITE: "?",
TAG_DATE: "????.??.??",
TAG_ROUND: "?",
TAG_WHITE: "?",
TAG_BLACK: "?",
TAG_RESULT: "*",
}
SEVEN_TAG_ROSTER_DISPLAY_ORDER = (
TAG_SITE,
TAG_ROUND,
TAG_EVENT,
TAG_DATE,
TAG_WHITE,
TAG_BLACK,
TAG_RESULT,
)
SEVEN_TAG_ROSTER_EXPORT_ORDER = (
TAG_EVENT,
TAG_SITE,
TAG_DATE,
TAG_ROUND,
TAG_WHITE,
TAG_BLACK,
TAG_RESULT,
)
# Allow for decorators to do special cases for Date and Round sorting
SPECIAL_TAG_DATE = ("?", "0")
SPECIAL_TAG_ROUND = {"?": 1, "-": 2}
NORMAL_TAG_ROUND = 3
SEVEN_TAG_ROSTER_ARCHIVE_SORT1 = (
TAG_EVENT,
TAG_SITE,
TAG_DATE,
)
SEVEN_TAG_ROSTER_ARCHIVE_SORT2 = (
TAG_ROUND,
TAG_WHITE,
TAG_BLACK,
TAG_RESULT,
)
WHITE_WIN = "1-0"
BLACK_WIN = "0-1"
DRAW = "1/2-1/2"
UNKNOWN_RESULT = "*"
RESULT_SET = {WHITE_WIN, BLACK_WIN, DRAW, UNKNOWN_RESULT}
# Repertoire Tags (non-standard)
TAG_OPENING = "Opening"
REPERTOIRE_TAG_ORDER = (TAG_OPENING, TAG_RESULT)
REPERTOIRE_GAME_TAGS = {
TAG_OPENING: "?",
TAG_RESULT: UNKNOWN_RESULT,
}
PGN_PAWN = ""
PGN_KING = "K"
PGN_QUEEN = "Q"
PGN_ROOK = "R"
PGN_BISHOP = "B"
PGN_KNIGHT = "N"
PGN_FROM_SQUARE_DISAMBIGUATION = frozenset((PGN_QUEEN, PGN_BISHOP, PGN_KNIGHT))
# Refugees from old PGN regular expression pattern matching strings.
O_O_O = "O-O-O"
O_O = "O-O"
PLAIN_MOVE = ""
CAPTURE_MOVE = "x"
LINEFEED = "\n"
CARRIAGE_RETURN = "\r"
NEWLINE = "".join((LINEFEED, CARRIAGE_RETURN))
SPACE = " "
HORIZONTAL_TAB = "\t"
FORMFEED = "\f"
VERTICAL_TAB = "\v"
# PGN regular expression pattern matching strings
# Building blocks
ANYTHING_ELSE = "."
WHITESPACE = "\s+"
FULLSTOP = "."
PERIOD = "\\" + FULLSTOP
INTEGER = "[1-9][0-9]*"
TERMINATION = "|".join((WHITE_WIN, BLACK_WIN, DRAW, "\\" + UNKNOWN_RESULT))
START_TAG = "["
END_TAG = "]"
SYMBOL = "([A-Za-z0-9][A-Za-z0-9_+#=:-]*)"
STRING = r'"((?:[^\\"]|\\.)*)"'
TAG_PAIR = "".join(
(
"(\\",
START_TAG,
")\s*",
SYMBOL,
"\s*",
STRING,
"\s*",
"(\\",
END_TAG,
")",
)
)
START_COMMENT = "{"
END_COMMENT = "}"
COMMENT = "".join(
("\\", START_COMMENT, "[^", END_COMMENT, "]*\\", END_COMMENT)
)
LEFT_ANGLE_BRACE = "<"
RIGHT_ANGLE_BRACE = ">"
RESERVED = "".join(
(LEFT_ANGLE_BRACE, "[^", RIGHT_ANGLE_BRACE, "]*", RIGHT_ANGLE_BRACE)
)
COMMENT_TO_EOL = ";(?:[^\n]*)\n"
PERCENT = "%"
ESCAPE_LINE = PERCENT.join((r"(?:\A|(?<=\n))", "(?:[^\n]*)\n"))
NAG = r"\$[0-9]+(?!/|-)"
START_RAV = "("
END_RAV = ")"
# KQRBN are replaced by PGN_KING, ..., constants; not WKING, ..., constants.
FNR = "a-h"
RNR = "1-8"
PAWN_PROMOTE = "".join(
(
"(?:([" + FNR + "])(x))?([" + FNR + "][18])(=[",
PGN_BISHOP,
PGN_KNIGHT,
PGN_QUEEN,
PGN_ROOK,
"])",
)
)
PAWN_CAPTURE = "([" + FNR + "])(x)([" + FNR + "][2-7])"
PIECE_CAPTURE = "".join(
(
"(?:(",
PGN_KING,
")|(?:([",
PGN_BISHOP,
PGN_KNIGHT,
PGN_QUEEN,
PGN_ROOK,
"])([" + FNR + "]?[" + RNR + "]?)))",
"(x)([" + FNR + "][" + RNR + "])",
)
)
PIECE_CHOICE_MOVE = "".join(
(
"([",
PGN_BISHOP,
PGN_KNIGHT,
PGN_QUEEN,
PGN_ROOK,
"])([",
FNR + RNR + "])([" + FNR + "][" + RNR + "])",
)
)
PIECE_MOVE = "".join(
(
"([",
PGN_KING,
PGN_BISHOP,
PGN_KNIGHT,
PGN_QUEEN,
PGN_ROOK,
"])([" + FNR + "][" + RNR + "])",
)
)
PAWN_MOVE = "([" + FNR + "][" + RNR + "])"
CASTLES = "(O-O(?:-O)?)"
CHECK = "([+#]?)"
ANNOTATION = "([!?][!?]?)?"
# Regular expression to detect full games in import format; export format is a
# subset of import format. The text stored on database is captured.
IMPORT_FORMAT = "".join(
(
"(?:",
TAG_PAIR,
")",
"|",
"(?:",
"(?:",
"(?:",
PAWN_PROMOTE,
")",
"|",
"(?:",
PAWN_CAPTURE,
")",
"|",
"(?:",
PIECE_CAPTURE,
")",
"|",
"(?:",
PIECE_CHOICE_MOVE,
")",
"|",
"(?:",
PIECE_MOVE,
")",
"|",
"(?:",
PAWN_MOVE,
")",
"|",
"(?:",
CASTLES,
")",
")",
"(?:",
CHECK,
")",
"(?:",
ANNOTATION,
")",
")",
"|",
"(",
COMMENT,
")",
"|",
"(",
NAG,
")",
"|",
"(",
COMMENT_TO_EOL,
")",
"|",
"(",
TERMINATION,
")",
"|",
INTEGER,
"|",
PERIOD,
"|",
WHITESPACE,
"|",
"(\\",
START_RAV,
")",
"|",
"(\\",
END_RAV,
")",
"|",
RESERVED,
"|",
ESCAPE_LINE,
"|",
"(",
ANYTHING_ELSE,
")",
)
)
# Regular expressions to disambiguate moves: move text like 'Bc4d5' is the only
# kind which could need to be interpreted as one move rather than two.
DISAMBIGUATE_FORMAT = "".join(
(
"[" + PGN_BISHOP + PGN_KNIGHT + PGN_QUEEN + "]",
"[" + FNR + "][" + RNR + "]",
"[" + FNR + "][" + RNR + "]",
)
)
UNAMBIGUOUS_FORMAT = ".*"
# Regular expression to detect possible beginning of move in an error sequence,
# "Bxa" for example while typing "Bxa6".
# No constants for partial castling moves.
POSSIBLE_MOVE = "".join(
(
"[O",
PGN_KING,
PGN_BISHOP,
PGN_KNIGHT,
PGN_ROOK,
PGN_QUEEN,
FNR,
"][-O",
FNR,
RNR,
"+#?!=]* *",
)
)
#
# Group offsets for IMPORT_FORMAT matches
#
IFG_START_TAG = 1
IFG_TAG_SYMBOL = 2
# IFG_TAG_STRING = 3
IFG_TAG_STRING_VALUE = 3
# IFG_TAG_END = 4
IFG_PAWN_PROMOTE_FROM_FILE = 5
IFG_PAWN_TAKES_PROMOTE = 6
IFG_PAWN_PROMOTE_SQUARE = 7
IFG_PAWN_PROMOTE_PIECE = 8
IFG_PAWN_CAPTURE_FROM_FILE = 9
IFG_PAWN_TAKES = 10
IFG_PAWN_CAPTURE_SQUARE = 11
IFG_KING_CAPTURE = 12
IFG_PIECE_CAPTURE = 13
IFG_PIECE_CAPTURE_FROM = 14
IFG_PIECE_TAKES = 15
IFG_PIECE_CAPTURE_SQUARE = 16
IFG_PIECE_CHOICE = 17
IFG_PIECE_CHOICE_FILE_OR_RANK = 18
IFG_PIECE_CHOICE_SQUARE = 19
IFG_PIECE_MOVE = 20
IFG_PIECE_SQUARE = 21
IFG_PAWN_SQUARE = 22
IFG_CASTLES = 23
IFG_CHECK = 24
IFG_ANNOTATION = 25
IFG_COMMENT = 26
IFG_NAG = 27
IFG_COMMENT_TO_EOL = 28
IFG_TERMINATION = 29
IFG_START_RAV = 30
IFG_END_RAV = 31
IFG_ANYTHING_ELSE = 32
#
# Parser states
#
PGN_SEARCHING = 0
PGN_SEARCHING_AFTER_ERROR_IN_RAV = 1
PGN_SEARCHING_AFTER_ERROR_IN_GAME = 2
PGN_COLLECTING_TAG_PAIRS = 3
PGN_COLLECTING_MOVETEXT = 4
PGN_COLLECTING_NON_WHITESPACE_WHILE_SEARCHING = 5
PGN_DISAMBIGUATE_MOVE = 6
#
# numeric annotation glyphs (just validation for now)
#
NAG_TRANSLATION = {"$" + str(o): None for o in range(1, 499)}
#
# Square constants and flags
#
BOARDSIDE = 8
BOARDSQUARES = BOARDSIDE * BOARDSIDE
SQUARE_BITS = [1 << i for i in range(BOARDSQUARES)]
ALL_SQUARES = sum(SQUARE_BITS)
EN_PASSANT_TO_SQUARES = sum([SQUARE_BITS[s] for s in range(24, 40)])
EN_PASSANT_FROM_SQUARES = sum([SQUARE_BITS[s] for s in range(8, 16)]) | sum(
[SQUARE_BITS[s] for s in range(48, 56)]
)
# Pieces
# Encoding positions is more efficient (key length) if pawns are encoded with
# a value less than 4 with either the white or the black pawn encoded as 0 and
# the squares that cannot host a pawn include 0..3 as their encodings (bytes
# \x01..\x03 which arises naturally as the second byte of the 2-byte encodings
# ), typically the squares b1 c1 and d1. The other two values less than 4 are
# best used for the kings which are always present. Absence of a piece is best
# encoded with the highest value, which will be 12 if using lists, wherever
# possible, rather than dictionaries for mappings.
NOPIECE = ""
WKING = "K"
WQUEEN = "Q"
WROOK = "R"
WBISHOP = "B"
WKNIGHT = "N"
WPAWN = "P"
BKING = "k"
BQUEEN = "q"
BROOK = "r"
BBISHOP = "b"
BKNIGHT = "n"
BPAWN = "p"
PIECES = frozenset(
(
WKING,
WQUEEN,
WROOK,
WBISHOP,
WKNIGHT,
WPAWN,
BKING,
BQUEEN,
BROOK,
BBISHOP,
BKNIGHT,
BPAWN,
)
)
# Define white and black pieces and map to values used in database records
WPIECES = frozenset((WKING, WQUEEN, WROOK, WBISHOP, WKNIGHT, WPAWN))
BPIECES = frozenset((BKING, BQUEEN, BROOK, BBISHOP, BKNIGHT, BPAWN))
# The default initial board, internal representation.
INITIAL_BOARD = (
BROOK,
BKNIGHT,
BBISHOP,
BQUEEN,
BKING,
BBISHOP,
BKNIGHT,
BROOK,
BPAWN,
BPAWN,
BPAWN,
BPAWN,
BPAWN,
BPAWN,
BPAWN,
BPAWN,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
WPAWN,
WPAWN,
WPAWN,
WPAWN,
WPAWN,
WPAWN,
WPAWN,
WPAWN,
WROOK,
WKNIGHT,
WBISHOP,
WQUEEN,
WKING,
WBISHOP,
WKNIGHT,
WROOK,
)
INITIAL_OCCUPIED_SQUARES = (
frozenset(
(48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63)
),
frozenset((0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)),
)
INITIAL_BOARD_BITMAP = sum(
[sum([SQUARE_BITS[o] for o in s]) for s in INITIAL_OCCUPIED_SQUARES]
)
INITIAL_PIECE_LOCATIONS = {
k: v
for k, v in (
(WKING, (60,)),
(WQUEEN, (59,)),
(WROOK, (56, 63)),
(WBISHOP, (58, 61)),
(WKNIGHT, (57, 62)),
(WPAWN, (48, 49, 50, 51, 52, 53, 54, 55)),
(BKING, (4,)),
(BQUEEN, (3,)),
(BROOK, (0, 7)),
(BBISHOP, (2, 5)),
(BKNIGHT, (1, 6)),
(BPAWN, (8, 9, 10, 11, 12, 13, 14, 15)),
)
}
# White and black side
WHITE_SIDE = 0
BLACK_SIDE = 1
OTHER_SIDE = BLACK_SIDE, WHITE_SIDE
SIDE_KING = WKING, BKING
# chesstab-4-1-1_castling-option-correction.py
# Copyright 2020 Roger Marsh
# Licence: See LICENCE (BSD licence)
"""Read games from a ChessTab database and report games with FENs where the
castling options are not consistent with the piece placement, and attempt
correction on request.
The database must be compatible with ChessTab-4.1.
"""
import tkinter
import tkinter.ttk
import tkinter.filedialog
import tkinter.messagebox
import os
from ast import literal_eval
import time
# This module must have the PGN class from pgn-read-1.3.1 and the PGNUpdate
# class from ChessTab-4.1 so both are copied here, rather than imported, as
# PGN131 along with PGNError131.
# The matching version of the pgn_read constants module is copied too.
# The two chessql constants are declared here too.
# All docstrings removed from the copied classes and modules.
# The names are modified to indicate their reliance on pgn-read-1.3.1.
# One constant is copied from chesstab.core.chessrecord.
# A regular expression is copied from chesstab.core.querystatement.
# The PGN class from pgn-read-1.3.2 is used to verify any corrected FENs are
# valid so it is copied here, rather than imported, as PGN132 along with
# PGNError132.
# PGN131 and PGN132 use the same version of .constants module
import re
from solentware_base import modulequery
from solentware_base.core.record import KeyData, Value, Record
from pgn_read.core import parser
from .. import (
APPLICATION_DATABASE_MODULE,
FULL_POSITION_MODULE,
ANALYSIS_MODULE,
)
from ..core.chessrecord import ChessDBrecordGameUpdate, ChessDBrecordAnalysis
# These have to be same at both versions of ChessTab so use the current ones.
from ..core.filespec import (
FileSpec,
POSITIONS_FIELD_DEF,
SOURCE_FIELD_DEF,
PIECESQUAREMOVE_FIELD_DEF,
PIECEMOVE_FIELD_DEF,
SQUAREMOVE_FIELD_DEF,
GAMES_FILE_DEF,
REPERTOIRE_FILE_DEF,
OPENING_ERROR_FIELD_DEF,
PGN_DATE_FIELD_DEF,
VARIATION_FIELD_DEF,
ENGINE_FIELD_DEF,
PARTIALPOSITION_NAME_FIELD_DEF,
RULE_FIELD_DEF,
COMMAND_FIELD_DEF,
ANALYSIS_FILE_DEF,
)
# start of attributes copied from pgn_read.core.constants at version 1.3.1
# pgn specification values
TAG_EVENT = "Event"
TAG_SITE = "Site"
TAG_DATE = "Date"
TAG_ROUND = "Round"
TAG_WHITE = "White"
TAG_BLACK = "Black"
TAG_RESULT = "Result"
TAG_FEN = "FEN"
SEVEN_TAG_ROSTER = {
TAG_EVENT: "?",
TAG_SITE: "?",
TAG_DATE: "????.??.??",
TAG_ROUND: "?",
TAG_WHITE: "?",
TAG_BLACK: "?",
TAG_RESULT: "*",
}
SEVEN_TAG_ROSTER_DISPLAY_ORDER = (
TAG_SITE,
TAG_ROUND,
TAG_EVENT,
TAG_DATE,
TAG_WHITE,
TAG_BLACK,
TAG_RESULT,
)
SEVEN_TAG_ROSTER_EXPORT_ORDER = (
TAG_EVENT,
TAG_SITE,
TAG_DATE,
TAG_ROUND,
TAG_WHITE,
TAG_BLACK,
TAG_RESULT,
)
# Allow for decorators to do special cases for Date and Round sorting
SPECIAL_TAG_DATE = ("?", "0")
SPECIAL_TAG_ROUND = {"?": 1, "-": 2}
NORMAL_TAG_ROUND = 3
SEVEN_TAG_ROSTER_ARCHIVE_SORT1 = (
TAG_EVENT,
TAG_SITE,
TAG_DATE,
)
SEVEN_TAG_ROSTER_ARCHIVE_SORT2 = (
TAG_ROUND,
TAG_WHITE,
TAG_BLACK,
TAG_RESULT,
)
WHITE_WIN = "1-0"
BLACK_WIN = "0-1"
DRAW = "1/2-1/2"
UNKNOWN_RESULT = "*"
RESULT_SET = {WHITE_WIN, BLACK_WIN, DRAW, UNKNOWN_RESULT}
# Repertoire Tags (non-standard)
TAG_OPENING = "Opening"
REPERTOIRE_TAG_ORDER = (TAG_OPENING, TAG_RESULT)
REPERTOIRE_GAME_TAGS = {
TAG_OPENING: "?",
TAG_RESULT: UNKNOWN_RESULT,
}
PGN_PAWN = ""
PGN_KING = "K"
PGN_QUEEN = "Q"
PGN_ROOK = "R"
PGN_BISHOP = "B"
PGN_KNIGHT = "N"
PGN_FROM_SQUARE_DISAMBIGUATION = frozenset((PGN_QUEEN, PGN_BISHOP, PGN_KNIGHT))
# Refugees from old PGN regular expression pattern matching strings.
O_O_O = "O-O-O"
O_O = "O-O"
PLAIN_MOVE = ""
CAPTURE_MOVE = "x"
LINEFEED = "\n"
CARRIAGE_RETURN = "\r"
NEWLINE = "".join((LINEFEED, CARRIAGE_RETURN))
SPACE = " "
HORIZONTAL_TAB = "\t"
FORMFEED = "\f"
VERTICAL_TAB = "\v"
# PGN regular expression pattern matching strings
# Building blocks
ANYTHING_ELSE = "."
WHITESPACE = r"\s+"
FULLSTOP = "."
PERIOD = "\\" + FULLSTOP
INTEGER = "[1-9][0-9]*"
TERMINATION = "|".join((WHITE_WIN, BLACK_WIN, DRAW, "\\" + UNKNOWN_RESULT))
START_TAG = "["
END_TAG = "]"
SYMBOL = "([A-Za-z0-9][A-Za-z0-9_+#=:-]*)"
STRING = r'"((?:[^\\"]|\\.)*)"'
TAG_PAIR = "".join(
(
"(\\",
START_TAG,
")\s*",
SYMBOL,
"\s*",
STRING,
"\s*",
"(\\",
END_TAG,
")",
)
)
START_COMMENT = "{"
END_COMMENT = "}"
COMMENT = "".join(
("\\", START_COMMENT, "[^", END_COMMENT, "]*\\", END_COMMENT)
)
LEFT_ANGLE_BRACE = "<"
RIGHT_ANGLE_BRACE = ">"
RESERVED = "".join(
(LEFT_ANGLE_BRACE, "[^", RIGHT_ANGLE_BRACE, "]*", RIGHT_ANGLE_BRACE)
)
COMMENT_TO_EOL = ";(?:[^\n]*)\n"
PERCENT = "%"
ESCAPE_LINE = PERCENT.join((r"(?:\A|(?<=\n))", r"(?:[^\n]*)\n"))
NAG = r"\$[0-9]+(?!/|-)"
START_RAV = "("
END_RAV = ")"
# KQRBN are replaced by PGN_KING, ..., constants; not WKING, ..., constants.
FNR = "a-h"
RNR = "1-8"
PAWN_PROMOTE = "".join(
(
"(?:([" + FNR + "])(x))?([" + FNR + "][18])(=[",
PGN_BISHOP,
PGN_KNIGHT,
PGN_QUEEN,
PGN_ROOK,
"])",
)
)
PAWN_CAPTURE = "([" + FNR + "])(x)([" + FNR + "][2-7])"
PIECE_CAPTURE = "".join(
(
"(?:(",
PGN_KING,
")|(?:([",
PGN_BISHOP,
PGN_KNIGHT,
PGN_QUEEN,
PGN_ROOK,
"])([" + FNR + "]?[" + RNR + "]?)))",
"(x)([" + FNR + "][" + RNR + "])",
)
)
PIECE_CHOICE_MOVE = "".join(
(
"([",
PGN_BISHOP,
PGN_KNIGHT,
PGN_QUEEN,
PGN_ROOK,
"])([",
FNR + RNR + "])([" + FNR + "][" + RNR + "])",
)
)
PIECE_MOVE = "".join(
(
"([",
PGN_KING,
PGN_BISHOP,
PGN_KNIGHT,
PGN_QUEEN,
PGN_ROOK,
"])([" + FNR + "][" + RNR + "])",
)
)
PAWN_MOVE = "([" + FNR + "][" + RNR + "])"
CASTLES = "(O-O(?:-O)?)"
CHECK = "([+#]?)"
ANNOTATION = "([!?][!?]?)?"
# Regular expression to detect full games in import format; export format is a
# subset of import format. The text stored on the database is captured.
IMPORT_FORMAT = "".join(
(
"(?:",
TAG_PAIR,
")",
"|",
"(?:",
"(?:",
"(?:",
PAWN_PROMOTE,
")",
"|",
"(?:",
PAWN_CAPTURE,
")",
"|",
"(?:",
PIECE_CAPTURE,
")",
"|",
"(?:",
PIECE_CHOICE_MOVE,
")",
"|",
"(?:",
PIECE_MOVE,
")",
"|",
"(?:",
PAWN_MOVE,
")",
"|",
"(?:",
CASTLES,
")",
")",
"(?:",
CHECK,
")",
"(?:",
ANNOTATION,
")",
")",
"|",
"(",
COMMENT,
")",
"|",
"(",
NAG,
")",
"|",
"(",
COMMENT_TO_EOL,
")",
"|",
"(",
TERMINATION,
")",
"|",
INTEGER,
"|",
PERIOD,
"|",
WHITESPACE,
"|",
"(\\",
START_RAV,
")",
"|",
"(\\",
END_RAV,
")",
"|",
RESERVED,
"|",
ESCAPE_LINE,
"|",
"(",
ANYTHING_ELSE,
")",
)
)
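# Example (illustrative, not part of the copied pgn_read source): once the
# pattern is compiled as re_tokens below, a lone SAN token such as "Nf3" is
# caught by the PIECE_MOVE alternative, so
#     m = re_tokens.match("Nf3")
#     m.group(IFG_PIECE_MOVE), m.group(IFG_PIECE_SQUARE)  # ('N', 'f3')
# where the IFG_* group offsets are defined further down.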
# Regular expressions to disambiguate moves: move text like 'Bc4d5' is the only
# kind which could need to be interpreted as one move rather than two.
DISAMBIGUATE_FORMAT = "".join(
(
"[" + PGN_BISHOP + PGN_KNIGHT + PGN_QUEEN + "]",
"[" + FNR + "][" + RNR + "]",
"[" + FNR + "][" + RNR + "]",
)
)
UNAMBIGUOUS_FORMAT = ".*"
# Regular expression to detect possible beginning of move in an error sequence,
# "Bxa" for example while typing "Bxa6".
# No constants for partial castling moves.
POSSIBLE_MOVE = "".join(
(
"[O",
PGN_KING,
PGN_BISHOP,
PGN_KNIGHT,
PGN_ROOK,
PGN_QUEEN,
FNR,
"][-O",
FNR,
RNR,
"+#?!=]* *",
)
)
#
# Group offsets for IMPORT_FORMAT matches
#
IFG_START_TAG = 1
IFG_TAG_SYMBOL = 2
# IFG_TAG_STRING = 3
IFG_TAG_STRING_VALUE = 3
# IFG_TAG_END = 4
IFG_PAWN_PROMOTE_FROM_FILE = 5
IFG_PAWN_TAKES_PROMOTE = 6
IFG_PAWN_PROMOTE_SQUARE = 7
IFG_PAWN_PROMOTE_PIECE = 8
IFG_PAWN_CAPTURE_FROM_FILE = 9
IFG_PAWN_TAKES = 10
IFG_PAWN_CAPTURE_SQUARE = 11
IFG_KING_CAPTURE = 12
IFG_PIECE_CAPTURE = 13
IFG_PIECE_CAPTURE_FROM = 14
IFG_PIECE_TAKES = 15
IFG_PIECE_CAPTURE_SQUARE = 16
IFG_PIECE_CHOICE = 17
IFG_PIECE_CHOICE_FILE_OR_RANK = 18
IFG_PIECE_CHOICE_SQUARE = 19
IFG_PIECE_MOVE = 20
IFG_PIECE_SQUARE = 21
IFG_PAWN_SQUARE = 22
IFG_CASTLES = 23
IFG_CHECK = 24
IFG_ANNOTATION = 25
IFG_COMMENT = 26
IFG_NAG = 27
IFG_COMMENT_TO_EOL = 28
IFG_TERMINATION = 29
IFG_START_RAV = 30
IFG_END_RAV = 31
IFG_ANYTHING_ELSE = 32
#
# Parser states
#
PGN_SEARCHING = 0
PGN_SEARCHING_AFTER_ERROR_IN_RAV = 1
PGN_SEARCHING_AFTER_ERROR_IN_GAME = 2
PGN_COLLECTING_TAG_PAIRS = 3
PGN_COLLECTING_MOVETEXT = 4
PGN_COLLECTING_NON_WHITESPACE_WHILE_SEARCHING = 5
PGN_DISAMBIGUATE_MOVE = 6
#
# numeric annotation glyphs (just validation for now)
#
NAG_TRANSLATION = {"$" + str(o): None for o in range(1, 499)}
#
# Square constants and flags
#
BOARDSIDE = 8
BOARDSQUARES = BOARDSIDE * BOARDSIDE
SQUARE_BITS = [1 << i for i in range(BOARDSQUARES)]
ALL_SQUARES = sum(SQUARE_BITS)
EN_PASSANT_TO_SQUARES = sum([SQUARE_BITS[s] for s in range(24, 40)])
EN_PASSANT_FROM_SQUARES = sum([SQUARE_BITS[s] for s in range(8, 16)]) | sum(
[SQUARE_BITS[s] for s in range(48, 56)]
)
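# Sanity checks on the bitboard encoding (added for exposition; they rely
# only on the definitions directly above and are cheap at import time).
assert SQUARE_BITS[0] == 1 and SQUARE_BITS[63] == 1 << 63
assert ALL_SQUARES == (1 << BOARDSQUARES) - 1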
# Pieces
# Encoding positions is more efficient (in key length) if pawns are encoded
# with a value less than 4, with either the white or the black pawn encoded
# as 0, and if the squares that cannot host a pawn include 0..3 among their
# encodings (bytes \x01..\x03, which arise naturally as the second byte of
# the 2-byte encodings), typically the squares b1, c1 and d1.  The other two
# values less than 4 are best used for the kings, which are always present.
# Absence of a piece is best encoded with the highest value, which will be
# 12 when using lists, wherever possible, rather than dictionaries for
# mappings.
NOPIECE = ""
WKING = "K"
WQUEEN = "Q"
WROOK = "R"
WBISHOP = "B"
WKNIGHT = "N"
WPAWN = "P"
BKING = "k"
BQUEEN = "q"
BROOK = "r"
BBISHOP = "b"
BKNIGHT = "n"
BPAWN = "p"
PIECES = frozenset(
(
WKING,
WQUEEN,
WROOK,
WBISHOP,
WKNIGHT,
WPAWN,
BKING,
BQUEEN,
BROOK,
BBISHOP,
BKNIGHT,
BPAWN,
)
)
# Define white and black pieces and map to values used in database records
WPIECES = frozenset((WKING, WQUEEN, WROOK, WBISHOP, WKNIGHT, WPAWN))
BPIECES = frozenset((BKING, BQUEEN, BROOK, BBISHOP, BKNIGHT, BPAWN))
# The default initial board, internal representation.
INITIAL_BOARD = (
BROOK,
BKNIGHT,
BBISHOP,
BQUEEN,
BKING,
BBISHOP,
BKNIGHT,
BROOK,
BPAWN,
BPAWN,
BPAWN,
BPAWN,
BPAWN,
BPAWN,
BPAWN,
BPAWN,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
WPAWN,
WPAWN,
WPAWN,
WPAWN,
WPAWN,
WPAWN,
WPAWN,
WPAWN,
WROOK,
WKNIGHT,
WBISHOP,
WQUEEN,
WKING,
WBISHOP,
WKNIGHT,
WROOK,
)
INITIAL_OCCUPIED_SQUARES = (
frozenset(
(48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63)
),
frozenset((0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)),
)
INITIAL_BOARD_BITMAP = sum(
[sum([SQUARE_BITS[o] for o in s]) for s in INITIAL_OCCUPIED_SQUARES]
)
INITIAL_PIECE_LOCATIONS = {
k: v
for k, v in (
(WKING, (60,)),
(WQUEEN, (59,)),
(WROOK, (56, 63)),
(WBISHOP, (58, 61)),
(WKNIGHT, (57, 62)),
(WPAWN, (48, 49, 50, 51, 52, 53, 54, 55)),
(BKING, (4,)),
(BQUEEN, (3,)),
(BROOK, (0, 7)),
(BBISHOP, (2, 5)),
(BKNIGHT, (1, 6)),
(BPAWN, (8, 9, 10, 11, 12, 13, 14, 15)),
)
}
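# Illustrative consistency check (not in the original module): the initial
# king squares agree between INITIAL_BOARD and INITIAL_PIECE_LOCATIONS,
# with square 0 being a8 and square 63 being h1.
assert INITIAL_BOARD[60] == WKING and 60 in INITIAL_PIECE_LOCATIONS[WKING]
assert INITIAL_BOARD[4] == BKING and 4 in INITIAL_PIECE_LOCATIONS[BKING]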
# White and black side
WHITE_SIDE = 0
BLACK_SIDE = 1
OTHER_SIDE = BLACK_SIDE, WHITE_SIDE
SIDE_KING = WKING, BKING
# Map PGN piece file and rank names to internal representation
MAPPIECE = (
{
PGN_PAWN: WPAWN,
PGN_KING: WKING,
PGN_QUEEN: WQUEEN,
PGN_ROOK: WROOK,
PGN_BISHOP: WBISHOP,
PGN_KNIGHT: WKNIGHT,
},
{
PGN_PAWN: BPAWN,
PGN_KING: BKING,
PGN_QUEEN: BQUEEN,
PGN_ROOK: BROOK,
PGN_BISHOP: BBISHOP,
PGN_KNIGHT: BKNIGHT,
},
)  # A tuple of two dicts indexed by side: WHITE_SIDE then BLACK_SIDE.
MAPFILE = {"a": 0, "b": 1, "c": 2, "d": 3, "e": 4, "f": 5, "g": 6, "h": 7}
MAPRANK = {
"8": 0,
"7": 8,
"6": 16,
"5": 24,
"4": 32,
"3": 40,
"2": 48,
"1": 56,
}
MAPROW = {"8": 0, "7": 1, "6": 2, "5": 3, "4": 4, "3": 5, "2": 6, "1": 7}
# {'a8':0, 'b8':1, ..., 'g1':62, 'h1':63}, the order squares are listed in
# Forsyth-Edwards Notation and the square numbers used internally.
MAP_PGN_SQUARE_NAME_TO_FEN_ORDER = {
"".join((f, r)): fn + rn
for f, fn in MAPFILE.items()
for r, rn in MAPRANK.items()
}
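# A quick check of the mapping described above (added for exposition):
# a8 is square 0, h1 is square 63, and e4 is 4 + 32 == 36.
assert MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["a8"] == 0
assert MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["h1"] == 63
assert MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["e4"] == 36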
# FEN constants
FEN_WHITE = "w"
FEN_BLACK = "b"
FEN_FIELD_DELIM = " "
FEN_RANK_DELIM = "/"
FEN_NULL = "-"
FEN_INITIAL_HALFMOVE_COUNT = 0
FEN_INITIAL_FULLMOVE_NUMBER = 1
FEN_INITIAL_CASTLING = WKING + WQUEEN + BKING + BQUEEN
FEN_STARTPOSITION = FEN_FIELD_DELIM.join(
(
FEN_RANK_DELIM.join(
(
"".join(
(
BROOK,
BKNIGHT,
BBISHOP,
BQUEEN,
BKING,
BBISHOP,
BKNIGHT,
BROOK,
)
),
"".join(
(BPAWN, BPAWN, BPAWN, BPAWN, BPAWN, BPAWN, BPAWN, BPAWN)
),
str(len(MAPFILE)),
str(len(MAPFILE)),
str(len(MAPFILE)),
str(len(MAPFILE)),
"".join(
(WPAWN, WPAWN, WPAWN, WPAWN, WPAWN, WPAWN, WPAWN, WPAWN)
),
"".join(
(
WROOK,
WKNIGHT,
WBISHOP,
WQUEEN,
WKING,
WBISHOP,
WKNIGHT,
WROOK,
)
),
)
),
FEN_WHITE,
FEN_INITIAL_CASTLING,
FEN_NULL,
str(FEN_INITIAL_HALFMOVE_COUNT),
str(FEN_INITIAL_FULLMOVE_NUMBER),
)
)
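# The expression above evaluates to the familiar start-position FEN; an
# added check making that explicit:
assert FEN_STARTPOSITION == (
    "rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1"
)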
FEN_FIELD_COUNT = 6
FEN_SIDES = {FEN_WHITE: WHITE_SIDE, FEN_BLACK: BLACK_SIDE}
FEN_TOMOVE = FEN_WHITE, FEN_BLACK
# Map FEN square names to board square numbers for en passant move and capture
FEN_WHITE_MOVE_TO_EN_PASSANT = {
"a6": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["a6"],
"b6": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["b6"],
"c6": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["c6"],
"d6": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["d6"],
"e6": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["e6"],
"f6": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["f6"],
"g6": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["g6"],
"h6": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["h6"],
}
FEN_BLACK_MOVE_TO_EN_PASSANT = {
"a3": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["a3"],
"b3": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["b3"],
"c3": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["c3"],
"d3": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["d3"],
"e3": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["e3"],
"f3": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["f3"],
"g3": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["g3"],
"h3": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["h3"],
}
FEN_WHITE_CAPTURE_EN_PASSANT = {
"a6": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["a5"],
"b6": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["b5"],
"c6": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["c5"],
"d6": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["d5"],
"e6": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["e5"],
"f6": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["f5"],
"g6": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["g5"],
"h6": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["h5"],
}
FEN_BLACK_CAPTURE_EN_PASSANT = {
"a3": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["a4"],
"b3": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["b4"],
"c3": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["c4"],
"d3": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["d4"],
"e3": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["e4"],
"f3": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["f4"],
"g3": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["g4"],
"h3": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["h4"],
}
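# Example (illustrative): with white to move and an en passant target square
# of "e6" in the FEN, the dictionaries above give
#     FEN_WHITE_MOVE_TO_EN_PASSANT["e6"] == 20    # square the pawn moves to
#     FEN_WHITE_CAPTURE_EN_PASSANT["e6"] == 28    # captured pawn's square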
FEN_EN_PASSANT_TARGET_RANK = {"5": "6", "4": "3"}
# Specification of the conditions to be met to permit castling, and of the
# changes to make to the board to play the move in the internal
# representation.
# The square to which the king moves is not included in the set of squares
# that must not be under attack, because that condition is checked for all
# moves after they are played provisionally on the board.  The special
# additional thing about castling is that the king cannot move out of or
# through check; for every kind of move the king must not be under attack
# after playing the move, but as currently implemented there is no harm,
# except waste, in including the test.
CASTLING_W = MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["e1"]
CASTLING_WK = MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["h1"]
CASTLING_WQ = MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["a1"]
CASTLING_B = MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["e8"]
CASTLING_BK = MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["h8"]
CASTLING_BQ = MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["a8"]
CASTLING_AVAILABITY_SQUARES = (
SQUARE_BITS[CASTLING_WQ]
| SQUARE_BITS[CASTLING_W]
| SQUARE_BITS[CASTLING_WK]
| SQUARE_BITS[CASTLING_BQ]
| SQUARE_BITS[CASTLING_B]
| SQUARE_BITS[CASTLING_BK]
)
CASTLING_SQUARES = {
WKING: (
CASTLING_W,
CASTLING_WK,
(
MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["f1"],
MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["g1"],
),
(),
WROOK,
WKING,
),
WQUEEN: (
CASTLING_W,
CASTLING_WQ,
(
MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["d1"],
MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["c1"],
),
(MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["b1"],),
WROOK,
WKING,
),
BKING: (
CASTLING_B,
CASTLING_BK,
(
MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["f8"],
MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["g8"],
),
(),
BROOK,
BKING,
),
BQUEEN: (
CASTLING_B,
CASTLING_BQ,
(
MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["d8"],
MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["c8"],
),
(MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["b8"],),
BROOK,
BKING,
),
}
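# Layout of each CASTLING_SQUARES entry (reading the tuples above): the
# king's start square, the rook's start square, the two squares the king
# crosses or lands on, any extra square which must be empty (b1 or b8 for
# queenside castling), the rook piece, and the king piece.  For example:
#     CASTLING_SQUARES[WKING] == (60, 63, (61, 62), (), WROOK, WKING)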
# FEN validation
FEN_CASTLING_OPTION_REPEAT_MAX = 1
FEN_PIECE_COUNT_PER_SIDE_MAX = 16
FEN_KING_COUNT = 1
FEN_PAWN_COUNT_MAX = 8
FEN_QUEEN_COUNT_INITIAL = 1
FEN_ROOK_COUNT_INITIAL = 2
FEN_BISHOP_COUNT_INITIAL = 2
FEN_KNIGHT_COUNT_INITIAL = 2
FEN_MAXIMUM_PIECES_GIVING_CHECK = 2
# variation markers and non-move placeholders
NON_MOVE = None
MOVE_ERROR = False
MOVE_AFTER_ERROR = 0
MOVE_TEXT = True
# Maximum line length in a PGN file for movetext, excluding EOL ('\n').
# Some PGN tags are allowed to exceed this.
# The rule may not be enforceable for comments, especially any re-exported,
# without disturbing any formatting attempts with EOL and spaces.
PGN_MAX_LINE_LEN = 79
# Piece moves and line definitions
_RANKS = [
sum([SQUARE_BITS[s + r * BOARDSIDE] for s in range(BOARDSIDE)])
for r in range(BOARDSIDE)
for f in range(BOARDSIDE)
]
_FILES = [
sum([SQUARE_BITS[s * BOARDSIDE + f] for s in range(BOARDSIDE)])
for r in range(BOARDSIDE)
for f in range(BOARDSIDE)
]
_TOPLEFT_TO_BOTTOMRIGHT = [
sum(
[
SQUARE_BITS[
((f + c) % BOARDSIDE) + ((r + c) % BOARDSIDE) * BOARDSIDE
]
for c in range(BOARDSIDE)
if (
f + c < BOARDSIDE
and r + c < BOARDSIDE
or f + c >= BOARDSIDE
and r + c >= BOARDSIDE
)
]
)
for r in range(BOARDSIDE)
for f in range(BOARDSIDE)
]
_BOTTOMLEFT_TO_TOPRIGHT = [
sum(
[
SQUARE_BITS[
((f - c) % BOARDSIDE) + ((r + c) % BOARDSIDE) * BOARDSIDE
]
for c in range(BOARDSIDE)
if f >= c and r + c < BOARDSIDE or c > f and r + c >= BOARDSIDE
]
)
for r in range(BOARDSIDE)
for f in range(BOARDSIDE)
]
RANKS = [_RANKS[r * BOARDSIDE] for r in range(BOARDSIDE)]
FILES = _FILES[:BOARDSIDE]
ROOK_MOVES = [(_RANKS[k] | _FILES[k]) - s for k, s in enumerate(SQUARE_BITS)]
BISHOP_MOVES = [
(_TOPLEFT_TO_BOTTOMRIGHT[k] | _BOTTOMLEFT_TO_TOPRIGHT[k]) - s
for k, s in enumerate(SQUARE_BITS)
]
QUEEN_MOVES = [(BISHOP_MOVES[s] | ROOK_MOVES[s]) for s in range(BOARDSQUARES)]
KNIGHT_MOVES = [
(
(
sum(
_FILES[kf + r * BOARDSIDE]
for kf in range(f - 2, f + 3)
if kf >= 0 and kf < BOARDSIDE
)
& sum(
_RANKS[f + kr * 8]
for kr in range(r - 2, r + 3)
if kr >= 0 and kr < BOARDSIDE
)
)
& ~(
_RANKS[f + r * BOARDSIDE]
| _FILES[f + r * BOARDSIDE]
| _TOPLEFT_TO_BOTTOMRIGHT[f + r * BOARDSIDE]
| _BOTTOMLEFT_TO_TOPRIGHT[f + r * BOARDSIDE]
)
)
for r in range(BOARDSIDE)
for f in range(BOARDSIDE)
]
KING_MOVES = [
(
QUEEN_MOVES[f + r * BOARDSIDE]
& (
sum(
_FILES[kf + r * BOARDSIDE]
for kf in range(f - 1, f + 2)
if kf >= 0 and kf < BOARDSIDE
)
& sum(
_RANKS[f + kr * 8]
for kr in range(r - 1, r + 2)
if kr >= 0 and kr < BOARDSIDE
)
)
)
for r in range(BOARDSIDE)
for f in range(BOARDSIDE)
]
WHITE_PAWN_MOVES_TO_SQUARE = []
for s in range(BOARDSQUARES):
if s < BOARDSQUARES - BOARDSIDE * 2:
WHITE_PAWN_MOVES_TO_SQUARE.append(SQUARE_BITS[s + BOARDSIDE])
else:
WHITE_PAWN_MOVES_TO_SQUARE.append(0)
for s in range(BOARDSQUARES - BOARDSIDE * 4, BOARDSQUARES - BOARDSIDE * 3):
WHITE_PAWN_MOVES_TO_SQUARE[s] |= SQUARE_BITS[s + BOARDSIDE * 2]
BLACK_PAWN_MOVES_TO_SQUARE = []
for s in range(BOARDSQUARES):
if s < BOARDSIDE * 2:
BLACK_PAWN_MOVES_TO_SQUARE.append(0)
else:
BLACK_PAWN_MOVES_TO_SQUARE.append(SQUARE_BITS[s - BOARDSIDE])
for s in range(BOARDSIDE * 3, BOARDSIDE * 4):
BLACK_PAWN_MOVES_TO_SQUARE[s] |= SQUARE_BITS[s - BOARDSIDE * 2]
# 'b1' for black, and 'b8' for white, are allowed as pawn move specifications
# to disambiguate queen moves like 'Qd1f1'.
# PAWN_MOVE_DESITINATION filters them out.
PAWN_MOVE_DESITINATION = [0, 0]
for s in range(BOARDSQUARES):
if s < BOARDSIDE:
pass
elif s < BOARDSIDE * 2:
PAWN_MOVE_DESITINATION[0] |= SQUARE_BITS[s]
elif s < BOARDSQUARES - BOARDSIDE * 2:
PAWN_MOVE_DESITINATION[0] |= SQUARE_BITS[s]
PAWN_MOVE_DESITINATION[1] |= SQUARE_BITS[s]
elif s < BOARDSQUARES - BOARDSIDE:
PAWN_MOVE_DESITINATION[1] |= SQUARE_BITS[s]
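# Example (illustrative): a7 (square 8) is a possible non-promotion
# destination for a white pawn but not for a black one, so
#     bool(PAWN_MOVE_DESITINATION[WHITE_SIDE] & SQUARE_BITS[8])  # True
#     bool(PAWN_MOVE_DESITINATION[BLACK_SIDE] & SQUARE_BITS[8])  # False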
WHITE_PAWN_CAPTURES_TO_SQUARE = []
for s in range(BOARDSQUARES):
if s > BOARDSQUARES - BOARDSIDE * 2 - 1:
WHITE_PAWN_CAPTURES_TO_SQUARE.append(0)
elif s % BOARDSIDE == 0:
WHITE_PAWN_CAPTURES_TO_SQUARE.append(SQUARE_BITS[s + BOARDSIDE + 1])
elif s % BOARDSIDE == BOARDSIDE - 1:
WHITE_PAWN_CAPTURES_TO_SQUARE.append(SQUARE_BITS[s + BOARDSIDE - 1])
else:
WHITE_PAWN_CAPTURES_TO_SQUARE.append(
SQUARE_BITS[s + BOARDSIDE - 1] | SQUARE_BITS[s + BOARDSIDE + 1]
)
BLACK_PAWN_CAPTURES_TO_SQUARE = []
for s in range(BOARDSQUARES):
if s < BOARDSIDE * 2:
BLACK_PAWN_CAPTURES_TO_SQUARE.append(0)
elif s % BOARDSIDE == 0:
BLACK_PAWN_CAPTURES_TO_SQUARE.append(SQUARE_BITS[s - BOARDSIDE + 1])
elif s % BOARDSIDE == BOARDSIDE - 1:
BLACK_PAWN_CAPTURES_TO_SQUARE.append(SQUARE_BITS[s - BOARDSIDE - 1])
else:
BLACK_PAWN_CAPTURES_TO_SQUARE.append(
SQUARE_BITS[s - BOARDSIDE - 1] | SQUARE_BITS[s - BOARDSIDE + 1]
)
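# GAPS[f][t] is the set of squares which must be empty for a move from
# square f to square t along a rank, file or diagonal: the squares strictly
# between f and t when the two are aligned, 0 when they are adjacent or a
# knight's move apart, and ALL_SQUARES when no piece could ever make the
# journey (so the emptiness test always fails on a non-empty board).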
GAPS = []
for f in range(BOARDSQUARES):
GAPS.append(list())
for t in range(BOARDSQUARES):
aligned = (
(_RANKS[f] & _RANKS[t])
| (_FILES[f] & _FILES[t])
| (_TOPLEFT_TO_BOTTOMRIGHT[f] & _TOPLEFT_TO_BOTTOMRIGHT[t])
| (_BOTTOMLEFT_TO_TOPRIGHT[f] & _BOTTOMLEFT_TO_TOPRIGHT[t])
)
if not aligned:
if SQUARE_BITS[t] & KNIGHT_MOVES[f]:
GAPS[f].append(0)
else:
GAPS[f].append(ALL_SQUARES)
else:
gap = (
aligned
& sum(SQUARE_BITS[min(f, t) : max(f, t) + 1])
& ~(SQUARE_BITS[f] | SQUARE_BITS[t])
)
if gap:
GAPS[f].append(gap)
elif f == t:
GAPS[f].append(ALL_SQUARES)
else:
GAPS[f].append(0)
del _TOPLEFT_TO_BOTTOMRIGHT
del _BOTTOMLEFT_TO_TOPRIGHT
del _FILES
del _RANKS
del f, t, gap, aligned
PIECE_CAPTURE_MAP = {
k: v
for k, v in (
(WKING, KING_MOVES),
(WQUEEN, QUEEN_MOVES),
(WROOK, ROOK_MOVES),
(WBISHOP, BISHOP_MOVES),
(WKNIGHT, KNIGHT_MOVES),
(WPAWN, WHITE_PAWN_CAPTURES_TO_SQUARE),
(BKING, KING_MOVES),
(BQUEEN, QUEEN_MOVES),
(BROOK, ROOK_MOVES),
(BBISHOP, BISHOP_MOVES),
(BKNIGHT, KNIGHT_MOVES),
(BPAWN, BLACK_PAWN_CAPTURES_TO_SQUARE),
)
}
PIECE_MOVE_MAP = {
k: v
for k, v in (
(WKING, KING_MOVES),
(WQUEEN, QUEEN_MOVES),
(WROOK, ROOK_MOVES),
(WBISHOP, BISHOP_MOVES),
(WKNIGHT, KNIGHT_MOVES),
(WPAWN, WHITE_PAWN_MOVES_TO_SQUARE),
(BKING, KING_MOVES),
(BQUEEN, QUEEN_MOVES),
(BROOK, ROOK_MOVES),
(BBISHOP, BISHOP_MOVES),
(BKNIGHT, KNIGHT_MOVES),
(BPAWN, BLACK_PAWN_MOVES_TO_SQUARE),
)
}
# Lookup tables for string representation of square and move numbers.
MAP_FEN_ORDER_TO_PGN_SQUARE_NAME = [
t[-1]
for t in sorted(
(v, k) for k, v in MAP_PGN_SQUARE_NAME_TO_FEN_ORDER.items()
)
]
MOVE_NUMBER_KEYS = tuple(
["0"] + [str(len(hex(i)) - 2) + hex(i)[2:] for i in range(1, 256)]
)
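# The entries above are length-prefixed hexadecimal strings, for example
# MOVE_NUMBER_KEYS[10] == "1a" and MOVE_NUMBER_KEYS[255] == "2ff";
# presumably the length prefix keeps the encoded numbers sorting in
# numeric order.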
# Error markers for PGN display.
ERROR_START_COMMENT = START_COMMENT + "Error: "
ESCAPE_END_COMMENT = "::" + START_COMMENT + START_COMMENT + "::"
# end of attributes copied from pgn_read.core.constants
# Defined in chesstab.core.chessrecord.
PLAYER_NAME_TAGS = frozenset((TAG_WHITE, TAG_BLACK))
# Imported from chesstab.core.querystatement.
re_normalize_player_name = re.compile(r"([^,\.\s]+)(?:[,\.\s]*)")
# The two chessql.core.constants attributes needed.
ANY_WHITE_PIECE_NAME = r"A"
ANY_BLACK_PIECE_NAME = r"a"
MAP_PGN_PIECE_TO_CQL_COMPOSITE_PIECE = {
WKING: ANY_WHITE_PIECE_NAME,
WQUEEN: ANY_WHITE_PIECE_NAME,
WROOK: ANY_WHITE_PIECE_NAME,
WBISHOP: ANY_WHITE_PIECE_NAME,
WKNIGHT: ANY_WHITE_PIECE_NAME,
WPAWN: ANY_WHITE_PIECE_NAME,
BKING: ANY_BLACK_PIECE_NAME,
BQUEEN: ANY_BLACK_PIECE_NAME,
BROOK: ANY_BLACK_PIECE_NAME,
BBISHOP: ANY_BLACK_PIECE_NAME,
BKNIGHT: ANY_BLACK_PIECE_NAME,
BPAWN: ANY_BLACK_PIECE_NAME,
}
re_tokens = re.compile(IMPORT_FORMAT)
# Avoid re.fullmatch() method while compatibility with Python 3.3 is important.
re_disambiguate_error = re.compile(DISAMBIGUATE_FORMAT.join(("^", "$")))
re_disambiguate_non_move = re.compile(UNAMBIGUOUS_FORMAT.join(("^", "$")))
re_possible_move = re.compile(POSSIBLE_MOVE.join(("(^", "$)")))
# for runtime "from <db|dpt>results import ChessDatabase" and similar
_ChessDB = "ChessDatabase"
_FullPositionDS = "FullPositionDS"
_AnalysisDS = "AnalysisDS"
class PGN131Error(Exception):
pass
class PGN131(object):
def __init__(self):
super().__init__()
# data generated from PGN text for game while checking moves are legal
self.tokens = []
self.error_tokens = []
self.tags_in_order = []
# data generated from PGN text for game after checking moves are legal
self.collected_game = None
self.board_bitmap = None
self.occupied_squares = []
self.board = []
self.piece_locations = {}
self.fullmove_number = None
self.halfmove_count = None
self.en_passant = None
self.castling = None
self.active_side = None
# ravstack keeps track of the position at start of game or variation
# and the position after application of a valid move. Thus the value
# in ravstack[-1] is (None, <position start>) at start of game or line
# and (<position start>, <position after move>) after application of a
# valid move from gametokens.
self.ravstack = []
# data used while parsing PGN text to split into tag and move tokens
self._initial_fen = None
self._state = None
self._move_error_state = None
self._rewind_state = None
self._despatch_table = [
self._searching,
self._searching_after_error_in_rav,
self._searching_after_error_in_game,
self._collecting_tag_pairs,
self._collecting_movetext,
self._collecting_non_whitespace_while_searching,
self._disambiguate_move,
]
@staticmethod
def _read_pgn(string, length):
pgntext = string.read(length)
while len(pgntext):
yield pgntext
pgntext = string.read(length)
yield pgntext
def read_games(self, source, size=10000000, housekeepinghook=lambda: None):
self._state = PGN_SEARCHING
self._move_error_state = PGN_COLLECTING_NON_WHITESPACE_WHILE_SEARCHING
for pgntext in self._read_pgn(source, size):
if len(self.error_tokens):
self._state = self._rewind_state
pgntext = "".join(self.error_tokens) + pgntext
self.error_tokens.clear()
for t in re_tokens.finditer(pgntext):
self._despatch_table[self._state](t)
if t.group(IFG_TERMINATION):
yield t
housekeepinghook()
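    # Typical use (an illustrative sketch, not from the original source; it
    # assumes the dispatch methods of this class populate collected_game as
    # each game terminator is yielded):
    #     reader = PGN131()
    #     with open("games.pgn") as pgnfile:
    #         for _ in reader.read_games(pgnfile):
    #             if reader.is_pgn_valid():
    #                 pass  # use reader.collected_game here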
def read_pgn_tokens(
self, source, size=10000000, housekeepinghook=lambda: None
):
self._state = PGN_SEARCHING
self._move_error_state = PGN_COLLECTING_NON_WHITESPACE_WHILE_SEARCHING
for pgntext in self._read_pgn(source, size):
if len(self.error_tokens):
self._state = self._rewind_state
pgntext = "".join(self.error_tokens) + pgntext
self.error_tokens.clear()
for t in re_tokens.finditer(pgntext):
self._despatch_table[self._state](t)
yield t.group(IFG_TERMINATION)
def get_games(self, source):
self._state = PGN_SEARCHING
self._move_error_state = PGN_COLLECTING_NON_WHITESPACE_WHILE_SEARCHING
for t in re_tokens.finditer(source):
self._despatch_table[self._state](t)
if t.group(IFG_TERMINATION):
yield t
def get_first_pgn_token(self, source):
self._state = PGN_SEARCHING
self._move_error_state = PGN_COLLECTING_NON_WHITESPACE_WHILE_SEARCHING
try:
t = next(re_tokens.finditer(source))
self._despatch_table[self._state](t)
return False if t.group(IFG_TERMINATION) else True
except StopIteration:
return
def read_first_game(
self, source, size=10000000, housekeepinghook=lambda: None
):
return next(
self.read_games(
source, size=size, housekeepinghook=housekeepinghook
)
)
def get_first_game(self, source):
return next(self.get_games(source))
def is_movetext_valid(self):
return not self.collected_game[3]
def is_pgn_valid(self):
return self.is_movetext_valid() and self.is_tag_roster_valid()
def is_tag_roster_valid(self):
tags_in_order = self.collected_game[0]
tags = self.collected_game[1]
if len(tags) != len(tags_in_order):
# Tag must appear no more than once
return False
for v in tags.values():
if len(v) == 0:
# Tag value must not be null
return False
for t in SEVEN_TAG_ROSTER:
if t not in tags:
# A mandatory tag is missing
return False
return True
def set_position_fen(self, fen=None):
# fen is standard start position by default
if fen is None:
self.board_bitmap = INITIAL_BOARD_BITMAP
self.board = list(INITIAL_BOARD)
self.occupied_squares[:] = [
set(s) for s in INITIAL_OCCUPIED_SQUARES
]
self.piece_locations = {
k: set(v) for k, v in INITIAL_PIECE_LOCATIONS.items()
}
self.ravstack[:] = [
(
None,
(
INITIAL_BOARD,
WHITE_SIDE,
FEN_INITIAL_CASTLING,
FEN_NULL,
FEN_INITIAL_HALFMOVE_COUNT,
FEN_INITIAL_FULLMOVE_NUMBER,
),
)
]
self.active_side = WHITE_SIDE
self.castling = FEN_INITIAL_CASTLING
self.en_passant = FEN_NULL
self.halfmove_count = FEN_INITIAL_HALFMOVE_COUNT
self.fullmove_number = FEN_INITIAL_FULLMOVE_NUMBER
self._initial_fen = True
return
# fen specifies an arbitrary position.
# fen has six space delimited fields.
fs = fen.split(FEN_FIELD_DELIM)
if len(fs) != FEN_FIELD_COUNT:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
(
piece_placement,
active_side,
castling,
en_passant,
halfmove_count,
fullmove_number,
) = fs
del fs
# fen side to move field.
if active_side not in FEN_SIDES:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
# fen castling field.
if castling != FEN_NULL:
for c in FEN_INITIAL_CASTLING:
if castling.count(c) > FEN_CASTLING_OPTION_REPEAT_MAX:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
for c in castling:
if c not in FEN_INITIAL_CASTLING:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
# fen square to which a pawn can move when capturing en passant.
if active_side == FEN_WHITE:
if en_passant not in FEN_WHITE_MOVE_TO_EN_PASSANT:
if en_passant != FEN_NULL:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
elif active_side == FEN_BLACK:
if en_passant not in FEN_BLACK_MOVE_TO_EN_PASSANT:
if en_passant != FEN_NULL:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
# Earlier 'fen side to move field' test makes this unreachable.
else:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
# fen halfmove count since pawn move or capture.
if not halfmove_count.isdigit():
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
# fen fullmove number.
if not fullmove_number.isdigit():
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
# fen piece placement field has eight ranks delimited by '/'.
ranks = piece_placement.split(FEN_RANK_DELIM)
if len(ranks) != BOARDSIDE:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
# fen piece placement field has pieces and empty squares only.
for r in ranks:
for c in r:
if c not in PIECES:
if not c.isdigit():
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
# Exactly 64 squares: equivalent to exactly 8 squares per rank.
for r in ranks:
if sum([1 if not s.isdigit() else int(s) for s in r]) != BOARDSIDE:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
# No pawns on first or eighth ranks.
if (
ranks[0].count(WPAWN)
+ ranks[0].count(BPAWN)
+ ranks[BOARDSIDE - 1].count(WPAWN)
+ ranks[BOARDSIDE - 1].count(BPAWN)
):
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
# No more than 16 pieces per side.
for s in WPIECES, BPIECES:
for p in s:
if (
sum([piece_placement.count(p) for p in s])
> FEN_PIECE_COUNT_PER_SIDE_MAX
):
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
# Exactly one king per side.
for p in WKING, BKING:
if piece_placement.count(p) != FEN_KING_COUNT:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
# No more than eight pawns per side.
for p in WPAWN, BPAWN:
if piece_placement.count(p) > FEN_PAWN_COUNT_MAX:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
# Piece counts within initial position and pawn promotion bounds.
if (
piece_placement.count(WPAWN)
- FEN_PAWN_COUNT_MAX
+ max(piece_placement.count(WQUEEN) - FEN_QUEEN_COUNT_INITIAL, 0)
+ max(piece_placement.count(WROOK) - FEN_ROOK_COUNT_INITIAL, 0)
+ max(piece_placement.count(WBISHOP) - FEN_BISHOP_COUNT_INITIAL, 0)
+ max(piece_placement.count(WKNIGHT) - FEN_KNIGHT_COUNT_INITIAL, 0)
) > 0:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
if (
piece_placement.count(BPAWN)
- FEN_PAWN_COUNT_MAX
+ max(piece_placement.count(BQUEEN) - FEN_QUEEN_COUNT_INITIAL, 0)
+ max(piece_placement.count(BROOK) - FEN_ROOK_COUNT_INITIAL, 0)
+ max(piece_placement.count(BBISHOP) - FEN_BISHOP_COUNT_INITIAL, 0)
+ max(piece_placement.count(BKNIGHT) - FEN_KNIGHT_COUNT_INITIAL, 0)
) > 0:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
# Position is legal apart from checks, actual and deduced, and deduced
# move that sets up en passant capture possibility.
board = []
for r in ranks:
for c in r:
if c in PIECES:
board.append(c)
else:
board.extend([NOPIECE] * int(c))
# Castling availability must fit the board position.
if board[CASTLING_W] != WKING:
if WKING in castling:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
if WQUEEN in castling:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
if board[CASTLING_B] != BKING:
if BKING in castling:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
if BQUEEN in castling:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
if board[CASTLING_WK] != WROOK:
if WKING in castling:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
if board[CASTLING_WQ] != WROOK:
if WQUEEN in castling:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
if board[CASTLING_BK] != BROOK:
if BKING in castling:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
if board[CASTLING_BQ] != BROOK:
if BQUEEN in castling:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
        # The two squares behind the pawn that can be captured en passant
        # must be empty.  FEN quotes the en passant capture square whenever
        # the latest move is a two-square pawn move; there does not need to
        # be a pawn able to make the capture.  The side with the move must
        # not be in check diagonally through the square containing a pawn
        # that can be captured en passant, treating that square as empty.
if en_passant != FEN_NULL:
if en_passant in FEN_WHITE_MOVE_TO_EN_PASSANT:
s = FEN_WHITE_MOVE_TO_EN_PASSANT[en_passant]
if (
board[s] != NOPIECE
or board[s - 8] != NOPIECE
or board[s + 8] != BPAWN
):
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
elif en_passant in FEN_BLACK_MOVE_TO_EN_PASSANT:
s = FEN_BLACK_MOVE_TO_EN_PASSANT[en_passant]
if (
board[s] != NOPIECE
or board[s + 8] != NOPIECE
or board[s - 8] != WPAWN
):
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
else:
# Should not happen, caught earlier.
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
# FEN is legal, except for restrictions on kings in check, so set
# instance attributes to fit description of position.
piece_locations = {k: set() for k in INITIAL_PIECE_LOCATIONS}
active_side_squares = set()
inactive_side_squares = set()
board_bitmap = []
if active_side == FEN_WHITE:
active_side_pieces = WPIECES
else:
active_side_pieces = BPIECES
for s, p in enumerate(board):
if p in PIECES:
piece_locations[p].add(s)
board_bitmap.append(SQUARE_BITS[s])
if p in active_side_pieces:
active_side_squares.add(s)
else:
inactive_side_squares.add(s)
for active_side_king_square in piece_locations[
SIDE_KING[FEN_SIDES[active_side]]
]:
pass # set active_side_king_square without pop() and add().
for inactive_side_king_square in piece_locations[
SIDE_KING[OTHER_SIDE[FEN_SIDES[active_side]]]
]:
            pass  # set inactive_side_king_square without pop() and add().
# Side without the move must not be in check.
# Cannot use is_active_king_attacked method because attributes are
# not set until the position is ok.
gap = GAPS[inactive_side_king_square]
board_bitmap = sum(board_bitmap)
for s in active_side_squares:
if (
not board_bitmap & gap[s]
and SQUARE_BITS[s]
& PIECE_CAPTURE_MAP[board[s]][inactive_side_king_square]
):
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
# Side with the move must not be in check from more than two squares.
# Cannot use count_attacks_on_square_by_side method because attributes
# are not set until the position is ok.
gap = GAPS[active_side_king_square]
if (
len(
[
s
for s in inactive_side_squares
if (
not board_bitmap & gap[s]
and SQUARE_BITS[s]
& PIECE_CAPTURE_MAP[board[s]][active_side_king_square]
)
]
)
> FEN_MAXIMUM_PIECES_GIVING_CHECK
):
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
self.board_bitmap = board_bitmap
self.board = board
if active_side == FEN_WHITE:
self.occupied_squares[:] = (
active_side_squares,
inactive_side_squares,
)
else:
self.occupied_squares[:] = (
inactive_side_squares,
active_side_squares,
)
self.piece_locations = piece_locations
self.ravstack[:] = [
(
None,
(
tuple(board),
FEN_SIDES[active_side],
castling,
en_passant,
int(halfmove_count),
int(fullmove_number),
),
)
]
self.active_side = FEN_SIDES[active_side]
self.castling = castling
self.en_passant = en_passant
self.halfmove_count = int(halfmove_count)
self.fullmove_number = int(fullmove_number)
self._initial_fen = fen
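    # Example (illustrative): prime the parser with an arbitrary position
    # before playing movetext from that point, e.g.
    #     p = PGN131()
    #     p.set_position_fen("4k3/8/8/8/8/8/8/4K2R w K - 0 1")
    #     # p.active_side == WHITE_SIDE and p.castling == "K" when the FEN
    #     # passes all the checks above.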
def _play_move(
self, pgn_piece, pgn_from, pgn_capture, pgn_tosquare, pgn_promote
):
tosquare = MAP_PGN_SQUARE_NAME_TO_FEN_ORDER[pgn_tosquare]
piece = MAPPIECE[self.active_side][pgn_piece]
g = GAPS[tosquare]
b = self.board
bb = self.board_bitmap
if pgn_capture == CAPTURE_MOVE:
pts = PIECE_CAPTURE_MAP[piece][tosquare]
else:
pts = PIECE_MOVE_MAP[piece][tosquare]
from_squares = [
s
for s in self.piece_locations[piece]
if (SQUARE_BITS[s] & pts and not bb & g[s])
]
if len(from_squares) > 1:
if pgn_from:
fm = MAPFILE.get(pgn_from[0])
if fm is not None:
fm = FILES[fm]
from_squares = [
s for s in from_squares if SQUARE_BITS[s] & fm
]
if len(from_squares) > 1:
fm = MAPROW.get(pgn_from[-1])
if fm is not None:
fm = RANKS[fm]
from_squares = [
s for s in from_squares if SQUARE_BITS[s] & fm
]
if len(from_squares) > 1:
inactive_side_squares = self.occupied_squares[
OTHER_SIDE[self.active_side]
]
for active_side_king_square in self.piece_locations[
SIDE_KING[self.active_side]
]:
pass # set active_side_king_square without pop() and add().
gk = GAPS[active_side_king_square]
pinned_to_king = set()
for si in inactive_side_squares:
if (
PIECE_CAPTURE_MAP[b[si]][active_side_king_square]
& SQUARE_BITS[si]
):
for s in from_squares:
if gk[si] & SQUARE_BITS[s]:
if not (
(
bb ^ SQUARE_BITS[s]
| SQUARE_BITS[tosquare]
)
& gk[si]
):
if si != tosquare:
pinned_to_king.add(s)
from_squares = [
s for s in from_squares if s not in pinned_to_king
]
if pgn_capture == PLAIN_MOVE and b[tosquare] == piece:
# If moving piece is on tosquare and the next token is a square
# identity try tosquare as fromsquare and next token as tosquare
# for the piece move.
# Only applies to Q B N non-capture moves where the moving side
# has more than 2 of the moving piece so it is possible there
# are two pieces of the moving kind on the same rank and the
# same file at the same time which can reach the tosquare.
# Check that there are at least three pieces of one kind which
# can move to the same square and note the possibilities for
# evaluation in two subsequent states where the next tokens are
# readily available for comparison. The next two tokens must be
# '' and a square identity and the square identity must be one
# of the possibilities.
if b.count(piece) > 2:
if pgn_piece in PGN_FROM_SQUARE_DISAMBIGUATION:
self._state = PGN_DISAMBIGUATE_MOVE
self._rewind_state = self._state
return
self._illegal_play_move()
return
# After the disambiguation test, plain move to square containing piece
# which is moving, because queen moves like both rook and bishop.
if len(from_squares) != 1:
self._illegal_play_move()
return
piece_locations = self.piece_locations
fromsquare = from_squares.pop()
        # pgn_from is null, a file name, a rank name, or a square name.  If
        # not null it must be part of, or equal to, the square name of
        # fromsquare.
if pgn_from is not None:
if pgn_from not in MAP_FEN_ORDER_TO_PGN_SQUARE_NAME[fromsquare]:
self._illegal_play_move()
return
if pgn_capture == CAPTURE_MOVE:
inactive_side_squares = self.occupied_squares[
OTHER_SIDE[self.active_side]
]
if tosquare not in inactive_side_squares:
if pgn_piece != PGN_PAWN:
self._illegal_play_move()
return
elif pgn_tosquare != self.en_passant:
self._illegal_play_move()
return
# Remove pawn captured en passant.
elif self.en_passant in FEN_WHITE_CAPTURE_EN_PASSANT:
eps = FEN_WHITE_CAPTURE_EN_PASSANT[self.en_passant]
b[eps] = NOPIECE
inactive_side_squares.remove(eps)
piece_locations[BPAWN].remove(eps)
self.board_bitmap &= self.board_bitmap ^ SQUARE_BITS[eps]
elif self.en_passant in FEN_BLACK_CAPTURE_EN_PASSANT:
eps = FEN_BLACK_CAPTURE_EN_PASSANT[self.en_passant]
b[eps] = NOPIECE
inactive_side_squares.remove(eps)
piece_locations[WPAWN].remove(eps)
self.board_bitmap &= self.board_bitmap ^ SQUARE_BITS[eps]
else:
self._illegal_play_move()
return
else:
inactive_side_squares.remove(tosquare)
piece_locations[b[tosquare]].remove(tosquare)
self.en_passant = FEN_NULL
self.halfmove_count = 0
elif SQUARE_BITS[tosquare] & bb:
self._illegal_play_move()
return
elif pgn_piece == PGN_PAWN:
# Moves like 'b1' for black, and 'b8' for white, are passed earlier
# to cope with disambiguating queen moves like 'Qd1f1'.
if not (
SQUARE_BITS[tosquare]
& PAWN_MOVE_DESITINATION[self.active_side]
):
if not pgn_promote:
self._illegal_play_move()
return
self.halfmove_count = 0
if (
SQUARE_BITS[fromsquare] & EN_PASSANT_FROM_SQUARES
and SQUARE_BITS[tosquare] & EN_PASSANT_TO_SQUARES
):
self.en_passant = (
pgn_tosquare[0]
+ FEN_EN_PASSANT_TARGET_RANK[pgn_tosquare[1]]
)
else:
self.en_passant = FEN_NULL
else:
self.en_passant = FEN_NULL
self.halfmove_count = self.halfmove_count + 1
active_side_squares = self.occupied_squares[self.active_side]
# Remove moving piece from current square.
b[fromsquare] = NOPIECE
active_side_squares.remove(fromsquare)
piece_locations[piece].remove(fromsquare)
self.board_bitmap &= self.board_bitmap ^ SQUARE_BITS[fromsquare]
# Put moving piece on new square.
b[tosquare] = piece
active_side_squares.add(tosquare)
piece_locations[piece].add(tosquare)
self.board_bitmap |= SQUARE_BITS[tosquare]
# Replace moving pawn on promotion and update inactive king square.
if pgn_promote:
piece_locations[b[tosquare]].remove(tosquare)
b[tosquare] = MAPPIECE[self.active_side][pgn_promote]
piece_locations[b[tosquare]].add(tosquare)
# Undo move if it leaves king in check.
if self.is_active_king_attacked():
self.reset_position(self.ravstack[-1][-1])
self._illegal_play_move()
return
        # Castling availability.
# tosquare tests deal with capture of rooks which have not moved.
# For real games the top condition is false for more than half the game
# and the next condition is usually false.
if self.castling != FEN_NULL:
if (
SQUARE_BITS[fromsquare] | SQUARE_BITS[tosquare]
) & CASTLING_AVAILABITY_SQUARES:
if fromsquare == CASTLING_W:
self.castling = self.castling.replace(WKING, NOPIECE)
self.castling = self.castling.replace(WQUEEN, NOPIECE)
elif fromsquare == CASTLING_WK:
self.castling = self.castling.replace(WKING, NOPIECE)
elif fromsquare == CASTLING_WQ:
self.castling = self.castling.replace(WQUEEN, NOPIECE)
elif fromsquare == CASTLING_B:
self.castling = self.castling.replace(BKING, NOPIECE)
self.castling = self.castling.replace(BQUEEN, NOPIECE)
elif fromsquare == CASTLING_BK:
self.castling = self.castling.replace(BKING, NOPIECE)
elif fromsquare == CASTLING_BQ:
self.castling = self.castling.replace(BQUEEN, NOPIECE)
elif tosquare == CASTLING_WK:
self.castling = self.castling.replace(WKING, NOPIECE)
elif tosquare == CASTLING_WQ:
self.castling = self.castling.replace(WQUEEN, NOPIECE)
elif tosquare == CASTLING_BK:
self.castling = self.castling.replace(BKING, NOPIECE)
elif tosquare == CASTLING_BQ:
self.castling = self.castling.replace(BQUEEN, NOPIECE)
if self.castling == NOPIECE:
self.castling = FEN_NULL
self.add_move_to_game()
def _play_castles(self, token):
# Verify castling availability and pick castling rules.
if token.startswith(O_O_O):
if self.active_side == WHITE_SIDE:
if WQUEEN not in self.castling:
self._illegal_play_castles()
return
castling_squares = CASTLING_SQUARES[WQUEEN]
else:
if BQUEEN not in self.castling:
self._illegal_play_castles()
return
castling_squares = CASTLING_SQUARES[BQUEEN]
elif token.startswith(O_O):
if self.active_side == WHITE_SIDE:
if WKING not in self.castling:
self._illegal_play_castles()
return
castling_squares = CASTLING_SQUARES[WKING]
else:
if BKING not in self.castling:
self._illegal_play_castles()
return
castling_squares = CASTLING_SQUARES[BKING]
else:
self._illegal_play_castles()
return
bb = self.board_bitmap
board = self.board
piece_locations = self.piece_locations
active_side_squares = self.occupied_squares[self.active_side]
active_side_king_locations = piece_locations[
SIDE_KING[self.active_side]
]
if self.active_side == WHITE_SIDE:
active_side_rook_locations = piece_locations[WROOK]
else:
active_side_rook_locations = piece_locations[BROOK]
for active_side_king_square in active_side_king_locations:
pass # set active_side_king_square without pop() and add().
# Confirm board position is consistent with castling availability.
if (
active_side_king_square != castling_squares[0]
or board[castling_squares[0]] != castling_squares[5]
or board[castling_squares[1]] != castling_squares[4]
):
self._illegal_play_castles()
return
# Squares between king and castling rook must be empty.
for squares in castling_squares[2:4]:
for s in squares:
if SQUARE_BITS[s] & bb:
self._illegal_play_castles()
return
# Castling king must not be in check.
if self.is_square_attacked_by_side(
castling_squares[0], OTHER_SIDE[self.active_side]
):
self._illegal_play_castles()
return
# Castling king's destination square, and the one between, must not be
# attacked by the other side.
for square in castling_squares[2]:
if self.is_square_attacked_by_side(
square, OTHER_SIDE[self.active_side]
):
self._illegal_play_castles()
return
king_square = castling_squares[0]
new_king_square = castling_squares[2][1]
rook_square = castling_squares[1]
new_rook_square = castling_squares[2][0]
# Put moving pieces on new squares.
board[new_king_square] = board[king_square]
board[new_rook_square] = board[rook_square]
active_side_squares.add(new_king_square)
active_side_king_locations.add(new_king_square)
active_side_squares.add(new_rook_square)
active_side_rook_locations.add(new_rook_square)
self.board_bitmap |= (
SQUARE_BITS[new_king_square] | SQUARE_BITS[new_rook_square]
)
# Remove moving pieces from current squares.
board[king_square] = NOPIECE
board[rook_square] = NOPIECE
active_side_squares.remove(king_square)
active_side_king_locations.remove(king_square)
active_side_squares.remove(rook_square)
active_side_rook_locations.remove(rook_square)
self.board_bitmap &= self.board_bitmap ^ (
SQUARE_BITS[king_square] | SQUARE_BITS[rook_square]
)
        # Castling availability.
if self.active_side == WHITE_SIDE:
self.castling = self.castling.replace(WKING, NOPIECE)
self.castling = self.castling.replace(WQUEEN, NOPIECE)
else:
self.castling = self.castling.replace(BKING, NOPIECE)
self.castling = self.castling.replace(BQUEEN, NOPIECE)
if self.castling == NOPIECE:
self.castling = FEN_NULL
# Cannot be en-passant
self.en_passant = FEN_NULL
self.halfmove_count = self.halfmove_count + 1
self.add_move_to_game()
def is_active_king_attacked(self):
b = self.board
bb = self.board_bitmap
# Only one element in this container.
for ks in self.piece_locations[SIDE_KING[self.active_side]]:
g = GAPS[ks]
for s in self.occupied_squares[OTHER_SIDE[self.active_side]]:
if (
not bb & g[s]
and SQUARE_BITS[s] & PIECE_CAPTURE_MAP[b[s]][ks]
):
return True
return False
def is_square_attacked_by_side(self, square, side):
g = GAPS[square]
b = self.board
bb = self.board_bitmap
for s in self.occupied_squares[side]:
if (
not bb & g[s]
and SQUARE_BITS[s] & PIECE_CAPTURE_MAP[b[s]][square]
):
return True
return False
def count_attacks_on_square_by_side(self, square, side):
g = GAPS[square]
b = self.board
bb = self.board_bitmap
return len(
[
s
for s in self.occupied_squares[side]
if (
not bb & g[s]
and SQUARE_BITS[s] & PIECE_CAPTURE_MAP[b[s]][square]
)
]
)
def add_move_to_game(self):
self.active_side = OTHER_SIDE[self.active_side]
if self.active_side == WHITE_SIDE:
self.fullmove_number += 1
self.ravstack[-1] = (
self.ravstack[-1][-1],
(
tuple(self.board),
self.active_side,
self.castling,
self.en_passant,
self.halfmove_count,
self.fullmove_number,
),
)
def collect_token(self, match):
self.tokens.append(match)
def collect_game_tokens(self):
self.collected_game = (
self.tags_in_order,
{
m.group(IFG_TAG_SYMBOL): m.group(IFG_TAG_STRING_VALUE)
for m in self.tags_in_order
},
self.tokens,
self.error_tokens,
)
def _play_disambiguated_move(
self, pgn_piece, pgn_fromsquare, pgn_tosquare
):
fromsquare = MAP_PGN_SQUARE_NAME_TO_FEN_ORDER[pgn_fromsquare]
tosquare = MAP_PGN_SQUARE_NAME_TO_FEN_ORDER[pgn_tosquare]
piece = MAPPIECE[self.active_side][pgn_piece]
if fromsquare not in self.piece_locations[piece]:
self._illegal_play_disambiguated_move()
return
if not (
SQUARE_BITS[fromsquare] & PIECE_MOVE_MAP[piece][tosquare]
and not self.board_bitmap & GAPS[tosquare][fromsquare]
):
self._illegal_play_disambiguated_move()
return
if SQUARE_BITS[tosquare] & self.board_bitmap:
self._illegal_play_disambiguated_move()
return
else:
self.halfmove_count = self.halfmove_count + 1
b = self.board
piece_locations = self.piece_locations
active_side_squares = self.occupied_squares[self.active_side]
# Remove moving piece from current square.
b[fromsquare] = NOPIECE
active_side_squares.remove(fromsquare)
piece_locations[piece].remove(fromsquare)
self.board_bitmap &= self.board_bitmap ^ SQUARE_BITS[fromsquare]
# Put moving piece on new square.
b[tosquare] = piece
active_side_squares.add(tosquare)
piece_locations[piece].add(tosquare)
self.board_bitmap |= SQUARE_BITS[tosquare]
# Undo move if it leaves king in check.
if self.is_active_king_attacked():
self.reset_position(self.ravstack[-1][-1])
self._illegal_play_disambiguated_move()
return
        # Castling availability is not affected because rooks cannot be involved
# in moves which need disambiguation.
# Cannot be en-passant
self.en_passant = FEN_NULL
self.add_move_to_game()
    # Maybe this should not be a method now, but retain the shape of the
    # pre-FEN class code for ease of comparison until everything is known to
    # work.  Just say self._fen = ... where the method is called.
def reset_position(self, position):
(
board,
self.active_side,
self.castling,
self.en_passant,
self.halfmove_count,
self.fullmove_number,
) = position
self.board[:] = list(board)
occupied_squares = self.occupied_squares
for side in occupied_squares:
side.clear()
piece_locations = self.piece_locations
for piece in piece_locations.values():
piece.clear()
board_bitmap = 0
for square, piece in enumerate(board):
if piece in WPIECES:
occupied_squares[0].add(square)
piece_locations[piece].add(square)
board_bitmap |= SQUARE_BITS[square]
elif piece in BPIECES:
occupied_squares[1].add(square)
piece_locations[piece].add(square)
board_bitmap |= SQUARE_BITS[square]
self.board_bitmap = board_bitmap
def _start_variation(self):
self.ravstack.append((None, self.ravstack[-1][0]))
self.reset_position(self.ravstack[-1][-1])
def _end_variation(self):
try:
del self.ravstack[-1]
try:
self.reset_position(self.ravstack[-1][-1])
except:
pass
except:
pass
def _searching(self, match):
mg = match.group
if mg(IFG_START_TAG):
self.tags_in_order.append(match)
if mg(IFG_TAG_SYMBOL) == TAG_FEN:
self.set_position_fen(mg(IFG_TAG_STRING_VALUE))
self._state = PGN_COLLECTING_TAG_PAIRS
self._rewind_state = self._state
self._move_error_state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
if mg(IFG_PIECE_SQUARE):
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
self.tokens.append(match)
self._play_move(
mg(IFG_PIECE_MOVE), "", "", mg(IFG_PIECE_SQUARE), ""
)
return
if mg(IFG_PAWN_SQUARE):
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
self.tokens.append(match)
self._play_move("", "", "", mg(IFG_PAWN_SQUARE), "")
return
if mg(IFG_PIECE_CAPTURE_SQUARE):
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
self.tokens.append(match)
self._play_move(
mg(IFG_PIECE_CAPTURE) or mg(IFG_KING_CAPTURE),
mg(IFG_PIECE_CAPTURE_FROM),
mg(IFG_PIECE_TAKES),
mg(IFG_PIECE_CAPTURE_SQUARE),
"",
)
return
if mg(IFG_PAWN_CAPTURE_SQUARE):
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
self.tokens.append(match)
self._play_move(
"",
mg(IFG_PAWN_CAPTURE_FROM_FILE),
mg(IFG_PAWN_TAKES),
mg(IFG_PAWN_CAPTURE_SQUARE),
"",
)
return
if mg(IFG_PIECE_CHOICE_SQUARE):
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
self.tokens.append(match)
self._play_move(
mg(IFG_PIECE_CHOICE),
mg(IFG_PIECE_CHOICE_FILE_OR_RANK),
"",
mg(IFG_PIECE_CHOICE_SQUARE),
"",
)
return
if mg(IFG_CASTLES):
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
self.tokens.append(match)
self._play_castles(mg(IFG_CASTLES))
return
if mg(IFG_PAWN_PROMOTE_SQUARE):
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
self.tokens.append(match)
self._play_move(
"",
mg(IFG_PAWN_PROMOTE_FROM_FILE),
mg(IFG_PAWN_TAKES_PROMOTE),
mg(IFG_PAWN_PROMOTE_SQUARE),
mg(IFG_PAWN_PROMOTE_PIECE)[1],
)
return
if mg(IFG_COMMENT):
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
self.collect_token(match)
return
if mg(IFG_NAG):
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
self.collect_token(match)
return
if mg(IFG_COMMENT_TO_EOL):
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
self.collect_token(match)
return
        # Captured tokens not accepted when searching for the start of a game.
if mg(IFG_START_RAV):
self.error_tokens.append(mg())
self._state = PGN_COLLECTING_NON_WHITESPACE_WHILE_SEARCHING
return
if mg(IFG_END_RAV):
self.error_tokens.append(mg())
self._state = PGN_COLLECTING_NON_WHITESPACE_WHILE_SEARCHING
return
if mg(IFG_TERMINATION):
self._termination_while_searching(match)
return
# Action for non-captured groups is decided by looking at whole token.
string = mg()
if not string.strip():
return
if string.isdigit():
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
return
if string == FULLSTOP:
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
return
# Only other groups with length > 1:
# '<reserved>'
# '%escaped\n'
# are not captured and are ignored.
if len(string) > 1:
return
self.error_tokens.append(string)
self._state = PGN_COLLECTING_NON_WHITESPACE_WHILE_SEARCHING
return
def _searching_after_error_in_rav(self, match):
if match.group(IFG_START_RAV):
self.error_tokens.append(match.group())
self._ravstack_length += 1
return
if match.group(IFG_END_RAV):
if self._ravstack_length == len(self.ravstack):
self._convert_error_tokens_to_token()
self.collect_token(match)
self._end_variation()
self.error_tokens = []
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
if self._ravstack_length > 2:
self._move_error_state = PGN_SEARCHING_AFTER_ERROR_IN_RAV
else:
self._move_error_state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
del self._ravstack_length
else:
self.error_tokens.append(match.group())
self._ravstack_length -= 1
return
if match.group(IFG_TERMINATION):
self._convert_error_tokens_to_token()
self.collect_token(match)
self.collect_game_tokens()
self._initial_fen = False
self.tokens = []
self.error_tokens = []
self.tags_in_order = []
self._state = PGN_SEARCHING
self._rewind_state = self._state
del self._ravstack_length
return
if match.group(IFG_START_TAG):
self._convert_error_tokens_to_token()
self.collect_game_tokens()
self._initial_fen = False
self.tokens = []
self.error_tokens = []
self.tags_in_order = [match]
if match.group(IFG_TAG_SYMBOL) == TAG_FEN:
self.set_position_fen(match.group(IFG_TAG_STRING_VALUE))
self._state = PGN_COLLECTING_TAG_PAIRS
self._rewind_state = self._state
self._move_error_state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
del self._ravstack_length
return
self.error_tokens.append(match.group())
def _searching_after_error_in_game(self, match):
if match.group(IFG_TERMINATION):
self._convert_error_tokens_to_token()
self.collect_token(match)
self.collect_game_tokens()
self._initial_fen = False
self.tokens = []
self.error_tokens = []
self.tags_in_order = []
self._state = PGN_SEARCHING
self._rewind_state = self._state
return
if match.group(IFG_START_TAG):
self._convert_error_tokens_to_token()
self.collect_game_tokens()
self._initial_fen = False
self.tokens = []
self.error_tokens = []
self.tags_in_order = [match]
if match.group(IFG_TAG_SYMBOL) == TAG_FEN:
self.set_position_fen(match.group(IFG_TAG_STRING_VALUE))
self._state = PGN_COLLECTING_TAG_PAIRS
self._rewind_state = self._state
self._move_error_state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
self.error_tokens.append(match.group())
def _collecting_tag_pairs(self, match):
mg = match.group
if mg(IFG_START_TAG):
self.tags_in_order.append(match)
if mg(IFG_TAG_SYMBOL) == TAG_FEN:
self.set_position_fen(mg(IFG_TAG_STRING_VALUE))
return
if mg(IFG_PIECE_SQUARE):
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
self.tokens.append(match)
self._play_move(
mg(IFG_PIECE_MOVE), "", "", mg(IFG_PIECE_SQUARE), ""
)
return
if mg(IFG_PAWN_SQUARE):
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
self.tokens.append(match)
self._play_move("", "", "", mg(IFG_PAWN_SQUARE), "")
return
if mg(IFG_PIECE_CAPTURE_SQUARE):
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
self.tokens.append(match)
self._play_move(
mg(IFG_PIECE_CAPTURE) or mg(IFG_KING_CAPTURE),
mg(IFG_PIECE_CAPTURE_FROM),
mg(IFG_PIECE_TAKES),
mg(IFG_PIECE_CAPTURE_SQUARE),
"",
)
return
if mg(IFG_PAWN_CAPTURE_SQUARE):
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
self.tokens.append(match)
self._play_move(
"",
mg(IFG_PAWN_CAPTURE_FROM_FILE),
mg(IFG_PAWN_TAKES),
mg(IFG_PAWN_CAPTURE_SQUARE),
"",
)
return
if mg(IFG_PIECE_CHOICE_SQUARE):
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
self.tokens.append(match)
self._play_move(
mg(IFG_PIECE_CHOICE),
mg(IFG_PIECE_CHOICE_FILE_OR_RANK),
"",
mg(IFG_PIECE_CHOICE_SQUARE),
"",
)
return
if mg(IFG_CASTLES):
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
self.tokens.append(match)
self._play_castles(mg(IFG_CASTLES))
return
if mg(IFG_PAWN_PROMOTE_SQUARE):
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
self.tokens.append(match)
self._play_move(
"",
mg(IFG_PAWN_PROMOTE_FROM_FILE),
mg(IFG_PAWN_TAKES_PROMOTE),
mg(IFG_PAWN_PROMOTE_SQUARE),
mg(IFG_PAWN_PROMOTE_PIECE)[1],
)
return
if mg(IFG_TERMINATION):
if not self._initial_fen:
self.set_position_fen()
self.collect_token(match)
self.collect_game_tokens()
self._initial_fen = False
self.tokens = []
self.error_tokens = []
self.tags_in_order = []
self._state = PGN_SEARCHING
self._rewind_state = self._state
return
if mg(IFG_COMMENT):
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
self.collect_token(match)
return
if mg(IFG_NAG):
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
self.collect_token(match)
return
if mg(IFG_COMMENT_TO_EOL):
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
self.collect_token(match)
return
        # Captured tokens not accepted while collecting tag pairs.
if mg(IFG_START_RAV):
self.error_tokens.append(mg())
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
if mg(IFG_END_RAV):
self.error_tokens.append(mg())
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
# Action for non-captured groups is decided by looking at whole token.
string = mg()
if not string.strip():
return
if string.isdigit():
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
return
if string == FULLSTOP:
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
return
# Only other groups with length > 1:
# '<reserved>'
# '%escaped\n'
# are not captured and are ignored.
if len(string) > 1:
return
self.error_tokens.append(string)
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
def _collecting_movetext(self, match):
mg = match.group
if mg(IFG_PIECE_SQUARE):
self.tokens.append(match)
self._play_move(
mg(IFG_PIECE_MOVE), "", "", mg(IFG_PIECE_SQUARE), ""
)
return
if mg(IFG_PAWN_SQUARE):
self.tokens.append(match)
self._play_move("", "", "", mg(IFG_PAWN_SQUARE), "")
return
if mg(IFG_PIECE_CAPTURE_SQUARE):
self.tokens.append(match)
self._play_move(
mg(IFG_PIECE_CAPTURE) or mg(IFG_KING_CAPTURE),
mg(IFG_PIECE_CAPTURE_FROM),
mg(IFG_PIECE_TAKES),
mg(IFG_PIECE_CAPTURE_SQUARE),
"",
)
return
if mg(IFG_PAWN_CAPTURE_SQUARE):
self.tokens.append(match)
self._play_move(
"",
mg(IFG_PAWN_CAPTURE_FROM_FILE),
mg(IFG_PAWN_TAKES),
mg(IFG_PAWN_CAPTURE_SQUARE),
"",
)
return
if mg(IFG_PIECE_CHOICE_SQUARE):
self.tokens.append(match)
self._play_move(
mg(IFG_PIECE_CHOICE),
mg(IFG_PIECE_CHOICE_FILE_OR_RANK),
"",
mg(IFG_PIECE_CHOICE_SQUARE),
"",
)
return
if mg(IFG_CASTLES):
self.tokens.append(match)
self._play_castles(mg(IFG_CASTLES))
return
if mg(IFG_PAWN_PROMOTE_SQUARE):
self.tokens.append(match)
self._play_move(
"",
mg(IFG_PAWN_PROMOTE_FROM_FILE),
mg(IFG_PAWN_TAKES_PROMOTE),
mg(IFG_PAWN_PROMOTE_SQUARE),
mg(IFG_PAWN_PROMOTE_PIECE)[1],
)
return
if mg(IFG_START_RAV):
self._start_variation()
self.collect_token(match)
return
if mg(IFG_END_RAV):
if len(self.ravstack) > 1:
self._end_variation()
self.collect_token(match)
else:
self.error_tokens.append(mg())
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
if mg(IFG_TERMINATION):
self.collect_token(match)
self.collect_game_tokens()
self._initial_fen = False
self.tokens = []
self.error_tokens = []
self.tags_in_order = []
self._state = PGN_SEARCHING
self._rewind_state = self._state
return
if mg(IFG_COMMENT):
self.collect_token(match)
return
if mg(IFG_NAG):
self.collect_token(match)
return
if mg(IFG_COMMENT_TO_EOL):
self.collect_token(match)
return
# Other groups are not put on self.tokens because they are not shown in
        # game displays and do not need to be associated with a position on
# the board.
# The non-captured groups which are accepted without action.
string = mg()
if not string.strip():
return
if string.isdigit():
return
if string == FULLSTOP:
return
        # Current movetext finishes in error with no termination; assume the
        # start of a new game.
if mg(IFG_START_TAG):
self._convert_error_tokens_to_token()
self.collect_game_tokens()
self._initial_fen = False
self.tokens = []
self.error_tokens = []
self.tags_in_order = [match]
if mg(IFG_TAG_SYMBOL) == TAG_FEN:
self.set_position_fen(mg(IFG_TAG_STRING_VALUE))
self._state = PGN_COLLECTING_TAG_PAIRS
self._rewind_state = self._state
self._move_error_state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
# Only other groups with length > 1:
# '<reserved>'
# '%escaped\n'
# are not captured and are ignored.
if len(string) > 1:
return
# The non-captured groups which cause an error condition.
self.error_tokens.append(string)
self._ravstack_length = len(self.ravstack)
if self._ravstack_length > 1:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_RAV
else:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
def _collecting_non_whitespace_while_searching(self, match):
if match.group(IFG_START_TAG):
self._convert_error_tokens_to_token()
self.collect_game_tokens()
self._initial_fen = False
self.tokens = []
self.error_tokens = []
self.tags_in_order = [match]
if match.group(IFG_TAG_SYMBOL) == TAG_FEN:
self.set_position_fen(match.group(IFG_TAG_STRING_VALUE))
self._state = PGN_COLLECTING_TAG_PAIRS
self._rewind_state = self._state
self._move_error_state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
if not match.group().split():
self.error_tokens.append(match.group())
return
self.error_tokens.append(match.group())
def _disambiguate_move(self, match):
mg = match.group
if mg(IFG_PAWN_SQUARE):
start = self.tokens.pop()
match = re_disambiguate_error.match(start.group() + mg())
if match is None:
match = re_disambiguate_non_move.match(start.group() + mg())
self.tokens.append(match)
self._illegal_play_disambiguated_move()
return
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
self.tokens.append(match)
self._play_disambiguated_move(
start.group(IFG_PIECE_MOVE),
start.group(IFG_PIECE_SQUARE),
mg(IFG_PAWN_SQUARE),
)
return
self.error_tokens.append(self.tokens.pop().group() + mg())
self._ravstack_length = len(self.ravstack)
if self._ravstack_length > 1:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_RAV
else:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
def _illegal_play_move(self):
self._state = self._move_error_state
et = self.tokens.pop()
self.error_tokens.append(et.group())
def _illegal_play_castles(self):
self._illegal_play_move()
def _illegal_play_disambiguated_move(self):
self._illegal_play_move()
def _convert_error_tokens_to_token(self):
self.collect_token(
re_tokens.match(
"".join(
(
ERROR_START_COMMENT,
"".join(self.error_tokens).replace(
END_COMMENT, ESCAPE_END_COMMENT
),
END_COMMENT,
)
)
)
)
# Should this method clear self.error_tokens too?
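    # Illustrative sketch of _convert_error_tokens_to_token (added, not from
    # the original code): if the error tokens collected so far were, say,
    # ['Qx9', ' ', 'e5'], the method joins them, escapes any END_COMMENT
    # character so the text cannot terminate the comment early, wraps the
    # result between ERROR_START_COMMENT and END_COMMENT, and re-matches it
    # with re_tokens so the whole error run becomes a single comment-like
    # token in self.tokens.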
def _termination_while_searching(self, match):
self.error_tokens.append(match.group())
self._state = PGN_COLLECTING_NON_WHITESPACE_WHILE_SEARCHING
def __eq__(self, other):
if len(self.collected_game[2]) != len(other.collected_game[2]):
return False
if self.collected_game[3] or other.collected_game[3]:
return False
for ta, tb in zip(self.collected_game[2], other.collected_game[2]):
if ta.group() != tb.group():
return False
return True
def __ne__(self, other):
return not self == other
def get_fen_string(description):
(
board,
side_to_move,
castle_options,
ep_square,
halfmoves,
fullmoves,
) = description
fenboard = []
fenrank = []
gap_length = 0
for e, r in enumerate(board):
if not e % BOARDSIDE:
if gap_length:
fenrank.append(str(gap_length))
gap_length = 0
if len(fenrank):
fenboard.append("".join(fenrank))
fenrank = []
if r == NOPIECE:
gap_length += 1
continue
if gap_length:
fenrank.append(str(gap_length))
gap_length = 0
fenrank.append(r)
if gap_length:
fenrank.append(str(gap_length))
fenboard.append("".join(fenrank))
return " ".join(
(
"/".join(fenboard),
FEN_TOMOVE[side_to_move],
castle_options,
ep_square,
str(halfmoves),
str(fullmoves),
)
)
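# Illustrative example (added, not part of the original module): for the
# standard initial position description, get_fen_string is expected to return
# the usual starting FEN,
#   'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1',
# assuming INITIAL_BOARD, FEN_TOMOVE and the other constants hold their
# conventional values.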
# Subclass PGN131 to collect inconsistent FENs.
class PGN131Fen(PGN131):
def __init__(self):
super().__init__()
self.position_fens = []
self.position_strings = []
self.board_fens = []
def add_move_to_game(self):
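        # Descriptive note (added): after the superclass has applied the move,
        # this override looks for positions where the castling field still
        # claims a right although the corresponding rook has left its home
        # square; such positions are recorded as FEN strings, position
        # strings, and a description with the castling field corrected.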
super().add_move_to_game()
board = self.board
castling = self.castling
if (
(board[0] != BROOK and BQUEEN in castling)
or (board[7] != BROOK and BKING in castling)
or (board[56] != WROOK and WQUEEN in castling)
or (board[63] != WROOK and WKING in castling)
):
self.position_fens.append(
get_fen_string(
(
board,
self.active_side,
castling,
self.en_passant,
self.halfmove_count,
self.fullmove_number,
)
)
)
self.position_strings.append(
get_position_string(
(
board,
self.active_side,
castling,
self.en_passant,
self.halfmove_count,
self.fullmove_number,
)
)
)
corrected_castling = castling
if board[0] != BROOK:
corrected_castling = corrected_castling.replace(BQUEEN, "")
if board[7] != BROOK:
corrected_castling = corrected_castling.replace(BKING, "")
if board[56] != WROOK:
corrected_castling = corrected_castling.replace(WQUEEN, "")
if board[63] != WROOK:
corrected_castling = corrected_castling.replace(WKING, "")
self.board_fens.append(
(
tuple(board),
self.active_side,
corrected_castling,
self.en_passant,
self.halfmove_count,
self.fullmove_number,
)
)
class PGNUpdate131(PGN131):
def __init__(self):
super().__init__()
self.positions = []
self.piecesquaremoves = []
self.piecemoves = []
self.squaremoves = []
self.movenumber = None
self.variationnumber = None
self.currentvariation = None
self._variation = None
def set_position_fen(self, fen=None):
super().set_position_fen(fen=fen)
if self._initial_fen:
self.positions = []
self.piecesquaremoves = []
            # It is assumed better to have these indices, which respectively
            # omit the square and piece components, than to process the
            # piecesquaremoves index to deduce them when required.
self.piecemoves = []
self.squaremoves = []
if self.active_side == WHITE_SIDE:
self.movenumber = [(self.fullmove_number - 1) * 2]
else:
self.movenumber = [self.fullmove_number * 2 - 1]
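            # Worked example (added, arithmetic only): for the default start
            # position (fullmove 1, white to move) movenumber starts at [0];
            # for a FEN with black to move at fullmove 12 it starts at
            # [12 * 2 - 1] == [23].  add_move_to_game increments the value
            # before using it, so the first recorded move gets 1 or 24.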
self.variationnumber = [0]
self._variation = "".join(
_convert_integer_to_length_hex(i) for i in self.variationnumber
)
def add_move_to_game(self):
super().add_move_to_game()
# Move numbers must be n, n+1, n+2, ... with repeats for Recursive
# Annotation Variations for a move.
# Variation numbers must be unique for each Recursive Annotation
# Variation, where all moves at the same level within a '()' get the
# same unique number.
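        # Illustrative sketch (added, hedged reading of the code below): for
        # movetext like 1. e4 e5 ( 1... c5 ) 2. Nf3 the mainline half-moves
        # are numbered 1, 2, 3, ..., a move inside the RAV may share a
        # movenumber with a mainline move, and the variationnumber component
        # appended to the key is what keeps mainline and RAV entries distinct.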
if len(self.ravstack) != len(self.movenumber):
while len(self.ravstack) < len(self.movenumber):
self.movenumber.pop()
self.variationnumber.pop()
while len(self.ravstack) > len(self.movenumber):
self.movenumber.append(self.movenumber[-1])
self._variation = "".join(
_convert_integer_to_length_hex(i) for i in self.variationnumber
)
self.movenumber[-1] += 1
movenumber = _convert_integer_to_length_hex(self.movenumber[-1])
board = self.board
piecesquaremoves = self.piecesquaremoves
piecemoves = self.piecemoves
squaremoves = self.squaremoves
mfotpsn = MAP_FEN_ORDER_TO_PGN_SQUARE_NAME
mp = MAP_PGN_PIECE_TO_CQL_COMPOSITE_PIECE
pieces = []
mv = movenumber + self._variation
for square, piece in enumerate(board):
if piece:
pieces.append(piece)
# piecesquaremoves.append(mv + piece + mfotpsn[square])
# squaremoves.append(mv + mp[piece] + mfotpsn[square])
                # Use 'square piece' order rather than the 'piece square'
                # order shown in the commented-out lines above.
piecesquaremoves.append(mv + mfotpsn[square] + piece)
squaremoves.append(mv + mfotpsn[square] + mp[piece])
for piece in set(pieces):
piecemoves.append(mv + piece)
self.positions.append(
"".join(
(
self.board_bitmap.to_bytes(8, "big").decode("iso-8859-1"),
"".join(pieces),
FEN_TOMOVE[self.active_side],
self.en_passant,
self.castling,
)
)
)
def collect_game_tokens(self):
self.collected_game = (
self.tags_in_order,
{
m.group(IFG_TAG_SYMBOL): m.group(IFG_TAG_STRING_VALUE)
for m in self.tags_in_order
},
self.tokens,
self.error_tokens,
self.positions,
self.piecesquaremoves,
self.piecemoves,
self.squaremoves,
)
def _start_variation(self):
super()._start_variation()
if len(self.ravstack) > len(self.variationnumber):
self.variationnumber.append(0)
def _end_variation(self):
super()._end_variation()
self.variationnumber[len(self.ravstack)] += 1
self._variation = "".join(
_convert_integer_to_length_hex(i) for i in self.variationnumber
)
def get_position_string(description):
board, side_to_move, castle_options, ep_square = description[:4]
return (
sum(SQUARE_BITS[e] for e, p in enumerate(board) if p)
.to_bytes(8, "big")
.decode("iso-8859-1")
+ "".join(p for p in board)
+ FEN_TOMOVE[side_to_move]
+ ep_square
+ castle_options
)
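# Descriptive note (added): the position string packs an 8-byte occupancy
# bitmap (decoded as iso-8859-1 characters), the piece letters in board order
# (empty squares contribute nothing, assuming NOPIECE is an empty string), the
# side to move, the en passant square, and the castling options into one
# concatenated key.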
def _convert_integer_to_length_hex(i):
try:
return MOVE_NUMBER_KEYS[i]
except IndexError:
c = hex(i)
return str(len(c) - 2) + c[2:]
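# Worked example (added, exercising only the fallback branch): an integer
# outside MOVE_NUMBER_KEYS is encoded as the count of hex digits followed by
# the digits themselves, so 31 -> '21f' and 300 -> '312c'; the precomputed
# table entries are assumed to use the same encoding for small values.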
class PGNError132(Exception):
pass
class PGN132(object):
def __init__(self):
super().__init__()
# data generated from PGN text for game while checking moves are legal
self.tokens = []
self.error_tokens = []
self.tags_in_order = []
# data generated from PGN text for game after checking moves are legal
self.collected_game = None
self.board_bitmap = None
self.occupied_squares = []
self.board = []
self.piece_locations = {}
self.fullmove_number = None
self.halfmove_count = None
self.en_passant = None
self.castling = None
self.active_side = None
# ravstack keeps track of the position at start of game or variation
# and the position after application of a valid move. Thus the value
# in ravstack[-1] is (None, <position start>) at start of game or line
# and (<position start>, <position after move>) after application of a
# valid move from gametokens.
self.ravstack = []
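        # Illustrative note (added): after 1. e4 from the standard start the
        # top entry is (start position, position after e4); opening a '('
        # pushes (None, position before the move being varied), so the
        # variation replays from the same starting point.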
# data used while parsing PGN text to split into tag and move tokens
self._initial_fen = None
self._state = None
self._move_error_state = None
self._rewind_state = None
self._despatch_table = [
self._searching,
self._searching_after_error_in_rav,
self._searching_after_error_in_game,
self._collecting_tag_pairs,
self._collecting_movetext,
self._collecting_non_whitespace_while_searching,
self._disambiguate_move,
]
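        # Descriptive note (added): self._state is used directly as an index
        # into this table, so PGN_SEARCHING, PGN_SEARCHING_AFTER_ERROR_* and
        # the related constants are assumed to be small integers matching the
        # order of the handlers listed above.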
@staticmethod
def _read_pgn(string, length):
pgntext = string.read(length)
while len(pgntext):
yield pgntext
pgntext = string.read(length)
yield pgntext
def read_games(self, source, size=10000000, housekeepinghook=lambda: None):
self._state = PGN_SEARCHING
self._move_error_state = PGN_COLLECTING_NON_WHITESPACE_WHILE_SEARCHING
for pgntext in self._read_pgn(source, size):
if len(self.error_tokens):
self._state = self._rewind_state
pgntext = "".join(self.error_tokens) + pgntext
self.error_tokens.clear()
for t in re_tokens.finditer(pgntext):
self._despatch_table[self._state](t)
if t.group(IFG_TERMINATION):
yield t
housekeepinghook()
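    # Illustrative usage sketch for read_games (added, not part of the
    # original module), assuming a PGN file is available:
    #     parser = PGN132()
    #     with open('games.pgn') as source:
    #         for termination in parser.read_games(source):
    #             game = parser.collected_game
    # Each yield happens at a game termination token, and collected_game then
    # holds (tags_in_order, tag dictionary, tokens, error_tokens).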
def read_pgn_tokens(
self, source, size=10000000, housekeepinghook=lambda: None
):
self._state = PGN_SEARCHING
self._move_error_state = PGN_COLLECTING_NON_WHITESPACE_WHILE_SEARCHING
for pgntext in self._read_pgn(source, size):
if len(self.error_tokens):
self._state = self._rewind_state
pgntext = "".join(self.error_tokens) + pgntext
self.error_tokens.clear()
for t in re_tokens.finditer(pgntext):
self._despatch_table[self._state](t)
yield t.group(IFG_TERMINATION)
def get_games(self, source):
self._state = PGN_SEARCHING
self._move_error_state = PGN_COLLECTING_NON_WHITESPACE_WHILE_SEARCHING
for t in re_tokens.finditer(source):
self._despatch_table[self._state](t)
if t.group(IFG_TERMINATION):
yield t
def get_first_pgn_token(self, source):
self._state = PGN_SEARCHING
self._move_error_state = PGN_COLLECTING_NON_WHITESPACE_WHILE_SEARCHING
try:
t = next(re_tokens.finditer(source))
self._despatch_table[self._state](t)
return False if t.group(IFG_TERMINATION) else True
except StopIteration:
return
def read_first_game(
self, source, size=10000000, housekeepinghook=lambda: None
):
return next(
self.read_games(
source, size=size, housekeepinghook=housekeepinghook
)
)
def get_first_game(self, source):
return next(self.get_games(source))
def is_movetext_valid(self):
return not self.collected_game[3]
def is_pgn_valid(self):
return self.is_movetext_valid() and self.is_tag_roster_valid()
def is_tag_roster_valid(self):
tags_in_order = self.collected_game[0]
tags = self.collected_game[1]
if len(tags) != len(tags_in_order):
# Tag must appear no more than once
return False
for v in tags.values():
if len(v) == 0:
# Tag value must not be null
return False
for t in SEVEN_TAG_ROSTER:
if t not in tags:
# A mandatory tag is missing
return False
return True
def set_position_fen(self, fen=None):
# fen is standard start position by default
if fen is None:
self.board_bitmap = INITIAL_BOARD_BITMAP
self.board = list(INITIAL_BOARD)
self.occupied_squares[:] = [
set(s) for s in INITIAL_OCCUPIED_SQUARES
]
self.piece_locations = {
k: set(v) for k, v in INITIAL_PIECE_LOCATIONS.items()
}
self.ravstack[:] = [
(
None,
(
INITIAL_BOARD,
WHITE_SIDE,
FEN_INITIAL_CASTLING,
FEN_NULL,
FEN_INITIAL_HALFMOVE_COUNT,
FEN_INITIAL_FULLMOVE_NUMBER,
),
)
]
self.active_side = WHITE_SIDE
self.castling = FEN_INITIAL_CASTLING
self.en_passant = FEN_NULL
self.halfmove_count = FEN_INITIAL_HALFMOVE_COUNT
self.fullmove_number = FEN_INITIAL_FULLMOVE_NUMBER
self._initial_fen = True
return
# fen specifies an arbitrary position.
# fen has six space delimited fields.
fs = fen.split(FEN_FIELD_DELIM)
if len(fs) != FEN_FIELD_COUNT:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
(
piece_placement,
active_side,
castling,
en_passant,
halfmove_count,
fullmove_number,
) = fs
del fs
# fen side to move field.
if active_side not in FEN_SIDES:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
# fen castling field.
if castling != FEN_NULL:
for c in FEN_INITIAL_CASTLING:
if castling.count(c) > FEN_CASTLING_OPTION_REPEAT_MAX:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
for c in castling:
if c not in FEN_INITIAL_CASTLING:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
# fen square to which a pawn can move when capturing en passant.
if active_side == FEN_WHITE:
if en_passant not in FEN_WHITE_MOVE_TO_EN_PASSANT:
if en_passant != FEN_NULL:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
elif active_side == FEN_BLACK:
if en_passant not in FEN_BLACK_MOVE_TO_EN_PASSANT:
if en_passant != FEN_NULL:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
# Earlier 'fen side to move field' test makes this unreachable.
else:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
# fen halfmove count since pawn move or capture.
if not halfmove_count.isdigit():
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
# fen fullmove number.
if not fullmove_number.isdigit():
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
# fen piece placement field has eight ranks delimited by '/'.
ranks = piece_placement.split(FEN_RANK_DELIM)
if len(ranks) != BOARDSIDE:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
# fen piece placement field has pieces and empty squares only.
for r in ranks:
for c in r:
if c not in PIECES:
if not c.isdigit():
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
# Exactly 64 squares: equivalent to exactly 8 squares per rank.
for r in ranks:
if sum([1 if not s.isdigit() else int(s) for s in r]) != BOARDSIDE:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
# No pawns on first or eighth ranks.
if (
ranks[0].count(WPAWN)
+ ranks[0].count(BPAWN)
+ ranks[BOARDSIDE - 1].count(WPAWN)
+ ranks[BOARDSIDE - 1].count(BPAWN)
):
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
# No more than 16 pieces per side.
for s in WPIECES, BPIECES:
for p in s:
if (
sum([piece_placement.count(p) for p in s])
> FEN_PIECE_COUNT_PER_SIDE_MAX
):
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
# Exactly one king per side.
for p in WKING, BKING:
if piece_placement.count(p) != FEN_KING_COUNT:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
# No more than eight pawns per side.
for p in WPAWN, BPAWN:
if piece_placement.count(p) > FEN_PAWN_COUNT_MAX:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
# Piece counts within initial position and pawn promotion bounds.
if (
piece_placement.count(WPAWN)
- FEN_PAWN_COUNT_MAX
+ max(piece_placement.count(WQUEEN) - FEN_QUEEN_COUNT_INITIAL, 0)
+ max(piece_placement.count(WROOK) - FEN_ROOK_COUNT_INITIAL, 0)
+ max(piece_placement.count(WBISHOP) - FEN_BISHOP_COUNT_INITIAL, 0)
+ max(piece_placement.count(WKNIGHT) - FEN_KNIGHT_COUNT_INITIAL, 0)
) > 0:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
if (
piece_placement.count(BPAWN)
- FEN_PAWN_COUNT_MAX
+ max(piece_placement.count(BQUEEN) - FEN_QUEEN_COUNT_INITIAL, 0)
+ max(piece_placement.count(BROOK) - FEN_ROOK_COUNT_INITIAL, 0)
+ max(piece_placement.count(BBISHOP) - FEN_BISHOP_COUNT_INITIAL, 0)
+ max(piece_placement.count(BKNIGHT) - FEN_KNIGHT_COUNT_INITIAL, 0)
) > 0:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
        # Position is legal apart from checks, actual and deduced, and the
        # deduced move that sets up an en passant capture possibility.
board = []
for r in ranks:
for c in r:
if c in PIECES:
board.append(c)
else:
board.extend([NOPIECE] * int(c))
# Castling availability must fit the board position.
if board[CASTLING_W] != WKING:
if WKING in castling:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
if WQUEEN in castling:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
if board[CASTLING_B] != BKING:
if BKING in castling:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
if BQUEEN in castling:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
if board[CASTLING_WK] != WROOK:
if WKING in castling:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
if board[CASTLING_WQ] != WROOK:
if WQUEEN in castling:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
if board[CASTLING_BK] != BROOK:
if BKING in castling:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
if board[CASTLING_BQ] != BROOK:
if BQUEEN in castling:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
        # The two squares behind the pawn that can be captured en passant
        # must be empty.  FEN quotes the en passant capture square if the
        # latest move is a two-square pawn move; there does not need to be a
        # pawn able to make the capture.  The side with the move must not be
        # in check diagonally through the square containing a pawn that can
        # be captured en passant, treating that square as empty.
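        # Hedged example (added): after the opening move e2-e4 with Black to
        # move, the FEN en passant field is 'e3'; the squares e3 and e2 must
        # be empty and e4 must hold a white pawn for the field to be accepted
        # here.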
if en_passant != FEN_NULL:
if en_passant in FEN_WHITE_MOVE_TO_EN_PASSANT:
s = FEN_WHITE_MOVE_TO_EN_PASSANT[en_passant]
if (
board[s] != NOPIECE
or board[s - 8] != NOPIECE
or board[s + 8] != BPAWN
):
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
elif en_passant in FEN_BLACK_MOVE_TO_EN_PASSANT:
s = FEN_BLACK_MOVE_TO_EN_PASSANT[en_passant]
if (
board[s] != NOPIECE
or board[s + 8] != NOPIECE
or board[s - 8] != WPAWN
):
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
else:
# Should not happen, caught earlier.
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
# FEN is legal, except for restrictions on kings in check, so set
# instance attributes to fit description of position.
piece_locations = {k: set() for k in INITIAL_PIECE_LOCATIONS}
active_side_squares = set()
inactive_side_squares = set()
board_bitmap = []
if active_side == FEN_WHITE:
active_side_pieces = WPIECES
else:
active_side_pieces = BPIECES
for s, p in enumerate(board):
if p in PIECES:
piece_locations[p].add(s)
board_bitmap.append(SQUARE_BITS[s])
if p in active_side_pieces:
active_side_squares.add(s)
else:
inactive_side_squares.add(s)
for active_side_king_square in piece_locations[
SIDE_KING[FEN_SIDES[active_side]]
]:
pass # set active_side_king_square without pop() and add().
for inactive_side_king_square in piece_locations[
SIDE_KING[OTHER_SIDE[FEN_SIDES[active_side]]]
]:
            pass # set inactive_side_king_square without pop() and add().
# Side without the move must not be in check.
# Cannot use is_active_king_attacked method because attributes are
# not set until the position is ok.
gap = GAPS[inactive_side_king_square]
board_bitmap = sum(board_bitmap)
for s in active_side_squares:
if (
not board_bitmap & gap[s]
and SQUARE_BITS[s]
& PIECE_CAPTURE_MAP[board[s]][inactive_side_king_square]
):
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
# Side with the move must not be in check from more than two squares.
# Cannot use count_attacks_on_square_by_side method because attributes
# are not set until the position is ok.
gap = GAPS[active_side_king_square]
if (
len(
[
s
for s in inactive_side_squares
if (
not board_bitmap & gap[s]
and SQUARE_BITS[s]
& PIECE_CAPTURE_MAP[board[s]][active_side_king_square]
)
]
)
> FEN_MAXIMUM_PIECES_GIVING_CHECK
):
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
self.board_bitmap = board_bitmap
self.board = board
if active_side == FEN_WHITE:
self.occupied_squares[:] = (
active_side_squares,
inactive_side_squares,
)
else:
self.occupied_squares[:] = (
inactive_side_squares,
active_side_squares,
)
self.piece_locations = piece_locations
self.ravstack[:] = [
(
None,
(
tuple(board),
FEN_SIDES[active_side],
castling,
en_passant,
int(halfmove_count),
int(fullmove_number),
),
)
]
self.active_side = FEN_SIDES[active_side]
self.castling = castling
self.en_passant = en_passant
self.halfmove_count = int(halfmove_count)
self.fullmove_number = int(fullmove_number)
self._initial_fen = fen
def _play_move(
self, pgn_piece, pgn_from, pgn_capture, pgn_tosquare, pgn_promote
):
tosquare = MAP_PGN_SQUARE_NAME_TO_FEN_ORDER[pgn_tosquare]
piece = MAPPIECE[self.active_side][pgn_piece]
g = GAPS[tosquare]
b = self.board
bb = self.board_bitmap
if pgn_capture == CAPTURE_MOVE:
pts = PIECE_CAPTURE_MAP[piece][tosquare]
else:
pts = PIECE_MOVE_MAP[piece][tosquare]
from_squares = [
s
for s in self.piece_locations[piece]
if (SQUARE_BITS[s] & pts and not bb & g[s])
]
if len(from_squares) > 1:
if pgn_from:
fm = MAPFILE.get(pgn_from[0])
if fm is not None:
fm = FILES[fm]
from_squares = [
s for s in from_squares if SQUARE_BITS[s] & fm
]
if len(from_squares) > 1:
fm = MAPROW.get(pgn_from[-1])
if fm is not None:
fm = RANKS[fm]
from_squares = [
s for s in from_squares if SQUARE_BITS[s] & fm
]
if len(from_squares) > 1:
inactive_side_squares = self.occupied_squares[
OTHER_SIDE[self.active_side]
]
for active_side_king_square in self.piece_locations[
SIDE_KING[self.active_side]
]:
pass # set active_side_king_square without pop() and add().
gk = GAPS[active_side_king_square]
pinned_to_king = set()
for si in inactive_side_squares:
if (
PIECE_CAPTURE_MAP[b[si]][active_side_king_square]
& SQUARE_BITS[si]
):
for s in from_squares:
if gk[si] & SQUARE_BITS[s]:
if not (
(
bb ^ SQUARE_BITS[s]
| SQUARE_BITS[tosquare]
)
& gk[si]
):
if si != tosquare:
pinned_to_king.add(s)
from_squares = [
s for s in from_squares if s not in pinned_to_king
]
if pgn_capture == PLAIN_MOVE and b[tosquare] == piece:
            # If the moving piece is already on tosquare and the next token
            # is a square identity, try tosquare as the fromsquare and the
            # next token as the tosquare for the piece move.
            # This only applies to Q, B and N non-capture moves where the
            # moving side has more than two of the moving piece, so it is
            # possible there are two pieces of the moving kind on the same
            # rank and the same file at the same time which can reach the
            # tosquare.
            # Check that there are at least three pieces of one kind which
            # can move to the same square and note the possibilities for
            # evaluation in two subsequent states where the next tokens are
            # readily available for comparison.  The next two tokens must be
            # '' and a square identity, and the square identity must be one
            # of the possibilities.
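            # Illustrative case (added): with white queens on d1, a4 and h5,
            # the movetext 'Qd1f1' is tokenized as 'Qd1' followed by 'f1';
            # the queen already standing on d1 and the piece count above two
            # switch the parser into PGN_DISAMBIGUATE_MOVE, and the following
            # 'f1' token is then combined with 'Qd1' into the fully
            # disambiguated move from d1 to f1.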
if b.count(piece) > 2:
if pgn_piece in PGN_FROM_SQUARE_DISAMBIGUATION:
self._state = PGN_DISAMBIGUATE_MOVE
self._rewind_state = self._state
return
self._illegal_play_move()
return
# After the disambiguation test, plain move to square containing piece
# which is moving, because queen moves like both rook and bishop.
if len(from_squares) != 1:
self._illegal_play_move()
return
piece_locations = self.piece_locations
fromsquare = from_squares.pop()
# pgn_from is null, a file name, a rank name, or a square name. If not
# null it must be part of, or equal, the square name of fromsquare.
if pgn_from is not None:
if pgn_from not in MAP_FEN_ORDER_TO_PGN_SQUARE_NAME[fromsquare]:
self._illegal_play_move()
return
if pgn_capture == CAPTURE_MOVE:
inactive_side_squares = self.occupied_squares[
OTHER_SIDE[self.active_side]
]
if tosquare not in inactive_side_squares:
if pgn_piece != PGN_PAWN:
self._illegal_play_move()
return
elif pgn_tosquare != self.en_passant:
self._illegal_play_move()
return
# Remove pawn captured en passant.
elif self.en_passant in FEN_WHITE_CAPTURE_EN_PASSANT:
eps = FEN_WHITE_CAPTURE_EN_PASSANT[self.en_passant]
b[eps] = NOPIECE
inactive_side_squares.remove(eps)
piece_locations[BPAWN].remove(eps)
self.board_bitmap &= self.board_bitmap ^ SQUARE_BITS[eps]
elif self.en_passant in FEN_BLACK_CAPTURE_EN_PASSANT:
eps = FEN_BLACK_CAPTURE_EN_PASSANT[self.en_passant]
b[eps] = NOPIECE
inactive_side_squares.remove(eps)
piece_locations[WPAWN].remove(eps)
self.board_bitmap &= self.board_bitmap ^ SQUARE_BITS[eps]
else:
self._illegal_play_move()
return
else:
inactive_side_squares.remove(tosquare)
piece_locations[b[tosquare]].remove(tosquare)
self.en_passant = FEN_NULL
self.halfmove_count = 0
elif SQUARE_BITS[tosquare] & bb:
self._illegal_play_move()
return
elif pgn_piece == PGN_PAWN:
# Moves like 'b1' for black, and 'b8' for white, are passed earlier
# to cope with disambiguating queen moves like 'Qd1f1'.
if not (
SQUARE_BITS[tosquare]
& PAWN_MOVE_DESITINATION[self.active_side]
):
if not pgn_promote:
self._illegal_play_move()
return
self.halfmove_count = 0
if (
SQUARE_BITS[fromsquare] & EN_PASSANT_FROM_SQUARES
and SQUARE_BITS[tosquare] & EN_PASSANT_TO_SQUARES
):
self.en_passant = (
pgn_tosquare[0]
+ FEN_EN_PASSANT_TARGET_RANK[pgn_tosquare[1]]
)
else:
self.en_passant = FEN_NULL
else:
self.en_passant = FEN_NULL
self.halfmove_count = self.halfmove_count + 1
active_side_squares = self.occupied_squares[self.active_side]
# Remove moving piece from current square.
b[fromsquare] = NOPIECE
active_side_squares.remove(fromsquare)
piece_locations[piece].remove(fromsquare)
self.board_bitmap &= self.board_bitmap ^ SQUARE_BITS[fromsquare]
# Put moving piece on new square.
b[tosquare] = piece
active_side_squares.add(tosquare)
piece_locations[piece].add(tosquare)
self.board_bitmap |= SQUARE_BITS[tosquare]
        # On promotion, replace the moving pawn with the promotion piece.
if pgn_promote:
piece_locations[b[tosquare]].remove(tosquare)
b[tosquare] = MAPPIECE[self.active_side][pgn_promote]
piece_locations[b[tosquare]].add(tosquare)
# Undo move if it leaves king in check.
if self.is_active_king_attacked():
self.reset_position(self.ravstack[-1][-1])
self._illegal_play_move()
return
        # Castling availability.
# tosquare tests deal with capture of rooks which have not moved.
# For real games the top condition is false for more than half the game
# and the next condition is usually false.
if self.castling != FEN_NULL:
if (
SQUARE_BITS[fromsquare] | SQUARE_BITS[tosquare]
) & CASTLING_AVAILABITY_SQUARES:
if fromsquare == CASTLING_W:
self.castling = self.castling.replace(WKING, NOPIECE)
self.castling = self.castling.replace(WQUEEN, NOPIECE)
elif fromsquare == CASTLING_WK:
self.castling = self.castling.replace(WKING, NOPIECE)
elif fromsquare == CASTLING_WQ:
self.castling = self.castling.replace(WQUEEN, NOPIECE)
elif fromsquare == CASTLING_B:
self.castling = self.castling.replace(BKING, NOPIECE)
self.castling = self.castling.replace(BQUEEN, NOPIECE)
elif fromsquare == CASTLING_BK:
self.castling = self.castling.replace(BKING, NOPIECE)
elif fromsquare == CASTLING_BQ:
self.castling = self.castling.replace(BQUEEN, NOPIECE)
if tosquare == CASTLING_WK:
self.castling = self.castling.replace(WKING, NOPIECE)
elif tosquare == CASTLING_WQ:
self.castling = self.castling.replace(WQUEEN, NOPIECE)
elif tosquare == CASTLING_BK:
self.castling = self.castling.replace(BKING, NOPIECE)
elif tosquare == CASTLING_BQ:
self.castling = self.castling.replace(BQUEEN, NOPIECE)
if self.castling == NOPIECE:
self.castling = FEN_NULL
self.add_move_to_game()
def _play_castles(self, token):
# Verify castling availability and pick castling rules.
if token.startswith(O_O_O):
if self.active_side == WHITE_SIDE:
if WQUEEN not in self.castling:
self._illegal_play_castles()
return
castling_squares = CASTLING_SQUARES[WQUEEN]
else:
if BQUEEN not in self.castling:
self._illegal_play_castles()
return
castling_squares = CASTLING_SQUARES[BQUEEN]
elif token.startswith(O_O):
if self.active_side == WHITE_SIDE:
if WKING not in self.castling:
self._illegal_play_castles()
return
castling_squares = CASTLING_SQUARES[WKING]
else:
if BKING not in self.castling:
self._illegal_play_castles()
return
castling_squares = CASTLING_SQUARES[BKING]
else:
self._illegal_play_castles()
return
bb = self.board_bitmap
board = self.board
piece_locations = self.piece_locations
active_side_squares = self.occupied_squares[self.active_side]
active_side_king_locations = piece_locations[
SIDE_KING[self.active_side]
]
if self.active_side == WHITE_SIDE:
active_side_rook_locations = piece_locations[WROOK]
else:
active_side_rook_locations = piece_locations[BROOK]
for active_side_king_square in active_side_king_locations:
pass # set active_side_king_square without pop() and add().
# Confirm board position is consistent with castling availability.
if (
active_side_king_square != castling_squares[0]
or board[castling_squares[0]] != castling_squares[5]
or board[castling_squares[1]] != castling_squares[4]
):
self._illegal_play_castles()
return
# Squares between king and castling rook must be empty.
for squares in castling_squares[2:4]:
for s in squares:
if SQUARE_BITS[s] & bb:
self._illegal_play_castles()
return
# Castling king must not be in check.
if self.is_square_attacked_by_side(
castling_squares[0], OTHER_SIDE[self.active_side]
):
self._illegal_play_castles()
return
# Castling king's destination square, and the one between, must not be
# attacked by the other side.
for square in castling_squares[2]:
if self.is_square_attacked_by_side(
square, OTHER_SIDE[self.active_side]
):
self._illegal_play_castles()
return
king_square = castling_squares[0]
new_king_square = castling_squares[2][1]
rook_square = castling_squares[1]
new_rook_square = castling_squares[2][0]
# Put moving pieces on new squares.
board[new_king_square] = board[king_square]
board[new_rook_square] = board[rook_square]
active_side_squares.add(new_king_square)
active_side_king_locations.add(new_king_square)
active_side_squares.add(new_rook_square)
active_side_rook_locations.add(new_rook_square)
self.board_bitmap |= (
SQUARE_BITS[new_king_square] | SQUARE_BITS[new_rook_square]
)
# Remove moving pieces from current squares.
board[king_square] = NOPIECE
board[rook_square] = NOPIECE
active_side_squares.remove(king_square)
active_side_king_locations.remove(king_square)
active_side_squares.remove(rook_square)
active_side_rook_locations.remove(rook_square)
self.board_bitmap &= self.board_bitmap ^ (
SQUARE_BITS[king_square] | SQUARE_BITS[rook_square]
)
        # Castling availability.
if self.active_side == WHITE_SIDE:
self.castling = self.castling.replace(WKING, NOPIECE)
self.castling = self.castling.replace(WQUEEN, NOPIECE)
else:
self.castling = self.castling.replace(BKING, NOPIECE)
self.castling = self.castling.replace(BQUEEN, NOPIECE)
if self.castling == NOPIECE:
self.castling = FEN_NULL
# Cannot be en-passant
self.en_passant = FEN_NULL
self.halfmove_count = self.halfmove_count + 1
self.add_move_to_game()
def is_active_king_attacked(self):
b = self.board
bb = self.board_bitmap
# Only one element in this container.
for ks in self.piece_locations[SIDE_KING[self.active_side]]:
g = GAPS[ks]
for s in self.occupied_squares[OTHER_SIDE[self.active_side]]:
if (
not bb & g[s]
and SQUARE_BITS[s] & PIECE_CAPTURE_MAP[b[s]][ks]
):
return True
return False
def is_square_attacked_by_side(self, square, side):
g = GAPS[square]
b = self.board
bb = self.board_bitmap
for s in self.occupied_squares[side]:
if (
not bb & g[s]
and SQUARE_BITS[s] & PIECE_CAPTURE_MAP[b[s]][square]
):
return True
return False
def count_attacks_on_square_by_side(self, square, side):
g = GAPS[square]
b = self.board
bb = self.board_bitmap
return len(
[
s
for s in self.occupied_squares[side]
if (
not bb & g[s]
and SQUARE_BITS[s] & PIECE_CAPTURE_MAP[b[s]][square]
)
]
)
def add_move_to_game(self):
self.active_side = OTHER_SIDE[self.active_side]
if self.active_side == WHITE_SIDE:
self.fullmove_number += 1
self.ravstack[-1] = (
self.ravstack[-1][-1],
(
tuple(self.board),
self.active_side,
self.castling,
self.en_passant,
self.halfmove_count,
self.fullmove_number,
),
)
def collect_token(self, match):
self.tokens.append(match)
def collect_game_tokens(self):
self.collected_game = (
self.tags_in_order,
{
m.group(IFG_TAG_SYMBOL): m.group(IFG_TAG_STRING_VALUE)
for m in self.tags_in_order
},
self.tokens,
self.error_tokens,
)
def _play_disambiguated_move(
self, pgn_piece, pgn_fromsquare, pgn_tosquare
):
fromsquare = MAP_PGN_SQUARE_NAME_TO_FEN_ORDER[pgn_fromsquare]
tosquare = MAP_PGN_SQUARE_NAME_TO_FEN_ORDER[pgn_tosquare]
piece = MAPPIECE[self.active_side][pgn_piece]
if fromsquare not in self.piece_locations[piece]:
self._illegal_play_disambiguated_move()
return
if not (
SQUARE_BITS[fromsquare] & PIECE_MOVE_MAP[piece][tosquare]
and not self.board_bitmap & GAPS[tosquare][fromsquare]
):
self._illegal_play_disambiguated_move()
return
if SQUARE_BITS[tosquare] & self.board_bitmap:
self._illegal_play_disambiguated_move()
return
else:
self.halfmove_count = self.halfmove_count + 1
b = self.board
piece_locations = self.piece_locations
active_side_squares = self.occupied_squares[self.active_side]
# Remove moving piece from current square.
b[fromsquare] = NOPIECE
active_side_squares.remove(fromsquare)
piece_locations[piece].remove(fromsquare)
self.board_bitmap &= self.board_bitmap ^ SQUARE_BITS[fromsquare]
# Put moving piece on new square.
b[tosquare] = piece
active_side_squares.add(tosquare)
piece_locations[piece].add(tosquare)
self.board_bitmap |= SQUARE_BITS[tosquare]
# Undo move if it leaves king in check.
if self.is_active_king_attacked():
self.reset_position(self.ravstack[-1][-1])
self._illegal_play_disambiguated_move()
return
        # Castling availability is not affected because rooks cannot be involved
# in moves which need disambiguation.
# Cannot be en-passant
self.en_passant = FEN_NULL
self.add_move_to_game()
    # Maybe this should not be a method now, but retain the shape of the
    # pre-FEN class code for ease of comparison until everything is known to
    # work.  Just say self._fen = ... where the method is called.
def reset_position(self, position):
(
board,
self.active_side,
self.castling,
self.en_passant,
self.halfmove_count,
self.fullmove_number,
) = position
self.board[:] = list(board)
occupied_squares = self.occupied_squares
for side in occupied_squares:
side.clear()
piece_locations = self.piece_locations
for piece in piece_locations.values():
piece.clear()
board_bitmap = 0
for square, piece in enumerate(board):
if piece in WPIECES:
occupied_squares[0].add(square)
piece_locations[piece].add(square)
board_bitmap |= SQUARE_BITS[square]
elif piece in BPIECES:
occupied_squares[1].add(square)
piece_locations[piece].add(square)
board_bitmap |= SQUARE_BITS[square]
self.board_bitmap = board_bitmap
def _start_variation(self):
self.ravstack.append((None, self.ravstack[-1][0]))
self.reset_position(self.ravstack[-1][-1])
def _end_variation(self):
try:
del self.ravstack[-1]
try:
self.reset_position(self.ravstack[-1][-1])
except:
pass
except:
pass
def _searching(self, match):
mg = match.group
if mg(IFG_START_TAG):
self.tags_in_order.append(match)
if mg(IFG_TAG_SYMBOL) == TAG_FEN:
self.set_position_fen(mg(IFG_TAG_STRING_VALUE))
self._state = PGN_COLLECTING_TAG_PAIRS
self._rewind_state = self._state
self._move_error_state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
if mg(IFG_PIECE_SQUARE):
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
self.tokens.append(match)
self._play_move(
mg(IFG_PIECE_MOVE), "", "", mg(IFG_PIECE_SQUARE), ""
)
return
if mg(IFG_PAWN_SQUARE):
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
self.tokens.append(match)
self._play_move("", "", "", mg(IFG_PAWN_SQUARE), "")
return
if mg(IFG_PIECE_CAPTURE_SQUARE):
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
self.tokens.append(match)
self._play_move(
mg(IFG_PIECE_CAPTURE) or mg(IFG_KING_CAPTURE),
mg(IFG_PIECE_CAPTURE_FROM),
mg(IFG_PIECE_TAKES),
mg(IFG_PIECE_CAPTURE_SQUARE),
"",
)
return
if mg(IFG_PAWN_CAPTURE_SQUARE):
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
self.tokens.append(match)
self._play_move(
"",
mg(IFG_PAWN_CAPTURE_FROM_FILE),
mg(IFG_PAWN_TAKES),
mg(IFG_PAWN_CAPTURE_SQUARE),
"",
)
return
if mg(IFG_PIECE_CHOICE_SQUARE):
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
self.tokens.append(match)
self._play_move(
mg(IFG_PIECE_CHOICE),
mg(IFG_PIECE_CHOICE_FILE_OR_RANK),
"",
mg(IFG_PIECE_CHOICE_SQUARE),
"",
)
return
if mg(IFG_CASTLES):
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
self.tokens.append(match)
self._play_castles(mg(IFG_CASTLES))
return
if mg(IFG_PAWN_PROMOTE_SQUARE):
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
self.tokens.append(match)
self._play_move(
"",
mg(IFG_PAWN_PROMOTE_FROM_FILE),
mg(IFG_PAWN_TAKES_PROMOTE),
mg(IFG_PAWN_PROMOTE_SQUARE),
mg(IFG_PAWN_PROMOTE_PIECE)[1],
)
return
if mg(IFG_COMMENT):
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
self.collect_token(match)
return
if mg(IFG_NAG):
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
self.collect_token(match)
return
if mg(IFG_COMMENT_TO_EOL):
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
self.collect_token(match)
return
        # Captured tokens not accepted when searching for the start of a game.
if mg(IFG_START_RAV):
self.error_tokens.append(mg())
self._state = PGN_COLLECTING_NON_WHITESPACE_WHILE_SEARCHING
return
if mg(IFG_END_RAV):
self.error_tokens.append(mg())
self._state = PGN_COLLECTING_NON_WHITESPACE_WHILE_SEARCHING
return
if mg(IFG_TERMINATION):
self._termination_while_searching(match)
return
# Action for non-captured groups is decided by looking at whole token.
string = mg()
if not string.strip():
return
if string.isdigit():
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
return
if string == FULLSTOP:
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
return
# Only other groups with length > 1:
# '<reserved>'
# '%escaped\n'
# are not captured and are ignored.
if len(string) > 1:
return
self.error_tokens.append(string)
self._state = PGN_COLLECTING_NON_WHITESPACE_WHILE_SEARCHING
return
def _searching_after_error_in_rav(self, match):
if match.group(IFG_START_RAV):
self.error_tokens.append(match.group())
self._ravstack_length += 1
return
if match.group(IFG_END_RAV):
if self._ravstack_length == len(self.ravstack):
self._convert_error_tokens_to_token()
self.collect_token(match)
self._end_variation()
self.error_tokens = []
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
if self._ravstack_length > 2:
self._move_error_state = PGN_SEARCHING_AFTER_ERROR_IN_RAV
else:
self._move_error_state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
del self._ravstack_length
else:
self.error_tokens.append(match.group())
self._ravstack_length -= 1
return
if match.group(IFG_TERMINATION):
self._convert_error_tokens_to_token()
self.collect_token(match)
self.collect_game_tokens()
self._initial_fen = False
self.tokens = []
self.error_tokens = []
self.tags_in_order = []
self._state = PGN_SEARCHING
self._rewind_state = self._state
del self._ravstack_length
return
if match.group(IFG_START_TAG):
self._convert_error_tokens_to_token()
self.collect_game_tokens()
self._initial_fen = False
self.tokens = []
self.error_tokens = []
self.tags_in_order = [match]
if match.group(IFG_TAG_SYMBOL) == TAG_FEN:
self.set_position_fen(match.group(IFG_TAG_STRING_VALUE))
self._state = PGN_COLLECTING_TAG_PAIRS
self._rewind_state = self._state
self._move_error_state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
del self._ravstack_length
return
self.error_tokens.append(match.group())
def _searching_after_error_in_game(self, match):
if match.group(IFG_TERMINATION):
self._convert_error_tokens_to_token()
self.collect_token(match)
self.collect_game_tokens()
self._initial_fen = False
self.tokens = []
self.error_tokens = []
self.tags_in_order = []
self._state = PGN_SEARCHING
self._rewind_state = self._state
return
if match.group(IFG_START_TAG):
self._convert_error_tokens_to_token()
self.collect_game_tokens()
self._initial_fen = False
self.tokens = []
self.error_tokens = []
self.tags_in_order = [match]
if match.group(IFG_TAG_SYMBOL) == TAG_FEN:
self.set_position_fen(match.group(IFG_TAG_STRING_VALUE))
self._state = PGN_COLLECTING_TAG_PAIRS
self._rewind_state = self._state
self._move_error_state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
self.error_tokens.append(match.group())
def _collecting_tag_pairs(self, match):
mg = match.group
if mg(IFG_START_TAG):
self.tags_in_order.append(match)
if mg(IFG_TAG_SYMBOL) == TAG_FEN:
self.set_position_fen(mg(IFG_TAG_STRING_VALUE))
return
if mg(IFG_PIECE_SQUARE):
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
self.tokens.append(match)
self._play_move(
mg(IFG_PIECE_MOVE), "", "", mg(IFG_PIECE_SQUARE), ""
)
return
if mg(IFG_PAWN_SQUARE):
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
self.tokens.append(match)
self._play_move("", "", "", mg(IFG_PAWN_SQUARE), "")
return
if mg(IFG_PIECE_CAPTURE_SQUARE):
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
self.tokens.append(match)
self._play_move(
mg(IFG_PIECE_CAPTURE) or mg(IFG_KING_CAPTURE),
mg(IFG_PIECE_CAPTURE_FROM),
mg(IFG_PIECE_TAKES),
mg(IFG_PIECE_CAPTURE_SQUARE),
"",
)
return
if mg(IFG_PAWN_CAPTURE_SQUARE):
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
self.tokens.append(match)
self._play_move(
"",
mg(IFG_PAWN_CAPTURE_FROM_FILE),
mg(IFG_PAWN_TAKES),
mg(IFG_PAWN_CAPTURE_SQUARE),
"",
)
return
if mg(IFG_PIECE_CHOICE_SQUARE):
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
self.tokens.append(match)
self._play_move(
mg(IFG_PIECE_CHOICE),
mg(IFG_PIECE_CHOICE_FILE_OR_RANK),
"",
mg(IFG_PIECE_CHOICE_SQUARE),
"",
)
return
if mg(IFG_CASTLES):
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
self.tokens.append(match)
self._play_castles(mg(IFG_CASTLES))
return
if mg(IFG_PAWN_PROMOTE_SQUARE):
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
self.tokens.append(match)
self._play_move(
"",
mg(IFG_PAWN_PROMOTE_FROM_FILE),
mg(IFG_PAWN_TAKES_PROMOTE),
mg(IFG_PAWN_PROMOTE_SQUARE),
mg(IFG_PAWN_PROMOTE_PIECE)[1],
)
return
if mg(IFG_TERMINATION):
if not self._initial_fen:
self.set_position_fen()
self.collect_token(match)
self.collect_game_tokens()
self._initial_fen = False
self.tokens = []
self.error_tokens = []
self.tags_in_order = []
self._state = PGN_SEARCHING
self._rewind_state = self._state
return
if mg(IFG_COMMENT):
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
self.collect_token(match)
return
if mg(IFG_NAG):
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
self.collect_token(match)
return
if mg(IFG_COMMENT_TO_EOL):
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
self.collect_token(match)
return
        # The captured tokens which are not accepted when searching for tag pairs.
if mg(IFG_START_RAV):
self.error_tokens.append(mg())
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
if mg(IFG_END_RAV):
self.error_tokens.append(mg())
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
        # Action for non-captured groups is decided by looking at the whole token.
string = mg()
if not string.strip():
return
if string.isdigit():
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
return
if string == FULLSTOP:
if not self._initial_fen:
self.set_position_fen()
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
return
# Only other groups with length > 1:
# '<reserved>'
# '%escaped\n'
# are not captured and are ignored.
if len(string) > 1:
return
self.error_tokens.append(string)
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
def _collecting_movetext(self, match):
mg = match.group
if mg(IFG_PIECE_SQUARE):
self.tokens.append(match)
self._play_move(
mg(IFG_PIECE_MOVE), "", "", mg(IFG_PIECE_SQUARE), ""
)
return
if mg(IFG_PAWN_SQUARE):
self.tokens.append(match)
self._play_move("", "", "", mg(IFG_PAWN_SQUARE), "")
return
if mg(IFG_PIECE_CAPTURE_SQUARE):
self.tokens.append(match)
self._play_move(
mg(IFG_PIECE_CAPTURE) or mg(IFG_KING_CAPTURE),
mg(IFG_PIECE_CAPTURE_FROM),
mg(IFG_PIECE_TAKES),
mg(IFG_PIECE_CAPTURE_SQUARE),
"",
)
return
if mg(IFG_PAWN_CAPTURE_SQUARE):
self.tokens.append(match)
self._play_move(
"",
mg(IFG_PAWN_CAPTURE_FROM_FILE),
mg(IFG_PAWN_TAKES),
mg(IFG_PAWN_CAPTURE_SQUARE),
"",
)
return
if mg(IFG_PIECE_CHOICE_SQUARE):
self.tokens.append(match)
self._play_move(
mg(IFG_PIECE_CHOICE),
mg(IFG_PIECE_CHOICE_FILE_OR_RANK),
"",
mg(IFG_PIECE_CHOICE_SQUARE),
"",
)
return
if mg(IFG_CASTLES):
self.tokens.append(match)
self._play_castles(mg(IFG_CASTLES))
return
if mg(IFG_PAWN_PROMOTE_SQUARE):
self.tokens.append(match)
self._play_move(
"",
mg(IFG_PAWN_PROMOTE_FROM_FILE),
mg(IFG_PAWN_TAKES_PROMOTE),
mg(IFG_PAWN_PROMOTE_SQUARE),
mg(IFG_PAWN_PROMOTE_PIECE)[1],
)
return
if mg(IFG_START_RAV):
self._start_variation()
self.collect_token(match)
return
if mg(IFG_END_RAV):
if len(self.ravstack) > 1:
self._end_variation()
self.collect_token(match)
else:
self.error_tokens.append(mg())
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
if mg(IFG_TERMINATION):
self.collect_token(match)
self.collect_game_tokens()
self._initial_fen = False
self.tokens = []
self.error_tokens = []
self.tags_in_order = []
self._state = PGN_SEARCHING
self._rewind_state = self._state
return
if mg(IFG_COMMENT):
self.collect_token(match)
return
if mg(IFG_NAG):
self.collect_token(match)
return
if mg(IFG_COMMENT_TO_EOL):
self.collect_token(match)
return
        # Other groups are not put on self.tokens because they are not shown in
        # game displays and do not need to be associated with a position on
        # the board.
# The non-captured groups which are accepted without action.
string = mg()
if not string.strip():
return
if string.isdigit():
return
if string == FULLSTOP:
return
# Current movetext finishes in error, no termination, assume start of
# new game.
if mg(IFG_START_TAG):
self._convert_error_tokens_to_token()
self.collect_game_tokens()
self._initial_fen = False
self.tokens = []
self.error_tokens = []
self.tags_in_order = [match]
if mg(IFG_TAG_SYMBOL) == TAG_FEN:
self.set_position_fen(mg(IFG_TAG_STRING_VALUE))
self._state = PGN_COLLECTING_TAG_PAIRS
self._rewind_state = self._state
self._move_error_state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
# Only other groups with length > 1:
# '<reserved>'
# '%escaped\n'
# are not captured and are ignored.
if len(string) > 1:
return
# The non-captured groups which cause an error condition.
self.error_tokens.append(string)
self._ravstack_length = len(self.ravstack)
if self._ravstack_length > 1:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_RAV
else:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
def _collecting_non_whitespace_while_searching(self, match):
if match.group(IFG_START_TAG):
self._convert_error_tokens_to_token()
self.collect_game_tokens()
self._initial_fen = False
self.tokens = []
self.error_tokens = []
self.tags_in_order = [match]
if match.group(IFG_TAG_SYMBOL) == TAG_FEN:
self.set_position_fen(match.group(IFG_TAG_STRING_VALUE))
self._state = PGN_COLLECTING_TAG_PAIRS
self._rewind_state = self._state
self._move_error_state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
return
if not match.group().split():
self.error_tokens.append(match.group())
return
self.error_tokens.append(match.group())
def _disambiguate_move(self, match):
mg = match.group
if mg(IFG_PAWN_SQUARE):
start = self.tokens.pop()
match = re_disambiguate_error.match(start.group() + mg())
if match is None:
match = re_disambiguate_non_move.match(start.group() + mg())
self.tokens.append(match)
self._illegal_play_disambiguated_move()
return
self._state = PGN_COLLECTING_MOVETEXT
self._rewind_state = self._state
self.tokens.append(match)
self._play_disambiguated_move(
start.group(IFG_PIECE_MOVE),
start.group(IFG_PIECE_SQUARE),
mg(IFG_PAWN_SQUARE),
)
return
self.error_tokens.append(self.tokens.pop().group() + mg())
self._ravstack_length = len(self.ravstack)
if self._ravstack_length > 1:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_RAV
else:
self._state = PGN_SEARCHING_AFTER_ERROR_IN_GAME
def _illegal_play_move(self):
self._state = self._move_error_state
et = self.tokens.pop()
self.error_tokens.append(et.group())
def _illegal_play_castles(self):
self._illegal_play_move()
def _illegal_play_disambiguated_move(self):
self._illegal_play_move()
def _convert_error_tokens_to_token(self):
"""Generate error token '{Error: <original tokens> }'.
Any '}' in <original tokens> replaced by '::{{::'. Assume '::{{::' and
'{Error: ' do not occur naturally in '{}' comments.
"""
self.collect_token(
re_tokens.match(
"".join(
(
ERROR_START_COMMENT,
"".join(self.error_tokens).replace(
END_COMMENT, ESCAPE_END_COMMENT
),
END_COMMENT,
)
)
)
)
# Should this method clear self.error_tokens too?
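        # Illustration added for clarity (hypothetical values): with
        # error_tokens == ['Qx9', ' ', '{bad}'] and the constants implied by
        # the docstring above, the generated token text would be
        # '{Error: Qx9 {bad::{{::}' - the offending text wrapped in a comment
        # whose internal '}' characters have been replaced so the comment
        # cannot terminate early.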
def _termination_while_searching(self, match):
self.error_tokens.append(match.group())
self._state = PGN_COLLECTING_NON_WHITESPACE_WHILE_SEARCHING
def __eq__(self, other):
if len(self.collected_game[2]) != len(other.collected_game[2]):
return False
if self.collected_game[3] or other.collected_game[3]:
return False
for ta, tb in zip(self.collected_game[2], other.collected_game[2]):
if ta.group() != tb.group():
return False
return True
def __ne__(self, other):
return not self == other
    # Subclass PGN132 to collect inconsistent FENs: that is, verify they do not
    # exist for PGN copied from pgn_read.core.parser version 1.3.2.
class PGNFen(PGN132):
def __init__(self):
super().__init__()
self.position_fens = []
self.board_fens = []
def add_move_to_game(self):
super().add_move_to_game()
board = self.board
castling = self.castling
if (
(board[0] != BROOK and BQUEEN in castling)
or (board[7] != BROOK and BKING in castling)
or (board[56] != WROOK and WQUEEN in castling)
or (board[63] != WROOK and WKING in castling)
):
self.position_fens.append(
get_fen_string(
(
board,
self.active_side,
castling,
self.en_passant,
self.halfmove_count,
self.fullmove_number,
)
)
)
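    # Editor's sketch (not part of the original module): the test in
    # PGNFen.add_move_to_game above amounts to "a castling right may only
    # survive while the matching rook is still on its home square".  A minimal
    # standalone version, assuming indices 0, 7, 56 and 63 address a8, h8, a1
    # and h1 on a 64-element board list and single-character piece and
    # castling symbols:
    def _castling_rights_consistent(board, castling, brook="r", wrook="R",
                                    bqueen="q", bking="k",
                                    wqueen="Q", wking="K"):
        """Return True if every castling right still has its rook at home."""
        return not (
            (board[0] != brook and bqueen in castling)
            or (board[7] != brook and bking in castling)
            or (board[56] != wrook and wqueen in castling)
            or (board[63] != wrook and wking in castling)
        )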
# Versions of the classes in core.chessrecord which use PGNUpdate modified to
# use PGNUpdate131, defined in this module above, so the records which have the
# inconsistent castling options can be deleted in full.
class ChessDBkeyGame131(KeyData):
def __eq__(self, other):
try:
return self.recno == other.recno
except:
return False
def __ne__(self, other):
try:
return self.recno != other.recno
except:
return True
class ChessDBvaluePGN131(Value):
@staticmethod
def encode_move_number(key):
return key.to_bytes(2, byteorder="big")
def load(self, value):
self.get_first_game(literal_eval(value))
def pack_value(self):
return repr(
"".join(
(
"".join(
[
"".join(
(
"[",
t.group(IFG_TAG_SYMBOL),
'"',
t.group(IFG_TAG_STRING_VALUE),
'"]',
)
)
for t in self.collected_game[0]
]
),
"".join([t.group() for t in self.collected_game[2]]),
"".join([t for t in self.collected_game[3]]),
)
)
)
class ChessDBvaluePGNUpdate131(PGNUpdate131, ChessDBvaluePGN131):
# Replaces ChessDBvaluePGNUpdate and ChessDBvalueGameImport which had been
# identical for a considerable time.
# Decided that PGNUpdate should remain in pgn.core.parser because that code
# generates data while this code updates a database.
# ChessDBvalueGameImport had this comment:
# Implication of original is encode_move_number not supported and load in
# ChessDBvaluePGN superclass is used.
def __init__(self):
super().__init__()
self.gamesource = None
def pack(self):
v = super().pack()
index = v[1]
cg = self.collected_game
if self.do_full_indexing():
tags = cg[1]
for field in SEVEN_TAG_ROSTER:
if field in PLAYER_NAME_TAGS:
# PGN specification states colon is used to separate player
# names in consultation games.
index[field] = [
" ".join(re_normalize_player_name.findall(tf))
for tf in tags[field].split(":")
]
else:
index[field] = [tags[field]]
index[POSITIONS_FIELD_DEF] = cg[4]
index[PIECESQUAREMOVE_FIELD_DEF] = cg[5]
index[PIECEMOVE_FIELD_DEF] = cg[6]
index[SQUAREMOVE_FIELD_DEF] = cg[7]
index[PGN_DATE_FIELD_DEF] = [
tags[TAG_DATE].replace(*SPECIAL_TAG_DATE)
]
else:
index[SOURCE_FIELD_DEF] = [self.gamesource]
return v
def set_game_source(self, source):
self.gamesource = source
def do_full_indexing(self):
return self.gamesource is None
def is_error_comment_present(self):
return ERROR_START_COMMENT in self.collected_game[2][0].string
class ChessDBrecordGameUpdate131(Record):
def __init__(self):
super(ChessDBrecordGameUpdate131, self).__init__(
ChessDBkeyGame131, ChessDBvaluePGNUpdate131
)
def clone(self):
            # Are the conditions for deleting this method now in place?
clone = super(ChessDBrecordGameUpdate131, self).clone()
return clone
@staticmethod
def decode_move_number(skey):
return int.from_bytes(skey, byteorder="big")
def get_keys(self, datasource=None, partial=None):
dbname = datasource.dbname
if dbname != POSITIONS_FIELD_DEF:
if dbname == GAMES_FILE_DEF:
return [(self.key.recno, self.srvalue)]
elif dbname in self.value.collected_game[1]:
return [
(self.value.collected_game[1][dbname], self.key.pack())
]
else:
return []
            if partial is None:
return []
moves = self.value.moves
gamekey = datasource.dbhome.encode_record_number(self.key.pack())
rav = 0
ref = 0
keys = []
convert_format = datasource.dbhome.db_compatibility_hack
p = tuple(partial)
for mt in moves:
if mt == START_RAV:
rav += 1
elif mt == END_RAV:
rav -= 1
elif mt == NON_MOVE:
pass
else:
if mt[-1] == p:
record = (partial, None)
keys.append(convert_format(record, gamekey))
ref += 1
return keys
def load_instance(self, database, dbset, dbname, record):
super(ChessDBrecordGameUpdate131, self).load_instance(
database, dbset, dbname, record
)
            # Never called because the attribute is not bound anywhere and no
            # exceptions are ever seen.
# if self.value.callbacktried:
# pass
# elif self.value.callbacktried == None:
# pass
# elif not self.value.callbacktried:
# self.value.set_game_source(record[0])
class Main:
def __init__(self):
root = tkinter.Tk()
root.wm_title(string="Castling Option Corrections")
root.wm_resizable(width=tkinter.FALSE, height=tkinter.TRUE)
tkinter.ttk.Label(
master=root, text="ChessTab Database Directory"
).grid(row=0, column=0)
tkinter.ttk.Label(master=root, text="Log").grid(
row=1, column=1, pady=5
)
tkinter.ttk.Label(master=root, text="Right-click for menu").grid(
row=1, column=3, pady=5, sticky="e"
)
progress = tkinter.ttk.Label(master=root)
progress.grid(row=1, column=2, pady=5)
counter = tkinter.StringVar(root, "")
progress["textvariable"] = counter
entry = tkinter.ttk.Entry(master=root)
entry.grid(row=0, column=1, columnspan=3, sticky="ew", pady=5)
chesstab_directory = tkinter.StringVar(root, "")
entry["textvariable"] = chesstab_directory
frame = tkinter.ttk.Frame(master=root)
frame.grid(row=2, column=0, columnspan=5, sticky="nsew")
root.rowconfigure(2, weight=1)
text = tkinter.Text(master=frame, wrap=tkinter.WORD)
scrollbar = tkinter.ttk.Scrollbar(
master=frame, orient=tkinter.VERTICAL, command=text.yview
)
text.configure(yscrollcommand=scrollbar.set)
scrollbar.pack(side=tkinter.RIGHT, fill=tkinter.Y)
text.pack(side=tkinter.RIGHT, fill=tkinter.Y)
self.menu = tkinter.Menu(master=frame, tearoff=False)
self.__menu = self.menu
self.root = root
self.text = text
self.entry = entry
self.chesstab_directory = chesstab_directory
self.counter = counter
self.set_menu_and_entry_events(True)
entry.bind("<ButtonPress-3>", self.show_menu)
text.bind("<ButtonPress-3>", self.show_menu)
entry.focus_set()
self._database_class = None
self._fullposition_class = None
self._engineanalysis_class = None
self.opendatabase = None
self._database_enginename = None
self._database_modulename = None
def insert_text(self, text):
self.text.insert(tkinter.END, text)
def report_action(self, msg):
self.insert_text("\n")
self.insert_text(" ".join(msg))
self.text.see(tkinter.END)
tkinter.messagebox.showinfo(master=self.root, message="\n".join(msg))
def report_error(self, msg):
self.insert_text("\n")
self.insert_text(" ".join(msg))
self.text.see(tkinter.END)
tkinter.messagebox.showerror(master=self.root, message="\n".join(msg))
def show_menu(self, event=None):
self.__menu.tk_popup(*event.widget.winfo_pointerxy())
self.__xy = event.x, event.y
self.__menu = self.menu
def select_chesstab_database(self, event=None):
directoryname = tkinter.filedialog.askdirectory(
title="Select ChessTab Database", initialdir="~"
)
if directoryname:
self.chesstab_directory.set(directoryname)
def save_log_as(self, event=None):
openfile = tkinter.filedialog.asksaveasfile(
title="Save Log As ...",
defaultextension=".txt",
filetypes=(("Log file", "*.txt"),),
)
if openfile:
try:
openfile.write(self.text.get("1.0", tkinter.END))
finally:
openfile.close()
def report_games_with_inconsistent_castling_options(self, event=None):
if self.chesstab_directory.get() == "":
tkinter.messagebox.showerror(
master=self.root,
message="Please select a ChessTab Database Directory.",
)
return
path = self.chesstab_directory.get()
if not os.path.exists(path):
msg = (
"Cannot process\n",
self.chesstab_directory.get(),
"\nwhich does not exist.",
)
self.report_error(msg)
return
if not os.path.isdir(path):
msg = (
"Cannot process\n",
self.chesstab_directory.get(),
"\nbecause it is not a directory.",
)
self.report_error(msg)
return
# Copied from chesstab.gui.chess.py (start)
ed = modulequery.modules_for_existing_databases(path, FileSpec())
if not ed:
tkinter.messagebox.showinfo(
message="".join(
(
"Chess database in ",
os.path.basename(path),
" cannot be opened, or there isn't one.\n\n",
"(Is correct database engine available?)",
)
),
title="Open",
)
return
elif len(ed) > 1:
tkinter.messagebox.showinfo(
message="".join(
(
"There is more than one chess database in folder\n\n",
os.path.basename(path),
"\n\nMove the databases to separate folders and try ",
"again. (Use the platform tools for moving files to ",
"relocate the database files.)",
)
),
title="Open",
)
return
idm = modulequery.installed_database_modules()
_enginename = None
for k, v in idm.items():
if v in ed[0]:
if _enginename:
tkinter.messagebox.showinfo(
message="".join(
(
"Several modules able to open database in\n\n",
os.path.basename(path),
"\n\navailable. Unable to choose.",
)
),
title="Open",
)
return
_enginename = k
if _enginename is None:
tkinter.messagebox.showinfo(
message="".join(
(
"No modules able to open database in\n\n",
os.path.basename(path),
"\n\navailable.",
)
),
title="Open",
)
return
_modulename = APPLICATION_DATABASE_MODULE[_enginename]
if self._database_modulename != _modulename:
if self._database_modulename is not None:
tkinter.messagebox.showinfo(
message="".join(
(
"The database engine needed for this database ",
"is not the one already in use.\n\nYou will ",
"have to Quit and start the application again ",
"to open this database.",
)
),
title="Open",
)
return
self._database_enginename = _enginename
self._database_modulename = _modulename
def import_name(modulename, name):
try:
module = __import__(
modulename, globals(), locals(), [name]
)
except ImportError:
return None
return getattr(module, name)
self._database_class = import_name(_modulename, _ChessDB)
self._fullposition_class = import_name(
FULL_POSITION_MODULE[_enginename], _FullPositionDS
)
self._engineanalysis_class = import_name(
ANALYSIS_MODULE[_enginename], _AnalysisDS
)
# Copied from chesstab.gui.chess.py (end)
# Adapted from chesstab.gui.chess.py but much simpler.
try:
self.opendatabase = self._database_class(path, allowcreate=True)
self.opendatabase.open_database()
except Exception as exc:
tkinter.messagebox.showinfo(
message="".join(
(
"Unable to open database\n\n",
str(path),
"\n\nThe reported reason is:\n\n",
str(exc),
)
),
title="Open",
)
if self.opendatabase:
self.opendatabase.close_database()
self.opendatabase = None
return
not_fixed_count = self.do_verification()
if self.opendatabase:
self.opendatabase.close_database()
self.report_action(
(
"PGN file",
os.path.basename(path),
"done at",
time.ctime(),
)
)
self.insert_text("\n")
self.text.see(tkinter.END)
self.chesstab_directory.set("")
def do_verification(self):
"""Report games on database with inconsistent castling options and
piece placement.
"""
fullposition = self._fullposition_class(
self.opendatabase, GAMES_FILE_DEF, POSITIONS_FIELD_DEF
)
engineanalysis = self._engineanalysis_class(
self.opendatabase, ANALYSIS_FILE_DEF, VARIATION_FIELD_DEF
)
gc = self.opendatabase.database_cursor(GAMES_FILE_DEF, GAMES_FILE_DEF)
not_fixed_count = 0
errors = []
error_fens = []
board_fens = []
while True:
r = gc.next()
if r is None:
break
game = PGN131Fen()
game.get_first_game(r[1])
for ps in game.position_strings:
fullposition.get_full_position_games(ps)
if fullposition.recordset.is_record_number_in_record_set(r[0]):
break
else:
self.counter.set(str(r[0]))
self.text.see(tkinter.END)
self.text.update()
continue
if game.position_fens:
errors.append(r)
error_fens.append([])
board_fens.append([])
self.insert_text("\n")
self.insert_text(
"Serial " + str(len(errors)) + "\t\tRecord " + str(r[0])
)
self.insert_text("\n")
gpf = game.position_fens
gbf = game.board_fens
for e, p in enumerate(gpf):
error_fens[-1].append(p)
board_fens[-1].append(gbf[e])
self.insert_text(p)
self.insert_text("\n")
self.insert_text(literal_eval(r[1]))
self.insert_text("\n")
g = PGNFen()
g.get_first_game(r[1])
if g.position_fens:
not_fixed_count += 1
gpf = g.position_fens
for p in gpf:
self.insert_text(p)
self.insert_text("\n")
self.counter.set(str(r[0]))
self.text.see(tkinter.END)
self.text.update()
gc.close()
self.insert_text("\n")
self.insert_text("\n")
self.insert_text(
"Total errors " + str(sum(len(e) for e in error_fens))
)
self.insert_text("\n")
self.insert_text("Total games with errors " + str(len(errors)))
self.insert_text("\n")
self.insert_text("Total games " + self.counter.get())
self.insert_text("\n")
if not_fixed_count:
return not_fixed_count
if not errors:
return 0
self.insert_text("\n")
self.insert_text("Fixing castling options.")
self.insert_text("\n")
        # If a single transaction surrounds the loop a MemoryError is
        # encountered after no more than 100 games on OpenBSD with bsddb3,
        # citing 'Lock table is out of available locks'.
le = len(errors)
for e, x in enumerate(zip(errors, error_fens, board_fens)):
r, fens, board = x
del x
oldgame = ChessDBrecordGameUpdate131()
oldgame.load_instance(
self.opendatabase, GAMES_FILE_DEF, GAMES_FILE_DEF, r
)
newgame = ChessDBrecordGameUpdate()
newgame.load_instance(
self.opendatabase, GAMES_FILE_DEF, GAMES_FILE_DEF, r
)
self.opendatabase.start_transaction()
self.insert_text("\n")
self.insert_text("Deleting record " + str(r[0]))
self.text.see(tkinter.END)
self.text.update()
self.opendatabase.delete_instance("games", oldgame)
self.insert_text("\n")
self.insert_text("Inserting replacement for record " + str(r[0]))
self.text.see(tkinter.END)
self.text.update()
self.opendatabase.put_instance("games", newgame)
self.insert_text("\n")
self.insert_text(
str(e + 1)
+ " of "
+ str(le)
+ " done. "
+ str(le - e - 1)
+ " to do."
)
self.text.see(tkinter.END)
self.text.update()
ac = self.opendatabase.database_cursor(
ANALYSIS_FILE_DEF, VARIATION_FIELD_DEF
)
replacements = []
for f, b in zip(fens, board):
engineanalysis.find_position_analysis(f)
ar = ac.nearest(f)
while True:
if ar is None:
break
if ar[0] != f:
break
replacements.append((ar, get_fen_string(b)))
ar = ac.next()
ac.close()
if len(replacements):
self.insert_text("\n")
self.insert_text(
"Correcting "
+ str(len(replacements))
+ " position analysis records."
)
self.text.see(tkinter.END)
self.text.update()
ac = self.opendatabase.database_cursor(
ANALYSIS_FILE_DEF, ANALYSIS_FILE_DEF
)
for ar, fen in replacements:
oldanalysis = ChessDBrecordAnalysis()
newanalysis = ChessDBrecordAnalysis()
dar = ac.setat((ar[1],))
if dar is None:
self.insert_text("\n")
self.insert_text("Unable to apply " + fen + " correction.")
self.text.see(tkinter.END)
self.text.update()
continue
oldanalysis.load_instance(
self.opendatabase,
ANALYSIS_FILE_DEF,
ANALYSIS_FILE_DEF,
dar,
)
newanalysis.load_instance(
self.opendatabase,
ANALYSIS_FILE_DEF,
ANALYSIS_FILE_DEF,
dar,
)
newanalysis.value.position = fen
oldanalysis.newrecord = newanalysis
self.opendatabase.edit_instance(ANALYSIS_FILE_DEF, oldanalysis)
ac.close()
self.opendatabase.commit()
self.insert_text("\n")
self.text.see(tkinter.END)
def set_menu_and_entry_events(self, active):
menu = self.menu
if active:
menu.add_separator()
menu.add_command(
label="Process ChessTab Database",
command=self.report_games_with_inconsistent_castling_options,
accelerator="Alt F4",
)
menu.add_separator()
menu.add_command(
label="Select ChessTab Database Directory",
command=self.select_chesstab_database,
accelerator="Alt F5",
)
menu.add_separator()
menu.add_command(
label="Save Log As ...",
command=self.save_log_as,
accelerator="Alt F2",
)
menu.add_separator()
else:
menu.delete(0, tkinter.END)
for entry in (self.text,):
self._bind_for_scrolling_only(entry)
for entry in self.entry, self.text:
entry.bind(
"<Alt-KeyPress-F5>",
"" if not active else self.select_chesstab_database,
)
entry.bind(
"<Alt-KeyPress-F4>",
""
if not active
else self.report_games_with_inconsistent_castling_options,
)
entry.bind(
"<KeyPress-Return>",
""
if not active
else self.report_games_with_inconsistent_castling_options,
)
entry.bind(
"<Alt-KeyPress-F2>", "" if not active else self.save_log_as
)
def _bind_for_scrolling_only(self, widget):
widget.bind("<KeyPress>", "break")
widget.bind("<Home>", "return")
widget.bind("<Left>", "return")
widget.bind("<Up>", "return")
widget.bind("<Right>", "return")
widget.bind("<Down>", "return")
widget.bind("<Prior>", "return")
widget.bind("<Next>", "return")
widget.bind("<End>", "return")
if __name__ == "__main__":
Main().root.mainloop()
| 140,774
| 10,414
| 797
|
95267cbca2f7b75442e0d4693416ef0ee8ad422a
| 4,439
|
py
|
Python
|
game/Hangman2.0.py
|
archu2020/python-2
|
19c626ca9fd37168db8a7ac075fd80c8e2971313
|
[
"Apache-2.0"
] | 48
|
2017-12-24T12:19:55.000Z
|
2022-02-26T13:14:27.000Z
|
game/Hangman2.0.py
|
17610178081/python
|
3975c678d985c468deecd03560d882e9d316bb63
|
[
"Apache-2.0"
] | 3
|
2018-12-05T08:48:14.000Z
|
2020-07-29T01:56:16.000Z
|
game/Hangman2.0.py
|
17610178081/python
|
3975c678d985c468deecd03560d882e9d316bb63
|
[
"Apache-2.0"
] | 113
|
2017-08-09T03:10:04.000Z
|
2022-03-26T16:05:01.000Z
|
    '''
    made by YeahKun on 2017-7-22 11:50:42
    Word-guessing (Hangman) game 2.0
    Adds hints and expands the categories and number of words
    '''
import random
HANGMANPICS = [
'''
=====
+---+
| |
|
|
|
|
=====''',
'''
=====
+---+
| |
O |
|
|
|
=====''',
'''
=====
+---+
| |
O |
| |
|
|
=====''',
'''
======
+---+
| |
O |
/| |
|
|
======''',
'''
======
+---+
| |
O |
/|\ |
|
|
======''',
'''
======
+---+
| |
O |
/|\ |
/ |
|
======''',
'''
======
+---+
| |
O |
/|\ |
/ \ |
|
======''',
'''
======
+---+
| |
(O |
/|\ |
/ \ |
|
======''',
'''
======
+---+
| |
(O) |
/|\ |
/ \ |
|
======'''
]
    # Dictionary holding all the secret words
words = {
'Colors': 'red blue pink yellow green white gray black purple orange clear tan'.split(),
'Fruits': 'tomato orange banana berry mango pear cherry melon plum jackfrult grape'.split(),
'Animals': 'tiger deer lion sheep dog cat horse monkey snake frog fox pig ox duck chicken elephant'.split()
}
if __name__ == '__main__':
print('H A N G M A N')
        missedLetters = ''  # Letters already guessed that are not in the secret word
        correctLetters = ''  # Letters already guessed that are in the secret word
        serectWord, wordKey = getRandomWord(words)  # Get a random secret word
gameIsDone = False
while True:
disPlayGround(HANGMANPICS, missedLetters,
                          correctLetters, serectWord)  # Show the game board
            # Player enters a guessed letter
            guess = getGuess(missedLetters + correctLetters)  # String of every letter the player has entered
            # Check whether the guessed letter is in the secret word
            if guess in serectWord:  # If it is
correctLetters = correctLetters + guess
                # Check whether the player has won
foundAllLetters = True
for i in range(len(serectWord)):
if serectWord[i] not in correctLetters:
foundAllLetters = False
break
if foundAllLetters:
print("Yes! The secret word is " +
serectWord + "! You have won!")
gameIsDone = True
else:
missedLetters = missedLetters + guess
#
if len(missedLetters) == len(HANGMANPICS) - 1:
disPlayGround(HANGMANPICS, missedLetters,
correctLetters, serectWord)
print("\nYou have run out of guesses!\n " + "The secret word is " + serectWord + "\nAfter " + str(len(missedLetters)) + " missed guesses and " +
str(len(correctLetters)) + " correct guesses, the word was" + serectWord)
gameIsDone = True
if gameIsDone:
if playAgain():
missedLetters = ''
correctLetters = ''
gameIsDone = False
                    serectWord, wordKey = getRandomWord(words)
else:
break
| 22.195
| 161
| 0.478937
|
    '''
    made by YeahKun on 2017-7-22 11:50:42
    Word-guessing (Hangman) game 2.0
    Adds hints and expands the categories and number of words
    '''
import random
HANGMANPICS = [
'''
=====
+---+
| |
|
|
|
|
=====''',
'''
=====
+---+
| |
O |
|
|
|
=====''',
'''
=====
+---+
| |
O |
| |
|
|
=====''',
'''
======
+---+
| |
O |
/| |
|
|
======''',
'''
======
+---+
| |
O |
/|\ |
|
|
======''',
'''
======
+---+
| |
O |
/|\ |
/ |
|
======''',
'''
======
+---+
| |
O |
/|\ |
/ \ |
|
======''',
'''
======
+---+
| |
(O |
/|\ |
/ \ |
|
======''',
'''
======
+---+
| |
(O) |
/|\ |
/ \ |
|
======'''
]
    # Dictionary holding all the secret words
words = {
'Colors': 'red blue pink yellow green white gray black purple orange clear tan'.split(),
'Fruits': 'tomato orange banana berry mango pear cherry melon plum jackfrult grape'.split(),
'Animals': 'tiger deer lion sheep dog cat horse monkey snake frog fox pig ox duck chicken elephant'.split()
}
def getRandomWord(wordDict):
        # Pick one of the dictionary's categories (a key)
wordKey = random.choice(list(wordDict.keys()))
letter = random.randint(0, len(wordDict[wordKey]) - 1)
        return [wordDict[wordKey][letter],wordKey] # Return not just the secret word but also its category, used as a hint for the player
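        # Illustration added for clarity (hypothetical draw): a call such as
        # getRandomWord(words) might return ['cherry', 'Fruits'], which the
        # caller unpacks as "serectWord, wordKey = getRandomWord(words)" so
        # that wordKey can later be shown as a hint in getGuess().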
def disPlayGround(HANGMANPICS, missedLetters, correctLetters, serectWord):
        # Game display board: shows the current state of play
print(HANGMANPICS[len(missedLetters)], end='\n')
print('Missed letters:', end='')
for letter in missedLetters:
print(letter, end=' ')
        print()  # Print a newline so the output looks tidy
blanks = '_' * len(serectWord)
for i in range(len(serectWord)):
            # Replace blanks with correctly guessed letters
if serectWord[i] in correctLetters:
blanks = blanks[:i] + serectWord[i] + blanks[i + 1:]
for letter in blanks:
            # Show the secret word, with blanks for unguessed letters
print(letter, end=' ')
def getGuess(alreadyGuessed):
        # Make sure the player enters exactly one letter
while True:
print('\n\ntips:', wordKey)
print('\nGuess a letter:')
guess = input()
if len(guess) != 1:
print("Please enter a single letter.")
elif guess in alreadyGuessed:
print("You have already guessed that letter,Choose again.")
elif guess.isalpha() == False:
print("Please enter a letter.")
else:
return guess
def playAgain():
        # Return True if the player wants to play again, otherwise False
print('Do you want to play again?(yes or no)')
return input().lower().startswith('y')
if __name__ == '__main__':
print('H A N G M A N')
        missedLetters = ''  # Letters already guessed that are not in the secret word
        correctLetters = ''  # Letters already guessed that are in the secret word
        serectWord, wordKey = getRandomWord(words)  # Get a random secret word
gameIsDone = False
while True:
disPlayGround(HANGMANPICS, missedLetters,
                          correctLetters, serectWord)  # Show the game board
            # Player enters a guessed letter
            guess = getGuess(missedLetters + correctLetters)  # String of every letter the player has entered
            # Check whether the guessed letter is in the secret word
            if guess in serectWord:  # If it is
correctLetters = correctLetters + guess
                # Check whether the player has won
foundAllLetters = True
for i in range(len(serectWord)):
if serectWord[i] not in correctLetters:
foundAllLetters = False
break
if foundAllLetters:
print("Yes! The secret word is " +
serectWord + "! You have won!")
gameIsDone = True
else:
missedLetters = missedLetters + guess
#
if len(missedLetters) == len(HANGMANPICS) - 1:
disPlayGround(HANGMANPICS, missedLetters,
correctLetters, serectWord)
print("\nYou have run out of guesses!\n " + "The secret word is " + serectWord + "\nAfter " + str(len(missedLetters)) + " missed guesses and " +
str(len(correctLetters)) + " correct guesses, the word was" + serectWord)
gameIsDone = True
if gameIsDone:
if playAgain():
missedLetters = ''
correctLetters = ''
gameIsDone = False
                    serectWord, wordKey = getRandomWord(words)
else:
break
| 1,603
| 0
| 99
|
ed5cb3b2ba71d67a9a4146c3d8fa1453b913f27b
| 13,743
|
py
|
Python
|
dt-learn.py
|
atasipanda/Decision-Trees
|
6719d5d4ff282b8858f0bf8222af6660a3c113d2
|
[
"MIT"
] | null | null | null |
dt-learn.py
|
atasipanda/Decision-Trees
|
6719d5d4ff282b8858f0bf8222af6660a3c113d2
|
[
"MIT"
] | null | null | null |
dt-learn.py
|
atasipanda/Decision-Trees
|
6719d5d4ff282b8858f0bf8222af6660a3c113d2
|
[
"MIT"
] | null | null | null |
import sys
import math
import re
from collections import OrderedDict
import random
import numpy
import matplotlib.pyplot as plt
features = OrderedDict({})
trainingData = []
testFeatures = OrderedDict({})
testData = []
class_attribute = []
# returns
# feature index of the best feature
# information gain by that feature in data
# ----------------------------------------------------------------------------------------
# feature = None # slope
    # parent = None # another node
# children = [] # some nodes
# feature_values = [] # [up, down, flat] # TODO think this way or store conditions
# class_type = None # negative or positive
# negPosCount = None # number of +ves and -ves at this node
# head = None # head node for the tree
# def add_node(self, node_to_add, nodes_parent):
# nodes_parent.
# eg: (56,'male','atyp_angina',120,236,'f','normal',178,'no',0.8,'up',0,'normal') => (negative)
def get_class_type(data, features=None):
'''
        gets the class type if the data has reached a stopping point (all instances share one class)
:param data:
:param features:
:return: positive or negative class if this is a stopping phase, else None
'''
posNegCounts = findPosNegCounts(data)
if posNegCounts[0] == 0:
return 'positive'
elif posNegCounts[1] == 0:
return 'negative'
return None
if __name__ == '__main__':
main()
| 34.27182
| 150
| 0.594266
|
import sys
import math
import re
from collections import OrderedDict
import random
import numpy
import matplotlib.pyplot as plt
features = OrderedDict({})
trainingData = []
testFeatures = OrderedDict({})
testData = []
class_attribute = []
def parse_file(filename):
global lines, f, lines_read, l, m, fvalues, values, index, value
data = []
lines = None
with open((filename), 'r')as f:
lines = f.read().splitlines()
lines_read = 1 # @relation line ignored
for l in lines[lines_read:]:
m = re.search(r'@attribute', l)
if m:
# @attribute 'age' real
m = re.search(r'@attribute \'([a-zA-Z0-9]+)\' {\s*(.*)\s*}', l)
if m:
# @attribute 'sex' { female, male}
features[m.group(1)] = m.group(2).replace(' ', "").split(',')
else:
m = re.search(r'@attribute \'([a-zA-Z0-9]+)\'', l)
features[m.group(1)] = 'real'
else:
break # assumption, all attributes are declared in order at the beginning of the file
lines_read += len(features)
lines_read += 1 # @data line ignored
print features
fvalues = features.values()
for l in lines[lines_read:]:
# 63,male,typ_angina,145,233,t,left_vent_hyper,150,no,2.3,down,0,fixed_defect,negative
values = l.split(',')
for index, value in enumerate(values):
if fvalues[index] == 'real':
values[index] = float(value)
data.append(values)
class_attribute = features[features.keys()[-1]]
del features[features.keys()[-1]] # class attribute ignored; assuming it is the last element
return features, data, class_attribute
def splitData(data, element, feature_index):
elementInstances = []
for d in data:
if d[feature_index] == element:
elementInstances.append(d)
return elementInstances
def findPosNegCounts(data):
negativeCount = 0
positiveCount = 0
for d in data:
if d[-1] == 'negative':
negativeCount = negativeCount + 1
else:
positiveCount = positiveCount + 1
return negativeCount, positiveCount
def findEntropy(data):
posNegCounts = findPosNegCounts(data)
totalCount = len(data)
if totalCount == 0:
return 0
# # TODO: validate this assumption
if posNegCounts[0] * posNegCounts[1] == 0:
return 0
negFraction = (posNegCounts[0] / float(totalCount)) or 1
posFraction = (posNegCounts[1] / float(totalCount)) or 1
return -1 * ((posFraction * (math.log(posFraction, 2))) + (negFraction * (math.log(negFraction, 2))))
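        # Worked example (added for clarity): for a node with 3 negatives and
        # 5 positives this returns
        #     -(5/8)*log2(5/8) - (3/8)*log2(3/8), about 0.954 bits,
        # while a pure node (all one class) returns 0 via the early exit above.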
def findInfoGainForThreshold(data, Left, Right):
parentEntropy = findEntropy(data)
parentTotal = len(data)
LTotal = len(Left)
RTotal = len(Right)
return parentEntropy - (
((LTotal / float(parentTotal)) * findEntropy(Left)) + ((RTotal / float(parentTotal)) * findEntropy(Right)))
def findThreshold(data, position):
keyList = []
infogains = []
for d in data:
if d[position] not in keyList:
keyList.append(d[position])
i = 0
keyList = sorted(keyList)
if len(keyList) == 1:
return (keyList[0], 0)
while i < (len(keyList) - 1):
threshold = (keyList[i] + keyList[i + 1]) / 2.0
L = []
R = []
for d in data:
if d[position] <= threshold:
L.append(d)
else:
R.append(d)
i = i + 1
infoTuple = (threshold, findInfoGainForThreshold(data, L, R))
infogains.append(infoTuple)
return sorted(infogains, key=lambda x: x[1], reverse=True)[0]
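        # Worked example (added for clarity): if the distinct sorted values of
        # a numeric feature are [120, 130, 145], the candidate thresholds
        # tried above are the midpoints 125.0 and 137.5, and the
        # (threshold, info_gain) pair with the largest gain is returned.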
def numericSplit(data, feature_index):
threshold = findThreshold(data, feature_index)[0]
leftArray = []
rightArray = []
for d in data:
# print "Culprit-------------"
# print 'Threshold', threshold
# print 'feature_values',float(d[feature_index])
# print 'Is',d[feature_index],'>',threshold,':',(float(d[feature_index])>threshold)
if d[feature_index] > threshold:
rightArray.append(d)
else:
leftArray.append(d)
return leftArray, rightArray
def findInformationGain(data, feature, feature_index):
parentEntropy = findEntropy(data)
# print 'Parent Entropy %f' % parentEntropy
parentTotal = len(data)
if features[feature] == 'real':
# print 'Feature is numerical'
L, R = numericSplit(data, feature_index)
LTotal = len(L)
RTotal = len(R)
return parentEntropy - (
((LTotal / float(parentTotal)) * findEntropy(L)) + ((RTotal / float(parentTotal)) * findEntropy(R)))
else:
# print 'feature is nominal'
s = 0
for element in features[feature]:
matchingData = splitData(data, element, feature_index)
# if len(matchingData) == 0:
# continue
subArrayTotal = len(matchingData)
s = s + ((subArrayTotal / float(parentTotal)) * findEntropy(matchingData))
return parentEntropy - s
# returns
# feature index of the best feature
# information gain by that feature in data
def findBestCandidate(data, features):
i = 0
infoGains = []
for f in features.keys():
# print "\n\n**Feature", f, "\nIndex", i
infoTuple = (i, findInformationGain(data, f, i))
infoGains.append(infoTuple)
i = i + 1
sortedList = sorted(infoGains, key=lambda x: x[0])
return sorted(sortedList, key=lambda x: x[1], reverse=True)[0]
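        # Illustration (added for clarity, hypothetical numbers): with features
        # ordered [age, sex, slope], a return value of (2, 0.31) would mean
        # 'slope' gives the highest information gain, about 0.31 bits; ties in
        # gain go to the earlier feature because the final sort is stable and
        # the list is pre-sorted by feature index.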
# ----------------------------------------------------------------------------------------
def predict_class(head, instance):
# if head.class_type is not None:
# return head.class_type
if head.feature is None:
return head.class_type # decision tree prediction
keyList = features.keys()
feature_value = instance[keyList.index(head.feature)]
if features[head.feature] == 'real':
if feature_value <= head.children[0].feature_values:
return predict_class(head.children[0], instance)
else:
return predict_class(head.children[1], instance)
else:
matching_child = None
for c in head.children:
if c.feature_values == feature_value:
matching_child = c
break
return predict_class(matching_child, instance)
class Node:
# feature = None # slope
    #     parent = None  # another node
# children = [] # some nodes
# feature_values = [] # [up, down, flat] # TODO think this way or store conditions
# class_type = None # negative or positive
# negPosCount = None # number of +ves and -ves at this node
def __init__(self, feature=None, parent=None, children=[], feature_values=[], class_type=None,
negPosCount=None):
self.feature = feature
self.parent = parent
self.children = []
self.feature_values = feature_values
self.class_type = class_type
self.negPosCount = negPosCount
def add_child(self, node):
self.children.append(node)
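    # Editor's sketch (hypothetical, for illustration only): building a
    # one-split tree by hand shows how feature, feature_values, class_type and
    # children relate before Tree.createTree below automates it.
    def _example_manual_tree():
        root = Node(feature='slope')
        up = Node(parent=root, feature_values='up', class_type='negative')
        down = Node(parent=root, feature_values='down', class_type='positive')
        root.add_child(up)
        root.add_child(down)
        return root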
class Tree:
# head = None # head node for the tree
def __init__(self):
self.head = Node()
# def add_node(self, node_to_add, nodes_parent):
# nodes_parent.
def createTree(self, data, head, depth, n):
s = get_class_type(data)
if s:
# base case
head.class_type = s # positive or negative
print ':', head.class_type
return head
if len(data) < n: # TODO this should check if it is less than n and should not be hardcoded
head.class_type = 'negative'
return head
best_feature_index, info_gain = findBestCandidate(data, features)
if info_gain == 0:
negPosCount = findPosNegCounts(data)
if negPosCount[0] < negPosCount[1]:
head.class_type = class_attribute[1]
else:
head.class_type = class_attribute[0]
print ':', head.class_type
return head
else:
print
f = features.keys()[best_feature_index]
head.feature = f
if features[f] == 'real':
for i, filteredData in enumerate(numericSplit(data, best_feature_index)):
child = Node(negPosCount=findPosNegCounts(filteredData))
child.feature_values = findThreshold(data, best_feature_index)[0]
for x in range(0, depth): print '|\t',
if i == 0:
print head.feature, '<=', findThreshold(data, best_feature_index)[0], '[', child.negPosCount[
0], \
child.negPosCount[1], ']',
else:
print head.feature, '>', findThreshold(data, best_feature_index)[0], '[', child.negPosCount[0], \
child.negPosCount[1], ']',
remainingfeatures = features.copy()
del remainingfeatures[f]
head.children.append(child)
self.createTree(filteredData, child, depth + 1, n)
else:
for element in features[f]:
filteredData = splitData(data, element, best_feature_index)
child = Node(negPosCount=findPosNegCounts(filteredData))
child.feature_values = element
for i in range(0, depth): print '|\t',
print head.feature, '=', element, '[', child.negPosCount[0], child.negPosCount[1], ']',
remainingfeatures = features.copy()
del remainingfeatures[f]
head.children.append(child)
self.createTree(filteredData, child, depth + 1, n)
return head
# eg: (56,'male','atyp_angina',120,236,'f','normal',178,'no',0.8,'up',0,'normal') => (negative)
def find_class(data):
# traverses the tree and finds the class
pass
def get_class_type(data, features=None):
'''
        gets the class type if the data has reached a stopping point (all instances share one class)
:param data:
:param features:
:return: positive or negative class if this is a stopping phase, else None
'''
posNegCounts = findPosNegCounts(data)
if posNegCounts[0] == 0:
return 'positive'
elif posNegCounts[1] == 0:
return 'negative'
return None
def main():
argumentList=(sys.argv)
n = int(argumentList[3])
trainingFileName = argumentList[1]
testFileName = argumentList[2]
features,trainingData,class_attribute=parse_file(trainingFileName)
testFeatures,testData,class_attribute=parse_file(testFileName)
t = Tree()
t.createTree(trainingData, t.head, 0, n)
i = 1
sum = 0
print '<Predictions for the Test Set Instances>'
for d in testData:
prediction = predict_class(t.head,d)
realClass = d[-1]
print i,': Actual: ', realClass,' Predicted: ',prediction
if prediction==realClass:
sum = sum+1
print 'Number of correctly classified: ',sum,'Total number of test instances: ',len(testData)
#------------------------------Code for plotting various graphs based on data size-accuracy and tree size-accuracy-------------------------
#newDataSetSizeList = [int(.05*len(trainingData)),int(.1*len(trainingData)),int(.2*len(trainingData)),int(.5*len(trainingData)),len(trainingData)]
#plotForSubset(newDataSetSizeList,trainingData,testData)
'''
mvalues = [2,5,10,20]
accuracyList=[]
for m in mvalues:
t = Tree()
t.createTree(trainingData, t.head, 0, m)
sum = 0
for d in testData:
prediction = predict_class(t.head,d)
realClass = d[-1]
#print i,': Actual: ', realClass,' Predicted: ',prediction
if prediction==realClass:
sum = sum+1
accuracy = sum/float(len(testData))
accuracyList.append(accuracy)
plt.plot(mvalues,accuracyList,'r')
plt.axis([0, 25, 0, 1])
plt.xlabel('Tree Size')
plt.ylabel('Accuracy')
plt.show()
'''
def plotForSubset(newDataSetSizeList,trainingData,testData):
xvalues=[]
ymaxvalues=[]
yminvalues=[]
ymeanvalues=[]
for newDataSetSize in newDataSetSizeList:
accuracyList =get_accuracyList(trainingData,newDataSetSize,testData)
max = sorted(accuracyList)[-1]
min = sorted(accuracyList)[0]
mean = numpy.mean(accuracyList)
xvalues.append(newDataSetSize)
ymaxvalues.append(max)
yminvalues.append(min)
ymeanvalues.append(mean)
plt.plot(xvalues,ymaxvalues,'r')
plt.plot(xvalues,yminvalues,'b')
plt.plot(xvalues,ymeanvalues,'g')
plt.axis([0, len(trainingData), 0, 1])
plt.xlabel('Data Size')
plt.ylabel('Accuracy')
plt.show()
def get_accuracyList(trainingData,newDataSetSize,testData):
j=0
accuracyList = []
while j<10:
t = Tree()
t.createTree(get_subset(trainingData,newDataSetSize), t.head, 0, 4)
i = 1
sum = 0
for d in testData:
prediction = predict_class(t.head,d)
realClass = d[-1]
#print i,': Actual: ', realClass,' Predicted: ',prediction
if prediction==realClass:
sum = sum+1
accuracy = sum/float(len(testData))
accuracyList.append(accuracy)
j=j+1
return accuracyList
def get_subset(trainingData,newDatasetSize):
rand_smpl = []
rand_smpl_indices = random.sample(xrange(len(trainingData)), newDatasetSize)
for i in rand_smpl_indices:
rand_smpl.append(trainingData[i])
#print rand_smpl
return rand_smpl
if __name__ == '__main__':
main()
| 11,868
| -20
| 499
|
787f01363f8f53606c734d695875b0fc99b9907f
| 392
|
py
|
Python
|
code/class.py
|
spartam/basic_python_course
|
5e940c2363ba01880386244ad17692b4593611f8
|
[
"MIT"
] | null | null | null |
code/class.py
|
spartam/basic_python_course
|
5e940c2363ba01880386244ad17692b4593611f8
|
[
"MIT"
] | null | null | null |
code/class.py
|
spartam/basic_python_course
|
5e940c2363ba01880386244ad17692b4593611f8
|
[
"MIT"
] | null | null | null |
if __name__ == '__main__':
P = coordinate()
print(P)
P = coordinate(x=2)
print(P)
P = coordinate(y=3)
print(P)
P = coordinate(x=5, y=7)
print(P)
| 14.518519
| 42
| 0.57398
|
class coordinate:
x = 0
y = 0
def __init__(self, **kwargs):
keys = kwargs.keys()
if 'x' in keys:
self.x = kwargs['x']
if 'y' in keys:
self.y = kwargs['y']
def __str__(self):
return 'x : %s\ty: %s' %(self.x, self.y)
if __name__ == '__main__':
P = coordinate()
print(P)
P = coordinate(x=2)
print(P)
P = coordinate(y=3)
print(P)
P = coordinate(x=5, y=7)
print(P)
| 155
| 58
| 23
|
3d084a7fbfa072a06df92b18c57c17c769087e2c
| 623
|
py
|
Python
|
ClickKaleidoscope.py
|
avinja/python-samples
|
0f4f94dbc1a886926ad31719a4f34b21c56f2056
|
[
"Apache-2.0"
] | null | null | null |
ClickKaleidoscope.py
|
avinja/python-samples
|
0f4f94dbc1a886926ad31719a4f34b21c56f2056
|
[
"Apache-2.0"
] | null | null | null |
ClickKaleidoscope.py
|
avinja/python-samples
|
0f4f94dbc1a886926ad31719a4f34b21c56f2056
|
[
"Apache-2.0"
] | null | null | null |
#ClickKaleidoscope.py
import random
import turtle
t = turtle.Pen()
t.speed(0)
t.hideturtle()
turtle.bgcolor("black")
colors = ["red", "yellow", "blue", "green", "orange", "purple",
"white", "gray"]
turtle.onscreenclick(draw_kaleido)
| 23.074074
| 63
| 0.598716
|
#ClickKaleidoscope.py
import random
import turtle
t = turtle.Pen()
t.speed(0)
t.hideturtle()
turtle.bgcolor("black")
colors = ["red", "yellow", "blue", "green", "orange", "purple",
"white", "gray"]
def draw_kaleido(x,y):
t.pencolor(random.choice(colors))
size = random.randint(10,40)
draw_spiral(x,y, size)
draw_spiral(-x,y, size)
draw_spiral(-x,-y, size)
draw_spiral(x,-y, size)
def draw_spiral(x,y, size):
t.penup()
t.setpos(x,y)
t.pendown()
for m in range(size):
t.forward(m*2)
t.left(92)
turtle.onscreenclick(draw_kaleido)
| 334
| 0
| 44
|
0823e3f1378a191c96f2c6e83021f577431436d6
| 812
|
py
|
Python
|
tests/submodules/run.py
|
jonnyrocks/pyangbind
|
7a7c6df6ddad7cbec941800431840253b5e2f186
|
[
"Apache-2.0"
] | 176
|
2015-06-17T15:44:07.000Z
|
2022-03-18T01:16:19.000Z
|
tests/submodules/run.py
|
jonnyrocks/pyangbind
|
7a7c6df6ddad7cbec941800431840253b5e2f186
|
[
"Apache-2.0"
] | 245
|
2015-05-29T07:04:13.000Z
|
2022-03-25T14:44:37.000Z
|
tests/submodules/run.py
|
jonnyrocks/pyangbind
|
7a7c6df6ddad7cbec941800431840253b5e2f186
|
[
"Apache-2.0"
] | 118
|
2015-07-02T07:04:36.000Z
|
2022-03-31T20:32:38.000Z
|
#!/usr/bin/env python
import unittest
from tests.base import PyangBindTestCase
if __name__ == "__main__":
unittest.main()
| 23.2
| 55
| 0.644089
|
#!/usr/bin/env python
import unittest
from tests.base import PyangBindTestCase
class PyangbindSubmoduleTests(PyangBindTestCase):
yang_files = ["mod-a.yang"]
pyang_flags = ["--use-extmethods"]
def setUp(self):
self.mod_a = self.bindings.mod_a()
def test_001_check_correct_import(self):
self.assertTrue(hasattr(self.mod_a, "a"))
self.assertTrue(hasattr(self.mod_a.a, "b"))
def test_002_identity_in_submodule(self):
self.assertTrue(hasattr(self.mod_a, "q"))
self.assertTrue(hasattr(self.mod_a.q, "idref"))
def test_assign_idref(self):
passed = True
try:
self.mod_a.q.idref = "j"
except ValueError:
passed = False
self.assertTrue(passed)
if __name__ == "__main__":
unittest.main()
| 451
| 207
| 23
|
c33950bf8d2032710832c6024b8e90c72bb403b7
| 5,340
|
py
|
Python
|
src/eduid_userdb/support/models.py
|
SUNET/eduid-userdb
|
5970880caf0b0e2bdee6c23869ef287acc87af2a
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
src/eduid_userdb/support/models.py
|
SUNET/eduid-userdb
|
5970880caf0b0e2bdee6c23869ef287acc87af2a
|
[
"BSD-2-Clause-FreeBSD"
] | 12
|
2015-08-28T12:05:32.000Z
|
2020-06-23T13:31:29.000Z
|
src/eduid_userdb/support/models.py
|
SUNET/eduid-userdb
|
5970880caf0b0e2bdee6c23869ef287acc87af2a
|
[
"BSD-2-Clause-FreeBSD"
] | 2
|
2016-10-24T06:37:33.000Z
|
2016-11-21T11:39:39.000Z
|
# -*- coding: utf-8 -*-
from copy import deepcopy
from typing import List, Optional
__author__ = 'lundberg'
# Models for filtering out unneeded or unwanted data from eduID database objects
| 28.55615
| 113
| 0.63633
|
# -*- coding: utf-8 -*-
from copy import deepcopy
from typing import List, Optional
__author__ = 'lundberg'
# Models for filtering out unneeded or unwanted data from eduID database objects
class GenericFilterDict(dict):
add_keys: Optional[List[str]] = None
remove_keys: Optional[List[str]] = None
def __init__(self, data):
"""
Create a filtered dict with white- or blacklisting of keys
:param data: Data to filter
:type data: dict
"""
_data = deepcopy(data)
super(GenericFilterDict, self).__init__()
if _data is None:
pass
elif self.add_keys:
for key in self.add_keys:
try:
self[key] = _data[key]
except KeyError:
pass
elif self.remove_keys:
for key in self.remove_keys:
_data.pop(key, None)
self.update(_data)
else:
self.update(_data)
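    # Editor's illustration (hypothetical subclass, not part of eduID): with
    # add_keys acting as a whitelist, a filter such as
    #
    #     class NameOnly(GenericFilterDict):
    #         add_keys = ['name']
    #
    # turns {'name': 'Kalle', 'secret': 'hunter2'} into {'name': 'Kalle'},
    # while a remove_keys list would instead drop the named keys and keep
    # everything else.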
class SupportUserFilter(GenericFilterDict):
remove_keys = ['_id', 'letter_proofing_data']
def __init__(self, data):
_data = deepcopy(data)
super(SupportUserFilter, self).__init__(_data)
self['mailAliases'] = [MailAlias(alias) for alias in self['mailAliases']]
self['passwords'] = [Credential(password) for password in self['passwords']]
self['tou'] = [ToU(tou) for tou in self['tou']]
class SupportSignupUserFilter(GenericFilterDict):
remove_keys = ['_id', 'letter_proofing_data']
def __init__(self, data):
_data = deepcopy(data)
super(SupportSignupUserFilter, self).__init__(_data)
self['mailAliases'] = [MailAlias(alias) for alias in self['mailAliases']]
self['passwords'] = [Credential(password) for password in self['passwords']]
self['tou'] = [ToU(tou) for tou in self['tou']]
self['pending_mail_address'] = PendingMailAddress(self.get('pending_mail_address'))
class MailAlias(GenericFilterDict):
remove_keys = ['verification_code']
class PendingMailAddress(MailAlias):
pass
class Credential(GenericFilterDict):
add_keys = ['_id', 'created_by', 'created_ts', 'type', 'success_ts']
def __init__(self, data):
_data = deepcopy(data)
# Figure out type of credential
if 'salt' in _data:
_data['type'] = 'Password'
elif 'keyhandle' in _data:
_data['type'] = 'U2F'
super(Credential, self).__init__(_data)
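    # Illustration (added for clarity): a credential dict containing a 'salt'
    # key is labelled type 'Password' and one containing 'keyhandle' becomes
    # 'U2F'; anything else is passed through without a 'type' entry because
    # the whitelist lookup in GenericFilterDict silently skips missing keys.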
class ToU(GenericFilterDict):
remove_keys = ['id']
class UserAuthnInfo(GenericFilterDict):
add_keys = ['success_ts', 'fail_count', 'success_count']
def __init__(self, data):
_data = deepcopy(data)
# Remove months with 0 failures or successes
for attrib in ['fail_count', 'success_count']:
for key, value in data.get(attrib, {}).items():
if value == 0:
del _data[attrib][key]
super(UserAuthnInfo, self).__init__(_data)
class UserVerifications(GenericFilterDict):
add_keys = ['verified', 'obj_id', 'timestamp', 'model_name', 'verified_timestamp']
class UserActions(GenericFilterDict):
add_keys = ['action', 'params']
class ProofingLogEntry(GenericFilterDict):
add_keys = ['verified_data', 'created_ts', 'proofing_method', 'proofing_version', 'created_by', 'vetting_by']
def __init__(self, data):
_data = deepcopy(data)
# Rename the verified data key to verified_data
verified_data_names = ['nin', 'mail_address', 'phone_number', 'orcid']
for name in verified_data_names:
if name in _data:
_data['verified_data'] = _data[name]
super(ProofingLogEntry, self).__init__(_data)
class UserLetterProofing(GenericFilterDict):
add_keys = ['nin', 'proofing_letter']
class Nin(GenericFilterDict):
add_keys = ['created_ts', 'number']
class ProofingLetter(GenericFilterDict):
add_keys = ['sent_ts', 'is_sent', 'address']
def __init__(self, data):
_data = deepcopy(data)
super(UserLetterProofing, self).__init__(_data)
self['nin'] = self.Nin(self['nin'])
self['proofing_letter'] = self.ProofingLetter(self['proofing_letter'])
class UserOidcProofing(GenericFilterDict):
add_keys = ['nin', 'modified_ts', 'state']
class Nin(GenericFilterDict):
add_keys = ['created_ts', 'number']
def __init__(self, data):
_data = deepcopy(data)
super(UserOidcProofing, self).__init__(_data)
self['nin'] = self.Nin(self['nin'])
class UserEmailProofing(GenericFilterDict):
add_keys = ['verification', 'modified_ts']
class Verification(GenericFilterDict):
add_keys = ['created_ts', 'email']
def __init__(self, data):
_data = deepcopy(data)
super(UserEmailProofing, self).__init__(_data)
self['verification'] = self.Verification(self['verification'])
class UserPhoneProofing(GenericFilterDict):
add_keys = ['verification', 'modified_ts']
class Verification(GenericFilterDict):
add_keys = ['created_ts', 'number']
def __init__(self, data):
_data = deepcopy(data)
super(UserPhoneProofing, self).__init__(_data)
self['verification'] = self.Verification(self['verification'])
| 2,330
| 2,460
| 344
|
7996acf87911f57f60db3c91ce56015c83a551c3
| 4,414
|
py
|
Python
|
skspec/pandas_utils/dataframeserial.py
|
hugadams/scikit-spectra
|
c451be6d54080fbcc2a3bc5daf8846b83b7343ee
|
[
"BSD-3-Clause"
] | 83
|
2015-01-15T18:57:22.000Z
|
2022-01-18T11:43:55.000Z
|
skspec/pandas_utils/dataframeserial.py
|
hugadams/scikit-spectra
|
c451be6d54080fbcc2a3bc5daf8846b83b7343ee
|
[
"BSD-3-Clause"
] | 18
|
2015-02-02T22:46:51.000Z
|
2019-04-29T17:23:32.000Z
|
skspec/pandas_utils/dataframeserial.py
|
hugadams/scikit-spectra
|
c451be6d54080fbcc2a3bc5daf8846b83b7343ee
|
[
"BSD-3-Clause"
] | 43
|
2015-01-02T20:47:11.000Z
|
2021-12-18T16:14:40.000Z
|
    ''' Serialization interface for custom DataFrame objects. Allows saving/loading
    to memory streams or files. Because one cannot serialize DataFrames with
    custom attributes, this uses an intermediate object for that process. The plan
    is to implement pickling of saved methods later (requires more work). These are meant to
    supplant the DataFrame's save() and load() methods when custom attributes must persist.
    Note: this program detects custom attributes by inspecting your DataFrame's
    attributes using Python's built-in function, dir(). It compares these to the
    attributes of an empty DataFrame. This adds a bit of overhead, but should allow
    this program to work with new versions of pandas, as DataFrame's methods and attributes
    are likely to change. Is there a better way to do this?
The following four functions are defined:
df_dumps: Serialize a DataFrame into memory. Returns serialized stream.
df_dump: Serialize a DataFrame into a file. Returns None.
df_loads: Return a DataFrame from a serialized stream.
df_load: Return a Dataframe from a serialized file.
See bottom of file for test cases: '''
__author__ = "Adam Hughes"
__maintainer__ = "Adam Hughes"
__email__ = "hugadams@gwmail.gwu.edu"
__status__ = "Prototype"
import cPickle
from operator import attrgetter
from pandas import DataFrame
### For testing ###
from numpy.random import randn
class TempDump(object):
        ''' Temporary class to dump DataFrame object with custom attributes. Custom attributes are
passed in as a dictionary and then temporarily stored upon serialization as _metadict. Upon
deserialization, the attributes and values are re-appended to the DataFrame automatically.'''
dfempty=DataFrame()
defattrs=dir(dfempty)
def print_customattr(df):
'''Formatted output of all custom attributes found in a DataFrame. For all
attributes and methods, use dir(df).'''
metadict=_get_metadict(df)
if len(metadict) > 0:
print '\nFound %s custom attributes:\n'%len(metadict)
print '\n'.join([(k+'\t'+v) for k,v in sorted(metadict.items())])
else:
print 'No custom attributes found'
def _get_metadict(df):
''' Returns dictionary of attributes in a dataframe not found in the default frame.'''
attrs=dir(df)
newattr=[att for att in attrs if att not in defattrs] #if not is type(instancemethod?)
if len(newattr) > 1:
fget=attrgetter(*newattr)
return dict(zip(newattr, fget(df)))
else:
return {}
def df_dumps(df):
''' Save dataframe as a stream into memory.'''
metadict=_get_metadict(df)
return cPickle.dumps(TempDump(df, metadict )) #Dumps writes the object to memory
def df_dump(df, outfile):
''' Save dataframe as a file.'''
outstream=df_dumps(df) #Dumps writes the object to memory
f=open(outfile, 'wb') #binary mode so the pickled bytes round-trip on all platforms
f.write(outstream)
f.close()
return None #Should I return none or stream?
def df_load(infile):
'''Returns dataframe from a serialized file '''
f=open(infile, 'rb')
tempobj=cPickle.load(f)
f.close()
df=tempobj.dataframe
for attr, value in tempobj._metadict.items():
setattr(df, attr, value)
return df
def df_loads(stream):
''' Returns dataframe from a serialized stream'''
tempobj=cPickle.loads(stream) #loads not load
df=tempobj.dataframe
for attr, value in tempobj._metadict.items():
setattr(df, attr, value)
return df
if __name__ == '__main__':
### Make a random dataframe, add some attributes
df=DataFrame(((randn(3,3))), columns=['a','b','c'])
print_customattr(df)
print 'adding some attributes'
df.name='Billy'
df.junk='in the trunk'
print_customattr(df)
### Serialize into memory
stream=df_dumps(df)
print 'wrote dataframe to memory'
### Restore from memory
dfnew=df_loads(stream)
print 'restored from memory'
print_customattr(dfnew)
### Serialize into file
outfile='dftest.df' #What file extension is commonly used for this?
df_dump(df, outfile)
print 'wrote dataframe to file %s'%outfile
### Restore from file
dfnewnew=df_load(outfile)
print 'Restored from file %s'%outfile
print_customattr(dfnewnew)
| 35.031746
| 97
| 0.698006
|
''' Serialization interface for custom DataFrame objects. Allows one to save/load
to memory streams or files. Because one cannot serialize DataFrames with
custom attributes, this uses an intermediate object for that process. The plan
is to implement pickling of saved methods later (requires more work). These are meant to
supplant the DataFrame's save() and load() methods when custom attributes must persist.
Note, this program detects custom attributes by inspecting your DataFrame's
attributes using Python's builtin function, dir(). It compares these to the
attributes of an empty DataFrame. This adds a bit of overhead, but should allow
this program to work with new versions of pandas, as DataFrame's methods and attributes
are likely to change. Is there a better way to do this?
The following four functions are defined:
df_dumps: Serialize a DataFrame into memory. Returns serialized stream.
df_dump: Serialize a DataFrame into a file. Returns None.
df_loads: Return a DataFrame from a serialized stream.
df_load: Return a DataFrame from a serialized file.
See bottom of file for test cases: '''
__author__ = "Adam Hughes"
__maintainer__ = "Adam Hughes"
__email__ = "hugadams@gwmail.gwu.edu"
__status__ = "Prototype"
import cPickle
from operator import attrgetter
from pandas import DataFrame
### For testing ###
from numpy.random import randn
class TempDump(object):
''' Temporary class to dump a DataFrame object with custom attributes. Custom attributes are
passed in as a dictionary and then temporarily stored upon serialization as _metadict. Upon
deserialization, the attributes and values are re-appended to the DataFrame automatically.'''
def __init__(self, dataframe, metadict):
self.dataframe=dataframe
self._metadict=metadict
dfempty=DataFrame()
defattrs=dir(dfempty)
def print_customattr(df):
'''Formatted output of all custom attributes found in a DataFrame. For all
attributes and methods, use dir(df).'''
metadict=_get_metadict(df)
if len(metadict) > 0:
print '\nFound %s custom attributes:\n'%len(metadict)
print '\n'.join([(k+'\t'+str(v)) for k,v in sorted(metadict.items())]) #str() so non-string attribute values don't break the join
else:
print 'No custom attributes found'
def _get_metadict(df):
''' Returns dictionary of attributes in a dataframe not found in the default frame.'''
attrs=dir(df)
newattr=[att for att in attrs if att not in defattrs] #if not is type(instancemethod?)
if not newattr:
return {}
fget=attrgetter(*newattr)
values=fget(df)
if len(newattr) == 1:
values=(values,) #attrgetter returns a bare value, not a tuple, for a single attribute
return dict(zip(newattr, values))
def df_dumps(df):
''' Save dataframe as a stream into memory.'''
metadict=_get_metadict(df)
return cPickle.dumps(TempDump(df, metadict )) #Dumps writes the object to memory
def df_dump(df, outfile):
''' Save dataframe as a file.'''
outstream=df_dumps(df) #Dumps writes the object to memory
f=open(outfile, 'wb') #binary mode so the pickled bytes round-trip on all platforms
f.write(outstream)
f.close()
return None #Should I return none or stream?
def df_load(infile):
'''Returns dataframe from a serialized file '''
f=open(infile, 'rb')
tempobj=cPickle.load(f)
f.close()
df=tempobj.dataframe
for attr, value in tempobj._metadict.items():
setattr(df, attr, value)
return df
def df_loads(stream):
''' Returns dataframe from a serialized stream'''
tempobj=cPickle.loads(stream) #loads not load
df=tempobj.dataframe
for attr, value in tempobj._metadict.items():
setattr(df, attr, value)
return df
if __name__ == '__main__':
### Make a random dataframe, add some attributes
df=DataFrame(((randn(3,3))), columns=['a','b','c'])
print_customattr(df)
print 'adding some attributes'
df.name='Billy'
df.junk='in the trunk'
print_customattr(df)
### Serialize into memory
stream=df_dumps(df)
print 'wrote dataframe to memory'
### Restore from memory
dfnew=df_loads(stream)
print 'restored from memory'
print_customattr(dfnew)
### Serialize into file
outfile='dftest.df' #What file extension is commonly used for this?
df_dump(df, outfile)
print 'wrote dataframe to file %s'%outfile
### Restore from file
dfnewnew=df_load(outfile)
print 'Restored from file %s'%outfile
print_customattr(dfnewnew)
| 84
| 0
| 26
|
2de5ea2e9bf0051ad8a8662c57ae610be26ed5ad
| 378
|
py
|
Python
|
SSH.example.py
|
WikiCommunityHealth/wikimedia-user-metrics
|
1bf643a37d5066932dcad71a465d2c4b4a4a1a92
|
[
"MIT"
] | null | null | null |
SSH.example.py
|
WikiCommunityHealth/wikimedia-user-metrics
|
1bf643a37d5066932dcad71a465d2c4b4a4a1a92
|
[
"MIT"
] | null | null | null |
SSH.example.py
|
WikiCommunityHealth/wikimedia-user-metrics
|
1bf643a37d5066932dcad71a465d2c4b4a4a1a92
|
[
"MIT"
] | null | null | null |
import pymongo
from sshtunnel import SSHTunnelForwarder
| 27
| 79
| 0.65873
|
import pymongo
from sshtunnel import SSHTunnelForwarder
def get_mongo_client():
server = SSHTunnelForwarder(
('192.168.184.92',22),
ssh_username='<USER>',
ssh_password='<PWD>',
remote_bind_address=('127.0.0.1', 27017)
)
server.start()
client = pymongo.MongoClient(host='127.0.0.1', port=server.local_bind_port)
return client
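# Example usage (sketch; the database name below is illustrative, and a real
# script should also stop the SSH tunnel when done):
#
#   client = get_mongo_client()
#   db = client['mydb']
#   print(db.list_collection_names())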
| 300
| 0
| 23
|
6fb6af1bd89cf83860901987794468bea5e527d0
| 21,167
|
py
|
Python
|
src/omniglot/wrapper.py
|
rcmalli/warpgrad
|
d9ef72af10eec62ae92bc24595cb1a4a0207e319
|
[
"Apache-2.0"
] | 80
|
2020-02-18T09:55:10.000Z
|
2022-02-10T12:59:49.000Z
|
src/omniglot/wrapper.py
|
rcmalli/warpgrad
|
d9ef72af10eec62ae92bc24595cb1a4a0207e319
|
[
"Apache-2.0"
] | 5
|
2020-07-21T16:47:42.000Z
|
2021-05-31T06:19:36.000Z
|
src/omniglot/wrapper.py
|
rcmalli/warpgrad
|
d9ef72af10eec62ae92bc24595cb1a4a0207e319
|
[
"Apache-2.0"
] | 14
|
2020-02-18T13:19:48.000Z
|
2022-01-05T23:52:11.000Z
|
"""Meta-learners for Omniglot experiment.
Based on original implementation:
https://github.com/amzn/metalearn-leap
"""
import random
from abc import abstractmethod
from torch import nn
from torch import optim
import maml
import warpgrad
from leap import Leap
from leap.utils import clone_state_dict
from utils import Res, AggRes
class BaseWrapper(object):
"""Generic training wrapper.
Arguments:
criterion (func): loss criterion to use.
model (nn.Module): classifier.
optimizer_cls: optimizer class.
optimizer_kwargs (dict): kwargs to pass to optimizer upon construction.
"""
@abstractmethod
def _partial_meta_update(self, loss, final):
"""Meta-model specific meta update rule.
Arguments:
loss (nn.Tensor): loss value for given mini-batch.
final (bool): whether iteration is the final training step.
"""
raise NotImplementedError('Implement in meta-learner class wrapper.')
@abstractmethod
def _final_meta_update(self):
"""Meta-model specific meta update rule."""
raise NotImplementedError('Implement in meta-learner class wrapper.')
def run_tasks(self, tasks, meta_train):
"""Train on a mini-batch tasks and evaluate test performance.
Arguments:
tasks (list, torch.utils.data.DataLoader): list of task-specific
dataloaders.
meta_train (bool): whether the current run is during meta-training.
"""
results = []
for task in tasks:
task.dataset.train()
trainres = self.run_task(task, train=True, meta_train=meta_train)
task.dataset.eval()
valres = self.run_task(task, train=False, meta_train=False)
results.append((trainres, valres))
##
results = AggRes(results)
# Meta gradient step
if meta_train:
self._final_meta_update()
return results
def run_task(self, task, train, meta_train):
"""Run model on a given task.
Arguments:
task (torch.utils.data.DataLoader): task-specific dataloaders.
train (bool): whether to train on task.
meta_train (bool): whether to meta-train on task.
"""
optimizer = None
if train:
self.model.init_adaptation()
self.model.train()
optimizer = self.optimizer_cls(
self.model.parameters(), **self.optimizer_kwargs)
else:
self.model.eval()
return self.run_batches(
task, optimizer, train=train, meta_train=meta_train)
def run_batches(self, batches, optimizer, train=False, meta_train=False):
"""Iterate over task-specific batches.
Arguments:
batches (torch.utils.data.DataLoader): task-specific dataloaders.
optimizer (torch.nn.optim): optimizer instance if training is True.
train (bool): whether to train on task.
meta_train (bool): whether to meta-train on task.
"""
device = next(self.model.parameters()).device
res = Res()
N = len(batches)
for n, (input, target) in enumerate(batches):
input = input.to(device, non_blocking=True)
target = target.to(device, non_blocking=True)
# Evaluate model
prediction = self.model(input)
loss = self.criterion(prediction, target)
res.log(loss=loss.item(), pred=prediction, target=target)
# TRAINING #
if not train:
continue
final = (n+1) == N
loss.backward()
if meta_train:
self._partial_meta_update(loss, final)
optimizer.step()
optimizer.zero_grad()
if final:
break
###
res.aggregate()
return res
class WarpGradWrapper(BaseWrapper):
"""Wrapper around WarpGrad meta-learners.
Arguments:
model (nn.Module): classifier.
optimizer_cls: optimizer class.
meta_optimizer_cls: meta optimizer class.
optimizer_kwargs (dict): kwargs to pass to optimizer upon construction.
meta_optimizer_kwargs (dict): kwargs to pass to meta optimizer upon
construction.
meta_kwargs (dict): kwargs to pass to meta-learner upon construction.
criterion (func): loss criterion to use.
"""
def run_task(self, task, train, meta_train):
"""Run model on a given task, first adapting and then evaluating"""
if meta_train and train:
# Register new task in buffer.
self.model.register_task(task)
self.model.collect()
else:
# Make sure we're not collecting non-meta-train data
self.model.no_collect()
optimizer = None
if train:
# Initialize model adaptation
self.model.init_adaptation()
optimizer = self.optimizer_cls(
self.model.optimizer_parameter_groups(),
**self.optimizer_kwargs)
if self.model.collecting and self.model.learn_optimizer:
# Register optimiser to collect potential momentum buffers
self.model.register_optimizer(optimizer)
else:
self.model.eval()
return self.run_batches(
task, optimizer, train=train, meta_train=meta_train)
class LeapWrapper(BaseWrapper):
"""Wrapper around the Leap meta-learner.
Arguments:
model (nn.Module): classifier.
optimizer_cls: optimizer class.
meta_optimizer_cls: meta optimizer class.
optimizer_kwargs (dict): kwargs to pass to optimizer upon construction.
meta_optimizer_kwargs (dict): kwargs to pass to meta optimizer upon
construction.
meta_kwargs (dict): kwargs to pass to meta-learner upon construction.
criterion (func): loss criterion to use.
"""
class MAMLWrapper(object):
"""Wrapper around the MAML meta-learner.
Arguments:
model (nn.Module): classifier.
optimizer_cls: optimizer class.
meta_optimizer_cls: meta optimizer class.
optimizer_kwargs (dict): kwargs to pass to optimizer upon construction.
meta_optimizer_kwargs (dict): kwargs to pass to meta optimizer upon
construction.
criterion (func): loss criterion to use.
"""
def run_meta_batch(self, meta_batch, meta_train):
"""Run on meta-batch.
Arguments:
meta_batch (list): list of task-specific dataloaders
meta_train (bool): meta-train on batch.
"""
loss, results = self.meta(meta_batch,
return_predictions=False,
return_results=True,
create_graph=meta_train)
if meta_train:
loss.backward()
self.meta_optimizer.step()
self.meta_optimizer.zero_grad()
return results
class NoWrapper(BaseWrapper):
"""Wrapper for baseline without any meta-learning.
Arguments:
model (nn.Module): classifier.
optimizer_cls: optimizer class.
optimizer_kwargs (dict): kwargs to pass to optimizer upon construction.
criterion (func): loss criterion to use.
"""
class _FOWrapper(BaseWrapper):
"""Base wrapper for First-order MAML and Reptile.
Arguments:
model (nn.Module): classifier.
optimizer_cls: optimizer class.
meta_optimizer_cls: meta optimizer class.
optimizer_kwargs (dict): kwargs to pass to optimizer upon construction.
meta_optimizer_kwargs (dict): kwargs to pass to meta optimizer upon
construction.
criterion (func): loss criterion to use.
"""
_all_grads = None
class ReptileWrapper(_FOWrapper):
"""Wrapper for Reptile.
Arguments:
model (nn.Module): classifier.
optimizer_cls: optimizer class.
meta_optimizer_cls: meta optimizer class.
optimizer_kwargs (dict): kwargs to pass to optimizer upon construction.
meta_optimizer_kwargs (dict): kwargs to pass to meta optimizer upon
construction.
criterion (func): loss criterion to use.
"""
_all_grads = True
class FOMAMLWrapper(_FOWrapper):
"""Wrapper for FOMAML.
Arguments:
model (nn.Module): classifier.
optimizer_cls: optimizer class.
meta_optimizer_cls: meta optimizer class.
optimizer_kwargs (dict): kwargs to pass to optimizer upon construction.
meta_optimizer_kwargs (dict): kwargs to pass to meta optimizer upon
construction.
criterion (func): loss criterion to use.
"""
_all_grads = False
class FtWrapper(BaseWrapper):
"""Wrapper for Multi-headed finetuning.
This wrapper differs from others in that it blends batches from all tasks
into a single epoch.
Arguments:
model (nn.Module): classifier.
optimizer_cls: optimizer class.
optimizer_kwargs (dict): kwargs to pass to optimizer upon construction.
criterion (func): loss criterion to use.
"""
@staticmethod
def gen_multitask_batches(tasks, train):
"""Generates one batch iterator across all tasks."""
iterator_id = 0
all_batches = []
for task_id, iterator in tasks:
if train:
iterator.dataset.train()
else:
iterator.dataset.eval()
for batch in iterator:
all_batches.append((iterator_id, task_id, batch))
iterator_id += 1
if train:
random.shuffle(all_batches)
return all_batches
def run_multitask(self, batches, train):
"""Train on task in multi-task mode
This is equivalent to the run_task method but differs in that
batches are assumed to be mixed from different tasks.
"""
N = len(batches)
if train:
self.model.train()
else:
self.model.eval()
device = next(self.model.parameters()).device
res = {}
for n, (iterator_id, task_id, (input, target)) in enumerate(batches):
input = input.to(device, non_blocking=True)
target = target.to(device, non_blocking=True)
prediction = self.model(input, task_id)
loss = self.criterion(prediction, target)
if iterator_id not in res:
res[iterator_id] = Res()
res[iterator_id].log(loss=loss.item(),
pred=prediction,
target=target)
# TRAINING #
if not train:
continue
final = (n + 1) == N
loss.backward()
self.optimizer.step()
self.optimizer.zero_grad()
if final:
break
###
res = [r[1] for r in sorted(res.items(), key=lambda r: r[0])]
for r in res:
r.aggregate()
return res
| 33.073438
| 79
| 0.58081
|
"""Meta-learners for Omniglot experiment.
Based on original implementation:
https://github.com/amzn/metalearn-leap
"""
import random
from abc import abstractmethod
from torch import nn
from torch import optim
import maml
import warpgrad
from leap import Leap
from leap.utils import clone_state_dict
from utils import Res, AggRes
class BaseWrapper(object):
"""Generic training wrapper.
Arguments:
criterion (func): loss criterion to use.
model (nn.Module): classifier.
optimizer_cls: optimizer class.
optimizer_kwargs (dict): kwargs to pass to optimizer upon construction.
"""
def __init__(self, criterion, model, optimizer_cls, optimizer_kwargs):
self.criterion = criterion
self.model = model
self.optimizer_cls = \
optim.SGD if optimizer_cls.lower() == 'sgd' else optim.Adam
self.optimizer_kwargs = optimizer_kwargs
def __call__(self, tasks, meta_train=True):
return self.run_tasks(tasks, meta_train=meta_train)
@abstractmethod
def _partial_meta_update(self, loss, final):
"""Meta-model specific meta update rule.
Arguments:
loss (nn.Tensor): loss value for given mini-batch.
final (bool): whether iteration is the final training step.
"""
raise NotImplementedError('Implement in meta-learner class wrapper.')
@abstractmethod
def _final_meta_update(self):
"""Meta-model specific meta update rule."""
raise NotImplementedError('Implement in meta-learner class wrapper.')
def run_tasks(self, tasks, meta_train):
"""Train on a mini-batch tasks and evaluate test performance.
Arguments:
tasks (list, torch.utils.data.DataLoader): list of task-specific
dataloaders.
meta_train (bool): whether the current run is during meta-training.
"""
results = []
for task in tasks:
task.dataset.train()
trainres = self.run_task(task, train=True, meta_train=meta_train)
task.dataset.eval()
valres = self.run_task(task, train=False, meta_train=False)
results.append((trainres, valres))
##
results = AggRes(results)
# Meta gradient step
if meta_train:
self._final_meta_update()
return results
def run_task(self, task, train, meta_train):
"""Run model on a given task.
Arguments:
task (torch.utils.data.DataLoader): task-specific dataloaders.
train (bool): whether to train on task.
meta_train (bool): whether to meta-train on task.
"""
optimizer = None
if train:
self.model.init_adaptation()
self.model.train()
optimizer = self.optimizer_cls(
self.model.parameters(), **self.optimizer_kwargs)
else:
self.model.eval()
return self.run_batches(
task, optimizer, train=train, meta_train=meta_train)
def run_batches(self, batches, optimizer, train=False, meta_train=False):
"""Iterate over task-specific batches.
Arguments:
batches (torch.utils.data.DataLoader): task-specific dataloaders.
optimizer (torch.nn.optim): optimizer instance if training is True.
train (bool): whether to train on task.
meta_train (bool): whether to meta-train on task.
"""
device = next(self.model.parameters()).device
res = Res()
N = len(batches)
for n, (input, target) in enumerate(batches):
input = input.to(device, non_blocking=True)
target = target.to(device, non_blocking=True)
# Evaluate model
prediction = self.model(input)
loss = self.criterion(prediction, target)
res.log(loss=loss.item(), pred=prediction, target=target)
# TRAINING #
if not train:
continue
final = (n+1) == N
loss.backward()
if meta_train:
self._partial_meta_update(loss, final)
optimizer.step()
optimizer.zero_grad()
if final:
break
###
res.aggregate()
return res
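# A minimal concrete wrapper (illustrative sketch, not part of the original code):
# a subclass only needs to implement the two abstract meta-update hooks, after
# which it can be called on a list of task dataloaders. The criterion, model and
# task_loaders below are assumed to exist elsewhere.
#
#   class PlainWrapper(BaseWrapper):
#       def _partial_meta_update(self, loss, final):
#           pass  # no per-step meta update
#       def _final_meta_update(self):
#           pass  # no meta update at all (task-level training only)
#
#   wrapper = PlainWrapper(criterion, model, 'sgd', {'lr': 0.1})
#   results = wrapper(task_loaders, meta_train=False)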
class WarpGradWrapper(BaseWrapper):
"""Wrapper around WarpGrad meta-learners.
Arguments:
model (nn.Module): classifier.
optimizer_cls: optimizer class.
meta_optimizer_cls: meta optimizer class.
optimizer_kwargs (dict): kwargs to pass to optimizer upon construction.
meta_optimizer_kwargs (dict): kwargs to pass to meta optimizer upon
construction.
meta_kwargs (dict): kwargs to pass to meta-learner upon construction.
criterion (func): loss criterion to use.
"""
def __init__(self,
model,
optimizer_cls,
meta_optimizer_cls,
optimizer_kwargs,
meta_optimizer_kwargs,
meta_kwargs,
criterion):
replay_buffer = warpgrad.ReplayBuffer(
inmem=meta_kwargs.pop('inmem', True),
tmpdir=meta_kwargs.pop('tmpdir', None))
optimizer_parameters = warpgrad.OptimizerParameters(
trainable=meta_kwargs.pop('learn_opt', False),
default_lr=optimizer_kwargs['lr'],
default_momentum=optimizer_kwargs['momentum']
if 'momentum' in optimizer_kwargs else 0.)
updater = warpgrad.DualUpdater(criterion, **meta_kwargs)
model = warpgrad.Warp(model=model,
adapt_modules=list(model.adapt_modules()),
warp_modules=list(model.warp_modules()),
updater=updater,
buffer=replay_buffer,
optimizer_parameters=optimizer_parameters)
super(WarpGradWrapper, self).__init__(criterion,
model,
optimizer_cls,
optimizer_kwargs)
self.meta_optimizer_cls = optim.SGD \
if meta_optimizer_cls.lower() == 'sgd' else optim.Adam
lra = meta_optimizer_kwargs.pop(
'lr_adapt', meta_optimizer_kwargs['lr'])
lri = meta_optimizer_kwargs.pop(
'lr_init', meta_optimizer_kwargs['lr'])
lrl = meta_optimizer_kwargs.pop(
'lr_lr', meta_optimizer_kwargs['lr'])
self.meta_optimizer = self.meta_optimizer_cls(
[{'params': self.model.init_parameters(), 'lr': lri},
{'params': self.model.warp_parameters(), 'lr': lra},
{'params': self.model.optimizer_parameters(), 'lr': lrl}],
**meta_optimizer_kwargs)
def _partial_meta_update(self, loss, final):
pass
def _final_meta_update(self):
def step_fn():
self.meta_optimizer.step()
self.meta_optimizer.zero_grad()
self.model.backward(step_fn, **self.optimizer_kwargs)
def run_task(self, task, train, meta_train):
"""Run model on a given task, first adapting and then evaluating"""
if meta_train and train:
# Register new task in buffer.
self.model.register_task(task)
self.model.collect()
else:
# Make sure we're not collecting non-meta-train data
self.model.no_collect()
optimizer = None
if train:
# Initialize model adaptation
self.model.init_adaptation()
optimizer = self.optimizer_cls(
self.model.optimizer_parameter_groups(),
**self.optimizer_kwargs)
if self.model.collecting and self.model.learn_optimizer:
# Register optimiser to collect potential momentum buffers
self.model.register_optimizer(optimizer)
else:
self.model.eval()
return self.run_batches(
task, optimizer, train=train, meta_train=meta_train)
class LeapWrapper(BaseWrapper):
"""Wrapper around the Leap meta-learner.
Arguments:
model (nn.Module): classifier.
optimizer_cls: optimizer class.
meta_optimizer_cls: meta optimizer class.
optimizer_kwargs (dict): kwargs to pass to optimizer upon construction.
meta_optimizer_kwargs (dict): kwargs to pass to meta optimizer upon
construction.
meta_kwargs (dict): kwargs to pass to meta-learner upon construction.
criterion (func): loss criterion to use.
"""
def __init__(self,
model,
optimizer_cls,
meta_optimizer_cls,
optimizer_kwargs,
meta_optimizer_kwargs,
meta_kwargs,
criterion):
super(LeapWrapper, self).__init__(criterion,
model,
optimizer_cls,
optimizer_kwargs)
self.meta = Leap(model, **meta_kwargs)
self.meta_optimizer_cls = \
optim.SGD if meta_optimizer_cls.lower() == 'sgd' else optim.Adam
self.meta_optimizer = self.meta_optimizer_cls(
self.meta.parameters(), **meta_optimizer_kwargs)
def _partial_meta_update(self, l, final):
self.meta.update(l, self.model)
def _final_meta_update(self):
self.meta.normalize()
self.meta_optimizer.step()
self.meta_optimizer.zero_grad()
def run_task(self, task, train, meta_train):
if meta_train:
self.meta.init_task()
if train:
self.meta.to(self.model)
return super(LeapWrapper, self).run_task(task, train, meta_train)
class MAMLWrapper(object):
"""Wrapper around the MAML meta-learner.
Arguments:
model (nn.Module): classifier.
optimizer_cls: optimizer class.
meta_optimizer_cls: meta optimizer class.
optimizer_kwargs (dict): kwargs to pass to optimizer upon construction.
meta_optimizer_kwargs (dict): kwargs to pass to meta optimizer upon
construction.
criterion (func): loss criterion to use.
"""
def __init__(self, model, optimizer_cls, meta_optimizer_cls,
optimizer_kwargs, meta_optimizer_kwargs, criterion):
self.criterion = criterion
self.model = model
self.optimizer_cls = \
maml.SGD if optimizer_cls.lower() == 'sgd' else maml.Adam
self.meta = maml.MAML(optimizer_cls=self.optimizer_cls,
criterion=criterion,
model=model,
tensor=False,
**optimizer_kwargs)
self.meta_optimizer_cls = \
optim.SGD if meta_optimizer_cls.lower() == 'sgd' else optim.Adam
self.optimizer_kwargs = optimizer_kwargs
self.meta_optimizer = self.meta_optimizer_cls(self.meta.parameters(),
**meta_optimizer_kwargs)
def __call__(self, meta_batch, meta_train):
tasks = []
for t in meta_batch:
t.dataset.train()
inner = [b for b in t]
t.dataset.eval()
outer = [b for b in t]
tasks.append((inner, outer))
return self.run_meta_batch(tasks, meta_train=meta_train)
def run_meta_batch(self, meta_batch, meta_train):
"""Run on meta-batch.
Arguments:
meta_batch (list): list of task-specific dataloaders
meta_train (bool): meta-train on batch.
"""
loss, results = self.meta(meta_batch,
return_predictions=False,
return_results=True,
create_graph=meta_train)
if meta_train:
loss.backward()
self.meta_optimizer.step()
self.meta_optimizer.zero_grad()
return results
class NoWrapper(BaseWrapper):
"""Wrapper for baseline without any meta-learning.
Arguments:
model (nn.Module): classifier.
optimizer_cls: optimizer class.
optimizer_kwargs (dict): kwargs to pass to optimizer upon construction.
criterion (func): loss criterion to use.
"""
def __init__(self, model, optimizer_cls, optimizer_kwargs, criterion):
super(NoWrapper, self).__init__(criterion,
model,
optimizer_cls,
optimizer_kwargs)
self._original = clone_state_dict(model.state_dict(keep_vars=True))
def __call__(self, tasks, meta_train=False):
return super(NoWrapper, self).__call__(tasks, meta_train=False)
def run_task(self, task, train, meta_train):
if train:
self.model.load_state_dict(self._original)
return super(NoWrapper, self).run_task(task, train, meta_train)
def _partial_meta_update(self, loss, final):
pass
def _final_meta_update(self):
pass
class _FOWrapper(BaseWrapper):
"""Base wrapper for First-order MAML and Reptile.
Arguments:
model (nn.Module): classifier.
optimizer_cls: optimizer class.
meta_optimizer_cls: meta optimizer class.
optimizer_kwargs (dict): kwargs to pass to optimizer upon construction.
meta_optimizer_kwargs (dict): kwargs to pass to meta optimizer upon
construction.
criterion (func): loss criterion to use.
"""
_all_grads = None
def __init__(self, model, optimizer_cls, meta_optimizer_cls,
optimizer_kwargs, meta_optimizer_kwargs, criterion):
super(_FOWrapper, self).__init__(criterion,
model,
optimizer_cls,
optimizer_kwargs)
self.meta_optimizer_cls = \
optim.SGD if meta_optimizer_cls.lower() == 'sgd' else optim.Adam
self.meta_optimizer_kwargs = meta_optimizer_kwargs
self._counter = 0
self._updates = None
self._original = clone_state_dict(
self.model.state_dict(keep_vars=True))
params = [p for p in self._original.values()
if getattr(p, 'requires_grad', False)]
self.meta_optimizer = self.meta_optimizer_cls(params,
**meta_optimizer_kwargs)
def run_task(self, task, train, meta_train):
if meta_train:
self._counter += 1
if train:
self.model.load_state_dict(self._original)
return super(_FOWrapper, self).run_task(task, train, meta_train)
def _partial_meta_update(self, loss, final):
if not final:
return
if self._updates is None:
self._updates = {}
for n, p in self._original.items():
if not getattr(p, 'requires_grad', False):
continue
if p.size():
self._updates[n] = p.new(*p.size()).zero_()
else:
self._updates[n] = p.clone().zero_()
for n, p in self.model.state_dict(keep_vars=True).items():
if n not in self._updates:
continue
if self._all_grads is True:
self._updates[n].add_(p.data)
else:
self._updates[n].add_(p.grad.data)
def _final_meta_update(self):
for n, p in self._updates.items():
p.data.div_(self._counter)
for n, p in self._original.items():
if n not in self._updates:
continue
if self._all_grads:
p.grad = p.data - self._updates[n].data
else:
p.grad = self._updates[n]
self.meta_optimizer.step()
self.meta_optimizer.zero_grad()
self._counter = 0
self._updates = None
class ReptileWrapper(_FOWrapper):
"""Wrapper for Reptile.
Arguments:
model (nn.Module): classifier.
optimizer_cls: optimizer class.
meta_optimizer_cls: meta optimizer class.
optimizer_kwargs (dict): kwargs to pass to optimizer upon construction.
meta_optimizer_kwargs (dict): kwargs to pass to meta optimizer upon
construction.
criterion (func): loss criterion to use.
"""
_all_grads = True
def __init__(self, *args, **kwargs):
super(ReptileWrapper, self).__init__(*args, **kwargs)
class FOMAMLWrapper(_FOWrapper):
"""Wrapper for FOMAML.
Arguments:
model (nn.Module): classifier.
optimizer_cls: optimizer class.
meta_optimizer_cls: meta optimizer class.
optimizer_kwargs (dict): kwargs to pass to optimizer upon construction.
meta_optimizer_kwargs (dict): kwargs to pass to meta optimizer upon
construction.
criterion (func): loss criterion to use.
"""
_all_grads = False
def __init__(self, *args, **kwargs):
super(FOMAMLWrapper, self).__init__(*args, **kwargs)
class FtWrapper(BaseWrapper):
"""Wrapper for Multi-headed finetuning.
This wrapper differs from others in that it blends batches from all tasks
into a single epoch.
Arguments:
model (nn.Module): classifier.
optimizer_cls: optimizer class.
optimizer_kwargs (dict): kwargs to pass to optimizer upon construction.
criterion (func): loss criterion to use.
"""
def __init__(self, model, optimizer_cls, optimizer_kwargs, criterion):
super(FtWrapper, self).__init__(criterion,
model,
optimizer_cls,
optimizer_kwargs)
# We use the same inner optimizer throughout
self.optimizer = self.optimizer_cls(self.model.parameters(),
**self.optimizer_kwargs)
@staticmethod
def gen_multitask_batches(tasks, train):
"""Generates one batch iterator across all tasks."""
iterator_id = 0
all_batches = []
for task_id, iterator in tasks:
if train:
iterator.dataset.train()
else:
iterator.dataset.eval()
for batch in iterator:
all_batches.append((iterator_id, task_id, batch))
iterator_id += 1
if train:
random.shuffle(all_batches)
return all_batches
def run_tasks(self, tasks, meta_train):
original = None
if not meta_train:
original = clone_state_dict(self.model.state_dict(keep_vars=True))
# Non-transductive task evaluation for fair comparison
for module in self.model.modules():
if hasattr(module, 'reset_running_stats'):
module.reset_running_stats()
# Training #
all_batches = self.gen_multitask_batches(tasks, train=True)
trainres = self.run_multitask(all_batches, train=True)
# Eval #
all_batches = self.gen_multitask_batches(tasks, train=False)
valres = self.run_multitask(all_batches, train=False)
results = AggRes(zip(trainres, valres))
if not meta_train:
self.model.load_state_dict(original)
return results
def _partial_meta_update(self, l, final):
return
def _final_meta_update(self):
return
def run_multitask(self, batches, train):
"""Train on task in multi-task mode
This is equivalent to the run_task method but differs in that
batches are assumed to be mixed from different tasks.
"""
N = len(batches)
if train:
self.model.train()
else:
self.model.eval()
device = next(self.model.parameters()).device
res = {}
for n, (iterator_id, task_id, (input, target)) in enumerate(batches):
input = input.to(device, non_blocking=True)
target = target.to(device, non_blocking=True)
prediction = self.model(input, task_id)
loss = self.criterion(prediction, target)
if iterator_id not in res:
res[iterator_id] = Res()
res[iterator_id].log(loss=loss.item(),
pred=prediction,
target=target)
# TRAINING #
if not train:
continue
final = (n + 1) == N
loss.backward()
self.optimizer.step()
self.optimizer.zero_grad()
if final:
break
###
res = [r[1] for r in sorted(res.items(), key=lambda r: r[0])]
for r in res:
r.aggregate()
return res
| 9,290
| 0
| 701
|
3206936433ce667f0ce4f6df51f664f04496ea93
| 9,077
|
py
|
Python
|
coinbase.py
|
foppini975/FinRL
|
aead943817d1387dc3654de2c189767d10140b78
|
[
"MIT"
] | null | null | null |
coinbase.py
|
foppini975/FinRL
|
aead943817d1387dc3654de2c189767d10140b78
|
[
"MIT"
] | null | null | null |
coinbase.py
|
foppini975/FinRL
|
aead943817d1387dc3654de2c189767d10140b78
|
[
"MIT"
] | null | null | null |
# Coinbase Pro library:
# https://github.com/danpaquin/coinbasepro-python
#curl "https://api.pro.coinbase.com/products/BTC-USD/candles?start=2021-01-01T12:00:00&end=2021-01-12T12:00:00&granularity=3600"
import cbpro
import numpy as np
import pandas as pd
import logging
from datetime import datetime, timedelta
import json
#from IPython.core.debugger import set_trace
| 46.076142
| 128
| 0.593588
|
# Coinbase Pro library:
# https://github.com/danpaquin/coinbasepro-python
#curl "https://api.pro.coinbase.com/products/BTC-USD/candles?start=2021-01-01T12:00:00&end=2021-01-12T12:00:00&granularity=3600"
import cbpro
import numpy as np
import pandas as pd
import logging
from datetime import datetime, timedelta
import json
#from IPython.core.debugger import set_trace
class Coinbase:
def __init__(self, product, logging_level = logging.INFO, products_file = None):
FORMAT = '%(asctime)-15s %(message)s'
logging.basicConfig(level=logging_level, format=FORMAT)
# init
self.product = product
self.df = None
# client creation
self.public_client = cbpro.PublicClient()
# get products
self.products = self.public_client.get_products()
if products_file is not None:
with open(products_file, 'w') as fp:
json.dump(self.products, fp)
logging.info(f"Found {len(self.products)} products, saved to {products_file}")
else:
logging.info(f"Found {len(self.products)} products")
found = False
for prod in self.products:
if prod['id'] == self.product:
found = True
logging.info(prod)
break
if found is False:
raise Exception(f"Product {self.product} not valid")
@staticmethod
def getProductList(products_file = None):
products = cbpro.PublicClient().get_products()
if products_file is not None:
with open(products_file, 'w') as fp:
json.dump(products, fp)
return products
@staticmethod
def getPrice(product):
return float(cbpro.PublicClient().get_product_ticker(product)['price'])
def loadHistory(self, start_date, end_date, granularity = 86400, moving_average = 20):
#
# dates are datetime objects, can be crated with:
# start_utc = datetime(2021, 1, 1)
#
start_interval = start_date - timedelta(days=moving_average)
end_interval = None
Granularity_Map = {
60: timedelta(hours=5), # 5 hours of one-minute candles per call
86400: timedelta(days=28 * 6 -1) # about 167 days of daily candles per call
}
if granularity not in Granularity_Map:
raise Exception(f"Granularity {granularity} not valid")
self.df = pd.DataFrame()
while True:
if end_interval is not None:
start_interval = end_interval + timedelta(seconds=1)
if start_interval > end_date:
break
end_interval = start_interval + Granularity_Map[granularity]
if end_interval > end_date:
end_interval = end_date
start_interval_iso = start_interval.isoformat()
end_interval_iso = end_interval.isoformat()
btc_history = self.public_client.get_product_historic_rates(
self.product, start=start_interval_iso,
end=end_interval_iso,
granularity=granularity)
if len(btc_history) == 1 and 'message' in btc_history:
raise Exception(btc_history['message'])
logging.info(f"Fetched from {start_interval_iso} to {end_interval_iso} : #{len(btc_history)} points")
if len(btc_history) == 0:
continue
btc_history_np = np.array(btc_history)
df_new = pd.DataFrame(btc_history_np, columns = ['Time','Low','High','Open','Close','Volume'])
self.df = self.df.append(df_new, ignore_index=True, sort=True)
self.df['tic'] = self.product
self.df['Time'] = pd.to_datetime(self.df['Time'], unit='s')
moving_average_label = f"MA{moving_average}"
self.df.sort_values(by='Time', inplace=True)
self.df[moving_average_label] = self.df['Close'].rolling(window=moving_average).mean()
# let's remove the initial points where moving average was not available
self.df = self.df[self.df['Time'] >= start_date]
self.df.reset_index(drop=True, inplace=True)
#time bucket start time
#low lowest price during the bucket interval
#high highest price during the bucket interval
#open opening price (first trade) in the bucket interval
#close closing price (last trade) in the bucket interval
#volume volume of trading activity during the bucket interval
def calculateBuy(self, moving_average = 20, below_threshold = 0.1):
# "Buy" significa che il valore era sceso del x% sotto il valore attuale e ora e' tornato sopra la moving average
#
# Let's generate the Below column (min-hold below moving average)
moving_average_label = f"MA{moving_average}"
self.df['Below'] = 0
for index, row in self.df.iterrows():
current_value = row['Close']
if current_value < row[moving_average_label]:
below = current_value - row[moving_average_label]
try:
previous_below = self.df.loc[index-1, 'Below']
except:
previous_below = 0
if below < previous_below:
self.df.loc[index, 'Below'] = below
else:
self.df.loc[index, 'Below'] = previous_below
# Let's generate the BUY trigger based on the Below column
self.df['Buy'] = 0
for index, row in self.df.iterrows():
current_value = row['Close']
try:
previous_below = self.df.loc[index-1, 'Below']
except:
previous_below = 0
if current_value > row[moving_average_label] and previous_below < -1*below_threshold*current_value:
self.df.loc[index, 'Buy'] = self.df['Close'].max()/5 # placeholder value to facilitate the plot
def calculateSell(self, moving_average = 20, above_threshold = 0.1):
# "Sell" significa che il valore era salito del x% sopra il valore attuale e ora e' sceso sotto la moving average
#
# Let's generate the Above column (max-hold above moving average)
moving_average_label = f"MA{moving_average}"
self.df['Above'] = 0
for index, row in self.df.iterrows():
current_value = row['Close']
if current_value > row[moving_average_label]:
above = current_value - row[moving_average_label]
try:
previous_above = self.df.loc[index-1, 'Above']
except:
previous_above = 0
if above > previous_above:
self.df.loc[index, 'Above'] = above
else:
self.df.loc[index, 'Above'] = previous_above
# Let's generate the SELL trigger based on the Above column
self.df['Sell'] = 0
for index, row in self.df.iterrows():
current_value = row['Close']
try:
previous_above= self.df.loc[index-1, 'Above']
except:
previous_above = 0
if current_value < row[moving_average_label] and previous_above > above_threshold*current_value:
self.df.loc[index, 'Sell'] = -1*self.df['Close'].max()/5 # placeholder value to facilitate the plot
def backSimulate(self, initial_amount = 100):
self.df['Wallet_USD'] = 0
self.df['Wallet_Crypto'] = 0
self.df['Wallet_Crypto_Hold'] = 0
for index, row in self.df.iterrows():
self.df.loc[index, 'Wallet_Crypto_Hold'] = initial_amount/self.df.loc[0,'Close'] * self.df.loc[index,'Close']
if index == 0:
self.df.loc[0, 'Wallet_USD'] = initial_amount
continue
if self.df.loc[index, 'Buy'] != 0 and self.df.loc[index-1,'Wallet_USD'] > 0:
# Buy
purchased_crypto = self.df.loc[index-1,'Wallet_USD'] / self.df.loc[index,'Close']
logging.info(f"Buy : {self.df.loc[index-1,'Wallet_USD']} USD ---> {purchased_crypto} BTC")
self.df.loc[index,'Wallet_Crypto'] = purchased_crypto
self.df.loc[index,'Wallet_USD'] = 0
elif self.df.loc[index, 'Sell'] != 0 and self.df.loc[index-1,'Wallet_Crypto'] > 0:
# Sell
sold_crypto = self.df.loc[index-1,'Wallet_Crypto'] * self.df.loc[index,'Close']
logging.info(f"Sell: {self.df.loc[index-1,'Wallet_Crypto']} BTC ---> {sold_crypto} BUSDTC")
self.df.loc[index,'Wallet_USD'] = sold_crypto
self.df.loc[index,'Wallet_Crypto'] = 0
else:
# Hold
self.df.loc[index,'Wallet_USD'] = self.df.loc[index-1,'Wallet_USD']
self.df.loc[index,'Wallet_Crypto'] = self.df.loc[index-1,'Wallet_Crypto']
def getTicker(self):
return self.public_client.get_product_ticker(self.product)
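# A minimal usage sketch based on the methods above (illustrative only; the
# product, date range and thresholds are made up):
#
#   cb = Coinbase('BTC-USD')
#   cb.loadHistory(datetime(2021, 1, 1), datetime(2021, 6, 1), granularity=86400)
#   cb.calculateBuy(below_threshold=0.1)
#   cb.calculateSell(above_threshold=0.1)
#   cb.backSimulate(initial_amount=100)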
| 8,418
| 262
| 23
|
c5b03901171c853c7fa793d7b36da4ea9b2c603e
| 2,407
|
py
|
Python
|
links/models.py
|
gminds/rapidnewsng
|
7528f751f657f29f2da23a1dd160479947f87977
|
[
"BSD-3-Clause"
] | null | null | null |
links/models.py
|
gminds/rapidnewsng
|
7528f751f657f29f2da23a1dd160479947f87977
|
[
"BSD-3-Clause"
] | null | null | null |
links/models.py
|
gminds/rapidnewsng
|
7528f751f657f29f2da23a1dd160479947f87977
|
[
"BSD-3-Clause"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import User
from django.db.models import Count
from django.core.urlresolvers import reverse
from django.utils.timezone import now
'''
def votes(self):
num_submissions = Submission.objects.filter(uploaded_by=self).count()
return num_submissions
'''
# with_votes = Link.objects.count()
# with_votes = Link.objects.filter(with_votes__gt=0).annotate(votes=Count('with_votes')).order_by('-votes')
# Signal while saving user
from django.db.models.signals import post_save
post_save.connect(create_profile, sender=User)
| 32.972603
| 113
| 0.705442
|
from django.db import models
from django.contrib.auth.models import User
from django.db.models import Count
from django.core.urlresolvers import reverse
from django.utils.timezone import now
class LinkVoteCountManager(models.Manager):
def get_query_set(self):
return super(LinkVoteCountManager, self).get_query_set().annotate(votes=Count('vote')).order_by('-votes')
class Link(models.Model):
title = models.CharField("Headline", max_length=100, unique=True)
submitter = models.ForeignKey(User)
linksource = models.CharField("Linksource", max_length=250)
submitted_on = models.DateTimeField(auto_now_add=True)
rank_score = models.FloatField(default=0.0)
url = models.URLField("URL", max_length=250, blank=True)
description = models.TextField(blank=True)
# with_votes = LinkVoteCountManager()
votes = models.IntegerField(default=1)
objects = models.Manager() #default manager
def __unicode__(self):
return self.title
def set_rank(self):
# Based on HN ranking algo at http://amix.dk/blog/post/19574
SECS_IN_HOUR = float(60*60)
GRAVITY = 1.2
delta = now() - self.submitted_on
item_hour_age = delta.total_seconds() // SECS_IN_HOUR
votes = self.votes - 1
self.rank_score = votes / pow((item_hour_age+2), GRAVITY)
self.save()
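# Worked example of the ranking formula above (illustrative numbers): a link
# with 10 votes submitted 6 hours ago scores (10 - 1) / (6 + 2) ** 1.2,
# i.e. 9 / 12.13, which is roughly 0.74.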
'''
def votes(self):
num_submissions = Submission.objects.filter(uploaded_by=self).count()
return num_submissions
'''
class Vote(models.Model):
voter = models.ForeignKey(User)
link = models.ForeignKey(Link)
# with_votes = Link.objects.count()
# with_votes = Link.objects.filter(with_votes__gt=0).annotate(votes=Count('with_votes')).order_by('-votes')
def __unicode__(self):
return "%s upvoted %s" % (self.voter.username, self.link.title)
class UserProfile(models.Model):
user = models.OneToOneField(User, unique=True)
bio = models.TextField(null=True)
def __unicode__(self):
return "%s's profile" % self.user
def create_profile(sender, instance, created, **kwargs):
if created:
profile, created = UserProfile.objects.get_or_create(user=instance)
# Signal while saving user
from django.db.models.signals import post_save
post_save.connect(create_profile, sender=User)
def get_absolute_url(self):
return reverse("link_detail", kwargs={"pk": str(self.id)})
| 803
| 803
| 191
|
5c8d4feb6829f166cf531181b9ddbbc0f14f1120
| 1,393
|
py
|
Python
|
driver_support.py
|
yhamidullah/alphormdownloader
|
d520ec5d6d3289acb184308471a38d2b196a708d
|
[
"MIT"
] | 2
|
2020-04-07T10:59:23.000Z
|
2020-12-24T09:18:56.000Z
|
driver_support.py
|
yhamidullah/alphormdownloader
|
d520ec5d6d3289acb184308471a38d2b196a708d
|
[
"MIT"
] | 5
|
2020-04-07T11:04:43.000Z
|
2021-01-05T16:48:09.000Z
|
driver_support.py
|
yhamidullah/alphormdownloader
|
d520ec5d6d3289acb184308471a38d2b196a708d
|
[
"MIT"
] | 3
|
2020-04-04T04:56:24.000Z
|
2020-04-08T12:26:11.000Z
|
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from bs4 import BeautifulSoup
import urllib.request,re,os
import config
| 46.433333
| 142
| 0.698492
|
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from bs4 import BeautifulSoup
import urllib.request,re,os
import config
def get_available_driver():
driver = ""
if config.firefox_driver_path != "":
try:
driver = webdriver.Firefox(executable_path=config.firefox_driver_path)
except:
print("firefox driver path is not set correctly | the selected driver doesn't match your firefox installed browser")
elif config.chrome_driver_path != "":
try:
driver = webdriver.Chrome(executable_path=config.chrome_driver_path)
except:
print("chrome driver path is not set correctly | the selected driver doesn't match your installed chrome browser")
elif config.microsoft_edge_driver_path != "":
try:
driver = webdriver.Edge(executable_path=config.microsoft_edge_driver_path)
except:
print("microsoft edge driver path is not set correctly | the selected driver doesn't match your installed microsoft edge browser")
elif config.safari_driver_path != "":
try:
driver = webdriver.Safari(executable_path=config.safari_driver_path)
except:
print("safari driver path is not set correctly | the selected driver doesn't match your installed safari browser")
else:
return driver
return driver
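# Example usage (sketch; the URL is illustrative):
#
#   driver = get_available_driver()
#   if driver:
#       driver.get("https://example.com")
#       driver.quit()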
| 1,213
| 0
| 22
|
2f564c5e8b5c38b1f42fe2b874cd512a34b4cd79
| 569
|
py
|
Python
|
nested.py
|
codingwithahmad/FastAPI
|
4453fa74a1164161a1528f20c9ead5dccdd494a2
|
[
"MIT"
] | null | null | null |
nested.py
|
codingwithahmad/FastAPI
|
4453fa74a1164161a1528f20c9ead5dccdd494a2
|
[
"MIT"
] | null | null | null |
nested.py
|
codingwithahmad/FastAPI
|
4453fa74a1164161a1528f20c9ead5dccdd494a2
|
[
"MIT"
] | null | null | null |
from typing import Optional, List, Set
from fastapi import FastAPI
from pydantic import BaseModel, HttpUrl
app = FastAPI()
@app.put('/items/{item_id}')
| 23.708333
| 113
| 0.724077
|
from typing import Optional, List, Set
from fastapi import FastAPI
from pydantic import BaseModel, HttpUrl
app = FastAPI()
class Image(BaseModel):
url: HttpUrl
name: str
class Item(BaseModel):
name: str
description: Optional[str] = None
price: float
image: Optional[Image] = None
tax: Optional[float] = None
tags: Set[str] = set() # we use a set to keep tags unique, because duplicates are not allowed in sets
@app.put('/items/{item_id}')
async def update(item_id: int, item: Item):
results = {"item_id": item_id, "item": item}
return results
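# A request body matching the nested models above might look like this
# (illustrative values):
#
#   {
#       "name": "Foo",
#       "price": 42.0,
#       "tags": ["electronics", "sale"],
#       "image": {"url": "https://example.com/foo.png", "name": "foo image"}
#   }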
| 83
| 262
| 69
|
810d9276dbbb5e8d7b50ea6c272bd5caea7cf779
| 4,262
|
py
|
Python
|
demo.py
|
soarodo/flappy_yolo
|
f4747528e707b18dffcc84f814aea9c41990344f
|
[
"MIT"
] | 1
|
2020-01-08T16:41:39.000Z
|
2020-01-08T16:41:39.000Z
|
demo.py
|
soarodo/flappy_yolo
|
f4747528e707b18dffcc84f814aea9c41990344f
|
[
"MIT"
] | null | null | null |
demo.py
|
soarodo/flappy_yolo
|
f4747528e707b18dffcc84f814aea9c41990344f
|
[
"MIT"
] | null | null | null |
import cv2
import os
import time
import numpy as np
from keras import backend as K
from keras.models import load_model
from yad2k.models.keras_yolo import yolo_eval, yolo_head
| 35.22314
| 132
| 0.641248
|
import cv2
import os
import time
import numpy as np
from keras import backend as K
from keras.models import load_model
from yad2k.models.keras_yolo import yolo_eval, yolo_head
class YOLO(object):
def __init__(self):
self.model_path = 'model_data/yolo.h5'
self.anchors_path = 'model_data/yolo_anchors.txt'
self.classes_path = 'model_data/flappy_classes.txt'
self.score = 0.3
self.iou = 0.5
self.class_names = self._get_class()
self.anchors = self._get_anchors()
self.sess = K.get_session()
self.boxes, self.scores, self.classes = self.generate()
def _get_class(self):
classes_path = os.path.expanduser(self.classes_path)
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def _get_anchors(self):
anchors_path = os.path.expanduser(self.anchors_path)
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
anchors = np.array(anchors).reshape(-1, 2)
return anchors
def generate(self):
model_path = os.path.expanduser(self.model_path)
assert model_path.endswith('.h5'), 'Keras model must be a .h5 file.'
self.yolo_model = load_model(model_path)
# Verify model, anchors, and classes are compatible
num_classes = len(self.class_names)
num_anchors = len(self.anchors)
# TODO: Assumes dim ordering is channel last
model_output_channels = self.yolo_model.layers[-1].output_shape[-1]
assert model_output_channels == num_anchors * (num_classes + 5), \
'Mismatch between model and given anchor and class sizes'
print('{} model, anchors, and classes loaded.'.format(model_path))
# Check if model is fully convolutional, assuming channel last order.
self.model_image_size = self.yolo_model.layers[0].input_shape[1:3]
self.is_fixed_size = self.model_image_size != (None, None)
# Generate output tensor targets for filtered bounding boxes.
# TODO: Wrap these backend operations with Keras layers.
yolo_outputs = yolo_head(self.yolo_model.output, self.anchors, len(self.class_names))
self.input_image_shape = K.placeholder(shape=(2, ))
boxes, scores, classes = yolo_eval(yolo_outputs, self.input_image_shape, score_threshold=self.score, iou_threshold=self.iou)
return boxes, scores, classes
def detect_image(self, image):
start = time.time()
y, x, _ = image.shape
if self.is_fixed_size: # TODO: When resizing we can use minibatch input.
resized_image = cv2.resize(image, tuple(reversed(self.model_image_size)), interpolation=cv2.INTER_CUBIC)
image_data = np.array(resized_image, dtype='float32')
else:
image_data = np.array(image, dtype='float32')
image_data /= 255.
image_data = np.expand_dims(image_data, 0) # Add batch dimension.
out_boxes, out_scores, out_classes = self.sess.run(
[self.boxes, self.scores, self.classes],
feed_dict={
self.yolo_model.input: image_data,
self.input_image_shape: [image.shape[0], image.shape[1]],
K.learning_phase(): 0
})
print('Found {} boxes for {}'.format(len(out_boxes), 'img'))
return out_boxes,out_scores,out_classes,self.class_names
def close_session(self):
self.sess.close()
def detect_vedio(video, yolo):
camera = cv2.VideoCapture(video)
cv2.namedWindow("detection", cv2.WINDOW_NORMAL)
while True:
res, frame = camera.read()
if not res:
break
image = yolo.detect_image(frame)
cv2.imshow("detection", image)
if cv2.waitKey(110) & 0xff == 27:
break
yolo.close_session()
def detect_img(img, yolo):
image = cv2.imread(img)
r_image = yolo.detect_image(image)
cv2.namedWindow("detection")
while True:
cv2.imshow("detection", r_image)
if cv2.waitKey(110) & 0xff == 27:
break
yolo.close_session()
| 3,850
| -2
| 230
|
c7ed802b7ea74b20fb7005a7225b0eec42837fbc
| 1,053
|
py
|
Python
|
homeassistant/components/doorbird/logbook.py
|
erogleva/core
|
994ae09f69afe772150a698953c0d7386a745de2
|
[
"Apache-2.0"
] | 6
|
2016-11-25T06:36:27.000Z
|
2021-11-16T11:20:23.000Z
|
homeassistant/components/doorbird/logbook.py
|
erogleva/core
|
994ae09f69afe772150a698953c0d7386a745de2
|
[
"Apache-2.0"
] | 56
|
2020-08-03T07:30:54.000Z
|
2022-03-31T06:02:04.000Z
|
homeassistant/components/doorbird/logbook.py
|
erogleva/core
|
994ae09f69afe772150a698953c0d7386a745de2
|
[
"Apache-2.0"
] | 3
|
2016-10-03T20:14:06.000Z
|
2019-04-19T15:56:56.000Z
|
"""Describe logbook events."""
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.core import callback
from .const import DOMAIN, DOOR_STATION, DOOR_STATION_EVENT_ENTITY_IDS
@callback
def async_describe_events(hass, async_describe_event):
"""Describe logbook events."""
@callback
def async_describe_logbook_event(event):
"""Describe a logbook event."""
_, doorbird_event = event.event_type.split("_", 1)
return {
"name": "Doorbird",
"message": f"Event {event.event_type} was fired.",
"entity_id": hass.data[DOMAIN][DOOR_STATION_EVENT_ENTITY_IDS].get(
doorbird_event, event.data.get(ATTR_ENTITY_ID)
),
}
domain_data = hass.data[DOMAIN]
for config_entry_id in domain_data:
door_station = domain_data[config_entry_id][DOOR_STATION]
for event in door_station.doorstation_events:
async_describe_event(
DOMAIN, f"{DOMAIN}_{event}", async_describe_logbook_event
)
| 30.085714
| 78
| 0.665717
|
"""Describe logbook events."""
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.core import callback
from .const import DOMAIN, DOOR_STATION, DOOR_STATION_EVENT_ENTITY_IDS
@callback
def async_describe_events(hass, async_describe_event):
"""Describe logbook events."""
@callback
def async_describe_logbook_event(event):
"""Describe a logbook event."""
_, doorbird_event = event.event_type.split("_", 1)
return {
"name": "Doorbird",
"message": f"Event {event.event_type} was fired.",
"entity_id": hass.data[DOMAIN][DOOR_STATION_EVENT_ENTITY_IDS].get(
doorbird_event, event.data.get(ATTR_ENTITY_ID)
),
}
domain_data = hass.data[DOMAIN]
for config_entry_id in domain_data:
door_station = domain_data[config_entry_id][DOOR_STATION]
for event in door_station.doorstation_events:
async_describe_event(
DOMAIN, f"{DOMAIN}_{event}", async_describe_logbook_event
)
| 0
| 0
| 0
|
46f0c5324d010240f717ecc52ef5a0820704904a
| 661
|
py
|
Python
|
gigfinder/config/wsgi.py
|
casamfiml/django_Pruebas
|
6a2d0e17fcbd4c3428037c2354ce9e5ce19c4d4b
|
[
"Apache-2.0"
] | null | null | null |
gigfinder/config/wsgi.py
|
casamfiml/django_Pruebas
|
6a2d0e17fcbd4c3428037c2354ce9e5ce19c4d4b
|
[
"Apache-2.0"
] | null | null | null |
gigfinder/config/wsgi.py
|
casamfiml/django_Pruebas
|
6a2d0e17fcbd4c3428037c2354ce9e5ce19c4d4b
|
[
"Apache-2.0"
] | null | null | null |
"""
WSGI config for gigfinder project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os,sys
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
# path to the directory holding our Django project's manage.py
sys.path.append('/home/ubuntu/workspace/gigfinder')
os.environ.setdefault("LANG", "en_US.UTF-8")
os.environ.setdefault("LC_ALL", "en_US.UTF-8")
activate_this = '/home/ubuntu/workspace/myvenv/bin/activate_this.py'
application = get_wsgi_application()
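# Note: 'activate_this' is assigned but never executed above. It is normally run
# to activate the virtualenv before the application is created, e.g. (sketch):
#   exec(open(activate_this).read(), {'__file__': activate_this})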
| 30.045455
| 78
| 0.779123
|
"""
WSGI config for gigfinder project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os,sys
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
# path to the directory holding our Django project's manage.py
sys.path.append('/home/ubuntu/workspace/gigfinder')
os.environ.setdefault("LANG", "en_US.UTF-8")
os.environ.setdefault("LC_ALL", "en_US.UTF-8")
activate_this = '/home/ubuntu/workspace/myvenv/bin/activate_this.py'
application = get_wsgi_application()
| 0
| 0
| 0
|
6fcb9a39da1d14723888c9560f273ac52d31e6a3
| 2,453
|
py
|
Python
|
src/z3c/testsetup/tests/layered_cave/layer.py
|
zopefoundation/z3c.testsetup
|
7a07939b5df33d2124916385efdc9c82495d1b32
|
[
"ZPL-2.1"
] | 1
|
2020-09-08T06:51:02.000Z
|
2020-09-08T06:51:02.000Z
|
src/z3c/testsetup/tests/layered_cave/layer.py
|
zopefoundation/z3c.testsetup
|
7a07939b5df33d2124916385efdc9c82495d1b32
|
[
"ZPL-2.1"
] | 2
|
2015-04-14T18:00:03.000Z
|
2020-05-15T08:56:34.000Z
|
src/z3c/testsetup/tests/layered_cave/layer.py
|
zopefoundation/z3c.testsetup
|
7a07939b5df33d2124916385efdc9c82495d1b32
|
[
"ZPL-2.1"
] | 1
|
2020-09-08T06:19:35.000Z
|
2020-09-08T06:19:35.000Z
|
"""Layer definitions.
This could also be done in the setup file itself.
"""
import os
from zope.app.testing.functional import ZCMLLayer
# We define a ZCML test layer. ZCML layers are special as they define
# some setup code for creation of empty ZODBs and more. If you only
# want some ZCML registrations to be done, you can use it like so:
FunctionalLayer1 = ZCMLLayer(
# As first argument we need the absolute path of a ZCML file
os.path.join(os.path.dirname(__file__), 'ftesting.zcml'),
# Second argument is the module, where the layer is defined.
__name__,
# This is the name of our layer. It can be an arbitrary string.
'FunctionalLayer1',
# By default ZCML layers are not torn down. You should make sure,
# that any registrations you do in your ZCML are removed in a
# tearDown method if you specify this parameter to be `True`. This
# parameter is optional.
allow_teardown=True)
class UnitLayer1(object):
"""This represents a layer.
A layer is a way to have common setup and teardown that happens
once for a whole group of tests.
It must be an object with a `setUp` and a `tearDown` method, which
are run once before or after all the tests applied to a layer
respectively.
Optionally you can additionally define `testSetUp` and
`testTearDown` methods, which are run before and after each single
test.
This class is not instantiated. Therefore we use classmethods.
"""
@classmethod
def setUp(self):
"""This gets run once for the whole test run, or at most once per
TestSuite that depends on the layer.
(The latter can happen if multiple suites depend on the layer
and the testrunner decides to tear down the layer after first
suite finishes.)
"""
@classmethod
def tearDown(self):
"""This gets run once for the whole test run, or at most
once per TestSuite that depends on the layer,
after all tests in the suite have finished.
"""
@classmethod
def testSetUp(self):
"""This method is run before each single test in the current
layer. It is optional.
"""
print " Running testSetUp of UnitLayer1"
@classmethod
def testTearDown(self):
"""This method is run before each single test in the current
layer. It is optional.
"""
print " Running testTearDown of UnitLayer1"
| 33.60274
| 73
| 0.681614
|
"""Layer definitions.
This could also be done in the setup file itself.
"""
import os
from zope.app.testing.functional import ZCMLLayer
# We define a ZCML test layer. ZCML layers are special as they define
# some setup code for creation of empty ZODBs and more. If you only
# want some ZCML registrations to be done, you can use it like so:
FunctionalLayer1 = ZCMLLayer(
# As first argument we need the absolute path of a ZCML file
os.path.join(os.path.dirname(__file__), 'ftesting.zcml'),
# Second argument is the module, where the layer is defined.
__name__,
# This is the name of our layer. It can be an arbitrary string.
'FunctionalLayer1',
    # By default ZCML layers are not torn down. You should make sure
# that any registrations you do in your ZCML are removed in a
# tearDown method if you specify this parameter to be `True`. This
# parameter is optional.
allow_teardown=True)
class UnitLayer1(object):
"""This represents a layer.
A layer is a way to have common setup and teardown that happens
once for a whole group of tests.
It must be an object with a `setUp` and a `tearDown` method, which
are run once before or after all the tests applied to a layer
respectively.
Optionally you can additionally define `testSetUp` and
`testTearDown` methods, which are run before and after each single
test.
This class is not instantiated. Therefore we use classmethods.
"""
@classmethod
def setUp(self):
"""This gets run once for the whole test run, or at most once per
TestSuite that depends on the layer.
(The latter can happen if multiple suites depend on the layer
and the testrunner decides to tear down the layer after first
suite finishes.)
"""
@classmethod
def tearDown(self):
"""This gets run once for the whole test run, or at most
once per TestSuite that depends on the layer,
after all tests in the suite have finished.
"""
@classmethod
def testSetUp(self):
"""This method is run before each single test in the current
layer. It is optional.
"""
print " Running testSetUp of UnitLayer1"
@classmethod
def testTearDown(self):
"""This method is run before each single test in the current
layer. It is optional.
"""
print " Running testTearDown of UnitLayer1"
| 0
| 0
| 0
|
271e2acdf71604e7da0fc06ab6eab405b21c08fd
| 1,086
|
py
|
Python
|
hpc_acm_cli/async_op.py
|
coin8086/hpc_acm_cli
|
688c68e23df1ffae3430d1807c5ba1b67c5dc857
|
[
"MIT"
] | 2
|
2018-10-06T04:15:06.000Z
|
2020-06-01T14:25:40.000Z
|
hpc_acm_cli/async_op.py
|
coin8086/hpc_acm_cli
|
688c68e23df1ffae3430d1807c5ba1b67c5dc857
|
[
"MIT"
] | 2
|
2018-10-05T15:30:20.000Z
|
2018-11-02T08:35:46.000Z
|
hpc_acm_cli/async_op.py
|
coin8086/hpc_acm_cli
|
688c68e23df1ffae3430d1807c5ba1b67c5dc857
|
[
"MIT"
] | 3
|
2018-10-05T14:54:20.000Z
|
2021-01-10T10:09:30.000Z
|
from tqdm import tqdm
import time
import platform
# ops is a list of AsyncOp object
| 25.857143
| 79
| 0.513812
|
from tqdm import tqdm
import time
import platform
class AsyncOp:
class NotReady(Exception):
pass
def get_result(self):
pass
# ops is a list of AsyncOp object
def async_wait(ops, handler=None, desc=None):
total = len(ops)
done = [False for i in range(total)]
done_count = 0
prog = tqdm(total=total, desc=desc, ascii=(platform.system() == 'Windows'))
results = [None for i in range(total)] if not handler else []
while done_count != total:
yielded = False
for idx, op in enumerate(ops):
if done[idx]:
continue
try:
result = op.get_result()
except AsyncOp.NotReady:
pass
else:
yielded = True
done[idx] = True
done_count += 1
if handler:
handler(idx, result)
else:
results[idx] = result
prog.update(1)
if not yielded:
time.sleep(0.1)
prog.close()
return results
| 891
| 64
| 45
|
460b0ad8437549783c4db4fc6f7378a14379d77a
| 2,282
|
py
|
Python
|
takahe/SFR.py
|
Krytic/Takahe
|
6d6bdf234ae7e3cfe8ef40e48d4621dc9a9a2f6c
|
[
"MIT"
] | 1
|
2020-12-09T02:34:43.000Z
|
2020-12-09T02:34:43.000Z
|
takahe/SFR.py
|
Krytic/Takahe
|
6d6bdf234ae7e3cfe8ef40e48d4621dc9a9a2f6c
|
[
"MIT"
] | 8
|
2020-03-02T06:22:43.000Z
|
2020-11-10T03:20:46.000Z
|
takahe/SFR.py
|
Krytic/Takahe
|
6d6bdf234ae7e3cfe8ef40e48d4621dc9a9a2f6c
|
[
"MIT"
] | 1
|
2020-05-13T00:41:24.000Z
|
2020-05-13T00:41:24.000Z
|
import matplotlib.pyplot as plt
from numba import njit
import numpy as np
import pandas as pd
from scipy.special import gamma, gammainc
import takahe
from tqdm import tqdm
def MadauDickinson(Z, z):
"""Computes the Madau & Dickinson SFRD at metallicity Z and redshift z.
Implements the SFRD given by eqn(15) of [1]. Returns a value in
M_sun / yr / Mpc^3.
Assumes Z_sun = 0.020, and that input metallicity is NOT already
measured relative to this.
[1] https://www.annualreviews.org/doi/pdf/10.1146/annurev-astro-081811-125615
Arguments:
Z {float} -- The metallicity under consideration.
z {float} -- The redshift under consideration.
Returns:
{float} -- The SFRD at metallicity Z and redshift z.
"""
GAM = gammainc(0.84, (Z / 0.02)**2 * 10**(0.3*z))
NUM = 0.015 * (1+z)**2.7
DEM = (1+((1+z)/2.9)**5.6)
SFRDi = GAM * (NUM / DEM)
return SFRDi
def MilkyWay(Z, z):
"""Implements the SFR equation from (Wiktorowicz et. al. 2020) [1]
for the Milky Way Galaxy.
Piecewise function for the SFR in the Milky Way galaxy. Assumes a
four-component formalism - consisting of a thin disk, thick disk,
bulge, and halo. Precise values of the SFR come from
    (Olejak et al. 2019) [2].
[1] https://arxiv.org/pdf/2006.08317.pdf
[2] https://arxiv.org/pdf/1908.08775.pdf
Arguments:
Z {float} -- The metallicity under consideration.
z {float} -- The redshift under consideration.
Returns:
{float} -- The SFRD at metallicity Z and redshift z.
"""
SFR_arr = np.zeros(len(z))
for i in range(len(z)):
zi = z[i]
tL = takahe.helpers.redshift_to_lookback(zi)
Z_sun = takahe.constants.SOLAR_METALLICITY
SFR = 0
if Z == Z_sun:
# Thin Disk
if 0 <= tL <= 10:
SFR += 4.7
# Bulge
if 0 <= tL <= 10:
SFR += 0.45
elif 10 <= tL <= 12:
SFR += 2.3
elif Z == Z_sun / 10:
# Thick Disk
if 9 <= tL <= 11:
SFR += 2.5
elif Z == 1e-4:
# Halo
if 10 <= tL <= 12:
SFR += 0.5
SFR_arr[i] += SFR
return SFR_arr
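# Hedged usage sketch (illustrative, added for orientation): evaluate the
# Madau & Dickinson SFRD at solar metallicity over a small redshift grid.
# Values are in M_sun / yr / Mpc^3 and are not asserted here.
if __name__ == '__main__':
    z_grid = np.linspace(0.0, 8.0, 5)
    print(MadauDickinson(0.020, z_grid))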
| 27.166667
| 81
| 0.565294
|
import matplotlib.pyplot as plt
from numba import njit
import numpy as np
import pandas as pd
from scipy.special import gamma, gammainc
import takahe
from tqdm import tqdm
def MadauDickinson(Z, z):
"""Computes the Madau & Dickinson SFRD at metallicity Z and redshift z.
Implements the SFRD given by eqn(15) of [1]. Returns a value in
M_sun / yr / Mpc^3.
Assumes Z_sun = 0.020, and that input metallicity is NOT already
measured relative to this.
[1] https://www.annualreviews.org/doi/pdf/10.1146/annurev-astro-081811-125615
Arguments:
Z {float} -- The metallicity under consideration.
z {float} -- The redshift under consideration.
Returns:
{float} -- The SFRD at metallicity Z and redshift z.
"""
GAM = gammainc(0.84, (Z / 0.02)**2 * 10**(0.3*z))
NUM = 0.015 * (1+z)**2.7
DEM = (1+((1+z)/2.9)**5.6)
SFRDi = GAM * (NUM / DEM)
return SFRDi
def MilkyWay(Z, z):
"""Implements the SFR equation from (Wiktorowicz et. al. 2020) [1]
for the Milky Way Galaxy.
Piecewise function for the SFR in the Milky Way galaxy. Assumes a
four-component formalism - consisting of a thin disk, thick disk,
bulge, and halo. Precise values of the SFR come from
(Olejak et. al 2019) [2].
[1] https://arxiv.org/pdf/2006.08317.pdf
[2] https://arxiv.org/pdf/1908.08775.pdf
Arguments:
Z {float} -- The metallicity under consideration.
z {float} -- The redshift under consideration.
Returns:
{float} -- The SFRD at metallicity Z and redshift z.
"""
SFR_arr = np.zeros(len(z))
for i in range(len(z)):
zi = z[i]
tL = takahe.helpers.redshift_to_lookback(zi)
Z_sun = takahe.constants.SOLAR_METALLICITY
SFR = 0
if Z == Z_sun:
# Thin Disk
if 0 <= tL <= 10:
SFR += 4.7
# Bulge
if 0 <= tL <= 10:
SFR += 0.45
elif 10 <= tL <= 12:
SFR += 2.3
elif Z == Z_sun / 10:
# Thick Disk
if 9 <= tL <= 11:
SFR += 2.5
elif Z == 1e-4:
# Halo
if 10 <= tL <= 12:
SFR += 0.5
SFR_arr[i] += SFR
return SFR_arr
| 0
| 0
| 0
|
b94e0588527afb6e0682a3ad466fe2b23170f529
| 1,391
|
py
|
Python
|
src/mapstp/cli/logging.py
|
MC-kit/map-stp
|
a82b6560358a37f704fd0fe76c76def27a15458d
|
[
"MIT"
] | null | null | null |
src/mapstp/cli/logging.py
|
MC-kit/map-stp
|
a82b6560358a37f704fd0fe76c76def27a15458d
|
[
"MIT"
] | 19
|
2021-11-29T10:29:30.000Z
|
2022-03-17T11:21:08.000Z
|
src/mapstp/cli/logging.py
|
MC-kit/map-stp
|
a82b6560358a37f704fd0fe76c76def27a15458d
|
[
"MIT"
] | null | null | null |
"""Intercept log messages from the used libraries and pass them to `loguru`.
See https://github.com/Delgan/loguru
"""
import logging
from loguru import logger
# class PropagateHandler(logging.Handler):
# """Send events from loguru to standard logging"""
# def emit(self, record):
# logging.getLogger(record.name).handle(record)
#
#
# logger.add(PropagateHandler(), format="{message}")
class InterceptHandler(logging.Handler):
"""Send events from standard logging to loguru."""
def emit(self, record: logging.LogRecord) -> None:
"""See :meth:`logging.Handler.emit`.
Args:
record: data to log
"""
# Get corresponding Loguru level if it exists
try:
level = logger.level(record.levelname).name
except ValueError:
level = logging.getLevelName(record.levelno)
        # Find the caller from which the logged message originated
frame = logging.currentframe()
depth = 2
while frame.f_code.co_filename == logging.__file__:
frame = frame.f_back # type: ignore
depth += 1
logger.opt(depth=depth, exception=record.exc_info).log(
level, record.getMessage()
)
log = logging.getLogger()
# log.setLevel(0)
log.addHandler(InterceptHandler())
# logging.basicConfig(handlers=[InterceptHandler()], level=0, style='{')
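# Hedged usage sketch: with InterceptHandler attached to the root logger above,
# records emitted through the standard logging API end up in loguru's sinks
# alongside native loguru calls. The logger name below is illustrative.
if __name__ == "__main__":
    logging.getLogger("some.library").warning("handled by loguru now")
    logger.info("native loguru call, same sinks")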
| 27.82
| 76
| 0.64486
|
"""Intercept log messages from the used libraries and pass them to `loguru`.
See https://github.com/Delgan/loguru
"""
import logging
from loguru import logger
# class PropagateHandler(logging.Handler):
# """Send events from loguru to standard logging"""
# def emit(self, record):
# logging.getLogger(record.name).handle(record)
#
#
# logger.add(PropagateHandler(), format="{message}")
class InterceptHandler(logging.Handler):
"""Send events from standard logging to loguru."""
def emit(self, record: logging.LogRecord) -> None:
"""See :meth:`logging.Handler.emit`.
Args:
record: data to log
"""
# Get corresponding Loguru level if it exists
try:
level = logger.level(record.levelname).name
except ValueError:
level = logging.getLevelName(record.levelno)
        # Find the caller from which the logged message originated
frame = logging.currentframe()
depth = 2
while frame.f_code.co_filename == logging.__file__:
frame = frame.f_back # type: ignore
depth += 1
logger.opt(depth=depth, exception=record.exc_info).log(
level, record.getMessage()
)
log = logging.getLogger()
# log.setLevel(0)
log.addHandler(InterceptHandler())
# logging.basicConfig(handlers=[InterceptHandler()], level=0, style='{')
| 0
| 0
| 0
|
da9ba344b9f98f766776e34ee07106326af77c14
| 91
|
py
|
Python
|
src/housie/models/__init__.py
|
Eclair24/housie
|
7892002914c2ced422f89cd8c050993c2f931deb
|
[
"Apache-2.0"
] | 1
|
2020-08-10T07:44:01.000Z
|
2020-08-10T07:44:01.000Z
|
src/housie/models/__init__.py
|
Eclair24/housie
|
7892002914c2ced422f89cd8c050993c2f931deb
|
[
"Apache-2.0"
] | 1
|
2020-12-07T16:31:05.000Z
|
2020-12-09T09:04:58.000Z
|
src/housie/models/__init__.py
|
Eclair24/housie
|
7892002914c2ced422f89cd8c050993c2f931deb
|
[
"Apache-2.0"
] | 1
|
2020-10-01T17:54:19.000Z
|
2020-10-01T17:54:19.000Z
|
from .board import Board, demo_board
from .ticket import Ticket, load_tickets, demo_ticket
| 30.333333
| 53
| 0.824176
|
from .board import Board, demo_board
from .ticket import Ticket, load_tickets, demo_ticket
| 0
| 0
| 0
|
0d7e6c92009db30a4a0cda37191f99149779e2a9
| 616
|
py
|
Python
|
examples/example1.py
|
va-dudnikov/pygraph7
|
bf3469ca75f44283ee647dc10fa7d94bb7834cf3
|
[
"MIT"
] | 2
|
2018-08-02T10:15:39.000Z
|
2018-08-02T17:32:11.000Z
|
examples/example1.py
|
va-dudnikov/pygraph7
|
bf3469ca75f44283ee647dc10fa7d94bb7834cf3
|
[
"MIT"
] | null | null | null |
examples/example1.py
|
va-dudnikov/pygraph7
|
bf3469ca75f44283ee647dc10fa7d94bb7834cf3
|
[
"MIT"
] | null | null | null |
# Example 1
# This example shows how to encode a matrix in graph7 format
import graph7 as g7
import random
order = 10
# The matrix contains only 0s and 1s, so encoding each element
# needs only 1 bit
rand_mat = [[random.randint(0, 1) for _ in range(order)] for _ in range(order)]
directed = g7.encode(rand_mat)
# Make the matrix symmetric (undirected): zero the diagonal and mirror the upper triangle into the lower one
for i in range(order):
for j in range(i, order):
if i == j:
rand_mat[i][j] = 0
continue
rand_mat[j][i] = rand_mat[i][j]
undirected = g7.encode(rand_mat)
# Compare
print(directed)
print(undirected)
| 22.814815
| 79
| 0.672078
|
# Example 1
# This example shows how to encode a matrix in graph7 format
import graph7 as g7
import random
order = 10
# The matrix contains only 0s and 1s, so encoding each element
# needs only 1 bit
rand_mat = [[random.randint(0, 1) for _ in range(order)] for _ in range(order)]
directed = g7.encode(rand_mat)
# Make the matrix symmetric (undirected): zero the diagonal and mirror the upper triangle into the lower one
for i in range(order):
for j in range(i, order):
if i == j:
rand_mat[i][j] = 0
continue
rand_mat[j][i] = rand_mat[i][j]
undirected = g7.encode(rand_mat)
# Compare
print(directed)
print(undirected)
| 0
| 0
| 0
|
c8f3bc6c094eeb66cdada903f9c92d378863e6b8
| 6,369
|
py
|
Python
|
pybot/plugins/message/__init__.py
|
jkent/pybot
|
0c70a7c29caa709413e04a411a5fdb22a8dbdb12
|
[
"MIT"
] | 1
|
2017-06-01T00:52:44.000Z
|
2017-06-01T00:52:44.000Z
|
pybot/plugins/message/__init__.py
|
jkent/pybot
|
0c70a7c29caa709413e04a411a5fdb22a8dbdb12
|
[
"MIT"
] | 17
|
2015-03-21T19:35:45.000Z
|
2019-04-14T05:17:49.000Z
|
pybot/plugins/message/__init__.py
|
jkent/jkent-pybot
|
0c70a7c29caa709413e04a411a5fdb22a8dbdb12
|
[
"MIT"
] | 1
|
2015-03-27T22:52:42.000Z
|
2015-03-27T22:52:42.000Z
|
# -*- coding: utf-8 -*-
# vim: set ts=4 et
from datetime import datetime, timedelta
import re
from sqlalchemy import or_, and_, func
from pybot.plugin import *
from . import models
from .models import *
RETRY_INTERVAL = 3600
| 27.934211
| 80
| 0.508714
|
# -*- coding: utf-8 -*-
# vim: set ts=4 et
from datetime import datetime, timedelta
import re
from sqlalchemy import or_, and_, func
from pybot.plugin import *
from . import models
from .models import *
RETRY_INTERVAL = 3600
class Plugin(BasePlugin):
def on_load(self):
self.db = models.init(self.bot)
def on_unload(self):
self.db.close()
@hook
def message_send_trigger(self, msg, args, argstr):
try:
addressee, text = argstr.strip().split(None, 1)
except:
msg.reply('Expected: <addressee> <text>')
return True
optout = self.db.query(Preference).filter_by(nick=addressee,
key='optout').first()
if optout and optout.value.lower() == 'true':
msg.reply('Recipient has opted out of messages.')
return True
channel = msg.channel
delta = None
while True:
m = re.match(r'.*\s+(as dm|in \d{1,3}[mhdw])$', text)
if not m:
break
cmd = m.group(1)
if cmd == 'as dm':
channel = None
elif cmd.startswith('in '):
n, unit = int(cmd[3:-1]), cmd[-1]
if unit == 'm':
delta = timedelta(minutes=n)
elif unit == 'h':
delta = timedelta(hours=n)
elif unit == 'd':
delta = timedelta(days=n)
elif unit == 'w':
delta = timedelta(weeks=n)
text = re.sub(cmd, '', text)
if not self.db.query(Block).filter_by(nick=addressee,
block=msg.source).first():
message = Message(msg.source, addressee, text, channel, delta)
self.db.add(message)
self.db.commit()
msg.reply('Message queued!')
return True
@hook
@hook('ack')
def message_ack_trigger(self, msg, args, argstr):
if len(args) != 1:
msg.reply('No arguments allowed.')
return True
count = 0
query = self.db.query(Message) \
.filter(func.lower(Message.addressee)==func.lower(msg.source))
for message in query:
count += 1
self.db.delete(message)
if count:
self.db.commit()
msg.reply('Ack\'d %d message%s.' %
(count, 's' if count > 1 else ''))
else:
msg.reply('No messages to ack.')
return True
@hook
def message_list_trigger(self, msg, args, argstr):
if len(args) != 1:
msg.reply('No arguments allowed.')
return True
query = self.db.query(Message).filter_by(nick=msg.source)
for message in query:
text = '(%d) %s: %s' % (message.id, message.addressee,
message.text)
msg.reply(text, direct=True)
query = self.db.query(Message) \
.filter_by(presented=True) \
.filter(func.lower(Message.addressee)==func.lower(msg.source))
for message in query:
text = '(%d) <%s> %s' % (message.id, message.nick, message.text)
msg.reply(text, direct=True)
return True
@hook
def message_del_trigger(self, msg, args, argstr):
if len(args) != 2:
msg.reply('Expected: <id>')
return True
try:
id = int(args[1])
except:
msg.reply('Expected: <id>')
return True
message = self.db.query(Message).filter_by(id=id) \
.filter(or_(func.lower(Message.nick) == func.lower(msg.source),
and_(func.lower(Message.addressee) == func.lower(msg.source),
Message.presented == True))).first()
if message:
self.db.delete(message)
msg.reply('Message deleted.')
else:
msg.reply('Unknown message.')
return True
@hook
def message_opt_trigger(self, msg, args, argstr):
if len(args) != 2 or args[1].lower() not in ['in', 'out']:
msg.reply('Expected: <in | out>')
return True
optout = self.db.query(Preference) \
.filter_by(key='optout') \
.filter(func.lower(Preference.nick)==func.lower(msg.source)) \
.first()
if not optout:
optout = Preference(msg.source, 'optout', 'False')
optout.value = 'False'
if args[1].lower() == 'out':
optout.value = 'True'
self.db.add(optout)
self.db.commit()
return True
@hook
def message_block_trigger(self, msg, args, argstr):
if len(args) != 2:
msg.reply('Expected: <nick>')
return True
try:
block = Block(msg.source, args[1])
self.db.add(block)
msg.reply('Blocked %s.' % (args[1],))
except:
msg.reply('Already blocked.')
return True
@hook
def message_unblock_trigger(self, msg, args, argstr):
if len(args) != 2:
msg.reply('Expected: <nick>')
return True
block = self.db.query(Block) \
.filter_by(block=args[1]) \
.filter(func.lower(Block.nick)==func.lower(msg.source)) \
.first()
if block:
self.db.delete(block)
msg.reply('Unblocked %s.' % (args[1],))
else:
msg.reply('Not blocked.')
return True
@hook
def privmsg_command(self, msg):
now = datetime.utcnow()
query = self.db.query(Message) \
.filter(func.lower(Message.addressee)==func.lower(msg.source)) \
.filter(Message.next_notify < now)
presented = False
for message in query:
if message.channel and message.channel != msg.channel:
continue
text = '<%s> %s' % (message.nick, message.text)
if message.channel:
msg.reply('%s: %s' % (message.addressee, text))
else:
msg.reply(text, True)
            message.presented = True
            message.next_notify = now + timedelta(seconds=RETRY_INTERVAL)
            self.db.add(message)
            presented = True  # mark that something was shown so the commit below actually runs
if presented:
self.db.commit()
| 5,737
| 379
| 23
|
c3e380b03a6306239eeb08e10db96af6e13a5bd6
| 6,879
|
py
|
Python
|
main.py
|
sharyar/symmetrical-waffle
|
f7de2c11849c841363dc60e225133818ca5423eb
|
[
"MIT"
] | null | null | null |
main.py
|
sharyar/symmetrical-waffle
|
f7de2c11849c841363dc60e225133818ca5423eb
|
[
"MIT"
] | null | null | null |
main.py
|
sharyar/symmetrical-waffle
|
f7de2c11849c841363dc60e225133818ca5423eb
|
[
"MIT"
] | null | null | null |
# Token types
# EOF (end-of-file) token -> indicates that no more input is left for lexical analysis.
# Lexical Analysis: Breaking input strings into tokens -> scanner, tokenizer, lexical analyzer, lexer
# Lexeme -> a sequence of characters that form a token. Multi-digit numbers are one example, which is why an integer() method is implemented.
# Expr method -> finds structure via the stream of tokens from get_next_token() method. Then generates results by computing.
# Parsing -> recognizing a phrase in a stream of tokens -> Parser
# Expr -> Does both parsing and interpreting.
# Here are the guidelines that we will use to convert the grammar to source code. By following them, you can literally
# translate the grammar to a working parser:
# Each rule, R, defined in the grammar, becomes a method with the same name, and references to that rule become a method call: R().
# The body of the method follows the flow of the body of the rule using the very same guidelines.
# Alternatives (a1 | a2 | aN) become an if-elif-else statement
# An optional grouping (…)* becomes a while statement that can loop over zero or more times
# Each token reference T becomes a call to the method eat: eat(T). The way the eat method works is that it consumes the token T if it matches the
# current lookahead token, then it gets a new token from the lexer and assigns that token to the current_token internal variable.
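# To make the guidelines above concrete, here is a hedged, self-contained sketch
# for the toy rule  expr : INTEGER (PLUS INTEGER)*  over a pre-tokenized list.
# MiniParser is illustrative only; the real grammar and lexer differ.
class MiniParser:
    def __init__(self, tokens):
        self.tokens = tokens  # e.g. [("INTEGER", 3), ("PLUS", "+"), ("INTEGER", 4)]
        self.pos = 0
    def current(self):
        return self.tokens[self.pos] if self.pos < len(self.tokens) else ("EOF", None)
    def eat(self, token_type):
        # Guideline: every token reference becomes an eat() call.
        if self.current()[0] != token_type:
            raise SyntaxError("expected %s" % token_type)
        value = self.current()[1]
        self.pos += 1
        return value
    def expr(self):
        # Guideline: the rule name becomes the method name, and the (...)*
        # grouping becomes a while loop.
        result = self.eat("INTEGER")
        while self.current()[0] == "PLUS":
            self.eat("PLUS")
            result += self.eat("INTEGER")
        return result
print(MiniParser([("INTEGER", 3), ("PLUS", "+"), ("INTEGER", 4)]).expr())  # 7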
INTEGER, PLUS, MINUS, MUL, DIV, LPAREN, RPAREN, EOF = (
"INTEGER",
"PLUS",
"MINUS",
"MUL",
"DIV",
"(",
")",
"EOF",
)
if __name__ == "__main__":
main()
| 30.986486
| 145
| 0.573775
|
# Token types
# EOF (end-of-file) token -> indicates that no more input is left for lexical analysis.
# Lexical Analysis: Breaking input strings into tokens -> scanner, tokenizer, lexical analyzer, lexer
# Lexeme -> a sequence of characters that form a token. Multi-digit numbers are one example, which is why an integer() method is implemented.
# Expr method -> finds structure via the stream of tokens from get_next_token() method. Then generates results by computing.
# Parsing -> recognizing a phrase in a stream of tokens -> Parser
# Expr -> Does both parsing and interpreting.
# Here are the guidelines that we will use to convert the grammar to source code. By following them, you can literally
# translate the grammar to a working parser:
# Each rule, R, defined in the grammar, becomes a method with the same name, and references to that rule become a method call: R().
# The body of the method follows the flow of the body of the rule using the very same guidelines.
# Alternatives (a1 | a2 | aN) become an if-elif-else statement
# An optional grouping (…)* becomes a while statement that can loop over zero or more times
# Each token reference T becomes a call to the method eat: eat(T). The way the eat method works is that it consumes the token T if it matches the
# current lookahead token, then it gets a new token from the lexer and assigns that token to the current_token internal variable.
INTEGER, PLUS, MINUS, MUL, DIV, LPAREN, RPAREN, EOF = (
"INTEGER",
"PLUS",
"MINUS",
"MUL",
"DIV",
"(",
")",
"EOF",
)
class Token(object):
def __init__(self, type, value):
# token type: INTEGER, PLUS, EOF
self.type = type
        # token value: 0-9, '+', '*', '-', '/' or None
self.value = value
def __str__(self):
"""String representation of the instance.
Examples:
Token(INTEGER, 3)
Token(PLUS, '+')
"""
return "Token({type}, {value})".format(
type=self.type,
value=repr(self.value), # returns a printable representation of value
)
def __repr__(self):
return self.__str__()
class Lexer(object):
def __init__(self, text):
# string input, e.g. "3+5"
self.text = text
# self.pos is in index into self.text
self.pos = 0
        # current token instance
self.current_token = None
self.current_char = self.text[self.pos]
def error(self):
raise Exception("Invalid character")
def advance(self):
"""Advance the 'pos' pointer and se the current_char variable"""
self.pos += 1
if self.pos > len(self.text) - 1:
self.current_char = None
else:
self.current_char = self.text[self.pos]
def skip_whitespace(self):
while self.current_char is not None and self.current_char.isspace():
self.advance()
def integer(self):
"""Returns a multi-digit integer consumed from the input"""
result = ""
while self.current_char is not None and self.current_char.isdigit():
result += self.current_char
self.advance()
return int(result)
def get_next_token(self):
"""Lexical Analyzer aka tokenizer/scanner
Breaks up a sentence into tokens, one token at a time.
Returns:
Token: returns a token
"""
while self.current_char is not None:
if self.current_char.isspace():
self.skip_whitespace()
continue
if self.current_char.isdigit():
return Token(INTEGER, self.integer())
if self.current_char == "+":
self.advance()
return Token(PLUS, "+")
if self.current_char == "-":
self.advance()
return Token(MINUS, "-")
if self.current_char == "/":
self.advance()
return Token(DIV, "/")
if self.current_char == "*":
self.advance()
return Token(MUL, "*")
if self.current_char == "(":
self.advance()
return Token(LPAREN, "(")
if self.current_char == ")":
self.advance()
return Token(RPAREN, ")")
self.error()
return Token(EOF, None)
class Interpreter(object):
def __init__(self, lexer):
self.lexer = lexer
# set current token to the first token taken from the input
self.current_token = self.lexer.get_next_token()
def error(self):
raise Exception("Invalid syntax")
def eat(self, token_type):
# Compare the current token type with passed token
# type and if the match then "eat" the current token
# and assign the next token to the self.current token
if self.current_token.type == token_type:
self.current_token = self.lexer.get_next_token()
else:
self.error()
def factor(self):
"""factor: INTEGER|LPAREN expr RPAREN"""
token = self.current_token # we keep a reference to the current token
if token.type == INTEGER:
self.eat(INTEGER)
return token.value
elif token.type == LPAREN:
self.eat(LPAREN)
result = self.expr()
self.eat(RPAREN)
return result
def term(self):
"""term: factor((MUL | DIV) factor) *"""
result = self.factor()
while self.current_token.type in (MUL, DIV):
token = self.current_token
if token.type == MUL:
self.eat(MUL)
result = result * self.factor()
elif token.type == DIV:
self.eat(DIV)
result = result / self.factor()
return result
def expr(self):
"""
Arithmetic expression parser/ interpreter
calc> 87 + 3 * (10/ 12 (3+1))
expr: term((PLUS | MINUS) term)*
term: factor((MUL | DIV) factor) *
factor: INTEGER | LPAREN expr RPAREN
"""
result = self.term()
while self.current_token.type in (PLUS, MINUS):
token = self.current_token
if token.type == PLUS:
self.eat(PLUS)
result = result + self.term()
elif token.type == MINUS:
self.eat(MINUS)
result = result - self.term()
return result
def main():
while True:
try:
_text = input("calc> ")
except EOFError:
break
if not _text:
continue
lexer = Lexer(_text)
interpreter = Interpreter(lexer)
result = interpreter.expr()
print(result)
if __name__ == "__main__":
main()
| 1,395
| 3,782
| 92
|
a57dea131ef9158b8695a6ed87ec825cd29d695d
| 3,063
|
py
|
Python
|
tools/changelog_to_deb.py
|
thusoy/laim
|
01b02f5910d046603df8e50ba513b28673fbd3cf
|
[
"MIT"
] | 2
|
2019-12-26T19:45:04.000Z
|
2019-12-26T20:40:18.000Z
|
tools/changelog_to_deb.py
|
thusoy/laim
|
01b02f5910d046603df8e50ba513b28673fbd3cf
|
[
"MIT"
] | 4
|
2020-03-24T17:56:56.000Z
|
2021-04-06T18:02:10.000Z
|
tools/changelog_to_deb.py
|
thusoy/laim
|
01b02f5910d046603df8e50ba513b28673fbd3cf
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import argparse
import datetime
import re
VERSION_HEADER_RE = re.compile(r'(?P<version>\d+\.\d+\.\d+) ?- ?(?P<date>\d{4}-\d{2}-\d{2})')
CHANGE_TYPE_RE = re.compile(r'^### ?(?P<change_type>.*)')
SKIP_LINE_RE = re.compile(r'^-{2,}|^$')
LIST_LINE_RE = re.compile(r'^[-*] ?(?P<line_item>.*)')
CONTINUATION_LINE_RE = re.compile(r'^ {1,4}.*')
# This date format doesn't include the timezone, since we just hard-code that
# to +0000
PARTIAL_DATE_FORMAT = '%a, %d %b %Y %H:%M:%S'
if __name__ == '__main__':
main()
| 31.255102
| 118
| 0.640875
|
#!/usr/bin/env python3
import argparse
import datetime
import re
VERSION_HEADER_RE = re.compile(r'(?P<version>\d+\.\d+\.\d+) ?- ?(?P<date>\d{4}-\d{2}-\d{2})')
CHANGE_TYPE_RE = re.compile(r'^### ?(?P<change_type>.*)')
SKIP_LINE_RE = re.compile(r'^-{2,}|^$')
LIST_LINE_RE = re.compile(r'^[-*] ?(?P<line_item>.*)')
CONTINUATION_LINE_RE = re.compile(r'^ {1,4}.*')
# This date format doesn't include the timezone, since we just hard-code that
# to +0000
PARTIAL_DATE_FORMAT = '%a, %d %b %Y %H:%M:%S'
def main():
args = get_args()
with open(args.changelog) as fh:
source_changelog = fh.read()
new_changelog = convert_changelog(source_changelog, args.project, args.author,
args.author_email)
print(new_changelog, end='')
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('changelog')
parser.add_argument('project')
parser.add_argument('author')
parser.add_argument('author_email')
return parser.parse_args()
def convert_changelog(changelog, project_name, author, author_email):
def finish_changeset():
if not changeset_date:
return
date = changeset_date.strftime(PARTIAL_DATE_FORMAT) + ' +0000'
changesets.append('\n -- %s <%s> %s\n' % (author, author_email, date))
new_changelog = []
changesets = []
changes = []
current_change_type = None
has_found_first_change = False
changeset_date = None
for line in changelog.split('\n'):
version_header_match = VERSION_HEADER_RE.search(line)
if version_header_match:
# Start parsing a new section, finishing up the old one
if has_found_first_change:
finish_changeset()
has_found_first_change = True
current_change_type = None
version = version_header_match.group('version')
changeset_date_raw = version_header_match.group('date')
changeset_date = datetime.datetime.strptime(changeset_date_raw, '%Y-%m-%d') + datetime.timedelta(hours=12)
changesets.append('%s (%s) unstable; urgency=low\n' % (project_name, version))
continue
if not has_found_first_change:
continue
change_type_match = CHANGE_TYPE_RE.match(line)
if change_type_match:
current_change_type = change_type_match.group('change_type')
continue
line = line.rstrip()
skip_match = SKIP_LINE_RE.match(line)
if skip_match:
continue
line_item_match = LIST_LINE_RE.match(line)
if line_item_match:
line = line_item_match.group('line_item')
is_continuation = CONTINUATION_LINE_RE.match(line)
if current_change_type and not is_continuation:
changesets.append(' * %s: %s' % (current_change_type, line))
else:
changesets.append(' %s' % line)
finish_changeset()
new_changelog.append('\n'.join(changesets))
ret = ''.join(new_changelog)
return ret
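# Hedged illustration: a tiny, made-up changelog converted with the function
# above; the exact output is not asserted here, but it has the Debian form
# "myproject (1.2.3) unstable; urgency=low", "  * Fixed: ..." items, and a
# trailing " -- Jane Doe <jane@example.com> <date> +0000" signature line.
def _demo():
    sample = (
        "1.2.3 - 2021-06-01\n"
        "------------------\n"
        "### Fixed\n"
        "- Handle empty changelog sections\n"
    )
    return convert_changelog(sample, 'myproject', 'Jane Doe', 'jane@example.com')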
if __name__ == '__main__':
main()
| 2,454
| 0
| 69
|
d3d378e95dac58f6824d81d41bd8200f128f3554
| 11,406
|
py
|
Python
|
src/app/views.py
|
pk400/pick-a-spot
|
7d2a5db2bcad91f86d3b40097494b74f4b45483f
|
[
"MIT"
] | 2
|
2016-02-02T06:41:34.000Z
|
2016-04-11T21:22:39.000Z
|
src/app/views.py
|
pk400/pick-a-spot
|
7d2a5db2bcad91f86d3b40097494b74f4b45483f
|
[
"MIT"
] | null | null | null |
src/app/views.py
|
pk400/pick-a-spot
|
7d2a5db2bcad91f86d3b40097494b74f4b45483f
|
[
"MIT"
] | 1
|
2016-02-24T01:16:05.000Z
|
2016-02-24T01:16:05.000Z
|
from django.shortcuts import render
from .models import UserProfile, Friend, RoomInstance
from django.contrib.auth.models import User, Group
from django.contrib.sessions.models import Session
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect, HttpResponse
from registration.views import RegistrationView
from django.db.models import Q
from datetime import datetime,timedelta
from django.contrib.gis.geoip2 import GeoIP2
from geopy import geocoders
from registration.views import RegistrationView
from django.core.mail import send_mail
from lazysignup.utils import is_lazy_user
from lazysignup.decorators import allow_lazy_user
from django.template import RequestContext
from django.shortcuts import render_to_response
import json
import ast
import random
import string
mapapikey = ('<script src="https://maps.googleapis.com/maps/api/'
             'js?key=AIzaSyAvDRB7PnQbIVNtRHf3x-MTB5y-3OXD1xg&libraries=places" async defer></script>')
"""
HOME
"""
@login_required(login_url='/')
"""
SPLASH
"""
"""
MAP
"""
@allow_lazy_user
"""
Removes old entries
"""
"""
PREFERENCES
"""
@login_required
"""
FRIENDS
"""
@login_required
"""
CHAT
"""
"""
PROFILE
"""
| 30.994565
| 139
| 0.701385
|
from django.shortcuts import render
from .models import UserProfile, Friend, RoomInstance
from django.contrib.auth.models import User, Group
from django.contrib.sessions.models import Session
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect, HttpResponse
from registration.views import RegistrationView
from django.db.models import Q
from datetime import datetime,timedelta
from django.contrib.gis.geoip2 import GeoIP2
from geopy import geocoders
from registration.views import RegistrationView
from django.core.mail import send_mail
from lazysignup.utils import is_lazy_user
from lazysignup.decorators import allow_lazy_user
from django.template import RequestContext
from django.shortcuts import render_to_response
import json
import ast
import random
import string
mapapikey = ('<script src="https://maps.googleapis.com/maps/api/'
             'js?key=AIzaSyAvDRB7PnQbIVNtRHf3x-MTB5y-3OXD1xg&libraries=places" async defer></script>')
def custom_404(request):
return render(request, '404.html')
def custom_500(request):
return render(request, '500.html')
"""
HOME
"""
@login_required(login_url='/')
def home(request):
context = {
'title': 'Home',
}
return render(request, 'home.html', context)
def rename_lazyaccount(request):
user = request.user
username = user.username
# Makes random username
if is_lazy_user(user) and len(username) >= 30:
user = User.objects.get(username = username)
user.username = "Guest - " + ''.join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(7))
group = Group.objects.get(name="Guest")
group.user_set.add(user)
user.save()
request.user = user
"""
SPLASH
"""
def splash(request):
#rename_lazyaccount(request)
context = {
'title': 'Splash'
}
if request.user.is_authenticated():
return HttpResponseRedirect('/home')
else:
return render(request, 'splash.html', context)
"""
MAP
"""
@allow_lazy_user
def map(request):
rename_lazyaccount(request)
prefval = ""
friendlist = ""
if request.user.is_authenticated():
user = UserProfile.objects.filter(user=request.user)
prefval = user[0].preferences
user = User.objects.get(id=request.user.id)
friendlist = Friend.objects.filter(Q(user1=user) | Q(user2=user)).order_by()
# Get IP then lat/long
g = GeoIP2()
ip = request.META.get('HTTP_X_REAL_IP', None)
try:
lonlat = g.lon_lat(ip)
except Exception as err:
# Private network
lonlat = [-79.4163000, 43.7001100]
context = {
'title': 'Map',
'mapapi': mapapikey,
'preferences' : prefval,
"lon": lonlat[0],
"lat": lonlat[1],
"friends" : friendlist,
"listofusers" : []
}
check_expiry()
getrequest = request.GET.get('room','')
if getrequest:
ri = RoomInstance.objects.filter(roomname=getrequest)
if not ri:
return HttpResponseRedirect('/')
else:
ri = RoomInstance.objects.get(roomname=getrequest)
users = json.loads(ri.listofusers)
listofusers = []
for user in users:
listofusers.append(user['username'])
context['listofusers'] = listofusers
if request.method == 'POST':
result = json.loads(json.dumps(request.POST))
if result['type'] == "makeroom":
ri = RoomInstance(
roomname=result['roomname'],
listofpref=result['listofpref'],
owner=result['owner'],
expirydate=datetime.today() + timedelta(days=1))
ri.save()
elif result['type'] == "grabpref":
# Returns the list of preferences
ri = RoomInstance.objects.get(roomname=result['roomname'])
return HttpResponse(ri.listofpref)
elif result['type'] == "updatepref":
# Updates preferences
ri = RoomInstance.objects.filter(roomname=result['roomname'])
ri.update(listofpref=result['listofpref'])
elif result['type'] == "getlocation":
# Grabs lat/lng from address
value = ""
try:
geolocator = geocoders.GoogleV3('AIzaSyB_djldgwM0HGAg7opZpVx5StLQB1KDkQc')
location = geolocator.geocode(result["address"])
value = "[" + str(location.longitude) + ", " + str(location.latitude) + "]"
except Exception as err:
value = "Error"
return HttpResponse(value)
elif result['type'] == 'sendinvites' and request.user.is_authenticated():
"""
# Chosen users to send email to
chosenfriends = result['friends'].split()[0]
chosenfriends = ast.literal_eval(chosenfriends)
user = User.objects.get(id=request.user.id)
friendlist = Friend.objects.filter(Q(user1=user) | Q(user2=user)).order_by()
emaillist = []
for friend in friendlist:
# Grabs only one person
# If match exists and it's not the user sending the request
user = User.objects.get((Q(id=friend.user1_id) | Q(id=friend.user2_id)) & ~Q(id=request.user.id))
for chosen in chosenfriends:
# Since all friends are grabbed, only sends email to those picked
if chosen.lower() == user.username.lower() and user.email.lower() not in emaillist:
emaillist.append(user.email.lower())
#TODO: Make this better
send_mail('PickASpot Room', 'Hi, join my room!' + result['roomlink'], 'pickaspotmail@gmail.com', emaillist)
"""
ri = RoomInstance.objects.get(roomname=result['roomname'])
listofusers = json.loads(ri.listofusers)
listofpref = json.loads(ri.listofpref)
friends = json.loads(result['friends'])
for friend in friends:
newuserinroom = {"username": friend, "online" : "false"}
listofusers.append(newuserinroom)
userid = User.objects.get(username__iexact=friend)
user = UserProfile.objects.get(user=userid.id)
try:
newuserpref = {"user" : friend, "preferences" : json.loads(user.preferences)}
except Exception as err:
newuserpref = {"user" : friend, "preferences" : "[]"}
listofpref.append(newuserpref)
# Includes into notification
pendingnotifications = json.loads(user.pendingnotifications)
message = {
'message' : "You have been added into room " + str(result['roomname']) + " by your friend " + str(request.user),
"reason": "NewRoom",
"notificationid": ''.join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(7))}
pendingnotifications.insert(0,message)
user.pendingnotifications = json.dumps(pendingnotifications)
user.save()
ri.listofusers = json.dumps(listofusers)
ri.listofpref = json.dumps(listofpref)
ri.save()
elif result['type'] == 'savechathistory':
# Saves all data to db
data = result['chathistory']
ri = RoomInstance.objects.filter(roomname=result['roomname'])
ri.update(chathistory=result['chathistory'])
elif result['type'] == 'grabchathistory':
# grabs and returns history
ri = RoomInstance.objects.get(roomname=result['roomname'])
return HttpResponse(ri.chathistory)
elif result['type'] == 'listofusers':
ri = RoomInstance.objects.get(roomname=result['roomname'])
return HttpResponse(ri.listofusers)
elif result['type'] == 'grabroomsettings':
ri = RoomInstance.objects.get(roomname=result['roomname'])
return HttpResponse(ri.roomsetting)
elif result['type'] == 'saveroomsettings':
onlineonly = result['onlineonly']
randomrslt = result['random']
setting = [onlineonly, randomrslt]
ri = RoomInstance.objects.filter(roomname=result['roomname'])
ri.update(roomsetting=json.dumps(setting))
elif result['type'] == 'savelastresult':
lastresults = result['results']
ri = RoomInstance.objects.filter(roomname=result['roomname'])
ri.update(lastresult=lastresults)
elif result['type'] == 'getlastresult':
ri = RoomInstance.objects.get(roomname=result['roomname'])
return HttpResponse(ri.lastresult)
elif result['type'] == 'updateuserpref':
user = UserProfile.objects.filter(user=request.user)
update = user.update(preferences=result['preferences'])
return render(request, 'map.html', context)
"""
Removes old entries
"""
def check_expiry():
ri = RoomInstance.objects.filter(expirydate__lt = datetime.now() )
ri.update(isexpired = True)
"""
PREFERENCES
"""
@login_required
def preferences(request):
user = UserProfile.objects.filter(user=request.user)
if request.method == 'POST':
# UPDATE users in database
jsonsave = json.dumps(request.POST).replace("\\", "")
jsonsave = jsonsave.replace(' "{','{')
jsonsave = jsonsave.replace('"}','}')
update = user.update(preferences=jsonsave)
context = {
'title': 'Preferences',
'postjson': 'null',
}
else:
jsonval = user[0].preferences
context = {
'title': 'Preferences',
'postjson': jsonval,
}
return render(request, 'preferences.html', context)
"""
FRIENDS
"""
@login_required
def friends(request):
user = User.objects.get(id=request.user.id)
friendlist = Friend.objects.filter(Q(user1=user) | Q(user2=user)).order_by()
context = {
'title': 'Friends',
'friends': friendlist
}
if request.method == 'POST':
result = json.loads(json.dumps(request.POST))
if result['type'] == "sendinvite":
usernametext = result['newfriend']
originaluser = request.user.username
# Grabs needed user
try:
senttousername = User.objects.get(username__iexact=usernametext)
senttouser = UserProfile.objects.get(user=senttousername.id)
pendingfriendslist = json.loads(senttouser.pendingfriends)
except Exception as err:
senttouser = None
if not senttouser:
servermessage = {
"message" : "The user " + str(usernametext) + " does not exist.",
"reason": "Wrong"}
elif senttousername == request.user:
servermessage = {
"message" : "You can not be friends with yourself. Sorry!",
"reason": "Self"}
elif Friend.objects.filter(user1=senttousername, user2=request.user) or Friend.objects.filter(user1=request.user, user2=senttousername):
# check if already friends
servermessage = {
"message" : "Already a friend of " + str(senttousername) +"!",
"reason": "Exists"}
else:
# checks if in list already
inlist = False
for friend in pendingfriendslist:
if friend['username'].lower() == originaluser.lower():
inlist = True
break
if inlist:
servermessage = {
"message" : "Invite already sent to " + str(senttousername) +"!",
"reason": "Already"}
else:
                    # Grab the pending-friends JSON and append to it
pendingfriend = {}
pendingfriend["username"] = originaluser
pendingfriendslist.append(pendingfriend)
newfriend = UserProfile.objects.filter(user=senttousername.id)
# Updates the list with the new user object
newfriend.update(pendingfriends=json.dumps(pendingfriendslist))
servermessage = {
"message" : "You have sent a invite to " + str(senttousername),
"reason": "Added"}
return HttpResponse(json.dumps(servermessage))
elif result['type'] == "deletefriend":
friend_user = User.objects.get(username=result['delfriend'])
try:
Friend.objects.get(user1=user, user2=friend_user).delete()
except Exception as err:
try:
Friend.objects.get(user1=friend_user, user2=user).delete()
except Exception as err:
pass
return render(request, 'friends.html', context)
"""
CHAT
"""
def chat2(request):
context = {
'title': 'Chat2',
}
return render(request, 'chat2.html', context)
"""
PROFILE
"""
def profile(request):
context = {
'title': 'Profile',
}
return render(request, 'profile.html', context)
def resetpassword(request):
return render(request, 'resetpassword.html', {})
def confirmreset(request):
return render(request, 'confirmreset.html', {})
| 9,899
| 0
| 291
|
3f7eb8fc684d5efb045fee5916f9de74f361d25e
| 366
|
py
|
Python
|
src/middleware.py
|
cibinsb/EMBL_task
|
6d9d42f7fdf6a4cacb6393f1ac793ebc4f3279ae
|
[
"Apache-2.0"
] | null | null | null |
src/middleware.py
|
cibinsb/EMBL_task
|
6d9d42f7fdf6a4cacb6393f1ac793ebc4f3279ae
|
[
"Apache-2.0"
] | null | null | null |
src/middleware.py
|
cibinsb/EMBL_task
|
6d9d42f7fdf6a4cacb6393f1ac793ebc4f3279ae
|
[
"Apache-2.0"
] | null | null | null |
import falcon
import json
from helper.log import logger
from helper.utils import Constants
| 28.153846
| 87
| 0.663934
|
import falcon
import json
from helper.log import logger
from helper.utils import Constants
class HttpMethodValidator(object):
def process_request(self, req, resp):
if req.method not in ["GET"]:
logger.info("")
resp.status = falcon.HTTP_405
resp.body = json.dumps({Constants.error.value: "API supports only [GET,]"})
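# Hedged wiring sketch: register the validator as Falcon middleware so non-GET
# requests receive the 405 above before routing. falcon.API is the Falcon 1.x/2.x
# constructor; newer releases accept the same middleware argument on falcon.App.
if __name__ == "__main__":
    api = falcon.API(middleware=[HttpMethodValidator()])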
| 212
| 13
| 49
|
cb23e1f88750c0a157d04ee4d7eb7300fc61bf1c
| 201
|
py
|
Python
|
carat/audio.py
|
giovana-morais/carat
|
982af4dde63e8560ccc9120f3767f1420a545b67
|
[
"MIT"
] | 11
|
2019-08-08T13:54:59.000Z
|
2021-12-09T19:15:12.000Z
|
carat/audio.py
|
giovana-morais/carat
|
982af4dde63e8560ccc9120f3767f1420a545b67
|
[
"MIT"
] | 42
|
2019-07-01T19:17:24.000Z
|
2022-03-15T19:46:09.000Z
|
carat/audio.py
|
giovana-morais/carat
|
982af4dde63e8560ccc9120f3767f1420a545b67
|
[
"MIT"
] | 3
|
2021-02-10T13:21:59.000Z
|
2022-02-17T11:47:06.000Z
|
# encoding: utf-8
# pylint: disable=C0103
"""Utility functions to deal with audio."""
import librosa
__all__ = ['load']
# simply use librosa.load (this may change in the future)
load = librosa.load
| 18.272727
| 57
| 0.716418
|
# encoding: utf-8
# pylint: disable=C0103
"""Utility functions to deal with audio."""
import librosa
__all__ = ['load']
# simply use librosa.load (this may change in the future)
load = librosa.load
| 0
| 0
| 0
|
494a1b25d074708072f6aa1625114ff6dece6b72
| 493
|
py
|
Python
|
flights-tracker/flights_tracker/tracker.py
|
rafalstepien/flights-tracker
|
9ba1339a6c3e50a4671f41de8714fe854d044e9d
|
[
"MIT"
] | 1
|
2022-01-26T04:59:36.000Z
|
2022-01-26T04:59:36.000Z
|
flights-tracker/flights_tracker/tracker.py
|
rafalstepien/flights-tracker
|
9ba1339a6c3e50a4671f41de8714fe854d044e9d
|
[
"MIT"
] | null | null | null |
flights-tracker/flights_tracker/tracker.py
|
rafalstepien/flights-tracker
|
9ba1339a6c3e50a4671f41de8714fe854d044e9d
|
[
"MIT"
] | null | null | null |
import uvicorn
from fastapi import FastAPI
from email_sender.service.email_sender import EmailSender
from flights_tracker.routes import router
from flights_tracker.services.weekend_flights_service import WeekendFlightsService
app = FlightsTracker()
app.include_router(router)
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=5000, log_level="info")
| 25.947368
| 82
| 0.803245
|
import uvicorn
from fastapi import FastAPI
from email_sender.service.email_sender import EmailSender
from flights_tracker.routes import router
from flights_tracker.services.weekend_flights_service import WeekendFlightsService
class FlightsTracker(FastAPI):
weekend_flights_service = WeekendFlightsService()
email_sender = EmailSender()
app = FlightsTracker()
app.include_router(router)
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=5000, log_level="info")
| 0
| 96
| 23
|
e5181cd8bd4999ebfed2a270ac6316f18fff22b2
| 1,051
|
py
|
Python
|
src/denzel/app/tasks.py
|
eliorc/denzel
|
429a7c277b1a4ee515e3a112a8734b58fc89f53a
|
[
"Apache-2.0"
] | 17
|
2018-10-02T07:09:45.000Z
|
2021-11-30T11:02:28.000Z
|
src/denzel/app/tasks.py
|
eliorc/denzel
|
429a7c277b1a4ee515e3a112a8734b58fc89f53a
|
[
"Apache-2.0"
] | 4
|
2018-10-22T10:18:17.000Z
|
2020-09-02T14:31:31.000Z
|
src/denzel/app/tasks.py
|
eliorc/denzel
|
429a7c277b1a4ee515e3a112a8734b58fc89f53a
|
[
"Apache-2.0"
] | 2
|
2018-10-28T10:17:57.000Z
|
2020-02-05T22:12:04.000Z
|
import os
import celery
import requests
from app.logic.pipeline import process, load_model, predict
CELERY_BROKER = os.environ.get('CELERY_BROKER')
CELERY_BACKEND = os.environ.get('CELERY_BACKEND')
app = celery.Celery('tasks', broker=CELERY_BROKER, backend=CELERY_BACKEND)
@app.task(base=Model)
Model = app.register_task(Model())
| 22.847826
| 103
| 0.681256
|
import os
import celery
import requests
from app.logic.pipeline import process, load_model, predict
CELERY_BROKER = os.environ.get('CELERY_BROKER')
CELERY_BACKEND = os.environ.get('CELERY_BACKEND')
app = celery.Celery('tasks', broker=CELERY_BROKER, backend=CELERY_BACKEND)
class Model(celery.Task):
def __init__(self):
with open('.worker_loading', 'a'):
pass
self._model = load_model()
        if os.path.exists('.worker_loading'):  # Needed because concurrent workers may each try to remove it
os.remove('.worker_loading')
@property
def model(self):
return self._model
@app.task(base=Model)
def invoke_predict(json_data, sync=False):
# Preprocess data
data = process(invoke_predict.model, json_data)
    # Perform predictions
result = predict(invoke_predict.model, data)
# Send prediction to callback_uri
if not sync:
requests.post(url=json_data['callback_uri'],
json=result)
return result
Model = app.register_task(Model())
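# Hedged usage sketch: queue an asynchronous prediction. The 'callback_uri' field
# is the one the task above POSTs results to; the remaining payload keys are
# illustrative placeholders for whatever process() expects.
if __name__ == '__main__':
    payload = {'callback_uri': 'http://example.com/callback', 'data': [1, 2, 3]}
    invoke_predict.delay(payload)  # result is POSTed to callback_uri when ready
    sync_result = invoke_predict.delay(payload, sync=True).get()  # or block for it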
| 596
| 72
| 45
|
0e0050f065ebfc4a804ecbc5ca6fa59531b95360
| 2,791
|
py
|
Python
|
stationaryschrodinger/tfAPI.py
|
cteerara/StationarySchrodinger
|
247a6fefdb7d2f5b09444d59a1862c4053db9ad6
|
[
"MIT"
] | null | null | null |
stationaryschrodinger/tfAPI.py
|
cteerara/StationarySchrodinger
|
247a6fefdb7d2f5b09444d59a1862c4053db9ad6
|
[
"MIT"
] | null | null | null |
stationaryschrodinger/tfAPI.py
|
cteerara/StationarySchrodinger
|
247a6fefdb7d2f5b09444d59a1862c4053db9ad6
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
tf.enable_eager_execution();
| 37.213333
| 123
| 0.620208
|
import tensorflow as tf
tf.enable_eager_execution();
def compare(t1,t2,tol):
# Compare two 2D tensorflow arrays
# INPUT: tensorflow arrays t1,t2
    #        tolerance tol
# OUTPUT: true if (t1-t2) < tol, else false
t1size = t1.get_shape();
t2size = t2.get_shape();
if (len(t1size) != len(t2size)):
print('Array dimension mismatch')
return False;
if (t1.dtype != t2.dtype):
print('Datatype mismatch')
return False
isequal = True;
for i in range(0,len(t1size)):
isequal = isequal and t1size[i] == t2size[i]
if (not isequal):
print('Length in each dimension mismatch')
return False;
else:
tol = tf.constant(tol,shape=t1size,dtype=tf.float32)
EqualTensor = tf.math.less( tf.cast(tf.math.abs(t1-t2),tf.float32), tol)
isequal = tf.math.reduce_all(EqualTensor)
if (isequal):
return True;
else:
return False;
def tfdim(t):
# INPUT: tensorflow array t
# OUTPUT: n where n is the number of dimensions of t
# e.g., [1,1,1] has n==1 (1D array) and [[1,1],[2,2]] has n==2 (2D array)
return tf.reshape(tf.shape(tf.shape(t)),[])
def tflen(t):
# INPUT: 1D tensorflow array t
# OUTPUT: n where n is the length of t
# e.g., [1,1,1] has n==3
tol = 1e-6
scalar1 = tf.constant(1,dtype=tf.int32)
tdim = tfdim(t)
if not compare(scalar1,tf.cast(tdim,scalar1.dtype),tol):
        # I am not including this in the coverage because it is meant to throw an error
raise ValueError('input array is not 1D') # pragma: no cover
return tf.reshape(tf.shape(t),[])
def integrate(t1,t2,x):
# INPUT: 1D tensorflow arrays of the same length t1,t2
# t1 and t2 are vector representing a function defined on the same domain at the same evenly spaced grid points
# dx is the spacings between gridpoints
# OUTPUT: tout where tout = \Sum_i(t1[i]*t2[i])
# tout is the result of a numerical integration of t1*t2
#> Handle inappopriate size/dim array
dx = x[1]-x[0]
for i in range(0,tflen(x)-1):
        if (dx-(x[i+1]-x[i])) > 1e-4: # Tolerance for difference in dx.
            # I am not including this in the coverage because it is meant to throw an error
raise ValueError('Gridpoint are not evenly spaced. The difference of the spacings exceeds 1e-6') # pragma: no cover
scalar1 = tf.constant(1)
tol = 1e-6
if not compare(tflen(t1),tflen(t2),tol):
        # I am not including this in the coverage because it is meant to throw an error
raise ValueError('Input arrays are not the same shape') # pragma: no cover
n = tflen(t1)
tout = tf.reshape(tf.reduce_sum(t1[0:n-1]*t2[0:n-1]),[])
return tout*dx
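# Hedged usage sketch for compare(): two almost-identical tensors agree within a
# 1e-3 tolerance (assumes the TF1-style eager setup enabled at the top).
if __name__ == '__main__':
    a = tf.constant([[1.0, 2.0], [3.0, 4.0]])
    b = tf.constant([[1.0, 2.0], [3.0, 4.0001]])
    print(compare(a, b, 1e-3))  # expected: True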
| 2,636
| 0
| 100
|
2ccb1bca281aebfdaa7f2da47ea35be6e589abad
| 267
|
py
|
Python
|
crudapp/crudapp/views.py
|
nitinprakash96/zeolearn-tut
|
a6202115d9a32225b9e76f290d9b6f8430725375
|
[
"Apache-2.0"
] | null | null | null |
crudapp/crudapp/views.py
|
nitinprakash96/zeolearn-tut
|
a6202115d9a32225b9e76f290d9b6f8430725375
|
[
"Apache-2.0"
] | null | null | null |
crudapp/crudapp/views.py
|
nitinprakash96/zeolearn-tut
|
a6202115d9a32225b9e76f290d9b6f8430725375
|
[
"Apache-2.0"
] | 1
|
2019-11-01T03:03:32.000Z
|
2019-11-01T03:03:32.000Z
|
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse
| 24.272727
| 64
| 0.696629
|
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse
def home(request):
html = """
<h1>Django CRUD Example</h1>
<a href="/blog_posts/">Blog post CRUD example</a><br>
"""
return HttpResponse(html)
| 141
| 0
| 23
|
4f8d038bdaafd675342a535381e26b7f9aaa0b8d
| 3,254
|
py
|
Python
|
main.py
|
jondye/rfid-squeezebox-py
|
ad5bf9e5c96cac188e455a7aa2fed536cfb582b8
|
[
"MIT"
] | null | null | null |
main.py
|
jondye/rfid-squeezebox-py
|
ad5bf9e5c96cac188e455a7aa2fed536cfb582b8
|
[
"MIT"
] | null | null | null |
main.py
|
jondye/rfid-squeezebox-py
|
ad5bf9e5c96cac188e455a7aa2fed536cfb582b8
|
[
"MIT"
] | null | null | null |
import db
import mfrc522
import network
import speaker
import squeezebox
import time
import ujson
from machine import Pin, PWM
if __name__ == '__main__':
main()
| 28.79646
| 76
| 0.556853
|
import db
import mfrc522
import network
import speaker
import squeezebox
import time
import ujson
from machine import Pin, PWM
def read(reader):
while True:
stat, tag_type = reader.request(reader.REQIDL)
if stat == reader.OK:
stat, raw_uid = reader.anticoll()
if stat == reader.OK:
return '%02x%02x%02x%02x' % (
raw_uid[0], raw_uid[1], raw_uid[2], raw_uid[3])
time.sleep(1)
def make_reader():
return mfrc522.MFRC522(14, 13, 12, 2, 15)
def load_config():
with open('config.json', 'r') as f:
return ujson.load(f)
def connect(client, ssid, password, sounder):
if not client.active():
print("Activating WIFI Station mode")
client.active(True)
if not client.isconnected():
print("Connecting to network")
client.connect(ssid, password)
while not client.isconnected():
pass
print("network config:", client.ifconfig())
speaker.ack_sound(sounder)
time.sleep(0.2)
speaker.ack_sound(sounder)
time.sleep(0.2)
speaker.ack_sound(sounder)
return client
def program_tracks(reader, sounder, config):
print("Programming tracks")
tracks = squeezebox.read_current_playlist(
config['host'], config['port'], config['player_id'])
print("tracks: %s" % tracks)
speaker.ack_sound(sounder)
card_id = None
while not card_id or card_id == config['master_card']:
card_id = read(reader)
db.save(card_id, {'tracks': tracks})
print("save to card %s" % card_id)
speaker.success_sound(sounder)
time.sleep(5)
def main():
config = load_config()
reader = make_reader()
sounder = PWM(Pin(4))
network.WLAN(network.AP_IF).active(False) # disable access point
client = network.WLAN(network.STA_IF)
connect(client, config['ssid'], config['password'], sounder)
while True:
try:
if not client.isconnected():
connect(client, config['ssid'], config['password'], sounder)
card_id = read(reader)
if card_id == config['master_card']:
print("master card detected")
program_tracks(reader, sounder, config)
elif card_id == config['pause_card']:
print("pausing")
squeezebox.pause(
config['host'],
config['port'],
config['player_id'])
speaker.success_sound(sounder)
else:
print("card %s read" % card_id)
data = db.load(card_id)
if data and 'tracks' in data:
print("playing %s" % data['tracks'])
squeezebox.play(
config['host'],
config['port'],
config['player_id'],
data['tracks'])
speaker.success_sound(sounder)
else:
speaker.fail_sound(sounder)
except Exception as e:
speaker.fail_sound(sounder)
print("EXCEPTION: %s" % e)
time.sleep(5)
if __name__ == '__main__':
main()
| 2,926
| 0
| 138
|
0f1b8fca9391ed6855a3fd18e5f8db0ff1d75274
| 204
|
py
|
Python
|
openregistry/lots/core/tests/fixtures.py
|
EBRD-ProzorroSale/openregistry.lots.core
|
dd54376ac15858ea6d44cf792d1d87328495b9b8
|
[
"Apache-2.0"
] | null | null | null |
openregistry/lots/core/tests/fixtures.py
|
EBRD-ProzorroSale/openregistry.lots.core
|
dd54376ac15858ea6d44cf792d1d87328495b9b8
|
[
"Apache-2.0"
] | 48
|
2017-08-19T13:22:59.000Z
|
2019-03-12T07:52:37.000Z
|
openregistry/lots/core/tests/fixtures.py
|
EBRD-ProzorroSale/openregistry.lots.core
|
dd54376ac15858ea6d44cf792d1d87328495b9b8
|
[
"Apache-2.0"
] | 9
|
2017-08-03T09:29:57.000Z
|
2018-09-14T10:59:32.000Z
|
PARTIAL_MOCK_CONFIG = {
"api":{
"plugins":{
"lots.core": None,
"transferring":{
"plugins":{
"lots.transferring":None
}
},
}
}
}
| 15.692308
| 36
| 0.392157
|
PARTIAL_MOCK_CONFIG = {
"api":{
"plugins":{
"lots.core": None,
"transferring":{
"plugins":{
"lots.transferring":None
}
},
}
}
}
| 0
| 0
| 0
|
dae2afc7991f13605a9cad9f36b257c5723f013a
| 622
|
py
|
Python
|
docs/chap6/code/floatrange.py
|
marble-git/python-laoqi
|
74c4bb5459113e54ce64443e5da5a9c6a3052d6a
|
[
"MIT"
] | null | null | null |
docs/chap6/code/floatrange.py
|
marble-git/python-laoqi
|
74c4bb5459113e54ce64443e5da5a9c6a3052d6a
|
[
"MIT"
] | null | null | null |
docs/chap6/code/floatrange.py
|
marble-git/python-laoqi
|
74c4bb5459113e54ce64443e5da5a9c6a3052d6a
|
[
"MIT"
] | null | null | null |
#coding:utf-8
'''
filename:floatrange.py
generate a range-like sequence that accepts floating-point parameters.
'''
import itertools
f = frange(1.2,9)
print(list(f))
f = frange(1.2)
print(list(f))
f = frange(1.2,9,-1)
print(list(f))
f = frange(1.2,None,-1)
print(list(f))
f = frange(1.2,None,0)
print(list(f))
| 12.44
| 71
| 0.573955
|
#coding:utf-8
'''
filename:floatrange.py
generate a sequence of parmeters with floating-point numbers.
'''
import itertools
def frange(start,end=None,step=1.0):
if end is None:
end = float(start)
start = 0.0
assert step
for i in itertools.count():
next = start+ i*step
if (step > 0.0 and next >= end)or (step < 0.0 and next <= end):
break
yield next
f = frange(1.2,9)
print(list(f))
f = frange(1.2)
print(list(f))
f = frange(1.2,9,-1)
print(list(f))
f = frange(1.2,None,-1)
print(list(f))
f = frange(1.2,None,0)
print(list(f))
| 268
| 0
| 23
|
9ba96374077cc6b6b051eb3cef2d42ee720e73a1
| 2,926
|
py
|
Python
|
cmake/spirv_num_to_cpp.py
|
orrkarl/rasterizer
|
8ff356a6344336d8cd2250cbf488b0d853b26cda
|
[
"MIT"
] | 2
|
2019-08-16T09:12:30.000Z
|
2019-08-18T20:57:33.000Z
|
cmake/spirv_num_to_cpp.py
|
orrkarl/rasterizer
|
8ff356a6344336d8cd2250cbf488b0d853b26cda
|
[
"MIT"
] | 12
|
2019-09-13T17:26:09.000Z
|
2020-02-13T12:34:40.000Z
|
cmake/spirv_num_to_cpp.py
|
orrkarl/vkr
|
8ff356a6344336d8cd2250cbf488b0d853b26cda
|
[
"MIT"
] | null | null | null |
import argparse
import os
import re
import sys
SOURCE_FMT = """// AUTOGENERATED
// Generated by spirv_num_to_cpp.py, from '{file_name}'
#include <stddef.h>
#include <stdint.h>
namespace {namespace} {{
const volatile uint32_t {var_name}[] = {{
{raw_lines}
}};
const volatile size_t {var_name}_count = {total_data_count};
}} // {namespace}
// AUTOGENERATED
"""
HEADER_VARS_FMT = "extern const uint32_t {var_name}[];\nextern const size_t {var_name}_count;"
HEADER_FMT = """// AUTOGENERATED
// Generated by spirv_num_to_cpp.py
#pragma once
#include <stddef.h>
#include <stdint.h>
namespace {namespace} {{
{var_pairs}
}} // {namespace}
// AUTOGENERATED
"""
if __name__ == '__main__':
sys.exit(main())
| 27.603774
| 123
| 0.697539
|
import argparse
import os
import re
import sys
SOURCE_FMT = """// AUTOGENERATED
// Generated by spirv_num_to_cpp.py, from '{file_name}'
#include <stddef.h>
#include <stdint.h>
namespace {namespace} {{
const volatile uint32_t {var_name}[] = {{
{raw_lines}
}};
const volatile size_t {var_name}_count = {total_data_count};
}} // {namespace}
// AUTOGENERATED
"""
HEADER_VARS_FMT = "extern const uint32_t {var_name}[];\nextern const size_t {var_name}_count;"
HEADER_FMT = """// AUTOGENERATED
// Generated by spirv_num_to_cpp.py
#pragma once
#include <stddef.h>
#include <stdint.h>
namespace {namespace} {{
{var_pairs}
}} // {namespace}
// AUTOGENERATED
"""
def readfile(path):
with open(path) as f:
return f.read()
def make_var_name(path):
return os.path.basename(path).replace('.', '_')
def make_var_pairs(file_paths):
pairs = (HEADER_VARS_FMT.format(var_name=make_var_name(path)) for path in file_paths)
return '\n\n'.join(pairs)
def handle_source_command(args):
file_data = readfile(args.src_path)
file_name = os.path.basename(args.src_path)
total_data_count = len(file_data.split(','))
with open(os.path.join(args.dest_path, file_name + '.cpp'), 'w') as f:
f.write(SOURCE_FMT.format(file_name=file_name, namespace=args.namespace, var_name=make_var_name(args.src_path),
raw_lines=file_data, total_data_count=total_data_count))
def handle_header_command(args):
with open(args.dest_path, 'w') as f:
f.write(HEADER_FMT.format(namespace=args.namespace, var_pairs=make_var_pairs(args.spirv_files)))
def main():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='command')
source_parser = subparsers.add_parser('source', help='generating spirv-in-cpp source files')
source_parser.add_argument('-s', '--src-path', help='source file to stringify', required=True)
source_parser.add_argument('-d', '--dest-path', help='generated files dir', required=True)
source_parser.add_argument('-ns', '--namespace', help='namespace of the generated arguments', required=True)
header_parser = subparsers.add_parser('header', help='generating spirv-in-cpp header file')
header_parser.add_argument('-s', '--spirv-files', help='list of spirv files embedded in cpp', required=True, nargs='+')
header_parser.add_argument('-d', '--dest-path', help='full file path for the generated header', required=True)
header_parser.add_argument('-ns', '--namespace', help='namespace of the generated arguments', required=True)
args = parser.parse_args()
if args.command == 'source':
handle_source_command(args)
elif args.command == 'header':
handle_header_command(args)
else:
        print('Invalid command \'{command}\''.format(command=args.command), file=sys.stderr)
return 1
return 0
if __name__ == '__main__':
sys.exit(main())
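A quick illustrative sketch of the pure helpers above, showing how the embedded-array variable name and the header declarations are derived from file paths (the shader paths are hypothetical):
# Illustration only; the paths below are made up.
print(make_var_name("shaders/triangle.vert.spv"))
# -> triangle_vert_spv
print(make_var_pairs(["shaders/triangle.vert.spv", "shaders/triangle.frag.spv"]))
# -> pairs of declarations joined by a blank line:
#    extern const uint32_t triangle_vert_spv[];
#    extern const size_t triangle_vert_spv_count;
#    ...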
| 2,063
| 0
| 138
|
7db64e2d008d8a948c335e4ef44b123ff8349784
| 1,228
|
py
|
Python
|
transfer-system/transfer_system/server/app.py
|
beasley-weather/beasley-weather-station
|
5834429506c9cff82833a70d017c2ce51c8ded54
|
[
"MIT"
] | null | null | null |
transfer-system/transfer_system/server/app.py
|
beasley-weather/beasley-weather-station
|
5834429506c9cff82833a70d017c2ce51c8ded54
|
[
"MIT"
] | null | null | null |
transfer-system/transfer_system/server/app.py
|
beasley-weather/beasley-weather-station
|
5834429506c9cff82833a70d017c2ce51c8ded54
|
[
"MIT"
] | null | null | null |
import json
from json import JSONDecodeError
import subprocess as sp
from traceback import print_exc
from ..dbi import WeewxDB
from flask import Flask, request
def create_app(database):
"""
:param database: Name of database to save into
"""
app = Flask(__name__)
weewx_db = WeewxDB(database)
@app.route('/', methods=['POST'])
@app.route('/data', methods=['POST'])
@app.route('/')
return app
| 23.169811
| 81
| 0.610749
|
import json
from json import JSONDecodeError
import subprocess as sp
from traceback import print_exc
from ..dbi import WeewxDB
from flask import Flask, request
def create_app(database):
"""
:param database: Name of database to save into
"""
app = Flask(__name__)
weewx_db = WeewxDB(database)
@app.route('/', methods=['POST'])
def route_data():
try:
data = json.loads(request.data.decode('utf-8'))
weewx_db.archive_insert_data(data)
except JSONDecodeError:
print_exc()
            return 'Invalid JSON', 422
except IOError:
print_exc()
            return 'Unable to save data', 500
rebuild_weewx_reports()
return ''
@app.route('/data', methods=['POST'])
def route_data_deprecated():
return 'This route, /data, is deprecated. Use / instead\n' + route_data()
@app.route('/')
def route_index():
return 'yooo, what what!'
return app
def rebuild_weewx_reports():
proc = sp.run(['wee_reports'])
if proc.returncode == 0:
print('Successfully generated reports')
return True
else:
print('Failed to generate reports')
return False
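A hedged sketch of exercising the server with Flask's test client. The database path and the JSON payload keys below are assumptions for illustration only; WeewxDB must be able to open the file, and wee_reports must be on PATH, for the POST to fully succeed on a real station.
# Sketch only: assumes 'weewx.sdb' is a readable weewx database.
app = create_app('weewx.sdb')
client = app.test_client()
resp = client.post('/', data='{"dateTime": 1609459200, "outTemp": 3.2}')
print(resp.status_code, resp.get_data(as_text=True))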
| 691
| 0
| 101
|
38d650ecab150ae7367ae4f5f43a45e22bee985c
| 1,617
|
py
|
Python
|
dglib/generalization/irm.py
|
billzhonggz/Transfer-Learning-Library
|
d7a6e4298e571d5101e05515a2ab1f171160ef89
|
[
"MIT"
] | 1,474
|
2020-07-24T02:55:55.000Z
|
2022-03-31T12:35:56.000Z
|
dglib/generalization/irm.py
|
mxliu/Transfer-Learning-Library
|
7b0ccb3a8087ecc65daf4b1e815e5a3f42106641
|
[
"MIT"
] | 70
|
2020-08-05T10:47:33.000Z
|
2022-03-31T03:48:54.000Z
|
dglib/generalization/irm.py
|
mxliu/Transfer-Learning-Library
|
7b0ccb3a8087ecc65daf4b1e815e5a3f42106641
|
[
"MIT"
] | 312
|
2020-08-01T11:08:39.000Z
|
2022-03-30T06:03:47.000Z
|
"""
Modified from https://github.com/facebookresearch/DomainBed
@author: Baixu Chen
@contact: cbx_99_hasta@outlook.com
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.autograd as autograd
class InvariancePenaltyLoss(nn.Module):
r"""Invariance Penalty Loss from `Invariant Risk Minimization <https://arxiv.org/pdf/1907.02893.pdf>`_.
We adopt implementation from `DomainBed <https://github.com/facebookresearch/DomainBed>`_. Given classifier
output :math:`y` and ground truth :math:`labels`, we split :math:`y` into two parts :math:`y_1, y_2`, corresponding
labels are :math:`labels_1, labels_2`. Next we calculate cross entropy loss with respect to a dummy classifier
:math:`w`, resulting in :math:`grad_1, grad_2` . Invariance penalty is then :math:`grad_1*grad_2`.
Inputs:
- y: predictions from model
- labels: ground truth
Shape:
- y: :math:`(N, C)` where C means the number of classes.
        - labels: :math:`(N, )` where N means the mini-batch size
"""
| 41.461538
| 119
| 0.681509
|
"""
Modified from https://github.com/facebookresearch/DomainBed
@author: Baixu Chen
@contact: cbx_99_hasta@outlook.com
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.autograd as autograd
class InvariancePenaltyLoss(nn.Module):
r"""Invariance Penalty Loss from `Invariant Risk Minimization <https://arxiv.org/pdf/1907.02893.pdf>`_.
We adopt implementation from `DomainBed <https://github.com/facebookresearch/DomainBed>`_. Given classifier
output :math:`y` and ground truth :math:`labels`, we split :math:`y` into two parts :math:`y_1, y_2`, corresponding
labels are :math:`labels_1, labels_2`. Next we calculate cross entropy loss with respect to a dummy classifier
:math:`w`, resulting in :math:`grad_1, grad_2` . Invariance penalty is then :math:`grad_1*grad_2`.
Inputs:
- y: predictions from model
- labels: ground truth
Shape:
- y: :math:`(N, C)` where C means the number of classes.
        - labels: :math:`(N, )` where N means the mini-batch size
"""
def __init__(self):
super(InvariancePenaltyLoss, self).__init__()
self.scale = torch.tensor(1.).requires_grad_()
def forward(self, y: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
loss_1 = F.cross_entropy(y[::2] * self.scale, labels[::2])
loss_2 = F.cross_entropy(y[1::2] * self.scale, labels[1::2])
grad_1 = autograd.grad(loss_1, [self.scale], create_graph=True)[0]
grad_2 = autograd.grad(loss_2, [self.scale], create_graph=True)[0]
penalty = torch.sum(grad_1 * grad_2)
return penalty
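A minimal sketch of combining this penalty with the task loss during training. The model, optimizer, and trade_off weight below are illustrative assumptions, not part of this module:
# Sketch only: a toy single-domain batch with an assumed penalty weight.
model = nn.Linear(16, 3)
irm_penalty = InvariancePenaltyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)
trade_off = 1.0  # assumed weighting between task loss and IRM penalty
x = torch.randn(8, 16)
labels = torch.randint(0, 3, (8,))
y = model(x)
loss = F.cross_entropy(y, labels) + trade_off * irm_penalty(y, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()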
| 513
| 0
| 54
|
5575f2112eec09893ca26f17e183ed4317d31a74
| 1,521
|
py
|
Python
|
test/test_multipush.py
|
brentschroeter/parallel
|
8465e2fab51596e0cfee5d0c216d85914b0bcd00
|
[
"MIT"
] | 1
|
2021-06-22T07:38:13.000Z
|
2021-06-22T07:38:13.000Z
|
test/test_multipush.py
|
brentschroeter/parallel
|
8465e2fab51596e0cfee5d0c216d85914b0bcd00
|
[
"MIT"
] | 1
|
2018-02-20T03:54:33.000Z
|
2018-02-20T05:33:14.000Z
|
test/test_multipush.py
|
brentschroeter/parallel
|
8465e2fab51596e0cfee5d0c216d85914b0bcd00
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import parallel
import unittest
import thread
import testing_lib
import time
import uuid
import config
from multiprocessing import RawValue
if __name__ == '__main__':
unittest.main()
| 33.8
| 207
| 0.737015
|
#!/usr/bin/env python
import parallel
import unittest
import thread
import testing_lib
import time
import uuid
import config
from multiprocessing import RawValue
def wait_job(ms):
time.sleep(ms * 0.001)
return 1
def get_timeout(num_workers):
transportation_time = testing_lib.TRANSPORT_MS * config.NUM_JOBS * config.NUM_PUSHERS + 1000
working_time = config.WAIT_TIME * config.NUM_JOBS * config.NUM_PUSHERS
return working_time + transportation_time
def send_jobs(run_job, args):
for i in range(config.NUM_JOBS):
        run_job(wait_job, (config.WAIT_TIME,))
def on_recv_result(result, job_info, args):
total_completed, = args
total_completed.value += 1
class TestParallel(unittest.TestCase):
def test_multipush(self):
'''Tests behavior when multiple servers are pushing jobs simultaneously.'''
total_completed = RawValue('i')
total_completed.value = 0
start_workers, kill_workers = testing_lib.construct_worker_pool(config.num_local_workers(), config.WORKER_ADDRESSES, send_jobs, (), on_recv_result, (total_completed,), num_pushers=config.NUM_PUSHERS)
start_workers()
completion = testing_lib.check_for_completion(total_completed, config.NUM_JOBS * config.NUM_PUSHERS, get_timeout(len(config.WORKER_ADDRESSES)))
kill_workers()
if not completion:
self.fail('Not all jobs received: %d / %d' % (total_completed.value, config.NUM_JOBS * config.NUM_PUSHERS))
if __name__ == '__main__':
unittest.main()
| 435
| 760
| 115
|
34090411f9334b063de9fa2e34c7176d14b5fdaf
| 164
|
py
|
Python
|
intro/part03-02_countdown/src/countdown.py
|
Hannah-Abi/python-pro-21
|
2ce32c4bf118054329d19afdf83c50561be1ada8
|
[
"MIT"
] | null | null | null |
intro/part03-02_countdown/src/countdown.py
|
Hannah-Abi/python-pro-21
|
2ce32c4bf118054329d19afdf83c50561be1ada8
|
[
"MIT"
] | null | null | null |
intro/part03-02_countdown/src/countdown.py
|
Hannah-Abi/python-pro-21
|
2ce32c4bf118054329d19afdf83c50561be1ada8
|
[
"MIT"
] | null | null | null |
# Fix the program
print("Are you ready?")
number = int(input("Please type in a number: "))
while number != 0:
print(number)
number = number -1
print("Now!")
| 23.428571
| 48
| 0.646341
|
# Fix the program
print("Are you ready?")
number = int(input("Please type in a number: "))
while number != 0:
print(number)
number = number -1
print("Now!")
| 0
| 0
| 0
|
bf8eddc2251b4fdf476a1eb2917f0565b810547c
| 1,708
|
py
|
Python
|
11_data_science/pdf/test_pymupdf.py
|
edgardeng/python-advance-interview
|
59fd7bee8e871acdc7fdfecf2a110db840c47ebb
|
[
"Apache-2.0"
] | 1
|
2022-03-06T13:03:56.000Z
|
2022-03-06T13:03:56.000Z
|
11_data_science/pdf/test_pymupdf.py
|
edgardeng/python-advance-interview
|
59fd7bee8e871acdc7fdfecf2a110db840c47ebb
|
[
"Apache-2.0"
] | null | null | null |
11_data_science/pdf/test_pymupdf.py
|
edgardeng/python-advance-interview
|
59fd7bee8e871acdc7fdfecf2a110db840c47ebb
|
[
"Apache-2.0"
] | null | null | null |
import datetime
import os
import fitz  # fitz is installed with: pip install PyMuPDF -i https://mirrors.aliyun.com/pypi/simple (PyMuPDF-1.18.17)
if __name__ == "__main__":
path = r'D:\聚均科技-研发\2021 聚均科技AI平台\OCR\上市公司财报.pdf'
# path = r'D:\聚均科技-研发\2021 聚均科技AI平台\OCR\热力公司2016年审计报告.pdf'
pdf_to_img(path)
# list_img = ['热力公司2016年审计报告3.jpg', 'Inked热力公司2016年审计报告-4.jpg', '热力公司2016年审计报告6.jpg', '热力公司2016年审计报告9.jpg',
# '热力公司2016年审计报告4.jpg', ]
# list_img = ['热力公司2016年审计报告3.jpg', 'Inked热力公司2016年审计报告6.jpg', '热力公司2016年审计报告9.jpg',]
#
# path2 = r'D:\聚均科技-研发\2021 聚均科技AI平台\OCR\热力公司2016年审计报告222.pdf'
# list_img2 = [f'D:\聚均科技-研发\\2021 聚均科技AI平台\\OCR\\{item}' for item in list_img]
# img_to_pdf(list_img2, path2)
| 35.583333
| 111
| 0.622365
|
import datetime
import os
import fitz  # fitz is installed with: pip install PyMuPDF -i https://mirrors.aliyun.com/pypi/simple (PyMuPDF-1.18.17)
def pdf_to_img(path_file):
with fitz.open(path_file) as pdf:
print(pdf.metadata)
for i in range(pdf.pageCount):
# if i < 118 or i > 120:
# continue
path = path_file.replace('.pdf', f'{i}.jpg')
page = pdf[i]
pix = page.get_pixmap(matrix=fitz.Matrix(1.5, 1.5).preRotate(0))
pix.writePNG(path)
# pix.save(path)
# mat = fitz.Matrix(1, 1).preRotate(0)
# pix = page.getPixmap(matrix=mat, alpha=False)
# pix.writePNG(path)
def img_to_pdf(list_path_img, path_pdf):
doc = fitz.open()
    # Loop over the files in the path; this could also be done with "import os" and "for img in os.listdir(img_path)".
    # To concatenate the files in 1, 2, 3 order, we simply loop over the numbered file names passed in.
for img_file in list_path_img:
imgdoc = fitz.open(img_file)
pdfbytes = imgdoc.convertToPDF() # 使用图片创建单页的 PDF
imgpdf = fitz.open("pdf", pdfbytes)
doc.insertPDF(imgpdf)
doc.save(path_pdf)
doc.close()
if __name__ == "__main__":
path = r'D:\聚均科技-研发\2021 聚均科技AI平台\OCR\上市公司财报.pdf'
# path = r'D:\聚均科技-研发\2021 聚均科技AI平台\OCR\热力公司2016年审计报告.pdf'
pdf_to_img(path)
# list_img = ['热力公司2016年审计报告3.jpg', 'Inked热力公司2016年审计报告-4.jpg', '热力公司2016年审计报告6.jpg', '热力公司2016年审计报告9.jpg',
# '热力公司2016年审计报告4.jpg', ]
# list_img = ['热力公司2016年审计报告3.jpg', 'Inked热力公司2016年审计报告6.jpg', '热力公司2016年审计报告9.jpg',]
#
# path2 = r'D:\聚均科技-研发\2021 聚均科技AI平台\OCR\热力公司2016年审计报告222.pdf'
# list_img2 = [f'D:\聚均科技-研发\\2021 聚均科技AI平台\\OCR\\{item}' for item in list_img]
# img_to_pdf(list_img2, path2)
| 1,035
| 0
| 46
|
fe75b254c08503ec5cca082f0fb15c9c89d1faff
| 5,233
|
py
|
Python
|
userbot/modules/spammer.py
|
deepak99911/SpyderzzBot
|
f8b9a447df3f3a33c9c7ec157ebf446c3f9cd4b2
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1
|
2020-05-27T14:57:14.000Z
|
2020-05-27T14:57:14.000Z
|
userbot/modules/spammer.py
|
deepak99911/SpyderzzBot
|
f8b9a447df3f3a33c9c7ec157ebf446c3f9cd4b2
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/spammer.py
|
deepak99911/SpyderzzBot
|
f8b9a447df3f3a33c9c7ec157ebf446c3f9cd4b2
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 57
|
2019-12-01T17:03:16.000Z
|
2021-04-30T01:50:35.000Z
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.b (the "License");
# you may not use this file except in compliance with the License.
#
import asyncio
from asyncio import wait
from userbot import BOTLOG_CHATID, BOTLOG, CMD_HELP
from userbot.events import register
@register(outgoing=True, pattern="^.spam")
@register(outgoing=True, pattern="^.tspam")
@register(outgoing=True, pattern="^.bigspam")
@register(outgoing=True, pattern="^.gangsta$")
@register(outgoing=True, pattern="^.nikal$")
@register(outgoing=True, pattern="^.repeat")
@register(outgoing=True, pattern="^.repeats")
@register(outgoing=True, pattern="^.picspam")
@register(outgoing=True, pattern="^.delayspam")
CMD_HELP.update({
"spam": ".tspam <text>\
\nUsage: Spam the text letter by letter.\
\n\n.spam <count> <text>\
\nUsage: Your regular spammer stuff :P\
\n\n.bigspam <count> <text>\
\nUsage: .spam on steroids !!\
\n\n.picspam <count> <link>\
\nUsage: As if text spam was not enough !!\
\n\n.delayspam <delay> <count> <text>\
\nUsage: .bigspam but slower.\
\n\n.gangsta\
    \nUsage: Gives you Gangster Feeling, btw Spyder is real Gangsta.\
\n\n.nikal\
\nUsage: Prevents Lawda Lassun.\
\n\n\nNOTE : I am not responsible if you get banned for spamming!"
})
| 30.964497
| 78
| 0.563921
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.b (the "License");
# you may not use this file except in compliance with the License.
#
import asyncio
from asyncio import wait
from userbot import BOTLOG_CHATID, BOTLOG, CMD_HELP
from userbot.events import register
@register(outgoing=True, pattern="^.spam")
async def spammer(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
message = e.text
counter = int(message[6:8])
spam_message = str(e.text[8:])
await wait(
[e.respond(spam_message) for i in range(counter)]
)
await e.delete()
if BOTLOG:
await e.client.send_message(
BOTLOG_CHATID,
"#SPAM \n\n"
"Spam was executed successfully"
)
@register(outgoing=True, pattern="^.tspam")
async def tmeme(e):
tspam = str(e.text[7:])
message = tspam.replace(" ", "")
for letter in message:
await e.respond(letter)
await e.delete()
@register(outgoing=True, pattern="^.bigspam")
async def bigspam(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
message = e.text
counter = int(message[9:13])
spam_message = str(e.text[13:])
for i in range(1, counter):
await e.respond(spam_message)
await e.delete()
if BOTLOG:
await e.client.send_message(
BOTLOG_CHATID,
"#BIGSPAM \n\n"
"Bigspam was executed successfully"
)
@register(outgoing=True, pattern="^.gangsta$")
async def whoizme(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("EVERyBOdy")
await asyncio.sleep(0.3)
await e.edit("iZ")
await asyncio.sleep(0.2)
await e.edit("GangSTur")
await asyncio.sleep(0.5)
await e.edit("UNtIL ")
await asyncio.sleep(0.2)
await e.edit("I")
await asyncio.sleep(0.3)
await e.edit("ArRivE")
await asyncio.sleep(0.3)
await e.edit("🔥")
await asyncio.sleep(0.3)
await e.edit("EVERyBOdy iZ GangSTur UNtIL I ArRivE 🔥")
@register(outgoing=True, pattern="^.nikal$")
async def nikal(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("NikAl")
await asyncio.sleep(0.3)
await e.edit("lAwDe")
await asyncio.sleep(0.2)
await e.edit("PehLi")
await asyncio.sleep(0.5)
await e.edit("FuRsaT")
await asyncio.sleep(0.2)
await e.edit("Me")
await asyncio.sleep(0.3)
await e.edit("NikAl")
await asyncio.sleep(0.3)
await e.edit("<--")
await asyncio.sleep(0.3)
await e.edit("NikAl lAwDe PehLi FuRsaT Me NikAL <--")
@register(outgoing=True, pattern="^.repeat")
async def repeat(e):
message = e.text[10:]
count = int(e.text[8:10])
repmessage = message * count
await e.respond(repmessage)
await e.delete()
@register(outgoing=True, pattern="^.repeats")
async def repeats(e):
message = e.text[10:]
count = int(e.text[8:10])
repmessage = message * count
await wait([e.respond(repmessage)for i in range(count)])
await e.delete()
@register(outgoing=True, pattern="^.picspam")
async def tiny_pic_spam(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
message = e.text
text = message.split()
counter = int(text[0])
link = str(text[2])
if range(1, counter):
await e.client.send_file(e.chat_id, link)
await e.delete()
if BOTLOG:
await e.client.send_message(
BOTLOG_CHATID,
"#PICSPAM \n\n"
"PicSpam was executed successfully"
)
@register(outgoing=True, pattern="^.delayspam")
async def delay_spammer(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
message = e.text
spamDelay = float(message[11:15])
counter = int(message[15:19])
spam_message = str(e.text[19:])
        for i in range(1, counter):
            await e.respond(spam_message)
            await asyncio.sleep(spamDelay)
await e.delete()
if BOTLOG:
await e.client.send_message(
BOTLOG_CHATID,
"#DelaySPAM \n\n"
"DelaySpam was executed successfully"
)
CMD_HELP.update({
"spam": ".tspam <text>\
\nUsage: Spam the text letter by letter.\
\n\n.spam <count> <text>\
\nUsage: Your regular spammer stuff :P\
\n\n.bigspam <count> <text>\
\nUsage: .spam on steroids !!\
\n\n.picspam <count> <link>\
\nUsage: As if text spam was not enough !!\
\n\n.delayspam <delay> <count> <text>\
\nUsage: .bigspam but slower.\
\n\n.gangsta\
    \nUsage: Gives you Gangster Feeling, btw Spyder is real Gangsta.\
\n\n.nikal\
\nUsage: Prevents Lawda Lassun.\
\n\n\nNOTE : I am not responsible if you get banned for spamming!"
})
| 3,660
| 0
| 198
|
137ab97ad5e7c8c06e0212b7937a1621ff946721
| 5,669
|
py
|
Python
|
lte/gateway/python/magma/mobilityd/ip_descriptor_map.py
|
saurabhsoni88/magma
|
4236c9d8edb7bd203707ff7e861b1f7c12fb84c7
|
[
"BSD-3-Clause"
] | 1
|
2021-08-04T16:40:05.000Z
|
2021-08-04T16:40:05.000Z
|
lte/gateway/python/magma/mobilityd/ip_descriptor_map.py
|
saurabhsoni88/magma
|
4236c9d8edb7bd203707ff7e861b1f7c12fb84c7
|
[
"BSD-3-Clause"
] | 112
|
2020-09-03T06:41:43.000Z
|
2022-03-31T12:07:08.000Z
|
lte/gateway/python/magma/mobilityd/ip_descriptor_map.py
|
saurabhsoni88/magma
|
4236c9d8edb7bd203707ff7e861b1f7c12fb84c7
|
[
"BSD-3-Clause"
] | 1
|
2021-05-26T03:41:46.000Z
|
2021-05-26T03:41:46.000Z
|
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
The IP allocator maintains the life cycle of assigned IP addresses.
The IP allocator accepts IP blocks (range of IP addresses), and supports
allocating and releasing IP addresses from the assigned IP blocks. Note
that an IP address is not immediately made available for allocation right
after release: it is "reserved" for the same client for a certain period of
time to ensure that 1) an observer, e.g. pipelined, that caches IP states has
enough time to pull the updated IP states; 2) IP packets intended for the
old client will not be unintentionally routed to a new client until the old
TCP connection expires.
To support this semantic, an IP address can have the following states
during its life cycle in the IP allocator:
FREE: IP is available for allocation
ALLOCATED: IP is allocated for a client.
RELEASED: IP is released, but still reserved for the client
REAPED: IPs are periodically reaped from the RELEASED state to the
REAPED state, and at the same time a timer is set. All REAPED state
IPs are freed once the time goes off. The purpose of this state is
to age IPs for a certain period of time before freeing.
"""
from __future__ import absolute_import, division, print_function, \
unicode_literals
from ipaddress import ip_address, ip_network
from typing import Dict, List, Set
from random import choice
from magma.mobilityd.ip_descriptor import IPDesc, IPState
DEFAULT_IP_RECYCLE_INTERVAL = 15
| 39.096552
| 77
| 0.665726
|
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
The IP allocator maintains the life cycle of assigned IP addresses.
The IP allocator accepts IP blocks (range of IP addresses), and supports
allocating and releasing IP addresses from the assigned IP blocks. Note
that an IP address is not immediately made available for allocation right
after release: it is "reserved" for the same client for a certain period of
time to ensure that 1) an observer, e.g. pipelined, that caches IP states has
enough time to pull the updated IP states; 2) IP packets intended for the
old client will not be unintentionally routed to a new client until the old
TCP connection expires.
To support this semantic, an IP address can have the following states
during its life cycle in the IP allocator:
FREE: IP is available for allocation
ALLOCATED: IP is allocated for a client.
RELEASED: IP is released, but still reserved for the client
REAPED: IPs are periodically reaped from the RELEASED state to the
REAPED state, and at the same time a timer is set. All REAPED state
IPs are freed once the time goes off. The purpose of this state is
to age IPs for a certain period of time before freeing.
"""
from __future__ import absolute_import, division, print_function, \
unicode_literals
from ipaddress import ip_address, ip_network
from typing import Dict, List, Set
from random import choice
from magma.mobilityd.ip_descriptor import IPDesc, IPState
DEFAULT_IP_RECYCLE_INTERVAL = 15
class IpDescriptorMap:
def __init__(self, ip_states: Dict[str, IPDesc]):
"""
Args:
ip_states: Dictionary containing IPDesc keyed by current state
"""
self.ip_states = ip_states
def add_ip_to_state(self, ip: ip_address, ip_desc: IPDesc,
state: IPState):
""" Add ip=>ip_desc pairs to a internal dict """
assert ip_desc.state == state, \
"ip_desc.state %s does not match with state %s" \
% (ip_desc.state, state)
assert state in IPState, "unknown state %s" % state
self.ip_states[state][ip.exploded] = ip_desc
def remove_ip_from_state(self, ip: ip_address, state: IPState) -> IPDesc:
""" Remove an IP from a internal dict """
assert state in IPState, "unknown state %s" % state
ip_desc = self.ip_states[state].pop(ip.exploded, None)
return ip_desc
def pop_ip_from_state(self, state: IPState) -> IPDesc:
""" Pop an IP from a internal dict """
assert state in IPState, "unknown state %s" % state
ip_state_key = choice(list(self.ip_states[state].keys()))
ip_desc = self.ip_states[state].pop(ip_state_key)
return ip_desc
def get_ip_count(self, state: IPState) -> int:
""" Return number of IPs in a state """
assert state in IPState, "unknown state %s" % state
return len(self.ip_states[state])
def test_ip_state(self, ip: ip_address, state: IPState) -> bool:
""" check if IP is in state X """
assert state in IPState, "unknown state %s" % state
return ip.exploded in self.ip_states[state]
def get_ip_state(self, ip: ip_address) -> IPState:
""" return the state of an IP """
for state in IPState:
if self.test_ip_state(ip, state):
return state
raise AssertionError("IP %s not found in any states" % ip)
def list_ips(self, state: IPState) -> List[ip_address]:
""" return a list of IPs in state X """
assert state in IPState, "unknown state %s" % state
return [ip_address(ip) for ip in self.ip_states[state]]
def mark_ip_state(self, ip: ip_address, state: IPState) -> IPDesc:
""" Remove, mark, add: move IP to a new state """
assert state in IPState, "unknown state %s" % state
old_state = self.get_ip_state(ip)
ip_desc = self.ip_states[old_state][ip.exploded]
# some internal checks
assert ip_desc.state != state, \
"move IP to the same state %s" % state
assert ip == ip_desc.ip, "Unmatching ip_desc for %s" % ip
if ip_desc.state == IPState.FREE:
assert ip_desc.sid is None,\
"Unexpected sid in a freed IPDesc {}".format(ip_desc)
else:
assert ip_desc.sid is not None, \
"Missing sid in state %s IPDesc {}".format(ip_desc)
# remove, mark, add
self.remove_ip_from_state(ip, old_state)
ip_desc.state = state
self.add_ip_to_state(ip, ip_desc, state)
return ip_desc
def get_allocated_ip_block_set(self) -> Set[ip_network]:
""" A IP block is allocated if ANY IP is allocated from it """
allocated_ips = self.ip_states[IPState.ALLOCATED]
return {ip_desc.ip_block for ip_desc in allocated_ips.values()}
def __str__(self) -> str:
""" return the state of an IP """
ret_str = "{}:".format(self.__class__.__name__)
for state in IPState:
ret_str = ret_str + "\n{}".format(state)
for _ip, ip_desc in self.ip_states[state].items():
ret_str = ret_str + "\n{}".format(str(ip_desc))
return ret_str
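A small hedged sketch of the life-cycle bookkeeping described in the module docstring. Constructing a concrete IPDesc depends on ip_descriptor.py and is omitted here, so the state-transition calls are shown as comments only:
# Sketch only: one (empty) dict per life-cycle state, keyed by IPState.
states = IpDescriptorMap({state: {} for state in IPState})
ip = ip_address("192.168.128.10")
# ip_desc is assumed to be an IPDesc for `ip`, created in the ALLOCATED state:
# states.add_ip_to_state(ip, ip_desc, IPState.ALLOCATED)
# states.mark_ip_state(ip, IPState.RELEASED)  # ALLOCATED -> RELEASED
# states.mark_ip_state(ip, IPState.FREE)      # eventually reaped and freed
print(states.get_ip_count(IPState.FREE))  # 0 in this empty sketch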
| 0
| 3,711
| 23
|
d3ca9f458a2ea28861bbef896ef4e0e8828dcbae
| 2,444
|
py
|
Python
|
model_zoo/official/nlp/bert_thor/src/lr_generator.py
|
Joejiong/mindspore
|
083fd6565cab1aa1d3114feeacccf1cba0d55e80
|
[
"Apache-2.0"
] | null | null | null |
model_zoo/official/nlp/bert_thor/src/lr_generator.py
|
Joejiong/mindspore
|
083fd6565cab1aa1d3114feeacccf1cba0d55e80
|
[
"Apache-2.0"
] | null | null | null |
model_zoo/official/nlp/bert_thor/src/lr_generator.py
|
Joejiong/mindspore
|
083fd6565cab1aa1d3114feeacccf1cba0d55e80
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""learning rate generator"""
import numpy as np
from mindspore.common.tensor import Tensor
def get_poly_lr(global_step, lr_init, lr_end, lr_max, warmup_steps, total_steps, poly_power):
"""
generate learning rate array
Args:
lr_init(float): init learning rate
lr_end(float): end learning rate
lr_max(float): max learning rate
        warmup_steps(int): number of warmup steps
        total_steps(int): total number of training steps
poly_power(int): poly learning rate power
Returns:
np.array, learning rate array
"""
lr_each_step = []
if warmup_steps != 0:
inc_each_step = (float(lr_max) - float(lr_init)) / float(warmup_steps)
else:
inc_each_step = 0
for i in range(total_steps):
if i < warmup_steps:
lr = float(lr_init) + inc_each_step * float(i)
else:
base = (1.0 - (float(i) - float(warmup_steps)) / (float(total_steps) - float(warmup_steps)))
lr = float(lr_max - lr_end) * (base ** poly_power)
lr = lr + lr_end
if lr < 0.0:
lr = 0.0
lr_each_step.append(lr)
learning_rate = np.array(lr_each_step).astype(np.float32)
current_step = global_step
learning_rate = learning_rate[current_step:]
return learning_rate
# bert kfac hyperparam setting
| 34.422535
| 110
| 0.6473
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""learning rate generator"""
import numpy as np
from mindspore.common.tensor import Tensor
def get_poly_lr(global_step, lr_init, lr_end, lr_max, warmup_steps, total_steps, poly_power):
"""
generate learning rate array
Args:
lr_init(float): init learning rate
lr_end(float): end learning rate
lr_max(float): max learning rate
        warmup_steps(int): number of warmup steps
        total_steps(int): total number of training steps
poly_power(int): poly learning rate power
Returns:
np.array, learning rate array
"""
lr_each_step = []
if warmup_steps != 0:
inc_each_step = (float(lr_max) - float(lr_init)) / float(warmup_steps)
else:
inc_each_step = 0
for i in range(total_steps):
if i < warmup_steps:
lr = float(lr_init) + inc_each_step * float(i)
else:
base = (1.0 - (float(i) - float(warmup_steps)) / (float(total_steps) - float(warmup_steps)))
lr = float(lr_max - lr_end) * (base ** poly_power)
lr = lr + lr_end
if lr < 0.0:
lr = 0.0
lr_each_step.append(lr)
learning_rate = np.array(lr_each_step).astype(np.float32)
current_step = global_step
learning_rate = learning_rate[current_step:]
return learning_rate
# bert kfac hyperparam setting
def get_bert_lr():
learning_rate = Tensor(
get_poly_lr(global_step=0, lr_init=0.0, lr_end=1e-6, lr_max=3.1e-3, warmup_steps=0, total_steps=30000,
poly_power=1))
return learning_rate
def get_bert_damping():
damping = Tensor(
get_poly_lr(global_step=0, lr_init=0.0, lr_end=1e-6, lr_max=5e-2, warmup_steps=0, total_steps=30000,
poly_power=1))
return damping
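A short sketch of calling the generator directly to inspect the warmup and polynomial-decay schedule; the small step counts below are illustrative only:
# Illustration only: 5 warmup steps, then linear (poly_power=1) decay over 20 total steps.
lr = get_poly_lr(global_step=0, lr_init=0.0, lr_end=1e-6, lr_max=1e-2,
                 warmup_steps=5, total_steps=20, poly_power=1)
print(lr.shape)  # (20,)
print(lr[:5])    # ramps linearly from lr_init toward lr_max
print(lr[5:])    # decays from ~lr_max toward lr_end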
| 383
| 0
| 45
|
914862d616409da7f4b40a4e8c797e4288b7d1e5
| 121
|
py
|
Python
|
cwesecurity/__init__.py
|
johnlwhiteman/cwe-security
|
fdb53e5ef2b296d612824c6abe48c7a5be8844bb
|
[
"Apache-2.0"
] | null | null | null |
cwesecurity/__init__.py
|
johnlwhiteman/cwe-security
|
fdb53e5ef2b296d612824c6abe48c7a5be8844bb
|
[
"Apache-2.0"
] | null | null | null |
cwesecurity/__init__.py
|
johnlwhiteman/cwe-security
|
fdb53e5ef2b296d612824c6abe48c7a5be8844bb
|
[
"Apache-2.0"
] | null | null | null |
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).resolve().parent))
from cwesecurity.cwe import Cwe
| 30.25
| 53
| 0.809917
|
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).resolve().parent))
from cwesecurity.cwe import Cwe
| 0
| 0
| 0
|
38b212aebca5df92320025b051c573a9846ac9fc
| 40,510
|
py
|
Python
|
bed_reader/_open_bed.py
|
TedDriggs/bed-reader
|
6b6309d45234cd04feccd2362e60e1138555bf4e
|
[
"Apache-2.0"
] | null | null | null |
bed_reader/_open_bed.py
|
TedDriggs/bed-reader
|
6b6309d45234cd04feccd2362e60e1138555bf4e
|
[
"Apache-2.0"
] | null | null | null |
bed_reader/_open_bed.py
|
TedDriggs/bed-reader
|
6b6309d45234cd04feccd2362e60e1138555bf4e
|
[
"Apache-2.0"
] | null | null | null |
import logging
import multiprocessing
import os
from dataclasses import dataclass
from itertools import repeat, takewhile
from pathlib import Path
from typing import Any, List, Mapping, Optional, Union
import numpy as np
import pandas as pd
from .bed_reader import read_f32, read_f64, read_i8
# https://stackoverflow.com/questions/845058/how-to-get-line-count-of-a-large-file-cheaply-in-python
@dataclass
_delimiters = {"fam": r"\s+", "bim": "\t"}
_count_name = {"fam": "iid_count", "bim": "sid_count"}
_meta_meta = {
# https://stackoverflow.com/questions/41921255/staticmethod-object-is-not-callable
"fid": _MetaMeta("fam", 0, np.str_, "0", _all_same),
"iid": _MetaMeta("fam", 1, np.str_, None, _sequence),
"father": _MetaMeta("fam", 2, np.str_, "0", _all_same),
"mother": _MetaMeta("fam", 3, np.str_, "0", _all_same),
"sex": _MetaMeta("fam", 4, np.int32, 0, _all_same),
"pheno": _MetaMeta("fam", 5, np.str_, "0", _all_same),
"chromosome": _MetaMeta("bim", 0, np.str_, "0", _all_same),
"sid": _MetaMeta("bim", 1, np.str_, None, _sequence),
"cm_position": _MetaMeta("bim", 2, np.float32, 0, _all_same),
"bp_position": _MetaMeta("bim", 3, np.int32, 0, _all_same),
"allele_1": _MetaMeta("bim", 4, np.str_, "A1", _all_same),
"allele_2": _MetaMeta("bim", 5, np.str_, "A2", _all_same),
}
class open_bed:
"""
Open a PLINK .bed file for reading.
Parameters
----------
filepath: pathlib.Path or str
File path to the .bed file.
iid_count: None or int, optional
Number of individuals (samples) in the .bed file.
The default (``iid_count=None``) finds the number
automatically by quickly scanning the .fam file.
sid_count: None or int, optional
Number of SNPs (variants) in the .bed file.
The default (``sid_count=None``) finds the number
automatically by quickly scanning the .bim file.
properties: dict, optional
A dictionary of any replacement properties. The default is an empty dictionary.
The keys of the dictionary are the names of the properties to replace.
The possible keys are:
"fid" (family id), "iid" (individual or sample id), "father" (father id),
"mother" (mother id), "sex", "pheno" (phenotype), "chromosome", "sid"
(SNP or variant id), "cm_position" (centimorgan position), "bp_position"
(base-pair position), "allele_1", "allele_2".
The values are replacement lists or arrays. A value can also be `None`,
meaning do not read or offer this property. See examples, below.
The list or array will be converted to a :class:`numpy.ndarray`
of the appropriate dtype, if necessary. Any :class:`numpy.nan` values
        will be converted to the appropriate missing value. The PLINK `.fam specification
<https://www.cog-genomics.org/plink2/formats#fam>`_
and `.bim specification <https://www.cog-genomics.org/plink2/formats#bim>`_
lists the dtypes and missing values for each property.
count_A1: bool, optional
True (default) to count the number of A1 alleles (the PLINK standard).
False to count the number of A2 alleles.
num_threads: None or int, optional
The number of threads with which to read data. Defaults to all available
processors.
Can also be set with these environment variables (listed in priority order):
'PST_NUM_THREADS', 'NUM_THREADS', 'MKL_NUM_THREADS'.
skip_format_check: bool, optional
False (default) to immediately check for expected starting bytes in
the .bed file. True to delay the check until (and if) data is read.
fam_filepath: pathlib.Path or str, optional
Path to the file containing information about each individual (sample).
Defaults to replacing the .bed file’s suffix with .fam.
bim_filepath: pathlib.Path or str, optional
Path to the file containing information about each SNP (variant).
Defaults to replacing the .bed file’s suffix with .bim.
Returns
-------
open_bed
an open_bed object
Examples
--------
List individual (sample) :attr:`iid` and SNP (variant) :attr:`sid`, then :meth:`read`
the whole file.
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> bed = open_bed(file_name)
>>> print(bed.iid)
['iid1' 'iid2' 'iid3']
>>> print(bed.sid)
['sid1' 'sid2' 'sid3' 'sid4']
>>> print(bed.read())
[[ 1. 0. nan 0.]
[ 2. 0. nan 2.]
[ 0. 1. 2. 0.]]
>>> del bed # optional: delete bed object
Open the file and read data for one SNP (variant)
at index position 2.
.. doctest::
>>> import numpy as np
>>> with open_bed(file_name) as bed:
... print(bed.read(np.s_[:,2]))
[[nan]
[nan]
[ 2.]]
Replace :attr:`iid`.
>>> bed = open_bed(file_name, properties={"iid":["sample1","sample2","sample3"]})
>>> print(bed.iid) # replaced
['sample1' 'sample2' 'sample3']
>>> print(bed.sid) # same as before
['sid1' 'sid2' 'sid3' 'sid4']
Give the number of individuals (samples) and SNPs (variants) so that the .fam and
.bim files need never be opened.
>>> with open_bed(file_name, iid_count=3, sid_count=4) as bed:
... print(bed.read())
[[ 1. 0. nan 0.]
[ 2. 0. nan 2.]
[ 0. 1. 2. 0.]]
Mark some properties as "don’t read or offer".
>>> bed = open_bed(file_name, properties={
... "father" : None, "mother" : None, "sex" : None, "pheno" : None,
... "allele_1" : None, "allele_2":None })
>>> print(bed.iid) # read from file
['iid1' 'iid2' 'iid3']
>>> print(bed.allele_2) # not read and not offered
None
See the :meth:`read` for details of reading batches via slicing and fancy indexing.
"""
def read(
self,
index: Optional[Any] = None,
dtype: Optional[Union[type, str]] = "float32",
order: Optional[str] = "F",
force_python_only: Optional[bool] = False,
num_threads=None,
) -> np.ndarray:
"""
Read genotype information.
Parameters
----------
index:
An optional expression specifying the individuals (samples) and SNPs
(variants) to read. (See examples, below).
Defaults to ``None``, meaning read all.
(If index is a tuple, the first component indexes the individuals and the
second indexes
the SNPs. If it is not a tuple and not None, it indexes SNPs.)
dtype: {'float32' (default), 'float64', 'int8'}, optional
The desired data-type for the returned array.
order : {'F','C'}, optional
The desired memory layout for the returned array.
Defaults to ``F`` (Fortran order, which is SNP-major).
force_python_only: bool, optional
If False (default), uses the faster Rust code; otherwise it uses the slower
pure Python code.
num_threads: None or int, optional
The number of threads with which to read data. Defaults to all available
processors.
Can also be set with :class:`open_bed` or these
environment variables (listed in priority order):
'PST_NUM_THREADS', 'NUM_THREADS', 'MKL_NUM_THREADS'.
Returns
-------
numpy.ndarray
2-D array containing values of 0, 1, 2, or missing
Rows represent individuals (samples). Columns represent SNPs (variants).
For ``dtype`` 'float32' and 'float64', NaN indicates missing values.
For 'int8', -127 indicates missing values.
Examples
--------
To read all data in a .bed file, set ``index`` to ``None``. This is the default.
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.read())
[[ 1. 0. nan 0.]
[ 2. 0. nan 2.]
[ 0. 1. 2. 0.]]
To read selected individuals (samples) and/or SNPs (variants), set each part of
a :class:`numpy.s_` to an `int`, a list of `int`, a slice expression, or
a list of `bool`.
Negative integers count from the end of the list.
.. doctest::
>>> import numpy as np
>>> bed = open_bed(file_name)
>>> print(bed.read(np.s_[:,2])) # read the SNPs indexed by 2.
[[nan]
[nan]
[ 2.]]
>>> print(bed.read(np.s_[:,[2,3,0]])) # read the SNPs indexed by 2, 3, and 0
[[nan 0. 1.]
[nan 2. 2.]
[ 2. 0. 0.]]
>>> # read SNPs from 1 (inclusive) to 4 (exclusive)
>>> print(bed.read(np.s_[:,1:4]))
[[ 0. nan 0.]
[ 0. nan 2.]
[ 1. 2. 0.]]
>>> print(np.unique(bed.chromosome)) # print unique chrom values
['1' '5' 'Y']
>>> print(bed.read(np.s_[:,bed.chromosome=='5'])) # read all SNPs in chrom 5
[[nan]
[nan]
[ 2.]]
>>> print(bed.read(np.s_[0,:])) # Read 1st individual (across all SNPs)
[[ 1. 0. nan 0.]]
>>> print(bed.read(np.s_[::2,:])) # Read every 2nd individual
[[ 1. 0. nan 0.]
[ 0. 1. 2. 0.]]
>>> #read last and 2nd-to-last individuals and the last SNPs
>>> print(bed.read(np.s_[[-1,-2],-1]))
[[0.]
[2.]]
You can give a dtype for the output.
.. doctest::
>>> print(bed.read(dtype='int8'))
[[ 1 0 -127 0]
[ 2 0 -127 2]
[ 0 1 2 0]]
>>> del bed # optional: delete bed object
"""
iid_index_or_slice_etc, sid_index_or_slice_etc = self._split_index(index)
dtype = np.dtype(dtype)
if order not in {"F", "C"}:
raise ValueError(f"order '{order}' not known, only 'F', 'C'")
# Later happy with _iid_range and _sid_range or could it be done with
# allocation them?
if self._iid_range is None:
self._iid_range = np.arange(self.iid_count, dtype="uintp")
if self._sid_range is None:
self._sid_range = np.arange(self.sid_count, dtype="uintp")
iid_index = np.ascontiguousarray(
self._iid_range[iid_index_or_slice_etc],
dtype="uintp",
)
sid_index = np.ascontiguousarray(
self._sid_range[sid_index_or_slice_etc], dtype="uintp"
)
if not force_python_only:
num_threads = get_num_threads(
self._num_threads if num_threads is None else num_threads
)
val = np.zeros((len(iid_index), len(sid_index)), order=order, dtype=dtype)
if self.iid_count > 0 and self.sid_count > 0:
if dtype == np.int8:
reader = read_i8
elif dtype == np.float64:
reader = read_f64
elif dtype == np.float32:
reader = read_f32
else:
raise ValueError(
f"dtype '{val.dtype}' not known, only "
+ "'int8', 'float32', and 'float64' are allowed."
)
reader(
str(self.filepath),
iid_count=self.iid_count,
sid_count=self.sid_count,
count_a1=self.count_A1,
iid_index=iid_index,
sid_index=sid_index,
val=val,
num_threads=num_threads,
)
else:
if not self.count_A1:
byteZero = 0
byteThree = 2
else:
byteZero = 2
byteThree = 0
if dtype == np.int8:
missing = -127
else:
missing = np.nan
# An earlier version of this code had a way to read consecutive SNPs of code
# in one read. May want
# to add that ability back to the code.
# Also, note that reading with python will often result in
# non-contiguous memory
# logging.warn("using pure python plink parser (might be much slower!!)")
val = np.zeros(
((int(np.ceil(0.25 * self.iid_count)) * 4), len(sid_index)),
order=order,
dtype=dtype,
) # allocate it a little big
nbyte = int(np.ceil(0.25 * self.iid_count))
with open(self.filepath, "rb") as filepointer:
for SNPsIndex, bimIndex in enumerate(sid_index):
startbit = int(np.ceil(0.25 * self.iid_count) * bimIndex + 3)
filepointer.seek(startbit)
bytes = np.array(bytearray(filepointer.read(nbyte))).reshape(
(int(np.ceil(0.25 * self.iid_count)), 1), order="F"
)
val[3::4, SNPsIndex : SNPsIndex + 1] = byteZero
val[3::4, SNPsIndex : SNPsIndex + 1][bytes >= 64] = missing
val[3::4, SNPsIndex : SNPsIndex + 1][bytes >= 128] = 1
val[3::4, SNPsIndex : SNPsIndex + 1][bytes >= 192] = byteThree
bytes = np.mod(bytes, 64)
val[2::4, SNPsIndex : SNPsIndex + 1] = byteZero
val[2::4, SNPsIndex : SNPsIndex + 1][bytes >= 16] = missing
val[2::4, SNPsIndex : SNPsIndex + 1][bytes >= 32] = 1
val[2::4, SNPsIndex : SNPsIndex + 1][bytes >= 48] = byteThree
bytes = np.mod(bytes, 16)
val[1::4, SNPsIndex : SNPsIndex + 1] = byteZero
val[1::4, SNPsIndex : SNPsIndex + 1][bytes >= 4] = missing
val[1::4, SNPsIndex : SNPsIndex + 1][bytes >= 8] = 1
val[1::4, SNPsIndex : SNPsIndex + 1][bytes >= 12] = byteThree
bytes = np.mod(bytes, 4)
val[0::4, SNPsIndex : SNPsIndex + 1] = byteZero
val[0::4, SNPsIndex : SNPsIndex + 1][bytes >= 1] = missing
val[0::4, SNPsIndex : SNPsIndex + 1][bytes >= 2] = 1
val[0::4, SNPsIndex : SNPsIndex + 1][bytes >= 3] = byteThree
val = val[iid_index, :] # reorder or trim any extra allocation
assert val.dtype == np.dtype(dtype) # real assert
if not open_bed._array_properties_are_ok(val, order):
val = val.copy(order=order)
return val
@property
def fid(self) -> np.ndarray:
"""
Family id of each individual (sample).
Returns
-------
numpy.ndarray
array of str
'0' represents a missing value.
If needed, will cause a one-time read of the .fam file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.fid)
['fid1' 'fid1' 'fid2']
"""
return self.property_item("fid")
@property
def iid(self) -> np.ndarray:
"""
Individual id of each individual (sample).
Returns
-------
numpy.ndarray
array of str
If needed, will cause a one-time read of the .fam file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.iid)
['iid1' 'iid2' 'iid3']
"""
return self.property_item("iid")
@property
def father(self) -> np.ndarray:
"""
Father id of each individual (sample).
Returns
-------
numpy.ndarray
array of str
'0' represents a missing value.
If needed, will cause a one-time read of the .fam file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.father)
['iid23' 'iid23' 'iid22']
"""
return self.property_item("father")
@property
def mother(self) -> np.ndarray:
"""
Mother id of each individual (sample).
Returns
-------
numpy.ndarray
array of str
'0' represents a missing value.
If needed, will cause a one-time read of the .fam file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.mother)
['iid34' 'iid34' 'iid33']
"""
return self.property_item("mother")
@property
def sex(self) -> np.ndarray:
"""
Sex of each individual (sample).
Returns
-------
numpy.ndarray
array of 0, 1, or 2
0 is unknown, 1 is male, 2 is female
If needed, will cause a one-time read of the .fam file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.sex)
[1 2 0]
"""
return self.property_item("sex")
@property
def pheno(self) -> np.ndarray:
"""
A phenotype for each individual (sample)
(seldom used).
Returns
-------
numpy.ndarray
array of str
'0' may represent a missing value.
If needed, will cause a one-time read of the .fam file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.pheno)
['red' 'red' 'blue']
"""
return self.property_item("pheno")
@property
def properties(self) -> Mapping[str, np.array]:
"""
All the properties returned as a dictionary.
Returns
-------
dict
all the properties
The keys of the dictionary are the names of the properties, namely:
"fid" (family id), "iid" (individual or sample id), "father" (father id),
"mother" (mother id), "sex", "pheno" (phenotype), "chromosome", "sid"
(SNP or variant id), "cm_position" (centimorgan position), "bp_position"
(base-pair position), "allele_1", "allele_2".
The values are :class:`numpy.ndarray`.
If needed, will cause a one-time read of the .fam and .bim file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(len(bed.properties)) #length of dict
12
"""
for key in _meta_meta:
self.property_item(key)
return self.properties_dict
def property_item(self, name: str) -> np.ndarray:
"""
Retrieve one property by name.
Returns
-------
numpy.ndarray
a property value
The name is one of these:
"fid" (family id), "iid" (individual or sample id), "father" (father id),
"mother" (mother id), "sex", "pheno" (phenotype), "chromosome", "sid"
(SNP or variant id), "cm_position" (centimorgan position), "bp_position"
(base-pair position), "allele_1", "allele_2".
If needed, will cause a one-time read of the .fam or .bim file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.property_item('chromosome'))
['1' '1' '5' 'Y']
"""
if name not in self.properties_dict:
mm = _meta_meta[name]
self._read_fam_or_bim(suffix=mm.suffix)
return self.properties_dict[name]
@property
def chromosome(self) -> np.ndarray:
"""
Chromosome of each SNP (variant)
Returns
-------
numpy.ndarray
array of str
'0' represents a missing value.
If needed, will cause a one-time read of the .bim file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.chromosome)
['1' '1' '5' 'Y']
"""
return self.property_item("chromosome")
@property
def sid(self) -> np.ndarray:
"""
SNP id of each SNP (variant).
Returns
-------
numpy.ndarray
array of str
If needed, will cause a one-time read of the .bim file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.sid)
['sid1' 'sid2' 'sid3' 'sid4']
"""
return self.property_item("sid")
@property
def cm_position(self) -> np.ndarray:
"""
Centimorgan position of each SNP (variant).
Returns
-------
numpy.ndarray
array of float
0.0 represents a missing value.
If needed, will cause a one-time read of the .bim file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.cm_position)
[ 100.4 2000.5 4000.7 7000.9]
"""
return self.property_item("cm_position")
@property
def bp_position(self) -> np.ndarray:
"""
Base-pair position of each SNP (variant).
Returns
-------
numpy.ndarray
array of int
0 represents a missing value.
If needed, will cause a one-time read of the .bim file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.bp_position)
[ 1 100 1000 1004]
"""
return self.property_item("bp_position")
@property
def allele_1(self) -> np.ndarray:
"""
First allele of each SNP (variant).
Returns
-------
numpy.ndarray
array of str
If needed, will cause a one-time read of the .bim file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.allele_1)
['A' 'T' 'A' 'T']
"""
return self.property_item("allele_1")
@property
def allele_2(self) -> np.ndarray:
"""
Second allele of each SNP (variant),
Returns
-------
numpy.ndarray
array of str
If needed, will cause a one-time read of the .bim file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.allele_2)
['A' 'C' 'C' 'G']
"""
return self.property_item("allele_2")
@property
def iid_count(self) -> np.ndarray:
"""
Number of individuals (samples).
Returns
-------
int
number of individuals
If needed, will cause a fast line-count of the .fam file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.iid_count)
3
"""
return self._count("fam")
@property
def sid_count(self) -> np.ndarray:
"""
Number of SNPs (variants).
Returns
-------
int
number of SNPs
If needed, will cause a fast line-count of the .bim file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.sid_count)
4
"""
return self._count("bim")
@staticmethod
@staticmethod
@property
def shape(self):
"""
Number of individuals (samples) and SNPs (variants).
Returns
-------
(int, int)
number of individuals, number of SNPs
If needed, will cause a fast line-count of the .fam and .bim files.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.shape)
(3, 4)
"""
return (len(self.iid), len(self.sid))
@staticmethod
@staticmethod
@staticmethod
@staticmethod
@staticmethod
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
# if True:
# from bed_reader import sample_file
# file_name = sample_file("small.bed")
# with open_bed(file_name) as bed:
# print(bed.iid)
# print(bed.sid)
# print(bed.read())
# if False:
# import numpy as np
# from bed_reader._open_bed import open_bed
# # Can get file from
# https://www.dropbox.com/sh/xluk9opjiaobteg/AABgEggLk0ZoO0KQq0I4CaTJa?dl=0
# bigfile = r"M:\deldir\genbgen\2\merged_487400x220000.1.bed"
# # bigfile = '/mnt/m/deldir/genbgen/2/merged_487400x220000.1.bed'
# with open_bed(bigfile, num_threads=20) as bed:
# sid_batch = 22 * 1000
# for sid_start in range(0, 10 * sid_batch, sid_batch):
# slicer = np.s_[:10000, sid_start : sid_start + sid_batch]
# print(slicer)
# val = bed.read(slicer)
# print(val.shape)
# if False:
# file = r"D:\OneDrive\programs\sgkit-plink\bed_reader\tests\data
# /plink_sim_10s_100v_10pmiss.bed"
# with open_bed(file) as bed:
# print(bed.iid)
# print(bed.shape)
# val = bed.read()
# print(val)
# if False:
# # bed_file = example_file('doc/ipynb/all.*','*.bed')
# bed_file = r"F:\backup\carlk4d\data\carlk\cachebio\genetics\onemil\
# id1000000.sid_1000000.seed0.byiid\iid990000to1000000.bed"
# bed = Bed(bed_file, count_A1=False)
# snpdata1 = bed[:, :1000].read()
# snpdata2 = bed[:, :1000].read(dtype="int8", _require_float32_64=False)
# print(snpdata2)
# snpdata3 = bed[:, :1000].read(
# dtype="int8", order="C", _require_float32_64=False
# )
# print(snpdata3)
# snpdata3.val = snpdata3.val.astype("float32")
# snpdata3.val.dtype
# if False:
# from bed_reader import Bed, SnpGen
# iid_count = 487409
# sid_count = 5000
# sid_count_max = 5765294
# sid_batch_size = 50
# sid_batch_count = -(sid_count // -sid_batch_size)
# sid_batch_count_max = -(sid_count_max // -sid_batch_size)
# snpgen = SnpGen(seed=234, iid_count=iid_count, sid_count=sid_count_max)
# for batch_index in range(sid_batch_count):
# sid_index_start = batch_index * sid_batch_size
# sid_index_end = (batch_index + 1) * sid_batch_size # what about rounding
# filename = r"d:\deldir\rand\fakeukC{0}x{1}-{2}.bed".format(
# iid_count, sid_index_start, sid_index_end
# )
# if not os.path.exists(filename):
# Bed.write(
# filename + ".temp", snpgen[:, sid_index_start:sid_index_end].read()
# )
# os.rename(filename + ".temp", filename)
# if False:
# from bed_reader import Pheno, Bed
# filename = r"m:\deldir\New folder (4)\all_chr.maf0.001.N300.bed"
# iid_count = 300
# iid = [["0", "iid_{0}".format(iid_index)] for iid_index in range(iid_count)]
# bed = Bed(filename, iid=iid, count_A1=False)
# print(bed.iid_count)
# if False:
# from pysnptools.util import example_file
# pheno_fn = example_file("pysnptools/examples/toydata.phe")
# if False:
# from bed_reader import Pheno, Bed
# print(os.getcwd())
# # Read data from Pheno format
# snpdata = Pheno("../examples/toydata.phe").read()
# # pstutil.create_directory_if_necessary("tempdir/toydata.5chrom.bed")
# Bed.write(
# "tempdir/toydata.5chrom.bed", snpdata, count_A1=False
# ) # Write data in Bed format
import pytest
pytest.main(["--doctest-modules", __file__])
| 31.33024
| 107
| 0.532782
|
import logging
import multiprocessing
import os
from dataclasses import dataclass
from itertools import repeat, takewhile
from pathlib import Path
from typing import Any, List, Mapping, Optional, Union
import numpy as np
import pandas as pd
from .bed_reader import read_f32, read_f64, read_i8
# https://stackoverflow.com/questions/845058/how-to-get-line-count-of-a-large-file-cheaply-in-python
def _rawincount(filepath):
with open(filepath, "rb") as f:
bufgen = takewhile(lambda x: x, (f.raw.read(1024 * 1024) for _ in repeat(None)))
return sum(buf.count(b"\n") for buf in bufgen)
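# _rawincount streams the file in 1 MiB binary chunks and counts b"\n" bytes, so the
# line count of an arbitrarily large .fam or .bim file is obtained in constant memory
# (it assumes every line, including the last, ends with a newline). A minimal usage
# sketch -- the path is illustrative only:
#     n_individuals = _rawincount("plink_data.fam")  # one line per individual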
@dataclass
class _MetaMeta:
suffix: str
column: int
dtype: type
missing_value: object
fill_sequence: object
def _all_same(key, length, missing, dtype):
if np.issubdtype(dtype, np.str_):
dtype = f"<U{len(missing)}"
return np.full(length, missing, dtype=dtype)
def _sequence(key, length, missing, dtype):
if np.issubdtype(dtype, np.str_):
longest = len(f"{key}{length}")
dtype = f"<U{longest}"
return np.fromiter(
(f"{key}{i+1}" for i in range(length)), dtype=dtype, count=length
)
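# _all_same and _sequence are the two fill strategies used when a property must be
# synthesized rather than read from disk: _all_same repeats the property's missing
# value, while _sequence generates "key1", "key2", ... identifiers. Sketch of what
# the functions above produce (values derived from the code, not from any file):
#     _sequence("sid", 3, None, np.str_)  # -> array(['sid1', 'sid2', 'sid3'], dtype='<U4')
#     _all_same("sex", 3, 0, np.int32)    # -> array([0, 0, 0], dtype=int32)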
_delimiters = {"fam": r"\s+", "bim": "\t"}
_count_name = {"fam": "iid_count", "bim": "sid_count"}
_meta_meta = {
# https://stackoverflow.com/questions/41921255/staticmethod-object-is-not-callable
"fid": _MetaMeta("fam", 0, np.str_, "0", _all_same),
"iid": _MetaMeta("fam", 1, np.str_, None, _sequence),
"father": _MetaMeta("fam", 2, np.str_, "0", _all_same),
"mother": _MetaMeta("fam", 3, np.str_, "0", _all_same),
"sex": _MetaMeta("fam", 4, np.int32, 0, _all_same),
"pheno": _MetaMeta("fam", 5, np.str_, "0", _all_same),
"chromosome": _MetaMeta("bim", 0, np.str_, "0", _all_same),
"sid": _MetaMeta("bim", 1, np.str_, None, _sequence),
"cm_position": _MetaMeta("bim", 2, np.float32, 0, _all_same),
"bp_position": _MetaMeta("bim", 3, np.int32, 0, _all_same),
"allele_1": _MetaMeta("bim", 4, np.str_, "A1", _all_same),
"allele_2": _MetaMeta("bim", 5, np.str_, "A2", _all_same),
}
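# Each _MetaMeta entry maps a public property name to the metadata file that stores it
# ("fam" for per-individual fields, "bim" for per-variant fields), its 0-based column
# within that file, its numpy dtype, its missing value, and the fill strategy used
# when the property has to be synthesized instead of read.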
def get_num_threads(num_threads=None):
if num_threads is not None:
return num_threads
if "PST_NUM_THREADS" in os.environ:
return int(os.environ["PST_NUM_THREADS"])
if "NUM_THREADS" in os.environ:
return int(os.environ["NUM_THREADS"])
if "MKL_NUM_THREADS" in os.environ:
return int(os.environ["MKL_NUM_THREADS"])
return multiprocessing.cpu_count()
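# Thread-count resolution order: an explicit num_threads argument wins, then the
# environment variables PST_NUM_THREADS, NUM_THREADS, and MKL_NUM_THREADS (checked in
# that order), and finally multiprocessing.cpu_count(). Sketch of environment control
# (the value 4 is illustrative only):
#     os.environ["PST_NUM_THREADS"] = "4"
#     get_num_threads()  # -> 4, regardless of NUM_THREADS or MKL_NUM_THREADS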
class open_bed:
"""
Open a PLINK .bed file for reading.
Parameters
----------
filepath: pathlib.Path or str
File path to the .bed file.
iid_count: None or int, optional
Number of individuals (samples) in the .bed file.
The default (``iid_count=None``) finds the number
automatically by quickly scanning the .fam file.
sid_count: None or int, optional
Number of SNPs (variants) in the .bed file.
The default (``sid_count=None``) finds the number
automatically by quickly scanning the .bim file.
properties: dict, optional
A dictionary of any replacement properties. The default is an empty dictionary.
The keys of the dictionary are the names of the properties to replace.
The possible keys are:
"fid" (family id), "iid" (individual or sample id), "father" (father id),
"mother" (mother id), "sex", "pheno" (phenotype), "chromosome", "sid"
(SNP or variant id), "cm_position" (centimorgan position), "bp_position"
(base-pair position), "allele_1", "allele_2".
The values are replacement lists or arrays. A value can also be `None`,
meaning do not read or offer this property. See examples, below.
The list or array will be converted to a :class:`numpy.ndarray`
of the appropriate dtype, if necessary. Any :class:`numpy.nan` values
will be converted to the appropriate missing value. The PLINK `.fam specification
<https://www.cog-genomics.org/plink2/formats#fam>`_
and `.bim specification <https://www.cog-genomics.org/plink2/formats#bim>`_
list the dtypes and missing values for each property.
count_A1: bool, optional
True (default) to count the number of A1 alleles (the PLINK standard).
False to count the number of A2 alleles.
num_threads: None or int, optional
The number of threads with which to read data. Defaults to all available
processors.
Can also be set with these environment variables (listed in priority order):
'PST_NUM_THREADS', 'NUM_THREADS', 'MKL_NUM_THREADS'.
skip_format_check: bool, optional
False (default) to immediately check for expected starting bytes in
the .bed file. True to delay the check until (and if) data is read.
fam_filepath: pathlib.Path or str, optional
Path to the file containing information about each individual (sample).
Defaults to replacing the .bed file’s suffix with .fam.
bim_filepath: pathlib.Path or str, optional
Path to the file containing information about each SNP (variant).
Defaults to replacing the .bed file’s suffix with .bim.
Returns
-------
open_bed
an open_bed object
Examples
--------
List individual (sample) :attr:`iid` and SNP (variant) :attr:`sid`, then :meth:`read`
the whole file.
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> bed = open_bed(file_name)
>>> print(bed.iid)
['iid1' 'iid2' 'iid3']
>>> print(bed.sid)
['sid1' 'sid2' 'sid3' 'sid4']
>>> print(bed.read())
[[ 1. 0. nan 0.]
[ 2. 0. nan 2.]
[ 0. 1. 2. 0.]]
>>> del bed # optional: delete bed object
Open the file and read data for one SNP (variant)
at index position 2.
.. doctest::
>>> import numpy as np
>>> with open_bed(file_name) as bed:
... print(bed.read(np.s_[:,2]))
[[nan]
[nan]
[ 2.]]
Replace :attr:`iid`.
>>> bed = open_bed(file_name, properties={"iid":["sample1","sample2","sample3"]})
>>> print(bed.iid) # replaced
['sample1' 'sample2' 'sample3']
>>> print(bed.sid) # same as before
['sid1' 'sid2' 'sid3' 'sid4']
Give the number of individuals (samples) and SNPs (variants) so that the .fam and
.bim files need never be opened.
>>> with open_bed(file_name, iid_count=3, sid_count=4) as bed:
... print(bed.read())
[[ 1. 0. nan 0.]
[ 2. 0. nan 2.]
[ 0. 1. 2. 0.]]
Mark some properties as "don’t read or offer".
>>> bed = open_bed(file_name, properties={
... "father" : None, "mother" : None, "sex" : None, "pheno" : None,
... "allele_1" : None, "allele_2":None })
>>> print(bed.iid) # read from file
['iid1' 'iid2' 'iid3']
>>> print(bed.allele_2) # not read and not offered
None
See the :meth:`read` for details of reading batches via slicing and fancy indexing.
"""
def __init__(
self,
filepath: Union[str, Path],
iid_count: Optional[int] = None,
sid_count: Optional[int] = None,
properties: Mapping[str, List[Any]] = {},
count_A1: bool = True,
num_threads: Optional[int] = None,
skip_format_check: bool = False,
fam_filepath: Union[str, Path] = None,
bim_filepath: Union[str, Path] = None,
):
self.filepath = Path(filepath)
self.count_A1 = count_A1
self._num_threads = num_threads
self.skip_format_check = skip_format_check
self._fam_filepath = (
Path(fam_filepath)
if fam_filepath is not None
else self.filepath.parent / (self.filepath.stem + ".fam")
)
self._bim_filepath = (
Path(bim_filepath)
if bim_filepath is not None
else self.filepath.parent / (self.filepath.stem + ".bim")
)
self.properties_dict, self._counts = open_bed._fix_up_properties(
properties, iid_count, sid_count, use_fill_sequence=False
)
self._iid_range = None
self._sid_range = None
if not self.skip_format_check:
with open(self.filepath, "rb") as filepointer:
self._check_file(filepointer)
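# Construction is deliberately cheap: only the three-byte .bed header is verified here
# (and even that is skipped when skip_format_check=True). The .fam/.bim metadata and
# the iid/sid counts are loaded lazily, on first access to a property such as bed.iid
# or bed.sid_count.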
def read(
self,
index: Optional[Any] = None,
dtype: Optional[Union[type, str]] = "float32",
order: Optional[str] = "F",
force_python_only: Optional[bool] = False,
num_threads=None,
) -> np.ndarray:
"""
Read genotype information.
Parameters
----------
index:
An optional expression specifying the individuals (samples) and SNPs
(variants) to read. (See examples, below).
Defaults to ``None``, meaning read all.
(If index is a tuple, the first component indexes the individuals and the
second indexes
the SNPs. If it is not a tuple and not None, it indexes SNPs.)
dtype: {'float32' (default), 'float64', 'int8'}, optional
The desired data-type for the returned array.
order : {'F','C'}, optional
The desired memory layout for the returned array.
Defaults to ``F`` (Fortran order, which is SNP-major).
force_python_only: bool, optional
If False (default), uses the faster Rust code; otherwise it uses the slower
pure Python code.
num_threads: None or int, optional
The number of threads with which to read data. Defaults to all available
processors.
Can also be set with :class:`open_bed` or these
environment variables (listed in priority order):
'PST_NUM_THREADS', 'NUM_THREADS', 'MKL_NUM_THREADS'.
Returns
-------
numpy.ndarray
2-D array containing values of 0, 1, 2, or missing
Rows represent individuals (samples). Columns represent SNPs (variants).
For ``dtype`` 'float32' and 'float64', NaN indicates missing values.
For 'int8', -127 indicates missing values.
Examples
--------
To read all data in a .bed file, set ``index`` to ``None``. This is the default.
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.read())
[[ 1. 0. nan 0.]
[ 2. 0. nan 2.]
[ 0. 1. 2. 0.]]
To read selected individuals (samples) and/or SNPs (variants), set each part of
a :class:`numpy.s_` to an `int`, a list of `int`, a slice expression, or
a list of `bool`.
Negative integers count from the end of the list.
.. doctest::
>>> import numpy as np
>>> bed = open_bed(file_name)
>>> print(bed.read(np.s_[:,2])) # read the SNPs indexed by 2.
[[nan]
[nan]
[ 2.]]
>>> print(bed.read(np.s_[:,[2,3,0]])) # read the SNPs indexed by 2, 3, and 0
[[nan 0. 1.]
[nan 2. 2.]
[ 2. 0. 0.]]
>>> # read SNPs from 1 (inclusive) to 4 (exclusive)
>>> print(bed.read(np.s_[:,1:4]))
[[ 0. nan 0.]
[ 0. nan 2.]
[ 1. 2. 0.]]
>>> print(np.unique(bed.chromosome)) # print unique chrom values
['1' '5' 'Y']
>>> print(bed.read(np.s_[:,bed.chromosome=='5'])) # read all SNPs in chrom 5
[[nan]
[nan]
[ 2.]]
>>> print(bed.read(np.s_[0,:])) # Read 1st individual (across all SNPs)
[[ 1. 0. nan 0.]]
>>> print(bed.read(np.s_[::2,:])) # Read every 2nd individual
[[ 1. 0. nan 0.]
[ 0. 1. 2. 0.]]
>>> #read last and 2nd-to-last individuals and the last SNP
>>> print(bed.read(np.s_[[-1,-2],-1]))
[[0.]
[2.]]
You can give a dtype for the output.
.. doctest::
>>> print(bed.read(dtype='int8'))
[[ 1 0 -127 0]
[ 2 0 -127 2]
[ 0 1 2 0]]
>>> del bed # optional: delete bed object
"""
iid_index_or_slice_etc, sid_index_or_slice_etc = self._split_index(index)
dtype = np.dtype(dtype)
if order not in {"F", "C"}:
raise ValueError(f"order '{order}' not known, only 'F', 'C'")
# TODO: Is caching _iid_range and _sid_range here worthwhile, or could they
# simply be allocated on demand each time?
if self._iid_range is None:
self._iid_range = np.arange(self.iid_count, dtype="uintp")
if self._sid_range is None:
self._sid_range = np.arange(self.sid_count, dtype="uintp")
iid_index = np.ascontiguousarray(
self._iid_range[iid_index_or_slice_etc],
dtype="uintp",
)
sid_index = np.ascontiguousarray(
self._sid_range[sid_index_or_slice_etc], dtype="uintp"
)
if not force_python_only:
num_threads = get_num_threads(
self._num_threads if num_threads is None else num_threads
)
val = np.zeros((len(iid_index), len(sid_index)), order=order, dtype=dtype)
if self.iid_count > 0 and self.sid_count > 0:
if dtype == np.int8:
reader = read_i8
elif dtype == np.float64:
reader = read_f64
elif dtype == np.float32:
reader = read_f32
else:
raise ValueError(
f"dtype '{val.dtype}' not known, only "
+ "'int8', 'float32', and 'float64' are allowed."
)
reader(
str(self.filepath),
iid_count=self.iid_count,
sid_count=self.sid_count,
count_a1=self.count_A1,
iid_index=iid_index,
sid_index=sid_index,
val=val,
num_threads=num_threads,
)
else:
if not self.count_A1:
byteZero = 0
byteThree = 2
else:
byteZero = 2
byteThree = 0
if dtype == np.int8:
missing = -127
else:
missing = np.nan
# An earlier version of this code could read runs of consecutive SNPs in a
# single read. That ability may be worth adding back.
# Also, note that reading with python will often result in
# non-contiguous memory
# logging.warn("using pure python plink parser (might be much slower!!)")
val = np.zeros(
((int(np.ceil(0.25 * self.iid_count)) * 4), len(sid_index)),
order=order,
dtype=dtype,
) # allocate it a little bigger than needed
nbyte = int(np.ceil(0.25 * self.iid_count))
with open(self.filepath, "rb") as filepointer:
for SNPsIndex, bimIndex in enumerate(sid_index):
startbit = int(np.ceil(0.25 * self.iid_count) * bimIndex + 3)
filepointer.seek(startbit)
bytes = np.array(bytearray(filepointer.read(nbyte))).reshape(
(int(np.ceil(0.25 * self.iid_count)), 1), order="F"
)
val[3::4, SNPsIndex : SNPsIndex + 1] = byteZero
val[3::4, SNPsIndex : SNPsIndex + 1][bytes >= 64] = missing
val[3::4, SNPsIndex : SNPsIndex + 1][bytes >= 128] = 1
val[3::4, SNPsIndex : SNPsIndex + 1][bytes >= 192] = byteThree
bytes = np.mod(bytes, 64)
val[2::4, SNPsIndex : SNPsIndex + 1] = byteZero
val[2::4, SNPsIndex : SNPsIndex + 1][bytes >= 16] = missing
val[2::4, SNPsIndex : SNPsIndex + 1][bytes >= 32] = 1
val[2::4, SNPsIndex : SNPsIndex + 1][bytes >= 48] = byteThree
bytes = np.mod(bytes, 16)
val[1::4, SNPsIndex : SNPsIndex + 1] = byteZero
val[1::4, SNPsIndex : SNPsIndex + 1][bytes >= 4] = missing
val[1::4, SNPsIndex : SNPsIndex + 1][bytes >= 8] = 1
val[1::4, SNPsIndex : SNPsIndex + 1][bytes >= 12] = byteThree
bytes = np.mod(bytes, 4)
val[0::4, SNPsIndex : SNPsIndex + 1] = byteZero
val[0::4, SNPsIndex : SNPsIndex + 1][bytes >= 1] = missing
val[0::4, SNPsIndex : SNPsIndex + 1][bytes >= 2] = 1
val[0::4, SNPsIndex : SNPsIndex + 1][bytes >= 3] = byteThree
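# The block above decodes PLINK's packed genotypes: each byte holds four samples, two
# bits per sample, with the lowest-order bit pair belonging to the lowest-indexed
# individual in the group of four. With count_A1=True the bit pairs map to
#     00 -> 2 (homozygous allele 1), 01 -> missing, 10 -> 1 (heterozygous),
#     11 -> 0 (homozygous allele 2),
# and with count_A1=False the 0 and 2 outcomes are swapped (byteZero/byteThree above).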
val = val[iid_index, :] # reorder or trim any extra allocation
assert val.dtype == np.dtype(dtype) # real assert
if not open_bed._array_properties_are_ok(val, order):
val = val.copy(order=order)
return val
def __str__(self) -> str:
return f"{self.__class__.__name__}('{self.filepath}',...)"
@property
def fid(self) -> np.ndarray:
"""
Family id of each individual (sample).
Returns
-------
numpy.ndarray
array of str
'0' represents a missing value.
If needed, will cause a one-time read of the .fam file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.fid)
['fid1' 'fid1' 'fid2']
"""
return self.property_item("fid")
@property
def iid(self) -> np.ndarray:
"""
Individual id of each individual (sample).
Returns
-------
numpy.ndarray
array of str
If needed, will cause a one-time read of the .fam file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.iid)
['iid1' 'iid2' 'iid3']
"""
return self.property_item("iid")
@property
def father(self) -> np.ndarray:
"""
Father id of each individual (sample).
Returns
-------
numpy.ndarray
array of str
'0' represents a missing value.
If needed, will cause a one-time read of the .fam file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.father)
['iid23' 'iid23' 'iid22']
"""
return self.property_item("father")
@property
def mother(self) -> np.ndarray:
"""
Mother id of each individual (sample).
Returns
-------
numpy.ndarray
array of str
'0' represents a missing value.
If needed, will cause a one-time read of the .fam file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.mother)
['iid34' 'iid34' 'iid33']
"""
return self.property_item("mother")
@property
def sex(self) -> np.ndarray:
"""
Sex of each individual (sample).
Returns
-------
numpy.ndarray
array of 0, 1, or 2
0 is unknown, 1 is male, 2 is female
If needed, will cause a one-time read of the .fam file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.sex)
[1 2 0]
"""
return self.property_item("sex")
@property
def pheno(self) -> np.ndarray:
"""
A phenotype for each individual (sample)
(seldom used).
Returns
-------
numpy.ndarray
array of str
'0' may represent a missing value.
If needed, will cause a one-time read of the .fam file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.pheno)
['red' 'red' 'blue']
"""
return self.property_item("pheno")
@property
def properties(self) -> Mapping[str, np.array]:
"""
All the properties returned as a dictionary.
Returns
-------
dict
all the properties
The keys of the dictionary are the names of the properties, namely:
"fid" (family id), "iid" (individual or sample id), "father" (father id),
"mother" (mother id), "sex", "pheno" (phenotype), "chromosome", "sid"
(SNP or variant id), "cm_position" (centimorgan position), "bp_position"
(base-pair position), "allele_1", "allele_2".
The values are :class:`numpy.ndarray`.
If needed, will cause a one-time read of the .fam and .bim file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(len(bed.properties)) #length of dict
12
"""
for key in _meta_meta:
self.property_item(key)
return self.properties_dict
def property_item(self, name: str) -> np.ndarray:
"""
Retrieve one property by name.
Returns
-------
numpy.ndarray
a property value
The name is one of these:
"fid" (family id), "iid" (individual or sample id), "father" (father id),
"mother" (mother id), "sex", "pheno" (phenotype), "chromosome", "sid"
(SNP or variant id), "cm_position" (centimorgan position), "bp_position"
(base-pair position), "allele_1", "allele_2".
If needed, will cause a one-time read of the .fam or .bim file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.property_item('chromosome'))
['1' '1' '5' 'Y']
"""
if name not in self.properties_dict:
mm = _meta_meta[name]
self._read_fam_or_bim(suffix=mm.suffix)
return self.properties_dict[name]
@property
def chromosome(self) -> np.ndarray:
"""
Chromosome of each SNP (variant).
Returns
-------
numpy.ndarray
array of str
'0' represents a missing value.
If needed, will cause a one-time read of the .bim file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.chromosome)
['1' '1' '5' 'Y']
"""
return self.property_item("chromosome")
@property
def sid(self) -> np.ndarray:
"""
SNP id of each SNP (variant).
Returns
-------
numpy.ndarray
array of str
If needed, will cause a one-time read of the .bim file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.sid)
['sid1' 'sid2' 'sid3' 'sid4']
"""
return self.property_item("sid")
@property
def cm_position(self) -> np.ndarray:
"""
Centimorgan position of each SNP (variant).
Returns
-------
numpy.ndarray
array of float
0.0 represents a missing value.
If needed, will cause a one-time read of the .bim file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.cm_position)
[ 100.4 2000.5 4000.7 7000.9]
"""
return self.property_item("cm_position")
@property
def bp_position(self) -> np.ndarray:
"""
Base-pair position of each SNP (variant).
Returns
-------
numpy.ndarray
array of int
0 represents a missing value.
If needed, will cause a one-time read of the .bim file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.bp_position)
[ 1 100 1000 1004]
"""
return self.property_item("bp_position")
@property
def allele_1(self) -> np.ndarray:
"""
First allele of each SNP (variant).
Returns
-------
numpy.ndarray
array of str
If needed, will cause a one-time read of the .bim file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.allele_1)
['A' 'T' 'A' 'T']
"""
return self.property_item("allele_1")
@property
def allele_2(self) -> np.ndarray:
"""
Second allele of each SNP (variant).
Returns
-------
numpy.ndarray
array of str
If needed, will cause a one-time read of the .bim file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.allele_2)
['A' 'C' 'C' 'G']
"""
return self.property_item("allele_2")
@property
def iid_count(self) -> np.ndarray:
"""
Number of individuals (samples).
Returns
-------
int
number of individuals
If needed, will cause a fast line-count of the .fam file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.iid_count)
3
"""
return self._count("fam")
@property
def sid_count(self) -> np.ndarray:
"""
Number of SNPs (variants).
Returns
-------
int
number of SNPs
If needed, will cause a fast line-count of the .bim file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.sid_count)
4
"""
return self._count("bim")
def _property_filepath(self, suffix):
if suffix == "fam":
return self._fam_filepath
else:
assert suffix == "bim" # real assert
return self._bim_filepath
def _count(self, suffix):
count = self._counts[suffix]
if count is None:
count = _rawincount(self._property_filepath(suffix))
self._counts[suffix] = count
return count
@staticmethod
def _check_file(filepointer):
mode = filepointer.read(2)
if mode != b"l\x1b":
raise ValueError("Not a valid .bed file")
mode = filepointer.read(1) # \x01 = SNP major \x00 = individual major
if mode != b"\x01":
raise ValueError("only SNP-major is implemented")
def __del__(self):
self.__exit__()
def __enter__(self):
return self
def __exit__(self, *_):
pass
@staticmethod
def _array_properties_are_ok(val, order):
if order == "F":
return val.flags["F_CONTIGUOUS"]
else:
assert order == "C" # real assert
return val.flags["C_CONTIGUOUS"]
@property
def shape(self):
"""
Number of individuals (samples) and SNPs (variants).
Returns
-------
(int, int)
number of individuals, number of SNPs
If needed, will cause a fast line-count of the .fam and .bim files.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.shape)
(3, 4)
"""
return (len(self.iid), len(self.sid))
@staticmethod
def _split_index(index):
if not isinstance(index, tuple):
index = (None, index)
iid_index = open_bed._fix_up_index(index[0])
sid_index = open_bed._fix_up_index(index[1])
return iid_index, sid_index
@staticmethod
def _fix_up_index(index):
if index is None: # make a shortcut for None
return slice(None)
try: # If index is an int, return it in an array
index = index.__index__() # (see
# https://stackoverflow.com/questions/3501382/checking-whether-a-variable-is-an-integer-or-not)
return [index]
except Exception:
pass
return index
@staticmethod
def _write_fam_or_bim(base_filepath, properties, suffix, property_filepath):
assert suffix in {"fam", "bim"}, "real assert"
filepath = (
Path(property_filepath)
if property_filepath is not None
else base_filepath.parent / (base_filepath.stem + "." + suffix)
)
fam_bim_list = []
for key, mm in _meta_meta.items():
if mm.suffix == suffix:
assert len(fam_bim_list) == mm.column, "real assert"
fam_bim_list.append(properties[key])
sep = " " if suffix == "fam" else "\t"
with open(filepath, "w") as filepointer:
for index in range(len(fam_bim_list[0])):
filepointer.write(
sep.join(str(seq[index]) for seq in fam_bim_list) + "\n"
)
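# _write_fam_or_bim emits one metadata file: the properties belonging to the given
# suffix are gathered in column order (the assert above checks that _meta_meta lists
# them in order) and written one row per individual or variant, space-separated for
# .fam and tab-separated for .bim.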
@staticmethod
def _fix_up_properties_array(input, dtype, missing_value, key):
if input is None:
return None
if len(input) == 0:
return np.zeros([0], dtype=dtype)
if not isinstance(input, np.ndarray):
return open_bed._fix_up_properties_array(
np.array(input), dtype, missing_value, key
)
if len(input.shape) != 1:
raise ValueError(f"{key} should be one dimensional")
if not np.issubdtype(input.dtype, dtype):
output = np.array(input, dtype=dtype)
else:
output = input
# Change NaN in input to correct missing value
if np.issubdtype(input.dtype, np.floating):
output[input != input] = missing_value
return output
@staticmethod
def _fix_up_properties(properties, iid_count, sid_count, use_fill_sequence):
for key in properties:
if key not in _meta_meta:
raise KeyError(f"properties key '{key}' not known")
count_dict = {"fam": iid_count, "bim": sid_count}
properties_dict = {}
for key, mm in _meta_meta.items():
count = count_dict[mm.suffix]
if key not in properties or (use_fill_sequence and properties[key] is None):
if use_fill_sequence:
output = mm.fill_sequence(key, count, mm.missing_value, mm.dtype)
else:
continue # Test coverage reaches this, but doesn't report it.
else:
output = open_bed._fix_up_properties_array(
properties[key], mm.dtype, mm.missing_value, key
)
if output is not None:
if count is None:
count_dict[mm.suffix] = len(output)
else:
if count != len(output):
raise ValueError(
f"The length of override {key}, {len(output)}, should not "
+ "be different from the current "
+ f"{_count_name[mm.suffix]}, {count}"
)
properties_dict[key] = output
return properties_dict, count_dict
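# _fix_up_properties normalizes every override: each array is coerced to the dtype
# given in _meta_meta, NaN entries become the property's missing value, and the first
# override seen for a suffix fixes the iid or sid count, so later overrides (and the
# on-disk files) must agree with that length.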
def _read_fam_or_bim(self, suffix):
property_filepath = self._property_filepath(suffix)
logging.info("Loading {0} file {1}".format(suffix, property_filepath))
count = self._counts[suffix]
delimiter = _delimiters[suffix]
if delimiter in {r"\s+"}:
delimiter = None
delim_whitespace = True
else:
delim_whitespace = False
usecolsdict = {}
dtype_dict = {}
for key, mm in _meta_meta.items():
if mm.suffix == suffix and key not in self.properties_dict:
usecolsdict[key] = mm.column
dtype_dict[mm.column] = mm.dtype
assert list(usecolsdict.values()) == sorted(usecolsdict.values()) # real assert
assert len(usecolsdict) > 0 # real assert
if os.path.getsize(property_filepath) == 0:
fields = []
else:
fields = pd.read_csv(
property_filepath,
delimiter=delimiter,
delim_whitespace=delim_whitespace,
header=None,
index_col=False,
comment=None,
dtype=dtype_dict,
usecols=usecolsdict.values(),
)
if count is None:
self._counts[suffix] = len(fields)
else:
if count != len(fields):
raise ValueError(
f"The number of lines in the *.{suffix} file, {len(fields)}, "
+ "should not be different from the current "
+ "f{_count_name[suffix]}, {count}"
)
for key in usecolsdict.keys():
mm = _meta_meta[key]
if len(fields) == 0:
output = np.array([], dtype=mm.dtype)
else:
output = fields[mm.column].values
if not np.issubdtype(output.dtype, mm.dtype):
output = np.array(output, dtype=mm.dtype)
self.properties_dict[key] = output
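# _read_fam_or_bim is incremental: usecolsdict lists only the columns whose properties
# have not already been supplied (via the properties argument) or read earlier, so
# each .fam/.bim column is parsed at most once per open_bed object.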
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
# if True:
# from bed_reader import sample_file
# file_name = sample_file("small.bed")
# with open_bed(file_name) as bed:
# print(bed.iid)
# print(bed.sid)
# print(bed.read())
# if False:
# import numpy as np
# from bed_reader._open_bed import open_bed
# # Can get file from
# https://www.dropbox.com/sh/xluk9opjiaobteg/AABgEggLk0ZoO0KQq0I4CaTJa?dl=0
# bigfile = r"M:\deldir\genbgen\2\merged_487400x220000.1.bed"
# # bigfile = '/mnt/m/deldir/genbgen/2/merged_487400x220000.1.bed'
# with open_bed(bigfile, num_threads=20) as bed:
# sid_batch = 22 * 1000
# for sid_start in range(0, 10 * sid_batch, sid_batch):
# slicer = np.s_[:10000, sid_start : sid_start + sid_batch]
# print(slicer)
# val = bed.read(slicer)
# print(val.shape)
# if False:
# file = r"D:\OneDrive\programs\sgkit-plink\bed_reader\tests\data
# /plink_sim_10s_100v_10pmiss.bed"
# with open_bed(file) as bed:
# print(bed.iid)
# print(bed.shape)
# val = bed.read()
# print(val)
# if False:
# # bed_file = example_file('doc/ipynb/all.*','*.bed')
# bed_file = r"F:\backup\carlk4d\data\carlk\cachebio\genetics\onemil\
# id1000000.sid_1000000.seed0.byiid\iid990000to1000000.bed"
# bed = Bed(bed_file, count_A1=False)
# snpdata1 = bed[:, :1000].read()
# snpdata2 = bed[:, :1000].read(dtype="int8", _require_float32_64=False)
# print(snpdata2)
# snpdata3 = bed[:, :1000].read(
# dtype="int8", order="C", _require_float32_64=False
# )
# print(snpdata3)
# snpdata3.val = snpdata3.val.astype("float32")
# snpdata3.val.dtype
# if False:
# from bed_reader import Bed, SnpGen
# iid_count = 487409
# sid_count = 5000
# sid_count_max = 5765294
# sid_batch_size = 50
# sid_batch_count = -(sid_count // -sid_batch_size)
# sid_batch_count_max = -(sid_count_max // -sid_batch_size)
# snpgen = SnpGen(seed=234, iid_count=iid_count, sid_count=sid_count_max)
# for batch_index in range(sid_batch_count):
# sid_index_start = batch_index * sid_batch_size
# sid_index_end = (batch_index + 1) * sid_batch_size # what about rounding
# filename = r"d:\deldir\rand\fakeukC{0}x{1}-{2}.bed".format(
# iid_count, sid_index_start, sid_index_end
# )
# if not os.path.exists(filename):
# Bed.write(
# filename + ".temp", snpgen[:, sid_index_start:sid_index_end].read()
# )
# os.rename(filename + ".temp", filename)
# if False:
# from bed_reader import Pheno, Bed
# filename = r"m:\deldir\New folder (4)\all_chr.maf0.001.N300.bed"
# iid_count = 300
# iid = [["0", "iid_{0}".format(iid_index)] for iid_index in range(iid_count)]
# bed = Bed(filename, iid=iid, count_A1=False)
# print(bed.iid_count)
# if False:
# from pysnptools.util import example_file
# pheno_fn = example_file("pysnptools/examples/toydata.phe")
# if False:
# from bed_reader import Pheno, Bed
# print(os.getcwd())
# # Read data from Pheno format
# snpdata = Pheno("../examples/toydata.phe").read()
# # pstutil.create_directory_if_necessary("tempdir/toydata.5chrom.bed")
# Bed.write(
# "tempdir/toydata.5chrom.bed", snpdata, count_A1=False
# ) # Write data in Bed format
import pytest
pytest.main(["--doctest-modules", __file__])
| 8,811
| 95
| 511
|
d1e47a5ecac5656e9c1c3ebf9c8b3158717cbecc
| 152
|
py
|
Python
|
karaoke-server.py
|
AetherUnbound/pikaraoke
|
d4af46c55e4425ad6398c25ab6b108c8ce0277fc
|
[
"MIT"
] | null | null | null |
karaoke-server.py
|
AetherUnbound/pikaraoke
|
d4af46c55e4425ad6398c25ab6b108c8ce0277fc
|
[
"MIT"
] | null | null | null |
karaoke-server.py
|
AetherUnbound/pikaraoke
|
d4af46c55e4425ad6398c25ab6b108c8ce0277fc
|
[
"MIT"
] | null | null | null |
# Author: Matthew Bowden bowdenm@spu.edu
import flask as fsk
app = fsk.Flask(__name__)
| 13.818182
| 40
| 0.690789
|
# Author: Matthew Bowden bowdenm@spu.edu
import flask as fsk
app = fsk.Flask(__name__)
@app.route('/')
def show_songs():
return 'List of songs'
| 23
| 0
| 22
|
6dffa14bc17cbcb187baf012649a6b72553290cd
| 23,202
|
py
|
Python
|
level3/find_the_access_codes/solution.py
|
lcsm29/goog-foobar
|
6ea44879d9d9f3483fa320d92d6c25b14565c899
|
[
"MIT"
] | null | null | null |
level3/find_the_access_codes/solution.py
|
lcsm29/goog-foobar
|
6ea44879d9d9f3483fa320d92d6c25b14565c899
|
[
"MIT"
] | null | null | null |
level3/find_the_access_codes/solution.py
|
lcsm29/goog-foobar
|
6ea44879d9d9f3483fa320d92d6c25b14565c899
|
[
"MIT"
] | null | null | null |
if __name__ == '__main__':
from time import perf_counter_ns
basic_tests = (
([1, 1, 1], 1),
([1, 2, 3, 4, 5, 6], 3)
)
additional_tests = (
([1, 2], 0),
([2, 3, 5, 7], 0),
([1] * 100 + [2] + [4], 4),
([10, 20, 23, 27, 45, 69, 118, 138, 161, 166, 167, 170, 174, 213, 222, 224, 250, 251, 270, 285, 291, 325, 336, 355, 360, 381, 390, 396, 403, 413, 423, 446, 488, 507, 521, 560, 570, 660, 685, 715, 758, 781, 782, 783, 829, 855, 864, 874, 897, 936, 938, 944, 965, 981, 983, 993, 998, 1038, 1039, 1044, 1072, 1133, 1155, 1156, 1178, 1184, 1188, 1223, 1229, 1247, 1249, 1292, 1295, 1406, 1413, 1430, 1446, 1470, 1485, 1525, 1538, 1572, 1575, 1656, 1665, 1713, 1744, 1756, 1757, 1759, 1809, 1823, 1834, 1852, 1860, 1884, 1893, 1923, 1989, 2000], 11),
([1, 2, 2, 3, 5, 5, 6, 6, 7, 8, 9, 9, 10, 10, 12, 14, 15, 15, 15, 17, 19, 24, 25, 26, 30, 31, 31, 34, 37, 42, 44, 48, 49, 50, 50, 55, 60, 62, 66, 68, 69, 77, 78, 79, 79, 81, 83, 84, 85, 87, 88, 88, 96, 98, 102, 104, 105, 106, 109, 109, 111, 112, 121, 122, 123, 123, 128, 130, 132, 132, 135, 136, 140, 143, 144, 145, 151, 151, 154, 155, 156, 157, 163, 163, 170, 172, 174, 174, 175, 175, 176, 179, 184, 193, 196, 200, 200, 204, 206, 206, 207, 208, 210, 211, 213, 215, 217, 219, 220, 220, 225, 225, 225, 226, 227, 228, 231, 232, 232, 236, 237, 238, 240, 240, 240, 243, 250, 254, 255, 257, 258, 260, 260, 264, 266, 268, 274, 275, 275, 275, 278, 279, 279, 281, 282, 283, 284, 286, 291, 293, 293, 294, 301, 301, 302, 304, 305, 305, 306, 306, 308, 310, 311, 311, 315, 316, 316, 320, 321, 321, 322, 323, 326, 328, 329, 330, 333, 334, 338, 339, 341, 347, 348, 349, 353, 356, 357, 361, 363, 366, 366, 366, 367, 369, 372, 373, 374, 375, 383, 384, 385, 388, 390, 392, 398, 405, 406, 409, 412, 412, 414, 419, 419, 419, 424, 425, 425, 425, 426, 427, 428, 429, 432, 432, 434, 435, 436, 438, 441, 442, 445, 446, 448, 448, 452, 456, 457, 459, 463, 464, 465, 466, 467, 467, 468, 468, 468, 473, 473, 480, 484, 486, 488, 488, 489, 489, 491, 495, 496, 497, 501, 502, 505, 506, 506, 510, 512, 516, 517, 517, 518, 528, 528, 530, 534, 536, 536, 537, 539, 539, 542, 545, 549, 555, 558, 559, 562, 563, 563, 563, 563, 565, 566, 567, 571, 572, 575, 578, 579, 579, 579, 584, 584, 586, 588, 590, 591, 592, 592, 598, 601, 603, 604, 607, 609, 612, 612, 613, 613, 615, 616, 618, 619, 622, 623, 625, 626, 627, 630, 630, 631, 631, 631, 632, 635, 637, 637, 641, 643, 645, 645, 646, 647, 648, 648, 649, 650, 650, 653, 653, 655, 657, 658, 659, 661, 664, 665, 668, 669, 669, 677, 678, 684, 686, 688, 690, 698, 698, 699, 703, 703, 704, 705, 706, 706, 709, 712, 720, 722, 725, 726, 727, 727, 730, 732, 732, 733, 735, 736, 746, 750, 753, 753, 753, 753, 759, 761, 767, 772, 778, 786, 788, 788, 792, 793, 796, 797, 798, 799, 799, 801, 801, 810, 811, 812, 813, 822, 823, 826, 828, 829, 830, 832, 833, 833, 834, 837, 838, 839, 840, 842, 843, 851, 852, 854, 859, 860, 861, 863, 866, 866, 869, 870, 873, 873, 874, 874, 877, 880, 885, 890, 893, 895, 895, 903, 907, 912, 918, 918, 919, 919, 919, 919, 922, 923, 924, 924, 924, 933, 935, 936, 936, 940, 945, 948, 949, 950, 952, 952, 954, 957, 958, 963, 966, 966, 968, 969, 971, 972, 973, 973, 973, 976, 977, 980, 981, 985, 985, 986, 987, 987, 989, 992, 993, 994, 997, 999, 999, 1004, 1004, 1006, 1008, 1008, 1009, 1009, 1012, 1015, 1017, 1021, 1022, 1024, 1024, 1027, 1027, 1035, 1039, 1039, 1040, 1042, 1043, 1046, 1048, 1052, 1053, 1058, 1060, 1066, 1067, 1067, 1067, 1070, 1071, 1072, 1076, 1081, 1082, 1083, 1087, 1091, 1091, 1094, 1094, 1095, 1096, 1102, 1103, 1103, 1103, 1105, 1107, 1107, 1113, 1114, 1114, 1114, 1115, 1115, 1117, 1117, 1119, 1125, 1126, 1127, 1127, 1127, 1131, 1131, 1132, 1145, 1146, 1146, 1148, 1149, 1150, 1150, 1151, 1151, 1155, 1155, 1160, 1163, 1165, 1165, 1167, 1168, 1172, 1173, 1173, 1174, 1177, 1181, 1183, 1184, 1189, 1192, 1192, 1197, 1197, 1202, 1209, 1212, 1215, 1216, 1217, 1220, 1220, 1222, 1222, 1222, 1222, 1226, 1227, 1231, 1232, 1239, 1240, 1243, 1244, 1245, 1250, 1255, 1258, 1258, 1259, 1264, 1271, 1271, 1272, 1272, 1274, 1276, 1277, 1279, 1280, 1283, 1284, 1285, 1288, 1291, 1296, 1298, 1299, 1300, 1302, 1302, 1306, 1311, 1315, 1315, 1316, 1321, 1321, 1325, 1325, 1327, 1329, 1329, 1330, 1332, 1333, 1338, 1339, 1340, 1345, 1347, 1347, 1350, 1353, 1357, 1359, 1360, 1360, 1360, 1363, 1369, 1370, 1370, 
1370, 1371, 1374, 1376, 1378, 1379, 1380, 1381, 1382, 1385, 1388, 1388, 1390, 1395, 1398, 1402, 1403, 1403, 1405, 1406, 1408, 1412, 1414, 1419, 1424, 1424, 1427, 1428, 1430, 1430, 1432, 1435, 1439, 1439, 1440, 1442, 1442, 1450, 1454, 1455, 1456, 1457, 1458, 1459, 1461, 1462, 1463, 1463, 1465, 1465, 1466, 1472, 1474, 1476, 1477, 1477, 1477, 1480, 1482, 1483, 1485, 1487, 1488, 1490, 1491, 1493, 1494, 1495, 1496, 1498, 1498, 1501, 1505, 1505, 1506, 1515, 1515, 1516, 1517, 1518, 1519, 1520, 1521, 1523, 1525, 1525, 1526, 1527, 1528, 1528, 1530, 1535, 1537, 1537, 1540, 1540, 1540, 1540, 1542, 1543, 1546, 1548, 1549, 1551, 1552, 1553, 1556, 1557, 1558, 1560, 1561, 1563, 1563, 1566, 1569, 1570, 1571, 1576, 1579, 1583, 1584, 1585, 1589, 1589, 1594, 1594, 1595, 1598, 1606, 1609, 1611, 1612, 1618, 1619, 1620, 1625, 1628, 1629, 1639, 1640, 1640, 1644, 1644, 1645, 1649, 1653, 1656, 1657, 1657, 1658, 1658, 1659, 1661, 1666, 1667, 1668, 1671, 1672, 1673, 1681, 1687, 1689, 1689, 1691, 1691, 1691, 1692, 1699, 1699, 1702, 1703, 1704, 1705, 1707, 1708, 1714, 1717, 1717, 1720, 1725, 1725, 1730, 1732, 1733, 1738, 1738, 1740, 1741, 1741, 1744, 1746, 1748, 1748, 1751, 1753, 1755, 1756, 1757, 1759, 1759, 1759, 1768, 1772, 1773, 1774, 1780, 1781, 1784, 1785, 1787, 1787, 1788, 1788, 1789, 1789, 1791, 1794, 1797, 1797, 1802, 1805, 1806, 1809, 1809, 1812, 1813, 1814, 1815, 1816, 1821, 1824, 1826, 1826, 1831, 1834, 1835, 1838, 1839, 1839, 1839, 1845, 1846, 1849, 1854, 1858, 1864, 1865, 1867, 1877, 1879, 1879, 1879, 1880, 1881, 1882, 1882, 1883, 1885, 1885, 1888, 1890, 1897, 1899, 1901, 1905, 1907, 1907, 1913, 1913, 1914, 1922, 1923, 1925, 1927, 1929, 1930, 1932, 1932, 1939, 1940, 1941, 1945, 1946, 1947, 1952, 1952, 1953, 1954, 1954, 1954, 1956, 1959, 1959, 1962, 1963, 1966, 1967, 1970, 1977, 1978, 1978, 1979, 1982, 1987, 1988, 1990, 1991, 1992, 1993, 1994, 1995, 1997, 2000], 16_509),
([1, 1, 2, 4, 5, 2376, 2404, 3797, 3851, 4386, 4626, 5146, 5378, 5611, 5651, 5814, 6513, 6604, 7433, 7456, 7902, 8116, 8480, 10222, 10434, 10996, 11135, 11424, 11496, 11869, 12024, 12380, 13137, 13270, 13542, 13827, 13915, 14567, 14594, 14999, 15004, 16862, 17536, 17998, 19438, 19881, 20007, 21197, 21517, 22352, 22738, 22964, 24492, 24811, 25316, 26545, 27646, 28899, 29248, 29414, 29508, 29710, 30286, 31039, 31133, 33469, 34124, 34253, 35365, 35500, 35549, 35824, 36176, 37025, 37333, 37797, 38722, 39109, 39350, 39515, 41329, 41480, 41902, 41925, 42138, 42272, 42580, 43135, 43285, 43459, 43609, 43673, 43720, 44215, 44228, 44388, 44424, 45172, 46363, 46672, 46838, 47485, 48833, 49688, 50804, 53130, 53853, 54021, 54411, 54593, 55252, 55883, 56838, 57900, 58000, 58294, 58660, 59099, 59419, 59693, 60482, 61178, 61269, 61314, 62412, 63961, 64270, 64859, 66320, 66602, 67277, 68792, 69172, 69384, 70404, 70925, 71912, 72238, 72407, 72903, 73156, 73957, 74339, 75594, 75739, 76477, 76933, 77056, 78383, 79292, 79460, 80007, 81393, 81921, 82478, 82519, 83555, 83700, 83729, 84267, 84293, 84456, 84991, 85015, 85168, 85483, 86330, 86539, 86602, 86627, 87365, 87373, 87397, 87752, 88339, 88736, 88755, 88878, 89210, 90786, 90867, 90985, 91038, 91293, 91441, 92081, 93020, 93308, 94704, 95199, 95349, 95402, 95520, 95588, 96507, 97209, 97949, 98547, 99409, 99572, 99956, 100273, 100286, 100520, 100996, 103060, 103716, 104204, 104588, 105063, 105291, 107506, 107573, 107598, 107786, 109411, 110328, 111122, 112567, 112982, 114466, 114734, 114952, 114956, 115699, 116183, 116235, 116240, 116546, 117085, 118292, 118642, 118692, 119629, 120058, 120229, 120299, 120668, 120843, 121310, 121361, 121809, 122237, 122444, 122745, 123172, 123536, 124751, 124758, 124864, 125802, 125842, 126102, 126496, 127064, 128252, 128500, 128527, 128775, 129423, 129770, 130180, 131520, 131955, 131968, 133103, 133550, 133653, 135184, 135353, 135424, 135775, 135806, 136364, 138014, 138019, 138995, 139978, 140443, 140710, 141077, 141758, 142049, 144424, 145361, 146043, 146496, 147308, 148004, 148132, 148194, 148315, 148356, 148745, 149171, 150067, 150409, 150911, 151094, 151344, 151852, 151955, 153093, 153421, 153868, 154412, 154415, 154556, 154988, 155165, 155369, 155452, 157006, 158594, 158833, 158977, 159320, 159441, 159621, 160559, 161030, 161418, 161499, 161546, 162092, 162100, 162487, 162495, 162933, 164019, 164860, 166041, 166227, 166514, 167443, 168228, 168442, 168714, 169205, 170059, 170458, 170944, 171048, 171937, 172401, 173151, 173953, 174383, 176454, 177051, 177371, 177604, 177653, 177916, 178673, 178721, 178859, 179775, 180347, 180556, 180708, 181440, 182059, 183012, 183102, 183703, 184324, 184364, 186200, 187135, 187147, 187287, 187326, 188781, 189064, 189455, 189622, 189688, 189722, 190190, 190559, 190985, 191409, 191960, 192376, 193140, 193657, 194994, 195168, 195421, 196295, 196534, 196949, 197042, 197229, 197590, 198872, 199052, 199632, 199657, 200555, 201151, 201324, 201446, 201632, 201827, 202262, 203034, 203080, 203775, 203790, 203795, 204252, 204309, 204317, 205306, 205412, 207839, 207914, 207956, 208364, 209462, 211072, 212088, 213155, 213159, 213322, 213659, 214046, 214728, 214779, 215260, 215900, 215973, 217046, 217974, 218444, 218696, 219185, 219686, 220148, 220273, 220842, 221436, 221497, 221716, 222530, 222635, 222647, 223100, 223403, 223862, 224272, 224580, 224625, 225157, 225364, 225525, 225965, 226064, 226132, 227500, 227558, 227627, 228193, 228426, 228528, 229668, 229730, 230653, 230802, 231518, 232532, 232733, 
233089, 233919, 235296, 235321, 235642, 238313, 238441, 239117, 240710, 240870, 241429, 241594, 241722, 241815, 241939, 242116, 242857, 243226, 243230, 243593, 243655, 243720, 244049, 245057, 245396, 245734, 247547, 248382, 249195, 249807, 250421, 250589, 252190, 253206, 253276, 253398, 254136, 254332, 254848, 255485, 255581, 256750, 257099, 257198, 257745, 258165, 258626, 258870, 259521, 260359, 260474, 260813, 261771, 262329, 263921, 264230, 264378, 264631, 265056, 265143, 265391, 267191, 267653, 268623, 268624, 268988, 269234, 269742, 270090, 270570, 272591, 272688, 273856, 274040, 274529, 274873, 275226, 276389, 276403, 276635, 277403, 277409, 278268, 279490, 280155, 280876, 281309, 281621, 281760, 282060, 282282, 282594, 283735, 283852, 284328, 284590, 285020, 285298, 286064, 286072, 287060, 287761, 287839, 288425, 288602, 288875, 289531, 289736, 290635, 290896, 291107, 291206, 291672, 291846, 292053, 292771, 292786, 293642, 293928, 294476, 294496, 294643, 294693, 294944, 295285, 295430, 295463, 295664, 296142, 296337, 297621, 297872, 298045, 298057, 298149, 298577, 298699, 299572, 299648, 300637, 301226, 301632, 302001, 302023, 303323, 303576, 304150, 305089, 305425, 305950, 306972, 307464, 307700, 308344, 308490, 308593, 309417, 310113, 312420, 312454, 312472, 313194, 313356, 314130, 314332, 314461, 314582, 314872, 315209, 315285, 315334, 315498, 315773, 317746, 317917, 318182, 319378, 320172, 320448, 321163, 321909, 322979, 323203, 323526, 323794, 324611, 324678, 325446, 325462, 325635, 326641, 327200, 328873, 329951, 330151, 330447, 330516, 331125, 331548, 333377, 333662, 333976, 334641, 335104, 336391, 337062, 337460, 337571, 339236, 339329, 339480, 339705, 339765, 340482, 340605, 340793, 341016, 341729, 342315, 342338, 344123, 344776, 345140, 345586, 345825, 345937, 346608, 347127, 348265, 348378, 348706, 348754, 348796, 349200, 349851, 350914, 351323, 352159, 352348, 352561, 352776, 352991, 353107, 354069, 354498, 354910, 355844, 355965, 357028, 357341, 357722, 358812, 359449, 359597, 360115, 360332, 360459, 361637, 362126, 362210, 362254, 362533, 362708, 362838, 363078, 364395, 364762, 365521, 366124, 366219, 366891, 367246, 367608, 368364, 369011, 369044, 369737, 370433, 370510, 370547, 371477, 371560, 371749, 373421, 373608, 374140, 375112, 375157, 377419, 377582, 377669, 377968, 378340, 378421, 379710, 380238, 380601, 382147, 383396, 383398, 383411, 383475, 383486, 383783, 384718, 385380, 386302, 386729, 386807, 387258, 389859, 389895, 390345, 391082, 391398, 391576, 392238, 392261, 392455, 392510, 393929, 394210, 394223, 394389, 394485, 394749, 394925, 395541, 396339, 396464, 397327, 397903, 398066, 398297, 398427, 398562, 399776, 400170, 400754, 400969, 401064, 401272, 401663, 401914, 402040, 402164, 402696, 403151, 403681, 404052, 405818, 406037, 406261, 406629, 407310, 409060, 409374, 409495, 409544, 410885, 412078, 412701, 412903, 413601, 414417, 415696, 415729, 415781, 415863, 417181, 417630, 417752, 418517, 419112, 419171, 419353, 419510, 419682, 420192, 420810, 421004, 421461, 421786, 422146, 422150, 423551, 425267, 425379, 425782, 425975, 426113, 426186, 426599, 426929, 427245, 427712, 428179, 428412, 428777, 429052, 429261, 429406, 429892, 430130, 431013, 431415, 431551, 432078, 432812, 433038, 433933, 434655, 434711, 434716, 434966, 435418, 435457, 435630, 435749, 436432, 437531, 437759, 438173, 438243, 438514, 439222, 439640, 440146, 440304, 440694, 441318, 442052, 442321, 442912, 443710, 443734, 444491, 444573, 444754, 445243, 445301, 445512, 445851, 445935, 
446428, 446992, 447391, 447721, 449202, 449288, 450127, 451570, 453164, 453291, 453619, 454826, 456006, 456196, 456229, 456688, 456747, 456877, 457778, 457851, 457997, 458359, 458470, 458931, 459116, 459163, 459320, 459716, 459761, 461561, 462270, 462276, 462666, 463203, 465064, 466002, 466783, 466937, 468798, 468881, 471002, 471887, 472016, 472145, 472217, 473959, 474378, 475158, 475238, 475366, 475644, 475975, 476065, 476114, 476926, 477511, 478181, 478249, 478450, 479206, 479217, 479533, 481048, 483196, 483691, 484304, 484488, 484494, 485018, 485349, 486256, 486449, 486872, 487486, 487961, 488037, 488156, 489348, 489638, 489908, 491162, 492176, 492300, 492866, 493793, 493925, 494924, 495341, 495407, 495699, 496482, 497186, 497884, 498271, 498450, 498519, 498528, 498899, 499047, 499333, 500150, 501425, 502056, 502268, 502442, 502869, 502899, 503448, 503535, 504613, 504905, 505175, 505888, 506169, 506282, 506666, 506774, 507343, 507557, 509448, 509851, 511908, 512739, 513048, 513129, 513377, 513634, 514286, 514572, 515207, 516682, 516911, 518608, 518692, 518860, 519961, 520080, 520382, 520560, 522851, 522937, 523178, 523367, 523494, 524226, 524474, 526274, 526328, 527401, 527436, 529756, 530121, 530265, 531483, 531625, 531777, 532553, 532973, 532984, 534260, 534397, 534602, 535340, 535508, 535783, 536444, 536992, 537216, 537968, 539486, 539787, 539834, 542257, 543800, 544298, 544614, 545107, 545537, 545778, 547150, 547811, 547866, 547908, 548595, 550162, 550186, 551133, 551911, 552997, 553188, 553978, 553978, 554130, 554795, 554856, 556226, 556916, 557050, 557832, 557879, 558941, 560307, 560462, 561439, 561775, 561789, 561934, 562007, 562716, 563375, 563593, 564273, 564510, 564640, 564859, 565369, 565832, 566604, 566628, 566790, 567004, 567243, 567245, 567467, 567949, 569373, 569688, 570202, 570438, 571062, 571255, 572528, 572670, 573224, 573688, 574074, 574122, 575086, 575466, 575628, 575998, 576338, 576351, 576423, 578248, 578472, 578581, 578661, 579047, 579070, 579086, 579289, 579462, 579536, 579555, 580414, 582070, 582275, 582996, 583037, 584002, 584111, 584719, 585584, 585663, 586710, 588070, 588097, 589054, 589506, 592401, 593024, 595977, 596044, 597282, 598495, 598581, 598960, 599513, 599538, 599851, 600064, 600141, 600422, 600465, 600810, 601258, 601309, 601729, 602268, 602302, 602947, 603146, 603656, 604433, 605449, 607652, 607709, 607898, 608403, 609582, 611612, 611903, 613310, 614715, 615497, 616157, 616292, 616551, 616595, 617936, 618565, 618699, 618761, 620093, 620475, 620590, 620657, 621727, 622288, 622299, 622710, 623579, 623983, 623990, 624360, 625648, 625905, 627038, 627046, 627321, 627411, 627870, 628348, 628465, 628604, 628907, 629093, 630123, 630169, 630587, 630682, 631633, 631753, 632566, 633245, 634336, 634604, 634660, 635053, 635697, 635866, 636420, 636673, 636710, 636987, 637660, 638096, 638808, 639858, 640684, 640991, 641215, 641284, 641420, 642119, 642443, 642701, 642820, 642862, 642953, 643370, 643500, 643671, 645554, 645971, 647794, 648648, 648865, 649376, 649432, 649795, 650358, 650568, 651834, 651856, 652254, 653300, 653440, 653454, 654175, 655179, 655314, 655389, 655627, 657291, 658236, 658900, 658973, 659088, 659584, 660104, 660559, 660990, 661166, 661431, 661514, 661661, 661807, 662368, 662633, 662791, 662927, 663067, 665502, 665995, 667229, 667348, 667461, 667595, 668861, 669190, 669762, 670137, 670289, 670785, 671082, 671673, 671740, 672038, 672736, 672781, 673036, 673144, 673886, 674025, 674156, 674280, 674661, 674681, 675010, 675272, 675680, 675685, 
676299, 676468, 676630, 676775, 677155, 677223, 678522, 678836, 679444, 679470, 680074, 681360, 682418, 682815, 682941, 682948, 683240, 684703, 684886, 684910, 686936, 687137, 687911, 688084, 689225, 690904, 691771, 692349, 692476, 692763, 693718, 694162, 694339, 695346, 695759, 695779, 696211, 696750, 697011, 697270, 697481, 697870, 697957, 698246, 699744, 699889, 700237, 700448, 700703, 701356, 702575, 703435, 703455, 703748, 703799, 704043, 704190, 704616, 705139, 706540, 706558, 706707, 708015, 708694, 708926, 709825, 710492, 711090, 711168, 711361, 711781, 711894, 713324, 713529, 713686, 714646, 714683, 714909, 715177, 715416, 716041, 716235, 716442, 717033, 717516, 719185, 719891, 721161, 721627, 721965, 722128, 722248, 722285, 722633, 722653, 722824, 722844, 723592, 725429, 725743, 726556, 726970, 727189, 727362, 727443, 727517, 727834, 728297, 728388, 728457, 728545, 728552, 730850, 732439, 732705, 733196, 734087, 734168, 734274, 734583, 735300, 736158, 736434, 736887, 737125, 737654, 737829, 737915, 738100, 738749, 738868, 739490, 740312, 741096, 741961, 742147, 742282, 742480, 743002, 743022, 744131, 744338, 745303, 745596, 745624, 745668, 746420, 746442, 747031, 748626, 749169, 749571, 749638, 749882, 751490, 751786, 752276, 752798, 753000, 753614, 754993, 756731, 757354, 757480, 757613, 757701, 758073, 758559, 758645, 758689, 760270, 760274, 761576, 762247, 762673, 762794, 762795, 763258, 763649, 763731, 764087, 764418, 764791, 765065, 766545, 766624, 767867, 767868, 768262, 769370, 769625, 769727, 769764, 769806, 769890, 770042, 770888, 770939, 771303, 771704, 772691, 772819, 772852, 772991, 773256, 774325, 774756, 776239, 777138, 777220, 777350, 778003, 778047, 778267, 778856, 779024, 779239, 779918, 782130, 782264, 782336, 782490, 782530, 783304, 784670, 785546, 785788, 786413, 786976, 787344, 787444, 787580, 788023, 789280, 790678, 790879, 791556, 792022, 792549, 792679, 793021, 795676, 795807, 797302, 797557, 797566, 797623, 797879, 798439, 798850, 800365, 800495, 801142, 801767, 801826, 802426, 802759, 802982, 803285, 803760, 804229, 804881, 805481, 806355, 806412, 807131, 807155, 807344, 808725, 808985, 809392, 809648, 810667, 811253, 811526, 811756, 811965, 812124, 812251, 812853, 813200, 815272, 815744, 817021, 817128, 817503, 818154, 818170, 818944, 819568, 820404, 820705, 821494, 821946, 822287, 822294, 822342, 822798, 823066, 823287, 823302, 823715, 823786, 824195, 825090, 825643, 826223, 826473, 826799, 827386, 828174, 828603, 829122, 829284, 829806, 830026, 830622, 830945, 831387, 831905, 833516, 833563, 833708, 833886, 833953, 834054, 834260, 834314, 834650, 834749, 835908, 836018, 836966, 837330, 837645, 838957, 839309, 839577, 839861, 840024, 840136, 840182, 840967, 842003, 842414, 842452, 843463, 843899, 844144, 844260, 844689, 844835, 844881, 844953, 845450, 846379, 846589, 847023, 847704, 849207, 849977, 852621, 852888, 852925, 853944, 853952, 854185, 854562, 854629, 854651, 858294, 858306, 859025, 859621, 860103, 862058, 862305, 862477, 862811, 864637, 864959, 864965, 865802, 866147, 867167, 867201, 867652, 868060, 869453, 871559, 871577, 871926, 872212, 872497, 873052, 873056, 873119, 873131, 875113, 875271, 876161, 876519, 876938, 877547, 878046, 878472, 878503, 879047, 879575, 880701, 881652, 881833, 881919, 882061, 883577, 884403, 885023, 885127, 885785, 886158, 886208, 888402, 889913, 890229, 891018, 891362, 892577, 892614, 892993, 895511, 896001, 896080, 896840, 897549, 897778, 898041, 898631, 898925, 899632, 899693, 900664, 900731, 900846, 901237, 
902452, 902600, 903765, 903824, 904503, 904806, 905170, 905714, 905773, 906339, 907288, 907374, 907465, 907670, 908341, 910218, 911660, 912251, 912590, 913230, 913434, 913862, 914468, 914555, 916230, 916429, 916539, 916570, 916992, 918561, 918717, 919383, 919617, 920634, 921636, 922107, 923018, 924184, 924450, 924527, 924671, 925145, 925642, 925668, 926427, 927170, 928014, 928689, 928908, 929630, 929880, 929982, 930221, 930510, 930956, 931230, 931469, 931615, 931807, 931849, 932278, 932334, 933131, 934640, 936083, 936568, 936766, 937113, 938140, 938375, 939190, 939220, 939406, 940609, 940924, 942686, 942741, 943700, 944047, 945738, 946158, 946663, 946803, 947757, 947909, 948209, 948851, 949348, 950198, 951077, 951495, 951531, 951552, 951665, 952289, 952822, 952942, 953011, 953352, 953503, 953979, 955326, 955497, 955971, 957215, 957374, 957416, 957494, 957711, 957775, 958597, 958845, 959574, 961150, 961643, 961700, 963012, 963241, 964259, 965387, 965609, 965863, 966914, 969018, 969270, 969665, 969762, 971319, 971600, 972634, 972757, 973134, 973294, 973894, 973985, 974198, 974994, 975440, 975802, 975974, 976033, 976057, 976313, 977155, 977168, 977286, 978755, 979202, 979626, 981524, 981594, 981667, 982178, 982446, 982685, 983200, 983528, 983662, 983912, 984327, 984469, 985813, 986081, 986251, 986977, 987372, 987385, 987400, 988582, 988950, 989624, 989795, 989930, 990827, 991296, 991411, 991873, 991948, 992277, 993009, 993016, 993092, 993998, 994233, 994280, 994287, 994621, 995485, 995576, 995633, 996076, 996197, 996989, 999437], 6_582)
)
results = {}
num_iters = 1
for func in [func for func in dir() if func.startswith('solution')]:
results[func] = []
print(f'\n{func}() (Number of Iterations {num_iters:,})')
for test in basic_tests + additional_tests:
l, expected = test
start = perf_counter_ns()
for i in range(num_iters):
result = globals()[func](l)
end = perf_counter_ns()
results[func].append(end - start)
print(f'{func}({l if len(l) < 10 else "truncated due to length: " + str(len(l))}) returned {result} '
f'({"correct" if result == expected else f"expected: {expected}"})'
f' in {end - start:,} nanoseconds.')
| 565.902439
| 15,778
| 0.700888
|
def solution(l, counter=0):  # This solution passed Google's test and was submitted for Invitation #A1, but I am not sure 171700 is the correct answer for l = [1] * 100 + [2] + [4]
ll = len(l)
arr = [0 for _ in range(ll)]
for i, k in enumerate(l):
for j in range(i):
if k % l[j] == 0:
arr[i] += 1
counter += arr[j]
return counter
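# How the counting works: arr[i] holds the number of earlier indices j with l[j]
# dividing l[i]; whenever l[j] divides l[i], every divisor-predecessor of j completes
# a "lucky triple" (m, j, i), so counter grows by arr[j]. The scan is O(n^2) in the
# list length. For l = [1] * 100 + [2] + [4] this gives
#     C(100, 3) + 2 * C(100, 2) + 100 = 171700
# triples, which suggests that the expected value 4 in additional_tests below, rather
# than the algorithm, is the questionable part.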
if __name__ == '__main__':
from time import perf_counter_ns
basic_tests = (
([1, 1, 1], 1),
([1, 2, 3, 4, 5, 6], 3)
)
additional_tests = (
([1, 2], 0),
([2, 3, 5, 7], 0),
([1] * 100 + [2] + [4], 4),
([10, 20, 23, 27, 45, 69, 118, 138, 161, 166, 167, 170, 174, 213, 222, 224, 250, 251, 270, 285, 291, 325, 336, 355, 360, 381, 390, 396, 403, 413, 423, 446, 488, 507, 521, 560, 570, 660, 685, 715, 758, 781, 782, 783, 829, 855, 864, 874, 897, 936, 938, 944, 965, 981, 983, 993, 998, 1038, 1039, 1044, 1072, 1133, 1155, 1156, 1178, 1184, 1188, 1223, 1229, 1247, 1249, 1292, 1295, 1406, 1413, 1430, 1446, 1470, 1485, 1525, 1538, 1572, 1575, 1656, 1665, 1713, 1744, 1756, 1757, 1759, 1809, 1823, 1834, 1852, 1860, 1884, 1893, 1923, 1989, 2000], 11),
([1, 2, 2, 3, 5, 5, 6, 6, 7, 8, 9, 9, 10, 10, 12, 14, 15, 15, 15, 17, 19, 24, 25, 26, 30, 31, 31, 34, 37, 42, 44, 48, 49, 50, 50, 55, 60, 62, 66, 68, 69, 77, 78, 79, 79, 81, 83, 84, 85, 87, 88, 88, 96, 98, 102, 104, 105, 106, 109, 109, 111, 112, 121, 122, 123, 123, 128, 130, 132, 132, 135, 136, 140, 143, 144, 145, 151, 151, 154, 155, 156, 157, 163, 163, 170, 172, 174, 174, 175, 175, 176, 179, 184, 193, 196, 200, 200, 204, 206, 206, 207, 208, 210, 211, 213, 215, 217, 219, 220, 220, 225, 225, 225, 226, 227, 228, 231, 232, 232, 236, 237, 238, 240, 240, 240, 243, 250, 254, 255, 257, 258, 260, 260, 264, 266, 268, 274, 275, 275, 275, 278, 279, 279, 281, 282, 283, 284, 286, 291, 293, 293, 294, 301, 301, 302, 304, 305, 305, 306, 306, 308, 310, 311, 311, 315, 316, 316, 320, 321, 321, 322, 323, 326, 328, 329, 330, 333, 334, 338, 339, 341, 347, 348, 349, 353, 356, 357, 361, 363, 366, 366, 366, 367, 369, 372, 373, 374, 375, 383, 384, 385, 388, 390, 392, 398, 405, 406, 409, 412, 412, 414, 419, 419, 419, 424, 425, 425, 425, 426, 427, 428, 429, 432, 432, 434, 435, 436, 438, 441, 442, 445, 446, 448, 448, 452, 456, 457, 459, 463, 464, 465, 466, 467, 467, 468, 468, 468, 473, 473, 480, 484, 486, 488, 488, 489, 489, 491, 495, 496, 497, 501, 502, 505, 506, 506, 510, 512, 516, 517, 517, 518, 528, 528, 530, 534, 536, 536, 537, 539, 539, 542, 545, 549, 555, 558, 559, 562, 563, 563, 563, 563, 565, 566, 567, 571, 572, 575, 578, 579, 579, 579, 584, 584, 586, 588, 590, 591, 592, 592, 598, 601, 603, 604, 607, 609, 612, 612, 613, 613, 615, 616, 618, 619, 622, 623, 625, 626, 627, 630, 630, 631, 631, 631, 632, 635, 637, 637, 641, 643, 645, 645, 646, 647, 648, 648, 649, 650, 650, 653, 653, 655, 657, 658, 659, 661, 664, 665, 668, 669, 669, 677, 678, 684, 686, 688, 690, 698, 698, 699, 703, 703, 704, 705, 706, 706, 709, 712, 720, 722, 725, 726, 727, 727, 730, 732, 732, 733, 735, 736, 746, 750, 753, 753, 753, 753, 759, 761, 767, 772, 778, 786, 788, 788, 792, 793, 796, 797, 798, 799, 799, 801, 801, 810, 811, 812, 813, 822, 823, 826, 828, 829, 830, 832, 833, 833, 834, 837, 838, 839, 840, 842, 843, 851, 852, 854, 859, 860, 861, 863, 866, 866, 869, 870, 873, 873, 874, 874, 877, 880, 885, 890, 893, 895, 895, 903, 907, 912, 918, 918, 919, 919, 919, 919, 922, 923, 924, 924, 924, 933, 935, 936, 936, 940, 945, 948, 949, 950, 952, 952, 954, 957, 958, 963, 966, 966, 968, 969, 971, 972, 973, 973, 973, 976, 977, 980, 981, 985, 985, 986, 987, 987, 989, 992, 993, 994, 997, 999, 999, 1004, 1004, 1006, 1008, 1008, 1009, 1009, 1012, 1015, 1017, 1021, 1022, 1024, 1024, 1027, 1027, 1035, 1039, 1039, 1040, 1042, 1043, 1046, 1048, 1052, 1053, 1058, 1060, 1066, 1067, 1067, 1067, 1070, 1071, 1072, 1076, 1081, 1082, 1083, 1087, 1091, 1091, 1094, 1094, 1095, 1096, 1102, 1103, 1103, 1103, 1105, 1107, 1107, 1113, 1114, 1114, 1114, 1115, 1115, 1117, 1117, 1119, 1125, 1126, 1127, 1127, 1127, 1131, 1131, 1132, 1145, 1146, 1146, 1148, 1149, 1150, 1150, 1151, 1151, 1155, 1155, 1160, 1163, 1165, 1165, 1167, 1168, 1172, 1173, 1173, 1174, 1177, 1181, 1183, 1184, 1189, 1192, 1192, 1197, 1197, 1202, 1209, 1212, 1215, 1216, 1217, 1220, 1220, 1222, 1222, 1222, 1222, 1226, 1227, 1231, 1232, 1239, 1240, 1243, 1244, 1245, 1250, 1255, 1258, 1258, 1259, 1264, 1271, 1271, 1272, 1272, 1274, 1276, 1277, 1279, 1280, 1283, 1284, 1285, 1288, 1291, 1296, 1298, 1299, 1300, 1302, 1302, 1306, 1311, 1315, 1315, 1316, 1321, 1321, 1325, 1325, 1327, 1329, 1329, 1330, 1332, 1333, 1338, 1339, 1340, 1345, 1347, 1347, 1350, 1353, 1357, 1359, 1360, 1360, 1360, 1363, 1369, 1370, 1370, 
1370, 1371, 1374, 1376, 1378, 1379, 1380, 1381, 1382, 1385, 1388, 1388, 1390, 1395, 1398, 1402, 1403, 1403, 1405, 1406, 1408, 1412, 1414, 1419, 1424, 1424, 1427, 1428, 1430, 1430, 1432, 1435, 1439, 1439, 1440, 1442, 1442, 1450, 1454, 1455, 1456, 1457, 1458, 1459, 1461, 1462, 1463, 1463, 1465, 1465, 1466, 1472, 1474, 1476, 1477, 1477, 1477, 1480, 1482, 1483, 1485, 1487, 1488, 1490, 1491, 1493, 1494, 1495, 1496, 1498, 1498, 1501, 1505, 1505, 1506, 1515, 1515, 1516, 1517, 1518, 1519, 1520, 1521, 1523, 1525, 1525, 1526, 1527, 1528, 1528, 1530, 1535, 1537, 1537, 1540, 1540, 1540, 1540, 1542, 1543, 1546, 1548, 1549, 1551, 1552, 1553, 1556, 1557, 1558, 1560, 1561, 1563, 1563, 1566, 1569, 1570, 1571, 1576, 1579, 1583, 1584, 1585, 1589, 1589, 1594, 1594, 1595, 1598, 1606, 1609, 1611, 1612, 1618, 1619, 1620, 1625, 1628, 1629, 1639, 1640, 1640, 1644, 1644, 1645, 1649, 1653, 1656, 1657, 1657, 1658, 1658, 1659, 1661, 1666, 1667, 1668, 1671, 1672, 1673, 1681, 1687, 1689, 1689, 1691, 1691, 1691, 1692, 1699, 1699, 1702, 1703, 1704, 1705, 1707, 1708, 1714, 1717, 1717, 1720, 1725, 1725, 1730, 1732, 1733, 1738, 1738, 1740, 1741, 1741, 1744, 1746, 1748, 1748, 1751, 1753, 1755, 1756, 1757, 1759, 1759, 1759, 1768, 1772, 1773, 1774, 1780, 1781, 1784, 1785, 1787, 1787, 1788, 1788, 1789, 1789, 1791, 1794, 1797, 1797, 1802, 1805, 1806, 1809, 1809, 1812, 1813, 1814, 1815, 1816, 1821, 1824, 1826, 1826, 1831, 1834, 1835, 1838, 1839, 1839, 1839, 1845, 1846, 1849, 1854, 1858, 1864, 1865, 1867, 1877, 1879, 1879, 1879, 1880, 1881, 1882, 1882, 1883, 1885, 1885, 1888, 1890, 1897, 1899, 1901, 1905, 1907, 1907, 1913, 1913, 1914, 1922, 1923, 1925, 1927, 1929, 1930, 1932, 1932, 1939, 1940, 1941, 1945, 1946, 1947, 1952, 1952, 1953, 1954, 1954, 1954, 1956, 1959, 1959, 1962, 1963, 1966, 1967, 1970, 1977, 1978, 1978, 1979, 1982, 1987, 1988, 1990, 1991, 1992, 1993, 1994, 1995, 1997, 2000], 16_509),
([1, 1, 2, 4, 5, 2376, 2404, 3797, 3851, 4386, 4626, 5146, 5378, 5611, 5651, 5814, 6513, 6604, 7433, 7456, 7902, 8116, 8480, 10222, 10434, 10996, 11135, 11424, 11496, 11869, 12024, 12380, 13137, 13270, 13542, 13827, 13915, 14567, 14594, 14999, 15004, 16862, 17536, 17998, 19438, 19881, 20007, 21197, 21517, 22352, 22738, 22964, 24492, 24811, 25316, 26545, 27646, 28899, 29248, 29414, 29508, 29710, 30286, 31039, 31133, 33469, 34124, 34253, 35365, 35500, 35549, 35824, 36176, 37025, 37333, 37797, 38722, 39109, 39350, 39515, 41329, 41480, 41902, 41925, 42138, 42272, 42580, 43135, 43285, 43459, 43609, 43673, 43720, 44215, 44228, 44388, 44424, 45172, 46363, 46672, 46838, 47485, 48833, 49688, 50804, 53130, 53853, 54021, 54411, 54593, 55252, 55883, 56838, 57900, 58000, 58294, 58660, 59099, 59419, 59693, 60482, 61178, 61269, 61314, 62412, 63961, 64270, 64859, 66320, 66602, 67277, 68792, 69172, 69384, 70404, 70925, 71912, 72238, 72407, 72903, 73156, 73957, 74339, 75594, 75739, 76477, 76933, 77056, 78383, 79292, 79460, 80007, 81393, 81921, 82478, 82519, 83555, 83700, 83729, 84267, 84293, 84456, 84991, 85015, 85168, 85483, 86330, 86539, 86602, 86627, 87365, 87373, 87397, 87752, 88339, 88736, 88755, 88878, 89210, 90786, 90867, 90985, 91038, 91293, 91441, 92081, 93020, 93308, 94704, 95199, 95349, 95402, 95520, 95588, 96507, 97209, 97949, 98547, 99409, 99572, 99956, 100273, 100286, 100520, 100996, 103060, 103716, 104204, 104588, 105063, 105291, 107506, 107573, 107598, 107786, 109411, 110328, 111122, 112567, 112982, 114466, 114734, 114952, 114956, 115699, 116183, 116235, 116240, 116546, 117085, 118292, 118642, 118692, 119629, 120058, 120229, 120299, 120668, 120843, 121310, 121361, 121809, 122237, 122444, 122745, 123172, 123536, 124751, 124758, 124864, 125802, 125842, 126102, 126496, 127064, 128252, 128500, 128527, 128775, 129423, 129770, 130180, 131520, 131955, 131968, 133103, 133550, 133653, 135184, 135353, 135424, 135775, 135806, 136364, 138014, 138019, 138995, 139978, 140443, 140710, 141077, 141758, 142049, 144424, 145361, 146043, 146496, 147308, 148004, 148132, 148194, 148315, 148356, 148745, 149171, 150067, 150409, 150911, 151094, 151344, 151852, 151955, 153093, 153421, 153868, 154412, 154415, 154556, 154988, 155165, 155369, 155452, 157006, 158594, 158833, 158977, 159320, 159441, 159621, 160559, 161030, 161418, 161499, 161546, 162092, 162100, 162487, 162495, 162933, 164019, 164860, 166041, 166227, 166514, 167443, 168228, 168442, 168714, 169205, 170059, 170458, 170944, 171048, 171937, 172401, 173151, 173953, 174383, 176454, 177051, 177371, 177604, 177653, 177916, 178673, 178721, 178859, 179775, 180347, 180556, 180708, 181440, 182059, 183012, 183102, 183703, 184324, 184364, 186200, 187135, 187147, 187287, 187326, 188781, 189064, 189455, 189622, 189688, 189722, 190190, 190559, 190985, 191409, 191960, 192376, 193140, 193657, 194994, 195168, 195421, 196295, 196534, 196949, 197042, 197229, 197590, 198872, 199052, 199632, 199657, 200555, 201151, 201324, 201446, 201632, 201827, 202262, 203034, 203080, 203775, 203790, 203795, 204252, 204309, 204317, 205306, 205412, 207839, 207914, 207956, 208364, 209462, 211072, 212088, 213155, 213159, 213322, 213659, 214046, 214728, 214779, 215260, 215900, 215973, 217046, 217974, 218444, 218696, 219185, 219686, 220148, 220273, 220842, 221436, 221497, 221716, 222530, 222635, 222647, 223100, 223403, 223862, 224272, 224580, 224625, 225157, 225364, 225525, 225965, 226064, 226132, 227500, 227558, 227627, 228193, 228426, 228528, 229668, 229730, 230653, 230802, 231518, 232532, 232733, 
233089, 233919, 235296, 235321, 235642, 238313, 238441, 239117, 240710, 240870, 241429, 241594, 241722, 241815, 241939, 242116, 242857, 243226, 243230, 243593, 243655, 243720, 244049, 245057, 245396, 245734, 247547, 248382, 249195, 249807, 250421, 250589, 252190, 253206, 253276, 253398, 254136, 254332, 254848, 255485, 255581, 256750, 257099, 257198, 257745, 258165, 258626, 258870, 259521, 260359, 260474, 260813, 261771, 262329, 263921, 264230, 264378, 264631, 265056, 265143, 265391, 267191, 267653, 268623, 268624, 268988, 269234, 269742, 270090, 270570, 272591, 272688, 273856, 274040, 274529, 274873, 275226, 276389, 276403, 276635, 277403, 277409, 278268, 279490, 280155, 280876, 281309, 281621, 281760, 282060, 282282, 282594, 283735, 283852, 284328, 284590, 285020, 285298, 286064, 286072, 287060, 287761, 287839, 288425, 288602, 288875, 289531, 289736, 290635, 290896, 291107, 291206, 291672, 291846, 292053, 292771, 292786, 293642, 293928, 294476, 294496, 294643, 294693, 294944, 295285, 295430, 295463, 295664, 296142, 296337, 297621, 297872, 298045, 298057, 298149, 298577, 298699, 299572, 299648, 300637, 301226, 301632, 302001, 302023, 303323, 303576, 304150, 305089, 305425, 305950, 306972, 307464, 307700, 308344, 308490, 308593, 309417, 310113, 312420, 312454, 312472, 313194, 313356, 314130, 314332, 314461, 314582, 314872, 315209, 315285, 315334, 315498, 315773, 317746, 317917, 318182, 319378, 320172, 320448, 321163, 321909, 322979, 323203, 323526, 323794, 324611, 324678, 325446, 325462, 325635, 326641, 327200, 328873, 329951, 330151, 330447, 330516, 331125, 331548, 333377, 333662, 333976, 334641, 335104, 336391, 337062, 337460, 337571, 339236, 339329, 339480, 339705, 339765, 340482, 340605, 340793, 341016, 341729, 342315, 342338, 344123, 344776, 345140, 345586, 345825, 345937, 346608, 347127, 348265, 348378, 348706, 348754, 348796, 349200, 349851, 350914, 351323, 352159, 352348, 352561, 352776, 352991, 353107, 354069, 354498, 354910, 355844, 355965, 357028, 357341, 357722, 358812, 359449, 359597, 360115, 360332, 360459, 361637, 362126, 362210, 362254, 362533, 362708, 362838, 363078, 364395, 364762, 365521, 366124, 366219, 366891, 367246, 367608, 368364, 369011, 369044, 369737, 370433, 370510, 370547, 371477, 371560, 371749, 373421, 373608, 374140, 375112, 375157, 377419, 377582, 377669, 377968, 378340, 378421, 379710, 380238, 380601, 382147, 383396, 383398, 383411, 383475, 383486, 383783, 384718, 385380, 386302, 386729, 386807, 387258, 389859, 389895, 390345, 391082, 391398, 391576, 392238, 392261, 392455, 392510, 393929, 394210, 394223, 394389, 394485, 394749, 394925, 395541, 396339, 396464, 397327, 397903, 398066, 398297, 398427, 398562, 399776, 400170, 400754, 400969, 401064, 401272, 401663, 401914, 402040, 402164, 402696, 403151, 403681, 404052, 405818, 406037, 406261, 406629, 407310, 409060, 409374, 409495, 409544, 410885, 412078, 412701, 412903, 413601, 414417, 415696, 415729, 415781, 415863, 417181, 417630, 417752, 418517, 419112, 419171, 419353, 419510, 419682, 420192, 420810, 421004, 421461, 421786, 422146, 422150, 423551, 425267, 425379, 425782, 425975, 426113, 426186, 426599, 426929, 427245, 427712, 428179, 428412, 428777, 429052, 429261, 429406, 429892, 430130, 431013, 431415, 431551, 432078, 432812, 433038, 433933, 434655, 434711, 434716, 434966, 435418, 435457, 435630, 435749, 436432, 437531, 437759, 438173, 438243, 438514, 439222, 439640, 440146, 440304, 440694, 441318, 442052, 442321, 442912, 443710, 443734, 444491, 444573, 444754, 445243, 445301, 445512, 445851, 445935, 
446428, 446992, 447391, 447721, 449202, 449288, 450127, 451570, 453164, 453291, 453619, 454826, 456006, 456196, 456229, 456688, 456747, 456877, 457778, 457851, 457997, 458359, 458470, 458931, 459116, 459163, 459320, 459716, 459761, 461561, 462270, 462276, 462666, 463203, 465064, 466002, 466783, 466937, 468798, 468881, 471002, 471887, 472016, 472145, 472217, 473959, 474378, 475158, 475238, 475366, 475644, 475975, 476065, 476114, 476926, 477511, 478181, 478249, 478450, 479206, 479217, 479533, 481048, 483196, 483691, 484304, 484488, 484494, 485018, 485349, 486256, 486449, 486872, 487486, 487961, 488037, 488156, 489348, 489638, 489908, 491162, 492176, 492300, 492866, 493793, 493925, 494924, 495341, 495407, 495699, 496482, 497186, 497884, 498271, 498450, 498519, 498528, 498899, 499047, 499333, 500150, 501425, 502056, 502268, 502442, 502869, 502899, 503448, 503535, 504613, 504905, 505175, 505888, 506169, 506282, 506666, 506774, 507343, 507557, 509448, 509851, 511908, 512739, 513048, 513129, 513377, 513634, 514286, 514572, 515207, 516682, 516911, 518608, 518692, 518860, 519961, 520080, 520382, 520560, 522851, 522937, 523178, 523367, 523494, 524226, 524474, 526274, 526328, 527401, 527436, 529756, 530121, 530265, 531483, 531625, 531777, 532553, 532973, 532984, 534260, 534397, 534602, 535340, 535508, 535783, 536444, 536992, 537216, 537968, 539486, 539787, 539834, 542257, 543800, 544298, 544614, 545107, 545537, 545778, 547150, 547811, 547866, 547908, 548595, 550162, 550186, 551133, 551911, 552997, 553188, 553978, 553978, 554130, 554795, 554856, 556226, 556916, 557050, 557832, 557879, 558941, 560307, 560462, 561439, 561775, 561789, 561934, 562007, 562716, 563375, 563593, 564273, 564510, 564640, 564859, 565369, 565832, 566604, 566628, 566790, 567004, 567243, 567245, 567467, 567949, 569373, 569688, 570202, 570438, 571062, 571255, 572528, 572670, 573224, 573688, 574074, 574122, 575086, 575466, 575628, 575998, 576338, 576351, 576423, 578248, 578472, 578581, 578661, 579047, 579070, 579086, 579289, 579462, 579536, 579555, 580414, 582070, 582275, 582996, 583037, 584002, 584111, 584719, 585584, 585663, 586710, 588070, 588097, 589054, 589506, 592401, 593024, 595977, 596044, 597282, 598495, 598581, 598960, 599513, 599538, 599851, 600064, 600141, 600422, 600465, 600810, 601258, 601309, 601729, 602268, 602302, 602947, 603146, 603656, 604433, 605449, 607652, 607709, 607898, 608403, 609582, 611612, 611903, 613310, 614715, 615497, 616157, 616292, 616551, 616595, 617936, 618565, 618699, 618761, 620093, 620475, 620590, 620657, 621727, 622288, 622299, 622710, 623579, 623983, 623990, 624360, 625648, 625905, 627038, 627046, 627321, 627411, 627870, 628348, 628465, 628604, 628907, 629093, 630123, 630169, 630587, 630682, 631633, 631753, 632566, 633245, 634336, 634604, 634660, 635053, 635697, 635866, 636420, 636673, 636710, 636987, 637660, 638096, 638808, 639858, 640684, 640991, 641215, 641284, 641420, 642119, 642443, 642701, 642820, 642862, 642953, 643370, 643500, 643671, 645554, 645971, 647794, 648648, 648865, 649376, 649432, 649795, 650358, 650568, 651834, 651856, 652254, 653300, 653440, 653454, 654175, 655179, 655314, 655389, 655627, 657291, 658236, 658900, 658973, 659088, 659584, 660104, 660559, 660990, 661166, 661431, 661514, 661661, 661807, 662368, 662633, 662791, 662927, 663067, 665502, 665995, 667229, 667348, 667461, 667595, 668861, 669190, 669762, 670137, 670289, 670785, 671082, 671673, 671740, 672038, 672736, 672781, 673036, 673144, 673886, 674025, 674156, 674280, 674661, 674681, 675010, 675272, 675680, 675685, 
676299, 676468, 676630, 676775, 677155, 677223, 678522, 678836, 679444, 679470, 680074, 681360, 682418, 682815, 682941, 682948, 683240, 684703, 684886, 684910, 686936, 687137, 687911, 688084, 689225, 690904, 691771, 692349, 692476, 692763, 693718, 694162, 694339, 695346, 695759, 695779, 696211, 696750, 697011, 697270, 697481, 697870, 697957, 698246, 699744, 699889, 700237, 700448, 700703, 701356, 702575, 703435, 703455, 703748, 703799, 704043, 704190, 704616, 705139, 706540, 706558, 706707, 708015, 708694, 708926, 709825, 710492, 711090, 711168, 711361, 711781, 711894, 713324, 713529, 713686, 714646, 714683, 714909, 715177, 715416, 716041, 716235, 716442, 717033, 717516, 719185, 719891, 721161, 721627, 721965, 722128, 722248, 722285, 722633, 722653, 722824, 722844, 723592, 725429, 725743, 726556, 726970, 727189, 727362, 727443, 727517, 727834, 728297, 728388, 728457, 728545, 728552, 730850, 732439, 732705, 733196, 734087, 734168, 734274, 734583, 735300, 736158, 736434, 736887, 737125, 737654, 737829, 737915, 738100, 738749, 738868, 739490, 740312, 741096, 741961, 742147, 742282, 742480, 743002, 743022, 744131, 744338, 745303, 745596, 745624, 745668, 746420, 746442, 747031, 748626, 749169, 749571, 749638, 749882, 751490, 751786, 752276, 752798, 753000, 753614, 754993, 756731, 757354, 757480, 757613, 757701, 758073, 758559, 758645, 758689, 760270, 760274, 761576, 762247, 762673, 762794, 762795, 763258, 763649, 763731, 764087, 764418, 764791, 765065, 766545, 766624, 767867, 767868, 768262, 769370, 769625, 769727, 769764, 769806, 769890, 770042, 770888, 770939, 771303, 771704, 772691, 772819, 772852, 772991, 773256, 774325, 774756, 776239, 777138, 777220, 777350, 778003, 778047, 778267, 778856, 779024, 779239, 779918, 782130, 782264, 782336, 782490, 782530, 783304, 784670, 785546, 785788, 786413, 786976, 787344, 787444, 787580, 788023, 789280, 790678, 790879, 791556, 792022, 792549, 792679, 793021, 795676, 795807, 797302, 797557, 797566, 797623, 797879, 798439, 798850, 800365, 800495, 801142, 801767, 801826, 802426, 802759, 802982, 803285, 803760, 804229, 804881, 805481, 806355, 806412, 807131, 807155, 807344, 808725, 808985, 809392, 809648, 810667, 811253, 811526, 811756, 811965, 812124, 812251, 812853, 813200, 815272, 815744, 817021, 817128, 817503, 818154, 818170, 818944, 819568, 820404, 820705, 821494, 821946, 822287, 822294, 822342, 822798, 823066, 823287, 823302, 823715, 823786, 824195, 825090, 825643, 826223, 826473, 826799, 827386, 828174, 828603, 829122, 829284, 829806, 830026, 830622, 830945, 831387, 831905, 833516, 833563, 833708, 833886, 833953, 834054, 834260, 834314, 834650, 834749, 835908, 836018, 836966, 837330, 837645, 838957, 839309, 839577, 839861, 840024, 840136, 840182, 840967, 842003, 842414, 842452, 843463, 843899, 844144, 844260, 844689, 844835, 844881, 844953, 845450, 846379, 846589, 847023, 847704, 849207, 849977, 852621, 852888, 852925, 853944, 853952, 854185, 854562, 854629, 854651, 858294, 858306, 859025, 859621, 860103, 862058, 862305, 862477, 862811, 864637, 864959, 864965, 865802, 866147, 867167, 867201, 867652, 868060, 869453, 871559, 871577, 871926, 872212, 872497, 873052, 873056, 873119, 873131, 875113, 875271, 876161, 876519, 876938, 877547, 878046, 878472, 878503, 879047, 879575, 880701, 881652, 881833, 881919, 882061, 883577, 884403, 885023, 885127, 885785, 886158, 886208, 888402, 889913, 890229, 891018, 891362, 892577, 892614, 892993, 895511, 896001, 896080, 896840, 897549, 897778, 898041, 898631, 898925, 899632, 899693, 900664, 900731, 900846, 901237, 
902452, 902600, 903765, 903824, 904503, 904806, 905170, 905714, 905773, 906339, 907288, 907374, 907465, 907670, 908341, 910218, 911660, 912251, 912590, 913230, 913434, 913862, 914468, 914555, 916230, 916429, 916539, 916570, 916992, 918561, 918717, 919383, 919617, 920634, 921636, 922107, 923018, 924184, 924450, 924527, 924671, 925145, 925642, 925668, 926427, 927170, 928014, 928689, 928908, 929630, 929880, 929982, 930221, 930510, 930956, 931230, 931469, 931615, 931807, 931849, 932278, 932334, 933131, 934640, 936083, 936568, 936766, 937113, 938140, 938375, 939190, 939220, 939406, 940609, 940924, 942686, 942741, 943700, 944047, 945738, 946158, 946663, 946803, 947757, 947909, 948209, 948851, 949348, 950198, 951077, 951495, 951531, 951552, 951665, 952289, 952822, 952942, 953011, 953352, 953503, 953979, 955326, 955497, 955971, 957215, 957374, 957416, 957494, 957711, 957775, 958597, 958845, 959574, 961150, 961643, 961700, 963012, 963241, 964259, 965387, 965609, 965863, 966914, 969018, 969270, 969665, 969762, 971319, 971600, 972634, 972757, 973134, 973294, 973894, 973985, 974198, 974994, 975440, 975802, 975974, 976033, 976057, 976313, 977155, 977168, 977286, 978755, 979202, 979626, 981524, 981594, 981667, 982178, 982446, 982685, 983200, 983528, 983662, 983912, 984327, 984469, 985813, 986081, 986251, 986977, 987372, 987385, 987400, 988582, 988950, 989624, 989795, 989930, 990827, 991296, 991411, 991873, 991948, 992277, 993009, 993016, 993092, 993998, 994233, 994280, 994287, 994621, 995485, 995576, 995633, 996076, 996197, 996989, 999437], 6_582)
)
results = {}
num_iters = 1
for func in [func for func in dir() if func.startswith('solution')]:
    results[func] = []
    print(f'\n{func}() (Number of Iterations {num_iters:,})')
    for test in basic_tests + additional_tests:
        l, expected = test
        start = perf_counter_ns()
        for i in range(num_iters):
            result = globals()[func](l)
        end = perf_counter_ns()
        results[func].append(end - start)
        print(f'{func}({l if len(l) < 10 else "truncated due to length: " + str(len(l))}) returned {result} '
              f'({"correct" if result == expected else f"expected: {expected}"})'
              f' in {end - start:,} nanoseconds.')
| 397
| 0
| 22
|
b814d40bced5d99185eb2d635cb5f9eff67c1cb0
| 6,820
|
py
|
Python
|
gips/scripts/run_mapout.py
|
wutobias/gips
|
93485caef46b39f9bd01edf58e306658bcee884b
|
[
"MIT"
] | 1
|
2021-04-24T10:29:39.000Z
|
2021-04-24T10:29:39.000Z
|
gips/scripts/run_mapout.py
|
wutobias/gips
|
93485caef46b39f9bd01edf58e306658bcee884b
|
[
"MIT"
] | null | null | null |
gips/scripts/run_mapout.py
|
wutobias/gips
|
93485caef46b39f9bd01edf58e306658bcee884b
|
[
"MIT"
] | 2
|
2021-02-16T14:18:59.000Z
|
2021-06-04T05:09:22.000Z
|
import numpy as np
import copy
import pygmo
from gips.gistmodel.mode0 import mode0
from gips.gistmodel.mode1 import mode1
from gips.gistmodel.mode3 import mode3
from gips.gistmodel.mode4 import mode4
from gips.gistmodel.mode5 import mode5
from gips.gistmodel.mode6 import mode6
from gips.gistmodel.mode7 import mode7
from gips.utils.misc import mode_error
from gips.mapout.map_processing import mapout_maps
from gips.utils.read_write import read_parmsfile
from gips.utils.read_write import write_maps
| 31.574074
| 101
| 0.523021
|
import numpy as np
import copy
import pygmo
from gips.gistmodel.mode0 import mode0
from gips.gistmodel.mode1 import mode1
from gips.gistmodel.mode3 import mode3
from gips.gistmodel.mode4 import mode4
from gips.gistmodel.mode5 import mode5
from gips.gistmodel.mode6 import mode6
from gips.gistmodel.mode7 import mode7
from gips.utils.misc import mode_error
from gips.mapout.map_processing import mapout_maps
from gips.utils.read_write import read_parmsfile
from gips.utils.read_write import write_maps
def mapout(gdatarec_lib, gdata_lib, mode, parms=6, pairs=False,
parmsfile=None, radiusadd=[0.,3.], softness=1., softcut=2.,
exclude=None, prefix=None, scaling=2.0, verbose=False):
if verbose:
print "Start mapout procedure with"
print "mode = %d" %mode
print "softness = %6.3f" %softness
print "softcut = %6.3f" %softcut
if verbose:
print "Organizing and preparing data ..."
mode_dict = dict()
mode_dict = {0 : mode0,
1 : mode1,
3 : mode3,
4 : mode4,
5 : mode5,
6 : mode6,
7 : mode7}
if mode in mode_dict.keys():
fitmode = mode_dict[mode]
else:
mode_error(mode)
fitter = fitmode(gdatarec_lib,
gdata_lib,
parms=parms,
pairs=False,
radiusadd=radiusadd,
softness=softness,
softcut=softcut,
scaling=scaling,
verbose=verbose)
### Find position of SES in parms file
if parmsfile==None:
raise IOError("Must provide parmsfile.")
parmdict = read_parmsfile(parmsfile)
A_SSE = -1
B_SSE = -1
for i, entry in enumerate(parmdict["header"]):
if entry.startswith("SSE"):
if entry.endswith("(A)"):
A_SSE=i
elif entry.endswith("(B)"):
B_SSE=i
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
### Find the best Candidate Solutions ###
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
### Collect all the solutions
N_entries = len(parmdict.keys())-1
A_list = list()
B_list = list()
x_list = list()
A_list_tmp = list()
B_list_tmp = list()
x_list_tmp = list()
for key, value in parmdict.items():
if key=="header":
continue
A_list_tmp.append(value[A_SSE])
B_list_tmp.append(value[B_SSE])
x_list_tmp.append(value[:fitter._parms])
if fitter.decomp:
N_entries = N_entries/2
for i in range(N_entries):
A_list.append([copy.copy(A_list_tmp[2*i]), copy.copy(A_list_tmp[2*i+1])])
B_list.append([copy.copy(B_list_tmp[2*i]), copy.copy(B_list_tmp[2*i+1])])
x_list.append(copy.copy(x_list_tmp[2*i]))
else:
A_list = copy.copy(A_list_tmp)
B_list = copy.copy(B_list_tmp)
x_list = copy.copy(x_list_tmp)
A_list = np.array(A_list)
B_list = np.array(B_list)
### Find the best solution
if fitter.decomp:
ndf, dl, dc, ndr = pygmo.fast_non_dominated_sorting(A_list)
ordered_ndf = list()
for front in ndf:
ordered_ndf.append(pygmo.sort_population_mo(A_list[front]))
else:
ordered_ndf = np.argsort(A_list, axis=0)
if fitter.decomp:
best_x_A = np.array(x_list[ordered_ndf[0][0]])
else:
best_x_A = np.array(x_list[ordered_ndf[0]])
### ~~~~~~~~~~~~~~~~~~~~~~ ###
### Prepare Exclusion List ###
### ~~~~~~~~~~~~~~~~~~~~~~ ###
if exclude != None \
and exclude != "":
exclude_list = list()
with open(exclude, "r") as fopen:
for line in fopen:
l =line.rstrip().lstrip().split()
if len(l)==0:
continue
if l[0].startswith("#"):
continue
for s in l:
exclude_list.append(s)
else:
exclude_list = list()
### ~~~~~~~~~~~~~~~~~~ ###
### Write out the maps ###
### ~~~~~~~~~~~~~~~~~~ ###
### Write out un-processed dx grids
if mode in [0,1,2]:
counter = 0
for rec_keys in fitter.gdatarec_dict.keys():
recdict = fitter.gdatarec_dict[rec_keys]
title = recdict["title"]
if title == None:
name = "%d" %counter
else:
name = title
for i in range(len(recdict["receptor"])):
if recdict["receptor"][i]["gdat"] == None:
continue
write_maps(recdict["receptor"][i]["gdat"], prefix="rec_%s_%d" %(name,i), pymol=True)
counter += 1
else:
counter = 0
for rec_keys in fitter.gdatarec_dict.keys():
recdict = fitter.gdatarec_dict[rec_keys]
title = recdict["title"]
if title == None:
name = "%d" %counter
else:
name = title
for i in range(len(recdict["receptor"])):
if recdict["receptor"][i]["gdat"] == None:
continue
write_maps(recdict["receptor"][i]["gdat"], prefix="rec_%s_%d" %(name,i), pymol=True)
counter += 1
counter = 0
for cplx_keys in fitter.gdata_dict.keys():
cplxdict = fitter.gdata_dict[cplx_keys]
if cplxdict["title"] in fitter.exclude:
continue
title = cplxdict["title"]
if title == None:
name = "%d" %counter
else:
name = title
_N_dict = len(cplxdict["complex"])
for i in range(_N_dict):
if cplxdict["complex"][i]["gdat"] == None:
continue
write_maps(cplxdict["complex"][i]["gdat"], prefix="cplx_%s_%d" %(name,i), pymol=True)
_N_dict = len(cplxdict["ligand"])
for i in range(_N_dict):
if cplxdict["ligand"][i]["gdat"] == None:
continue
write_maps(cplxdict["ligand"][i]["gdat"], prefix="lig_%s_%d" %(name,i), pymol=True)
counter += 1
### Write out pre-processed xyz grids
m = mapout_maps(fitter, best_x_A, pairs, prefix)
if mode in [0,1]:
m.process_rec = True
m.process_cplx = False
m.process_lig = False
else:
m.process_rec = True
m.process_cplx = True
m.process_lig = True
for case in range(fitter.N_case):
if fitter.name[case] in exclude_list:
continue
m.set_case(case)
### Internal write routine as a callback to the process routine
m.process(m.write)
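# --- A minimal standalone sketch (separate from run_mapout.py above) of the
# "find the best solution" step: pygmo's non-dominated sorting ranks candidate
# parameter sets by their SSE objectives, and the best entry of the first front
# is selected. The toy objective values below are invented for illustration
# and assume the pygmo 2.x API used in the code above.
import numpy as np
import pygmo

A_toy = np.array([[3.0, 1.0],    # candidate 0: two SSE objectives
                  [1.0, 2.0],    # candidate 1
                  [2.0, 2.5]])   # candidate 2
ndf, dl, dc, ndr = pygmo.fast_non_dominated_sorting(A_toy)   # ndf = list of fronts
ranked_front0 = pygmo.sort_population_mo(A_toy[ndf[0]])      # ordering within first front
best_idx = ndf[0][ranked_front0[0]]                          # map back to a row of A_toy
print("best candidate:", best_idx, A_toy[best_idx])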
| 6,294
| 0
| 23
|
809e92f4e235a27adf79ef06050b790247a40ee6
| 4,431
|
py
|
Python
|
tests/models/test_artifacts.py
|
Sohamkayal4103/mlflow
|
4e444efdf73c710644ee039b44fa36a31d716f69
|
[
"Apache-2.0"
] | 1
|
2022-01-11T02:51:17.000Z
|
2022-01-11T02:51:17.000Z
|
tests/models/test_artifacts.py
|
Sohamkayal4103/mlflow
|
4e444efdf73c710644ee039b44fa36a31d716f69
|
[
"Apache-2.0"
] | null | null | null |
tests/models/test_artifacts.py
|
Sohamkayal4103/mlflow
|
4e444efdf73c710644ee039b44fa36a31d716f69
|
[
"Apache-2.0"
] | 2
|
2019-05-11T08:13:38.000Z
|
2019-05-14T13:33:54.000Z
|
from mlflow.exceptions import MlflowException
from mlflow.models.evaluation.artifacts import (
ImageEvaluationArtifact,
JsonEvaluationArtifact,
NumpyEvaluationArtifact,
CsvEvaluationArtifact,
ParquetEvaluationArtifact,
TextEvaluationArtifact,
PickleEvaluationArtifact,
)
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import json
import pathlib
import pytest
from mlflow.models.evaluation.artifacts import _infer_artifact_type_and_ext
from mlflow.models.evaluation.default_evaluator import _CustomMetric
@pytest.fixture
@pytest.mark.parametrize(
"is_file,artifact,artifact_type,ext",
[
(True, lambda path: plt.figure().savefig(path), ImageEvaluationArtifact, "png"),
(True, lambda path: plt.figure().savefig(path), ImageEvaluationArtifact, "jpg"),
(True, lambda path: plt.figure().savefig(path), ImageEvaluationArtifact, "jpeg"),
(True, __generate_dummy_json_file, JsonEvaluationArtifact, "json"),
(True, lambda path: pathlib.Path(path).write_text("test"), TextEvaluationArtifact, "txt"),
(
True,
lambda path: np.save(path, np.array([1, 2, 3]), allow_pickle=False),
NumpyEvaluationArtifact,
"npy",
),
(
True,
lambda path: pd.DataFrame({"test": [1, 2, 3]}).to_csv(path, index=False),
CsvEvaluationArtifact,
"csv",
),
(
True,
lambda path: pd.DataFrame({"test": [1, 2, 3]}).to_parquet(path),
ParquetEvaluationArtifact,
"parquet",
),
(False, pd.DataFrame({"test": [1, 2, 3]}), CsvEvaluationArtifact, "csv"),
(False, np.array([1, 2, 3]), NumpyEvaluationArtifact, "npy"),
(False, plt.figure(), ImageEvaluationArtifact, "png"),
(False, {"a": 1, "b": "e", "c": 1.2, "d": [1, 2]}, JsonEvaluationArtifact, "json"),
(False, [1, 2, 3, "test"], JsonEvaluationArtifact, "json"),
(False, '{"a": 1, "b": [1.2, 3]}', JsonEvaluationArtifact, "json"),
(False, '[1, 2, 3, "test"]', JsonEvaluationArtifact, "json"),
(False, __DummyClass(), PickleEvaluationArtifact, "pickle"),
],
)
| 37.871795
| 99
| 0.682013
|
from mlflow.exceptions import MlflowException
from mlflow.models.evaluation.artifacts import (
ImageEvaluationArtifact,
JsonEvaluationArtifact,
NumpyEvaluationArtifact,
CsvEvaluationArtifact,
ParquetEvaluationArtifact,
TextEvaluationArtifact,
PickleEvaluationArtifact,
)
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import json
import pathlib
import pytest
from mlflow.models.evaluation.artifacts import _infer_artifact_type_and_ext
from mlflow.models.evaluation.default_evaluator import _CustomMetric
@pytest.fixture
def cm_fn_tuple():
    return _CustomMetric(lambda: None, "", 0, "")
def __generate_dummy_json_file(path):
    with open(path, "w") as f:
        json.dump([1, 2, 3], f)
class __DummyClass:
    def __init__(self):
        self.test = 1
@pytest.mark.parametrize(
"is_file,artifact,artifact_type,ext",
[
(True, lambda path: plt.figure().savefig(path), ImageEvaluationArtifact, "png"),
(True, lambda path: plt.figure().savefig(path), ImageEvaluationArtifact, "jpg"),
(True, lambda path: plt.figure().savefig(path), ImageEvaluationArtifact, "jpeg"),
(True, __generate_dummy_json_file, JsonEvaluationArtifact, "json"),
(True, lambda path: pathlib.Path(path).write_text("test"), TextEvaluationArtifact, "txt"),
(
True,
lambda path: np.save(path, np.array([1, 2, 3]), allow_pickle=False),
NumpyEvaluationArtifact,
"npy",
),
(
True,
lambda path: pd.DataFrame({"test": [1, 2, 3]}).to_csv(path, index=False),
CsvEvaluationArtifact,
"csv",
),
(
True,
lambda path: pd.DataFrame({"test": [1, 2, 3]}).to_parquet(path),
ParquetEvaluationArtifact,
"parquet",
),
(False, pd.DataFrame({"test": [1, 2, 3]}), CsvEvaluationArtifact, "csv"),
(False, np.array([1, 2, 3]), NumpyEvaluationArtifact, "npy"),
(False, plt.figure(), ImageEvaluationArtifact, "png"),
(False, {"a": 1, "b": "e", "c": 1.2, "d": [1, 2]}, JsonEvaluationArtifact, "json"),
(False, [1, 2, 3, "test"], JsonEvaluationArtifact, "json"),
(False, '{"a": 1, "b": [1.2, 3]}', JsonEvaluationArtifact, "json"),
(False, '[1, 2, 3, "test"]', JsonEvaluationArtifact, "json"),
(False, __DummyClass(), PickleEvaluationArtifact, "pickle"),
],
)
def test_infer_artifact_type_and_ext(is_file, artifact, artifact_type, ext, tmp_path, cm_fn_tuple):
    if is_file:
        artifact_representation = tmp_path / f"test.{ext}"
        artifact(artifact_representation)
    else:
        artifact_representation = artifact
    inferred_from_path, inferred_type, inferred_ext = _infer_artifact_type_and_ext(
        f"{ext}_{artifact_type.__name__}_artifact", artifact_representation, cm_fn_tuple
    )
    assert not (is_file ^ inferred_from_path)
    assert inferred_type is artifact_type
    assert inferred_ext == f".{ext}"
def test_infer_artifact_type_and_ext_raise_exception_for_non_file_non_json_str(cm_fn_tuple):
    with pytest.raises(
        MlflowException,
        match="with string representation 'some random str' that is "
        "neither a valid path to a file nor a JSON string",
    ):
        _infer_artifact_type_and_ext("test_artifact", "some random str", cm_fn_tuple)
def test_infer_artifact_type_and_ext_raise_exception_for_non_existent_path(tmp_path, cm_fn_tuple):
    path = tmp_path / "dne_path"
    with pytest.raises(MlflowException, match=f"with path '{path}' does not exist"):
        _infer_artifact_type_and_ext("test_artifact", path, cm_fn_tuple)
def test_infer_artifact_type_and_ext_raise_exception_for_non_file_artifact(tmp_path, cm_fn_tuple):
    with pytest.raises(MlflowException, match=f"with path '{tmp_path}' is not a file"):
        _infer_artifact_type_and_ext("non_file_artifact", tmp_path, cm_fn_tuple)
def test_infer_artifact_type_and_ext_raise_exception_for_unsupported_ext(tmp_path, cm_fn_tuple):
    path = tmp_path / "invalid_ext_example.some_ext"
    with open(path, "w") as f:
        f.write("some stuff that shouldn't be read")
    with pytest.raises(
        MlflowException,
        match=f"with path '{path}' does not match any of the supported file extensions",
    ):
        _infer_artifact_type_and_ext("invalid_ext_artifact", path, cm_fn_tuple)
| 1,992
| -2
| 208
|
6317f3a48b6c684ec729ccc0512a3cbb1cb0ad18
| 1,424
|
py
|
Python
|
src/ReadingtipLibrary.py
|
Berries-M/Ohtu_miniprojekti
|
fd763f996830cc99e832167951b9cb77a3e58731
|
[
"MIT"
] | null | null | null |
src/ReadingtipLibrary.py
|
Berries-M/Ohtu_miniprojekti
|
fd763f996830cc99e832167951b9cb77a3e58731
|
[
"MIT"
] | null | null | null |
src/ReadingtipLibrary.py
|
Berries-M/Ohtu_miniprojekti
|
fd763f996830cc99e832167951b9cb77a3e58731
|
[
"MIT"
] | null | null | null |
# pylint: disable=invalid-name
"""Hyväksymistestausluokka
"""
#Pylint disablettu toistaiseksi
from stub_io import StubIO # pylint: disable=import-error
class ReadingtipLibrary: # pylint: disable=invalid-name
"""Luokka joka vastaa vaatimusten testaamisesta
"""
def __init__(self):
"""Luokan konstruktori
"""
self._io = StubIO()
def input(self, value):
"""Luo syötteen
"""
self._io.initial_add(value)
def output_should_contain(self, value):
"""Tarkistaa tulosteen
"""
outputs = self._io.output
if not value in outputs:
raise AssertionError(
f"Output \"{value}\" is not in {str(outputs)}"
)
def run_application(self):
"""Käynnistää sovelluksen
"""
self._io.start()
def last_output_should_contain(self, value):
"""Tarkistaa viimeisen tulosteen
"""
if len(self._io.output) > 0:
last_output = self._io.output.pop()
else:
last_output = ""
if last_output != value:
raise AssertionError(
f"{value} is not in {last_output}"
)
def database_must_be_empty(self):
"""Tarkistaa onko tietokanta tyhjä
"""
if len(self._io.database) > 0:
raise AssertionError(
"Database is not empty"
)
| 24.551724
| 62
| 0.561798
|
# pylint: disable=invalid-name
"""Hyväksymistestausluokka
"""
#Pylint disablettu toistaiseksi
from stub_io import StubIO # pylint: disable=import-error
class ReadingtipLibrary: # pylint: disable=invalid-name
"""Luokka joka vastaa vaatimusten testaamisesta
"""
def __init__(self):
"""Luokan konstruktori
"""
self._io = StubIO()
def input(self, value):
"""Luo syötteen
"""
self._io.initial_add(value)
def output_should_contain(self, value):
"""Tarkistaa tulosteen
"""
outputs = self._io.output
if not value in outputs:
raise AssertionError(
f"Output \"{value}\" is not in {str(outputs)}"
)
def run_application(self):
"""Käynnistää sovelluksen
"""
self._io.start()
def last_output_should_contain(self, value):
"""Tarkistaa viimeisen tulosteen
"""
if len(self._io.output) > 0:
last_output = self._io.output.pop()
else:
last_output = ""
if last_output != value:
raise AssertionError(
f"{value} is not in {last_output}"
)
def database_must_be_empty(self):
"""Tarkistaa onko tietokanta tyhjä
"""
if len(self._io.database) > 0:
raise AssertionError(
"Database is not empty"
)
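# --- A minimal usage sketch of the keyword library above, driven directly from
# Python instead of from a Robot Framework suite. The import path and the
# example strings are assumptions for illustration only.
from ReadingtipLibrary import ReadingtipLibrary

lib = ReadingtipLibrary()
lib.input("some user input")                 # queue one line of simulated input
lib.run_application()                        # run the app against the stubbed IO
lib.output_should_contain("expected text")   # raises AssertionError if missing
lib.database_must_be_empty()                 # raises if anything was stored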
| 0
| 0
| 0
|
a082368f583d73a70ea1e02f28ceaf3004ac72c2
| 1,500
|
py
|
Python
|
test/test_cv_conversions.py
|
proboscis/omni-cv-rules
|
0a3d4763c0c50bc32974a16531b3d0a1d396c05a
|
[
"MIT"
] | 2
|
2022-01-28T13:37:08.000Z
|
2022-03-03T20:29:20.000Z
|
test/test_cv_conversions.py
|
proboscis/omni-cv-rules
|
0a3d4763c0c50bc32974a16531b3d0a1d396c05a
|
[
"MIT"
] | null | null | null |
test/test_cv_conversions.py
|
proboscis/omni-cv-rules
|
0a3d4763c0c50bc32974a16531b3d0a1d396c05a
|
[
"MIT"
] | null | null | null |
import frozendict
from loguru import logger
from omni_converter import AutoDataFactory
from omni_converter.solver.astar import AstarSolver
from omni_cv_rules.coconut.omni_converter import AutoList
from omni_cv_rules.rulebook import CV_RULEBOOK
target_conversions=[
("[image_path]","numpy_rgb"),
("pix2pix_batch,nc=3","image,RGB,RGB"),
("torch,float32,CHW,RGB,0_1","base64"),
("torch,float32,CHW,RGB,0_1","widget"),
("numpy,float32,CHW,RGB,0_1","[image,L,L]"),
("numpy,float32,BCHW,RGB,0_1","[image,L,L]"),
("[numpy,float32,CHW,RGB,0_1]","[image,L,L]"),
("numpy,float32,BHW,L,None","numpy,float32,BHWC,RGB,None"),
(AutoList(frozendict.frozendict({'arrange': 'HWC', 'meta': frozendict.frozendict({'shape': (None, None, 1)}), 'type': 'numpy', 'dtype': 'float32', 'ch_rpr': 'L', 'v_range': 'None'})),
AutoList(frozendict.frozendict({'type': 'numpy', 'arrange': 'HWC', 'ch_rpr': 'LLL', 'meta': frozendict.frozendict({'shape': (None, None, 3)}), 'dtype': 'float32', 'v_range': 'None'})))
]
| 37.5
| 189
| 0.66
|
import frozendict
from loguru import logger
from omni_converter import AutoDataFactory
from omni_converter.solver.astar import AstarSolver
from omni_cv_rules.coconut.omni_converter import AutoList
from omni_cv_rules.rulebook import CV_RULEBOOK
target_conversions=[
("[image_path]","numpy_rgb"),
("pix2pix_batch,nc=3","image,RGB,RGB"),
("torch,float32,CHW,RGB,0_1","base64"),
("torch,float32,CHW,RGB,0_1","widget"),
("numpy,float32,CHW,RGB,0_1","[image,L,L]"),
("numpy,float32,BCHW,RGB,0_1","[image,L,L]"),
("[numpy,float32,CHW,RGB,0_1]","[image,L,L]"),
("numpy,float32,BHW,L,None","numpy,float32,BHWC,RGB,None"),
(AutoList(frozendict.frozendict({'arrange': 'HWC', 'meta': frozendict.frozendict({'shape': (None, None, 1)}), 'type': 'numpy', 'dtype': 'float32', 'ch_rpr': 'L', 'v_range': 'None'})),
AutoList(frozendict.frozendict({'type': 'numpy', 'arrange': 'HWC', 'ch_rpr': 'LLL', 'meta': frozendict.frozendict({'shape': (None, None, 3)}), 'dtype': 'float32', 'v_range': 'None'})))
]
def test_solver():
    solver = AstarSolver(
        heuristics=lambda x, y: 0,
        neighbors=CV_RULEBOOK,
        max_depth=100,
        silent=False
    )
    for tgt, dst in target_conversions:
        res = solver.solve(tgt, dst)
        #logger.info(res)
def test_auto():
    auto = AutoDataFactory(CV_RULEBOOK)
    #auto(None,None).solver.solve_cache.clear()
    for tgt, dst in target_conversions:
        c = auto(tgt, None).converter(dst)
        #logger.info(c)
| 426
| 0
| 46
|
cc7b17bf2ed9de52a8928eacc68c93e0d029b4b9
| 5,776
|
py
|
Python
|
src/train_triplet.py
|
yumatsuoka/triplet-net_label-spreading
|
7e7c1f31d3f22ab77ecdcb7eae1d959d0e9b7d36
|
[
"Apache-2.0"
] | null | null | null |
src/train_triplet.py
|
yumatsuoka/triplet-net_label-spreading
|
7e7c1f31d3f22ab77ecdcb7eae1d959d0e9b7d36
|
[
"Apache-2.0"
] | null | null | null |
src/train_triplet.py
|
yumatsuoka/triplet-net_label-spreading
|
7e7c1f31d3f22ab77ecdcb7eae1d959d0e9b7d36
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
train triplet net and get feature vectors
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import time
import argparse
# from tqdm import tqdm
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_mldata
import numpy as np
import chainer
from chainer import cuda, optimizers, serializers
import dump_vec
import triplet_net
if __name__ == '__main__':
st = time.clock()
s_time = time.time()
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', default=-1, type=int)
parser.add_argument('--epoch', default=40, type=int)
parser.add_argument('--batchsize', default=100, type=int)
parser.add_argument('--initmodel', default=0, type=int)
parser.add_argument('--resume', default=0, type=int)
parser.add_argument('--outputdim', default=2, type=int)
parser.add_argument('--n_train', default=1000, type=int)
parser.add_argument('--plot_dim', default=100, type=int)
parser.add_argument('--d_name', default='hoge', type=str)
args = parser.parse_args()
print('Create model')
model = triplet_net.Triplet_net(args.outputdim)
print('Check gpu')
if args.gpu >= 0:
cuda.check_cuda_available()
xp = cuda.cupy if args.gpu >= 0 else np
if args.gpu >= 0:
cuda.get_device(args.gpu).use()
model.to_gpu()
print('Load dataset')
ld_dict, unld_dict = get_mnist(args.n_train)
print('Setup optimizer')
optimizer = optimizers.Adam(alpha=0.0002)
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.WeightDecay(0.00001))
if args.initmodel:
model_dir = '../dump/{}_triplet.model'.format(args.d_name)
serializers.load_hdf5(model_dir, model)
if args.resume:
state_dir = '../dump/{}_triplet.state'.format(args.d_name)
serializers.load_hdf5(state_dir, optimizer)
print('training and test')
train_and_dump(model, optimizer, ld_dict, unld_dict, xp, args.batchsize,\
args.epoch, args.plot_dim, args.gpu, args.outputdim, args.d_name)
print('end')
print('elapsed time[m]:', (time.clock() - st)/60.0)
| 35.875776
| 100
| 0.631752
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
train triplet net and get feature vectors
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import time
import argparse
# from tqdm import tqdm
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_mldata
import numpy as np
import chainer
from chainer import cuda, optimizers, serializers
import dump_vec
import triplet_net
def train_and_dump(model, optimizer, labeled_data_dict, unlabeled_data_dict,\
xp, batchsize, epoch, plot_dim, gpu, outputdim, d_name):
x_train = labeled_data_dict['data']
y_train = labeled_data_dict['target']
loss_list = np.empty(epoch)
for itr in six.moves.range(1, epoch + 1):
# for itr in tqdm(six.moves.range(1, epoch + 1)):
print('epoch', itr)
xall_a, xall_p, xall_f = data_feed(x_train, y_train)
n_train = len(xall_a)
perm = np.random.permutation(n_train)
sum_train_loss = 0
for i in six.moves.range(0, n_train, batchsize):
x_a = xall_a[perm[i:i + batchsize]]
x_p = xall_p[perm[i:i + batchsize]]
x_f = xall_f[perm[i:i + batchsize]]
x_a = xp.asarray(x_a, dtype=xp.float32)
x_p = xp.asarray(x_p, dtype=xp.float32)
x_f = xp.asarray(x_f, dtype=xp.float32)
real_batchsize = len(x_a)
optimizer.zero_grads()
loss = model(x_a, x_p, x_f)
loss.backward()
optimizer.update()
sum_train_loss += float(cuda.to_cpu(loss.data)) * real_batchsize
print('train mean loss={}'.format(sum_train_loss / n_train))
loss_list[itr-1] = sum_train_loss / n_train
# print('dump model & optimizer')
# serializers.save_hdf5('../dump/triplet.model', model)
# serializers.save_hdf5('../dump/triplet.state', optimizer)
print('Make loss graph')
plt.clf()
plt.xlabel('weight update')
plt.ylabel('loss')
plt.plot(loss_list)
plt.savefig('../dump/{}_loss.png'.format(d_name))
print('dump feature vector')
dump_vec.dump_feature_vector(model, '../dump/{}_label'.format(d_name),\
labeled_data_dict, outputdim, batchsize, xp, gpu)
dump_vec.dump_feature_vector(model, '../dump/{}_unlabel'.format(d_name),\
unlabeled_data_dict, outputdim, batchsize, xp, gpu, plot_dim)
def get_mnist(n_data):
mnist = fetch_mldata('MNIST original')
r_data = mnist['data'].astype(np.float32)
r_label = mnist['target'].astype(np.int32)
# former 60,000 samples which is training data in MNIST.
perm = np.random.permutation(60000)
data = r_data[perm]
label = r_label[perm]
# split the data to training data(labeled data) and test data(unlabeled data)
ld_dict = {'data':data[:n_data].reshape((n_data, 1, 28, 28)) / 255.0,\
'target':label[:n_data]}
unld_dict = {'data':data[n_data:].reshape((60000-n_data, 1, 28, 28)) / 255.0,\
'target':label[n_data:]}
return ld_dict, unld_dict
def data_feed(train_data, train_label):
n_class = 10
nl = len(train_label)
nl_class = [len(np.where(train_label==c)[0]) for c in range(n_class)]
xa = np.asarray([train_data[idx] for c in range(n_class)
for i in range(nl-nl_class[c])
for idx in np.random.permutation(np.where(train_label==c)[0])])
xp = np.asarray([train_data[idx] for c in range(n_class)
for i in range(nl-nl_class[c])
for idx in np.random.permutation(np.where(train_label==c)[0])])
xf = np.asarray([train_data[idx] for c in range(n_class)
for i in range(nl_class[c])
for idx in np.random.permutation(np.where(train_label!=c)[0])])
return xa, xp, xf
if __name__ == '__main__':
st = time.clock()
s_time = time.time()
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', default=-1, type=int)
parser.add_argument('--epoch', default=40, type=int)
parser.add_argument('--batchsize', default=100, type=int)
parser.add_argument('--initmodel', default=0, type=int)
parser.add_argument('--resume', default=0, type=int)
parser.add_argument('--outputdim', default=2, type=int)
parser.add_argument('--n_train', default=1000, type=int)
parser.add_argument('--plot_dim', default=100, type=int)
parser.add_argument('--d_name', default='hoge', type=str)
args = parser.parse_args()
print('Create model')
model = triplet_net.Triplet_net(args.outputdim)
print('Check gpu')
if args.gpu >= 0:
cuda.check_cuda_available()
xp = cuda.cupy if args.gpu >= 0 else np
if args.gpu >= 0:
cuda.get_device(args.gpu).use()
model.to_gpu()
print('Load dataset')
ld_dict, unld_dict = get_mnist(args.n_train)
print('Setup optimizer')
optimizer = optimizers.Adam(alpha=0.0002)
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.WeightDecay(0.00001))
if args.initmodel:
model_dir = '../dump/{}_triplet.model'.format(args.d_name)
serializers.load_hdf5(model_dir, model)
if args.resume:
state_dir = '../dump/{}_triplet.state'.format(args.d_name)
serializers.load_hdf5(state_dir, optimizer)
print('training and test')
train_and_dump(model, optimizer, ld_dict, unld_dict, xp, args.batchsize,\
args.epoch, args.plot_dim, args.gpu, args.outputdim, args.d_name)
print('end')
print('elapsed time[m]:', (time.clock() - st)/60.0)
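# --- The training loss itself lives in triplet_net.Triplet_net (not shown in
# this file). As a reference, a common formulation is the triplet margin loss;
# the sketch below computes it with plain numpy on invented anchor/positive/
# negative embeddings, mirroring the (x_a, x_p, x_f) batches fed to the model.
import numpy as np

def triplet_margin_loss(anchor, positive, negative, margin=1.0):
    # mean over the batch of max(0, ||a - p||^2 - ||a - n||^2 + margin)
    d_pos = np.sum((anchor - positive) ** 2, axis=1)
    d_neg = np.sum((anchor - negative) ** 2, axis=1)
    return np.mean(np.maximum(0.0, d_pos - d_neg + margin))

rng = np.random.RandomState(0)
a, p, n = rng.randn(4, 2), rng.randn(4, 2), rng.randn(4, 2)   # toy 2-D embeddings
print(triplet_margin_loss(a, p, n))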
| 3,436
| 0
| 69
|
3becdb32e6fc5a6e1682833cf8b1b539ef46ae75
| 2,704
|
py
|
Python
|
pythonravil10/Main.py
|
thekupidman/pythonravil10
|
8bf069e78e75311de45b5792fc0b383e07c8bebc
|
[
"Unlicense"
] | null | null | null |
pythonravil10/Main.py
|
thekupidman/pythonravil10
|
8bf069e78e75311de45b5792fc0b383e07c8bebc
|
[
"Unlicense"
] | null | null | null |
pythonravil10/Main.py
|
thekupidman/pythonravil10
|
8bf069e78e75311de45b5792fc0b383e07c8bebc
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python3
# coding=utf-8
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import pyqtSlot,QTimer
from PyQt5 import uic,QtGui
import random
import math
if __name__=='__main__':
    app = QApplication(sys.argv)
    ex = Example()
    sys.exit(app.exec_())
| 25.509434
| 98
| 0.571006
|
#!/usr/bin/env python3
# coding=utf-8
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import pyqtSlot,QTimer
from PyQt5 import uic,QtGui
import random
import math
class Example(QWidget):
    def __init__(self):
        super().__init__()
        QWidget.__init__(self)
        self.initUI()
    def frontPos(self, x, y):
        self.frontX = x
        self.frontY = y
        self.label_front.move(self.frontX - 35, self.frontY - 35)
    def initUI(self):
        self.btn1 = QPushButton("GO!", self)
        self.setWindowIcon(QtGui.QIcon('img/logo.png'))
        self.label_back = QLabel(self)
        pixmap = QPixmap('img/t10_v1_back.png')
        self.label_back.setPixmap(pixmap)
        self.label_back.setGeometry(0, 0, 560, 560)
        self.label_front = QLabel(self)
        self.label_front.setStyleSheet("background-color:#111111;")
        self.label_front.setGeometry(0, 0, 70, 70)
        self.btn1.clicked.connect(self.btn1Click)
        self.btn1.setGeometry(10, 580, 130, 40)
        self.frontX = 0
        self.frontY = 0
        self.stepX = 0
        self.stepY = 0
        self.frontPos(40, 40)
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.timerEvent)
        self.setGeometry(300, 300, 560, 640)
        self.setWindowTitle('zadanie 10')
        self.show()
    def timerEvent(self):
        self.frontPos(self.frontX + self.stepX, self.frontY + self.stepY)
        if self.frontY <= 80 and self.frontX >= 280:
            self.stepX = 0
            self.stepY = 5
        if self.frontY >= 120 and self.frontY < 160 and self.frontX < 320:
            self.stepX = 5
            self.stepY = 0
        if self.frontY <= 160 and self.frontX >= 440:
            self.stepX = 0
            self.stepY = 5
        if self.frontY >= 520 and self.frontX >= 400:
            self.stepX = -5
            self.stepY = 0
        if self.frontY >= 480 and self.frontX >= 320 and self.frontX <= 360:
            self.stepX = 0
            self.stepY = -5
        if self.frontY > 240 and self.frontY <= 280 and self.frontX >= 320 and self.frontX <= 400:
            self.stepX = -5
            self.stepY = 0
        if self.frontY > 240 and self.frontX <= 40:
            self.stepX = 0
            self.stepY = 5
        if self.frontY >= 520 and self.frontX <= 80:
            self.stepX = 0
            self.stepY = 0
            self.timer.stop()
    def btn1Click(self):
        self.frontPos(40, 40)
        self.stepX = 5
        self.stepY = 0
        self.timer.start(20)
if __name__=='__main__':
    app = QApplication(sys.argv)
    ex = Example()
    sys.exit(app.exec_())
| 2,225
| 2
| 158
|
213c75f949c20ce93af983e60de6bcc7ff886b5b
| 1,497
|
py
|
Python
|
tools/make_wrapper_types_header.py
|
sarthak-saxena/cef_node_webkit
|
cca786066cdc635d2bcfb67315a70a1c40c5d77a
|
[
"BSD-3-Clause"
] | 1,686
|
2017-04-02T19:51:57.000Z
|
2022-03-31T10:08:40.000Z
|
tools/make_wrapper_types_header.py
|
sarthak-saxena/cef_node_webkit
|
cca786066cdc635d2bcfb67315a70a1c40c5d77a
|
[
"BSD-3-Clause"
] | 16
|
2017-04-21T12:05:52.000Z
|
2022-03-01T23:15:13.000Z
|
tools/make_wrapper_types_header.py
|
sarthak-saxena/cef_node_webkit
|
cca786066cdc635d2bcfb67315a70a1c40c5d77a
|
[
"BSD-3-Clause"
] | 343
|
2017-04-21T11:20:31.000Z
|
2022-03-31T07:47:25.000Z
|
# Copyright (c) 2015 The Chromium Embedded Framework Authors. All rights
# reserved. Use of this source code is governed by a BSD-style license that
# can be found in the LICENSE file.
from __future__ import absolute_import
from cef_parser import *
# test the module
if __name__ == "__main__":
import sys
# verify that the correct number of command-line arguments are provided
if len(sys.argv) < 2:
sys.stderr.write('Usage: ' + sys.argv[0] + ' <include_dir>\n')
sys.exit()
# create the header object
header = obj_header()
excluded_files = ['cef_api_hash.h', 'cef_application_mac.h', 'cef_version.h']
header.add_directory(sys.argv[1], excluded_files)
# dump the result to stdout
sys.stdout.write(make_wrapper_types_header(header))
| 29.352941
| 79
| 0.684035
|
# Copyright (c) 2015 The Chromium Embedded Framework Authors. All rights
# reserved. Use of this source code is governed by a BSD-style license that
# can be found in the LICENSE file.
from __future__ import absolute_import
from cef_parser import *
def make_wrapper_types_header(header):
    result = get_copyright()
    result += '#ifndef CEF_LIBCEF_DLL_WRAPPER_TYPES_H_\n' + \
              '#define CEF_LIBCEF_DLL_WRAPPER_TYPES_H_\n' + \
              '#pragma once\n\n' + \
              'enum CefWrapperType {\n' + \
              ' WT_BASE_REF_COUNTED = 1,\n' + \
              ' WT_BASE_SCOPED,\n'
    clsnames = sorted(header.get_class_names())
    for clsname in clsnames:
        result += ' ' + get_wrapper_type_enum(clsname) + ',\n'
    result += '\n WT_LAST\n'
    result += '};\n\n' + \
              '#endif // CEF_LIBCEF_DLL_WRAPPER_TYPES_H_'
    return result
def write_wrapper_types_header(header, file):
    newcontents = make_wrapper_types_header(header)
    return (file, newcontents)
# test the module
if __name__ == "__main__":
import sys
# verify that the correct number of command-line arguments are provided
if len(sys.argv) < 2:
sys.stderr.write('Usage: ' + sys.argv[0] + ' <include_dir>\n')
sys.exit()
# create the header object
header = obj_header()
excluded_files = ['cef_api_hash.h', 'cef_application_mac.h', 'cef_version.h']
header.add_directory(sys.argv[1], excluded_files)
# dump the result to stdout
sys.stdout.write(make_wrapper_types_header(header))
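# --- For orientation, the generated header has the overall shape shown below.
# The per-class entries (WT_BROWSER, WT_FRAME) are hypothetical examples; the
# real names come from get_wrapper_type_enum() over header.get_class_names(),
# and the file starts with the banner returned by cef_parser's get_copyright().
expected_shape = """\
#ifndef CEF_LIBCEF_DLL_WRAPPER_TYPES_H_
#define CEF_LIBCEF_DLL_WRAPPER_TYPES_H_
#pragma once

enum CefWrapperType {
 WT_BASE_REF_COUNTED = 1,
 WT_BASE_SCOPED,
 WT_BROWSER,
 WT_FRAME,

 WT_LAST
};

#endif // CEF_LIBCEF_DLL_WRAPPER_TYPES_H_"""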
| 687
| 0
| 46
|
6cfac4dc66e5ee5540d8165efeefd8941289e305
| 1,307
|
py
|
Python
|
authenticationApp/backends.py
|
George-Okumu/IReporter-Django
|
5962984ce0069cdf048dbf91686377568a7cf55b
|
[
"MIT"
] | null | null | null |
authenticationApp/backends.py
|
George-Okumu/IReporter-Django
|
5962984ce0069cdf048dbf91686377568a7cf55b
|
[
"MIT"
] | 1
|
2021-10-06T20:15:11.000Z
|
2021-10-06T20:15:11.000Z
|
authenticationApp/backends.py
|
George-Okumu/IReporter-Django
|
5962984ce0069cdf048dbf91686377568a7cf55b
|
[
"MIT"
] | null | null | null |
from os import path
from django.conf import settings
from rest_framework import exceptions, authentication
import jwt
from .models import CustomUser
| 39.606061
| 93
| 0.670237
|
from os import path
from django.conf import settings
from rest_framework import exceptions, authentication
import jwt
from .models import CustomUser
class JWTAuthentication(authentication.BaseAuthentication):
    def authenticate(self, request):
        auth_header = authentication.get_authorization_header(request)
        if not auth_header:
            return None
        auth_data = auth_header.decode('utf-8')
        auth_token = auth_data.split(" ")
        if len(auth_token) != 2:
            raise exceptions.AuthenticationFailed('Token format is incorrect')
        token = auth_token[1]
        try:
            payload = jwt.decode(token, key=settings.SECRET_KEY, algorithms="HS256")
            email = payload['email']
            user = CustomUser.objects.get(email=email)
            return (user, token)
        except jwt.DecodeError as identifier:
            raise exceptions.AuthenticationFailed("Unable to authenticate, token is invalid")
        except jwt.ExpiredSignatureError as identifier:
            raise exceptions.AuthenticationFailed("Token Expired, login again")
        except CustomUser.DoesNotExist as no_user:
            raise exceptions.AuthenticationFailed("User does not exist")
        return super().authenticate(request)
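# --- Standalone sketch of the token round trip this backend expects (PyJWT).
# The secret and payload below are invented for illustration; in the app the
# key is settings.SECRET_KEY and the client typically sends
# "Authorization: Bearer <token>", which authenticate() splits on the space.
import jwt

SECRET = "not-a-real-secret"
token = jwt.encode({"email": "user@example.com"}, SECRET, algorithm="HS256")
payload = jwt.decode(token, key=SECRET, algorithms=["HS256"])
print(payload["email"])   # user@example.com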
| 1,054
| 38
| 48
|
998f4a6842c75f878d5d80608bf11dd019ef2830
| 294
|
py
|
Python
|
src/hw_test/button_test.py
|
rundhall/ESP-LEGO-SPIKE-Simulator
|
dc83b895ff2aac5cf2fe576d0ba98426fea60827
|
[
"MIT"
] | null | null | null |
src/hw_test/button_test.py
|
rundhall/ESP-LEGO-SPIKE-Simulator
|
dc83b895ff2aac5cf2fe576d0ba98426fea60827
|
[
"MIT"
] | null | null | null |
src/hw_test/button_test.py
|
rundhall/ESP-LEGO-SPIKE-Simulator
|
dc83b895ff2aac5cf2fe576d0ba98426fea60827
|
[
"MIT"
] | null | null | null |
import time,machine
BUTTONPIN = 0
buttonpin = machine.Pin(BUTTONPIN, machine.Pin.IN, machine.Pin.PULL_UP)
buttonlast = 0
print("wait_until_pressed: Waits until the button is pressed.")
while buttonpin.value() == 1:
    print("push the button")
    time.sleep_ms(100)
print("button is pressed")
| 26.727273
| 71
| 0.744898
|
import time,machine
BUTTONPIN = 0
buttonpin = machine.Pin(BUTTONPIN, machine.Pin.IN, machine.Pin.PULL_UP)
buttonlast = 0
print("wait_until_pressed: Waits until the button is pressed.")
while buttonpin.value() == 1:
    print("push the button")
    time.sleep_ms(100)
print("button is pressed")
| 0
| 0
| 0
|
e9b8a70ff52561f8160b905d6c7d0f6adf09b3f2
| 9,945
|
py
|
Python
|
src/baseline/baseline_embeddings.py
|
fshdnc/disease_normalization
|
68b8fc118fe0f971fbd056ad2bffb44caa0e7abf
|
[
"Apache-2.0"
] | 1
|
2021-01-28T09:24:27.000Z
|
2021-01-28T09:24:27.000Z
|
src/baseline/baseline_embeddings.py
|
fshdnc/disease_normalization
|
68b8fc118fe0f971fbd056ad2bffb44caa0e7abf
|
[
"Apache-2.0"
] | 1
|
2019-07-08T03:25:30.000Z
|
2019-12-13T08:33:55.000Z
|
src/baseline/baseline_embeddings.py
|
fshdnc/disease_normalization
|
68b8fc118fe0f971fbd056ad2bffb44caa0e7abf
|
[
"Apache-2.0"
] | null | null | null |
'''Word2Vec Baseline
python3 baseline_embeddings.py path_to_embedding'''
import logging
import logging.config
import configparser as cp
#import args
import sys
import pickle
import numpy as np
import vectorizer
import load
import sample
#configurations
config = cp.ConfigParser(strict=False)
config.read('defaults.cfg')
#argparser
#args = args.get_args()
'''
>>> args.train
False
'''
#logging
logger = logging.getLogger(__name__)
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
},
},
'handlers': {
'default': {
'level':'INFO',
'formatter': 'standard',
'class':'logging.StreamHandler',
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': 'INFO',
'propagate': True
}
}
})
# word embedding
from gensim.models import KeyedVectors
def prepare_embedding_vocab(filename, binary = True, limit = 1000000):
'''filename: '~/disease-normalization/data/embeddings/wvec_50_haodi-li-et-al.bin'
1. Use gensim for reading in embedding model
2. Sort based on the index to make sure that they are in the correct order
3. Normalize the vectors
4. Build vocabulary mappings, zero for padding
5. Create an inverse dictionary
'''
vector_model = KeyedVectors.load_word2vec_format(filename, binary = binary, limit = limit)
#vector_model=KeyedVectors.load_word2vec_format(config['embedding']['emb_file'], binary=True, limit=50000)
words = [k for k,v in sorted(vector_model.vocab.items(),key = lambda x:x[1].index)]
vector_model.init_sims(replace = True)
vocabulary={"<SPECIAL>": 0, "<OOV>": 1}
for word in words:
vocabulary.setdefault(word, len(vocabulary))
inversed_vocabulary={value:key for key, value in vocabulary.items()}
return vector_model, vocabulary, inversed_vocabulary
def load_pretrained_word_embeddings(vocab,embedding_model):
"""vocab: vocabulary from data vectorizer
embedding_model: model loaded with gensim"""
pretrained_embeddings = np.random.uniform(low=-0.05, high=0.05, size=(len(vocab)-1,embedding_model.vectors.shape[1]))
pretrained_embeddings = np.vstack((np.zeros(shape=(1,embedding_model.vectors.shape[1])), pretrained_embeddings))
found=0
for word,idx in vocab.items():
if word in embedding_model.vocab:
pretrained_embeddings[idx]=embedding_model.get_vector(word)
found+=1
logger.info("Found pretrained vectors for {found} words.".format(found=found))
return pretrained_embeddings
if __name__ == '__main__':
    emb_baseline(sys.argv[1])
    #normalize(sys.argv[1], sys.argv[2])
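# --- Minimal usage sketch of the two helpers above. The embedding path is a
# placeholder, and the calls assume the older gensim API used in this file
# (KeyedVectors.vocab, init_sims).
vector_model, vocabulary, inversed_vocabulary = prepare_embedding_vocab(
    "path/to/embeddings.bin", binary=True, limit=100000)
pretrained = load_pretrained_word_embeddings(vocabulary, vector_model)
# Row 0 stays all zeros (padding), row 1 is the random <OOV> vector, and every
# word found in the embedding model gets its (normalized) pretrained vector.
print(pretrained.shape)   # (len(vocabulary), embedding_dim)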
| 39.153543
| 159
| 0.668477
|
'''Word2Vec Baseline
python3 baseline_embeddings.py path_to_embedding'''
import logging
import logging.config
import configparser as cp
#import args
import sys
import pickle
import numpy as np
import vectorizer
import load
import sample
#configurations
config = cp.ConfigParser(strict=False)
config.read('defaults.cfg')
#argparser
#args = args.get_args()
'''
>>> args.train
False
'''
#logging
logger = logging.getLogger(__name__)
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
},
},
'handlers': {
'default': {
'level':'INFO',
'formatter': 'standard',
'class':'logging.StreamHandler',
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': 'INFO',
'propagate': True
}
}
})
# word embedding
from gensim.models import KeyedVectors
def prepare_embedding_vocab(filename, binary = True, limit = 1000000):
'''filename: '~/disease-normalization/data/embeddings/wvec_50_haodi-li-et-al.bin'
1. Use gensim for reading in embedding model
2. Sort based on the index to make sure that they are in the correct order
3. Normalize the vectors
4. Build vocabulary mappings, zero for padding
5. Create an inverse dictionary
'''
vector_model = KeyedVectors.load_word2vec_format(filename, binary = binary, limit = limit)
#vector_model=KeyedVectors.load_word2vec_format(config['embedding']['emb_file'], binary=True, limit=50000)
words = [k for k,v in sorted(vector_model.vocab.items(),key = lambda x:x[1].index)]
vector_model.init_sims(replace = True)
vocabulary={"<SPECIAL>": 0, "<OOV>": 1}
for word in words:
vocabulary.setdefault(word, len(vocabulary))
inversed_vocabulary={value:key for key, value in vocabulary.items()}
return vector_model, vocabulary, inversed_vocabulary
def load_pretrained_word_embeddings(vocab,embedding_model):
"""vocab: vocabulary from data vectorizer
embedding_model: model loaded with gensim"""
pretrained_embeddings = np.random.uniform(low=-0.05, high=0.05, size=(len(vocab)-1,embedding_model.vectors.shape[1]))
pretrained_embeddings = np.vstack((np.zeros(shape=(1,embedding_model.vectors.shape[1])), pretrained_embeddings))
found=0
for word,idx in vocab.items():
if word in embedding_model.vocab:
pretrained_embeddings[idx]=embedding_model.get_vector(word)
found+=1
logger.info("Found pretrained vectors for {found} words.".format(found=found))
return pretrained_embeddings
def emb_baseline(emb_path):
#vector_model, vocabulary, inversed_vocabulary = prepare_embedding_vocab('/home/lenz/disease-normalization/data/embeddings/wvec_200_win-30_chiu-et-al.bin')
vector_model, vocabulary, inversed_vocabulary = prepare_embedding_vocab(emb_path, binary = True)
pretrained = load_pretrained_word_embeddings(vocabulary, vector_model)
# MEDIC dictionary
dictionary = load.Terminology()
# dictionary of entries, key = canonical id, value = named tuple in the form of
# MEDIC_ENTRY(DiseaseID='MESH:D005671', DiseaseName='Fused Teeth',
# AllDiseaseIDs=('MESH:D005671',), AllNames=('Fused Teeth', 'Teeth, Fused')
dictionary.loaded = load.load(config['terminology']['dict_file'],'MEDIC')
import vectorizer
dictionary.no_cangen_tokenized = vectorizer.MEDIC_dict_tokenizer_no_cangen(dictionary.loaded,config['methods']['tokenizer'])
dictionary.no_cangen_vectorized = vectorizer.MEDIC_dict_vectorizer_no_cangen(dictionary.no_cangen_tokenized,vocabulary)
# concepts
concept_ids = [] # list of all concept ids
concept_all_ids = [] # list of (lists of all concept ids with alt IDs)
concept_names = [] # list of all names, same length as concept_ids
concept_map = {} # names as keys, ids as concepts
for k in dictionary.loaded.keys(): # keys should be in congruent order
c_id = dictionary.loaded[k].DiseaseID
a_ids = dictionary.loaded[k].AllDiseaseIDs
if int(config['settings']['all_names']):
for n in dictionary.loaded[k].AllNames:
concept_ids.append(c_id)
concept_all_ids.append(a_ids)
concept_names.append(n)
if n in concept_map: # one name corresponds to multiple concepts
concept_map[n].append(c_id)
# logger.warning('{0} already in the dictionary with id {1}'.format(n,concept_map[n]))
else:
concept_map[n] = [c_id]
else:
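            # note: DiseaseName is a single string, so this loop iterates over its
            # characters (unlike the AllNames tuple in the branch above)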
for n in dictionary.loaded[k].DiseaseName:
concept_ids.append(c_id)
concept_all_ids.append(a_ids)
concept_names.append(n)
if n in concept_map: # one name corresponds to multiple concepts
concept_map[n].append(c_id)
# logger.warning('{0} already in the dictionary with id {1}'.format(n,concept_map[n]))
else:
concept_map[n] = [c_id]
# save the stuff to object
concept = sample.NewDataSet('concepts')
concept.ids = concept_ids
concept.all_ids = concept_all_ids
concept.names = concept_names
concept.map = concept_map
#concept_vectorize = np.array([dictionary.no_cangen_vectorized[k] for k in concept.ids])
# corpus
#corpus_train = sample.NewDataSet('training corpus')
#corpus_train.objects = load.load(config['corpus']['training_file'],'NCBI')
corpus_dev = sample.NewDataSet('dev corpus')
corpus_dev.objects = load.load(config['corpus']['development_file'],'NCBI')
#corpus_test = sample.NewDataSet('test corpus')
#corpus_test.objects = load.load('/home/lhchan/disease_normalization/data/NCBItestset_corpus.txt','NCBI')
#corpus_dev=corpus_test
for corpus in [corpus_dev]:
mention_ids = [] # list of all ids (gold standard for each mention)
mention_names = [] # list of all names
mention_all = [] # list of tuples (mention_text,gold,context,(start,end,docid))
#sth wrong here that sometimes throw an error
#import pdb;pdb.set_trace()
for abstract in corpus.objects:
for section in abstract.sections: # title and abstract
for mention in section.mentions:
nor_ids = [sample._nor_id(one_id) for one_id in mention.id]
mention_ids.append(nor_ids) # append list of ids, usually len(list)=1
mention_names.append(mention.text)
mention_all.append((mention.text,nor_ids,section.text,(mention.start,mention.end,abstract.docid)))
# tokenization & vectorization of mentions
#mention_tokenize = [nltk.word_tokenize(name) for name in mention_names]
#mention_vectorize = np.array([[vocabulary.get(text,1) for text in mention] for mention in mention_tokenize])
# mention_elmo = elmo_default([mention_names])
corpus.ids = mention_ids
corpus.names = mention_names
corpus.all = mention_all
# corpus.tokenize = mention_tokenize
# corpus.vectorize = mention_vectorize
# corpus.elmo = mention_elmo
# vector representations
import nltk
mention_embeddings = []
for mention in corpus.names:
tokenized = nltk.word_tokenize(mention.lower())
index = [vocabulary.get(token,1) for token in tokenized]
#emb = np.mean(np.array([pretrained[i] for i in index]), axis=0)
emb = np.sum(np.array([pretrained[i] for i in index]), axis=0)
mention_embeddings.append(emb)
mention_embeddings = np.array(mention_embeddings)
concept_embeddings = []
for mention in concept.names:
tokenized = nltk.word_tokenize(mention.lower())
index = [vocabulary.get(token,1) for token in tokenized]
#emb = np.mean(np.array([pretrained[i] for i in index]), axis=0)
emb = np.sum(np.array([pretrained[i] for i in index]), axis=0)
concept_embeddings.append(emb)
concept_embeddings = np.array(concept_embeddings)
'''
from vectorizer_elmo import elmo_default
# chunk the concepts down since the list is too big
concept_chunk = [concept.names[i:i + 5000] for i in range(0, len(concept.names), 5000)]
concept.elmo = []
for chunk in concept_chunk:
[elmo_chunk] = [c for c in elmo_default([chunk])]
concept.elmo.append(elmo_chunk)
[concept.elmo] = [chunk for chunk in elmo_default([concept_chunk])]
#with open('gitig_concept_elmo.pickle','wb') as f:
# pickle.dump(concept.elmo,f,protocol=4)
#concept.elmo = pickle.load(open('gitig_concept_elmo.pickle','rb'))
concept.elmo = np.array([item for sublist in concept.elmo for item in sublist])
[corpus_dev.elmo] = [chunk for chunk in elmo_default([corpus_dev.names])]
'''
concept_emb = concept_embeddings #concept.elmo
mention_emb = mention_embeddings #corpus_dev.elmo
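    # L2-normalise both matrices so the dot product below equals cosine similarity,
    # then take the closest concept name for each mention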
from sklearn.preprocessing import normalize
nor_concepts = normalize(concept_emb)
nor_corpus_dev = normalize(mention_emb)
dot_product_matrix = np.dot(nor_corpus_dev,np.transpose(nor_concepts))
prediction_indices = np.argmax(dot_product_matrix,axis=1)
predictions = np.array(concept.ids)[prediction_indices].tolist()
correct = 0
#incorrect = 0
#incorrect_indices = []
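    # a mention only counts as correct when it has exactly one gold id and the
    # top-ranked concept id matches it; multi-id mentions are always counted as misses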
for prediction, mention_gold in zip(predictions,corpus_dev.ids):
if prediction == mention_gold[0] and len(mention_gold)==1:
correct += 1
print('Accuracy:{0}'.format(correct/len(corpus_dev.names)))
#[1] if men[0] in can and len(men)==1 else [0]
if __name__ == '__main__':
emb_baseline(sys.argv[1])
#normalize(sys.argv[1], sys.argv[2])
| 7,077
| 0
| 23
|
cd57085bc6d677111edf3ff6f26a0fb5115f0d21
| 2,336
|
py
|
Python
|
src/main.py
|
divelab/gunet
|
570dc99fca7a258cb8d38247c3eb34d5cec4201e
|
[
"MIT"
] | 7
|
2019-06-26T22:06:07.000Z
|
2020-09-02T05:04:34.000Z
|
src/main.py
|
divelab/gunet
|
570dc99fca7a258cb8d38247c3eb34d5cec4201e
|
[
"MIT"
] | null | null | null |
src/main.py
|
divelab/gunet
|
570dc99fca7a258cb8d38247c3eb34d5cec4201e
|
[
"MIT"
] | null | null | null |
import argparse
import random
import time
import torch
import numpy as np
from network import GNet
from trainer import Trainer
from utils.data_loader import FileLoader
if __name__ == "__main__":
main()
| 36.5
| 79
| 0.671233
|
import argparse
import random
import time
import torch
import numpy as np
from network import GNet
from trainer import Trainer
from utils.data_loader import FileLoader
def get_args():
    parser = argparse.ArgumentParser(description='Args for graph prediction')
parser.add_argument('-seed', type=int, default=1, help='seed')
parser.add_argument('-data', default='DD', help='data folder name')
parser.add_argument('-fold', type=int, default=1, help='fold (1..10)')
parser.add_argument('-num_epochs', type=int, default=2, help='epochs')
parser.add_argument('-batch', type=int, default=8, help='batch size')
parser.add_argument('-lr', type=float, default=0.001, help='learning rate')
parser.add_argument('-deg_as_tag', type=int, default=0, help='1 or degree')
parser.add_argument('-l_num', type=int, default=3, help='layer num')
parser.add_argument('-h_dim', type=int, default=512, help='hidden dim')
parser.add_argument('-l_dim', type=int, default=48, help='layer dim')
parser.add_argument('-drop_n', type=float, default=0.3, help='drop net')
parser.add_argument('-drop_c', type=float, default=0.2, help='drop output')
parser.add_argument('-act_n', type=str, default='ELU', help='network act')
parser.add_argument('-act_c', type=str, default='ELU', help='output act')
parser.add_argument('-ks', nargs='+', type=float, default='0.9 0.8 0.7')
parser.add_argument('-acc_file', type=str, default='re', help='acc file')
args, _ = parser.parse_known_args()
return args
def set_random(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
def app_run(args, G_data, fold_idx):
G_data.use_fold_data(fold_idx)
net = GNet(G_data.feat_dim, G_data.num_class, args)
trainer = Trainer(args, net, G_data)
trainer.train()
def main():
args = get_args()
print(args)
set_random(args.seed)
start = time.time()
G_data = FileLoader(args).load_data()
print('load data using ------>', time.time()-start)
if args.fold == 0:
for fold_idx in range(10):
print('start training ------> fold', fold_idx+1)
app_run(args, G_data, fold_idx)
else:
print('start training ------> fold', args.fold)
app_run(args, G_data, args.fold-1)
if __name__ == "__main__":
main()
| 2,032
| 0
| 92
|
479934381a06e295d8434572902c2bdc5cf55ae9
| 52
|
py
|
Python
|
src/__init__.py
|
treyhunner/pysource
|
ae54b39cadd4fa108a42f01512fbb07c29585e48
|
[
"MIT"
] | null | null | null |
src/__init__.py
|
treyhunner/pysource
|
ae54b39cadd4fa108a42f01512fbb07c29585e48
|
[
"MIT"
] | null | null | null |
src/__init__.py
|
treyhunner/pysource
|
ae54b39cadd4fa108a42f01512fbb07c29585e48
|
[
"MIT"
] | null | null | null |
from src.pysource import get_callable, print_source
| 26
| 51
| 0.865385
|
from src.pysource import get_callable, print_source
| 0
| 0
| 0
|
21d6d6909499248ae9cc2c10b507830129a742ea
| 3,993
|
py
|
Python
|
qdsl/boolean.py
|
csams/qdsl
|
b80c996ad21f5604372622755da9d5b6dc9d35b2
|
[
"Apache-2.0"
] | 2
|
2020-11-05T21:48:00.000Z
|
2020-12-18T03:13:02.000Z
|
qdsl/boolean.py
|
csams/qdsl
|
b80c996ad21f5604372622755da9d5b6dc9d35b2
|
[
"Apache-2.0"
] | null | null | null |
qdsl/boolean.py
|
csams/qdsl
|
b80c996ad21f5604372622755da9d5b6dc9d35b2
|
[
"Apache-2.0"
] | 1
|
2021-09-28T08:22:35.000Z
|
2021-09-28T08:22:35.000Z
|
"""
The boolean module lets you create complicated boolean expressions by composing
objects. The compositions can be evaluated against multiple values.
"""
import logging
import operator
import re
from functools import partial, wraps
from itertools import count
log = logging.getLogger(__name__)
__all__ = [
"pred",
"pred2",
"flip",
"TRUE",
"FALSE",
"flip",
"pred",
"pred2",
"lt",
"le",
"eq",
"ge",
"gt",
"isin",
"contains",
"search",
"matches",
"startswith",
"endswith",
]
# Optimization: generate regular python functions from the AST.
# This "compilation" takes microseconds.
class Predicate(Boolean):
""" Calls a function to determine truth value. """
pred = Predicate
def flip(f):
"""
Switches position of the first two arguments to f and ensures
its result is a bool.
"""
    @wraps(f)
    def inner(a, b, *args, **kwargs):
        return bool(f(b, a, *args, **kwargs))
    return inner
TRUE = TRUE()
FALSE = FALSE()
lt = pred2(operator.lt)
le = pred2(operator.le)
eq = pred2(operator.eq)
ge = pred2(operator.ge)
gt = pred2(operator.gt)
isin = pred2(flip(operator.contains))
contains = pred2(operator.contains)
search = pred2(flip(re.search))
matches = search
startswith = pred2(str.startswith)
endswith = pred2(str.endswith)
| 21.819672
| 79
| 0.574255
|
"""
The boolean module lets you create complicated boolean expressions by composing
objects. The compositions can be evaluated against multiple values.
"""
import logging
import operator
import re
from functools import partial, wraps
from itertools import count
log = logging.getLogger(__name__)
__all__ = [
"pred",
"pred2",
"flip",
"TRUE",
"FALSE",
"flip",
"pred",
"pred2",
"lt",
"le",
"eq",
"ge",
"gt",
"isin",
"contains",
"search",
"matches",
"startswith",
"endswith",
]
class Boolean:
def test(self, value):
raise NotImplementedError()
def __and__(self, other):
return All(self, other)
def __or__(self, other):
return Any(self, other)
def __invert__(self):
return Not(self)
# Optimization: generate regular python functions from the AST.
# This "compilation" takes microseconds.
def to_pyfunc(self):
env = {
"log": log,
"logging": logging
}
ids = count()
def expr(b):
if isinstance(b, All):
return "(" + " and ".join(expr(p) for p in b.predicates) + ")"
elif isinstance(b, Any):
return "(" + " or ".join(expr(p) for p in b.predicates) + ")"
elif isinstance(b, Not):
return "(" + "not " + expr(b.predicate) + ")"
elif isinstance(b, Predicate):
num = next(ids)
func = f"func_{num}"
args = f"args_{num}"
kwargs = f"kwargs_{num}"
env[func] = b.predicate
env[args] = b.args
env[kwargs] = b.kwargs
return func + "(value, " + "*" + args + ", **" + kwargs + ")"
func = f"""
def predicate(value):
try:
return {expr(self)}
except Exception as ex:
if log.isEnabledFor(logging.DEBUG):
log.debug(ex)
return False
"""
if log.isEnabledFor(logging.DEBUG):
log.debug(func)
exec(func, env, env)
return env["predicate"]
class Any(Boolean):
def __init__(self, *predicates):
self.predicates = predicates
def test(self, value):
return any(predicate.test(value) for predicate in self.predicates)
class All(Boolean):
def __init__(self, *predicates):
self.predicates = predicates
def test(self, value):
return all(predicate.test(value) for predicate in self.predicates)
class Not(Boolean):
def __init__(self, predicate):
self.predicate = predicate
def test(self, value):
return not self.predicate.test(value)
class Predicate(Boolean):
""" Calls a function to determine truth value. """
def __init__(self, predicate, *args, **kwargs):
self.predicate = predicate
self.args = args
self.kwargs = kwargs
def test(self, value):
try:
return self.predicate(value, *self.args, **self.kwargs)
except Exception as ex:
if log.isEnabledFor(logging.DEBUG):
log.debug(ex)
return False
pred = Predicate
def pred2(predicate, *args, **kwargs):
return partial(Predicate, predicate)
def flip(f):
"""
Switches position of the first two arguments to f and ensures
its result is a bool.
"""
@wraps(f)
def inner(a, b, *args, **kwargs):
return bool(f(b, a, *args, **kwargs))
return inner
class TRUE(Boolean):
def test(self, value):
return True
class FALSE(Boolean):
def test(self, value):
return False
TRUE = TRUE()
FALSE = FALSE()
lt = pred2(operator.lt)
le = pred2(operator.le)
eq = pred2(operator.eq)
ge = pred2(operator.ge)
gt = pred2(operator.gt)
isin = pred2(flip(operator.contains))
contains = pred2(operator.contains)
search = pred2(flip(re.search))
matches = search
startswith = pred2(str.startswith)
endswith = pred2(str.endswith)
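# --- editor's usage sketch, not part of the original module ---
# A minimal demonstration of composing the predicates defined above with &, | and ~;
# to_pyfunc() compiles the same expression tree into a plain Python function.
if __name__ == "__main__":
    teen = ge(13) & lt(20)           # All(ge 13, lt 20): 13 <= value < 20
    vowel = isin("aeiou")            # membership test via the flipped `contains`
    assert teen.test(15) and not (~teen).test(15)
    assert (teen | vowel).test("a")  # the comparison fails silently, membership succeeds
    assert teen.to_pyfunc()(15)      # the compiled form gives the same answer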
| 2,146
| -14
| 585
|
0e501240ff928cf0aab6e8c990701343605ccca5
| 930
|
py
|
Python
|
check_params_decorate.py
|
MarionYoung/python
|
d2ed5be63de344cb689fe00c04cf75f89d91f28a
|
[
"Apache-2.0"
] | null | null | null |
check_params_decorate.py
|
MarionYoung/python
|
d2ed5be63de344cb689fe00c04cf75f89d91f28a
|
[
"Apache-2.0"
] | null | null | null |
check_params_decorate.py
|
MarionYoung/python
|
d2ed5be63de344cb689fe00c04cf75f89d91f28a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/8/4 14:15
# @Author : Marrion
#
import functools,inspect
# the @check-decorated add(x, y: int) -> int definition is elided in this copy;
# see the full source below
print(add(3,4))
| 35.769231
| 109
| 0.604301
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/8/4 14:15
# @Author : Marrion
#
import functools,inspect
def check(fn):
@functools.wraps(fn)
def warp(*args,**kwargs):
sig = inspect.signature(fn)
parms = inspect.signature(fn).parameters
for k,v in kwargs.items():
parm_type = parms[k].annotation
if parm_type != inspect._empty and not isinstance(v,parm_type):
raise TypeError('parameter {} required {},but {}'.format(k,parms[k].annotation,type(v)))
for idx,v in enumerate(args):
parm = list(parms.values())[idx]
if parm.annotation != inspect._empty and not isinstance(v,parm.annotation):
raise TypeError('parameter {} required {},but {}'.format(parm.name, parm.annotation,type(v)))
return fn(*args,**kwargs)
return warp
@check
def add(x,y:int)->int:
return x + y
print(add(3,4))
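# editor's note: calling add(3, 'x') instead would raise
#   TypeError: parameter y required <class 'int'>,but <class 'str'>
# because @check validates every argument against its annotation before the call.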
| 742
| 0
| 44
|
ea323d4b5d93a223c92508e50138297ba3a7ebf9
| 1,426
|
py
|
Python
|
torcms/model/classify_model.py
|
bukun/TorCMS
|
5d7480865fd46e706b84f5f65a5c24cd03bb2142
|
[
"MIT"
] | 243
|
2015-02-11T03:22:19.000Z
|
2022-03-02T11:13:27.000Z
|
torcms/model/classify_model.py
|
bukun/TorCMS
|
5d7480865fd46e706b84f5f65a5c24cd03bb2142
|
[
"MIT"
] | 8
|
2015-09-09T10:49:52.000Z
|
2020-08-30T08:52:48.000Z
|
torcms/model/classify_model.py
|
bukun/TorCMS
|
5d7480865fd46e706b84f5f65a5c24cd03bb2142
|
[
"MIT"
] | 101
|
2015-02-12T02:17:16.000Z
|
2021-11-19T09:20:10.000Z
|
# -*- coding:utf-8 -*-
'''
Model for classify.
'''
from config import CMS_CFG
from torcms.model.core_tab import TabPost, TabPost2Tag, TabTag
class MClassify():
'''
Model for classify.
'''
    # four @staticmethod helpers (query_pager_by_classify, count_of_certain,
    # query_pager_by_classify_all, count_of_classify) are elided in this copy;
    # see the full source below
| 27.960784
| 75
| 0.587658
|
# -*- coding:utf-8 -*-
'''
Model for classify.
'''
from config import CMS_CFG
from torcms.model.core_tab import TabPost, TabPost2Tag, TabTag
class MClassify():
'''
Model for classify.
'''
def __init__(self):
super().__init__()
@staticmethod
def query_pager_by_classify(current_page_num=1):
recs = TabTag.select().where(TabTag.uid.endswith("00")).order_by(
TabTag.uid).paginate(current_page_num, CMS_CFG['list_num'])
return recs
@staticmethod
def count_of_certain():
recs = TabTag.select().where(TabTag.uid.endswith("00"))
return recs.count()
@staticmethod
def query_pager_by_classify_all():
recs = TabTag.select().where(TabTag.uid.endswith("00")).order_by(
TabTag.uid)
return recs
@staticmethod
def count_of_classify(tagid):
if tagid.endswith('00'):
recs = TabPost.select().join(
TabPost2Tag, on=(TabPost2Tag.post_id == TabPost.uid)).join(
TabTag, on=(TabPost2Tag.tag_id == TabTag.uid)).where(
TabTag.uid.startswith(tagid[:2]))
else:
recs = TabPost.select().join(
TabPost2Tag, on=(TabPost2Tag.post_id == TabPost.uid)).join(
TabTag, on=(TabPost2Tag.tag_id == TabTag.uid)).where(
TabTag.uid == tagid)
return recs.count()
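# --- editor's usage sketch, not part of the original module (assumes the TorCMS
# peewee tables are initialised) ---
#   MClassify.query_pager_by_classify(current_page_num=1)  # first page of top-level tags (uid ends with '00')
#   MClassify.count_of_classify(some_tag_uid)              # posts under a tag, or a whole branch if the uid ends with '00'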
| 1,017
| 0
| 130
|
8469b52da50e995d6953b3f007086e6d9fc96c76
| 1,082
|
py
|
Python
|
frappe-bench/env/lib/python2.7/site-packages/cli_helpers/tabular_output/terminaltables_adapter.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/env/lib/python2.7/site-packages/cli_helpers/tabular_output/terminaltables_adapter.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/env/lib/python2.7/site-packages/cli_helpers/tabular_output/terminaltables_adapter.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Format adapter for the terminaltables module."""
import terminaltables
import itertools
from cli_helpers.utils import filter_dict_by_key
from .preprocessors import (convert_to_string, override_missing_value,
style_output)
supported_formats = ('ascii', 'double', 'github')
preprocessors = (override_missing_value, convert_to_string, style_output)
def adapter(data, headers, table_format=None, **kwargs):
"""Wrap terminaltables inside a function for TabularOutputFormatter."""
keys = ('title', )
table_format_handler = {
'ascii': terminaltables.AsciiTable,
'double': terminaltables.DoubleTable,
'github': terminaltables.GithubFlavoredMarkdownTable,
}
table = table_format_handler[table_format]
t = table([headers] + list(data), **filter_dict_by_key(kwargs, keys))
dimensions = terminaltables.width_and_alignment.max_dimensions(
t.table_data,
t.padding_left,
t.padding_right)[:3]
for r in t.gen_table(*dimensions):
yield u''.join(r)
| 30.914286
| 75
| 0.698706
|
# -*- coding: utf-8 -*-
"""Format adapter for the terminaltables module."""
import terminaltables
import itertools
from cli_helpers.utils import filter_dict_by_key
from .preprocessors import (convert_to_string, override_missing_value,
style_output)
supported_formats = ('ascii', 'double', 'github')
preprocessors = (override_missing_value, convert_to_string, style_output)
def adapter(data, headers, table_format=None, **kwargs):
"""Wrap terminaltables inside a function for TabularOutputFormatter."""
keys = ('title', )
table_format_handler = {
'ascii': terminaltables.AsciiTable,
'double': terminaltables.DoubleTable,
'github': terminaltables.GithubFlavoredMarkdownTable,
}
table = table_format_handler[table_format]
t = table([headers] + list(data), **filter_dict_by_key(kwargs, keys))
dimensions = terminaltables.width_and_alignment.max_dimensions(
t.table_data,
t.padding_left,
t.padding_right)[:3]
for r in t.gen_table(*dimensions):
yield u''.join(r)
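# --- editor's usage sketch, not part of the original module ---
# adapter() yields the rendered table line by line:
#   lines = adapter([['a', '1'], ['b', '2']], ['key', 'value'], table_format='github')
#   print('\n'.join(lines))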
| 0
| 0
| 0
|
201b27541ad73178629e7ff21ae7b88cdb35af68
| 1,844
|
py
|
Python
|
licensePlates.py
|
matspi/CoolLicensePlates
|
0276c9c62afbe928066590c2ac73805d4752c332
|
[
"MIT"
] | null | null | null |
licensePlates.py
|
matspi/CoolLicensePlates
|
0276c9c62afbe928066590c2ac73805d4752c332
|
[
"MIT"
] | null | null | null |
licensePlates.py
|
matspi/CoolLicensePlates
|
0276c9c62afbe928066590c2ac73805d4752c332
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import sys
import requests
from google.cloud import translate
REGIST_URL = r"https://www.berlin.de/daten/liste-der-kfz-kennzeichen/kfz-kennz-d.csv"
NUMBERS = (
"null",
"eins",
"zwei",
"drei",
"vier",
"fünf",
"sechs",
"sieben",
"acht",
"neun"
)
csvData = requests.get(REGIST_URL)
csvData.encoding = "utf-8"
PREFIXES = {k.lower(): v for k, v in (
line.split(",")[0:2] for line in csvData.text.split("\r\n")[1:-1])} # first and last line contain garbage
translateClient = translate.Client()
languages = translateClient.get_languages()
result = {}
for lang in languages:
langCode = lang["language"]
if langCode == "de":
continue
translated = [i["translatedText"].lower() for i in translateClient.translate(
NUMBERS[1:], target_language=langCode, source_language="de")]
findMatches(lang["name"], translated)
findMatches("German", NUMBERS[1:])
print("%d results" % len(result))
for lang in result:
print("\n##", lang, "\n")
for res in result[lang]:
print(" - %s - %s %d* %s" % res)
| 26.724638
| 109
| 0.590564
|
#!/usr/bin/env python3
import sys
import requests
from google.cloud import translate
REGIST_URL = r"https://www.berlin.de/daten/liste-der-kfz-kennzeichen/kfz-kennz-d.csv"
NUMBERS = (
"null",
"eins",
"zwei",
"drei",
"vier",
"fünf",
"sechs",
"sieben",
"acht",
"neun"
)
csvData = requests.get(REGIST_URL)
csvData.encoding = "utf-8"
PREFIXES = {k.lower(): v for k, v in (
line.split(",")[0:2] for line in csvData.text.split("\r\n")[1:-1])} # first and last line contain garbage
translateClient = translate.Client()
languages = translateClient.get_languages()
result = {}
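# editor's note: findMatches scans each number word (skipping words longer than 5
# characters) for the form <licence-plate prefix> + <1-2 ASCII characters>; every hit
# is recorded under the language name in `result` as (prefix, suffix, number value, district).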
def findMatches(language, numbers):
for idx in range(0, len(numbers)):
foreignNumer = numbers[idx]
if len(foreignNumer) > 5:
continue
for prefix in PREFIXES.keys():
search = foreignNumer.partition(prefix)
if search[0]:
continue # No prefix
if len(search[2]) > 2 or len(search[2]) < 1:
continue # suffix too long or short
if not all(ord(char) < 128 for char in search[2]):
continue # illegal characters
if not language in result:
result[language] = []
result.get(language).append(
(prefix.upper(), search[2].upper(), idx + 1, PREFIXES[prefix]))
for lang in languages:
langCode = lang["language"]
if langCode == "de":
continue
translated = [i["translatedText"].lower() for i in translateClient.translate(
NUMBERS[1:], target_language=langCode, source_language="de")]
findMatches(lang["name"], translated)
findMatches("German", NUMBERS[1:])
print("%d results" % len(result))
for lang in result:
print("\n##", lang, "\n")
for res in result[lang]:
print(" - %s - %s %d* %s" % res)
| 717
| 0
| 23
|
6a649bec9e4e2327f289422a9b7cb04d2ef6054a
| 2,197
|
py
|
Python
|
env/lib/python3.8/site-packages/celery/worker/heartbeat.py
|
rajancolab/blogsite
|
0721a3ab9b61bfca6fd84d5fae60b0574bfb0287
|
[
"MIT"
] | 13
|
2018-03-28T23:07:01.000Z
|
2022-03-12T06:01:21.000Z
|
env/lib/python3.8/site-packages/celery/worker/heartbeat.py
|
rajancolab/blogsite
|
0721a3ab9b61bfca6fd84d5fae60b0574bfb0287
|
[
"MIT"
] | 11
|
2018-06-18T15:49:07.000Z
|
2021-11-25T01:45:33.000Z
|
env/lib/python3.9/site-packages/celery/worker/heartbeat.py
|
simotwo/AbileneParadox-ddd
|
c85961efb37aba43c0d99ed1c36d083507e2b2d3
|
[
"MIT"
] | 5
|
2018-03-28T23:07:05.000Z
|
2021-12-09T19:02:00.000Z
|
# -*- coding: utf-8 -*-
"""Heartbeat service.
This is the internal thread responsible for sending heartbeat events
at regular intervals (may not be an actual thread).
"""
from __future__ import absolute_import, unicode_literals
from celery.signals import heartbeat_sent
from celery.utils.sysinfo import load_average
from .state import SOFTWARE_INFO, active_requests, all_total_count
__all__ = ('Heart',)
class Heart(object):
"""Timer sending heartbeats at regular intervals.
Arguments:
timer (kombu.asynchronous.timer.Timer): Timer to use.
eventer (celery.events.EventDispatcher): Event dispatcher
to use.
interval (float): Time in seconds between sending
heartbeats. Default is 2 seconds.
"""
| 33.8
| 70
| 0.616295
|
# -*- coding: utf-8 -*-
"""Heartbeat service.
This is the internal thread responsible for sending heartbeat events
at regular intervals (may not be an actual thread).
"""
from __future__ import absolute_import, unicode_literals
from celery.signals import heartbeat_sent
from celery.utils.sysinfo import load_average
from .state import SOFTWARE_INFO, active_requests, all_total_count
__all__ = ('Heart',)
class Heart(object):
"""Timer sending heartbeats at regular intervals.
Arguments:
timer (kombu.asynchronous.timer.Timer): Timer to use.
eventer (celery.events.EventDispatcher): Event dispatcher
to use.
interval (float): Time in seconds between sending
heartbeats. Default is 2 seconds.
"""
def __init__(self, timer, eventer, interval=None):
self.timer = timer
self.eventer = eventer
self.interval = float(interval or 2.0)
self.tref = None
# Make event dispatcher start/stop us when enabled/disabled.
self.eventer.on_enabled.add(self.start)
self.eventer.on_disabled.add(self.stop)
# Only send heartbeat_sent signal if it has receivers.
self._send_sent_signal = (
heartbeat_sent.send if heartbeat_sent.receivers else None)
def _send(self, event, retry=True):
if self._send_sent_signal is not None:
self._send_sent_signal(sender=self)
return self.eventer.send(event, freq=self.interval,
active=len(active_requests),
processed=all_total_count[0],
loadavg=load_average(),
retry=retry,
**SOFTWARE_INFO)
def start(self):
if self.eventer.enabled:
self._send('worker-online')
self.tref = self.timer.call_repeatedly(
self.interval, self._send, ('worker-heartbeat',),
)
def stop(self):
if self.tref is not None:
self.timer.cancel(self.tref)
self.tref = None
if self.eventer.enabled:
self._send('worker-offline', retry=False)
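# --- editor's usage sketch, not part of the original module ---
# assuming a kombu timer and a celery event dispatcher are already set up:
#   heart = Heart(timer, eventer, interval=2.0)
#   heart.start()   # if the dispatcher is enabled: emits 'worker-online', then
#                   # schedules 'worker-heartbeat' every `interval` seconds
#   heart.stop()    # cancels the repeating call and emits 'worker-offline'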
| 1,327
| 0
| 108
|
dba41e658b9b0dbcd00581e0eb820cc725866c0b
| 660
|
py
|
Python
|
tests/geohash/lib/test_ray.py
|
nziehn/toolbox-geohash
|
d397a02cbf0932d3a6463dcc47dfc177c8e35033
|
[
"MIT"
] | 1
|
2021-11-21T12:31:38.000Z
|
2021-11-21T12:31:38.000Z
|
tests/geohash/lib/test_ray.py
|
nziehn/toolbox-geohash
|
d397a02cbf0932d3a6463dcc47dfc177c8e35033
|
[
"MIT"
] | null | null | null |
tests/geohash/lib/test_ray.py
|
nziehn/toolbox-geohash
|
d397a02cbf0932d3a6463dcc47dfc177c8e35033
|
[
"MIT"
] | null | null | null |
from toolbox.geohash.lib import ray as _uut
from nose import tools as _tools
from toolbox.geohash.lib import point as _point
| 17.837838
| 48
| 0.568182
|
from toolbox.geohash.lib import ray as _uut
from nose import tools as _tools
from toolbox.geohash.lib import point as _point
def test_ray():
origin = _point.Point(x=0, y=0, z=0)
x1 = _point.Point(x=1, y=0, z=0)
x2y1 = _point.Point(x=2, y=1, z=0)
ray1 = _uut.Ray(start=x1, end=x2y1)
_tools.assert_equal(
ray1.m,
_point.Point(x=1, y=1, z=0) # x2y1 - x1
)
_tools.assert_equal(
ray1.b,
x1
)
ray2 = _uut.Ray(start=origin, end=x2y1)
_tools.assert_equal(
ray2.m,
_point.Point(x=2, y=1, z=0) # x2y1
)
_tools.assert_equal(
ray2.b,
origin
)
| 508
| 0
| 23
|
f91e7da2b03dd53eb8e9c707369d892377c7d10b
| 286
|
py
|
Python
|
data/mnist_data/create_mnist_imgs.py
|
felixGer/PySDDR
|
a7680e7190185ba605df6ad85b4fdf19401473b3
|
[
"MIT"
] | 14
|
2021-04-07T17:33:19.000Z
|
2022-02-07T14:49:37.000Z
|
data/mnist_data/create_mnist_imgs.py
|
felixGer/PySDDR
|
a7680e7190185ba605df6ad85b4fdf19401473b3
|
[
"MIT"
] | 3
|
2021-11-30T15:03:32.000Z
|
2022-01-09T06:24:29.000Z
|
data/mnist_data/create_mnist_imgs.py
|
felixGer/PySDDR
|
a7680e7190185ba605df6ad85b4fdf19401473b3
|
[
"MIT"
] | 7
|
2021-04-20T08:48:57.000Z
|
2022-03-02T10:45:19.000Z
|
import numpy as np
import os
import imageio
mnist = np.load('mnist.npy')
num_imgs = mnist.shape[0]
for i in range(num_imgs):
img = mnist[i,:,:]
name = 'img_%s.jpg'%(i)
file_path = os.path.join('mnist_images', name)
imageio.imwrite(file_path, (img*255).astype(np.uint8))
| 26
| 58
| 0.671329
|
import numpy as np
import os
import imageio
mnist = np.load('mnist.npy')
num_imgs = mnist.shape[0]
for i in range(num_imgs):
img = mnist[i,:,:]
name = 'img_%s.jpg'%(i)
file_path = os.path.join('mnist_images', name)
imageio.imwrite(file_path, (img*255).astype(np.uint8))
| 0
| 0
| 0
|
0d0fa6addcfe5c2847163ce9c815a1679e2eed6b
| 1,977
|
py
|
Python
|
test/selenium/src/lib/rest_facades/roles_rest_facade.py
|
MikalaiMikalalai/ggrc-core
|
f0f83b3638574bb64de474f3b70ed27436ca812a
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2019-01-12T23:46:00.000Z
|
2019-01-12T23:46:00.000Z
|
test/selenium/src/lib/rest_facades/roles_rest_facade.py
|
MikalaiMikalalai/ggrc-core
|
f0f83b3638574bb64de474f3b70ed27436ca812a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
test/selenium/src/lib/rest_facades/roles_rest_facade.py
|
MikalaiMikalalai/ggrc-core
|
f0f83b3638574bb64de474f3b70ed27436ca812a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2020 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""REST facade for roles."""
from lib import users
from lib.constants import roles, objects, object_states
from lib.decorator import memoize
from lib.entities import entities_factory
from lib.entities.entity import AccessControlRoleEntity
from lib.service import rest_facade
def get_role_name_and_id(object_type, role):
"""Returns role name and id as dict according to passed role entity or
name and object type."""
if isinstance(role, AccessControlRoleEntity):
return {"role_name": role.name, "role_id": role.id}
return {"role_name": role, "role_id": roles.ACLRolesIDs.id_of_role(
object_type, role)}
def custom_read_role(object_type):
"""Creates and returns custom access control role for object with 'Read'
rights."""
current_user = users.current_user()
users.set_current_user(entities_factory.PeopleFactory.superuser)
role = rest_facade.create_access_control_role(
object_type=object_type, read=True, update=False, delete=False)
users.set_current_user(current_user)
return role
@memoize
def custom_audit_read_role():
"""Returns custom access control role with 'Read' rights for Audit."""
return custom_read_role(objects.get_singular(objects.AUDITS, title=True))
@memoize
def custom_asmt_read_role():
"""Returns custom access control role with 'Read' rights for Assessment."""
return custom_read_role(objects.get_singular(objects.ASSESSMENTS,
title=True))
def add_verifier_to_set_obj_state(obj, state, person):
"""Assign a person as verifier if verifier presence is necessary for
setting an object into specific state and obj has no verifiers assigned."""
if state in object_states.VERIFIER_REQUIRING_STATES and not obj.verifiers:
rest_facade.update_acl(
objs=[obj], people=person,
**get_role_name_and_id(obj.type, roles.VERIFIERS))
| 38.019231
| 78
| 0.754679
|
# Copyright (C) 2020 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""REST facade for roles."""
from lib import users
from lib.constants import roles, objects, object_states
from lib.decorator import memoize
from lib.entities import entities_factory
from lib.entities.entity import AccessControlRoleEntity
from lib.service import rest_facade
def get_role_name_and_id(object_type, role):
"""Returns role name and id as dict according to passed role entity or
name and object type."""
if isinstance(role, AccessControlRoleEntity):
return {"role_name": role.name, "role_id": role.id}
return {"role_name": role, "role_id": roles.ACLRolesIDs.id_of_role(
object_type, role)}
def custom_read_role(object_type):
"""Creates and returns custom access control role for object with 'Read'
rights."""
current_user = users.current_user()
users.set_current_user(entities_factory.PeopleFactory.superuser)
role = rest_facade.create_access_control_role(
object_type=object_type, read=True, update=False, delete=False)
users.set_current_user(current_user)
return role
@memoize
def custom_audit_read_role():
"""Returns custom access control role with 'Read' rights for Audit."""
return custom_read_role(objects.get_singular(objects.AUDITS, title=True))
@memoize
def custom_asmt_read_role():
"""Returns custom access control role with 'Read' rights for Assessment."""
return custom_read_role(objects.get_singular(objects.ASSESSMENTS,
title=True))
def add_verifier_to_set_obj_state(obj, state, person):
"""Assign a person as verifier if verifier presence is necessary for
setting an object into specific state and obj has no verifiers assigned."""
if state in object_states.VERIFIER_REQUIRING_STATES and not obj.verifiers:
rest_facade.update_acl(
objs=[obj], people=person,
**get_role_name_and_id(obj.type, roles.VERIFIERS))
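# --- editor's usage sketch, not part of the original module (argument names are
# illustrative placeholders) ---
#   role = custom_audit_read_role()  # memoized custom role with read-only rights for Audit
#   add_verifier_to_set_obj_state(some_obj, some_target_state, some_person)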
| 0
| 0
| 0
|
420d617f1e058c42ad47f5a158d2d53d46c66af5
| 329
|
py
|
Python
|
tests/test_pw_hasher.py
|
jordic/fastapi_iam
|
aab1815dcb8b93f303fd9a83d85660ac3f92b7af
|
[
"MIT"
] | 1
|
2021-03-18T22:11:52.000Z
|
2021-03-18T22:11:52.000Z
|
tests/test_pw_hasher.py
|
jordic/fastapi_iam
|
aab1815dcb8b93f303fd9a83d85660ac3f92b7af
|
[
"MIT"
] | null | null | null |
tests/test_pw_hasher.py
|
jordic/fastapi_iam
|
aab1815dcb8b93f303fd9a83d85660ac3f92b7af
|
[
"MIT"
] | null | null | null |
import pytest
from fastapi_iam import auth
pytestmark = pytest.mark.asyncio
| 25.307692
| 62
| 0.768997
|
import pytest
from fastapi_iam import auth
pytestmark = pytest.mark.asyncio
async def test_password_argon_hasher():
service = auth.ArgonPasswordHasher()
password = "1qaz2wsx"
token = await service.hash_password(password)
check_pass = await service.check_password(token, password)
assert check_pass is True
| 228
| 0
| 23
|
7f0837223c4424fece8121cb0718fcbac587c4f2
| 3,718
|
py
|
Python
|
pyvlova/op/grouped_conv.py
|
ModelTC/pyvlova
|
080437c80bd995776507f5c9f4975a0420391cb0
|
[
"Apache-2.0"
] | 1
|
2021-11-11T03:11:49.000Z
|
2021-11-11T03:11:49.000Z
|
pyvlova/op/grouped_conv.py
|
ModelTC/pyvlova
|
080437c80bd995776507f5c9f4975a0420391cb0
|
[
"Apache-2.0"
] | null | null | null |
pyvlova/op/grouped_conv.py
|
ModelTC/pyvlova
|
080437c80bd995776507f5c9f4975a0420391cb0
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Jiang Shenghu
# SPDX-License-Identifier: Apache-2.0
from tvm import topi
from ..poly import TensorTable, Statement, ScheduleTree
from .conv import PlainConv2d, Conv2d
| 39.136842
| 104
| 0.589295
|
# Copyright 2020 Jiang Shenghu
# SPDX-License-Identifier: Apache-2.0
from tvm import topi
from ..poly import TensorTable, Statement, ScheduleTree
from .conv import PlainConv2d, Conv2d
def schedule(**kwargs):
init_t = 'stmt_init[n, c, h, w]'
calc_t = 'stmt_calc[n, c, h, w, i, j, k]'
output_constraints = '0 <= n < batch and 0 <= c < out_channel ' \
'and 0 <= h < out_height and 0 <= w < out_width'
calc_constraints = '0 <= i < in_group_size and 0 <= j < kernel_height and 0 <= k < kernel_width'
domain = '[batch, in_channel, in_height, in_width, out_channel, out_height, out_width, ' \
'kernel_height, kernel_width, in_group_size] -> {' \
f'{init_t}: {output_constraints}; ' \
f'{calc_t}: {output_constraints} and {calc_constraints}' \
'}'
outer_schedule = '[%s]' % ', '.join(map(
lambda x: f'{{{init_t}->[({x})];{calc_t}->[({x})]}}', ('n', 'c', 'h', 'w')))
inner_schedule = '[%s]' % ', '.join(map(
lambda x: f'{{{calc_t}->[({x})]}}', ('i', 'j', 'k')))
tree = ScheduleTree.from_yaml(f'''
domain: "{domain}"
child:
schedule: "{outer_schedule}"
permutable: 1
coincident: [1, 1, 1, 1]
child:
sequence:
- filter: "{{{init_t}}}"
- filter: "{{{calc_t}}}"
child:
schedule: "{inner_schedule}"
permutable: 1
coincident: [1, 1, 1]
''')
tree.apply_params(**kwargs)
return tree
def tensors(batch=1, in_channel=1, in_height=1, in_width=1, out_channel=1,
out_height=1, out_width=1, kernel_height=1, kernel_width=1, in_group_size=1, **_):
table = TensorTable()
table.add_tensor('x', [batch, in_channel, in_height, in_width])
table.add_tensor('weight', [out_channel, in_group_size, kernel_height, kernel_width])
table.add_tensor('out', [batch, out_channel, out_height, out_width])
return table
def statements(stride_height=1, stride_width=1, in_group_size=1, out_group_size=1, **_):
def stmt_init(t, n, c, h, w):
t['out'][n, c, h, w] = 0.0
def stmt_calc(t, n, c, h, w, i, j, k):
in_offset = c // out_group_size * in_group_size
t['out'][n, c, h, w] = t['out'][n, c, h, w] \
+ t['x'][n, i + in_offset, h * stride_height + j, w * stride_width + k] \
* t['weight'][c, i, j, k]
res = {}
for f in [stmt_init, stmt_calc]:
res[f.__name__] = Statement.from_calc(f)
return res
class PlainGroupedConv2d(PlainConv2d):
required_args = PlainConv2d.required_args + ['groups']
calculated_args = {**PlainConv2d.calculated_args, **{
'in_group_size': lambda **a: a['in_channel'] // a['groups'],
'out_group_size': lambda **a: a['out_channel'] // a['groups'],
}}
schedule_factory = schedule
tensors_factory = tensors
statements_factory = statements
topi_cuda_task_name = 'group_conv2d_nchw.cuda'
def topi_cuda_args(self, x=None, weight=None, out=None):
return [x, weight, [self.stride_height, self.stride_width], 0, 1, self.groups, out.dtype]
topi_cuda_calc_func = topi.cuda.group_conv2d_nchw
topi_cuda_schedule_func = topi.cuda.schedule_group_conv2d_nchw
topi_cuda_calc_ret_map = ['out']
class GroupedConv2d(Conv2d):
def __init__(self, groups=1, **kwargs):
super().__init__(**kwargs)
op_idx = self._ops.index(self.conv)
self.conv = PlainGroupedConv2d(name=self.name + '.conv', groups=groups, **self.conv.arguments)
self.weight = self.conv.tensors['weight']
self._ops[op_idx] = self.conv
| 2,764
| 623
| 141
|
4ad335965ac7f1188f59a89d4c60cee16b923221
| 1,158
|
py
|
Python
|
scenic/projects/baselines/detr/main.py
|
NielsRogge/scenic
|
4418bf4c6954fffe61d9bafc802981baa9440e49
|
[
"Apache-2.0"
] | 688
|
2021-07-26T21:45:18.000Z
|
2022-03-31T11:53:34.000Z
|
scenic/projects/baselines/detr/main.py
|
NielsRogge/scenic
|
4418bf4c6954fffe61d9bafc802981baa9440e49
|
[
"Apache-2.0"
] | 35
|
2021-08-03T11:31:10.000Z
|
2022-03-31T21:58:58.000Z
|
scenic/projects/baselines/detr/main.py
|
NielsRogge/scenic
|
4418bf4c6954fffe61d9bafc802981baa9440e49
|
[
"Apache-2.0"
] | 88
|
2021-08-03T13:19:50.000Z
|
2022-03-31T08:35:22.000Z
|
"""Main file for DETR."""
from typing import Any
from absl import flags
from clu import metric_writers
import jax
import jax.numpy as jnp
import ml_collections
from scenic import app
from scenic.projects.baselines.detr import model as detr_model
from scenic.projects.baselines.detr import trainer
from scenic.train_lib import train_utils
FLAGS = flags.FLAGS
def get_model_cls(model_name: str) -> Any:
"""Returns model class given its name."""
if model_name == 'detr':
return detr_model.DETRModel
else:
raise ValueError(f'Unrecognized model: {model_name}.')
def main(rng: jnp.ndarray, config: ml_collections.ConfigDict, workdir: str,
writer: metric_writers.MetricWriter):
"""Main function for the DETR project."""
model_cls = get_model_cls(config.model_name)
data_rng, rng = jax.random.split(rng)
dataset = train_utils.get_dataset(
config, data_rng, dataset_service_address=FLAGS.dataset_service_address)
trainer.train_and_evaluate(
rng=rng,
config=config,
model_cls=model_cls,
dataset=dataset,
workdir=workdir,
writer=writer)
if __name__ == '__main__':
app.run(main=main)
| 26.318182
| 78
| 0.741796
|
"""Main file for DETR."""
from typing import Any
from absl import flags
from clu import metric_writers
import jax
import jax.numpy as jnp
import ml_collections
from scenic import app
from scenic.projects.baselines.detr import model as detr_model
from scenic.projects.baselines.detr import trainer
from scenic.train_lib import train_utils
FLAGS = flags.FLAGS
def get_model_cls(model_name: str) -> Any:
"""Returns model class given its name."""
if model_name == 'detr':
return detr_model.DETRModel
else:
raise ValueError(f'Unrecognized model: {model_name}.')
def main(rng: jnp.ndarray, config: ml_collections.ConfigDict, workdir: str,
writer: metric_writers.MetricWriter):
"""Main function for the DETR project."""
model_cls = get_model_cls(config.model_name)
data_rng, rng = jax.random.split(rng)
dataset = train_utils.get_dataset(
config, data_rng, dataset_service_address=FLAGS.dataset_service_address)
trainer.train_and_evaluate(
rng=rng,
config=config,
model_cls=model_cls,
dataset=dataset,
workdir=workdir,
writer=writer)
if __name__ == '__main__':
app.run(main=main)
| 0
| 0
| 0
|
7821f2ff484f39551a14eddf76087769af5e5a96
| 4,973
|
py
|
Python
|
students/K33402/Akhmetzhanov Alisher/lr3_4/lab3_4/libraryBackend/main/views.py
|
AlishKZ/ITMO_ICT_WebDevelopment_2020-2021
|
b3ce82e17392d26d815e64343f5103f1bd46cd81
|
[
"MIT"
] | null | null | null |
students/K33402/Akhmetzhanov Alisher/lr3_4/lab3_4/libraryBackend/main/views.py
|
AlishKZ/ITMO_ICT_WebDevelopment_2020-2021
|
b3ce82e17392d26d815e64343f5103f1bd46cd81
|
[
"MIT"
] | null | null | null |
students/K33402/Akhmetzhanov Alisher/lr3_4/lab3_4/libraryBackend/main/views.py
|
AlishKZ/ITMO_ICT_WebDevelopment_2020-2021
|
b3ce82e17392d26d815e64343f5103f1bd46cd81
|
[
"MIT"
] | null | null | null |
from rest_framework.response import Response
from rest_framework import status
from rest_framework.views import APIView
from main.serializers import BookReplicaSerializer, BookSerializer, LibrarianSerializer, LibraryHallSerializer, ReaderSerializer
from main.models import Book, BookReplica, CustomUser, Librarian, LibraryHall, Reader
from rest_framework import generics
from django.shortcuts import render
# Create your views here.
| 36.29927
| 128
| 0.717474
|
from rest_framework.response import Response
from rest_framework import status
from rest_framework.views import APIView
from main.serializers import BookReplicaSerializer, BookSerializer, LibrarianSerializer, LibraryHallSerializer, ReaderSerializer
from main.models import Book, BookReplica, CustomUser, Librarian, LibraryHall, Reader
from rest_framework import generics
from django.shortcuts import render
# Create your views here.
class LibraryHallListView(generics.ListCreateAPIView):
queryset = LibraryHall.objects.all()
serializer_class = LibraryHallSerializer
class LibraryHallDetailView(generics.RetrieveUpdateDestroyAPIView):
queryset = LibraryHall.objects.all()
serializer_class = LibraryHallSerializer
    def delete(self, request, pk):  # can be removed
print('LibraryHallView -> DELETE')
try:
library_hall = LibraryHall.objects.get(pk=pk)
library_hall.readers.clear()
library_hall.delete()
except LibraryHall.DoesNotExist:
return Response({'error': 'Object does not exist'}, status=status.HTTP_400_BAD_REQUEST)
return Response({'message': 'Deleted!'}, status=status.HTTP_202_ACCEPTED)
class LibraryHallReadersView(APIView):
def get(self, request, pk, format=None):
try:
currentHall = LibraryHall.objects.get(pk=pk)
readers = currentHall.readers.all()
serializer = ReaderSerializer(readers, many=True)
except LibraryHall.DoesNotExist:
return Response({'error': 'Object does not exist'}, status=status.HTTP_400_BAD_REQUEST)
return Response(serializer.data, status=status.HTTP_200_OK)
class LibraryHallCopiesView(APIView):
def get(self, request, pk, format=None):
try:
currentHall = LibraryHall.objects.get(pk=pk)
copies = currentHall.hall_copies.all()
serializer = BookReplicaSerializer(copies, many=True)
except LibraryHall.DoesNotExist:
return Response({'error': 'Object does not exist'}, status=status.HTTP_400_BAD_REQUEST)
return Response(serializer.data, status=status.HTTP_200_OK)
class BookListView(generics.ListCreateAPIView):
queryset = Book.objects.all()
serializer_class = BookSerializer
class BookDetailView(generics.RetrieveUpdateDestroyAPIView):
queryset = Book.objects.all()
serializer_class = BookSerializer
class BookCopiesView(APIView):
def get(self, request, pk ,format=None):
try:
currentBook = Book.objects.get(pk=pk)
copies = currentBook.book_copies.all()
serializer = BookReplicaSerializer(copies, many=True)
except Book.DoesNotExist:
return Response({'error': 'Object does not exist'}, status=status.HTTP_400_BAD_REQUEST)
return Response(serializer.data, status=status.HTTP_200_OK)
class ReaderListView(generics.ListAPIView):
queryset = Reader.objects.all()
serializer_class = ReaderSerializer
class ReaderDetailView(generics.RetrieveUpdateAPIView):
queryset = Reader.objects.all()
serializer_class = ReaderSerializer
class ReaderByUserView(APIView):
def get(self, request, pk ,format=None):
try:
currentUser = CustomUser.objects.get(pk=pk)
readerProfile = currentUser.reader_profile
serializer = ReaderSerializer(readerProfile)
except CustomUser.DoesNotExist:
return Response({'error': 'Object does not exist'}, status=status.HTTP_400_BAD_REQUEST)
return Response(serializer.data, status=status.HTTP_200_OK)
class LibrarianByUserView(APIView):
def get(self, request, pk ,format=None):
try:
currentUser = CustomUser.objects.get(pk=pk)
readerProfile = currentUser.librarian_profile
serializer = LibrarianSerializer(readerProfile)
except CustomUser.DoesNotExist:
return Response({'error': 'Object does not exist'}, status=status.HTTP_400_BAD_REQUEST)
return Response(serializer.data, status=status.HTTP_200_OK)
class ReaderCopiesView(APIView):
def get(self, request, pk ,format=None):
try:
currentReader = Reader.objects.get(pk=pk)
copies = currentReader.reading_books.all()
serializer = BookReplicaSerializer(copies, many=True)
except Book.DoesNotExist:
return Response({'error': 'Object does not exist'}, status=status.HTTP_400_BAD_REQUEST)
return Response(serializer.data, status=status.HTTP_200_OK)
class LibrarianListView(generics.ListAPIView):
queryset = Librarian.objects.all()
serializer_class = LibrarianSerializer
class ReplicaListView(generics.ListCreateAPIView):
queryset = BookReplica.objects.all()
serializer_class = BookReplicaSerializer
class ReplicaDetailView(generics.RetrieveUpdateDestroyAPIView):
queryset = BookReplica.objects.all()
serializer_class = BookReplicaSerializer
| 2,909
| 1,123
| 502
|
6d5e238930e86ae7e01b39e44815055131942fef
| 275
|
py
|
Python
|
predict.py
|
fhvermei/chemprop_solvation
|
b6b2ea87440bb600593b5ff50eb8a15cfd79f33d
|
[
"MIT"
] | null | null | null |
predict.py
|
fhvermei/chemprop_solvation
|
b6b2ea87440bb600593b5ff50eb8a15cfd79f33d
|
[
"MIT"
] | 1
|
2022-03-01T19:15:33.000Z
|
2022-03-01T19:15:33.000Z
|
predict.py
|
fhvermei/chemprop_solvation
|
b6b2ea87440bb600593b5ff50eb8a15cfd79f33d
|
[
"MIT"
] | null | null | null |
"""Loads a trained model checkpoint and makes predictions on a dataset."""
from chemprop_solvation.parsing import parse_predict_args
from chemprop_solvation.train import make_predictions
if __name__ == '__main__':
args = parse_predict_args()
make_predictions(args)
| 30.555556
| 74
| 0.796364
|
"""Loads a trained model checkpoint and makes predictions on a dataset."""
from chemprop_solvation.parsing import parse_predict_args
from chemprop_solvation.train import make_predictions
if __name__ == '__main__':
args = parse_predict_args()
make_predictions(args)
| 0
| 0
| 0
|
08c5f818e4cec818b0a9461dbd18e340f34b25ce
| 175
|
py
|
Python
|
matrixprofile/exceptions.py
|
MORE-EU/matrixprofile
|
7c598385f7723f337d7bf7d3f90cffb690c6b0df
|
[
"Apache-2.0"
] | 262
|
2020-02-28T20:42:27.000Z
|
2022-03-30T14:02:28.000Z
|
matrixprofile/exceptions.py
|
MORE-EU/matrixprofile
|
7c598385f7723f337d7bf7d3f90cffb690c6b0df
|
[
"Apache-2.0"
] | 79
|
2020-03-01T01:42:14.000Z
|
2022-03-30T07:15:48.000Z
|
matrixprofile/exceptions.py
|
MORE-EU/matrixprofile
|
7c598385f7723f337d7bf7d3f90cffb690c6b0df
|
[
"Apache-2.0"
] | 56
|
2020-03-03T14:56:27.000Z
|
2022-03-22T07:18:42.000Z
|
class NoSolutionPossible(Exception):
"""A simple class used to explicitly let a user know that a solution is not
possible given the current inputs.
"""
pass
| 21.875
| 79
| 0.702857
|
class NoSolutionPossible(Exception):
"""A simple class used to explicitly let a user know that a solution is not
possible given the current inputs.
"""
pass
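# --- editor's usage sketch, not part of the original module (names are illustrative) ---
#   if window_size > len(time_series):
#       raise NoSolutionPossible('window size exceeds the length of the time series')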
| 0
| 0
| 0
|
ecf722da8089f170f217dfec0cc7d3e66aae3380
| 8,753
|
py
|
Python
|
sdk/python/pulumi_google_native/managedidentities/v1beta1/backup.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 44
|
2021-04-18T23:00:48.000Z
|
2022-02-14T17:43:15.000Z
|
sdk/python/pulumi_google_native/managedidentities/v1beta1/backup.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 354
|
2021-04-16T16:48:39.000Z
|
2022-03-31T17:16:39.000Z
|
sdk/python/pulumi_google_native/managedidentities/v1beta1/backup.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 8
|
2021-04-24T17:46:51.000Z
|
2022-01-05T10:40:21.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = ['BackupArgs', 'Backup']
# the @pulumi.input_type-decorated BackupArgs class and the Backup resource definition
# are elided in this copy; see the full source below
| 37.891775
| 134
| 0.619331
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = ['BackupArgs', 'Backup']
@pulumi.input_type
class BackupArgs:
def __init__(__self__, *,
backup_id: pulumi.Input[str],
domain_id: pulumi.Input[str],
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
project: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Backup resource.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Optional. Resource labels to represent user provided metadata.
"""
pulumi.set(__self__, "backup_id", backup_id)
pulumi.set(__self__, "domain_id", domain_id)
if labels is not None:
pulumi.set(__self__, "labels", labels)
if project is not None:
pulumi.set(__self__, "project", project)
@property
@pulumi.getter(name="backupId")
def backup_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "backup_id")
@backup_id.setter
def backup_id(self, value: pulumi.Input[str]):
pulumi.set(self, "backup_id", value)
@property
@pulumi.getter(name="domainId")
def domain_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "domain_id")
@domain_id.setter
def domain_id(self, value: pulumi.Input[str]):
pulumi.set(self, "domain_id", value)
@property
@pulumi.getter
def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Optional. Resource labels to represent user provided metadata.
"""
return pulumi.get(self, "labels")
@labels.setter
def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "labels", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
class Backup(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
backup_id: Optional[pulumi.Input[str]] = None,
domain_id: Optional[pulumi.Input[str]] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
project: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Creates a Backup for a domain.
Auto-naming is currently not supported for this resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Optional. Resource labels to represent user provided metadata.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: BackupArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Creates a Backup for a domain.
Auto-naming is currently not supported for this resource.
:param str resource_name: The name of the resource.
:param BackupArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(BackupArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
backup_id: Optional[pulumi.Input[str]] = None,
domain_id: Optional[pulumi.Input[str]] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
project: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = BackupArgs.__new__(BackupArgs)
if backup_id is None and not opts.urn:
raise TypeError("Missing required property 'backup_id'")
__props__.__dict__["backup_id"] = backup_id
if domain_id is None and not opts.urn:
raise TypeError("Missing required property 'domain_id'")
__props__.__dict__["domain_id"] = domain_id
__props__.__dict__["labels"] = labels
__props__.__dict__["project"] = project
__props__.__dict__["create_time"] = None
__props__.__dict__["name"] = None
__props__.__dict__["state"] = None
__props__.__dict__["status_message"] = None
__props__.__dict__["type"] = None
__props__.__dict__["update_time"] = None
super(Backup, __self__).__init__(
'google-native:managedidentities/v1beta1:Backup',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Backup':
"""
Get an existing Backup resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = BackupArgs.__new__(BackupArgs)
__props__.__dict__["create_time"] = None
__props__.__dict__["labels"] = None
__props__.__dict__["name"] = None
__props__.__dict__["state"] = None
__props__.__dict__["status_message"] = None
__props__.__dict__["type"] = None
__props__.__dict__["update_time"] = None
return Backup(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="createTime")
def create_time(self) -> pulumi.Output[str]:
"""
The time the backups was created.
"""
return pulumi.get(self, "create_time")
@property
@pulumi.getter
def labels(self) -> pulumi.Output[Mapping[str, str]]:
"""
Optional. Resource labels to represent user provided metadata.
"""
return pulumi.get(self, "labels")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The unique name of the Backup in the form of projects/{project_id}/locations/global/domains/{domain_name}/backups/{name}
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def state(self) -> pulumi.Output[str]:
"""
The current state of the backup.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="statusMessage")
def status_message(self) -> pulumi.Output[str]:
"""
Additional information about the current status of this backup, if available.
"""
return pulumi.get(self, "status_message")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
        Indicates whether it's an on-demand or a scheduled backup.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="updateTime")
def update_time(self) -> pulumi.Output[str]:
"""
Last update time.
"""
return pulumi.get(self, "update_time")
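# --- Hedged usage sketch (not part of the generated module above) ---
# A minimal example of creating this resource, assuming the documented
# `pulumi_google_native` package layout; all names and values below are
# illustrative only.
import pulumi
import pulumi_google_native as google_native

backup = google_native.managedidentities.v1beta1.Backup(
    "example-backup",
    backup_id="nightly",         # required input
    domain_id="example-domain",  # required input
    labels={"env": "dev"},       # optional
)

# Output-only properties such as `state` and `create_time` resolve once the
# resource has been created by the provider.
pulumi.export("backup_state", backup.state)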
| 2,792
| 5,543
| 45
|
edebeb0f91dc89522eb52c4c79f88ea14cac0264
| 7,331
|
py
|
Python
|
rpython/rlib/special_value.py
|
jptomo/pypy-lang-scheme
|
55edb2cec69d78f86793282a4566fcbc1ef9fcac
|
[
"MIT"
] | 34
|
2015-07-09T04:53:27.000Z
|
2021-07-19T05:22:27.000Z
|
rpython/rlib/special_value.py
|
jptomo/pypy-lang-scheme
|
55edb2cec69d78f86793282a4566fcbc1ef9fcac
|
[
"MIT"
] | 6
|
2015-05-30T17:20:45.000Z
|
2017-06-12T14:29:23.000Z
|
rpython/rlib/special_value.py
|
jptomo/pypy-lang-scheme
|
55edb2cec69d78f86793282a4566fcbc1ef9fcac
|
[
"MIT"
] | 11
|
2015-09-07T14:26:08.000Z
|
2020-04-10T07:20:41.000Z
|
import math
from rpython.rlib.rfloat import isnan, isinf, copysign
# code to deal with special values (infinities, NaNs, ...)
#
# The special types can be:
ST_NINF = 0 # negative infinity
ST_NEG = 1 # negative finite number (nonzero)
ST_NZERO = 2 # -0.
ST_PZERO = 3 # +0.
ST_POS = 4 # positive finite number (nonzero)
ST_PINF = 5 # positive infinity
ST_NAN = 6 # Not a Number
P = math.pi
P14 = 0.25 * math.pi
P12 = 0.5 * math.pi
P34 = 0.75 * math.pi
INF = 1e200 * 1e200
N = INF / INF
U = -9.5426319407711027e33 # unlikely value, used as placeholder
Z = 0.0 # defined here instead of in the tuples below, because of a bug
# in pypy releases < 1.5
NAN = N
def build_table(lst):
    # Reshape a flat list of 49 (real, imag) pairs into a 7x7 table indexed
    # by (special type of real part, special type of imaginary part).
    table = []
    assert len(lst) == 49
    it = iter(lst)
    for j in range(7):
        row = []
        for i in range(7):
            (x, y) = it.next()
            row.append((x, y))
        table.append(row)
    return table
acos_special_values = build_table([
(P34,INF), (P,INF), (P,INF), (P,-INF), (P,-INF), (P34,-INF), (N,INF),
(P12,INF), (U,U), (U,U), (U,U), (U,U), (P12,-INF), (N,N),
(P12,INF), (U,U), (P12,Z), (P12,-Z), (U,U), (P12,-INF), (P12,N),
(P12,INF), (U,U), (P12,Z), (P12,-Z), (U,U), (P12,-INF), (P12,N),
(P12,INF), (U,U), (U,U), (U,U), (U,U), (P12,-INF), (N,N),
(P14,INF), (Z,INF), (Z,INF), (Z,-INF), (Z,-INF), (P14,-INF), (N,INF),
(N,INF), (N,N), (N,N), (N,N), (N,N), (N,-INF), (N,N),
])
acosh_special_values = build_table([
(INF,-P34), (INF,-P), (INF,-P), (INF,P), (INF,P), (INF,P34), (INF,N),
(INF,-P12), (U,U), (U,U), (U,U), (U,U), (INF,P12), (N,N),
(INF,-P12), (U,U), (Z,-P12), (Z,P12), (U,U), (INF,P12), (N,N),
(INF,-P12), (U,U), (Z,-P12), (Z,P12), (U,U), (INF,P12), (N,N),
(INF,-P12), (U,U), (U,U), (U,U), (U,U), (INF,P12), (N,N),
(INF,-P14), (INF,-Z), (INF,-Z), (INF,Z), (INF,Z), (INF,P14), (INF,N),
(INF,N), (N,N), (N,N), (N,N), (N,N), (INF,N), (N,N),
])
asinh_special_values = build_table([
(-INF,-P14), (-INF,-Z), (-INF,-Z),(-INF,Z), (-INF,Z), (-INF,P14), (-INF,N),
(-INF,-P12), (U,U), (U,U), (U,U), (U,U), (-INF,P12), (N,N),
(-INF,-P12), (U,U), (-Z,-Z), (-Z,Z), (U,U), (-INF,P12), (N,N),
(INF,-P12), (U,U), (Z,-Z), (Z,Z), (U,U), (INF,P12), (N,N),
(INF,-P12), (U,U), (U,U), (U,U), (U,U), (INF,P12), (N,N),
(INF,-P14), (INF,-Z), (INF,-Z), (INF,Z), (INF,Z), (INF,P14), (INF,N),
(INF,N), (N,N), (N,-Z), (N,Z), (N,N), (INF,N), (N,N),
])
atanh_special_values = build_table([
(-Z,-P12), (-Z,-P12), (-Z,-P12), (-Z,P12), (-Z,P12), (-Z,P12), (-Z,N),
(-Z,-P12), (U,U), (U,U), (U,U), (U,U), (-Z,P12), (N,N),
(-Z,-P12), (U,U), (-Z,-Z), (-Z,Z), (U,U), (-Z,P12), (-Z,N),
(Z,-P12), (U,U), (Z,-Z), (Z,Z), (U,U), (Z,P12), (Z,N),
(Z,-P12), (U,U), (U,U), (U,U), (U,U), (Z,P12), (N,N),
(Z,-P12), (Z,-P12), (Z,-P12), (Z,P12), (Z,P12), (Z,P12), (Z,N),
(Z,-P12), (N,N), (N,N), (N,N), (N,N), (Z,P12), (N,N),
])
log_special_values = build_table([
(INF,-P34), (INF,-P), (INF,-P), (INF,P), (INF,P), (INF,P34), (INF,N),
(INF,-P12), (U,U), (U,U), (U,U), (U,U), (INF,P12), (N,N),
(INF,-P12), (U,U), (-INF,-P), (-INF,P), (U,U), (INF,P12), (N,N),
(INF,-P12), (U,U), (-INF,-Z), (-INF,Z), (U,U), (INF,P12), (N,N),
(INF,-P12), (U,U), (U,U), (U,U), (U,U), (INF,P12), (N,N),
(INF,-P14), (INF,-Z), (INF,-Z), (INF,Z), (INF,Z), (INF,P14), (INF,N),
(INF,N), (N,N), (N,N), (N,N), (N,N), (INF,N), (N,N),
])
sqrt_special_values = build_table([
(INF,-INF), (Z,-INF), (Z,-INF), (Z,INF), (Z,INF), (INF,INF), (N,INF),
(INF,-INF), (U,U), (U,U), (U,U), (U,U), (INF,INF), (N,N),
(INF,-INF), (U,U), (Z,-Z), (Z,Z), (U,U), (INF,INF), (N,N),
(INF,-INF), (U,U), (Z,-Z), (Z,Z), (U,U), (INF,INF), (N,N),
(INF,-INF), (U,U), (U,U), (U,U), (U,U), (INF,INF), (N,N),
(INF,-INF), (INF,-Z), (INF,-Z), (INF,Z), (INF,Z), (INF,INF), (INF,N),
(INF,-INF), (N,N), (N,N), (N,N), (N,N), (INF,INF), (N,N),
])
exp_special_values = build_table([
(Z,Z), (U,U), (Z,-Z), (Z,Z), (U,U), (Z,Z), (Z,Z),
(N,N), (U,U), (U,U), (U,U), (U,U), (N,N), (N,N),
(N,N), (U,U), (1.,-Z), (1.,Z), (U,U), (N,N), (N,N),
(N,N), (U,U), (1.,-Z), (1.,Z), (U,U), (N,N), (N,N),
(N,N), (U,U), (U,U), (U,U), (U,U), (N,N), (N,N),
(INF,N), (U,U), (INF,-Z), (INF,Z), (U,U), (INF,N), (INF,N),
(N,N), (N,N), (N,-Z), (N,Z), (N,N), (N,N), (N,N),
])
cosh_special_values = build_table([
(INF,N), (U,U), (INF,Z), (INF,-Z), (U,U), (INF,N), (INF,N),
(N,N), (U,U), (U,U), (U,U), (U,U), (N,N), (N,N),
(N,Z), (U,U), (1.,Z), (1.,-Z), (U,U), (N,Z), (N,Z),
(N,Z), (U,U), (1.,-Z), (1.,Z), (U,U), (N,Z), (N,Z),
(N,N), (U,U), (U,U), (U,U), (U,U), (N,N), (N,N),
(INF,N), (U,U), (INF,-Z), (INF,Z), (U,U), (INF,N), (INF,N),
(N,N), (N,N), (N,Z), (N,Z), (N,N), (N,N), (N,N),
])
sinh_special_values = build_table([
(INF,N), (U,U), (-INF,-Z), (-INF,Z), (U,U), (INF,N), (INF,N),
(N,N), (U,U), (U,U), (U,U), (U,U), (N,N), (N,N),
(Z,N), (U,U), (-Z,-Z), (-Z,Z), (U,U), (Z,N), (Z,N),
(Z,N), (U,U), (Z,-Z), (Z,Z), (U,U), (Z,N), (Z,N),
(N,N), (U,U), (U,U), (U,U), (U,U), (N,N), (N,N),
(INF,N), (U,U), (INF,-Z), (INF,Z), (U,U), (INF,N), (INF,N),
(N,N), (N,N), (N,-Z), (N,Z), (N,N), (N,N), (N,N),
])
tanh_special_values = build_table([
(-1.,Z), (U,U), (-1.,-Z), (-1.,Z), (U,U), (-1.,Z), (-1.,Z),
(N,N), (U,U), (U,U), (U,U), (U,U), (N,N), (N,N),
(N,N), (U,U), (-Z,-Z), (-Z,Z), (U,U), (N,N), (N,N),
(N,N), (U,U), (Z,-Z), (Z,Z), (U,U), (N,N), (N,N),
(N,N), (U,U), (U,U), (U,U), (U,U), (N,N), (N,N),
(1.,Z), (U,U), (1.,-Z), (1.,Z), (U,U), (1.,Z), (1.,Z),
(N,N), (N,N), (N,-Z), (N,Z), (N,N), (N,N), (N,N),
])
rect_special_values = build_table([
(INF,N), (U,U), (-INF,Z), (-INF,-Z), (U,U), (INF,N), (INF,N),
(N,N), (U,U), (U,U), (U,U), (U,U), (N,N), (N,N),
(Z,Z), (U,U), (-Z,Z), (-Z,-Z), (U,U), (Z,Z), (Z,Z),
(Z,Z), (U,U), (Z,-Z), (Z,Z), (U,U), (Z,Z), (Z,Z),
(N,N), (U,U), (U,U), (U,U), (U,U), (N,N), (N,N),
(INF,N), (U,U), (INF,-Z), (INF,Z), (U,U), (INF,N), (INF,N),
(N,N), (N,N), (N,Z), (N,Z), (N,N), (N,N), (N,N),
])
assert copysign(1., acosh_special_values[5][2][1]) == -1.
| 43.123529
| 79
| 0.367753
|
import math
from rpython.rlib.rfloat import isnan, isinf, copysign
# code to deal with special values (infinities, NaNs, ...)
#
# The special types can be:
ST_NINF = 0 # negative infinity
ST_NEG = 1 # negative finite number (nonzero)
ST_NZERO = 2 # -0.
ST_PZERO = 3 # +0.
ST_POS = 4 # positive finite number (nonzero)
ST_PINF = 5 # positive infinity
ST_NAN = 6 # Not a Number
def special_type(d):
if isnan(d):
return ST_NAN
elif isinf(d):
if d > 0.0:
return ST_PINF
else:
return ST_NINF
else:
if d != 0.0:
if d > 0.0:
return ST_POS
else:
return ST_NEG
else:
if copysign(1., d) == 1.:
return ST_PZERO
else:
return ST_NZERO
P = math.pi
P14 = 0.25 * math.pi
P12 = 0.5 * math.pi
P34 = 0.75 * math.pi
INF = 1e200 * 1e200
N = INF / INF
U = -9.5426319407711027e33 # unlikely value, used as placeholder
Z = 0.0 # defined here instead of in the tuples below, because of a bug
# in pypy releases < 1.5
NAN = N
def build_table(lst):
table = []
assert len(lst) == 49
it = iter(lst)
for j in range(7):
row = []
for i in range(7):
(x, y) = it.next()
row.append((x, y))
table.append(row)
return table
acos_special_values = build_table([
(P34,INF), (P,INF), (P,INF), (P,-INF), (P,-INF), (P34,-INF), (N,INF),
(P12,INF), (U,U), (U,U), (U,U), (U,U), (P12,-INF), (N,N),
(P12,INF), (U,U), (P12,Z), (P12,-Z), (U,U), (P12,-INF), (P12,N),
(P12,INF), (U,U), (P12,Z), (P12,-Z), (U,U), (P12,-INF), (P12,N),
(P12,INF), (U,U), (U,U), (U,U), (U,U), (P12,-INF), (N,N),
(P14,INF), (Z,INF), (Z,INF), (Z,-INF), (Z,-INF), (P14,-INF), (N,INF),
(N,INF), (N,N), (N,N), (N,N), (N,N), (N,-INF), (N,N),
])
acosh_special_values = build_table([
(INF,-P34), (INF,-P), (INF,-P), (INF,P), (INF,P), (INF,P34), (INF,N),
(INF,-P12), (U,U), (U,U), (U,U), (U,U), (INF,P12), (N,N),
(INF,-P12), (U,U), (Z,-P12), (Z,P12), (U,U), (INF,P12), (N,N),
(INF,-P12), (U,U), (Z,-P12), (Z,P12), (U,U), (INF,P12), (N,N),
(INF,-P12), (U,U), (U,U), (U,U), (U,U), (INF,P12), (N,N),
(INF,-P14), (INF,-Z), (INF,-Z), (INF,Z), (INF,Z), (INF,P14), (INF,N),
(INF,N), (N,N), (N,N), (N,N), (N,N), (INF,N), (N,N),
])
asinh_special_values = build_table([
(-INF,-P14), (-INF,-Z), (-INF,-Z),(-INF,Z), (-INF,Z), (-INF,P14), (-INF,N),
(-INF,-P12), (U,U), (U,U), (U,U), (U,U), (-INF,P12), (N,N),
(-INF,-P12), (U,U), (-Z,-Z), (-Z,Z), (U,U), (-INF,P12), (N,N),
(INF,-P12), (U,U), (Z,-Z), (Z,Z), (U,U), (INF,P12), (N,N),
(INF,-P12), (U,U), (U,U), (U,U), (U,U), (INF,P12), (N,N),
(INF,-P14), (INF,-Z), (INF,-Z), (INF,Z), (INF,Z), (INF,P14), (INF,N),
(INF,N), (N,N), (N,-Z), (N,Z), (N,N), (INF,N), (N,N),
])
atanh_special_values = build_table([
(-Z,-P12), (-Z,-P12), (-Z,-P12), (-Z,P12), (-Z,P12), (-Z,P12), (-Z,N),
(-Z,-P12), (U,U), (U,U), (U,U), (U,U), (-Z,P12), (N,N),
(-Z,-P12), (U,U), (-Z,-Z), (-Z,Z), (U,U), (-Z,P12), (-Z,N),
(Z,-P12), (U,U), (Z,-Z), (Z,Z), (U,U), (Z,P12), (Z,N),
(Z,-P12), (U,U), (U,U), (U,U), (U,U), (Z,P12), (N,N),
(Z,-P12), (Z,-P12), (Z,-P12), (Z,P12), (Z,P12), (Z,P12), (Z,N),
(Z,-P12), (N,N), (N,N), (N,N), (N,N), (Z,P12), (N,N),
])
log_special_values = build_table([
(INF,-P34), (INF,-P), (INF,-P), (INF,P), (INF,P), (INF,P34), (INF,N),
(INF,-P12), (U,U), (U,U), (U,U), (U,U), (INF,P12), (N,N),
(INF,-P12), (U,U), (-INF,-P), (-INF,P), (U,U), (INF,P12), (N,N),
(INF,-P12), (U,U), (-INF,-Z), (-INF,Z), (U,U), (INF,P12), (N,N),
(INF,-P12), (U,U), (U,U), (U,U), (U,U), (INF,P12), (N,N),
(INF,-P14), (INF,-Z), (INF,-Z), (INF,Z), (INF,Z), (INF,P14), (INF,N),
(INF,N), (N,N), (N,N), (N,N), (N,N), (INF,N), (N,N),
])
sqrt_special_values = build_table([
(INF,-INF), (Z,-INF), (Z,-INF), (Z,INF), (Z,INF), (INF,INF), (N,INF),
(INF,-INF), (U,U), (U,U), (U,U), (U,U), (INF,INF), (N,N),
(INF,-INF), (U,U), (Z,-Z), (Z,Z), (U,U), (INF,INF), (N,N),
(INF,-INF), (U,U), (Z,-Z), (Z,Z), (U,U), (INF,INF), (N,N),
(INF,-INF), (U,U), (U,U), (U,U), (U,U), (INF,INF), (N,N),
(INF,-INF), (INF,-Z), (INF,-Z), (INF,Z), (INF,Z), (INF,INF), (INF,N),
(INF,-INF), (N,N), (N,N), (N,N), (N,N), (INF,INF), (N,N),
])
exp_special_values = build_table([
(Z,Z), (U,U), (Z,-Z), (Z,Z), (U,U), (Z,Z), (Z,Z),
(N,N), (U,U), (U,U), (U,U), (U,U), (N,N), (N,N),
(N,N), (U,U), (1.,-Z), (1.,Z), (U,U), (N,N), (N,N),
(N,N), (U,U), (1.,-Z), (1.,Z), (U,U), (N,N), (N,N),
(N,N), (U,U), (U,U), (U,U), (U,U), (N,N), (N,N),
(INF,N), (U,U), (INF,-Z), (INF,Z), (U,U), (INF,N), (INF,N),
(N,N), (N,N), (N,-Z), (N,Z), (N,N), (N,N), (N,N),
])
cosh_special_values = build_table([
(INF,N), (U,U), (INF,Z), (INF,-Z), (U,U), (INF,N), (INF,N),
(N,N), (U,U), (U,U), (U,U), (U,U), (N,N), (N,N),
(N,Z), (U,U), (1.,Z), (1.,-Z), (U,U), (N,Z), (N,Z),
(N,Z), (U,U), (1.,-Z), (1.,Z), (U,U), (N,Z), (N,Z),
(N,N), (U,U), (U,U), (U,U), (U,U), (N,N), (N,N),
(INF,N), (U,U), (INF,-Z), (INF,Z), (U,U), (INF,N), (INF,N),
(N,N), (N,N), (N,Z), (N,Z), (N,N), (N,N), (N,N),
])
sinh_special_values = build_table([
(INF,N), (U,U), (-INF,-Z), (-INF,Z), (U,U), (INF,N), (INF,N),
(N,N), (U,U), (U,U), (U,U), (U,U), (N,N), (N,N),
(Z,N), (U,U), (-Z,-Z), (-Z,Z), (U,U), (Z,N), (Z,N),
(Z,N), (U,U), (Z,-Z), (Z,Z), (U,U), (Z,N), (Z,N),
(N,N), (U,U), (U,U), (U,U), (U,U), (N,N), (N,N),
(INF,N), (U,U), (INF,-Z), (INF,Z), (U,U), (INF,N), (INF,N),
(N,N), (N,N), (N,-Z), (N,Z), (N,N), (N,N), (N,N),
])
tanh_special_values = build_table([
(-1.,Z), (U,U), (-1.,-Z), (-1.,Z), (U,U), (-1.,Z), (-1.,Z),
(N,N), (U,U), (U,U), (U,U), (U,U), (N,N), (N,N),
(N,N), (U,U), (-Z,-Z), (-Z,Z), (U,U), (N,N), (N,N),
(N,N), (U,U), (Z,-Z), (Z,Z), (U,U), (N,N), (N,N),
(N,N), (U,U), (U,U), (U,U), (U,U), (N,N), (N,N),
(1.,Z), (U,U), (1.,-Z), (1.,Z), (U,U), (1.,Z), (1.,Z),
(N,N), (N,N), (N,-Z), (N,Z), (N,N), (N,N), (N,N),
])
rect_special_values = build_table([
(INF,N), (U,U), (-INF,Z), (-INF,-Z), (U,U), (INF,N), (INF,N),
(N,N), (U,U), (U,U), (U,U), (U,U), (N,N), (N,N),
(Z,Z), (U,U), (-Z,Z), (-Z,-Z), (U,U), (Z,Z), (Z,Z),
(Z,Z), (U,U), (Z,-Z), (Z,Z), (U,U), (Z,Z), (Z,Z),
(N,N), (U,U), (U,U), (U,U), (U,U), (N,N), (N,N),
(INF,N), (U,U), (INF,-Z), (INF,Z), (U,U), (INF,N), (INF,N),
(N,N), (N,N), (N,Z), (N,Z), (N,N), (N,N), (N,N),
])
assert copysign(1., acosh_special_values[5][2][1]) == -1.
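# --- Hedged lookup sketch (not part of the module above) ---
# How a table like the ones above is typically consulted: the row is chosen by
# the special type of the real part, the column by the special type of the
# imaginary part. The helper name below is illustrative, not part of this file;
# the import path follows this file's location in the source tree.
from rpython.rlib.special_value import special_type, acosh_special_values, INF

def lookup_special(table, real, imag):
    # Return the precomputed (real, imag) result for special-valued inputs.
    return table[special_type(real)][special_type(imag)]

# Matches the assert above: acosh of (+inf, -0.0) maps to (inf, -0.0).
assert lookup_special(acosh_special_values, INF, -0.0) == (INF, -0.0)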
| 644
| 0
| 46
|
4043addbcf2a6913bd26e0dcd14ebb7242e74209
| 1,479
|
py
|
Python
|
torstack/library/compat.py
|
longniao/torstack
|
148139eeca0f3cd8a8c2196ae2a6f8cea519d9b5
|
[
"MIT"
] | 7
|
2018-12-11T03:41:04.000Z
|
2018-12-11T06:08:45.000Z
|
torstack/library/compat.py
|
longniao/torstack
|
148139eeca0f3cd8a8c2196ae2a6f8cea519d9b5
|
[
"MIT"
] | null | null | null |
torstack/library/compat.py
|
longniao/torstack
|
148139eeca0f3cd8a8c2196ae2a6f8cea519d9b5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
torstack.library.compat.py
compat definition.
:copyright: (c) 2018 by longniao <longniao@gmail.com>
:license: MIT, see LICENSE for more details.
'''
import sys
# __all__ = (
# 'text_type', 'string_types', 'izip', 'iteritems', 'itervalues',
# 'with_metaclass',
# )
PY3 = sys.version_info >= (3,)
if PY3:
text_type = str
string_types = (str, )
integer_types = int
izip = zip
_xrange = range
MAXSIZE = sys.maxsize
else:
text_type = unicode
string_types = (basestring, )
integer_types = (int, long)
from itertools import izip
_xrange = xrange
MAXSIZE = sys.maxint
# "raise x, y, z" raises SyntaxError in Python 3
exec("""def reraise(exctype, value, trace=None):
raise exctype, str(value), trace
""")
_unicode = unicode
| 20.541667
| 69
| 0.612576
|
# -*- coding: utf-8 -*-
'''
torstack.library.compat.py
compat definition.
:copyright: (c) 2018 by longniao <longniao@gmail.com>
:license: MIT, see LICENSE for more details.
'''
import sys
# __all__ = (
# 'text_type', 'string_types', 'izip', 'iteritems', 'itervalues',
# 'with_metaclass',
# )
PY3 = sys.version_info >= (3,)
if PY3:
text_type = str
string_types = (str, )
integer_types = int
izip = zip
_xrange = range
MAXSIZE = sys.maxsize
def iteritems(o):
return iter(o.items())
def itervalues(o):
return iter(o.values())
def bytes_from_hex(h):
return bytes.fromhex(h)
def reraise(exctype, value, trace=None):
raise exctype(str(value)).with_traceback(trace)
def _unicode(s):
return s
else:
text_type = unicode
string_types = (basestring, )
integer_types = (int, long)
from itertools import izip
_xrange = xrange
MAXSIZE = sys.maxint
def b(s):
# See comments above. In python 2.x b('foo') is just 'foo'.
return s
def iteritems(o):
return o.iteritems()
def itervalues(o):
return o.itervalues()
def bytes_from_hex(h):
return h.decode('hex')
# "raise x, y, z" raises SyntaxError in Python 3
exec("""def reraise(exctype, value, trace=None):
raise exctype, str(value), trace
""")
_unicode = unicode
def with_metaclass(meta, base=object):
return meta("NewBase", (base,), {})
| 390
| 0
| 266
|