Dataset schema (one row per source file; columns appear in this order in each record below, with "nullable" marking columns that were flagged "⌀" in the original dump):

| Column | Type | Values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 distinct values |
| lang | string | 1 distinct value |
| max_stars_repo_path | string | length 3 to 972 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 to 972 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 116k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 to 972 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
96b1acfddaea8acf23911566a94f1e0201a808e2 | 1,084 | py | Python | models.py | VonStruddle/tacocat-app-treehouse | 3ca8e21fabe9037d8271fed979857b1eac13a083 | ["MIT"] | null | null | null | models.py | VonStruddle/tacocat-app-treehouse | 3ca8e21fabe9037d8271fed979857b1eac13a083 | ["MIT"] | null | null | null | models.py | VonStruddle/tacocat-app-treehouse | 3ca8e21fabe9037d8271fed979857b1eac13a083 | ["MIT"] | null | null | null |
import datetime
from flask_login import UserMixin
from flask_bcrypt import generate_password_hash
from peewee import *
DB = SqliteDatabase('tacocat.db')
class User(UserMixin, Model):
email = CharField(unique=True)
password = CharField(max_length=100)
class Meta:
database = DB
@classmethod
def create_user(cls, email, password):
try:
with DB.transaction():
cls.create(
email=email,
password=generate_password_hash(password)
)
except IntegrityError:
raise ValueError('User already exists')
class Taco(Model):
user = ForeignKeyField(
rel_model=User,
related_name='tacos'
)
protein = CharField()
cheese = BooleanField()
shell = CharField()
extras = TextField()
created_at = DateTimeField(default=datetime.datetime.now)
class Meta:
database = DB
order_by = ('-created_at',)
def initializeDB():
DB.connect()
DB.create_tables([User, Taco], safe=True)
DB.close()
| 21.68 | 61 | 0.617159 |
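The models.py record above targets the peewee 2.x API (rel_model/related_name on ForeignKeyField). A minimal usage sketch, assuming the file is importable as `models`; the e-mail, password, and taco values are invented for illustration:

```python
# Hypothetical usage of the models.py sample above (peewee 2.x-style API).
# Assumes the module is importable as `models`; example values are invented.
import models

models.initializeDB()  # create tacocat.db tables if they do not already exist

try:
    models.User.create_user(email='taco@example.com', password='hunter2')
except ValueError:
    pass  # create_user raises ValueError when the e-mail is already taken

user = models.User.get(models.User.email == 'taco@example.com')
models.Taco.create(
    user=user,
    protein='carnitas',
    cheese=True,
    shell='corn',
    extras='salsa verde',
)
print(user.tacos.count())  # related_name='tacos' exposes the reverse query
```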
cb5f021d6816325cff962dc371b5af51b459fc81 | 1,006 | py | Python | src/mrnet/utils/graphs.py | KhanWhale/mrnet | fc6660b1b4175315b242b1eb69189e1fe8853cf6 | ["BSD-3-Clause-LBNL"] | null | null | null | src/mrnet/utils/graphs.py | KhanWhale/mrnet | fc6660b1b4175315b242b1eb69189e1fe8853cf6 | ["BSD-3-Clause-LBNL"] | null | null | null | src/mrnet/utils/graphs.py | KhanWhale/mrnet | fc6660b1b4175315b242b1eb69189e1fe8853cf6 | ["BSD-3-Clause-LBNL"] | null | null | null |
from typing import List, Tuple
from pymatgen.analysis.graphs import MoleculeGraph
def extract_bond_environment(
mg: MoleculeGraph, bonds: List[Tuple[int, int]], order=1
) -> set:
"""
Extract the local environment of a particular chemical bond in a MoleculeGraph
:param bonds:
:param order:
:return: set of integers representing the relevant atom indices
"""
indices = set()
if order < 0:
return indices
elif order == 0:
for bond in bonds:
indices.add(bond[0])
indices.add(bond[1])
return indices
else:
graph = mg.graph.to_undirected()
for bond in bonds:
sub_bonds = list()
for neighbor in graph[bond[0]]:
sub_bonds.append((bond[0], neighbor))
for neighbor in graph[bond[1]]:
sub_bonds.append((bond[1], neighbor))
indices = indices.union(extract_bond_environment(mg, sub_bonds, order - 1))
return indices
| 27.944444 | 87 | 0.606362 |
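extract_bond_environment above expands the endpoints of each bond outward by `order` neighbor shells via recursion. Below is a self-contained re-trace of that recursion over a plain adjacency dict, with no pymatgen dependency; the helper name and the example chain are invented:

```python
# Toy mirror of the recursion in extract_bond_environment above, operating on
# a plain adjacency dict instead of a pymatgen MoleculeGraph (illustration only).
from typing import Dict, List, Set, Tuple

def bond_environment(adj: Dict[int, List[int]],
                     bonds: List[Tuple[int, int]], order: int = 1) -> Set[int]:
    indices: Set[int] = set()
    if order < 0:
        return indices
    if order == 0:
        for a, b in bonds:
            indices.update((a, b))
        return indices
    for a, b in bonds:
        # one shell: bonds from each endpoint to its direct neighbors
        sub_bonds = [(a, n) for n in adj[a]] + [(b, n) for n in adj[b]]
        indices |= bond_environment(adj, sub_bonds, order - 1)
    return indices

# Linear chain 0-1-2-3-4: the order-1 environment of bond (1, 2) is the two
# bonded atoms plus their direct neighbors.
chain = {0: [1], 1: [0, 2], 2: [1, 3], 3: [2, 4], 4: [3]}
print(sorted(bond_environment(chain, [(1, 2)], order=1)))  # [0, 1, 2, 3]
```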
2fe0699a44b7eddd414f73f3bcbab7ec23811dcd | 3,759 | py | Python | setup.py | timothydlister/iosfw | 29759d8595610492db921d19eee1e79c4f114811 | ["MIT"] | 74 | 2018-07-17T01:45:32.000Z | 2022-03-28T11:46:11.000Z | setup.py | timothydlister/iosfw | 29759d8595610492db921d19eee1e79c4f114811 | ["MIT"] | 12 | 2019-06-28T20:57:14.000Z | 2021-06-22T17:13:00.000Z | setup.py | timothydlister/iosfw | 29759d8595610492db921d19eee1e79c4f114811 | ["MIT"] | 16 | 2019-08-14T21:05:34.000Z | 2022-03-20T20:02:34.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Lovingly copied from https://github.com/kennethreitz/setup.py
# Note: To use the 'upload' functionality of this file, you must:
# $ pip install twine
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
# Package meta-data.
NAME = 'iosfw'
DESCRIPTION = 'Automates upgrading firmware on Cisco IOS devices.'
URL = 'https://github.com/austind/iosfw'
EMAIL = 'austindcc@gmail.com'
AUTHOR = 'Austin de Coup-Crank'
REQUIRES_PYTHON = '>=3.6'
VERSION = None
# What packages are required for this module to be executed?
REQUIRED = [
'napalm', 'netmiko', 'hashlib', 'tqdm', 'yaml'
]
# What packages are optional?
EXTRAS = {
# 'fancy feature': ['django'],
}
# The rest you shouldn't have to touch too much :)
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
try:
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
# Load the package's __version__.py module as a dictionary.
about = {}
if not VERSION:
with open(os.path.join(here, NAME, '__version__.py')) as f:
exec(f.read(), about)
else:
about['__version__'] = VERSION
class UploadCommand(Command):
"""Support setup.py upload."""
description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds…')
rmtree(os.path.join(here, 'dist'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution…')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPI via Twine…')
os.system('twine upload dist/*')
self.status('Pushing git tags…')
os.system('git tag v{0}'.format(about['__version__']))
os.system('git push --tags')
sys.exit()
# Where the magic happens:
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=long_description,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages(exclude=('tests',)),
# If your package is a single module, use this instead of 'packages':
# py_modules=['mypackage'],
# entry_points={
# 'console_scripts': ['mycli=mymodule:cli'],
# },
install_requires=REQUIRED,
extras_require=EXTRAS,
include_package_data=True,
license='MIT',
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
],
# $ setup.py publish support.
cmdclass={
'upload': UploadCommand,
},
)
| 28.263158 | 86 | 0.644586 |
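The setup.py above reads the package version by exec-ing iosfw/__version__.py into a dict instead of importing the package. A minimal, self-contained sketch of that pattern follows; the temporary file and the placeholder version string stand in for the real iosfw/__version__.py:

```python
# Stand-alone demo of the exec-based version loading used in the setup.py
# above. The temporary file and '0.0.0' are placeholders, not real values.
import os
import tempfile

with tempfile.TemporaryDirectory() as here:
    version_path = os.path.join(here, '__version__.py')
    with open(version_path, 'w') as f:
        f.write("__version__ = '0.0.0'\n")

    about = {}
    with open(version_path) as f:
        exec(f.read(), about)      # populates about['__version__']

    print(about['__version__'])    # -> 0.0.0
```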
4b4e529b7367b13746c0daa55008ec910826e248 | 93 | py | Python | src/contactus/apps.py | Ameesha2001/Ekyam-scl-maxo | d9a8ec0e2d297d20fcf9139cfe50a7c814cdb2f5 | ["MIT"] | 10 | 2021-01-02T14:54:33.000Z | 2022-02-09T07:11:05.000Z | yantragene/contactus/apps.py | brijeshgzp05/techfest-yantragene | 98efdaefede9ea0e4fb7241e97272f0079673863 | ["MIT"] | 44 | 2020-05-13T20:15:26.000Z | 2022-03-04T02:58:58.000Z | yantragene/contactus/apps.py | brijeshgzp05/techfest-yantragene | 98efdaefede9ea0e4fb7241e97272f0079673863 | ["MIT"] | 8 | 2020-12-24T07:58:24.000Z | 2022-01-04T15:22:30.000Z |
from django.apps import AppConfig
class ContactusConfig(AppConfig):
name = 'contactus'
| 15.5 | 33 | 0.763441 |
73bf0bac862e49305c40c94a2032a3563af7cc72 | 1,095 | py | Python | rssant_api/migrations/0012_auto_20191025_1526.py | zuzhi/rssant | 06d985845f6af3be7097e6d718afba7eeb195ec8 | ["BSD-3-Clause"] | 1,176 | 2019-12-24T01:51:22.000Z | 2022-03-29T06:00:25.000Z | rssant_api/migrations/0012_auto_20191025_1526.py | zuzhi/rssant | 06d985845f6af3be7097e6d718afba7eeb195ec8 | ["BSD-3-Clause"] | 33 | 2020-03-06T03:29:46.000Z | 2022-03-11T06:24:26.000Z | rssant_api/migrations/0012_auto_20191025_1526.py | zuzhi/rssant | 06d985845f6af3be7097e6d718afba7eeb195ec8 | ["BSD-3-Clause"] | 110 | 2019-12-29T05:49:24.000Z | 2022-03-28T06:44:21.000Z |
# Generated by Django 2.2.6 on 2019-10-25 15:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rssant_api', '0011_auto_20190714_0550'),
]
operations = [
migrations.AddField(
model_name='feed',
name='monthly_story_count_data',
field=models.BinaryField(blank=True, help_text='monthly story count data', max_length=514, null=True),
),
migrations.AlterField(
model_name='feed',
name='status',
field=models.CharField(choices=[('pending', 'pending'), ('updating', 'updating'), ('ready', 'ready'), ('error', 'error'), ('discard', 'discard')], default='pending', help_text='状态', max_length=20),
),
migrations.AlterField(
model_name='feedcreation',
name='status',
field=models.CharField(choices=[('pending', 'pending'), ('updating', 'updating'), ('ready', 'ready'), ('error', 'error'), ('discard', 'discard')], default='pending', help_text='状态', max_length=20),
),
]
| 37.758621 | 209 | 0.593607 |
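For orientation, the AddField/AlterField operations in the migration above imply Feed and FeedCreation fields roughly like the sketch below; this is a hand-written reconstruction, not the actual rssant_api models module:

```python
# Hypothetical model fields implied by migration 0012_auto_20191025_1526 above
# (sketch only; the real rssant_api models are not part of this dump).
from django.db import models

STATUS_CHOICES = [
    ('pending', 'pending'), ('updating', 'updating'), ('ready', 'ready'),
    ('error', 'error'), ('discard', 'discard'),
]

class Feed(models.Model):
    monthly_story_count_data = models.BinaryField(
        blank=True, null=True, max_length=514,
        help_text='monthly story count data')
    status = models.CharField(
        choices=STATUS_CHOICES, default='pending', max_length=20,
        help_text='状态')  # "status"

class FeedCreation(models.Model):
    status = models.CharField(
        choices=STATUS_CHOICES, default='pending', max_length=20,
        help_text='状态')  # "status"
```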
7f849a261c6daf72ba9d476f4742b67f76d98176 | 6,968 | py | Python | hedwig/models/bert/__main__.py | arjunnlp/hedwig-anlp | b8f6c50d788509bc9e5670caeee3503257d716d0 | ["Apache-2.0"] | 3 | 2019-07-20T15:23:59.000Z | 2021-04-26T02:57:59.000Z | hedwig/models/bert/__main__.py | arjunnlp/hedwig-anlp | b8f6c50d788509bc9e5670caeee3503257d716d0 | ["Apache-2.0"] | null | null | null | hedwig/models/bert/__main__.py | arjunnlp/hedwig-anlp | b8f6c50d788509bc9e5670caeee3503257d716d0 | ["Apache-2.0"] | null | null | null |
import os
import random
import time
import numpy as np
import torch
from common.evaluators.bert_evaluator import BertEvaluator
from common.trainers.bert_trainer import BertTrainer
from datasets.bert_processors.aapd_processor import AAPDProcessor
from datasets.bert_processors.agnews_processor import AGNewsProcessor
from datasets.bert_processors.imdb_processor import IMDBProcessor
from datasets.bert_processors.reuters_processor import ReutersProcessor
from datasets.bert_processors.sogou_processor import SogouProcessor
from datasets.bert_processors.sst_processor import SST2Processor
from datasets.bert_processors.yelp2014_processor import Yelp2014Processor
from datasets.bert_processors.mbti_processor import MBTIProcessor
from models.bert.args import get_args
from models.bert.model import BertForSequenceClassification
from utils.io import PYTORCH_PRETRAINED_BERT_CACHE
from utils.optimization import BertAdam
from utils.tokenization import BertTokenizer
# String templates for logging results
LOG_HEADER = 'Split Dev/Acc. Dev/Hamm. Dev/Jacc. Dev/Prec Dev/Rec Dev/micro-F1 Dev/F1 Dev/Loss'
LOG_TEMPLATE = ' '.join('{:>5s},{:>6.4f},{:>8.4f},{:8.4f},{:8.4f},{:>8.4f},{:8.4f},{:8.4f},{:10.4f}'.split(','))
def evaluate_split(model, processor, args, split='dev'):
evaluator = BertEvaluator(model, processor, args, split)
start_time = time.time()
accuracy, hamming, jaccard, precision, recall, microf1, f1, avg_loss = evaluator.get_scores(silent=True)[0]
print("Inference time", time.time() - start_time)
print('\n' + LOG_HEADER)
print(LOG_TEMPLATE.format(split.upper(), accuracy, hamming, jaccard, precision, recall, f1, microf1, f1, avg_loss))
if __name__ == '__main__':
# Set default configuration in args.py
args = get_args()
if args.local_rank == -1 or not args.cuda:
device = torch.device("cuda" if torch.cuda.is_available() and args.cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
# Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
print('Device:', str(device).upper())
print('Number of GPUs:', n_gpu)
print('Distributed training:', bool(args.local_rank != -1))
print('FP16:', args.fp16)
# Set random seed for reproducibility
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
dataset_map = {
'SST-2': SST2Processor,
'Reuters': ReutersProcessor,
'IMDB': IMDBProcessor,
'AAPD': AAPDProcessor,
'AGNews': AGNewsProcessor,
'Yelp2014': Yelp2014Processor,
'Sogou': SogouProcessor,
'MBTI': MBTIProcessor,
}
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
if args.dataset not in dataset_map:
raise ValueError('Unrecognized dataset')
args.batch_size = args.batch_size // args.gradient_accumulation_steps
args.device = device
args.n_gpu = n_gpu
args.num_labels = dataset_map[args.dataset].NUM_CLASSES
args.is_multilabel = dataset_map[args.dataset].IS_MULTILABEL
if not args.trained_model:
save_path = os.path.join(args.save_path, dataset_map[args.dataset].NAME)
os.makedirs(save_path, exist_ok=True)
processor = dataset_map[args.dataset]()
args.is_lowercase = 'uncased' in args.model
args.is_hierarchical = False
tokenizer = BertTokenizer.from_pretrained(args.model, is_lowercase=args.is_lowercase)
train_examples = None
num_train_optimization_steps = None
if not args.trained_model:
train_examples = processor.get_train_examples(args.data_dir)
num_train_optimization_steps = int(
len(train_examples) / args.batch_size / args.gradient_accumulation_steps) * args.epochs
if args.local_rank != -1:
num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()
cache_dir = args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(args.local_rank))
model = BertForSequenceClassification.from_pretrained(args.model, cache_dir=cache_dir, num_labels=args.num_labels)
if args.fp16:
model.half()
model.to(device)
if args.local_rank != -1:
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError("Install NVIDIA Apex to use distributed and FP16 training.")
model = DDP(model)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
# Prepare optimizer
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}]
if args.fp16:
try:
from apex.optimizers import FP16_Optimizer
from apex.optimizers import FusedAdam
except ImportError:
raise ImportError("Please install NVIDIA Apex for distributed and FP16 training")
optimizer = FusedAdam(optimizer_grouped_parameters,
lr=args.lr,
bias_correction=False,
max_grad_norm=1.0)
if args.loss_scale == 0:
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
else:
optimizer = BertAdam(optimizer_grouped_parameters,
lr=args.lr,
warmup=args.warmup_proportion,
t_total=num_train_optimization_steps)
trainer = BertTrainer(model, optimizer, processor, args)
if not args.trained_model:
trainer.train()
model = torch.load(trainer.snapshot_path)
else:
model = BertForSequenceClassification.from_pretrained(args.model, num_labels=args.num_labels)
model_ = torch.load(args.trained_model, map_location=lambda storage, loc: storage)
state={}
for key in model_.state_dict().keys():
new_key = key.replace("module.", "")
state[new_key] = model_.state_dict()[key]
model.load_state_dict(state)
model = model.to(device)
evaluate_split(model, processor, args, split='dev')
evaluate_split(model, processor, args, split='test')
| 41.47619 | 144 | 0.692308 |
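When building optimizer_grouped_parameters, the script above keeps weight decay off bias and LayerNorm parameters. A dependency-light sketch of the same grouping, with torch.optim.AdamW standing in for BertAdam/FusedAdam and a tiny invented module in place of BERT:

```python
# Illustration of the no-decay parameter grouping used in the BERT script
# above; the module and hyperparameters here are invented for the demo.
import torch

class TinyBlock(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.dense = torch.nn.Linear(8, 8)
        self.LayerNorm = torch.nn.LayerNorm(8)  # BERT-style attribute name

model = TinyBlock()
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
param_optimizer = list(model.named_parameters())
optimizer_grouped_parameters = [
    {'params': [p for n, p in param_optimizer
                if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
    {'params': [p for n, p in param_optimizer
                if any(nd in n for nd in no_decay)], 'weight_decay': 0.0},
]
optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=2e-5)
for group in optimizer.param_groups:
    print(group['weight_decay'], len(group['params']))  # 0.01 1 / 0.0 3
```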
4f71853f1bbf67546075ef75c55a0f9aea741497 | 1,004 | py | Python | tests/test_util.py | ties/rpki-client-web | afce9bb2e8deeaab8cb6c4ff3fd26a1cde8563d9 | ["MIT"] | 1 | 2021-11-23T12:43:43.000Z | 2021-11-23T12:43:43.000Z | tests/test_util.py | ties/rpki-client-web | afce9bb2e8deeaab8cb6c4ff3fd26a1cde8563d9 | ["MIT"] | 18 | 2021-04-08T19:21:04.000Z | 2022-03-11T14:41:30.000Z | tests/test_util.py | ties/rpki-client-web | afce9bb2e8deeaab8cb6c4ff3fd26a1cde8563d9 | ["MIT"] | 1 | 2021-04-08T18:52:04.000Z | 2021-04-08T18:52:04.000Z |
import pytest
from rpkiclientweb.util import parse_host, validate
def test_parse_legacy_format():
res = parse_host(
"rpki.cnnic.cn/rpki/A9162E3D0000/1587/h6wAjnaB0p2pptlBZ4bkOwowMM8.roa"
)
assert res == "rpki.cnnic.cn"
def test_rrdp_format():
res = parse_host(
"rrdp/73885635ed28814800212d730ae80581fc5112c4b5804a08d55d6bda2afa1615/ca.rg.net/rpki/RGnet-OU/ovsCA/IOUcOeBGM_Tb4dwfvswY4bnNZYY.mft"
)
assert res == "ca.rg.net"
def test_rsync_format():
res = parse_host(
"rsync/rpki.apnic.net/repository/838DB214166511E2B3BC286172FD1FF2/C5zKkN0Neoo3ZmsZIX_g2EA3t6I.mft:"
)
assert res == "rpki.apnic.net"
def test_validate_passes():
res = validate(True, "should not raise")
assert res is None
def test_validate_raises():
with pytest.raises(ValueError):
validate(False, "should raise")
def test_validate_raises_template():
with pytest.raises(ValueError):
validate(False, "should raise {}", "and template")
| 25.1 | 141 | 0.72012 |
880c27987c3d8d6bc450c8d1c45ea48f67538bab | 6,111 | py | Python | rbtools/commands/patch.py | brettdh/rbtools | ddaa655aeac4a087580696f8e1155f90637d80be | ["MIT"] | null | null | null | rbtools/commands/patch.py | brettdh/rbtools | ddaa655aeac4a087580696f8e1155f90637d80be | ["MIT"] | null | null | null | rbtools/commands/patch.py | brettdh/rbtools | ddaa655aeac4a087580696f8e1155f90637d80be | ["MIT"] | null | null | null |
import re
from rbtools.api.errors import APIError
from rbtools.commands import Command, CommandError, Option
from rbtools.utils.filesystem import make_tempfile
# MARKDOWN_ESCAPED_CHARS comes from markdown.Markdown.ESCAPED_CHARS. We don't
# want to have a dependency on markdown for rbtools, so we just copy it into
# here.
MARKDOWN_ESCAPED_CHARS = ['\\', '`', '*', '_', '{', '}', '[', ']',
'(', ')', '>', '#', '+', '-', '.', '!']
MARKDOWN_SPECIAL_CHARS = re.escape(r''.join(MARKDOWN_ESCAPED_CHARS))
UNESCAPE_CHARS_RE = re.compile(r'\\([%s])' % MARKDOWN_SPECIAL_CHARS)
class Patch(Command):
"""Applies a specific patch from a RB server.
The patch file indicated by the request id is downloaded from the
server and then applied locally."""
name = "patch"
author = "The Review Board Project"
args = "<review-request-id>"
option_list = [
Option("-c", "--commit",
dest="commit",
action="store_true",
default=False,
help="Commit using information fetched "
"from the review request (Git only)."),
Option("--diff-revision",
dest="diff_revision",
default=None,
help="revision id of diff to be used as patch"),
Option("--px",
dest="px",
default=None,
help="numerical pX argument for patch"),
Option("--print",
dest="patch_stdout",
action="store_true",
default=False,
help="print patch to stdout instead of applying"),
Command.server_options,
Command.repository_options,
]
def get_patch(self, request_id, api_root, diff_revision=None):
"""Return the diff as a string, the used diff revision and its basedir.
If a diff revision is not specified, then this will look at the most
recent diff.
"""
try:
diffs = api_root.get_diffs(review_request_id=request_id)
except APIError, e:
raise CommandError("Error getting diffs: %s" % e)
# Use the latest diff if a diff revision was not given.
# Since diff revisions start at 1, increment by one, and
# never skip a number, the latest diff revision's number
# should be equal to the number of diffs.
if diff_revision is None:
diff_revision = diffs.total_results
try:
diff = diffs.get_item(diff_revision)
diff_body = diff.get_patch().data
base_dir = getattr(diff, 'basedir', None) or ''
except APIError:
raise CommandError('The specified diff revision does not exist.')
return diff_body, diff_revision, base_dir
def apply_patch(self, repository_info, tool, request_id, diff_revision,
diff_file_path, base_dir):
"""Apply patch patch_file and display results to user."""
print ("Patch is being applied from request %s with diff revision "
" %s." % (request_id, diff_revision))
tool.apply_patch(diff_file_path, repository_info.base_path,
base_dir, self.options.px)
def _extract_commit_message(self, review_request):
"""Returns a commit message based on the review request.
The commit message returned contains the Summary, Description, Bugs,
and Testing Done fields from the review request, if available.
"""
info = []
summary = review_request.summary
description = review_request.description
testing_done = review_request.testing_done
if not description.startswith(summary):
info.append(summary)
info.append(description)
if testing_done:
info.append('Testing Done:\n%s' % testing_done)
if review_request.bugs_closed:
info.append('Bugs closed: %s'
% ', '.join(review_request.bugs_closed))
info.append('Reviewed at %s' % review_request.absolute_url)
return '\n\n'.join(info)
def main(self, request_id):
"""Run the command."""
repository_info, tool = self.initialize_scm_tool(
client_name=self.options.repository_type)
server_url = self.get_server_url(repository_info, tool)
api_client, api_root = self.get_api(server_url)
# Get the patch, the used patch ID and base dir for the diff
diff_body, diff_revision, base_dir = self.get_patch(
request_id,
api_root,
self.options.diff_revision)
if self.options.patch_stdout:
print diff_body
else:
try:
if tool.has_pending_changes():
message = 'Working directory is not clean.'
if not self.options.commit:
print 'Warning: %s' % message
else:
raise CommandError(message)
except NotImplementedError:
pass
tmp_patch_file = make_tempfile(diff_body)
self.apply_patch(repository_info, tool, request_id, diff_revision,
tmp_patch_file, base_dir)
if self.options.commit:
try:
review_request = api_root.get_review_request(
review_request_id=request_id,
force_text_type='plain')
except APIError, e:
raise CommandError('Error getting review request %s: %s'
% (request_id, e))
message = self._extract_commit_message(review_request)
author = review_request.get_submitter()
try:
tool.create_commit(message, author)
print('Changes committed to current branch.')
except NotImplementedError:
raise CommandError('--commit is not supported with %s'
% tool.name)
| 37.956522 | 79 | 0.580429 |
6cadb21bc375444a3c12dea15d7cb9cf09c15767 | 702 | py | Python | incomplete/search-engine/sizes.py | adlerliu/500lines | 9100aaa8cf510439460ab8a1fad3311926a94d90 | ["CC-BY-3.0"] | 26,185 | 2015-01-01T04:59:51.000Z | 2022-03-31T10:20:14.000Z | incomplete/search-engine/sizes.py | fsxchen/500lines | 3f2cd407ebedaf0a3cfa6858c4cf94543067433d | ["CC-BY-3.0"] | 160 | 2015-01-05T12:20:21.000Z | 2021-10-03T07:25:43.000Z | incomplete/search-engine/sizes.py | fsxchen/500lines | 3f2cd407ebedaf0a3cfa6858c4cf94543067433d | ["CC-BY-3.0"] | 6,572 | 2015-01-01T01:31:00.000Z | 2022-03-31T07:31:22.000Z |
#!/usr/bin/python
def xcumsum(xs):
total = 0
for xx in xs:
yield total
total += xx
def describe(sizes):
print ' ' * 4 + ' '.join("%4d" % size for size in sizes)
print ' ' * 4 + ' '.join("%4d" % tots for tots in xcumsum(sizes))
sizes = []
while True:
line = raw_input("+ ")
if line == '':
sizes = []
else:
sizes.append(int(line))
sizes.sort()
describe(sizes)
merge = [cumsum >= size for cumsum, size in zip(xcumsum(sizes), sizes)]
if any(merge):
max_merge = max(ii for ii in range(len(merge)) if merge[ii]) + 1
sizes[:max_merge] = [sum(sizes[:max_merge])]
sizes.sort()
describe(sizes)
| 22.645161 | 75 | 0.538462 |
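sizes.py above is Python 2 (print statements, raw_input). Its xcumsum generator yields an exclusive running total, i.e. the cumulative sum before each element; a short Python 3 sketch showing the same sequence with itertools.accumulate, using invented example sizes:

```python
# Python 3 equivalent of the xcumsum generator in sizes.py above; the
# example sizes are invented.
from itertools import accumulate, chain

def xcumsum(xs):
    total = 0
    for x in xs:
        yield total
        total += x

sizes = [3, 5, 8, 20]
assert list(xcumsum(sizes)) == list(chain([0], accumulate(sizes)))[:-1]
print(list(xcumsum(sizes)))  # [0, 3, 8, 16]
```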
2a6bf3bd9f1d921d9048cff8b4c962d6c797017e | 337 | py | Python | test_habitica.py | haclark30/habitica-cli | 5a5c2a919760a95af38a875da08e330e0948f620 | ["MIT"] | null | null | null | test_habitica.py | haclark30/habitica-cli | 5a5c2a919760a95af38a875da08e330e0948f620 | ["MIT"] | null | null | null | test_habitica.py | haclark30/habitica-cli | 5a5c2a919760a95af38a875da08e330e0948f620 | ["MIT"] | null | null | null |
import unittest
from habitica import HabiticaAPI
import time
class TestHabitica(unittest.TestCase):
def setUp(self):
self.hbt_api = HabiticaAPI()
def test_get_content(self):
content = self.hbt_api.get_content()
time_diff = time.time() - content['dateRetrieved']
self.assertLess(time_diff, 86400)
| 28.083333 | 58 | 0.703264 |
75ce8216f60b0353564c104d8fa5defa30da03d3 | 53,928 | py | Python | lib/sqlalchemy/orm/collections.py | brussee/sqlalchemy | 5e3357c70e419c244156ac3885b2cf784b5b3fc0 | ["MIT"] | null | null | null | lib/sqlalchemy/orm/collections.py | brussee/sqlalchemy | 5e3357c70e419c244156ac3885b2cf784b5b3fc0 | ["MIT"] | null | null | null | lib/sqlalchemy/orm/collections.py | brussee/sqlalchemy | 5e3357c70e419c244156ac3885b2cf784b5b3fc0 | ["MIT"] | null | null | null |
# orm/collections.py
# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
"""Support for collections of mapped entities.
The collections package supplies the machinery used to inform the ORM of
collection membership changes. An instrumentation via decoration approach is
used, allowing arbitrary types (including built-ins) to be used as entity
collections without requiring inheritance from a base class.
Instrumentation decoration relays membership change events to the
:class:`.CollectionAttributeImpl` that is currently managing the collection.
The decorators observe function call arguments and return values, tracking
entities entering or leaving the collection. Two decorator approaches are
provided. One is a bundle of generic decorators that map function arguments
and return values to events::
from sqlalchemy.orm.collections import collection
class MyClass:
# ...
@collection.adds(1)
def store(self, item):
self.data.append(item)
@collection.removes_return()
def pop(self):
return self.data.pop()
The second approach is a bundle of targeted decorators that wrap appropriate
append and remove notifiers around the mutation methods present in the
standard Python ``list``, ``set`` and ``dict`` interfaces. These could be
specified in terms of generic decorator recipes, but are instead hand-tooled
for increased efficiency. The targeted decorators occasionally implement
adapter-like behavior, such as mapping bulk-set methods (``extend``,
``update``, ``__setslice__``, etc.) into the series of atomic mutation events
that the ORM requires.
The targeted decorators are used internally for automatic instrumentation of
entity collection classes. Every collection class goes through a
transformation process roughly like so:
1. If the class is a built-in, substitute a trivial sub-class
2. Is this class already instrumented?
3. Add in generic decorators
4. Sniff out the collection interface through duck-typing
5. Add targeted decoration to any undecorated interface method
This process modifies the class at runtime, decorating methods and adding some
bookkeeping properties. This isn't possible (or desirable) for built-in
classes like ``list``, so trivial sub-classes are substituted to hold
decoration::
class InstrumentedList(list):
pass
Collection classes can be specified in ``relationship(collection_class=)`` as
types or a function that returns an instance. Collection classes are
inspected and instrumented during the mapper compilation phase. The
collection_class callable will be executed once to produce a specimen
instance, and the type of that specimen will be instrumented. Functions that
return built-in types like ``lists`` will be adapted to produce instrumented
instances.
When extending a known type like ``list``, additional decorations are
generally not needed. Odds are, the extension method will delegate to a
method that's already instrumented. For example::
class QueueIsh(list):
def push(self, item):
self.append(item)
def shift(self):
return self.pop(0)
There's no need to decorate these methods. ``append`` and ``pop`` are already
instrumented as part of the ``list`` interface. Decorating them would fire
duplicate events, which should be avoided.
The targeted decoration tries not to rely on other methods in the underlying
collection class, but some are unavoidable. Many depend on 'read' methods
being present to properly instrument a 'write', for example, ``__setitem__``
needs ``__getitem__``. "Bulk" methods like ``update`` and ``extend`` may also
reimplemented in terms of atomic appends and removes, so the ``extend``
decoration will actually perform many ``append`` operations and not call the
underlying method at all.
Tight control over bulk operation and the firing of events is also possible by
implementing the instrumentation internally in your methods. The basic
instrumentation package works under the general assumption that collection
mutation will not raise unusual exceptions. If you want to closely
orchestrate append and remove events with exception management, internal
instrumentation may be the answer. Within your method,
``collection_adapter(self)`` will retrieve an object that you can use for
explicit control over triggering append and remove events.
The owning object and :class:`.CollectionAttributeImpl` are also reachable
through the adapter, allowing for some very sophisticated behavior.
"""
import operator
import threading
import weakref
from sqlalchemy.util.compat import inspect_getfullargspec
from . import base
from .. import exc as sa_exc
from .. import util
from ..sql import coercions
from ..sql import expression
from ..sql import roles
__all__ = [
"collection",
"collection_adapter",
"mapped_collection",
"column_mapped_collection",
"attribute_mapped_collection",
]
__instrumentation_mutex = threading.Lock()
class _PlainColumnGetter:
"""Plain column getter, stores collection of Column objects
directly.
Serializes to a :class:`._SerializableColumnGetterV2`
which has more expensive __call__() performance
and some rare caveats.
"""
def __init__(self, cols):
self.cols = cols
self.composite = len(cols) > 1
def __reduce__(self):
return _SerializableColumnGetterV2._reduce_from_cols(self.cols)
def _cols(self, mapper):
return self.cols
def __call__(self, value):
state = base.instance_state(value)
m = base._state_mapper(state)
key = [
m._get_state_attr_by_column(state, state.dict, col)
for col in self._cols(m)
]
if self.composite:
return tuple(key)
else:
return key[0]
class _SerializableColumnGetter:
"""Column-based getter used in version 0.7.6 only.
Remains here for pickle compatibility with 0.7.6.
"""
def __init__(self, colkeys):
self.colkeys = colkeys
self.composite = len(colkeys) > 1
def __reduce__(self):
return _SerializableColumnGetter, (self.colkeys,)
def __call__(self, value):
state = base.instance_state(value)
m = base._state_mapper(state)
key = [
m._get_state_attr_by_column(
state, state.dict, m.mapped_table.columns[k]
)
for k in self.colkeys
]
if self.composite:
return tuple(key)
else:
return key[0]
class _SerializableColumnGetterV2(_PlainColumnGetter):
"""Updated serializable getter which deals with
multi-table mapped classes.
Two extremely unusual cases are not supported.
Mappings which have tables across multiple metadata
objects, or which are mapped to non-Table selectables
linked across inheriting mappers may fail to function
here.
"""
def __init__(self, colkeys):
self.colkeys = colkeys
self.composite = len(colkeys) > 1
def __reduce__(self):
return self.__class__, (self.colkeys,)
@classmethod
def _reduce_from_cols(cls, cols):
def _table_key(c):
if not isinstance(c.table, expression.TableClause):
return None
else:
return c.table.key
colkeys = [(c.key, _table_key(c)) for c in cols]
return _SerializableColumnGetterV2, (colkeys,)
def _cols(self, mapper):
cols = []
metadata = getattr(mapper.local_table, "metadata", None)
for (ckey, tkey) in self.colkeys:
if tkey is None or metadata is None or tkey not in metadata:
cols.append(mapper.local_table.c[ckey])
else:
cols.append(metadata.tables[tkey].c[ckey])
return cols
def column_mapped_collection(mapping_spec):
"""A dictionary-based collection type with column-based keying.
Returns a :class:`.MappedCollection` factory with a keying function
generated from mapping_spec, which may be a Column or a sequence
of Columns.
The key value must be immutable for the lifetime of the object. You
can not, for example, map on foreign key values if those key values will
change during the session, i.e. from None to a database-assigned integer
after a session flush.
"""
cols = [
coercions.expect(roles.ColumnArgumentRole, q, argname="mapping_spec")
for q in util.to_list(mapping_spec)
]
keyfunc = _PlainColumnGetter(cols)
return lambda: MappedCollection(keyfunc)
class _SerializableAttrGetter:
def __init__(self, name):
self.name = name
self.getter = operator.attrgetter(name)
def __call__(self, target):
return self.getter(target)
def __reduce__(self):
return _SerializableAttrGetter, (self.name,)
def attribute_mapped_collection(attr_name):
"""A dictionary-based collection type with attribute-based keying.
Returns a :class:`.MappedCollection` factory with a keying based on the
'attr_name' attribute of entities in the collection, where ``attr_name``
is the string name of the attribute.
.. warning:: the key value must be assigned to its final value
**before** it is accessed by the attribute mapped collection.
Additionally, changes to the key attribute are **not tracked**
automatically, which means the key in the dictionary is not
automatically synchronized with the key value on the target object
itself. See the section :ref:`key_collections_mutations`
for an example.
"""
getter = _SerializableAttrGetter(attr_name)
return lambda: MappedCollection(getter)
def mapped_collection(keyfunc):
"""A dictionary-based collection type with arbitrary keying.
Returns a :class:`.MappedCollection` factory with a keying function
generated from keyfunc, a callable that takes an entity and returns a
key value.
The key value must be immutable for the lifetime of the object. You
can not, for example, map on foreign key values if those key values will
change during the session, i.e. from None to a database-assigned integer
after a session flush.
"""
return lambda: MappedCollection(keyfunc)
class collection:
"""Decorators for entity collection classes.
The decorators fall into two groups: annotations and interception recipes.
The annotating decorators (appender, remover, iterator, converter,
internally_instrumented) indicate the method's purpose and take no
arguments. They are not written with parens::
@collection.appender
def append(self, append): ...
The recipe decorators all require parens, even those that take no
arguments::
@collection.adds('entity')
def insert(self, position, entity): ...
@collection.removes_return()
def popitem(self): ...
"""
# Bundled as a class solely for ease of use: packaging, doc strings,
# importability.
@staticmethod
def appender(fn):
"""Tag the method as the collection appender.
The appender method is called with one positional argument: the value
to append. The method will be automatically decorated with 'adds(1)'
if not already decorated::
@collection.appender
def add(self, append): ...
# or, equivalently
@collection.appender
@collection.adds(1)
def add(self, append): ...
# for mapping type, an 'append' may kick out a previous value
# that occupies that slot. consider d['a'] = 'foo'- any previous
# value in d['a'] is discarded.
@collection.appender
@collection.replaces(1)
def add(self, entity):
key = some_key_func(entity)
previous = None
if key in self:
previous = self[key]
self[key] = entity
return previous
If the value to append is not allowed in the collection, you may
raise an exception. Something to remember is that the appender
will be called for each object mapped by a database query. If the
database contains rows that violate your collection semantics, you
will need to get creative to fix the problem, as access via the
collection will not work.
If the appender method is internally instrumented, you must also
receive the keyword argument '_sa_initiator' and ensure its
promulgation to collection events.
"""
fn._sa_instrument_role = "appender"
return fn
@staticmethod
def remover(fn):
"""Tag the method as the collection remover.
The remover method is called with one positional argument: the value
to remove. The method will be automatically decorated with
:meth:`removes_return` if not already decorated::
@collection.remover
def zap(self, entity): ...
# or, equivalently
@collection.remover
@collection.removes_return()
def zap(self, ): ...
If the value to remove is not present in the collection, you may
raise an exception or return None to ignore the error.
If the remove method is internally instrumented, you must also
receive the keyword argument '_sa_initiator' and ensure its
promulgation to collection events.
"""
fn._sa_instrument_role = "remover"
return fn
@staticmethod
def iterator(fn):
"""Tag the method as the collection remover.
The iterator method is called with no arguments. It is expected to
return an iterator over all collection members::
@collection.iterator
def __iter__(self): ...
"""
fn._sa_instrument_role = "iterator"
return fn
@staticmethod
def internally_instrumented(fn):
"""Tag the method as instrumented.
This tag will prevent any decoration from being applied to the
method. Use this if you are orchestrating your own calls to
:func:`.collection_adapter` in one of the basic SQLAlchemy
interface methods, or to prevent an automatic ABC method
decoration from wrapping your implementation::
# normally an 'extend' method on a list-like class would be
# automatically intercepted and re-implemented in terms of
# SQLAlchemy events and append(). your implementation will
# never be called, unless:
@collection.internally_instrumented
def extend(self, items): ...
"""
fn._sa_instrumented = True
return fn
@staticmethod
@util.deprecated(
"1.3",
"The :meth:`.collection.converter` handler is deprecated and will "
"be removed in a future release. Please refer to the "
":class:`.AttributeEvents.bulk_replace` listener interface in "
"conjunction with the :func:`.event.listen` function.",
)
def converter(fn):
"""Tag the method as the collection converter.
This optional method will be called when a collection is being
replaced entirely, as in::
myobj.acollection = [newvalue1, newvalue2]
The converter method will receive the object being assigned and should
return an iterable of values suitable for use by the ``appender``
method. A converter must not assign values or mutate the collection,
its sole job is to adapt the value the user provides into an iterable
of values for the ORM's use.
The default converter implementation will use duck-typing to do the
conversion. A dict-like collection will be converted into an iterable
of dictionary values, and other types will simply be iterated::
@collection.converter
def convert(self, other): ...
If the duck-typing of the object does not match the type of this
collection, a TypeError is raised.
Supply an implementation of this method if you want to expand the
range of possible types that can be assigned in bulk or perform
validation on the values about to be assigned.
"""
fn._sa_instrument_role = "converter"
return fn
@staticmethod
def adds(arg):
"""Mark the method as adding an entity to the collection.
Adds "add to collection" handling to the method. The decorator
argument indicates which method argument holds the SQLAlchemy-relevant
value. Arguments can be specified positionally (i.e. integer) or by
name::
@collection.adds(1)
def push(self, item): ...
@collection.adds('entity')
def do_stuff(self, thing, entity=None): ...
"""
def decorator(fn):
fn._sa_instrument_before = ("fire_append_event", arg)
return fn
return decorator
@staticmethod
def replaces(arg):
"""Mark the method as replacing an entity in the collection.
Adds "add to collection" and "remove from collection" handling to
the method. The decorator argument indicates which method argument
holds the SQLAlchemy-relevant value to be added, and return value, if
any will be considered the value to remove.
Arguments can be specified positionally (i.e. integer) or by name::
@collection.replaces(2)
def __setitem__(self, index, item): ...
"""
def decorator(fn):
fn._sa_instrument_before = ("fire_append_event", arg)
fn._sa_instrument_after = "fire_remove_event"
return fn
return decorator
@staticmethod
def removes(arg):
"""Mark the method as removing an entity in the collection.
Adds "remove from collection" handling to the method. The decorator
argument indicates which method argument holds the SQLAlchemy-relevant
value to be removed. Arguments can be specified positionally (i.e.
integer) or by name::
@collection.removes(1)
def zap(self, item): ...
For methods where the value to remove is not known at call-time, use
collection.removes_return.
"""
def decorator(fn):
fn._sa_instrument_before = ("fire_remove_event", arg)
return fn
return decorator
@staticmethod
def removes_return():
"""Mark the method as removing an entity in the collection.
Adds "remove from collection" handling to the method. The return
value of the method, if any, is considered the value to remove. The
method arguments are not inspected::
@collection.removes_return()
def pop(self): ...
For methods where the value to remove is known at call-time, use
collection.remove.
"""
def decorator(fn):
fn._sa_instrument_after = "fire_remove_event"
return fn
return decorator
collection_adapter = operator.attrgetter("_sa_adapter")
"""Fetch the :class:`.CollectionAdapter` for a collection."""
class CollectionAdapter:
"""Bridges between the ORM and arbitrary Python collections.
Proxies base-level collection operations (append, remove, iterate)
to the underlying Python collection, and emits add/remove events for
entities entering or leaving the collection.
The ORM uses :class:`.CollectionAdapter` exclusively for interaction with
entity collections.
"""
__slots__ = (
"attr",
"_key",
"_data",
"owner_state",
"_converter",
"invalidated",
"empty",
)
def __init__(self, attr, owner_state, data):
self.attr = attr
self._key = attr.key
self._data = weakref.ref(data)
self.owner_state = owner_state
data._sa_adapter = self
self._converter = data._sa_converter
self.invalidated = False
self.empty = False
def _warn_invalidated(self):
util.warn("This collection has been invalidated.")
@property
def data(self):
"The entity collection being adapted."
return self._data()
@property
def _referenced_by_owner(self):
"""return True if the owner state still refers to this collection.
This will return False within a bulk replace operation,
where this collection is the one being replaced.
"""
return self.owner_state.dict[self._key] is self._data()
def bulk_appender(self):
return self._data()._sa_appender
def append_with_event(self, item, initiator=None):
"""Add an entity to the collection, firing mutation events."""
self._data()._sa_appender(item, _sa_initiator=initiator)
def _set_empty(self, user_data):
assert (
not self.empty
), "This collection adapter is already in the 'empty' state"
self.empty = True
self.owner_state._empty_collections[self._key] = user_data
def _reset_empty(self):
assert (
self.empty
), "This collection adapter is not in the 'empty' state"
self.empty = False
self.owner_state.dict[
self._key
] = self.owner_state._empty_collections.pop(self._key)
def _refuse_empty(self):
raise sa_exc.InvalidRequestError(
"This is a special 'empty' collection which cannot accommodate "
"internal mutation operations"
)
def append_without_event(self, item):
"""Add or restore an entity to the collection, firing no events."""
if self.empty:
self._refuse_empty()
self._data()._sa_appender(item, _sa_initiator=False)
def append_multiple_without_event(self, items):
"""Add or restore an entity to the collection, firing no events."""
if self.empty:
self._refuse_empty()
appender = self._data()._sa_appender
for item in items:
appender(item, _sa_initiator=False)
def bulk_remover(self):
return self._data()._sa_remover
def remove_with_event(self, item, initiator=None):
"""Remove an entity from the collection, firing mutation events."""
self._data()._sa_remover(item, _sa_initiator=initiator)
def remove_without_event(self, item):
"""Remove an entity from the collection, firing no events."""
if self.empty:
self._refuse_empty()
self._data()._sa_remover(item, _sa_initiator=False)
def clear_with_event(self, initiator=None):
"""Empty the collection, firing a mutation event for each entity."""
if self.empty:
self._refuse_empty()
remover = self._data()._sa_remover
for item in list(self):
remover(item, _sa_initiator=initiator)
def clear_without_event(self):
"""Empty the collection, firing no events."""
if self.empty:
self._refuse_empty()
remover = self._data()._sa_remover
for item in list(self):
remover(item, _sa_initiator=False)
def __iter__(self):
"""Iterate over entities in the collection."""
return iter(self._data()._sa_iterator())
def __len__(self):
"""Count entities in the collection."""
return len(list(self._data()._sa_iterator()))
def __bool__(self):
return True
__nonzero__ = __bool__
def fire_append_wo_mutation_event(self, item, initiator=None):
"""Notify that a entity is entering the collection but is already
present.
Initiator is a token owned by the InstrumentedAttribute that
initiated the membership mutation, and should be left as None
unless you are passing along an initiator value from a chained
operation.
.. versionadded:: 1.4.15
"""
if initiator is not False:
if self.invalidated:
self._warn_invalidated()
if self.empty:
self._reset_empty()
return self.attr.fire_append_wo_mutation_event(
self.owner_state, self.owner_state.dict, item, initiator
)
else:
return item
def fire_append_event(self, item, initiator=None):
"""Notify that a entity has entered the collection.
Initiator is a token owned by the InstrumentedAttribute that
initiated the membership mutation, and should be left as None
unless you are passing along an initiator value from a chained
operation.
"""
if initiator is not False:
if self.invalidated:
self._warn_invalidated()
if self.empty:
self._reset_empty()
return self.attr.fire_append_event(
self.owner_state, self.owner_state.dict, item, initiator
)
else:
return item
def fire_remove_event(self, item, initiator=None):
"""Notify that a entity has been removed from the collection.
Initiator is the InstrumentedAttribute that initiated the membership
mutation, and should be left as None unless you are passing along
an initiator value from a chained operation.
"""
if initiator is not False:
if self.invalidated:
self._warn_invalidated()
if self.empty:
self._reset_empty()
self.attr.fire_remove_event(
self.owner_state, self.owner_state.dict, item, initiator
)
def fire_pre_remove_event(self, initiator=None):
"""Notify that an entity is about to be removed from the collection.
Only called if the entity cannot be removed after calling
fire_remove_event().
"""
if self.invalidated:
self._warn_invalidated()
self.attr.fire_pre_remove_event(
self.owner_state, self.owner_state.dict, initiator=initiator
)
def __getstate__(self):
return {
"key": self._key,
"owner_state": self.owner_state,
"owner_cls": self.owner_state.class_,
"data": self.data,
"invalidated": self.invalidated,
"empty": self.empty,
}
def __setstate__(self, d):
self._key = d["key"]
self.owner_state = d["owner_state"]
self._data = weakref.ref(d["data"])
self._converter = d["data"]._sa_converter
d["data"]._sa_adapter = self
self.invalidated = d["invalidated"]
self.attr = getattr(d["owner_cls"], self._key).impl
self.empty = d.get("empty", False)
def bulk_replace(values, existing_adapter, new_adapter, initiator=None):
"""Load a new collection, firing events based on prior like membership.
Appends instances in ``values`` onto the ``new_adapter``. Events will be
fired for any instance not present in the ``existing_adapter``. Any
instances in ``existing_adapter`` not present in ``values`` will have
remove events fired upon them.
:param values: An iterable of collection member instances
:param existing_adapter: A :class:`.CollectionAdapter` of
instances to be replaced
:param new_adapter: An empty :class:`.CollectionAdapter`
to load with ``values``
"""
assert isinstance(values, list)
idset = util.IdentitySet
existing_idset = idset(existing_adapter or ())
constants = existing_idset.intersection(values or ())
additions = idset(values or ()).difference(constants)
removals = existing_idset.difference(constants)
appender = new_adapter.bulk_appender()
for member in values or ():
if member in additions:
appender(member, _sa_initiator=initiator)
elif member in constants:
appender(member, _sa_initiator=False)
if existing_adapter:
for member in removals:
existing_adapter.fire_remove_event(member, initiator=initiator)
def prepare_instrumentation(factory):
"""Prepare a callable for future use as a collection class factory.
Given a collection class factory (either a type or no-arg callable),
return another factory that will produce compatible instances when
called.
This function is responsible for converting collection_class=list
into the run-time behavior of collection_class=InstrumentedList.
"""
# Convert a builtin to 'Instrumented*'
if factory in __canned_instrumentation:
factory = __canned_instrumentation[factory]
# Create a specimen
cls = type(factory())
# Did factory callable return a builtin?
if cls in __canned_instrumentation:
# Wrap it so that it returns our 'Instrumented*'
factory = __converting_factory(cls, factory)
cls = factory()
# Instrument the class if needed.
if __instrumentation_mutex.acquire():
try:
if getattr(cls, "_sa_instrumented", None) != id(cls):
_instrument_class(cls)
finally:
__instrumentation_mutex.release()
return factory
def __converting_factory(specimen_cls, original_factory):
"""Return a wrapper that converts a "canned" collection like
set, dict, list into the Instrumented* version.
"""
instrumented_cls = __canned_instrumentation[specimen_cls]
def wrapper():
collection = original_factory()
return instrumented_cls(collection)
# often flawed but better than nothing
wrapper.__name__ = "%sWrapper" % original_factory.__name__
wrapper.__doc__ = original_factory.__doc__
return wrapper
def _instrument_class(cls):
"""Modify methods in a class and install instrumentation."""
# In the normal call flow, a request for any of the 3 basic collection
# types is transformed into one of our trivial subclasses
# (e.g. InstrumentedList). Catch anything else that sneaks in here...
if cls.__module__ == "__builtin__":
raise sa_exc.ArgumentError(
"Can not instrument a built-in type. Use a "
"subclass, even a trivial one."
)
roles, methods = _locate_roles_and_methods(cls)
_setup_canned_roles(cls, roles, methods)
_assert_required_roles(cls, roles, methods)
_set_collection_attributes(cls, roles, methods)
def _locate_roles_and_methods(cls):
"""search for _sa_instrument_role-decorated methods in
method resolution order, assign to roles.
"""
roles = {}
methods = {}
for supercls in cls.__mro__:
for name, method in vars(supercls).items():
if not callable(method):
continue
# note role declarations
if hasattr(method, "_sa_instrument_role"):
role = method._sa_instrument_role
assert role in (
"appender",
"remover",
"iterator",
"converter",
)
roles.setdefault(role, name)
# transfer instrumentation requests from decorated function
# to the combined queue
before, after = None, None
if hasattr(method, "_sa_instrument_before"):
op, argument = method._sa_instrument_before
assert op in ("fire_append_event", "fire_remove_event")
before = op, argument
if hasattr(method, "_sa_instrument_after"):
op = method._sa_instrument_after
assert op in ("fire_append_event", "fire_remove_event")
after = op
if before:
methods[name] = before + (after,)
elif after:
methods[name] = None, None, after
return roles, methods
def _setup_canned_roles(cls, roles, methods):
"""see if this class has "canned" roles based on a known
collection type (dict, set, list). Apply those roles
as needed to the "roles" dictionary, and also
prepare "decorator" methods
"""
collection_type = util.duck_type_collection(cls)
if collection_type in __interfaces:
canned_roles, decorators = __interfaces[collection_type]
for role, name in canned_roles.items():
roles.setdefault(role, name)
# apply ABC auto-decoration to methods that need it
for method, decorator in decorators.items():
fn = getattr(cls, method, None)
if (
fn
and method not in methods
and not hasattr(fn, "_sa_instrumented")
):
setattr(cls, method, decorator(fn))
def _assert_required_roles(cls, roles, methods):
"""ensure all roles are present, and apply implicit instrumentation if
needed
"""
if "appender" not in roles or not hasattr(cls, roles["appender"]):
raise sa_exc.ArgumentError(
"Type %s must elect an appender method to be "
"a collection class" % cls.__name__
)
elif roles["appender"] not in methods and not hasattr(
getattr(cls, roles["appender"]), "_sa_instrumented"
):
methods[roles["appender"]] = ("fire_append_event", 1, None)
if "remover" not in roles or not hasattr(cls, roles["remover"]):
raise sa_exc.ArgumentError(
"Type %s must elect a remover method to be "
"a collection class" % cls.__name__
)
elif roles["remover"] not in methods and not hasattr(
getattr(cls, roles["remover"]), "_sa_instrumented"
):
methods[roles["remover"]] = ("fire_remove_event", 1, None)
if "iterator" not in roles or not hasattr(cls, roles["iterator"]):
raise sa_exc.ArgumentError(
"Type %s must elect an iterator method to be "
"a collection class" % cls.__name__
)
def _set_collection_attributes(cls, roles, methods):
"""apply ad-hoc instrumentation from decorators, class-level defaults
and implicit role declarations
"""
for method_name, (before, argument, after) in methods.items():
setattr(
cls,
method_name,
_instrument_membership_mutator(
getattr(cls, method_name), before, argument, after
),
)
# intern the role map
for role, method_name in roles.items():
setattr(cls, "_sa_%s" % role, getattr(cls, method_name))
cls._sa_adapter = None
if not hasattr(cls, "_sa_converter"):
cls._sa_converter = None
cls._sa_instrumented = id(cls)
def _instrument_membership_mutator(method, before, argument, after):
"""Route method args and/or return value through the collection
adapter."""
# This isn't smart enough to handle @adds(1) for 'def fn(self, (a, b))'
if before:
fn_args = list(
util.flatten_iterator(inspect_getfullargspec(method)[0])
)
if isinstance(argument, int):
pos_arg = argument
named_arg = len(fn_args) > argument and fn_args[argument] or None
else:
if argument in fn_args:
pos_arg = fn_args.index(argument)
else:
pos_arg = None
named_arg = argument
del fn_args
def wrapper(*args, **kw):
if before:
if pos_arg is None:
if named_arg not in kw:
raise sa_exc.ArgumentError(
"Missing argument %s" % argument
)
value = kw[named_arg]
else:
if len(args) > pos_arg:
value = args[pos_arg]
elif named_arg in kw:
value = kw[named_arg]
else:
raise sa_exc.ArgumentError(
"Missing argument %s" % argument
)
initiator = kw.pop("_sa_initiator", None)
if initiator is False:
executor = None
else:
executor = args[0]._sa_adapter
if before and executor:
getattr(executor, before)(value, initiator)
if not after or not executor:
return method(*args, **kw)
else:
res = method(*args, **kw)
if res is not None:
getattr(executor, after)(res, initiator)
return res
wrapper._sa_instrumented = True
if hasattr(method, "_sa_instrument_role"):
wrapper._sa_instrument_role = method._sa_instrument_role
wrapper.__name__ = method.__name__
wrapper.__doc__ = method.__doc__
return wrapper
def __set_wo_mutation(collection, item, _sa_initiator=None):
"""Run set wo mutation events.
The collection is not mutated.
"""
if _sa_initiator is not False:
executor = collection._sa_adapter
if executor:
executor.fire_append_wo_mutation_event(item, _sa_initiator)
def __set(collection, item, _sa_initiator=None):
"""Run set events.
This event always occurs before the collection is actually mutated.
"""
if _sa_initiator is not False:
executor = collection._sa_adapter
if executor:
item = executor.fire_append_event(item, _sa_initiator)
return item
def __del(collection, item, _sa_initiator=None):
"""Run del events.
This event occurs before the collection is actually mutated, *except*
in the case of a pop operation, in which case it occurs afterwards.
For pop operations, the __before_pop hook is called before the
operation occurs.
"""
if _sa_initiator is not False:
executor = collection._sa_adapter
if executor:
executor.fire_remove_event(item, _sa_initiator)
def __before_pop(collection, _sa_initiator=None):
"""An event which occurs on a before a pop() operation occurs."""
executor = collection._sa_adapter
if executor:
executor.fire_pre_remove_event(_sa_initiator)
def _list_decorators():
"""Tailored instrumentation wrappers for any list-like class."""
def _tidy(fn):
fn._sa_instrumented = True
fn.__doc__ = getattr(list, fn.__name__).__doc__
def append(fn):
def append(self, item, _sa_initiator=None):
item = __set(self, item, _sa_initiator)
fn(self, item)
_tidy(append)
return append
def remove(fn):
def remove(self, value, _sa_initiator=None):
__del(self, value, _sa_initiator)
# testlib.pragma exempt:__eq__
fn(self, value)
_tidy(remove)
return remove
def insert(fn):
def insert(self, index, value):
value = __set(self, value)
fn(self, index, value)
_tidy(insert)
return insert
def __setitem__(fn):
def __setitem__(self, index, value):
if not isinstance(index, slice):
existing = self[index]
if existing is not None:
__del(self, existing)
value = __set(self, value)
fn(self, index, value)
else:
# slice assignment requires __delitem__, insert, __len__
step = index.step or 1
start = index.start or 0
if start < 0:
start += len(self)
if index.stop is not None:
stop = index.stop
else:
stop = len(self)
if stop < 0:
stop += len(self)
if step == 1:
if value is self:
return
for i in range(start, stop, step):
if len(self) > start:
del self[start]
for i, item in enumerate(value):
self.insert(i + start, item)
else:
rng = list(range(start, stop, step))
if len(value) != len(rng):
raise ValueError(
"attempt to assign sequence of size %s to "
"extended slice of size %s"
% (len(value), len(rng))
)
for i, item in zip(rng, value):
self.__setitem__(i, item)
_tidy(__setitem__)
return __setitem__
def __delitem__(fn):
def __delitem__(self, index):
if not isinstance(index, slice):
item = self[index]
__del(self, item)
fn(self, index)
else:
# slice deletion requires __getslice__ and a slice-groking
# __getitem__ for stepped deletion
# note: not breaking this into atomic dels
for item in self[index]:
__del(self, item)
fn(self, index)
_tidy(__delitem__)
return __delitem__
def extend(fn):
def extend(self, iterable):
for value in list(iterable):
self.append(value)
_tidy(extend)
return extend
def __iadd__(fn):
def __iadd__(self, iterable):
# list.__iadd__ takes any iterable and seems to let TypeError
# raise as-is instead of returning NotImplemented
for value in list(iterable):
self.append(value)
return self
_tidy(__iadd__)
return __iadd__
def pop(fn):
def pop(self, index=-1):
__before_pop(self)
item = fn(self, index)
__del(self, item)
return item
_tidy(pop)
return pop
def clear(fn):
def clear(self, index=-1):
for item in self:
__del(self, item)
fn(self)
_tidy(clear)
return clear
# __imul__ : not wrapping this. all members of the collection are already
# present, so no need to fire appends... wrapping it with an explicit
# decorator is still possible, so events on *= can be had if they're
# desired. hard to imagine a use case for __imul__, though.
l = locals().copy()
l.pop("_tidy")
return l
def _dict_decorators():
"""Tailored instrumentation wrappers for any dict-like mapping class."""
def _tidy(fn):
fn._sa_instrumented = True
fn.__doc__ = getattr(dict, fn.__name__).__doc__
Unspecified = util.symbol("Unspecified")
def __setitem__(fn):
def __setitem__(self, key, value, _sa_initiator=None):
if key in self:
__del(self, self[key], _sa_initiator)
value = __set(self, value, _sa_initiator)
fn(self, key, value)
_tidy(__setitem__)
return __setitem__
def __delitem__(fn):
def __delitem__(self, key, _sa_initiator=None):
if key in self:
__del(self, self[key], _sa_initiator)
fn(self, key)
_tidy(__delitem__)
return __delitem__
def clear(fn):
def clear(self):
for key in self:
__del(self, self[key])
fn(self)
_tidy(clear)
return clear
def pop(fn):
def pop(self, key, default=Unspecified):
__before_pop(self)
_to_del = key in self
if default is Unspecified:
item = fn(self, key)
else:
item = fn(self, key, default)
if _to_del:
__del(self, item)
return item
_tidy(pop)
return pop
def popitem(fn):
def popitem(self):
__before_pop(self)
item = fn(self)
__del(self, item[1])
return item
_tidy(popitem)
return popitem
def setdefault(fn):
def setdefault(self, key, default=None):
if key not in self:
self.__setitem__(key, default)
return default
else:
value = self.__getitem__(key)
if value is default:
__set_wo_mutation(self, value, None)
return value
_tidy(setdefault)
return setdefault
def update(fn):
def update(self, __other=Unspecified, **kw):
if __other is not Unspecified:
if hasattr(__other, "keys"):
for key in list(__other):
if key not in self or self[key] is not __other[key]:
self[key] = __other[key]
else:
__set_wo_mutation(self, __other[key], None)
else:
for key, value in __other:
if key not in self or self[key] is not value:
self[key] = value
else:
__set_wo_mutation(self, value, None)
for key in kw:
if key not in self or self[key] is not kw[key]:
self[key] = kw[key]
else:
__set_wo_mutation(self, kw[key], None)
_tidy(update)
return update
l = locals().copy()
l.pop("_tidy")
l.pop("Unspecified")
return l
_set_binop_bases = (set, frozenset)
def _set_binops_check_strict(self, obj):
"""Allow only set, frozenset and self.__class__-derived
objects in binops."""
return isinstance(obj, _set_binop_bases + (self.__class__,))
def _set_binops_check_loose(self, obj):
"""Allow anything set-like to participate in set binops."""
return (
isinstance(obj, _set_binop_bases + (self.__class__,))
or util.duck_type_collection(obj) == set
)
def _set_decorators():
"""Tailored instrumentation wrappers for any set-like class."""
def _tidy(fn):
fn._sa_instrumented = True
fn.__doc__ = getattr(set, fn.__name__).__doc__
Unspecified = util.symbol("Unspecified")
def add(fn):
def add(self, value, _sa_initiator=None):
if value not in self:
value = __set(self, value, _sa_initiator)
else:
__set_wo_mutation(self, value, _sa_initiator)
# testlib.pragma exempt:__hash__
fn(self, value)
_tidy(add)
return add
def discard(fn):
def discard(self, value, _sa_initiator=None):
# testlib.pragma exempt:__hash__
if value in self:
__del(self, value, _sa_initiator)
# testlib.pragma exempt:__hash__
fn(self, value)
_tidy(discard)
return discard
def remove(fn):
def remove(self, value, _sa_initiator=None):
# testlib.pragma exempt:__hash__
if value in self:
__del(self, value, _sa_initiator)
# testlib.pragma exempt:__hash__
fn(self, value)
_tidy(remove)
return remove
def pop(fn):
def pop(self):
__before_pop(self)
item = fn(self)
# for set in particular, we have no way to access the item
# that will be popped before pop is called.
__del(self, item)
return item
_tidy(pop)
return pop
def clear(fn):
def clear(self):
for item in list(self):
self.remove(item)
_tidy(clear)
return clear
def update(fn):
def update(self, value):
for item in value:
self.add(item)
_tidy(update)
return update
def __ior__(fn):
def __ior__(self, value):
if not _set_binops_check_strict(self, value):
return NotImplemented
for item in value:
self.add(item)
return self
_tidy(__ior__)
return __ior__
def difference_update(fn):
def difference_update(self, value):
for item in value:
self.discard(item)
_tidy(difference_update)
return difference_update
def __isub__(fn):
def __isub__(self, value):
if not _set_binops_check_strict(self, value):
return NotImplemented
for item in value:
self.discard(item)
return self
_tidy(__isub__)
return __isub__
def intersection_update(fn):
def intersection_update(self, other):
want, have = self.intersection(other), set(self)
remove, add = have - want, want - have
for item in remove:
self.remove(item)
for item in add:
self.add(item)
_tidy(intersection_update)
return intersection_update
def __iand__(fn):
def __iand__(self, other):
if not _set_binops_check_strict(self, other):
return NotImplemented
want, have = self.intersection(other), set(self)
remove, add = have - want, want - have
for item in remove:
self.remove(item)
for item in add:
self.add(item)
return self
_tidy(__iand__)
return __iand__
def symmetric_difference_update(fn):
def symmetric_difference_update(self, other):
want, have = self.symmetric_difference(other), set(self)
remove, add = have - want, want - have
for item in remove:
self.remove(item)
for item in add:
self.add(item)
_tidy(symmetric_difference_update)
return symmetric_difference_update
def __ixor__(fn):
def __ixor__(self, other):
if not _set_binops_check_strict(self, other):
return NotImplemented
want, have = self.symmetric_difference(other), set(self)
remove, add = have - want, want - have
for item in remove:
self.remove(item)
for item in add:
self.add(item)
return self
_tidy(__ixor__)
return __ixor__
l = locals().copy()
l.pop("_tidy")
l.pop("Unspecified")
return l
class InstrumentedList(list):
"""An instrumented version of the built-in list."""
class InstrumentedSet(set):
"""An instrumented version of the built-in set."""
class InstrumentedDict(dict):
"""An instrumented version of the built-in dict."""
__canned_instrumentation = {
list: InstrumentedList,
set: InstrumentedSet,
dict: InstrumentedDict,
}
__interfaces = {
list: (
{"appender": "append", "remover": "remove", "iterator": "__iter__"},
_list_decorators(),
),
set: (
{"appender": "add", "remover": "remove", "iterator": "__iter__"},
_set_decorators(),
),
# decorators are required for dicts and object collections.
dict: ({"iterator": "values"}, _dict_decorators()),
}
class MappedCollection(dict):
"""A basic dictionary-based collection class.
Extends dict with the minimal bag semantics that collection
classes require. ``set`` and ``remove`` are implemented in terms
of a keying function: any callable that takes an object and
returns an object for use as a dictionary key.
"""
def __init__(self, keyfunc):
"""Create a new collection with keying provided by keyfunc.
keyfunc may be any callable that takes an object and returns an object
for use as a dictionary key.
The keyfunc will be called every time the ORM needs to add a member by
value-only (such as when loading instances from the database) or
remove a member. The usual cautions about dictionary keying apply-
``keyfunc(object)`` should return the same output for the life of the
collection. Keying based on mutable properties can result in
unreachable instances "lost" in the collection.
"""
self.keyfunc = keyfunc
@collection.appender
@collection.internally_instrumented
def set(self, value, _sa_initiator=None):
"""Add an item by value, consulting the keyfunc for the key."""
key = self.keyfunc(value)
self.__setitem__(key, value, _sa_initiator)
@collection.remover
@collection.internally_instrumented
def remove(self, value, _sa_initiator=None):
"""Remove an item by value, consulting the keyfunc for the key."""
key = self.keyfunc(value)
# Let self[key] raise if key is not in this collection
# testlib.pragma exempt:__ne__
if self[key] != value:
raise sa_exc.InvalidRequestError(
"Can not remove '%s': collection holds '%s' for key '%s'. "
"Possible cause: is the MappedCollection key function "
"based on mutable properties or properties that only obtain "
"values after flush?" % (value, self[key], key)
)
self.__delitem__(key, _sa_initiator)
# ensure instrumentation is associated with
# these built-in classes; if a user-defined class
# subclasses these and uses @internally_instrumented,
# the superclass is otherwise not instrumented.
# see [ticket:2406].
_instrument_class(MappedCollection)
_instrument_class(InstrumentedList)
_instrument_class(InstrumentedSet)
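# Illustrative usage sketch (the Note class and its "keyword" attribute are
# hypothetical): MappedCollection stores each added member under
# keyfunc(member), so
#
#   notes = MappedCollection(keyfunc=lambda note: note.keyword)
#   notes.set(a_note)      # held as notes[a_note.keyword]
#
# The same callable can also back a relationship collection via
# relationship(..., collection_class=lambda: MappedCollection(lambda n: n.keyword)).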
| 32.042781
| 78
| 0.621996
|
26e01f3706948ee9fdf0451e0ff891a7945fbade
| 1,536
|
py
|
Python
|
src/loader.py
|
mlennox/numeraiexplore
|
888468f0802358f9778295b8f1fdf5f5f760053c
|
[
"MIT"
] | 1
|
2019-07-09T01:35:16.000Z
|
2019-07-09T01:35:16.000Z
|
src/loader.py
|
mlennox/numeraiexplore
|
888468f0802358f9778295b8f1fdf5f5f760053c
|
[
"MIT"
] | null | null | null |
src/loader.py
|
mlennox/numeraiexplore
|
888468f0802358f9778295b8f1fdf5f5f760053c
|
[
"MIT"
] | null | null | null |
import pandas as pd
def load_training():
return pd.read_csv(
'./data/numerai_training_data.csv', header=0)
def load_tournament():
return pd.read_csv(
'./data/numerai_tournament_data.csv', header=0)
def pull_features(data_source, for_era=None, target_bernie_value=None):
if for_era is not None:
dataset = data_source[data_source['era'] == 'era' + str(for_era)]
else:
dataset = data_source
if target_bernie_value is not None:
dataset = dataset[dataset['target_bernie'] == target_bernie_value]
# print(dataset.describe())
features = [feature for feature in list(dataset) if "feature" in feature]
return dataset[features]
def pull_lots(dataset):
features = [feature for feature in list(dataset) if "feature" in feature]
features.append('era')
features.append('target_bernie')
return dataset[features]
def pull_features_and_era_label(data_source, for_era):
    # if for_era is 12, only rows whose 'era' column matches 'era12' are pulled,
    # and the returned dataframe has its feature labels suffixed with the era,
    # e.g. 'feature23_12'
dataset = pull_features(data_source, for_era)
features = [feature for feature in list(dataset) if "feature" in feature]
    features_suffixed = [f + '_' + str(for_era) for f in features]
# print('SUFFIXED', features_suffixed)
updated_columns = dict(zip(features, features_suffixed))
print('UPDATED COLUMNS', updated_columns)
dataset.rename(columns=updated_columns, inplace=True)
return dataset
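# Illustrative usage sketch (the era number and target value are hypothetical):
#
#   train = load_training()
#   era12_features = pull_features(train, for_era=12, target_bernie_value=1)
#   era12_labeled = pull_features_and_era_label(train, for_era=12)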
| 34.909091
| 85
| 0.713542
|
505be55c5e56be133e20a1c1780045f41296b2ae
| 1,650
|
py
|
Python
|
pychron/pipeline/plot/editors/spectrum_editor.py
|
ASUPychron/pychron
|
dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76
|
[
"Apache-2.0"
] | 31
|
2016-03-07T02:38:17.000Z
|
2022-02-14T18:23:43.000Z
|
pychron/pipeline/plot/editors/spectrum_editor.py
|
ASUPychron/pychron
|
dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76
|
[
"Apache-2.0"
] | 1,626
|
2015-01-07T04:52:35.000Z
|
2022-03-25T19:15:59.000Z
|
pychron/pipeline/plot/editors/spectrum_editor.py
|
UIllinoisHALPychron/pychron
|
f21b79f4592a9fb9dc9a4cb2e4e943a3885ededc
|
[
"Apache-2.0"
] | 26
|
2015-05-23T00:10:06.000Z
|
2022-03-07T16:51:57.000Z
|
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from traits.api import Instance
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.options.options_manager import SpectrumOptionsManager
from pychron.pipeline.plot.editors.interpreted_age_editor import InterpretedAgeEditor
from pychron.pipeline.plot.models.spectrum_model import SpectrumModel
class SpectrumEditor(InterpretedAgeEditor):
plotter_options_manager = Instance(SpectrumOptionsManager, ())
basename = "spec"
figure_model_klass = SpectrumModel
def _set_preferred_age_kind(self, ias):
for ia in ias:
if ia.plateau_age:
ia.preferred_age_kind = "Plateau"
else:
ia.preferred_age_kind = "Integrated"
# ============= EOF =============================================
| 40.243902
| 85
| 0.597576
|
4395c486be6be5c4d811f96353eded08d823105a
| 120
|
py
|
Python
|
plugins/get_url/komand_get_url/util/constants.py
|
JaredAllen13/insightconnect-plugins
|
f68ce8c60ad20439284228dfcbcd9f8c1c0c7d31
|
[
"MIT"
] | null | null | null |
plugins/get_url/komand_get_url/util/constants.py
|
JaredAllen13/insightconnect-plugins
|
f68ce8c60ad20439284228dfcbcd9f8c1c0c7d31
|
[
"MIT"
] | null | null | null |
plugins/get_url/komand_get_url/util/constants.py
|
JaredAllen13/insightconnect-plugins
|
f68ce8c60ad20439284228dfcbcd9f8c1c0c7d31
|
[
"MIT"
] | null | null | null |
DEFAULT_USER_AGENT = "Mozilla/5.0"
DEFAULT_TIMEOUT = 60
DEFAULT_ENCODING = "utf-8"
DEFAULT_CACHE_FOLDER = "/var/cache/"
| 24
| 36
| 0.766667
|
f243d9573d49de3564ec245c4a468dd324ac39bd
| 9,212
|
py
|
Python
|
experiments/cleverhans/tutorials/mnist_tutorial_jsma.py
|
QMLB-GPapersCapstoneRepos/capstone18-AdversarialTexPlayground2
|
8feb6e1ef1d293dcb8869faa8a84216cdc3dd5ce
|
[
"Apache-2.0"
] | null | null | null |
experiments/cleverhans/tutorials/mnist_tutorial_jsma.py
|
QMLB-GPapersCapstoneRepos/capstone18-AdversarialTexPlayground2
|
8feb6e1ef1d293dcb8869faa8a84216cdc3dd5ce
|
[
"Apache-2.0"
] | 1
|
2020-09-25T21:55:03.000Z
|
2020-09-25T21:55:03.000Z
|
experiments/cleverhans/tutorials/mnist_tutorial_jsma.py
|
Qdata4Capstone/capstone18-AdversarialTexPlayground2
|
8feb6e1ef1d293dcb8869faa8a84216cdc3dd5ce
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import keras
import numpy as np
from six.moves import xrange
import tensorflow as tf
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
from cleverhans.attacks import SaliencyMapMethod
from cleverhans.utils import other_classes, cnn_model
from cleverhans.utils import pair_visual, grid_visual, AccuracyReport
from cleverhans.utils_mnist import data_mnist
from cleverhans.utils_tf import model_train, model_eval, model_argmax
FLAGS = flags.FLAGS
def mnist_tutorial_jsma(train_start=0, train_end=60000, test_start=0,
test_end=10000, viz_enabled=True, nb_epochs=6,
batch_size=128, nb_classes=10, source_samples=10,
learning_rate=0.1):
"""
MNIST tutorial for the Jacobian-based saliency map approach (JSMA)
:param train_start: index of first training set example
:param train_end: index of last training set example
:param test_start: index of first test set example
:param test_end: index of last test set example
:param viz_enabled: (boolean) activate plots of adversarial examples
:param nb_epochs: number of epochs to train model
:param batch_size: size of training batches
:param nb_classes: number of output classes
:param source_samples: number of test inputs to attack
:param learning_rate: learning rate for training
:return: an AccuracyReport object
"""
# Object used to keep track of (and return) key accuracies
report = AccuracyReport()
# MNIST-specific dimensions
img_rows = 28
img_cols = 28
channels = 1
# Disable Keras learning phase since we will be serving through tensorflow
keras.layers.core.K.set_learning_phase(0)
# Set TF random seed to improve reproducibility
tf.set_random_seed(1234)
# Image dimensions ordering should follow the TensorFlow convention
if keras.backend.image_dim_ordering() != 'tf':
keras.backend.set_image_dim_ordering('tf')
print("INFO: '~/.keras/keras.json' sets 'image_dim_ordering' "
"to 'th', temporarily setting to 'tf'")
# Create TF session and set as Keras backend session
sess = tf.Session()
keras.backend.set_session(sess)
print("Created TensorFlow session and set Keras backend.")
# Get MNIST test data
X_train, Y_train, X_test, Y_test = data_mnist(train_start=train_start,
train_end=train_end,
test_start=test_start,
test_end=test_end)
# Define input TF placeholder
x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
y = tf.placeholder(tf.float32, shape=(None, 10))
# Define TF model graph
model = cnn_model()
preds = model(x)
print("Defined TensorFlow model graph.")
###########################################################################
# Training the model using TensorFlow
###########################################################################
# Train an MNIST model
train_params = {
'nb_epochs': nb_epochs,
'batch_size': batch_size,
'learning_rate': learning_rate
}
model_train(sess, x, y, preds, X_train, Y_train, args=train_params)
# Evaluate the accuracy of the MNIST model on legitimate test examples
eval_params = {'batch_size': batch_size}
accuracy = model_eval(sess, x, y, preds, X_test, Y_test, args=eval_params)
assert X_test.shape[0] == test_end - test_start, X_test.shape
print('Test accuracy on legitimate test examples: {0}'.format(accuracy))
report.clean_train_clean_eval = accuracy
###########################################################################
# Craft adversarial examples using the Jacobian-based saliency map approach
###########################################################################
print('Crafting ' + str(source_samples) + ' * ' + str(nb_classes-1)
+ ' adversarial examples')
# Keep track of success (adversarial example classified in target)
results = np.zeros((nb_classes, source_samples), dtype='i')
# Rate of perturbed features for each test set example and target class
perturbations = np.zeros((nb_classes, source_samples), dtype='f')
# Initialize our array for grid visualization
grid_shape = (nb_classes, nb_classes, img_rows, img_cols, channels)
grid_viz_data = np.zeros(grid_shape, dtype='f')
# Instantiate a SaliencyMapMethod attack object
jsma = SaliencyMapMethod(model, back='tf', sess=sess)
jsma_params = {'theta': 1., 'gamma': 0.1,
'nb_classes': nb_classes, 'clip_min': 0.,
'clip_max': 1., 'targets': y,
'y_val': None}
figure = None
# Loop over the samples we want to perturb into adversarial examples
for sample_ind in xrange(0, source_samples):
print('--------------------------------------')
print('Attacking input %i/%i' % (sample_ind + 1, source_samples))
sample = X_test[sample_ind:(sample_ind+1)]
# We want to find an adversarial example for each possible target class
# (i.e. all classes that differ from the label given in the dataset)
current_class = int(np.argmax(Y_test[sample_ind]))
target_classes = other_classes(nb_classes, current_class)
# For the grid visualization, keep original images along the diagonal
grid_viz_data[current_class, current_class, :, :, :] = np.reshape(
sample, (img_rows, img_cols, channels))
# Loop over all target classes
for target in target_classes:
print('Generating adv. example for target class %i' % target)
# This call runs the Jacobian-based saliency map approach
one_hot_target = np.zeros((1, nb_classes), dtype=np.float32)
one_hot_target[0, target] = 1
jsma_params['y_val'] = one_hot_target
adv_x = jsma.generate_np(sample, **jsma_params)
# Check if success was achieved
res = int(model_argmax(sess, x, preds, adv_x) == target)
            # Compute the number of modified features
adv_x_reshape = adv_x.reshape(-1)
test_in_reshape = X_test[sample_ind].reshape(-1)
nb_changed = np.where(adv_x_reshape != test_in_reshape)[0].shape[0]
percent_perturb = float(nb_changed) / adv_x.reshape(-1).shape[0]
# Display the original and adversarial images side-by-side
if viz_enabled:
figure = pair_visual(
np.reshape(sample, (img_rows, img_cols)),
np.reshape(adv_x, (img_rows, img_cols)), figure)
# Add our adversarial example to our grid data
grid_viz_data[target, current_class, :, :, :] = np.reshape(
adv_x, (img_rows, img_cols, channels))
# Update the arrays for later analysis
results[target, sample_ind] = res
perturbations[target, sample_ind] = percent_perturb
print('--------------------------------------')
# Compute the number of adversarial examples that were successfully found
nb_targets_tried = ((nb_classes - 1) * source_samples)
succ_rate = float(np.sum(results)) / nb_targets_tried
print('Avg. rate of successful adv. examples {0:.4f}'.format(succ_rate))
report.clean_train_adv_eval = 1. - succ_rate
# Compute the average distortion introduced by the algorithm
percent_perturbed = np.mean(perturbations)
print('Avg. rate of perturbed features {0:.4f}'.format(percent_perturbed))
# Compute the average distortion introduced for successful samples only
percent_perturb_succ = np.mean(perturbations * (results == 1))
print('Avg. rate of perturbed features for successful '
'adversarial examples {0:.4f}'.format(percent_perturb_succ))
# Close TF session
sess.close()
# Finally, block & display a grid of all the adversarial examples
if viz_enabled:
import matplotlib.pyplot as plt
plt.close(figure)
_ = grid_visual(grid_viz_data)
return report
def main(argv=None):
mnist_tutorial_jsma(viz_enabled=FLAGS.viz_enabled,
nb_epochs=FLAGS.nb_epochs,
batch_size=FLAGS.batch_size,
nb_classes=FLAGS.nb_classes,
source_samples=FLAGS.source_samples,
learning_rate=FLAGS.learning_rate)
if __name__ == '__main__':
flags.DEFINE_boolean('viz_enabled', True, 'Visualize adversarial ex.')
flags.DEFINE_integer('nb_epochs', 6, 'Number of epochs to train model')
flags.DEFINE_integer('batch_size', 128, 'Size of training batches')
flags.DEFINE_integer('nb_classes', 10, 'Number of output classes')
flags.DEFINE_integer('source_samples', 10, 'Nb of test inputs to attack')
flags.DEFINE_float('learning_rate', 0.1, 'Learning rate for training')
app.run()
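# Illustrative invocation (the flag values simply restate the defaults declared above):
#
#   python mnist_tutorial_jsma.py --nb_epochs=6 --source_samples=10 --viz_enabled=False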
| 42.256881
| 79
| 0.640252
|
3299c04c4ae554f3701d7b1ac46db7238a44ac28
| 4,021
|
py
|
Python
|
DarcyFrictionFactor.py
|
DrTol/pressure_loss_calculator-Python
|
44f2c2adce161ef663fd6774d1f1c188b4c7e5c3
|
[
"MIT"
] | 2
|
2020-07-16T01:13:05.000Z
|
2021-03-17T06:45:48.000Z
|
DarcyFrictionFactor.py
|
DrTol/pressure_loss_calculator-Python
|
44f2c2adce161ef663fd6774d1f1c188b4c7e5c3
|
[
"MIT"
] | null | null | null |
DarcyFrictionFactor.py
|
DrTol/pressure_loss_calculator-Python
|
44f2c2adce161ef663fd6774d1f1c188b4c7e5c3
|
[
"MIT"
] | 3
|
2020-12-13T15:08:28.000Z
|
2022-01-11T10:48:55.000Z
|
## returns the Darcy-Weisbach friction factor for pressure loss calculations
# prepared by Hakan İbrahim Tol, PhD on 02/07/2019
import math
import warnings
# via Clamond algorithm _ Copied,Modified,Pasted!
def f_Clamond(R,K=0):
# DWc = COLEBROOK(R,K) fast, accurate and robust computation of the
# Darcy-Weisbach friction factor F according to the Colebrook equation:
# - -
# 1 | K 2.51 |
# --------- = -2 * Log_10 | ----- + ------------- |
# sqrt(DWc) | 3.7 R * sqrt(DWc) |
# - -
# INPUT:
# R : Reynolds' number (should be >= 2300).
# K : Equivalent sand roughness height divided by the hydraulic
# diameter (default K=0).
#
# OUTPUT:
# DWc : Darcy Weisbach Friction factor.
#
# FORMAT:
# R, K and DWc are ONLY scalars in this Python Module
#
# ACCURACY:
# Around machine precision forall R > 3 and forall 0 <= K,
# i.e. forall values of physical interest.
#
# EXAMPLE: DWc = f_Clamond(7e5,0.01)
#
# Method: Quartic iterations.
# Reference: http://arxiv.org/abs/0810.5564
# Read this reference to understand the method and to modify the code.
# Author: D. Clamond, 2008-09-16.
# Modified for Python by Hakan İbrahim Tol, PhD, 2019-07-02
# Check for errors.
if R<2300:
        warnings.warn("The Colebrook equation is valid for Reynolds numbers >= 2300.")
if K<0:
warnings.warn('The relative sand roughness must be non-negative.')
# Initialization.
X1 = K * R * 0.123968186335417556 # X1 <- K * R * log(10) / 18.574.
    X2 = math.log(R) - 0.779397488455682028 # X2 <- log( R * log(10) / 5.02 )
# Initial guess.
DWc = X2 - 0.2
# First iteration.
E = ( math.log(X1+DWc) - 0.2 ) / ( 1 + X1 + DWc )
DWc = DWc - (1+X1+DWc+0.5*E) * E *(X1+DWc) / (1+X1+DWc+E*(1+E/3))
# Second iteration (remove the next two lines for moderate accuracy).
E = ( math.log(X1+DWc) + DWc - X2 ) / ( 1 + X1 + DWc )
DWc = DWc - (1+X1+DWc+0.5*E) * E *(X1+DWc) / (1+X1+DWc+E*(1+E/3))
# Finalized solution.
DWc = 1.151292546497022842 / DWc # DWc <- 0.5 * log(10) / DWc;
DWc = DWc * DWc # DWc <- Friction factor.
return DWc
# via solving implicit Colebrook-White equation
def f_ColebrookWhite(D,Re,aRou,fTol=0.001,MaxIter=2000):
# INPUTS
# D : Inner diameter of the pipe [mm]
# Re : Reynolds Number [-]
# aRou : Absolute roughness of pipe [mm]
# fTol : Iteration termination tolerance
# MaxIter : Maximum limit (iteration)
# Initializing the Iteration
error=10; # Iteration error
IterNum=0; # Iteration steps number
x0=f_SwameeJain(D,Re,aRou) # Initial estimate
# Fasten your seat belts - Iteration starts
while error>fTol and IterNum<MaxIter:
x1 = (2*math.log10((aRou/D)/3.7+2.51/(Re*math.sqrt(x0))))**(-2)
error=abs(x1-x0)
IterNum=IterNum+1
x0=x1
return x1
# via solving explicit equation by Swamee PK & Jain AK
def f_SwameeJain(D,Re,aRou):
# INPUTS
# D : Inner diameter of the pipe [mm]
# Re : Reynolds Number [-]
# aRou : Absolute roughness of pipe [mm]
# Checking limitations
if Re<5000 or Re>1e8:
        warnings.warn('Swamee&Jain algorithm is valid for a Reynolds number range of 5000<Re<1e8')
    if aRou/D<1e-6 or aRou/D>0.05:
        warnings.warn('Swamee&Jain algorithm is valid for a relative roughness range of 1e-6<aRou/D<0.05')
# Calculation
f=0.25/(math.log10((aRou/D)/3.7+5.74/Re**0.9))**2
return f
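# Illustrative comparison sketch (diameter, roughness and Reynolds number are
# hypothetical; note that f_Clamond expects the *relative* roughness K = aRou/D):
#
#   Re, D, aRou = 1e5, 100.0, 0.02        # [-], [mm], [mm]
#   f1 = f_Clamond(Re, aRou / D)
#   f2 = f_ColebrookWhite(D, Re, aRou)
#   f3 = f_SwameeJain(D, Re, aRou)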
| 35.901786
| 109
| 0.536931
|
f64e5eddbbf9dbb75c4cad1f1fbdbcbccc9cdfe8
| 1,477
|
py
|
Python
|
app/backend/visualization/helpers/get_and_process_image.py
|
admariner/social-media-profiler
|
2001167201fc9602fef3070ee9d31f005978bfe8
|
[
"MIT"
] | 34
|
2020-12-14T15:48:26.000Z
|
2022-02-27T14:24:29.000Z
|
app/backend/visualization/helpers/get_and_process_image.py
|
pandrey2003/social-media-profiler
|
4160e318997d161d63b8233511a65669542da026
|
[
"MIT"
] | 1
|
2021-12-15T02:37:32.000Z
|
2021-12-15T02:37:32.000Z
|
app/backend/visualization/helpers/get_and_process_image.py
|
admariner/social-media-profiler
|
2001167201fc9602fef3070ee9d31f005978bfe8
|
[
"MIT"
] | 6
|
2021-02-11T16:29:04.000Z
|
2022-03-23T11:42:32.000Z
|
# -*- coding: utf-8 -*-
"""The module to obtain an image by the URL and process it."""
import io
from urllib.request import urlopen
from PIL import Image, ImageDraw
import numpy as np
def get_and_process_image(url: str) -> io.BytesIO:
"""
Get the image by the URL, then crop it into a circle using numpy and Pillow, \
and return cropped version of the image.
Args:
`url`: the URL to the image we need to get and process.
Returns:
`io.BytesIO`: the io.BytesIO object that contains processed image.
"""
img = io.BytesIO(urlopen(url).read())
pillow_img = Image.open(img).convert("RGB")
pillow_img = pillow_img.resize(size=(400, 400))
np_image = np.array(pillow_img)
    width, height = pillow_img.size  # PIL's Image.size is (width, height)
    # Create same size alpha layer with circle
    alpha = Image.new("L", pillow_img.size, 0)
    draw = ImageDraw.Draw(alpha)
    draw.pieslice([0, 0, width, height], 0, 360, fill=255)
# Convert alpha Image to numpy array
np_alpha = np.array(alpha)
# Add alpha layer to RGB
np_image = np.dstack((np_image, np_alpha))
# Save with alpha
cropped_image_in_bytes = io.BytesIO()
cropped_image = Image.fromarray(np_image)
cropped_image.save(cropped_image_in_bytes, "PNG")
cropped_image_in_bytes.seek(0)
cropped_image = cropped_image_in_bytes.read()
# Return io.BytesIO image with all done manipulations
data_bytes_io = io.BytesIO(cropped_image)
return data_bytes_io
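# Illustrative usage sketch (the URL is hypothetical):
#
#   processed = get_and_process_image("https://example.com/avatar.jpg")
#   Image.open(processed).save("avatar_circle.png")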
| 31.425532
| 82
| 0.689235
|
aff0228f47873aed5b62f0642ef7ba7139775e7b
| 4,644
|
py
|
Python
|
tests/manage/pv_services/test_pvc_delete_verify_size_is_returned_to_backendpool.py
|
vikasmulaje/ocs-ci
|
98ce950150e061ba872c62f2d55f9bd395241a6e
|
[
"MIT"
] | null | null | null |
tests/manage/pv_services/test_pvc_delete_verify_size_is_returned_to_backendpool.py
|
vikasmulaje/ocs-ci
|
98ce950150e061ba872c62f2d55f9bd395241a6e
|
[
"MIT"
] | null | null | null |
tests/manage/pv_services/test_pvc_delete_verify_size_is_returned_to_backendpool.py
|
vikasmulaje/ocs-ci
|
98ce950150e061ba872c62f2d55f9bd395241a6e
|
[
"MIT"
] | null | null | null |
"""
A test case to verify after deleting pvc whether
size is returned to backend pool
"""
import logging
import pytest
from ocs_ci.ocs import constants, defaults
from ocs_ci.ocs.exceptions import UnexpectedBehaviour
from tests import helpers
from ocs_ci.framework.testlib import tier1, ManageTest
from ocs_ci.utility import templating
from ocs_ci.utility.retry import retry
from ocs_ci.ocs.resources import pod
from ocs_ci.ocs import ocp
from tests.fixtures import (
create_rbd_storageclass, create_ceph_block_pool,
create_rbd_secret
)
from ocs_ci.ocs.cluster import CephCluster
logger = logging.getLogger(__name__)
_templating = templating.Templating()
PV = ocp.OCP(
kind='PersistentVolume', namespace=defaults.ROOK_CLUSTER_NAMESPACE
)
@retry(UnexpectedBehaviour, tries=5, delay=3, backoff=1)
def verify_pv_not_exists(pvc_obj, cbp_name, rbd_image_id):
"""
Ensure that pv does not exists
"""
# Validate on ceph side
logger.info(f"Verifying PV {pvc_obj.backed_pv} exists on backend")
status = helpers.verify_volume_deleted_in_backend(
interface=constants.CEPHBLOCKPOOL, image_uuid=rbd_image_id,
pool_name=cbp_name
)
if not status:
raise UnexpectedBehaviour(f"PV {pvc_obj.backed_pv} exists on backend")
logger.info(
f"Expected: PV {pvc_obj.backed_pv} "
f"doesn't exist on backend after deleting PVC"
)
# Validate on oc side
logger.info("Verifying whether PV is deleted")
try:
assert helpers.validate_pv_delete(pvc_obj.backed_pv)
except AssertionError as ecf:
assert "not found" in str(ecf), (
f"Unexpected: PV {pvc_obj.backed_pv} still exists"
)
logger.info(
f"Expected: PV should not be found "
f"after deleting corresponding PVC"
)
def create_pvc_and_verify_pvc_exists(sc_name, cbp_name):
"""
Create pvc, verify pvc is bound in state and
pvc exists on ceph side
"""
pvc_obj = helpers.create_pvc(sc_name=sc_name, size='10Gi')
helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
pvc_obj.reload()
# Validate pv is created on ceph
logger.info(f"Verifying PV exists on backend")
assert not helpers.verify_volume_deleted_in_backend(
interface=constants.CEPHBLOCKPOOL, image_uuid=pvc_obj.image_uuid,
pool_name=cbp_name
)
return pvc_obj
@pytest.mark.usefixtures(
create_rbd_secret.__name__,
create_ceph_block_pool.__name__,
create_rbd_storageclass.__name__
)
@pytest.mark.polarion_id("OCS-372")
class TestPVCDeleteAndVerifySizeIsReturnedToBackendPool(ManageTest):
"""
Testing after pvc deletion the size is returned to backendpool
"""
@tier1
def test_pvc_delete_and_verify_size_is_returned_to_backend_pool(self):
"""
Test case to verify after delete pvc size returned to backend pools
"""
failed_to_delete = []
ceph_obj1 = CephCluster()
used_before_creating_pvc = ceph_obj1.check_ceph_pool_used_space(cbp_name=self.cbp_obj.name)
logger.info(f"Used before creating PVC {used_before_creating_pvc}")
pvc_obj = create_pvc_and_verify_pvc_exists(
self.sc_obj.name, self.cbp_obj.name
)
pod_obj = helpers.create_pod(
interface_type=constants.CEPHBLOCKPOOL, pvc_name=pvc_obj.name
)
helpers.wait_for_resource_state(pod_obj, constants.STATUS_RUNNING)
pod_obj.reload()
pod.run_io_and_verify_mount_point(pod_obj, bs='10M', count='300')
used_after_creating_pvc = ceph_obj1.check_ceph_pool_used_space(cbp_name=self.cbp_obj.name)
logger.info(f"Used after creating PVC {used_after_creating_pvc}")
assert used_before_creating_pvc < used_after_creating_pvc
rbd_image_id = pvc_obj.image_uuid
for resource in pod_obj, pvc_obj:
resource.delete()
try:
resource.ocp.wait_for_delete(resource)
except TimeoutError:
failed_to_delete.append(resource)
if failed_to_delete:
raise UnexpectedBehaviour(
f"Failed to delete resources: {failed_to_delete}"
)
verify_pv_not_exists(pvc_obj, self.cbp_obj.name, rbd_image_id)
ceph_obj2 = CephCluster()
used_after_deleting_pvc = ceph_obj2.check_ceph_pool_used_space(cbp_name=self.cbp_obj.name)
logger.info(f"Used after deleting PVC {used_after_deleting_pvc}")
assert used_after_deleting_pvc < used_after_creating_pvc
assert (abs(
used_after_deleting_pvc - used_before_creating_pvc) < 0.5
)
| 34.4
| 99
| 0.711886
|
49a8665aa056b55e2eaca11f08cd7bd3b2173199
| 1,822
|
py
|
Python
|
rusentrel/rusentrel_ds/ctx/cnn.py
|
nicolay-r/attitude-extraction-with-attention-and-ds
|
fb8e9d0d9488363738a88c4c447c7a8cb3e2ec1d
|
[
"MIT"
] | null | null | null |
rusentrel/rusentrel_ds/ctx/cnn.py
|
nicolay-r/attitude-extraction-with-attention-and-ds
|
fb8e9d0d9488363738a88c4c447c7a8cb3e2ec1d
|
[
"MIT"
] | 1
|
2020-12-16T18:21:11.000Z
|
2020-12-30T10:08:27.000Z
|
rusentrel/rusentrel_ds/ctx/cnn.py
|
nicolay-r/attitude-extraction-with-attention-and-ds
|
fb8e9d0d9488363738a88c4c447c7a8cb3e2ec1d
|
[
"MIT"
] | 1
|
2021-03-29T20:58:26.000Z
|
2021-03-29T20:58:26.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
sys.path.append('../../../')
from io_utils import RuSentRelBasedExperimentsIOUtils
from arekit.common.evaluation.evaluators.two_class import TwoClassEvaluator
from arekit.contrib.experiments.single.model import SingleInstanceTensorflowModel
from arekit.contrib.experiments.nn_io.rusentrel_with_ruattitudes import RuSentRelWithRuAttitudesBasedExperimentIO
from arekit.contrib.networks.context.configurations.cnn import CNNConfig
from arekit.contrib.networks.context.architectures.cnn import VanillaCNN
from rusentrel.ctx_names import ModelNames
from arekit.contrib.experiments.engine import run_testing
from arekit.contrib.experiments.callback import CustomCallback
from rusentrel.rusentrel_ds.common import DS_NAME_PREFIX, \
ds_ctx_common_config_settings, \
ds_common_callback_modification_func
from rusentrel.classic.ctx.cnn import ctx_cnn_custom_config
def run_testing_cnn(
name_prefix=DS_NAME_PREFIX,
cv_count=1,
common_callback_func=ds_common_callback_modification_func):
run_testing(full_model_name=name_prefix + ModelNames().CNN,
create_network=VanillaCNN,
create_config=CNNConfig,
create_nn_io=RuSentRelWithRuAttitudesBasedExperimentIO,
create_model=SingleInstanceTensorflowModel,
evaluator_class=TwoClassEvaluator,
create_callback=CustomCallback,
cv_count=cv_count,
experiments_io=RuSentRelBasedExperimentsIOUtils(),
common_config_modification_func=ds_ctx_common_config_settings,
common_callback_modification_func=common_callback_func,
custom_config_modification_func=ctx_cnn_custom_config)
if __name__ == "__main__":
run_testing_cnn()
| 40.488889
| 113
| 0.770033
|
d6e41c67f36a6e0a2afcd907bc30cf397bbaf10f
| 13,324
|
py
|
Python
|
reagent/reporting/training_reporter.py
|
alexnikulkov/ReAgent
|
e404c5772ea4118105c2eb136ca96ad5ca8e01db
|
[
"BSD-3-Clause"
] | null | null | null |
reagent/reporting/training_reporter.py
|
alexnikulkov/ReAgent
|
e404c5772ea4118105c2eb136ca96ad5ca8e01db
|
[
"BSD-3-Clause"
] | null | null | null |
reagent/reporting/training_reporter.py
|
alexnikulkov/ReAgent
|
e404c5772ea4118105c2eb136ca96ad5ca8e01db
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import math
from collections import deque
from typing import Deque, List, NamedTuple, Optional
import numpy as np
import torch
from reagent.tensorboardX import SummaryWriterContext
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
LOSS_REPORT_INTERVAL = 100
class BatchStats(NamedTuple):
td_loss: Optional[torch.Tensor] = None
reward_loss: Optional[torch.Tensor] = None
imitator_loss: Optional[torch.Tensor] = None
logged_actions: Optional[torch.Tensor] = None
logged_propensities: Optional[torch.Tensor] = None
logged_rewards: Optional[torch.Tensor] = None
logged_values: Optional[torch.Tensor] = None
model_propensities: Optional[torch.Tensor] = None
model_rewards: Optional[torch.Tensor] = None
model_values: Optional[torch.Tensor] = None
model_values_on_logged_actions: Optional[torch.Tensor] = None
model_action_idxs: Optional[torch.Tensor] = None
def write_summary(self, actions: List[str]):
if actions:
for field, log_key in [
("logged_actions", "actions/logged"),
("model_action_idxs", "actions/model"),
]:
val = getattr(self, field)
if val is None:
continue
for i, action in enumerate(actions):
# pyre-fixme[16]: `SummaryWriterContext` has no attribute
# `add_scalar`.
SummaryWriterContext.add_scalar(
"{}/{}".format(log_key, action), (val == i).sum().item()
)
for field, log_key in [
("td_loss", "td_loss"),
("imitator_loss", "imitator_loss"),
("reward_loss", "reward_loss"),
("logged_propensities", "propensities/logged"),
("logged_rewards", "reward/logged"),
("logged_values", "value/logged"),
("model_values_on_logged_actions", "value/model_logged_action"),
]:
val = getattr(self, field)
if val is None:
continue
assert len(val.shape) == 1 or (
len(val.shape) == 2 and val.shape[1] == 1
), "Unexpected shape for {}: {}".format(field, val.shape)
self._log_histogram_and_mean(log_key, val)
for field, log_key in [
("model_propensities", "propensities/model"),
("model_rewards", "reward/model"),
("model_values", "value/model"),
]:
val = getattr(self, field)
if val is None:
continue
if (
len(val.shape) == 1 or (len(val.shape) == 2 and val.shape[1] == 1)
) and not actions:
self._log_histogram_and_mean(log_key, val)
elif len(val.shape) == 2 and val.shape[1] == len(actions):
for i, action in enumerate(actions):
self._log_histogram_and_mean(f"{log_key}/{action}", val[:, i])
else:
raise ValueError(
"Unexpected shape for {}: {}; actions: {}".format(
field, val.shape, actions
)
)
def _log_histogram_and_mean(self, log_key, val):
try:
SummaryWriterContext.add_histogram(log_key, val)
SummaryWriterContext.add_scalar(f"{log_key}/mean", val.mean())
except ValueError:
logger.warning(
f"Cannot create histogram for key: {log_key}; "
"this is likely because you have NULL value in your input; "
f"value: {val}"
)
raise
@staticmethod
def add_custom_scalars(action_names: Optional[List[str]]):
if not action_names:
return
SummaryWriterContext.add_custom_scalars_multilinechart(
[
"propensities/model/{}/mean".format(action_name)
for action_name in action_names
],
category="propensities",
title="model",
)
SummaryWriterContext.add_custom_scalars_multilinechart(
[
"propensities/logged/{}/mean".format(action_name)
for action_name in action_names
],
category="propensities",
title="logged",
)
SummaryWriterContext.add_custom_scalars_multilinechart(
["actions/logged/{}".format(action_name) for action_name in action_names],
category="actions",
title="logged",
)
SummaryWriterContext.add_custom_scalars_multilinechart(
["actions/model/{}".format(action_name) for action_name in action_names],
category="actions",
title="model",
)
def merge_tensor_namedtuple_list(l, cls):
def merge_tensor(f):
vals = [getattr(e, f) for e in l]
not_none_vals = [v for v in vals if v is not None]
assert len(not_none_vals) == 0 or len(not_none_vals) == len(vals)
if not not_none_vals:
return None
return torch.cat(not_none_vals, dim=0)
return cls(**{f: merge_tensor(f) for f in cls._fields})
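# Illustrative note: merging two BatchStats whose td_loss tensors have shapes
# (2,) and (3,) yields one BatchStats whose td_loss has shape (5,); a field
# that is None in every element of the list stays None in the merged result.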
class StatsByAction(object):
def __init__(self, actions):
self.stats = {action: [] for action in actions}
def append(self, stats):
for k in stats:
assert k in self.stats
for k in self.stats:
v = stats.get(k, 0)
if isinstance(v, torch.Tensor):
v = v.item()
self.stats[k].append(v)
def items(self):
return self.stats.items()
def __len__(self):
return len(self.stats)
class NoOpTrainingReporter:
def report(self, **kwargs):
pass
def flush(self):
pass
class TrainingReporter(object):
RECENT_WINDOW_SIZE = 100
def __init__(self, action_names: Optional[List[str]] = None):
assert action_names is None or len(action_names) > 0
self.action_names: List[str] = action_names or []
self.loss_report_interval = LOSS_REPORT_INTERVAL
BatchStats.add_custom_scalars(action_names)
self.clear()
def clear(self):
self.running_reward: Deque[float] = deque(maxlen=int(1e6))
self.td_loss: List[float] = []
self.reward_loss: List[float] = []
self.imitator_loss: List[float] = []
self.logged_action_q_value: List[float] = []
self.logged_action_counts = {action: 0 for action in self.action_names}
self.model_values = StatsByAction(self.action_names)
self.model_value_stds = StatsByAction(self.action_names)
self.model_action_counts = StatsByAction(self.action_names)
self.model_action_counts_cumulative = {
action: 0 for action in self.action_names
}
self.model_action_distr = StatsByAction(self.action_names)
self.incoming_stats: List[BatchStats] = []
@property
def num_batches(self):
return len(self.td_loss)
def report(self, **kwargs):
def _to_tensor(v):
if v is None:
return None
if not isinstance(v, torch.Tensor):
v = torch.tensor(v)
if len(v.shape) == 0:
v = v.reshape(1)
return v.detach().cpu()
kwargs = {k: _to_tensor(v) for k, v in kwargs.items()}
batch_stats = BatchStats(**kwargs)
self.incoming_stats.append(batch_stats)
if len(self.incoming_stats) >= self.loss_report_interval:
self.flush()
@torch.no_grad()
def flush(self):
if not len(self.incoming_stats):
logger.info("Nothing to report")
return
logger.info("Loss on {} batches".format(len(self.incoming_stats)))
batch_stats = merge_tensor_namedtuple_list(self.incoming_stats, BatchStats)
batch_stats.write_summary(self.action_names)
print_details = "Loss:\n"
td_loss_mean = float(batch_stats.td_loss.mean())
self.td_loss.append(td_loss_mean)
print_details = print_details + "TD LOSS: {0:.3f}\n".format(td_loss_mean)
if batch_stats.logged_rewards is not None:
flattened_rewards = torch.flatten(batch_stats.logged_rewards).tolist()
self.running_reward.extend(flattened_rewards)
if batch_stats.reward_loss is not None:
reward_loss_mean = float(batch_stats.reward_loss.mean())
self.reward_loss.append(reward_loss_mean)
print_details = print_details + "REWARD LOSS: {0:.3f}\n".format(
reward_loss_mean
)
if batch_stats.imitator_loss is not None:
imitator_loss_mean = float(batch_stats.imitator_loss.mean())
self.imitator_loss.append(imitator_loss_mean)
print_details = print_details + "IMITATOR LOSS: {0:.3f}\n".format(
imitator_loss_mean
)
if batch_stats.model_values is not None and self.action_names:
self.model_values.append(
dict(zip(self.action_names, batch_stats.model_values.mean(dim=0)))
)
self.model_value_stds.append(
dict(zip(self.action_names, batch_stats.model_values.std(dim=0)))
)
if batch_stats.model_values_on_logged_actions is not None:
self.logged_action_q_value.append(
batch_stats.model_values_on_logged_actions.mean().item()
)
if (
batch_stats.logged_actions is not None
and batch_stats.model_action_idxs is not None
):
logged_action_counts = {
action: (batch_stats.logged_actions == i).sum().item()
for i, action in enumerate(self.action_names)
}
model_action_counts = {
action: (batch_stats.model_action_idxs == i).sum().item()
for i, action in enumerate(self.action_names)
}
print_details += "The distribution of logged actions : {}\n".format(
logged_action_counts
)
print_details += "The distribution of model actions : {}\n".format(
model_action_counts
)
for action, count in logged_action_counts.items():
self.logged_action_counts[action] += count
self.model_action_counts.append(model_action_counts)
for action, count in model_action_counts.items():
self.model_action_counts_cumulative[action] += count
total = float(sum(model_action_counts.values()))
self.model_action_distr.append(
{action: count / total for action, count in model_action_counts.items()}
)
print_details += "Batch Evaluator Finished"
for print_detail in print_details.split("\n"):
logger.info(print_detail)
self.incoming_stats.clear()
def get_td_loss_after_n(self, n):
return self.td_loss[n:]
def get_recent_td_loss(self):
return TrainingReporter.calculate_recent_window_average(
self.td_loss, TrainingReporter.RECENT_WINDOW_SIZE, num_entries=1
)
def get_recent_reward_loss(self):
return TrainingReporter.calculate_recent_window_average(
self.reward_loss, TrainingReporter.RECENT_WINDOW_SIZE, num_entries=1
)
def get_recent_imitator_loss(self):
return TrainingReporter.calculate_recent_window_average(
self.imitator_loss, TrainingReporter.RECENT_WINDOW_SIZE, num_entries=1
)
def get_logged_action_distribution(self):
total_actions = 1.0 * sum(self.logged_action_counts.values())
return {k: (v / total_actions) for k, v in self.logged_action_counts.items()}
def get_model_action_distribution(self):
total_actions = 1.0 * sum(self.model_action_counts_cumulative.values())
return {
k: (v / total_actions)
for k, v in self.model_action_counts_cumulative.items()
}
def get_recent_rewards(self):
return self.running_reward
def log_to_tensorboard(self, epoch: int) -> None:
def none_to_zero(x: Optional[float]) -> float:
if x is None or math.isnan(x):
return 0.0
return x
for name, value in [
("Training/td_loss", self.get_recent_td_loss()),
("Training/reward_loss", self.get_recent_reward_loss()),
("Training/imitator_loss", self.get_recent_imitator_loss()),
]:
# pyre-fixme[16]: `SummaryWriterContext` has no attribute `add_scalar`.
SummaryWriterContext.add_scalar(name, none_to_zero(value), epoch)
@staticmethod
def calculate_recent_window_average(arr, window_size, num_entries):
if len(arr) > 0:
begin = max(0, len(arr) - window_size)
return np.mean(np.array(arr[begin:]), axis=0)
else:
logger.error("Not enough samples for evaluation.")
if num_entries == 1:
return float("nan")
else:
return [float("nan")] * num_entries
| 36.604396
| 88
| 0.599745
|
2dc97e5543c5f8cea310db78d2fd7123fe944862
| 6,343
|
py
|
Python
|
biothings/web/connections.py
|
newgene/biothings.api
|
e3278695ac15a55fe420aa49c464946f81ec019d
|
[
"Apache-2.0"
] | 30
|
2017-07-23T14:50:29.000Z
|
2022-02-08T08:08:16.000Z
|
biothings/web/connections.py
|
kevinxin90/biothings.api
|
8ff3bbaecd72d04db4933ff944898ee7b7c0e04a
|
[
"Apache-2.0"
] | 163
|
2017-10-24T18:45:40.000Z
|
2022-03-28T03:46:26.000Z
|
biothings/web/connections.py
|
newgene/biothings.api
|
e3278695ac15a55fe420aa49c464946f81ec019d
|
[
"Apache-2.0"
] | 22
|
2017-06-12T18:30:15.000Z
|
2022-03-01T18:10:47.000Z
|
import hashlib
import logging
import os
import pickle
from functools import partial
import boto3
import elasticsearch
import elasticsearch_dsl
import requests
from biothings.utils.common import run_once
from elasticsearch import AIOHttpConnection
from elasticsearch import RequestsHttpConnection as _Conn
from requests_aws4auth import AWS4Auth
from tornado.ioloop import IOLoop
logger = logging.getLogger(__name__)
_should_log = run_once()
def _log_pkg():
es_ver = elasticsearch.__version__
es_dsl_ver = elasticsearch_dsl.__version__
logger.info("Elasticsearch Package Version: %s",
'.'.join(map(str, es_ver)))
logger.info("Elasticsearch DSL Package Version: %s",
'.'.join(map(str, es_dsl_ver)))
def _log_db(client, uri):
logger.info(client)
def _log_es(client, hosts):
_log_db(client, hosts)
# only perform health check with the async client
# so that it doesn't slow down program start time
if isinstance(client, elasticsearch.AsyncElasticsearch):
async def log_cluster(async_client):
cluster = await async_client.info()
# not specifying timeout in the function above because
# there could be a number of es tasks scheduled before
# this call and would take the cluster a while to respond
if _should_log():
_log_pkg()
cluster_name = cluster['cluster_name']
version = cluster['version']['number']
logger.info('%s: %s %s', hosts, cluster_name, version)
IOLoop.current().add_callback(log_cluster, client)
# ------------------------
# Low Level Functions
# ------------------------
class _AsyncConn(AIOHttpConnection):
def __init__(self, *args, **kwargs):
self.aws_auth = None
if isinstance(kwargs.get('http_auth'), AWS4Auth):
self.aws_auth = kwargs['http_auth']
kwargs['http_auth'] = None
super().__init__(*args, **kwargs)
async def perform_request(
self, method, url, params=None, body=None,
timeout=None, ignore=(), headers=None
):
req = requests.PreparedRequest()
req.prepare(method, self.host + url, headers, None, body, params)
self.aws_auth(req) # sign the request
headers.update(req.headers)
return await super().perform_request(
method, url, params, body,
timeout, ignore, headers
)
# https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-identity-documents.html
AWS_META_URL = "http://169.254.169.254/latest/dynamic/instance-identity/document"
def get_es_client(hosts=None, async_=False, **settings):
""" Enhanced ES client initialization.
Additionally support these parameters:
    async_: use AsyncElasticsearch instead of Elasticsearch.
aws: setup request signing and provide reasonable ES settings
to access AWS OpenSearch, by default assuming it is on HTTPS.
    sniff: provide reasonable default settings to enable client-side
        LB to an ES cluster. This param itself is not an ES param.
"""
if settings.pop('aws', False):
# find region
session = boto3.Session()
region = session.region_name
if not region: # not in ~/.aws/config
region = os.environ.get("AWS_REGION")
if not region: # not in environment variable
try: # assume same-region service access
res = requests.get(AWS_META_URL)
region = res.json()["region"]
            except Exception:  # not running in VPC
region = "us-west-2" # default
# find credentials
credentials = session.get_credentials()
awsauth = AWS4Auth(
refreshable_credentials=credentials,
region=region, service='es'
)
_cc = _AsyncConn if async_ else _Conn
settings.update(http_auth=awsauth, connection_class=_cc)
settings.setdefault('use_ssl', True)
settings.setdefault('verify_certs', True)
# not evaluated when 'aws' flag is set because
# AWS OpenSearch is internally load-balanced
# and does not support client-side sniffing.
elif settings.pop('sniff', False):
settings.setdefault('sniff_on_start', True)
settings.setdefault('sniff_on_connection_fail', True)
settings.setdefault('sniffer_timeout', 60)
if async_:
from elasticsearch import AsyncElasticsearch
client = AsyncElasticsearch
else:
from elasticsearch import Elasticsearch
client = Elasticsearch
return client(hosts, **settings)
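# Illustrative usage sketch (the hosts are hypothetical):
#
#   local_es = get_es_client("localhost:9200", sniff=True)
#   aws_es = get_es_client("https://my-domain.us-west-2.es.amazonaws.com",
#                          aws=True, async_=True)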
def get_sql_client(uri, **settings):
from sqlalchemy import create_engine
return create_engine(uri, **settings).connect()
def get_mongo_client(uri, **settings):
from pymongo import MongoClient
return MongoClient(uri, **settings).get_default_database()
def _not_implemented_client():
raise NotImplementedError()
# ------------------------
# High Level Utilities
# ------------------------
class _ClientPool:
def __init__(self, client_factory, async_factory, callback=None):
self._client_factory = client_factory
self._clients = {}
self._async_client_factory = async_factory
self._async_clients = {}
self.callback = callback or _log_db
@staticmethod
def hash(config):
_config = pickle.dumps(config)
_hash = hashlib.md5(_config)
return _hash.hexdigest()
def _get_client(self, repo, factory, uri, settings):
hash = self.hash((uri, settings))
if hash in repo:
return repo[hash]
repo[hash] = factory(uri, **settings)
self.callback(repo[hash], uri)
return repo[hash]
def get_client(self, uri, **settings):
return self._get_client(
self._clients,
self._client_factory,
uri, settings
)
def get_async_client(self, uri, **settings):
return self._get_client(
self._async_clients,
self._async_client_factory,
uri, settings
)
es = _ClientPool(get_es_client, partial(get_es_client, async_=True), _log_es)
sql = _ClientPool(get_sql_client, _not_implemented_client)
mongo = _ClientPool(get_mongo_client, _not_implemented_client)
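# Illustrative note (hypothetical URI): the pools above cache clients keyed by
# a hash of (uri, settings), so repeated calls with identical arguments return
# the same client instance.
#
#   c1 = es.get_client("localhost:9200", timeout=20)
#   c2 = es.get_client("localhost:9200", timeout=20)
#   assert c1 is c2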
| 31.246305
| 86
| 0.650166
|
b874d68887aff283f08bcbe962daafa1810614e8
| 154
|
py
|
Python
|
Common/Measures/Portfolio/Timely/PortfolioAnnually.py
|
enriqueescobar-askida/Kinito.Finance
|
5308748b64829ac798a858161f9b4a9e5829db44
|
[
"MIT"
] | 2
|
2020-03-04T11:18:38.000Z
|
2020-05-10T15:36:42.000Z
|
Common/Measures/Portfolio/Timely/PortfolioAnnually.py
|
enriqueescobar-askida/Kinito.Finance
|
5308748b64829ac798a858161f9b4a9e5829db44
|
[
"MIT"
] | 6
|
2020-03-30T16:42:47.000Z
|
2021-12-13T20:37:21.000Z
|
Common/Measures/Portfolio/Timely/PortfolioAnnually.py
|
enriqueescobar-askida/Kinito.Finance
|
5308748b64829ac798a858161f9b4a9e5829db44
|
[
"MIT"
] | 1
|
2020-04-14T11:26:16.000Z
|
2020-04-14T11:26:16.000Z
|
from Common.Measures.Portfolio.Timely.AbstractPortfolioTimely import AbstractPortfolioTimely
class PortfolioAnnually(AbstractPortfolioTimely):
pass
| 25.666667
| 92
| 0.87013
|
f68e9f54641d80b7467dc53d58a8ef02ecdc7921
| 12,946
|
py
|
Python
|
skl2onnx/operator_converters/sgd_classifier.py
|
xiaowuhu/sklearn-onnx
|
e85674a67a0a043e19c2ffe181e5d31eca8ce40b
|
[
"Apache-2.0"
] | null | null | null |
skl2onnx/operator_converters/sgd_classifier.py
|
xiaowuhu/sklearn-onnx
|
e85674a67a0a043e19c2ffe181e5d31eca8ce40b
|
[
"Apache-2.0"
] | null | null | null |
skl2onnx/operator_converters/sgd_classifier.py
|
xiaowuhu/sklearn-onnx
|
e85674a67a0a043e19c2ffe181e5d31eca8ce40b
|
[
"Apache-2.0"
] | null | null | null |
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from ..common._apply_operation import (
apply_add, apply_cast, apply_clip, apply_concat, apply_div, apply_exp,
apply_identity, apply_mul, apply_reciprocal, apply_reshape, apply_sub)
from ..common.data_types import (
BooleanTensorType, Int64TensorType, guess_numpy_type,
guess_proto_type)
from ..common._registration import register_converter
from ..common._topology import Scope, Operator
from ..common._container import ModelComponentContainer
from ..common.utils_classifier import get_label_classes
from ..proto import onnx_proto
def _decision_function(scope, operator, container, model, proto_type):
"""Predict for linear model.
score = X * coefficient + intercept
"""
coef_name = scope.get_unique_variable_name('coef')
intercept_name = scope.get_unique_variable_name('intercept')
matmul_result_name = scope.get_unique_variable_name(
'matmul_result')
score_name = scope.get_unique_variable_name('score')
coef = model.coef_.T
container.add_initializer(coef_name, proto_type,
coef.shape, coef.ravel())
container.add_initializer(intercept_name, proto_type,
model.intercept_.shape, model.intercept_)
input_name = operator.inputs[0].full_name
if type(operator.inputs[0].type) in (BooleanTensorType, Int64TensorType):
cast_input_name = scope.get_unique_variable_name('cast_input')
apply_cast(scope, operator.input_full_names, cast_input_name,
container, to=proto_type)
input_name = cast_input_name
container.add_node(
'MatMul', [input_name, coef_name],
matmul_result_name,
name=scope.get_unique_operator_name('MatMul'))
apply_add(scope, [matmul_result_name, intercept_name],
score_name, container, broadcast=0)
return score_name
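# Illustrative NumPy sketch (not emitted into the ONNX graph): the MatMul and
# Add nodes created above compute exactly this for a fitted linear model whose
# coef_ has shape (n_classes, n_features) (or (1, n_features) for binary).
def _numpy_decision_function(X, model):
    import numpy as np
    # score = X * coefficient + intercept
    return np.asarray(X) @ model.coef_.T + model.intercept_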
def _handle_zeros(scope, container, scores, proba, reduced_proba, num_classes,
proto_type):
"""Handle cases where reduced_proba values are zeros to avoid NaNs in
class probability scores because of divide by 0 when we calculate
proba / reduced_proba in _normalise_proba().
This is done by replacing reduced_proba values of 0s with
num_classes and corresponding proba values with 1.
"""
num_classes_name = scope.get_unique_variable_name('num_classes')
bool_reduced_proba_name = scope.get_unique_variable_name(
'bool_reduced_proba')
bool_not_reduced_proba_name = scope.get_unique_variable_name(
'bool_not_reduced_proba')
not_reduced_proba_name = scope.get_unique_variable_name(
'not_reduced_proba')
proba_updated_name = scope.get_unique_variable_name('proba_updated')
mask_name = scope.get_unique_variable_name('mask')
reduced_proba_updated_name = scope.get_unique_variable_name(
'reduced_proba_updated')
container.add_initializer(num_classes_name, proto_type,
[], [num_classes])
apply_cast(scope, reduced_proba, bool_reduced_proba_name, container,
to=onnx_proto.TensorProto.BOOL)
container.add_node('Not', bool_reduced_proba_name,
bool_not_reduced_proba_name,
name=scope.get_unique_operator_name('Not'))
apply_cast(scope, bool_not_reduced_proba_name, not_reduced_proba_name,
container, to=proto_type)
apply_add(scope, [proba, not_reduced_proba_name],
proba_updated_name, container, broadcast=1)
apply_mul(scope, [not_reduced_proba_name, num_classes_name],
mask_name, container, broadcast=1)
apply_add(scope, [reduced_proba, mask_name],
reduced_proba_updated_name, container, broadcast=0)
return proba_updated_name, reduced_proba_updated_name
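# Hedged NumPy sketch of the zero handling above: rows whose probability mass
# sums to zero become a uniform distribution over num_classes, which is what
# the Cast/Not/Mul/Add nodes implement.
def _numpy_handle_zeros(proba, num_classes):
    import numpy as np
    proba = np.asarray(proba, dtype=float)
    reduced = proba.sum(axis=1, keepdims=True)
    zero_rows = (reduced == 0).astype(float)
    proba = proba + zero_rows            # zero rows: every class proba becomes 1
    reduced = reduced + zero_rows * num_classes
    return proba / reduced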
def _normalise_proba(scope, operator, container, scores, proba, num_classes,
unity_name, proto_type):
reduced_proba_name = scope.get_unique_variable_name('reduced_proba')
sub_result_name = scope.get_unique_variable_name('sub_result')
if num_classes == 2:
apply_sub(scope, [unity_name, proba],
sub_result_name, container, broadcast=1)
apply_concat(scope, [sub_result_name, proba],
operator.outputs[1].full_name, container, axis=1)
else:
if container.target_opset < 13:
container.add_node(
'ReduceSum', proba, reduced_proba_name, axes=[1],
name=scope.get_unique_operator_name('ReduceSum'))
else:
axis_name = scope.get_unique_variable_name('axis')
container.add_initializer(
axis_name, onnx_proto.TensorProto.INT64, [1], [1])
container.add_node(
'ReduceSum', [proba, axis_name], reduced_proba_name,
name=scope.get_unique_operator_name('ReduceSum'))
proba_updated, reduced_proba_updated = _handle_zeros(
scope, container, scores, proba, reduced_proba_name, num_classes,
proto_type)
apply_div(scope, [proba_updated, reduced_proba_updated],
operator.outputs[1].full_name, container, broadcast=1)
return operator.outputs[1].full_name
def _predict_proba_log(scope, operator, container, scores, num_classes,
proto_type):
"""Probability estimation for SGDClassifier with loss=log and
Logistic Regression.
Positive class probabilities are computed as
1. / (1. + exp(-scores))
multiclass is handled by normalising that over all classes.
"""
if num_classes >= 3 or container.target_opset < 13:
negated_scores_name = scope.get_unique_variable_name('negated_scores')
negate_name = scope.get_unique_variable_name('negate')
exp_result_name = scope.get_unique_variable_name('exp_result')
unity_name = scope.get_unique_variable_name('unity')
add_result_name = scope.get_unique_variable_name('add_result')
proba_name = scope.get_unique_variable_name('proba')
container.add_initializer(negate_name, proto_type, [], [-1])
container.add_initializer(unity_name, proto_type, [], [1])
apply_mul(scope, [scores, negate_name],
negated_scores_name, container, broadcast=1)
apply_exp(scope, negated_scores_name, exp_result_name, container)
apply_add(scope, [exp_result_name, unity_name],
add_result_name, container, broadcast=1)
apply_reciprocal(scope, add_result_name, proba_name, container)
return _normalise_proba(scope, operator, container, scores, proba_name,
num_classes, unity_name, proto_type)
# Sigmoid cannot be used for num_classes > 2 because
# onnxruntime has a different implementation than numpy.
# It introduces discrepancies when x < 1e16.
# Below that threshold, Sigmoid must be replaced by Exp
# because Sigmoid is not an increasing function.
sigmo = scope.get_unique_variable_name('sigmoid')
container.add_node('Sigmoid', [scores], [sigmo],
name=scope.get_unique_operator_name('Sigmoid'))
unity_name = scope.get_unique_variable_name('unity')
container.add_initializer(unity_name, proto_type, [1], [1])
sigmo_0 = scope.get_unique_variable_name('sigmo_0')
container.add_node('Sub', [unity_name, sigmo], [sigmo_0],
name=scope.get_unique_operator_name('Sub'))
apply_concat(scope, [sigmo_0, sigmo], [operator.outputs[1].full_name],
container, axis=1)
return operator.outputs[1].full_name
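# Hedged NumPy sketch of the probability estimate built above for loss='log':
# p = 1 / (1 + exp(-score)).  Assumes `scores` has shape (n_samples, 1) in the
# binary case and (n_samples, n_classes) otherwise.
def _numpy_predict_proba_log(scores, num_classes):
    import numpy as np
    proba = 1.0 / (1.0 + np.exp(-np.asarray(scores, dtype=float)))
    if num_classes == 2:
        return np.concatenate([1.0 - proba, proba], axis=1)
    reduced = proba.sum(axis=1, keepdims=True)
    return proba / reduced  # all-zero rows get the treatment shown in _handle_zeros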
def _predict_proba_modified_huber(scope, operator, container,
scores, num_classes, proto_type):
"""Probability estimation for SGDClassifier with
loss=modified_huber.
Multiclass probability estimates are derived from binary
estimates by normalisation.
Binary probability estimates are given by
(clip(scores, -1, 1) + 1) / 2.
"""
dtype = guess_numpy_type(operator.inputs[0].type)
if dtype != np.float64:
dtype = np.float32
unity_name = scope.get_unique_variable_name('unity')
constant_name = scope.get_unique_variable_name('constant')
add_result_name = scope.get_unique_variable_name('add_result')
proba_name = scope.get_unique_variable_name('proba')
clipped_scores_name = scope.get_unique_variable_name('clipped_scores')
container.add_initializer(unity_name, proto_type,
[], [1])
container.add_initializer(constant_name, proto_type,
[], [2])
apply_clip(scope, scores, clipped_scores_name, container,
max=np.array(1, dtype=dtype),
min=np.array(-1, dtype=dtype))
apply_add(scope, [clipped_scores_name, unity_name],
add_result_name, container, broadcast=1)
apply_div(scope, [add_result_name, constant_name],
proba_name, container, broadcast=1)
return _normalise_proba(scope, operator, container, scores, proba_name,
num_classes, unity_name, proto_type)
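# Hedged NumPy sketch of the modified_huber probability estimate above:
# binary estimates are (clip(score, -1, 1) + 1) / 2, normalised per row in
# the multiclass case.
def _numpy_predict_proba_modified_huber(scores, num_classes):
    import numpy as np
    proba = (np.clip(np.asarray(scores, dtype=float), -1.0, 1.0) + 1.0) / 2.0
    if num_classes == 2:
        return np.concatenate([1.0 - proba, proba], axis=1)
    reduced = proba.sum(axis=1, keepdims=True)
    return proba / reduced  # all-zero rows get the treatment shown in _handle_zeros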
def convert_sklearn_sgd_classifier(scope: Scope, operator: Operator,
container: ModelComponentContainer):
"""Converter for SGDClassifier."""
sgd_op = operator.raw_operator
classes = get_label_classes(scope, sgd_op)
class_type = onnx_proto.TensorProto.STRING
proto_type = guess_proto_type(operator.inputs[0].type)
if proto_type != onnx_proto.TensorProto.DOUBLE:
proto_type = onnx_proto.TensorProto.FLOAT
if np.issubdtype(classes.dtype, np.floating):
class_type = onnx_proto.TensorProto.INT32
classes = classes.astype(np.int32)
elif np.issubdtype(classes.dtype, np.signedinteger):
class_type = onnx_proto.TensorProto.INT32
else:
classes = np.array([s.encode('utf-8') for s in classes])
classes_name = scope.get_unique_variable_name('classes')
predicted_label_name = scope.get_unique_variable_name(
'predicted_label')
final_label_name = scope.get_unique_variable_name('final_label')
container.add_initializer(classes_name, class_type,
classes.shape, classes)
scores = _decision_function(scope, operator, container, sgd_op, proto_type)
options = container.get_options(sgd_op, dict(raw_scores=False))
use_raw_scores = options['raw_scores']
if sgd_op.loss == 'log' and not use_raw_scores:
proba = _predict_proba_log(scope, operator, container, scores,
len(classes), proto_type)
elif sgd_op.loss == 'modified_huber' and not use_raw_scores:
proba = _predict_proba_modified_huber(
scope, operator, container, scores, len(classes),
proto_type)
else:
if len(classes) == 2:
negate_name = scope.get_unique_variable_name('negate')
negated_scores_name = scope.get_unique_variable_name(
'negated_scores')
container.add_initializer(
negate_name, proto_type, [], [-1])
apply_mul(scope, [scores, negate_name],
negated_scores_name, container, broadcast=1)
apply_concat(scope, [negated_scores_name, scores],
operator.outputs[1].full_name, container, axis=1)
else:
apply_identity(scope, scores,
operator.outputs[1].full_name, container)
proba = operator.outputs[1].full_name
container.add_node('ArgMax', proba,
predicted_label_name,
name=scope.get_unique_operator_name('ArgMax'), axis=1)
container.add_node(
'ArrayFeatureExtractor', [classes_name, predicted_label_name],
final_label_name, op_domain='ai.onnx.ml',
name=scope.get_unique_operator_name('ArrayFeatureExtractor'))
if class_type == onnx_proto.TensorProto.INT32:
reshaped_final_label_name = scope.get_unique_variable_name(
'reshaped_final_label')
apply_reshape(scope, final_label_name, reshaped_final_label_name,
container, desired_shape=(-1,))
apply_cast(scope, reshaped_final_label_name,
operator.outputs[0].full_name, container,
to=onnx_proto.TensorProto.INT64)
else:
apply_reshape(scope, final_label_name,
operator.outputs[0].full_name, container,
desired_shape=(-1,))
register_converter('SklearnSGDClassifier',
convert_sklearn_sgd_classifier,
options={'zipmap': [True, False, 'columns'],
'nocl': [True, False],
'output_class_labels': [False, True],
'raw_scores': [True, False]})
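# Hedged end-to-end sketch (not part of the converter): converting a fitted
# SGDClassifier through the public convert_sklearn entry point, which
# dispatches to convert_sklearn_sgd_classifier above.  Shapes, names and the
# toy data are illustrative only.
def _example_convert_sgd_classifier():
    import numpy
    from sklearn.linear_model import SGDClassifier
    from skl2onnx import convert_sklearn
    from skl2onnx.common.data_types import FloatTensorType
    X = numpy.random.rand(20, 3).astype(numpy.float32)
    y = numpy.array([0, 1] * 10)
    model = SGDClassifier(loss='modified_huber').fit(X, y)
    # one float input of width 3; the batch dimension is left dynamic
    return convert_sklearn(
        model, initial_types=[('input', FloatTensorType([None, 3]))])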
| 45.584507
| 79
| 0.67434
|
35dc4c726607e7f3270797b75df0ffc69048fccd
| 7,612
|
py
|
Python
|
ryu/services/protocols/bgp/utils/validation.py
|
vinaykothiyal/ryu
|
32551989c649311854215df29860ccb272c105c0
|
[
"Apache-2.0"
] | 9
|
2018-04-11T12:53:08.000Z
|
2021-12-14T01:41:22.000Z
|
ryu/services/protocols/bgp/utils/validation.py
|
vinaykothiyal/ryu
|
32551989c649311854215df29860ccb272c105c0
|
[
"Apache-2.0"
] | 1
|
2019-05-20T13:23:28.000Z
|
2020-12-20T09:06:52.000Z
|
ryu/services/protocols/bgp/utils/validation.py
|
vinaykothiyal/ryu
|
32551989c649311854215df29860ccb272c105c0
|
[
"Apache-2.0"
] | 2
|
2020-10-20T13:52:45.000Z
|
2021-06-26T02:21:58.000Z
|
# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module provides utilities for validation.
"""
import numbers
import re
import socket
import netaddr
def is_valid_mac(mac):
"""Returns True if the given MAC address is valid.
    The given MAC address should be six hexadecimal octets separated by
    colons or dashes (both separators are accepted by the pattern below).
    Samples:
        - valid address: aa:bb:cc:dd:ee:ff, 11-22-33-44-55-66
        - invalid address: aa:bb:cc:dd, aa:bb:cc:dd:ee:gg, etc.
"""
return bool(re.match(r'^' + r'[\:\-]'.join([r'([0-9a-f]{2})'] * 6)
+ r'$', mac.lower()))
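# Illustrative checks only (not part of the original module); they simply
# exercise is_valid_mac above.
def _example_is_valid_mac():
    assert is_valid_mac('aa:bb:cc:dd:ee:ff')
    assert is_valid_mac('AA:BB:CC:DD:EE:FF')       # lower-cased before matching
    assert is_valid_mac('11-22-33-44-55-66')       # dash separators also match
    assert not is_valid_mac('aa:bb:cc:dd')         # too few octets
    assert not is_valid_mac('aa:bb:cc:dd:ee:gg')   # non-hexadecimal octet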
def is_valid_ip_prefix(prefix, bits):
"""Returns True if *prefix* is a valid IPv4 or IPv6 address prefix.
    *prefix* should be a number between 0 and *bits*, inclusive.
"""
try:
# Prefix should be a number
prefix = int(prefix)
except ValueError:
return False
    # Prefix should be a number between 0 and *bits*
return 0 <= prefix <= bits
def is_valid_ipv4(ipv4):
"""Returns True if given is a valid ipv4 address.
Given value should be a dot-decimal notation string.
Samples:
- valid address: 10.0.0.1, 192.168.0.1
- invalid address: 11.0.0, 192:168:0:1, etc.
"""
return netaddr.valid_ipv4(ipv4)
def is_valid_ipv4_prefix(ipv4_prefix):
"""Returns True if *ipv4_prefix* is a valid prefix with mask.
Samples:
- valid prefix: 1.1.1.0/32, 244.244.244.1/10
- invalid prefix: 255.2.2.2/2, 2.2.2/22, etc.
"""
if not isinstance(ipv4_prefix, str):
return False
tokens = ipv4_prefix.split('/')
if len(tokens) != 2:
return False
# Validate address/mask and return
return is_valid_ipv4(tokens[0]) and is_valid_ip_prefix(tokens[1], 32)
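# Illustrative checks only, exercising the prefix validators above.
def _example_is_valid_ipv4_prefix():
    assert is_valid_ipv4_prefix('10.0.0.0/8')
    assert is_valid_ipv4_prefix('192.168.0.1/32')
    assert not is_valid_ipv4_prefix('10.0.0.0/33')   # prefix length out of range
    assert not is_valid_ipv4_prefix('256.0.0.1/8')   # invalid address octet
    assert not is_valid_ipv4_prefix(24)              # not a string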
def is_valid_ipv6(ipv6):
"""Returns True if given `ipv6` is a valid IPv6 address
"""
return netaddr.valid_ipv6(ipv6)
def is_valid_ipv6_prefix(ipv6_prefix):
"""Returns True if given `ipv6_prefix` is a valid IPv6 prefix."""
# Validate input type
if not isinstance(ipv6_prefix, str):
return False
tokens = ipv6_prefix.split('/')
if len(tokens) != 2:
return False
# Validate address/mask and return
return is_valid_ipv6(tokens[0]) and is_valid_ip_prefix(tokens[1], 128)
def is_valid_old_asn(asn):
"""Returns True if the given AS number is Two Octet."""
return isinstance(asn, numbers.Integral) and 0 <= asn <= 0xffff
def is_valid_asn(asn):
"""Returns True if the given AS number is Two or Four Octet."""
return isinstance(asn, numbers.Integral) and 0 <= asn <= 0xffffffff
def is_valid_vpnv4_prefix(prefix):
"""Returns True if given prefix is a string represent vpnv4 prefix.
Vpnv4 prefix is made up of RD:Ipv4, where RD is represents route
distinguisher and Ipv4 represents valid dot-decimal ipv4 notation string.
"""
if not isinstance(prefix, str):
return False
# Split the prefix into route distinguisher and IP
tokens = prefix.split(':', 2)
if len(tokens) != 3:
return False
# Validate route distinguisher
if not is_valid_route_dist(':'.join([tokens[0], tokens[1]])):
return False
# Validate IPv4 prefix and return
return is_valid_ipv4_prefix(tokens[2])
def is_valid_vpnv6_prefix(prefix):
"""Returns True if given prefix is a string represent vpnv6 prefix.
Vpnv6 prefix is made up of RD:Ipv6, where RD is represents route
distinguisher and Ipv6 represents valid colon hexadecimal notation string.
"""
if not isinstance(prefix, str):
return False
# Split the prefix into route distinguisher and IP
tokens = prefix.split(':', 2)
if len(tokens) != 3:
return False
# Validate route distinguisher
if not is_valid_route_dist(':'.join([tokens[0], tokens[1]])):
return False
# Validate IPv6 prefix and return
return is_valid_ipv6_prefix(tokens[2])
def is_valid_med(med):
"""Returns True if value of *med* is valid as per RFC.
According to RFC MED is a four octet non-negative integer and
value '((2 ** 32) - 1) = 0xffffffff' denotes an "infinity" metric.
"""
return isinstance(med, numbers.Integral) and 0 <= med <= 0xffffffff
def is_valid_mpls_label(label):
"""Validates `label` according to MPLS label rules
RFC says:
    The label value is a 20-bit field.
A value of 0 represents the "IPv4 Explicit NULL Label".
A value of 1 represents the "Router Alert Label".
A value of 2 represents the "IPv6 Explicit NULL Label".
A value of 3 represents the "Implicit NULL Label".
Values 4-15 are reserved.
"""
if (not isinstance(label, numbers.Integral) or
(4 <= label <= 15) or
(label < 0 or label > 2 ** 20)):
return False
return True
def is_valid_mpls_labels(labels):
"""Returns True if the given value is a list of valid MPLS labels.
"""
if not isinstance(labels, (list, tuple)):
return False
for label in labels:
if not is_valid_mpls_label(label):
return False
return True
def is_valid_route_dist(route_dist):
"""Validates *route_dist* as string representation of route distinguisher.
Returns True if *route_dist* is as per our convention of RD, else False.
Our convention is to represent RD as a string in format:
    *admin_sub_field:assigned_num_field*, where *admin_sub_field* can be a
    valid IPv4 address in string representation.
Valid examples: '65000:222', '1.2.3.4:4432'.
Invalid examples: '1.11.1: 333'
"""
# TODO(PH): Provide complete implementation.
return is_valid_ext_comm_attr(route_dist)
def is_valid_ext_comm_attr(attr):
"""Validates *attr* as string representation of RT or SOO.
Returns True if *attr* is as per our convention of RT or SOO, else
False. Our convention is to represent RT/SOO is a string with format:
*global_admin_part:local_admin_path*
"""
if not isinstance(attr, str):
return False
tokens = attr.rsplit(':', 1)
if len(tokens) != 2:
return False
try:
if '.' in tokens[0]:
if not is_valid_ipv4(tokens[0]):
return False
else:
int(tokens[0])
int(tokens[1])
except (ValueError, socket.error):
return False
return True
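# Illustrative checks only, exercising is_valid_ext_comm_attr above.
def _example_is_valid_ext_comm_attr():
    assert is_valid_ext_comm_attr('65000:222')      # ASN:number form
    assert is_valid_ext_comm_attr('1.2.3.4:4432')   # IPv4:number form
    assert not is_valid_ext_comm_attr('65000')      # missing local admin part
    assert not is_valid_ext_comm_attr('as100:1')    # non-numeric global admin part
    assert not is_valid_ext_comm_attr(65000)        # not a string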
def is_valid_esi(esi):
"""Returns True if the given EVPN Ethernet SegmentEthernet ID is valid."""
if isinstance(esi, numbers.Integral):
return 0 <= esi <= 0xffffffffffffffffff
return isinstance(esi, dict)
def is_valid_ethernet_tag_id(etag_id):
"""Returns True if the given EVPN Ethernet Tag ID is valid.
Ethernet Tag ID should be a 32-bit field number.
"""
return isinstance(etag_id, numbers.Integral) and 0 <= etag_id <= 0xffffffff
def is_valid_vni(vni):
"""Returns True if the given Virtual Network Identifier for VXLAN
is valid.
Virtual Network Identifier should be a 24-bit field number.
"""
return isinstance(vni, numbers.Integral) and 0 <= vni <= 0xffffff
| 28.942966
| 79
| 0.665659
|
ce626fbf862ccb7ba395a1da1f82cbe6c51aa978
| 7,080
|
py
|
Python
|
electrumx/lib/tx.py
|
Electrum-RVN-SIG/electrumx-ravencoin
|
6b888a9246257037c95cdaffcce3386181050398
|
[
"MIT"
] | 11
|
2021-04-03T21:29:44.000Z
|
2022-03-28T09:37:34.000Z
|
electrumx/lib/tx.py
|
Electrum-RVN-SIG/electrumx-ravencoin
|
6b888a9246257037c95cdaffcce3386181050398
|
[
"MIT"
] | 4
|
2021-05-14T13:10:01.000Z
|
2021-12-03T15:12:23.000Z
|
electrumx/lib/tx.py
|
Electrum-RVN-SIG/electrumx-ravencoin
|
6b888a9246257037c95cdaffcce3386181050398
|
[
"MIT"
] | 4
|
2021-04-05T22:07:06.000Z
|
2021-09-23T08:04:48.000Z
|
# Copyright (c) 2016-2017, Neil Booth
# Copyright (c) 2017, the ElectrumX authors
#
# All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# and warranty status of this software.
'''Transaction-related classes and functions.'''
from collections import namedtuple
from electrumx.lib.hash import double_sha256, hash_to_hex_str
from electrumx.lib.util import (
unpack_le_int32_from, unpack_le_int64_from, unpack_le_uint16_from,
unpack_be_uint16_from,
unpack_le_uint32_from, unpack_le_uint64_from, pack_le_int32, pack_varint,
pack_le_uint32, pack_le_int64, pack_varbytes,
)
ZERO = bytes(32)
MINUS_1 = 4294967295
class Tx(namedtuple("Tx", "version inputs outputs locktime witness")):
'''Class representing a transaction.'''
def serialize(self):
return b''.join((
pack_le_int32(self.version),
pack_varint(len(self.inputs)),
b''.join(tx_in.serialize() for tx_in in self.inputs),
pack_varint(len(self.outputs)),
b''.join(tx_out.serialize() for tx_out in self.outputs),
pack_le_uint32(self.locktime)
))
class TxInput(namedtuple("TxInput", "prev_hash prev_idx script sequence")):
'''Class representing a transaction input.'''
def __str__(self):
script = self.script.hex()
prev_hash = hash_to_hex_str(self.prev_hash)
return ("Input({}, {:d}, script={}, sequence={:d})"
.format(prev_hash, self.prev_idx, script, self.sequence))
def is_generation(self):
'''Test if an input is generation/coinbase like'''
return self.prev_idx == MINUS_1 and self.prev_hash == ZERO
def serialize(self):
return b''.join((
self.prev_hash,
pack_le_uint32(self.prev_idx),
pack_varbytes(self.script),
pack_le_uint32(self.sequence),
))
class TxOutput(namedtuple("TxOutput", "value pk_script")):
def serialize(self):
return b''.join((
pack_le_int64(self.value),
pack_varbytes(self.pk_script),
))
class Deserializer:
'''Deserializes transactions.
This code is highly optimised and very performance sensitive.
'''
def __init__(self, buf, start=0):
self.view = memoryview(buf)
self.cursor = start
def read_tx(self):
'''Return a deserialized transaction.'''
tx, self.cursor, hash = read_tx(self.view, self.cursor)
return tx
def read_tx_and_hash(self):
'''Return a (deserialized TX, tx_hash) pair.
The hash needs to be reversed for human display; for efficiency
we process it in the natural serialized order.
'''
start = self.cursor
tx, end, hash = read_tx(self.view, self.cursor)
self.cursor = end
return tx, hash if hash else double_sha256(self.view[start:end])
def read_varint(self):
value, self.cursor = read_varint(self.view, self.cursor)
return value
def read_varint(buf, cursor):
n = buf[cursor]
cursor += 1
if n < 253:
return n, cursor
if n == 253:
return read_le_uint16(buf, cursor)
if n == 254:
return read_le_uint32(buf, cursor)
return read_le_uint64(buf, cursor)
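# Illustrative round trip only: pack_varint (imported above) and read_varint
# are inverses across the three compact-size encodings.
def _example_read_varint_round_trip():
    for value in (0, 252, 253, 0xffff, 0x10000, 0xffffffff, 0x100000000):
        encoded = pack_varint(value)
        decoded, cursor = read_varint(encoded, 0)
        assert decoded == value and cursor == len(encoded)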
def read_varbytes(buf, cursor):
size, cursor = read_varint(buf, cursor)
end = cursor + size
return buf[cursor: end], end
def read_le_uint16(buf, cursor):
result, = unpack_le_uint16_from(buf, cursor)
return result, cursor + 2
def read_le_uint32(buf, cursor):
result, = unpack_le_uint32_from(buf, cursor)
return result, cursor + 4
def read_le_uint64(buf, cursor):
result, = unpack_le_uint64_from(buf, cursor)
return result, cursor + 8
def read_le_int32(buf, cursor):
result, = unpack_le_int32_from(buf, cursor)
return result, cursor + 4
def read_le_int64(buf, cursor):
result, = unpack_le_int64_from(buf, cursor)
return result, cursor + 8
def read_input(buf, cursor):
start = cursor
cursor += 32
prev_hash = buf[start: cursor]
prev_idx, cursor = read_le_uint32(buf, cursor)
script, cursor = read_varbytes(buf, cursor)
sequence, cursor = read_le_uint32(buf, cursor)
return TxInput(prev_hash, prev_idx, script, sequence), cursor
def read_output(buf, cursor):
value, cursor = read_le_int64(buf, cursor)
pk_script, cursor = read_varbytes(buf, cursor)
return TxOutput(value, pk_script), cursor
def read_witness(buf, cursor, input_len):
ret = []
for _ in range(input_len):
wit_for_in, cursor = read_varint(buf, cursor)
app_val = []
for _ in range(wit_for_in):
data, cursor = read_varbytes(buf, cursor)
app_val.append(data.hex())
ret.append(app_val)
return ret, cursor
def read_many(buf, cursor, reader):
count, cursor = read_varint(buf, cursor)
items = []
append = items.append
for _ in range(count):
item, cursor = reader(buf, cursor)
append(item)
return items, cursor
def read_tx(buf, cursor):
'''Deserialize a transaction from a buffer. Return a (tx, cursor) pair.
If the buffer does not hold the whole transaction, raises struct.error or IndexError.
'''
start = cursor
version, cursor = read_le_int32(buf, cursor)
original = bytes(buf[start:cursor])
    # A 0x00 marker byte followed by a 0x01 flag byte indicates witness data
check_flag = buf[cursor:cursor+2].hex() == '0001'
if check_flag:
flag, cursor = read_le_uint16(buf, cursor)
start = cursor
inputs, cursor = read_many(buf, cursor, read_input)
outputs, cursor = read_many(buf, cursor, read_output)
original += bytes(buf[start:cursor])
witness = None
if check_flag:
witness, cursor = read_witness(buf, cursor, len(inputs))
start = cursor
locktime, cursor = read_le_uint32(buf, cursor)
original += bytes(buf[start:cursor])
return Tx(version, inputs, outputs, locktime, witness), cursor, double_sha256(original) if check_flag else None
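# Illustrative round trip only (hypothetical values): a minimal legacy
# (non-witness) transaction serialised with Tx.serialize and parsed back with
# the Deserializer defined above; the hash is reversed for display as the
# read_tx_and_hash docstring notes.
def _example_tx_round_trip():
    tx = Tx(
        version=1,
        inputs=[TxInput(ZERO, MINUS_1, b'\x00', 0xffffffff)],
        outputs=[TxOutput(50 * 100000000, b'\x51')],
        locktime=0,
        witness=None,
    )
    parsed, tx_hash = Deserializer(tx.serialize()).read_tx_and_hash()
    assert parsed.version == tx.version
    assert parsed.inputs[0].is_generation()
    assert parsed.outputs[0].value == tx.outputs[0].value
    # the human-readable txid is the double-SHA256 in reversed byte order
    assert hash_to_hex_str(tx_hash) == tx_hash[::-1].hex()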
| 29.873418
| 115
| 0.674718
|
770583139946b58e26a0db9a06fb877e5e353899
| 611
|
py
|
Python
|
wafer/sponsors/urls.py
|
drnlm/wafer
|
1d843190428c401df06fcdfb89d1f9d9af67229e
|
[
"ISC"
] | 41
|
2015-03-16T17:47:00.000Z
|
2022-01-07T04:31:21.000Z
|
wafer/sponsors/urls.py
|
drnlm/wafer
|
1d843190428c401df06fcdfb89d1f9d9af67229e
|
[
"ISC"
] | 338
|
2015-03-15T17:26:36.000Z
|
2021-12-02T04:34:53.000Z
|
wafer/sponsors/urls.py
|
drnlm/wafer
|
1d843190428c401df06fcdfb89d1f9d9af67229e
|
[
"ISC"
] | 28
|
2015-07-27T14:11:13.000Z
|
2020-11-16T03:50:30.000Z
|
from django.conf.urls import url, include
from rest_framework import routers
from wafer.sponsors.views import (
ShowSponsors, SponsorView, ShowPackages, SponsorViewSet, PackageViewSet)
router = routers.DefaultRouter()
router.register(r'sponsors', SponsorViewSet)
router.register(r'packages', PackageViewSet)
urlpatterns = [
url(r'^$', ShowSponsors.as_view(),
name='wafer_sponsors'),
url(r'^(?P<pk>\d+)/$', SponsorView.as_view(), name='wafer_sponsor'),
url(r'^packages/$', ShowPackages.as_view(),
name='wafer_sponsorship_packages'),
url(r'^api/', include(router.urls)),
]
| 30.55
| 76
| 0.713584
|
8be38663aba02eac0a7fd1c4a6883c9b7416aa50
| 9,190
|
py
|
Python
|
pettingzoo/magent/magent_env.py
|
QiyaoWei/PettingZoo
|
d27bdfe3507df48ff4e9ce04804d1dd1087a2dea
|
[
"Apache-2.0"
] | null | null | null |
pettingzoo/magent/magent_env.py
|
QiyaoWei/PettingZoo
|
d27bdfe3507df48ff4e9ce04804d1dd1087a2dea
|
[
"Apache-2.0"
] | null | null | null |
pettingzoo/magent/magent_env.py
|
QiyaoWei/PettingZoo
|
d27bdfe3507df48ff4e9ce04804d1dd1087a2dea
|
[
"Apache-2.0"
] | null | null | null |
import math
import warnings
import magent
import numpy as np
from gym.spaces import Box, Discrete
from gym.utils import seeding
from pettingzoo import AECEnv
from pettingzoo.magent.render import Renderer
from pettingzoo.utils import agent_selector, wrappers
from pettingzoo.utils.env import ParallelEnv
def make_env(raw_env):
def env_fn(**kwargs):
env = raw_env(**kwargs)
env = wrappers.AssertOutOfBoundsWrapper(env)
env = wrappers.OrderEnforcingWrapper(env)
return env
return env_fn
class magent_parallel_env(ParallelEnv):
def __init__(self, env, active_handles, names, map_size, max_cycles, reward_range, minimap_mode, extra_features):
self.map_size = map_size
self.max_cycles = max_cycles
self.minimap_mode = minimap_mode
self.extra_features = extra_features
self.env = env
self.handles = active_handles
self._all_handles = self.env.get_handles()
env.reset()
self.generate_map()
self.team_sizes = team_sizes = [env.get_num(handle) for handle in self.handles]
self.agents = [f"{names[j]}_{i}" for j in range(len(team_sizes)) for i in range(team_sizes[j])]
self.possible_agents = self.agents[:]
num_actions = [env.get_action_space(handle)[0] for handle in self.handles]
action_spaces_list = [Discrete(num_actions[j]) for j in range(len(team_sizes)) for i in range(team_sizes[j])]
# may change depending on environment config? Not sure.
team_obs_shapes = self._calc_obs_shapes()
state_shape = self._calc_state_shape()
observation_space_list = [Box(low=0., high=2., shape=team_obs_shapes[j], dtype=np.float32) for j in range(len(team_sizes)) for i in range(team_sizes[j])]
self.state_space = Box(low=0., high=2., shape=state_shape, dtype=np.float32)
reward_low, reward_high = reward_range
if extra_features:
for space in observation_space_list:
idx = space.shape[2] - 3 if minimap_mode else space.shape[2] - 1
space.low[:, :, idx] = reward_low
space.high[:, :, idx] = reward_high
            idx_state = self.state_space.shape[2] - 1
self.state_space.low[:, :, idx_state] = reward_low
self.state_space.high[:, :, idx_state] = reward_high
self.action_spaces = {agent: space for agent, space in zip(self.agents, action_spaces_list)}
self.observation_spaces = {agent: space for agent, space in zip(self.agents, observation_space_list)}
self._zero_obs = {agent: np.zeros_like(space.low) for agent, space in self.observation_spaces.items()}
self.base_state = np.zeros(self.state_space.shape, dtype='float32')
walls = self.env._get_walls_info()
wall_x, wall_y = zip(*walls)
self.base_state[wall_x, wall_y, 0] = 1
self._renderer = None
self.frames = 0
def observation_space(self, agent):
return self.observation_spaces[agent]
def action_space(self, agent):
return self.action_spaces[agent]
def seed(self, seed=None):
if seed is None:
seed = seeding.create_seed(seed, max_bytes=4)
self.env.set_seed(seed)
def _calc_obs_shapes(self):
view_spaces = [self.env.get_view_space(handle) for handle in self.handles]
feature_spaces = [self.env.get_feature_space(handle) for handle in self.handles]
assert all(len(tup) == 3 for tup in view_spaces)
assert all(len(tup) == 1 for tup in feature_spaces)
feat_size = [[fs[0]] for fs in feature_spaces]
for feature_space in feat_size:
if not self.extra_features:
feature_space[0] = 2 if self.minimap_mode else 0
obs_spaces = [(view_space[:2] + (view_space[2] + feature_space[0],)) for view_space, feature_space in zip(view_spaces, feat_size)]
return obs_spaces
def _calc_state_shape(self):
feature_spaces = [
self.env.get_feature_space(handle) for handle in self._all_handles
]
self._minimap_features = 2 if self.minimap_mode else 0
# map channel and agent pair channel. Remove global agent position when minimap mode and extra features
state_depth = (
(max(feature_spaces)[0] - self._minimap_features) * self.extra_features
+ 1
+ len(self._all_handles) * 2
)
return (self.map_size, self.map_size, state_depth)
def render(self, mode="human"):
if self._renderer is None:
self._renderer = Renderer(self.env, self.map_size, mode)
assert mode == self._renderer.mode, "mode must be consistent across render calls"
return self._renderer.render(mode)
def close(self):
if self._renderer is not None:
self._renderer.close()
self._renderer = None
def reset(self):
self.agents = self.possible_agents[:]
self.env.reset()
self.frames = 0
self.all_dones = {agent: False for agent in self.possible_agents}
self.generate_map()
return self._observe_all()
def _observe_all(self):
observes = [None] * self.max_num_agents
for handle in self.handles:
ids = self.env.get_agent_id(handle)
view, features = self.env.get_observation(handle)
if self.minimap_mode and not self.extra_features:
features = features[:, -2:]
if self.minimap_mode or self.extra_features:
feat_reshape = np.expand_dims(np.expand_dims(features, 1), 1)
feat_img = np.tile(feat_reshape, (1, view.shape[1], view.shape[2], 1))
fin_obs = np.concatenate([view, feat_img], axis=-1)
else:
fin_obs = np.copy(view)
for id, obs in zip(ids, fin_obs):
observes[id] = obs
ret_agents = set(self.agents)
return {agent: obs if obs is not None else self._zero_obs[agent] for agent, obs in zip(self.possible_agents, observes) if agent in ret_agents}
def _all_rewards(self):
rewards = np.zeros(self.max_num_agents)
for handle in self.handles:
ids = self.env.get_agent_id(handle)
rewards[ids] = self.env.get_reward(handle)
ret_agents = set(self.agents)
return {agent: float(rew) for agent, rew in zip(self.possible_agents, rewards) if agent in ret_agents}
def _all_dones(self, step_done=False):
dones = np.ones(self.max_num_agents, dtype=bool)
if not step_done:
for handle in self.handles:
ids = self.env.get_agent_id(handle)
dones[ids] = ~self.env.get_alive(handle)
ret_agents = set(self.agents)
return {agent: bool(done) for agent, done in zip(self.possible_agents, dones) if agent in ret_agents}
def state(self):
'''
Returns an observation of the global environment
'''
state = np.copy(self.base_state)
for handle in self._all_handles:
view, features = self.env.get_observation(handle)
pos = self.env.get_pos(handle)
pos_x, pos_y = zip(*pos)
state[pos_x, pos_y, 1 + handle.value * 2] = 1
state[pos_x, pos_y, 2 + handle.value * 2] = view[:, view.shape[1] // 2, view.shape[2] // 2, 2]
if self.extra_features:
add_zeros = np.zeros(
(
features.shape[0],
state.shape[2]
- (1 + len(self.team_sizes) * 2 + features.shape[1] - self._minimap_features),
)
)
rewards = features[:, -1 - self._minimap_features]
actions = features[:, :-1 - self._minimap_features]
actions = np.concatenate((actions, add_zeros), axis=1)
rewards = rewards.reshape(len(rewards), 1)
state_features = np.hstack((actions, rewards))
state[pos_x, pos_y, 1 + len(self.team_sizes) * 2:] = state_features
return state
def step(self, all_actions):
action_list = [0] * self.max_num_agents
for i, agent in enumerate(self.possible_agents):
if agent in all_actions:
action_list[i] = all_actions[agent]
all_actions = np.asarray(action_list, dtype=np.int32)
start_point = 0
for i in range(len(self.handles)):
size = self.team_sizes[i]
self.env.set_action(self.handles[i], all_actions[start_point:(start_point + size)])
start_point += size
self.frames += 1
done = self.env.step() or self.frames >= self.max_cycles
all_infos = {agent: {} for agent in self.agents}
all_dones = self._all_dones(done)
all_rewards = self._all_rewards()
all_observes = self._observe_all()
self.all_dones = all_dones
self.env.clear_dead()
self.agents = [agent for agent in self.agents if not self.all_dones[agent]]
return all_observes, all_rewards, all_dones, all_infos
| 42.35023
| 161
| 0.625898
|
fb679d1bcd1c26bf07b933c9ef665030f96dcef2
| 1,985
|
py
|
Python
|
supervisor/dbus/network/configuration.py
|
pnjongang/supervisor
|
2a006ae76de4b06e3e291b37aa2a4e14dc272445
|
[
"Apache-2.0"
] | 597
|
2017-04-27T15:10:08.000Z
|
2019-12-18T16:02:57.000Z
|
supervisor/dbus/network/configuration.py
|
pnjongang/supervisor
|
2a006ae76de4b06e3e291b37aa2a4e14dc272445
|
[
"Apache-2.0"
] | 1,056
|
2020-01-30T09:59:44.000Z
|
2022-03-31T10:15:32.000Z
|
supervisor/dbus/network/configuration.py
|
pnjongang/supervisor
|
2a006ae76de4b06e3e291b37aa2a4e14dc272445
|
[
"Apache-2.0"
] | 295
|
2020-02-03T11:30:42.000Z
|
2022-03-31T18:53:14.000Z
|
"""NetworkConnection object4s for Network Manager."""
from ipaddress import IPv4Address, IPv4Interface, IPv6Address, IPv6Interface
from typing import Optional, Union
import attr
@attr.s(slots=True)
class IpConfiguration:
"""NetworkSettingsIPConfig object for Network Manager."""
    gateway: Optional[Union[IPv4Address, IPv6Address]] = attr.ib()
    nameservers: list[Union[IPv4Address, IPv6Address]] = attr.ib()
address: list[Union[IPv4Interface, IPv6Interface]] = attr.ib()
@attr.s(slots=True)
class DNSConfiguration:
"""DNS configuration Object."""
nameservers: list[Union[IPv4Address, IPv6Address]] = attr.ib()
domains: list[str] = attr.ib()
interface: str = attr.ib()
priority: int = attr.ib()
vpn: bool = attr.ib()
@attr.s(slots=True)
class ConnectionProperties:
"""Connection Properties object for Network Manager."""
id: Optional[str] = attr.ib()
uuid: Optional[str] = attr.ib()
type: Optional[str] = attr.ib()
interface_name: Optional[str] = attr.ib()
@attr.s(slots=True)
class WirelessProperties:
"""Wireless Properties object for Network Manager."""
ssid: Optional[str] = attr.ib()
assigned_mac: Optional[str] = attr.ib()
mode: Optional[str] = attr.ib()
powersave: Optional[int] = attr.ib()
@attr.s(slots=True)
class WirelessSecurityProperties:
"""Wireless Security Properties object for Network Manager."""
auth_alg: Optional[str] = attr.ib()
key_mgmt: Optional[str] = attr.ib()
psk: Optional[str] = attr.ib()
@attr.s(slots=True)
class EthernetProperties:
"""Ethernet properties object for Network Manager."""
assigned_mac: Optional[str] = attr.ib()
@attr.s(slots=True)
class VlanProperties:
"""Ethernet properties object for Network Manager."""
id: Optional[int] = attr.ib()
parent: Optional[str] = attr.ib()
@attr.s(slots=True)
class IpProperties:
"""IP properties object for Network Manager."""
method: Optional[str] = attr.ib()
| 25.779221
| 76
| 0.690176
|
03736ec8e09e1e2d2110e8ca83063b568379f0cd
| 8,921
|
py
|
Python
|
core/jobs/jobs_manager_test.py
|
lheureuxe13/oppia
|
7110e3e5d5a53527c31d7b33e14d25e8d5b981f9
|
[
"Apache-2.0"
] | 4
|
2021-09-16T16:46:53.000Z
|
2022-02-06T13:00:14.000Z
|
core/jobs/jobs_manager_test.py
|
lheureuxe13/oppia
|
7110e3e5d5a53527c31d7b33e14d25e8d5b981f9
|
[
"Apache-2.0"
] | 80
|
2020-10-31T09:14:46.000Z
|
2021-01-12T23:38:15.000Z
|
core/jobs/jobs_manager_test.py
|
lheureuxe13/oppia
|
7110e3e5d5a53527c31d7b33e14d25e8d5b981f9
|
[
"Apache-2.0"
] | 1
|
2020-10-02T13:28:26.000Z
|
2020-10-02T13:28:26.000Z
|
# coding: utf-8
#
# Copyright 2021 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for jobs.jobs_manager."""
from __future__ import absolute_import
from __future__ import unicode_literals
import contextlib
import datetime
from unittest import mock
from core import feconf
from core.domain import beam_job_services
from core.jobs import base_jobs
from core.jobs import job_options
from core.jobs import jobs_manager
from core.jobs.types import job_run_result
from core.storage.beam_job import gae_models as beam_job_models
from core.tests import test_utils
import apache_beam as beam
from apache_beam import runners
from google.cloud import dataflow
class WorkingJob(base_jobs.JobBase):
"""Simple job that outputs string literals."""
def run(self) -> beam.PCollection[job_run_result.JobRunResult]:
return (
self.pipeline
| beam.Create([job_run_result.JobRunResult(stdout='o', stderr='e')])
)
class FailingJob(base_jobs.JobBase):
"""Simple job that always raises an exception."""
def run(self) -> beam.PCollection[job_run_result.JobRunResult]:
raise Exception('uh-oh')
class RunJobTests(test_utils.GenericTestBase):
def test_working_sync_job(self) -> None:
run = jobs_manager.run_job(WorkingJob, True, namespace=self.namespace)
self.assertEqual(run.latest_job_state, 'DONE')
run_model = beam_job_models.BeamJobRunModel.get(run.id)
self.assertEqual(run, run_model)
self.assertEqual(
beam_job_services.get_beam_job_run_result(run.id).to_dict(),
{'stdout': 'o', 'stderr': 'e'})
def test_failing_sync_job(self) -> None:
run = jobs_manager.run_job(FailingJob, True, namespace=self.namespace)
self.assertEqual(run.latest_job_state, 'FAILED')
run_model = beam_job_models.BeamJobRunModel.get(run.id)
self.assertEqual(run, run_model)
self.assertIn(
'uh-oh',
beam_job_services.get_beam_job_run_result(run.id).stderr)
def test_async_job(self) -> None:
mock_run_result = mock.Mock()
mock_run_result.has_job = True
mock_run_result.job_id.return_value = '123'
mock_run_result.state = 'PENDING'
pipeline = beam.Pipeline(
runner=runners.DirectRunner(),
options=job_options.JobOptions(namespace=self.namespace))
with self.swap_to_always_return(pipeline, 'run', value=mock_run_result):
run = jobs_manager.run_job(WorkingJob, False, pipeline=pipeline)
self.assertEqual(run.dataflow_job_id, '123')
self.assertEqual(run.latest_job_state, 'PENDING')
def test_async_job_that_does_not_start(self) -> None:
mock_run_result = mock.Mock()
mock_run_result.has_job = False
mock_run_result.job_id.return_value = None
mock_run_result.state = 'UNKNOWN'
pipeline = beam.Pipeline(
runner=runners.DirectRunner(),
options=job_options.JobOptions(namespace=self.namespace))
with self.swap_to_always_return(pipeline, 'run', value=mock_run_result):
run = jobs_manager.run_job(WorkingJob, False, pipeline=pipeline)
self.assertIsNone(run.dataflow_job_id)
self.assertEqual(run.latest_job_state, 'FAILED')
result = beam_job_services.get_beam_job_run_result(run.id)
self.assertIn('Failed to deploy WorkingJob', result.stderr)
class RefreshStateOfBeamJobRunModelTests(test_utils.GenericTestBase):
def setUp(self) -> None:
super().setUp()
self.run_model = beam_job_services.create_beam_job_run_model(
'WorkingJob', dataflow_job_id='123')
self.dataflow_job = dataflow.Job(
id='123',
project_id=feconf.OPPIA_PROJECT_ID,
location=feconf.GOOGLE_APP_ENGINE_REGION,
current_state=dataflow.JobState.JOB_STATE_PENDING,
current_state_time=datetime.datetime.utcnow())
self.dataflow_client_mock = mock.Mock()
self.dataflow_client_mock.get_job.return_value = self.dataflow_job
self.exit_stack = contextlib.ExitStack()
self.exit_stack.enter_context(self.swap_to_always_return(
dataflow, 'JobsV1Beta3Client', value=self.dataflow_client_mock))
def tearDown(self) -> None:
try:
self.exit_stack.close()
finally:
super().tearDown()
def test_sync_job(self) -> None:
self.run_model.dataflow_job_id = None
jobs_manager.refresh_state_of_beam_job_run_model(self.run_model)
self.assertEqual(self.run_model.latest_job_state, 'UNKNOWN')
def test_job_with_outdated_status(self) -> None:
self.run_model.latest_job_state = 'PENDING'
self.dataflow_job.current_state = dataflow.JobState.JOB_STATE_RUNNING
jobs_manager.refresh_state_of_beam_job_run_model(self.run_model)
self.assertEqual(self.run_model.latest_job_state, 'RUNNING')
def test_job_with_failed_status(self) -> None:
self.run_model.latest_job_state = 'RUNNING'
self.dataflow_job.current_state = dataflow.JobState.JOB_STATE_FAILED
jobs_manager.refresh_state_of_beam_job_run_model(self.run_model)
self.assertEqual(self.run_model.latest_job_state, 'FAILED')
result = beam_job_services.get_beam_job_run_result(self.run_model.id)
self.assertIn(self.dataflow_job.id, result.stderr)
def test_job_with_cancelling_status_but_job_is_cancelled(self) -> None:
self.run_model.latest_job_state = 'CANCELLING'
self.dataflow_job.current_state = dataflow.JobState.JOB_STATE_CANCELLED
jobs_manager.refresh_state_of_beam_job_run_model(self.run_model)
self.assertEqual(self.run_model.latest_job_state, 'CANCELLED')
def test_job_with_cancelling_status_but_job_is_running(self) -> None:
self.run_model.latest_job_state = 'CANCELLING'
self.dataflow_job.current_state = dataflow.JobState.JOB_STATE_RUNNING
jobs_manager.refresh_state_of_beam_job_run_model(self.run_model)
self.assertEqual(self.run_model.latest_job_state, 'CANCELLING')
def test_failed_api_call_logs_the_exception(self) -> None:
self.run_model.latest_job_state = 'PENDING'
self.dataflow_client_mock.get_job.side_effect = Exception('uh-oh')
with self.capture_logging() as logs:
jobs_manager.refresh_state_of_beam_job_run_model(self.run_model)
self.assertGreater(len(logs), 0)
self.assertIn('uh-oh', logs[0])
self.assertEqual(self.run_model.latest_job_state, 'UNKNOWN')
class CancelJobTests(test_utils.GenericTestBase):
def setUp(self) -> None:
super().setUp()
self.run_model = beam_job_services.create_beam_job_run_model(
'WorkingJob', dataflow_job_id='123')
self.dataflow_job = dataflow.Job(
id='123',
project_id=feconf.OPPIA_PROJECT_ID,
location=feconf.GOOGLE_APP_ENGINE_REGION,
current_state=dataflow.JobState.JOB_STATE_CANCELLING,
current_state_time=datetime.datetime.utcnow())
self.dataflow_client_mock = mock.Mock()
self.dataflow_client_mock.update_job.return_value = self.dataflow_job
self.exit_stack = contextlib.ExitStack()
self.exit_stack.enter_context(self.swap_to_always_return(
dataflow, 'JobsV1Beta3Client', value=self.dataflow_client_mock))
def tearDown(self) -> None:
try:
self.exit_stack.close()
finally:
super().tearDown()
def test_sync_job(self) -> None:
self.run_model.dataflow_job_id = None
with self.assertRaisesRegexp(ValueError, 'must not be None'): # type: ignore[no-untyped-call]
jobs_manager.cancel_job(self.run_model)
def test_job_with_cancelling_status(self) -> None:
self.run_model.latest_job_state = 'RUNNING'
jobs_manager.cancel_job(self.run_model)
self.assertEqual(self.run_model.latest_job_state, 'CANCELLING')
def test_failed_api_call_logs_the_exception(self) -> None:
self.dataflow_client_mock.update_job.side_effect = Exception('uh-oh')
with self.capture_logging() as logs:
jobs_manager.cancel_job(self.run_model)
self.assertGreater(len(logs), 0)
self.assertIn('uh-oh', logs[0])
| 35.971774
| 101
| 0.709338
|
1bee2a7f19b88d204126f75fc3e577c6bbda66dc
| 2,214
|
py
|
Python
|
examples/twisted/wamp/rpc/slowsquare/backend.py
|
rapyuta-robotics/autobahn-python
|
c08e9e352d526a7fd0885bb94706366a432ada1a
|
[
"MIT"
] | 1,670
|
2015-10-12T15:46:22.000Z
|
2022-03-30T22:12:53.000Z
|
examples/twisted/wamp/rpc/slowsquare/backend.py
|
rapyuta-robotics/autobahn-python
|
c08e9e352d526a7fd0885bb94706366a432ada1a
|
[
"MIT"
] | 852
|
2015-10-16T22:11:03.000Z
|
2022-03-27T07:57:01.000Z
|
examples/twisted/wamp/rpc/slowsquare/backend.py
|
rapyuta-robotics/autobahn-python
|
c08e9e352d526a7fd0885bb94706366a432ada1a
|
[
"MIT"
] | 790
|
2015-10-15T08:46:12.000Z
|
2022-03-30T12:22:13.000Z
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from os import environ
from twisted.internet.defer import inlineCallbacks, \
returnValue
from autobahn.twisted.wamp import ApplicationSession, ApplicationRunner
from autobahn.twisted.util import sleep
class Component(ApplicationSession):
"""
A math service application component.
"""
@inlineCallbacks
def onJoin(self, details):
print("session attached")
def square(x):
return x * x
yield self.register(square, 'com.math.square')
@inlineCallbacks
def slowsquare(x, delay=1):
yield sleep(delay)
returnValue(x * x)
yield self.register(slowsquare, 'com.math.slowsquare')
print("procedures registered")
if __name__ == '__main__':
url = environ.get("AUTOBAHN_DEMO_ROUTER", "ws://127.0.0.1:8080/ws")
realm = "crossbardemo"
runner = ApplicationRunner(url, realm)
runner.run(Component)
| 34.59375
| 79
| 0.671635
|
551edee1eabe80d45ff1e5017d5c2a6df62c5efa
| 1,321
|
py
|
Python
|
test/salt_master_files/tasks/custom_send_commands.py
|
dmulyalin/salt-nornir
|
2fbbe136f5a462e7981ce638a3dd15463de63d18
|
[
"MIT"
] | 12
|
2020-08-24T10:44:59.000Z
|
2022-03-23T03:41:20.000Z
|
test/salt_master_files/tasks/custom_send_commands.py
|
dmulyalin/salt-nornir
|
2fbbe136f5a462e7981ce638a3dd15463de63d18
|
[
"MIT"
] | 3
|
2021-02-15T20:55:20.000Z
|
2021-09-26T22:50:02.000Z
|
test/salt_master_files/tasks/custom_send_commands.py
|
dmulyalin/salt-nornir
|
2fbbe136f5a462e7981ce638a3dd15463de63d18
|
[
"MIT"
] | 1
|
2021-05-23T15:47:10.000Z
|
2021-05-23T15:47:10.000Z
|
import time
from nornir.core.task import Result, Task
from nornir_netmiko.tasks import netmiko_send_command
import logging
log = logging.getLogger(__name__)
# define connection name for RetryRunner to properly detect it using:
# connection_name = task.task.__globals__.get("CONNECTION_NAME", None)
CONNECTION_NAME = "netmiko"
def task(task, commands, interval=0.01, **kwargs):
"""
Nornir Task function to send show commands to devices using
``nornir_netmiko.tasks.netmiko_send_command`` plugin
:param kwargs: might contain ``netmiko_kwargs`` argument dictionary
with parameters for ``nornir_netmiko.tasks.netmiko_send_command``
method
:param commands: (list) commands to send to device
:param interval: (int) interval between sending commands, default 0.01s
:return result: Nornir result object with task execution results named
after commands
"""
# run interval sanity check
interval = interval if isinstance(interval, (int, float)) else 0.01
# run commands
for command in commands:
task.run(
task=netmiko_send_command,
command_string=command,
name=command,
**kwargs.get("netmiko_kwargs", {})
)
time.sleep(interval)
return Result(host=task.host, skip_results=True)
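# Hedged usage sketch (not part of the task itself): with an already
# initialised Nornir object ``nr`` (configuration not shown), the task above
# runs per host like any other Nornir task; the command list and interval are
# illustrative only.
def _example_run_task(nr):
    return nr.run(task=task, commands=["show version", "show clock"], interval=0.5)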
| 33.025
| 75
| 0.704769
|
1414e9b3a932702cd8f0890ebe3e93aed1bba77b
| 2,588
|
py
|
Python
|
sis_provisioner/views/jobs.py
|
uw-it-aca/canvas-sis-provisioner
|
28f54ce101704a8feaaea1e514618aba5a330a25
|
[
"Apache-2.0"
] | null | null | null |
sis_provisioner/views/jobs.py
|
uw-it-aca/canvas-sis-provisioner
|
28f54ce101704a8feaaea1e514618aba5a330a25
|
[
"Apache-2.0"
] | 451
|
2016-10-25T22:08:19.000Z
|
2022-03-23T20:27:52.000Z
|
sis_provisioner/views/jobs.py
|
uw-it-aca/canvas-sis-provisioner
|
28f54ce101704a8feaaea1e514618aba5a330a25
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from logging import getLogger
from sis_provisioner.models import Job
from sis_provisioner.views.admin import RESTDispatch, get_user
from django.utils.timezone import utc
from datetime import datetime
import json
logger = getLogger(__name__)
class JobView(RESTDispatch):
""" Retrieves a Job model.
GET returns 200 with Job details.
    PUT returns 200.
    DELETE returns 200.
"""
def get(self, request, *args, **kwargs):
job_id = kwargs['job_id']
try:
job = Job.objects.get(id=job_id)
return self.json_response(job.json_data())
except Job.DoesNotExist:
return self.error_response(404, "Job %s not found" % job_id)
def put(self, request, *args, **kwargs):
if not self.can_manage_jobs(request):
return self.error_response(401, "Unauthorized")
job_id = kwargs['job_id']
try:
job = Job.objects.get(id=job_id)
data = json.loads(request.body).get('job', {})
if 'is_active' in data:
job.is_active = data['is_active']
job.changed_by = get_user(request)
job.changed_date = datetime.utcnow().replace(tzinfo=utc)
job.save()
logger.info('%s %s Job "%s"' % (
job.changed_by,
'enabled' if job.is_active else 'disabled',
job.name))
return self.json_response({'job': job.json_data()})
except Job.DoesNotExist:
return self.error_response(404, "Job %s not found" % job_id)
def delete(self, request, *args, **kwargs):
if not self.can_manage_jobs(request):
return self.error_response(401, "Unauthorized")
job_id = kwargs['job_id']
try:
job = Job.objects.get(id=job_id)
job.delete()
logger.info('%s deleted Job "%s"' % (job.changed_by, job.name))
return self.json_response({'job': job.json_data()})
except Job.DoesNotExist:
return self.error_response(404, "Job %s not found" % job_id)
class JobListView(RESTDispatch):
""" Retrieves a list of Jobs.
"""
def get(self, request, *args, **kwargs):
read_only = not self.can_manage_jobs(request)
jobs = []
for job in Job.objects.all().order_by('title'):
data = job.json_data()
data['read_only'] = read_only
jobs.append(data)
return self.json_response({'jobs': jobs})
| 32.759494
| 75
| 0.59119
|
b1729ff14db41db6202ea932bf46a6208f0bff2f
| 8,429
|
py
|
Python
|
paper-launch-scripts/graph_results_utils.py
|
JBLanier/pipeline-psro
|
33bc41892fb80a03604b118d5256555fb65d1c32
|
[
"MIT"
] | 26
|
2020-11-04T13:50:58.000Z
|
2022-03-11T08:09:00.000Z
|
paper-launch-scripts/graph_results_utils.py
|
JBLanier/distributed-rl-for-imperfect-info-games
|
e150e99e433cadae27aa3ae5f6c7134f7e5c6fda
|
[
"MIT"
] | null | null | null |
paper-launch-scripts/graph_results_utils.py
|
JBLanier/distributed-rl-for-imperfect-info-games
|
e150e99e433cadae27aa3ae5f6c7134f7e5c6fda
|
[
"MIT"
] | 13
|
2020-12-07T11:39:37.000Z
|
2021-11-04T15:59:17.000Z
|
import ray
from ray.rllib.agents.trainer import with_common_config, with_base_config
from ray.rllib.models.catalog import MODEL_DEFAULTS
from ray.rllib.utils import try_import_tf
import json
import os
import pandas as pd
import multiprocessing
from itertools import repeat
from mprl.scripts.poker_parallel_algos.utils.policy_config_keys import POKER_ARCH1_MODEL_CONFIG_KEY
from mprl.rl.envs.opnspl.measure_exploitability_eval_callback import measure_exploitability_nonlstm
from mprl.utility_services.cloud_storage import maybe_download_object, connect_storage_client, BUCKET_NAME
from mprl.rl.sac.sac_policy import SACDiscreteTFPolicy
from mprl.rl.common.stratego_preprocessor import STRATEGO_PREPROCESSOR, StrategoDictFlatteningPreprocessor
from mprl.rl.envs.opnspl.poker_multiagent_env import POKER_ENV, KUHN_POKER, LEDUC_POKER, PARTIALLY_OBSERVABLE, PokerMultiAgentEnv
from mprl.rl.common.sac_stratego_model import SAC_STRATEGO_MODEL
from mprl.scripts.poker_parallel_algos.utils.metanash import get_fp_metanash_for_payoff_table
from mprl.utility_services.payoff_table import PayoffTable
tf = try_import_tf()
POLICY_CLASS = SACDiscreteTFPolicy
POLICY_CLASS_NAME = SACDiscreteTFPolicy.__name__
MODEL_CONFIG_KEY = POKER_ARCH1_MODEL_CONFIG_KEY
def get_stats_for_single_payoff_table(payoff_table_key, experiment_name, poker_game_version, model_config_key):
POKER_ENV_CONFIG = {
'version': poker_game_version,
}
storage_client = connect_storage_client()
# If you use ray for more than just this single example fn, you'll need to move ray.init to the top of your main()
ray.init(address=os.getenv('RAY_HEAD_NODE'), ignore_reinit_error=True, local_mode=True)
model_config_file_path, _ = maybe_download_object(storage_client=storage_client,
bucket_name=BUCKET_NAME,
object_name=model_config_key,
force_download=False)
with open(model_config_file_path, 'r') as config_file:
model_config = json.load(fp=config_file)
example_env = PokerMultiAgentEnv(env_config=POKER_ENV_CONFIG)
obs_space = example_env.observation_space
act_space = example_env.action_space
preprocessor = StrategoDictFlatteningPreprocessor(obs_space=obs_space)
graph = tf.Graph()
sess = tf.Session(config=tf.ConfigProto(device_count={'GPU': 0}), graph=graph)
def fetch_logits(policy):
return {
"behaviour_logits": policy.model.last_output(),
}
_policy_cls = POLICY_CLASS.with_updates(
extra_action_fetches_fn=fetch_logits
)
with graph.as_default():
with sess.as_default():
policy = _policy_cls(
obs_space=preprocessor.observation_space,
action_space=act_space,
config=with_common_config({
'model': with_base_config(base_config=MODEL_DEFAULTS, extra_config=model_config),
'env': POKER_ENV,
'env_config': POKER_ENV_CONFIG,
'custom_preprocessor': STRATEGO_PREPROCESSOR}))
def set_policy_weights(weights_key):
weights_file_path, _ = maybe_download_object(storage_client=storage_client,
bucket_name=BUCKET_NAME,
object_name=weights_key,
force_download=False)
policy.load_model_weights(weights_file_path)
payoff_table_local_path, _ = maybe_download_object(storage_client=storage_client,
bucket_name=BUCKET_NAME,
object_name=payoff_table_key,
force_download=False)
payoff_table = PayoffTable.from_dill_file(dill_file_path=payoff_table_local_path)
stats_out = {
'payoff_table_key': [],
'experiment_name': [],
'num_policies': [],
'exploitability': [],
'total_steps': [],
'total_episodes': [],
}
exploitability_per_generation = []
total_steps_per_generation = []
total_episodes_per_generation = []
num_policies_per_generation = []
for i, n_policies in enumerate(range(1,payoff_table.size() + 1)):
metanash_probs = get_fp_metanash_for_payoff_table(payoff_table=payoff_table,
fp_iters=40000,
accepted_opponent_policy_class_names=[POLICY_CLASS_NAME],
accepted_opponent_model_config_keys=[POKER_ENV_CONFIG],
add_payoff_matrix_noise_std_dev=0.000,
mix_with_uniform_dist_coeff=None,
only_first_n_policies=n_policies,
p_or_lower_rounds_to_zero=0.0)
policy_weights_keys = payoff_table.get_ordered_keys_in_payoff_matrix()
policy_dict = {key: prob for key, prob in zip(policy_weights_keys, metanash_probs)}
exploitability_this_gen = measure_exploitability_nonlstm(rllib_policy=policy,
poker_game_version=poker_game_version,
policy_mixture_dict=policy_dict,
set_policy_weights_fn=set_policy_weights)
print(f"{experiment_name}: {n_policies} policies, {exploitability_this_gen} exploitability")
policy_added_this_gen = payoff_table.get_policy_for_index(i)
latest_policy_tags = policy_added_this_gen.tags
steps_prefix = "timesteps: "
latest_policy_steps = int([tag for tag in latest_policy_tags if steps_prefix in tag][0][len(steps_prefix):])
episodes_prefix = "episodes: "
latest_policy_episodes = int([tag for tag in latest_policy_tags if episodes_prefix in tag][0][len(episodes_prefix):])
if i > 0:
total_steps_this_generation = latest_policy_steps + total_steps_per_generation[i-1]
total_episodes_this_generation = latest_policy_episodes + total_episodes_per_generation[i-1]
else:
total_steps_this_generation = latest_policy_steps
total_episodes_this_generation = latest_policy_episodes
exploitability_per_generation.append(exploitability_this_gen)
total_steps_per_generation.append(total_steps_this_generation)
total_episodes_per_generation.append(total_episodes_this_generation)
num_policies_per_generation.append(n_policies)
num_new_entries = len(exploitability_per_generation)
stats_out['payoff_table_key'] = stats_out['payoff_table_key'] + [payoff_table_key] * num_new_entries
stats_out['experiment_name'] = stats_out['experiment_name'] + [experiment_name] * num_new_entries
stats_out['num_policies'] = stats_out['num_policies'] + num_policies_per_generation
stats_out['exploitability'] = stats_out['exploitability'] + exploitability_per_generation
stats_out['total_steps'] = stats_out['total_steps'] + total_steps_per_generation
stats_out['total_episodes'] = stats_out['total_episodes'] + total_episodes_per_generation
return stats_out
def get_exploitability_stats_over_time_for_payoff_tables_all_same_poker_version(
payoff_table_keys, exp_names, poker_game_version, model_config_key):
num_processes = max(multiprocessing.cpu_count()//2, 1)
with multiprocessing.get_context("spawn").Pool(processes=num_processes) as pool:
results = pool.starmap(func=get_stats_for_single_payoff_table,
iterable=zip(payoff_table_keys, exp_names, repeat(poker_game_version), repeat(model_config_key)))
pool.close()
pool.join()
combined_stats = {}
for result in results:
for key, val in result.items():
if key not in combined_stats:
combined_stats[key] = val
else:
combined_stats[key] = [*combined_stats[key], *val]
return pd.DataFrame(combined_stats).drop_duplicates()
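# --- Illustration (not part of the original script) ---
# A minimal, hypothetical sketch of the merge pattern used above: each worker
# returns a dict of equal-length lists, and the per-process results are
# concatenated column-wise before building a single DataFrame.
#
#   import pandas as pd
#   results = [
#       {'experiment_name': ['a'], 'exploitability': [0.9]},
#       {'experiment_name': ['b'], 'exploitability': [0.4]},
#   ]
#   combined = {}
#   for result in results:
#       for key, val in result.items():
#           combined[key] = combined.get(key, []) + list(val)
#   df = pd.DataFrame(combined).drop_duplicates()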
| 48.442529
| 129
| 0.664492
|
9140914e9c17cdded39ee39d2ae9df4e327bb35c
| 3,167
|
py
|
Python
|
cachetclient/v1/metric_points.py
|
zipy124/cachet-client
|
b8962ab2e3c188752d5fdf3b532e9f143298b125
|
[
"MIT"
] | null | null | null |
cachetclient/v1/metric_points.py
|
zipy124/cachet-client
|
b8962ab2e3c188752d5fdf3b532e9f143298b125
|
[
"MIT"
] | null | null | null |
cachetclient/v1/metric_points.py
|
zipy124/cachet-client
|
b8962ab2e3c188752d5fdf3b532e9f143298b125
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from typing import Generator
from cachetclient.base import Manager, Resource
from cachetclient import utils
class MetricPoint(Resource):
@property
def id(self) -> int:
"""int: unique id of the metric point"""
return self.get('id')
@property
def metric_id(self) -> int:
"""int: Get or set metic id for this metric point"""
return self.get('metric_id')
@metric_id.setter
def metric_id(self, value: int):
self._data['metric_id'] = value
@property
def value(self) -> float:
"""float: Value to plot on the metric graph"""
return self.get('value')
@value.setter
def value(self, value: float):
self._data['value'] = value
@property
def created_at(self) -> datetime:
"""datetime: When the metric point was created"""
return utils.to_datetime(self.get('created_at'))
@property
def updated_at(self) -> datetime:
"""datetime: Last time the issue was updated"""
return utils.to_datetime(self.get('updated_at'))
@property
def counter(self) -> int:
"""int: Show the actual calculated value"""
return self.get('counter')
@counter.setter
def counter(self, value: float):
self._data['counter'] = value
@property
def calculated_value(self) -> float:
"""float: The calculated value on metric graph"""
return self.get('calculated_value')
@calculated_value.setter
def calculated_value(self, value: float):
self._data['calculated_value'] = value
class MetricPointsManager(Manager):
resource_class = MetricPoint
path = 'metrics/{}/points'
def create(self, *, metric_id: int, value: float) -> MetricPoint:
"""
Create a metric point
Keyword Args:
metric_id (int): The metric to tag with the point
value (float): Metric point value for graph
Returns:
:py:data:`MetricPoint` instance
"""
return self._create(
self.path.format(metric_id),
{
'value': value
}
)
def count(self, metric_id) -> int:
"""
Count the number of metric points for a metric
Args:
metric_id (int): The metric
Returns:
int: Number of metric points for the metric
"""
return self._count(self.path.format(metric_id))
def list(self, metric_id: int, page: int = 1, per_page: int = 20) -> Generator[MetricPoint, None, None]:
"""
List metric points for a metric
Args:
metric_id: The metric id to list points for
Keyword Args:
page (int): The first page to request
per_page (int): Entries per page
Return:
Generator of :py:data:`MetricPoint`
"""
yield from self._list_paginated(self.path.format(metric_id), page=page, per_page=per_page)
def delete(self, metric_id: int, point_id: int) -> None:
"""
Delete a metric point
"""
self._delete(self.path.format(metric_id), point_id)
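# --- Usage sketch (illustrative only, not part of the original module) ---
# Assumes a cachetclient v1 client object exposing this manager; the attribute
# name `metric_points` and all values below are assumptions for illustration.
#
#   point = client.metric_points.create(metric_id=1, value=0.95)
#   total = client.metric_points.count(metric_id=1)
#   for p in client.metric_points.list(metric_id=1, per_page=50):
#       print(p.value, p.created_at)
#   client.metric_points.delete(metric_id=1, point_id=point.id)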
| 27.301724
| 108
| 0.595832
|
0b0c7c0f52eb5ea8173f4271e90896b1a0dbf5ea
| 6,870
|
py
|
Python
|
tests/unit/test_version.py
|
chalbersma/salt
|
c29514d41c12a79f9b261613cd4c0b722cff3467
|
[
"Apache-2.0"
] | 1
|
2020-01-31T14:43:07.000Z
|
2020-01-31T14:43:07.000Z
|
tests/unit/test_version.py
|
chalbersma/salt
|
c29514d41c12a79f9b261613cd4c0b722cff3467
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_version.py
|
chalbersma/salt
|
c29514d41c12a79f9b261613cd4c0b722cff3467
|
[
"Apache-2.0"
] | 1
|
2021-09-30T07:00:01.000Z
|
2021-09-30T07:00:01.000Z
|
# -*- coding: utf-8 -*-
'''
:codeauthor: Pedro Algarvio (pedro@algarvio.me)
tests.unit.version_test
~~~~~~~~~~~~~~~~~~~~~~~
Test salt's regex git describe version parsing
'''
# Import python libs
from __future__ import absolute_import
import re
# Import Salt Testing libs
from tests.support.unit import TestCase
from tests.support.mock import MagicMock, patch
# Import Salt libs
from salt.version import SaltStackVersion, versions_report
import salt.version
class VersionTestCase(TestCase):
def test_version_parsing(self):
strip_initial_non_numbers_regex = re.compile(r'(?:[^\d]+)?(?P<vs>.*)')
expect = (
('v0.12.0-19-g767d4f9', (0, 12, 0, 0, '', 0, 19, 'g767d4f9'), None),
('v0.12.0-85-g2880105', (0, 12, 0, 0, '', 0, 85, 'g2880105'), None),
('debian/0.11.1+ds-1-3-ga0afcbd',
(0, 11, 1, 0, '', 0, 3, 'ga0afcbd'), '0.11.1-3-ga0afcbd'),
('0.12.1', (0, 12, 1, 0, '', 0, 0, None), None),
('0.12.1', (0, 12, 1, 0, '', 0, 0, None), None),
('0.17.0rc1', (0, 17, 0, 0, 'rc', 1, 0, None), None),
('v0.17.0rc1-1-g52ebdfd', (0, 17, 0, 0, 'rc', 1, 1, 'g52ebdfd'), None),
('v2014.1.4.1', (2014, 1, 4, 1, '', 0, 0, None), None),
('v2014.1.4.1rc3-n/a-abcdefff', (2014, 1, 4, 1, 'rc', 3, -1, 'abcdefff'), None),
('v3.4.1.1', (3, 4, 1, 1, '', 0, 0, None), None),
('v3000', (3000, None, None, 0, '', 0, 0, None), '3000'),
('v3000rc1', (3000, None, None, 0, 'rc', 1, 0, None), '3000rc1'),
)
for vstr, full_info, version in expect:
saltstack_version = SaltStackVersion.parse(vstr)
self.assertEqual(
saltstack_version.full_info, full_info
)
if version is None:
version = strip_initial_non_numbers_regex.search(vstr).group('vs')
self.assertEqual(saltstack_version.string, version)
def test_version_comparison(self):
examples = (
('debian/0.11.1+ds-1-3-ga0afcbd', '0.11.1+ds-2'),
('v0.12.0-85-g2880105', 'v0.12.0-19-g767d4f9'),
('v0.17.0rc1-1-g52ebdfd', '0.17.0rc1'),
('v0.17.0', 'v0.17.0rc1'),
('Hydrogen', '0.17.0'),
('Helium', 'Hydrogen'),
('v2014.1.4.1-n/a-abcdefff', 'v2014.1.4.1rc3-n/a-abcdefff'),
('v2014.1.4.1-1-abcdefff', 'v2014.1.4.1-n/a-abcdefff'),
('v2016.12.0rc1', 'v2016.12.0b1'),
('v2016.12.0beta1', 'v2016.12.0alpha1'),
('v2016.12.0alpha1', 'v2016.12.0alpha0'),
('v3000.1', 'v3000'),
('v3000rc2', 'v3000rc1'),
('v3001', 'v3000'),
('v4023rc1', 'v4022rc1'),
('v3000', 'v3000rc1'),
('v3000', 'v2019.2.1'),
('v3000.1', 'v2019.2.1'),
# we created v3000.0rc1 tag on repo
# but we should not be using this
# version scheme in the future
# but still adding test for it
('v3000', 'v3000.0rc1'),
)
for higher_version, lower_version in examples:
self.assertTrue(SaltStackVersion.parse(higher_version) > lower_version)
self.assertTrue(SaltStackVersion.parse(lower_version) < higher_version)
assert SaltStackVersion.parse(lower_version) != higher_version
def test_unparsable_version(self):
with self.assertRaises(ValueError):
SaltStackVersion.from_name('Drunk')
with self.assertRaises(ValueError):
SaltStackVersion.parse('Drunk')
def test_sha(self):
'''
test matching sha's
'''
exp_ret = (
('d6cd1e2bd19e03a81132a23b2025920577f84e37', True),
('2880105', True),
('v3000.0.1', False),
('v0.12.0-85-g2880105', False)
)
for commit, exp in exp_ret:
ret = SaltStackVersion.git_sha_regex.match(commit)
if exp:
assert ret
else:
assert not ret
def test_version_report_lines(self):
'''
Validate padding in versions report is correct
'''
# Get a set of all version report name lengths including padding
line_lengths = set([
len(line.split(':')[0]) for line in list(versions_report())[4:]
if line != ' ' and line != 'System Versions:'
])
# Check that they are all the same size (only one element in the set)
assert len(line_lengths) == 1
def test_string_new_version(self):
'''
Validate string property method
using new versioning scheme
'''
maj_ver = '3000'
ver = SaltStackVersion(major=maj_ver)
assert not ver.minor
assert not ver.bugfix
assert maj_ver == ver.string
def test_string_new_version_minor(self):
'''
Validate string property method
using new versioning scheme alongside
minor version
'''
maj_ver = 3000
min_ver = 1
ver = SaltStackVersion(major=maj_ver, minor=min_ver)
assert ver.minor == min_ver
assert not ver.bugfix
assert ver.string == '{0}.{1}'.format(maj_ver, min_ver)
def test_string_old_version(self):
'''
Validate string property method
using old versioning scheme alongside
minor version
'''
maj_ver = '2019'
min_ver = '2'
ver = SaltStackVersion(major=maj_ver, minor=min_ver)
assert ver.bugfix == 0
assert ver.string == '{0}.{1}.0'.format(maj_ver, min_ver)
def test_discover_version(self):
'''
Test call to __discover_version
when using different versions
'''
version = {('3000', None):
{(b'v3000.0rc2-12-g44fe283a77\n', '3000rc2-12-g44fe283a77'),
(b'v3000', '3000'),
(b'1234567', '3000-n/a-1234567'), },
(2019, 2):
{(b'v2019.2.0rc2-12-g44fe283a77\n', '2019.2.0rc2-12-g44fe283a77'),
(b'v2019.2.0', '2019.2.0'),
(b'afc9830198dj', '2019.2.0-n/a-afc9830198dj'), },
}
for maj_min, test_v in version.items():
for tag_ver, exp in version[maj_min]:
salt_ver = SaltStackVersion(major=maj_min[0], minor=maj_min[1], bugfix=None)
attrs = {'communicate.return_value': (tag_ver, b''),
'returncode.return_value': 0}
proc_ret = MagicMock(**attrs)
proc_mock = patch('subprocess.Popen', return_value=proc_ret)
patch_os = patch('os.path.exists', return_value=True)
with proc_mock, patch_os:
ret = getattr(salt.version, '__discover_version')(salt_ver)
assert ret == exp
| 37.540984
| 92
| 0.542649
|
2d033305c49366be8fb3f73d6b469375f8089d8c
| 24,810
|
py
|
Python
|
tools/nntool/graph/types/others.py
|
00-01/gap_sdk
|
25444d752b26ccf0b848301c381692d77172852c
|
[
"Apache-2.0"
] | null | null | null |
tools/nntool/graph/types/others.py
|
00-01/gap_sdk
|
25444d752b26ccf0b848301c381692d77172852c
|
[
"Apache-2.0"
] | null | null | null |
tools/nntool/graph/types/others.py
|
00-01/gap_sdk
|
25444d752b26ccf0b848301c381692d77172852c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import logging
import math
import numpy as np
from expressions.symbolic.basic import (Abs, Ceil, Cos, Exp, Log, Max, Min,
Neg, Pow, Round, RSqrt, Sin, Sqrt)
from graph.dim import Dim
from utils.real_transpose import real_transpose
from .base import (CanFuseToExpression, ComparableParameters,
InsensitiveToQuantization, NNNodeRef,
NoSizeChangeParameters, Parameters, SensitiveToOrder,
SingleInputAndOutput, cls_op_name, expression_op, nargs,
not_generated)
LOG = logging.getLogger("nntool." + __name__)
@cls_op_name('transpose')
class TransposeParameters(Parameters, SingleInputAndOutput, InsensitiveToQuantization, ComparableParameters):
def __init__(self, *args, transpose=None, block_search_up=False, block_search_down=False, **kwargs):
super(TransposeParameters, self).__init__(*args, **kwargs)
self._transpose = tuple(transpose)
self.block_search_up = block_search_up
self.block_search_down = block_search_down
@property
def transpose(self):
return self._transpose
@transpose.setter
def transpose(self, val):
self._transpose = val
@property
def graph_anon_label(self):
return ['Transpose', f'{self.transpose}']
@property
def graph_label(self):
return [f'{self.CLS_OP_NAME}({self.name})', f'{self.transpose}']
def get_parameter_size(self):
return 0
def permute(self, val):
return [val[i] for i in self.transpose]
def does_nothing(self):
if not self.transpose:
return True
if not self.in_dims or not self.in_dims[0]:
return False
shape = self.in_dims[0].shape
trans = self.transpose
shape_idx = [idx if dim > 1 else None for idx, dim in enumerate(shape)]
shape_trans = [shape_idx[idx]
for idx in trans if shape_idx[idx] is not None]
return shape_trans == sorted(shape_trans)
@property
def is_not_generated(self):
return self.does_nothing()
def is_same_operation_as(self, G, other):
if not isinstance(other, TransposeParameters):
return False
if self.transpose is None:
return other.transpose is None
if other.transpose is None:
return self.transpose is None
return tuple(self.transpose) == tuple(other.transpose)
@property
def can_equalize(self):
return False
def real_shape(self):
return real_transpose(self.in_dims[0].shape, self.transpose)
@property
def transpose_dimension(self):
if self._transpose is None:
return 1
return len(self.transpose)
def get_output_size(self, in_dims):
out_dim = in_dims[0].clone()
if self.transpose:
out_dim = out_dim.transpose(self.transpose)
return [out_dim]
def __str__(self):
return "T {} {}".format(
self.transpose and ','.join(
[str(i) for i in self.transpose]) or "None",
self.at_options
)
@cls_op_name('copy')
class CopyParameters(Parameters, InsensitiveToQuantization):
def __init__(self, *args, **kwargs):
super(CopyParameters, self).__init__(*args, **kwargs)
def get_parameter_size(self):
return 0
@property
def can_equalize(self):
return False
def get_output_size(self, in_dims):
return [in_dims[0].clone()]
def __str__(self):
return ""
@cls_op_name('expand')
class ExpandParameters(Parameters, InsensitiveToQuantization):
def __init__(self, *args, shape=None, **kwargs):
super(ExpandParameters, self).__init__(*args, **kwargs)
self.shape = shape
def get_parameter_size(self):
return 0
@property
def can_equalize(self):
return False
def get_output_size(self, in_dims):
in_shape = list(in_dims[0].shape)
exp_shape = list(self.shape)
if len(in_shape) > len(exp_shape):
exp_shape = in_shape[:(
len(in_shape) - len(exp_shape)):] + exp_shape
elif len(exp_shape) > len(in_shape):
in_shape = exp_shape[:(len(exp_shape) - len(in_shape)):] + in_shape
out_shape = []
for exp_dim, in_dim in zip(exp_shape, in_shape):
if in_dim == 1:
out_shape.append(exp_dim)
elif exp_dim == 1:
out_shape.append(in_dim)
elif in_dim != exp_dim:
raise ValueError(
f'{self.name} invalid expand {in_dims[0]} {self.shape}')
else:
out_shape.append(in_dim)
return [Dim.unnamed(out_shape)]
def __str__(self):
return f"{self.shape}"
@cls_op_name('quantize')
class QuantizeParameters(Parameters):
def __init__(self, *args, from_qtype=None, to_qtype=None,
inserted_by_quantizer=False, **kwargs):
super(QuantizeParameters, self).__init__(*args, **kwargs)
self.from_qtype = from_qtype
self.to_qtype = to_qtype
self.inserted_by_quantizer = inserted_by_quantizer
def get_parameter_size(self):
return 0
@property
def can_equalize(self):
return False
def get_output_size(self, in_dims):
return [in_dims[0].clone()]
def __str__(self):
return f"{self.from_qtype} --> {self.to_qtype}"
@cls_op_name('reverse')
class ReverseParameters(Parameters, InsensitiveToQuantization):
def __init__(self, *args, axis=0, **kwargs):
super(ReverseParameters, self).__init__(*args, **kwargs)
self.axis = axis
def get_parameter_size(self):
return 0
@property
def can_equalize(self):
return False
def get_output_size(self, in_dims):
return [in_dims[0].clone()]
def __str__(self):
return "A {}".format(self.axis)
@cls_op_name('concat')
@nargs({'*'})
@not_generated
class ConcatParameters(Parameters, SensitiveToOrder):
def __init__(self, *args, axis=None, axis_hint=None, **kwargs):
super(ConcatParameters, self).__init__(*args, **kwargs)
self._axis = axis
self._axis_hint = axis_hint
@property
def graph_label(self):
return [self.name, f'Axis {self.axis}']
@property
def graph_anon_label(self):
return ['Concat', f'Axis {self.axis}']
@property
def axis(self):
return self._axis
@axis.setter
def axis(self, val):
self._axis = val
def get_parameter_size(self):
return 0
@property
def can_equalize(self):
return False
def get_output_size(self, in_dims):
if in_dims[0].is_named and self._axis_hint:
self._axis = in_dims[0].get_order_idx(self._axis_hint)
out_dim = Dim.combine([in_dim for in_dim in in_dims], self.axis)
return [out_dim]
def __str__(self):
return "A {} {}".format(
self.axis,
self.at_options
)
@cls_op_name('split')
@not_generated
class SplitParameters(Parameters, SensitiveToOrder):
def __init__(self, *args,
act_slices=None,
out_shapes=None,
axis=None,
**kwargs):
super(SplitParameters, self).__init__(*args, **kwargs)
self.act_slices = act_slices
self.out_shapes = out_shapes
self.axis = axis
def __call__(self, *args, **kwargs):
noderef = super(SplitParameters, self).__call__(*args, **kwargs)
return tuple(NNNodeRef(self, i, noderef.ref[1]) for i in range(len(self.act_slices)))
@property
def graph_label(self):
return [self.name, f'Axis {self.axis}']
@property
def graph_anon_label(self):
return ['Split', f'Axis {self.axis}']
def numpy_split(self, arr: np.ndarray):
slice_specs = [tuple([slice(elem[0], elem[1], elem[2])
for elem in act_slice])
for act_slice in self.act_slices]
return [arr[spec] for spec in slice_specs]
@staticmethod
def get_splits(in_shape, axis, splits=None, num_splits=None):
assert splits or num_splits, "no split parameters provided"
assert in_shape[axis] is not None, "split on undefined axis"
in_idx = 0
act_slices = []
out_shapes = []
if splits:
if in_shape[axis] is not None and any(split == -1 for split in splits):
rest_sz = sum(split for split in splits if split > 0)
splits = (split if split > 0 else in_shape[axis] - rest_sz for split in splits)
for sz in splits:
act_slices.append([(in_idx, in_idx + sz, 1) if idx == axis else (0, shape, 1)
for idx, shape in enumerate(in_shape)
if shape is not None])
out_shapes.append([sz if shape is not None and idx == axis else shape
for idx, shape in enumerate(in_shape)])
in_idx += sz
elif num_splits:
assert in_shape[axis] % num_splits == 0, "dimension of split is not divisible by number of splits"
sz = in_shape[axis] // num_splits
while in_idx < in_shape[axis]:
act_slices.append([(in_idx, in_idx + sz, 1) if idx == axis else (0, shape, 1)
for idx, shape in enumerate(in_shape)
if shape is not None])
out_shapes.append([sz if shape is not None and idx == axis else shape
for idx, shape in enumerate(in_shape)])
in_idx += sz
count_nones = sum(1 if dim is None else 0 for dim in in_shape[:axis:])
axis -= count_nones
return act_slices, out_shapes, axis
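# Illustrative example (not part of the original module): splitting an input of
# shape [1, 4, 8] on axis 2 with num_splits=2 gives
#   act_slices = [[(0, 1, 1), (0, 4, 1), (0, 4, 1)],
#                 [(0, 1, 1), (0, 4, 1), (4, 8, 1)]]
#   out_shapes = [[1, 4, 4], [1, 4, 4]]
#   axis       = 2  (unchanged, since no leading axes are None)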
@property
def num_splits(self):
return len(self.act_slices)
def transpose_params(self, order):
self.act_slices = [
[act_slice[idx] for idx in order] for act_slice in self.act_slices
]
self.out_shapes = [
[shape[idx] for idx in order] for shape in self.out_shapes
]
def get_parameter_size(self):
return 0
def get_output_size(self, in_dims):
out_size = [Dim.unnamed(shape) for shape in self.out_shapes]
return out_size
@property
def can_equalize(self):
return False
def __str__(self):
return "A {} {}".format(
self.axis,
self.at_options
)
@cls_op_name('gather')
class GatherParameters(Parameters, SingleInputAndOutput, SensitiveToOrder, InsensitiveToQuantization):
def __init__(self, *args,
axis=None,
indices=None,
**kwargs):
super(GatherParameters, self).__init__(*args, **kwargs)
self.axis = axis
self.indices = np.array(indices)
def get_parameter_size(self):
return 0
def get_output_size(self, in_dims):
in_dim = in_dims[0]
new_shape = in_dim.shape[:self.axis:] + \
list(self.indices.shape) + in_dim.shape[self.axis + 1:]
return [Dim.unnamed(new_shape)]
@property
def rank(self):
return len(self.in_dims[0].shape) + len(self.indices.shape) - 1
@property
def can_equalize(self):
return False
def __str__(self):
return "A %s I %s" % (self.axis, self.indices)
@cls_op_name('strided_slice')
class StridedSliceParameters(Parameters, SingleInputAndOutput, ComparableParameters, InsensitiveToQuantization):
def __init__(self, *args,
act_slice=None,
out_shape=None,
**kwargs):
super(StridedSliceParameters, self).__init__(*args, **kwargs)
self.act_slice = act_slice
self.slice_shape = tuple(int(abs(math.ceil((sl[1] - sl[0])/sl[2]))) for sl in self.act_slice)
self.out_shape = tuple(out_shape)
@property
def graph_label(self):
return [self.name] + ["(%s,%s,%s)" % elem for elem in self.act_slice]
@property
def graph_anon_label(self):
return ['Slice'] + ["(%s,%s,%s)" % elem for elem in self.act_slice]
def numpy_slice(self, arr: np.ndarray):
slice_spec = [slice(elem[0], elem[1], elem[2])
for elem in self.act_slice if len(elem) == 3]
return arr[tuple(slice_spec)].reshape(self.out_shape)
def only_slices_axis(self, axis):
"""check if there is a slice on only one axis"""
in_shape = self.in_dims[0].shape
return all(sl[0] == 0 and sl[1] == in_shape[idx] and sl[2] == 1
for idx, sl in enumerate(self.act_slice) if idx != axis)
def is_unit_slice(self, axis):
"""check if the slice on one axis returns shape of 1"""
slce = self.act_slice[axis]
if slce[1] > slce[0]:
return slce[1] - slce[0] == 1 and slce[2] == 1
else:
return slce[0] - slce[1] == 2 and slce[2] == -1
def is_same_operation_as(self, G, other):
if not isinstance(other, StridedSliceParameters):
return False
if tuple(self.out_shape) != tuple(other.out_shape):
return False
if len(self.act_slice) != len(other.act_slice):
return False
return all(tuple(elem) == tuple(oelem) for elem, oelem in zip(self.act_slice, other.act_slice))
def only_slices(self, axis):
return all(dim == self.act_slice[idx][1] and self.act_slice[idx][0] == 0 and self.act_slice[idx][2] == 1
for idx, dim in enumerate(self.in_dims[0].shape) if axis != idx)
@property
def post_slice_shape(self):
return [(sl[1] - sl[0])//sl[2] for sl in self.act_slice]
@property
def changes_shape(self):
return len(self.post_slice_shape) > len(self.out_shape)
def get_parameter_size(self):
return 0
def get_output_size(self, in_dims):
return [Dim.unnamed(self.out_shape)]
@property
def can_equalize(self):
return False
def __str__(self):
return ",".join("(%s,%s,%s)" % elem for elem in self.act_slice)
@cls_op_name('pad')
class PadParameters(Parameters, SingleInputAndOutput):
def __init__(self, name, padding=None, pad_vals=None, in_dims_hint=None, out_dims_hint=None):
super(PadParameters, self).__init__(name,
in_dims_hint=in_dims_hint,
out_dims_hint=out_dims_hint)
self.padding = padding
self.pad_vals = pad_vals
@property
def graph_label(self):
return [self.name, f'Pad {self.padding}']
@property
def graph_anon_label(self):
return ['Pad', f'{self.padding}']
def get_parameter_size(self):
return 0
def get_output_size(self, in_dims):
assert len(in_dims) == 1
out_dim = in_dims[0].clone()
for idx, vals in enumerate(self.padding):
out_dim[idx] += sum(vals)
return [out_dim]
@property
def can_equalize(self):
return True
def __str__(self):
return "PAD {}".format(self.padding)
@nargs({2})
class BinaryOpParameters(CanFuseToExpression, Parameters):
def __new__(cls, *args, op_type="maximum", **kwargs):
if cls is BinaryOpParameters:
for subcls in BinaryOpParameters.__subclasses__():
if op_type == subcls.CLS_OP_NAME:
return super(BinaryOpParameters, cls).__new__(subcls)
raise ValueError(f'binary op {op_type} not found')
return super(BinaryOpParameters, cls).__new__(cls, **kwargs)
def __init__(self, *args, op_type="maximum", **kwargs):
super(BinaryOpParameters, self).__init__(*args, **kwargs)
self._op_type = op_type
@property
def op_type(self):
return self._op_type
def get_output_size(self, in_dims):
assert len(in_dims) == 2
out_dim = in_dims[0].clone()
return [out_dim]
def get_parameter_size(self):
return 0
@property
def can_equalize(self):
return False
def __str__(self):
return "{} {}".format(
self._op_type,
self.at_options
)
@cls_op_name('maximum')
@expression_op(Max)
class MaxOpParameters(BinaryOpParameters, InsensitiveToQuantization):
pass
@cls_op_name('minimum')
@expression_op(Min)
class MinOpParameters(BinaryOpParameters, InsensitiveToQuantization):
pass
@cls_op_name('pow')
@expression_op(Pow)
class PowOpParameters(BinaryOpParameters):
pass
class UnaryOpParameters(CanFuseToExpression, Parameters):
def __new__(cls, *args, op_type="sqrt", **kwargs):
if cls == UnaryOpParameters:
for subcls in UnaryOpParameters.__subclasses__():
if op_type == subcls.CLS_OP_NAME:
return super(UnaryOpParameters, cls).__new__(subcls)
raise ValueError(f'unary op {op_type} not found')
return super(UnaryOpParameters, cls).__new__(cls)
def __init__(self, *args, op_type=None, **kwargs):
super(UnaryOpParameters, self).__init__(*args, **kwargs)
self._op_type = op_type
@property
def op_type(self):
return self._op_type
def get_output_size(self, in_dims):
assert len(in_dims) == 1
out_dim = in_dims[0].clone()
return [out_dim]
def get_parameter_size(self):
return 0
@property
def can_equalize(self):
return False
def __str__(self):
return "{} {}".format(
self._op_type,
self.at_options
)
@cls_op_name('round')
@expression_op(Round)
class RoundOpParameters(UnaryOpParameters):
pass
@cls_op_name('ceil')
@expression_op(Ceil)
class CeilOpParameters(UnaryOpParameters):
pass
@cls_op_name('sqrt')
@expression_op(Sqrt)
class SqrtOpParameters(UnaryOpParameters):
pass
@cls_op_name('rsqrt')
@expression_op(RSqrt)
class RSqrtOpParameters(UnaryOpParameters):
pass
@cls_op_name('exp')
@expression_op(Exp)
class ExpOpParameters(UnaryOpParameters):
pass
@cls_op_name('log')
@expression_op(Log)
class LogOpParameters(UnaryOpParameters):
pass
@cls_op_name('sin')
@expression_op(Sin)
class SinOpParameters(UnaryOpParameters):
pass
@cls_op_name('cos')
@expression_op(Cos)
class CosOpParameters(UnaryOpParameters):
pass
@cls_op_name('abs')
@expression_op(Abs)
class AbsOpParameters(UnaryOpParameters, InsensitiveToQuantization):
pass
@cls_op_name('neg')
@expression_op(Neg)
class NegOpParameters(UnaryOpParameters, InsensitiveToQuantization):
pass
@cls_op_name('reshape')
@not_generated
class ReshapeParameters(Parameters, SingleInputAndOutput, InsensitiveToQuantization, ComparableParameters):
def __init__(self, *args, old_shape=None, shape=None, **kwargs):
super(ReshapeParameters, self).__init__(
*args, **kwargs)
if not isinstance(shape, Dim):
shape = Dim.unnamed(shape)
if old_shape is not None and not isinstance(old_shape, Dim):
old_shape = Dim.unnamed(old_shape)
assert shape.is_ordered and (old_shape is None or old_shape.is_ordered)
self._shape = shape
self._old_shape = old_shape
@property
def graph_label(self):
return [self.name, f'{self.old_shape} to {self.shape}']
@property
def graph_anon_label(self):
return ['Reshape', f'{self.old_shape} to {self.shape}']
def does_nothing(self):
return self.shape.layout_shape == self.old_shape.layout_shape
def get_parameter_size(self):
return 0
def exp_red_pattern(self):
""" If the reshape is an expand or reduce dim i.e. adds or removes 1 size axes then
return a pattern with True indicating an added axis, False a removed axis and None
an unchanged axis"""
if not self.does_nothing():
return None
res = []
s1 = self._old_shape.shape.copy()
s2 = self._shape.shape.copy()
while s1 or s2:  # 'or' so trailing 1-sized axes are also recorded (the branches below handle an exhausted side)
if not s1:
top = s2.pop(0)
assert top == 1
res.append(True)
elif not s2:
top = s1.pop(0)
assert top == 1
res.append(False)
else:
if s1[0] == s2[0]:
s1.pop(0)
s2.pop(0)
res.append(None)
elif s1[0] == 1:
s1.pop(0)
res.append(False)
elif s2[0] == 1:
s2.pop(0)
res.append(True)
else:
raise ValueError('shape issue in exp_red_pattern')
return res
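# Illustrative example (not part of the original module): reshaping
# (4, 1, 8) -> (4, 8) removes the middle 1-sized axis, so exp_red_pattern()
# returns [None, False, None]; the reverse reshape (4, 8) -> (4, 1, 8)
# returns [None, True, None].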
def is_same_operation_as(self, G, other):
if not isinstance(other, ReshapeParameters):
return False
if tuple(self.old_shape.shape) != tuple(other.old_shape.shape):
return False
if tuple(self.shape.shape) != tuple(other.shape.shape):
return False
return True
def get_output_size(self, in_dims):
assert len(in_dims) == 1
in_dim = in_dims[0]
self.old_shape = in_dim
if in_dim.size() != self.shape.size():
raise NotImplementedError("bad reshape %s: in dim %s does not match reshape %s" %
(self.name, in_dim, self.shape))
out = self.shape.clone()
return [out]
@property
def shape(self):
return self._shape
@shape.setter
def shape(self, val):
assert val.is_ordered
self._shape = val
@property
def old_shape(self):
return self._old_shape
@old_shape.setter
def old_shape(self, val):
assert val.is_ordered
self._old_shape = val
@property
def can_equalize(self):
return False
def __str__(self):
return f"{self.old_shape}->{self.shape}"
# pylint: disable=abstract-method
@cls_op_name('noop')
class NoOPParameters(NoSizeChangeParameters, SingleInputAndOutput, InsensitiveToQuantization):
def __init__(self, name, desc=""):
super(NoOPParameters, self).__init__(name)
self._desc = desc
def get_parameter_size(self):
return 0
@property
def can_equalize(self):
return False
def compute_load(self):
return 0
def __str__(self):
return "NOOP {}".format(
self._desc
)
class UnexecutableOpParameters(Parameters):
pass
@cls_op_name('UNSUPPORTED')
class UnconvertedOpParameters(UnexecutableOpParameters):
def __init__(self, name, indicated_op_name=None, expected_inputs=None,
indicated_outputs=None, info=None, **kwargs):
super(UnconvertedOpParameters, self).__init__(name, **kwargs)
self.info = info
self.expected_inputs = expected_inputs
self.indicated_outputs = indicated_outputs
self.indicated_op_name = indicated_op_name
def get_output_size(self, in_dims):
if self.indicated_outputs:
return self.indicated_outputs
if len(in_dims) == 1:
return [in_dims[0]]
return [Dim.unknown()]
@property
def can_equalize(self):
return False
def get_parameter_size(self):
return 0
def __str__(self):
return "UNSUPPORTED OP: %s" % self.indicated_op_name
@cls_op_name('UNKNOWN')
class UnknownOpParameters(UnexecutableOpParameters):
def __init__(self, name, info):
super(UnknownOpParameters, self).__init__(name)
self.info = info
def get_output_size(self, in_dims):
if len(in_dims) == 1:
return [in_dims[0]]
return [Dim.unknown()]
@property
def can_equalize(self):
return False
def get_parameter_size(self):
return 0
def __str__(self):
return "Unknown"
| 29.291617
| 112
| 0.614349
|
2d97f1f1fcd58b5351ccdd24f3a5646c518cd8b1
| 2,513
|
py
|
Python
|
tests/components/sense/test_config_flow.py
|
pcaston/core
|
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
|
[
"Apache-2.0"
] | 1
|
2021-07-08T20:09:55.000Z
|
2021-07-08T20:09:55.000Z
|
tests/components/sense/test_config_flow.py
|
pcaston/core
|
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
|
[
"Apache-2.0"
] | 47
|
2021-02-21T23:43:07.000Z
|
2022-03-31T06:07:10.000Z
|
tests/components/sense/test_config_flow.py
|
OpenPeerPower/core
|
f673dfac9f2d0c48fa30af37b0a99df9dd6640ee
|
[
"Apache-2.0"
] | null | null | null |
"""Test the Sense config flow."""
from unittest.mock import patch
from sense_energy import SenseAPITimeoutException, SenseAuthenticationException
from openpeerpower import config_entries, setup
from openpeerpower.components.sense.const import DOMAIN
async def test_form(opp):
"""Test we get the form."""
await setup.async_setup_component(opp, "persistent_notification", {})
result = await opp.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch("sense_energy.ASyncSenseable.authenticate", return_value=True,), patch(
"openpeerpower.components.sense.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await opp.config_entries.flow.async_configure(
result["flow_id"],
{"timeout": "6", "email": "test-email", "password": "test-password"},
)
await opp.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "test-email"
assert result2["data"] == {
"timeout": 6,
"email": "test-email",
"password": "test-password",
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_invalid_auth(opp):
"""Test we handle invalid auth."""
result = await opp.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"sense_energy.ASyncSenseable.authenticate",
side_effect=SenseAuthenticationException,
):
result2 = await opp.config_entries.flow.async_configure(
result["flow_id"],
{"timeout": "6", "email": "test-email", "password": "test-password"},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "invalid_auth"}
async def test_form_cannot_connect(opp):
"""Test we handle cannot connect error."""
result = await opp.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"sense_energy.ASyncSenseable.authenticate",
side_effect=SenseAPITimeoutException,
):
result2 = await opp.config_entries.flow.async_configure(
result["flow_id"],
{"timeout": "6", "email": "test-email", "password": "test-password"},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
| 33.506667
| 86
| 0.653402
|
6eddcecb8718a60b01d1593b405ae518b2f770d9
| 696
|
py
|
Python
|
src/apim/apim_open_admin_ui.py
|
paul-mateos/azure-apim-deployment-utils
|
969fedfa5b5c910ffd3aa04ede265ab18c1844a3
|
[
"Apache-2.0"
] | 14
|
2016-03-16T16:46:46.000Z
|
2022-03-26T02:28:26.000Z
|
src/apim/apim_open_admin_ui.py
|
paul-mateos/azure-apim-deployment-utils
|
969fedfa5b5c910ffd3aa04ede265ab18c1844a3
|
[
"Apache-2.0"
] | 2
|
2016-04-10T08:28:26.000Z
|
2018-01-30T07:58:46.000Z
|
src/apim/apim_open_admin_ui.py
|
paul-mateos/azure-apim-deployment-utils
|
969fedfa5b5c910ffd3aa04ede265ab18c1844a3
|
[
"Apache-2.0"
] | 8
|
2016-06-24T23:11:33.000Z
|
2021-08-13T13:36:45.000Z
|
import os
import sys
import webbrowser
import token_factory
def open_admin_ui(base_dir, instance):
instances_json = os.path.join(base_dir, 'instances.json')
tf = token_factory.create_token_factory_from_file(instances_json)
sso_url = tf.get_admin_sso_link(instance)
webbrowser.open(sso_url)
if __name__ == "__main__":
if len(sys.argv) < 2:
print "Usage:"
print " python apim_open_admin_ui.py <config dir> <instance>"
print ""
print " If <instance> is not supplied, 'apim' is assumed."
sys.exit(1)
instance = 'apim'
if len(sys.argv) >= 3:
instance = sys.argv[2]
open_admin_ui(sys.argv[1], instance)
| 26.769231
| 70
| 0.656609
|
5058ad89feddfec41dba023d614f91fa1391a52f
| 516
|
py
|
Python
|
settings/check_python_version.py
|
JagritiG/data-middleware
|
e51cedf173e487d270f42c993e5e7f79f85bd263
|
[
"MIT"
] | null | null | null |
settings/check_python_version.py
|
JagritiG/data-middleware
|
e51cedf173e487d270f42c993e5e7f79f85bd263
|
[
"MIT"
] | null | null | null |
settings/check_python_version.py
|
JagritiG/data-middleware
|
e51cedf173e487d270f42c993e5e7f79f85bd263
|
[
"MIT"
] | null | null | null |
# Python version compatibility check
import sys
try:
    if not (sys.version_info.major == 3 and sys.version_info.minor >= 7):
        print("Python 3.7 or higher is required.")
        print("You are using Python {}.{}.".format(sys.version_info.major, sys.version_info.minor))
        sys.exit(1)
    else:
        print("You are using Python {}.{}.".format(sys.version_info.major, sys.version_info.minor))
        print("Required python version is already installed")
except Exception as e:
    print(e)
    sys.exit(1)
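# --- Alternative sketch (not part of the original script) ---
# sys.version_info compares like a tuple, so the same check can be written as:
#
#   import sys
#   if sys.version_info < (3, 7):
#       print("Python 3.7 or higher is required.")
#       sys.exit(1)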
| 24.571429
| 99
| 0.672481
|
4e0dce19093ebaa8f6fc14f593cf85dffe811ad8
| 13
|
py
|
Python
|
i2i/__init__.py
|
thorwhalen/i2i
|
f967aaba28793029e3fe643c5e17ae9bc7a77732
|
[
"Apache-2.0"
] | 1
|
2019-08-29T01:35:12.000Z
|
2019-08-29T01:35:12.000Z
|
i2i/__init__.py
|
thorwhalen/i2i
|
f967aaba28793029e3fe643c5e17ae9bc7a77732
|
[
"Apache-2.0"
] | null | null | null |
i2i/__init__.py
|
thorwhalen/i2i
|
f967aaba28793029e3fe643c5e17ae9bc7a77732
|
[
"Apache-2.0"
] | null | null | null |
name = "i2i"
| 6.5
| 12
| 0.538462
|
b471b135365e4e5bb48ae74b62641731e0768f68
| 355
|
py
|
Python
|
app/recipe/urls.py
|
ricardolira/recipe-app-api
|
749c39af92383ecfa208636fc2b09deaaefeeb2a
|
[
"MIT"
] | null | null | null |
app/recipe/urls.py
|
ricardolira/recipe-app-api
|
749c39af92383ecfa208636fc2b09deaaefeeb2a
|
[
"MIT"
] | null | null | null |
app/recipe/urls.py
|
ricardolira/recipe-app-api
|
749c39af92383ecfa208636fc2b09deaaefeeb2a
|
[
"MIT"
] | null | null | null |
from django.urls import include, path
from rest_framework.routers import DefaultRouter
from . import views
router = DefaultRouter()
router.register('tags', views.TagViewSet)
router.register('ingredients', views.IngredientViewSet)
router.register('recipes', views.RecipeViewSet)
app_name = 'recipe'
urlpatterns = [
path('', include(router.urls))
]
| 22.1875
| 55
| 0.771831
|
f3184081495a358988f9bc219ec1e786fbce9e0e
| 819
|
py
|
Python
|
Samples/BasicDrawing.py
|
zaml/ComputerVision
|
01afc8ee8c11c472a8251eabd3721d180223c2e8
|
[
"MIT"
] | null | null | null |
Samples/BasicDrawing.py
|
zaml/ComputerVision
|
01afc8ee8c11c472a8251eabd3721d180223c2e8
|
[
"MIT"
] | null | null | null |
Samples/BasicDrawing.py
|
zaml/ComputerVision
|
01afc8ee8c11c472a8251eabd3721d180223c2e8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 30 11:25:22 2018
@author: juanzamora
"""
import numpy as np
import cv2
# Create a black image
img = np.zeros((512,512,3), np.uint8)
# Draw a diagonal blue line with thickness of 5 px
cv2.line(img,(0,0),(511,511),(255,0,0),5)
# Draw rectangle
cv2.rectangle(img,(384,0),(510,128),(0,255,0),3)
# Draw Circle
cv2.circle(img,(447,63), 63, (0,0,255), -1)
# Draw Ellipse
cv2.ellipse(img,(256,256),(100,50),0,0,180,255,-1)
# Draw Polygon
pts = np.array([[10,5],[20,30],[70,20],[50,10]], np.int32)
pts = pts.reshape((-1,1,2))
cv2.polylines(img,[pts],True,(0,255,255))
# Adding Text
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img,'OpenCV',(10,500), font, 4,(255,255,255),2,cv2.LINE_AA)
cv2.imshow("foo2",img)
cv2.waitKey()
| 20.475
| 71
| 0.647131
|
ccaa4949470ab15aae865a1d20f7487a5cc0ee0c
| 6,623
|
py
|
Python
|
robotframework_pykafka/kafka_helper.py
|
invadergir/robotframework-pykafka
|
0d290d0883c468785ca38fb9aae62cc13c3a04ae
|
[
"Apache-2.0"
] | 9
|
2018-08-30T00:08:51.000Z
|
2022-01-17T00:50:56.000Z
|
robotframework_pykafka/kafka_helper.py
|
invadergir/robotframework-pykafka
|
0d290d0883c468785ca38fb9aae62cc13c3a04ae
|
[
"Apache-2.0"
] | 2
|
2018-10-10T20:54:26.000Z
|
2018-10-10T20:57:02.000Z
|
robotframework_pykafka/kafka_helper.py
|
invadergir/robotframework-pykafka
|
0d290d0883c468785ca38fb9aae62cc13c3a04ae
|
[
"Apache-2.0"
] | 2
|
2020-02-18T00:09:40.000Z
|
2020-10-13T12:20:43.000Z
|
#!/bin/python3
import json
import os
import time
from pykafka import KafkaClient
from pykafka.common import OffsetType
from utils import *
##################################################
# Kafka helper class.
class kafka_helper:
##################################################
# Constructor. The broker hostname and version are set in this order if
# specified:
# 1. constructor parameters if they are non-None and non-empty
# 2. environment variables KAFKA_HOST and KAFKA_BROKER_VERSION
# 3. default values (localhost and 2.3)
def __init__(self, kafkaBrokerHostname = None, kafkaBrokerVersion = None):
# Determine the kafka host
self._kafkaHost = ""
if kafkaBrokerHostname:
self._kafkaHost = kafkaBrokerHostname
else:
try:
self._kafkaHost = os.environ['KAFKA_HOST']
except KeyError as e:
# Default it to localhost if not specified
self._kafkaHost = "localhost:9092"
except Exception as e:
raise e
# Determine the kafka version to use. Default to 2.3.0 if not specified.
self._kafkaBrokerVersion = ""
if kafkaBrokerVersion:
self._kafkaBrokerVersion = kafkaBrokerVersion
else:
try:
self._kafkaBrokerVersion = os.environ['KAFKA_BROKER_VERSION']
except KeyError as e:
# Default it if not specified
self._kafkaBrokerVersion = "2.3.0"
except Exception as e:
raise e
# Get a kafka client
self._client = KafkaClient(hosts = self._kafkaHost, broker_version = self._kafkaBrokerVersion)
self._producers = dict()
##################################################
# Cache the producers; key is topicName
def _getProducer(self, topicName):
if (topicName in self._producers):
return self._producers[topicName]
else:
topic = self._client.topics[topicName]
prod = topic.get_sync_producer()
self._producers[topicName] = prod
return prod
##################################################
# Cache the consumers, key is (topicName, consumerGroupName)
_consumers = dict()
# If the consumerGroupName is not specified, it defaults to the topicName.
# Specify the consumerGroupName if you want to have more than one consumer of
# the same topic.
def _getConsumer(self, topicName, consumerGroupName = None, setOffsetToEarliest = False):
assert(topicName)
cgn = ""
if consumerGroupName:
cgn = consumerGroupName
else:
cgn = topicName
top = topicName
if ((top, cgn) in self._consumers):
return self._consumers[(top, cgn)]
else:
topic = self._client.topics[top]
offsetType = OffsetType.LATEST
if setOffsetToEarliest:
offsetType = OffsetType.EARLIEST
c = topic.get_simple_consumer(
consumer_group = cgn,
auto_offset_reset = offsetType,
auto_commit_enable = True,
reset_offset_on_start = True,
consumer_timeout_ms = 1000)
self._consumers[(top, cgn)] = c
return c
##################################################
# Produce function
# key may be None, I guess
def produce(self, topicName, key, value):
assert(topicName)
assert(value)
k = toStr(key)
v = toStr(value)
top = toStr(topicName)
producer = self._getProducer(top)
# convert the input message value and key to bytes
return producer.produce(bytes(v, 'utf-8'), bytes(k, 'utf-8')) #, datetime.datetime.now()) ### TODO - for certain broker versions (less than 1.0?) you have to provide a time?
##################################################
## TODO - needed?
#def cleanup():
# producer.stop()
##################################################
# Set offset to latest. Deletes and recreates the cached consumer.
def setConsumerOffsetToLatest(self, topicName, consumerGroupName = None):
assert(topicName)
top = toStr(topicName)
cgn = toStr(consumerGroupName)
log("Resetting offset to latest for topic "+top+" and consumer group "+str(cgn))
if (top, cgn) in self._consumers:
del self._consumers[(top, cgn)]
self._getConsumer(top, cgn, setOffsetToEarliest = False)
##################################################
# Set offset to earliest. Deletes and recreates the cached consumer.
def setConsumerOffsetToEarliest(self, topicName, consumerGroupName = None):
assert(topicName)
top = toStr(topicName)
cgn = toStr(consumerGroupName)
log("Resetting offset to earliest for topic "+top+" and consumer group "+str(cgn))
if (top, cgn) in self._consumers:
del self._consumers[(top, cgn)]
self._getConsumer(top, cgn, setOffsetToEarliest = True)
##################################################
# Consume a message from a topic as a unicode string.
# If the consumerGroupName is not specified, it defaults to the topicName.
# Specify the consumerGroupName if you want to have more than one consumer of
# the same topic.
# Returns one message (key-value tuple), or None if no message available.
def consumeString(self, topicName, consumerGroupName = None):
assert(topicName)
top = toStr(topicName)
cgn = toStr(consumerGroupName)
consumer = self._getConsumer(top, cgn)
msg = consumer.consume()
if None == msg:
return None
else:
return (msg.partition_key, msg.value) # , msg.offset)
##################################################
# Consume a message from a topic, converting it to JSON
# For our purposes, "JSON" is defined as a dict that has been
# deserialized from JSON. (Converted from json.loads("{}"))
# Returns a key-value tuple where key is a unicode string and value is a dict,
# or None if no message is available.
def consumeJson(self, topicName, consumerGroupName = None):
cm = self.consumeString(topicName, consumerGroupName)
if cm == None:
return None
else:
# Modify the returned message, decode the byte msg and convert string value to a dict:
newValue = json.loads((cm[1].decode("utf-8")))
tup = (cm[0].decode("utf-8"), newValue)
return tup
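# --- Usage sketch (illustrative only, not part of the original module) ---
# Assumes a Kafka broker reachable via KAFKA_HOST (or localhost:9092) and an
# existing topic; the topic name and variable names below are hypothetical.
#
#   helper = kafka_helper()
#   helper.produce('test-topic', 'my-key', '{"hello": "world"}')
#   msg = helper.consumeJson('test-topic')
#   if msg is not None:
#       key, value = msg
#       print(key, value)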
| 37.418079
| 181
| 0.571493
|
b7be271943f714774b3dc511dc0475a332b9a089
| 16,825
|
py
|
Python
|
tests/test_sbuttons.py
|
eberver/hippy
|
9a6e5f503e9bf4a74ae4b3144dc53eca0e75651f
|
[
"MIT"
] | 3
|
2020-09-17T04:30:54.000Z
|
2020-10-27T17:18:07.000Z
|
tests/test_sbuttons.py
|
eberver/hippy
|
9a6e5f503e9bf4a74ae4b3144dc53eca0e75651f
|
[
"MIT"
] | null | null | null |
tests/test_sbuttons.py
|
eberver/hippy
|
9a6e5f503e9bf4a74ae4b3144dc53eca0e75651f
|
[
"MIT"
] | 3
|
2020-09-16T18:57:19.000Z
|
2022-02-18T07:24:50.000Z
|
# Copyright 2016-2020 HP Development Company, L.P.
# SPDX-License-Identifier: MIT
#
""" Pytests for the Hippy sbuttons.
"""
from __future__ import division, absolute_import, print_function
import random
import threading
import pytest
from hippy import SButtons
from hippy import PySproutError
import check_device_types
import check_system_types
device_name = 'sbuttons'
notifications = []
condition = threading.Condition()
# pylint: disable=redefined-outer-name
@pytest.fixture
def get_buttons(request, index):
"""
A pytest fixture to initialize and return the SButtons object with
the given index.
"""
buttons = SButtons(index)
try:
buttons.open()
except RuntimeError:
pytest.skip("Could not open SButtons connection")
def fin():
buttons.unsubscribe()
buttons.close()
request.addfinalizer(fin)
return buttons
def test_info(get_buttons):
"""
Tests the sbuttons' info method
"""
buttons = get_buttons
info = buttons.info()
check_device_types.check_DeviceInfo(info)
vid_pid = (info['vendor_id'], info['product_id'])
assert vid_pid == check_device_types.Devices.sbuttons.value
serial = info['serial']
assert serial == "Not Available"
def test_open_and_close(get_buttons):
"""
Tests the sbuttons' open, open_count, and close methods.
"""
buttons = get_buttons
connected = buttons.is_device_connected()
assert connected is True
assert buttons.open_count() == 1
count = buttons.close()
assert isinstance(count, int)
assert count == 0
assert buttons.open_count() == 0
with pytest.raises(PySproutError) as execinfo:
# Any call should fail
buttons.led_on_off_rate()
assert execinfo.value.message == 'Device is not open'
count = buttons.open()
assert isinstance(count, int)
assert count == 1
assert buttons.open_count() == 1
# Any call should now work
buttons.led_on_off_rate()
def test_led_state(get_buttons):
"""
Tests the sbuttons' led_state method.
"""
buttons = get_buttons
orig_states = {}
# Store original values
for led in SButtons.ButtonID:
orig_states[led] = buttons.led_state(led)
assert isinstance(orig_states[led]['color'], SButtons.LEDColor)
assert isinstance(orig_states[led]['mode'], SButtons.LEDMode)
assert isinstance(orig_states[led]['color'].value, str)
assert isinstance(orig_states[led]['mode'].value, str)
# Test setting each LED to each mode and each color
for led in SButtons.ButtonID:
for mode in SButtons.LEDMode:
for color in SButtons.LEDColor:
state = {'color': color, 'mode': mode}
set_state = buttons.led_state(led, state)
assert set_state == state
assert buttons.led_state(led) == state
# Test setting by the string values rather than the enum
for led in ['left', 'center', 'right']:
for mode in ['breath', 'controlled_off', 'controlled_on', 'off',
'on', 'pulse']:
for color in ['white_orange', 'orange', 'white']:
state = {'color': color, 'mode': mode}
set_state = buttons.led_state(led, state)
assert set_state['color'].value == color
assert set_state['mode'].value == mode
get_state = buttons.led_state(led)
assert get_state['color'].value == color
assert get_state['mode'].value == mode
# Test only passing in the mode or the color
for led in SButtons.ButtonID:
state = buttons.led_state(led)
for mode in SButtons.LEDMode:
set_state = buttons.led_state(led, {'mode': mode})
state['mode'] = mode
assert set_state == state
assert buttons.led_state(led) == state
for color in SButtons.LEDColor:
set_state = buttons.led_state(led, {'color': color})
state['color'] = color
assert set_state == state
assert buttons.led_state(led) == state
# Verify invalid parameters throw the proper errors
with pytest.raises(PySproutError) as execinfo:
buttons.led_state('left', 'bad')
assert 'Invalid parameter' in execinfo.value.message
with pytest.raises(PySproutError) as execinfo:
buttons.led_state('left', {'fake': SButtons.LEDColor.orange})
assert 'Invalid parameter' in execinfo.value.message
with pytest.raises(PySproutError) as execinfo:
buttons.led_state('left', {})
assert 'Invalid parameter' in execinfo.value.message
with pytest.raises(ValueError):
buttons.led_state(33)
with pytest.raises(ValueError):
buttons.led_state('moo')
with pytest.raises(ValueError):
buttons.led_state('right', {'mode': 'fake_value'})
with pytest.raises(ValueError):
buttons.led_state('right', {'color': 'on'})
# Send a bad value to SoHal (bypassing the hippy enum check) and makes
# sure SoHal throws an error...
with pytest.raises(PySproutError) as execinfo:
buttons._send_msg('led_state', 33) # pylint: disable=protected-access
assert 'Invalid parameter' in execinfo.value.message
with pytest.raises(PySproutError) as execinfo:
buttons._send_msg('led_state', 'moo') # pylint: disable=protected-access
assert 'Invalid parameter' in execinfo.value.message
with pytest.raises(PySproutError) as execinfo:
buttons._send_msg('led_state', ['left', {'fake': 'orange'}]) # pylint: disable=protected-access
assert 'Invalid parameter' in execinfo.value.message
with pytest.raises(PySproutError) as execinfo:
buttons._send_msg('led_state', ['left', {'mode': 'orange'}]) # pylint: disable=protected-access
assert 'Invalid parameter' in execinfo.value.message
with pytest.raises(PySproutError) as execinfo:
buttons._send_msg('led_state', ['left', {'mode': 2}]) # pylint: disable=protected-access
assert 'Invalid parameter' in execinfo.value.message
with pytest.raises(PySproutError) as execinfo:
buttons._send_msg('led_state', ['left', {'color': 12}]) # pylint: disable=protected-access
assert 'Invalid parameter' in execinfo.value.message
with pytest.raises(PySproutError) as execinfo:
buttons._send_msg('led_state', ['left', {'color': 'green'}]) # pylint: disable=protected-access
assert 'Invalid parameter' in execinfo.value.message
with pytest.raises(PySproutError) as execinfo:
buttons._send_msg('led_state', ['left', 10]) # pylint: disable=protected-access
assert 'Invalid parameter' in execinfo.value.message
# Reset original values
for led in SButtons.ButtonID:
buttons.led_state(led, {'color': orig_states[led]['color'],
'mode': orig_states[led]['mode']})
# Check that values were reset
for led in SButtons.ButtonID:
state = buttons.led_state(led)
assert state['color'] == orig_states[led]['color']
assert state['mode'] == orig_states[led]['mode']
def test_led_pulse_rate(get_buttons):
"""
Tests the sbuttons' led_pulse_rate method.
"""
buttons = get_buttons
# Store original value
orig_rate = buttons.led_pulse_rate()
assert isinstance(orig_rate, int)
# Valid pulse_rate range is 1 to 20
for rate in range(1, 21):
set_rate = buttons.led_pulse_rate(rate)
assert set_rate == rate
assert buttons.led_pulse_rate() == rate
# Test out of range values
with pytest.raises(PySproutError) as execinfo:
buttons.led_pulse_rate(0)
assert 'Parameter out of range' in execinfo.value.message
with pytest.raises(PySproutError) as execinfo:
buttons.led_pulse_rate(21)
assert 'Parameter out of range' in execinfo.value.message
with pytest.raises(PySproutError) as execinfo:
buttons.led_pulse_rate(random.randint(22, 100))
assert 'Parameter out of range' in execinfo.value.message
with pytest.raises(PySproutError) as execinfo:
buttons.led_pulse_rate(random.randint(-100, -1))
assert 'Parameter out of range' in execinfo.value.message
# Test invalid parameters
with pytest.raises(PySproutError) as execinfo:
buttons.led_pulse_rate('moo')
assert 'Invalid parameter' in execinfo.value.message
with pytest.raises(PySproutError) as execinfo:
buttons.led_pulse_rate({})
assert 'Invalid parameter' in execinfo.value.message
# Reset value and confirm
set_rate = buttons.led_pulse_rate(orig_rate)
assert set_rate == orig_rate
assert buttons.led_pulse_rate() == orig_rate
def test_led_on_off_rate(get_buttons):
"""
Tests the sbuttons' led_on_off_rate method.
"""
buttons = get_buttons
# Store original value
orig_rate = buttons.led_on_off_rate()
assert isinstance(orig_rate, int)
# Valid pulse_rate range is 1 to 20
for rate in range(1, 21):
set_rate = buttons.led_on_off_rate(rate)
assert set_rate == rate
assert buttons.led_on_off_rate() == rate
# Test out of range values
with pytest.raises(PySproutError) as execinfo:
buttons.led_on_off_rate(0)
assert 'Parameter out of range' in execinfo.value.message
with pytest.raises(PySproutError) as execinfo:
buttons.led_on_off_rate(21)
assert 'Parameter out of range' in execinfo.value.message
with pytest.raises(PySproutError) as execinfo:
buttons.led_on_off_rate(random.randint(22, 100))
assert 'Parameter out of range' in execinfo.value.message
with pytest.raises(PySproutError) as execinfo:
buttons.led_on_off_rate(random.randint(-100, -1))
assert 'Parameter out of range' in execinfo.value.message
# Test invalid values
with pytest.raises(PySproutError) as execinfo:
buttons.led_on_off_rate('bad')
assert 'Invalid parameter' in execinfo.value.message
with pytest.raises(PySproutError) as execinfo:
buttons.led_on_off_rate({})
assert 'Invalid parameter' in execinfo.value.message
# Reset value and confirm
set_rate = buttons.led_on_off_rate(orig_rate)
assert set_rate == orig_rate
assert buttons.led_on_off_rate() == orig_rate
def test_hold_threshold(get_buttons):
"""
Tests the sbuttons' hold_threshold method.
"""
buttons = get_buttons
# Store original value
orig_count = buttons.hold_threshold()
assert isinstance(orig_count, int)
# Set new valid value
new_hold_count = random.randint(10, 255)
set_count = buttons.hold_threshold(new_hold_count)
assert set_count == new_hold_count
assert buttons.hold_threshold() == new_hold_count
# Test the edge cases
set_count = buttons.hold_threshold(10)
assert set_count == 10
assert buttons.hold_threshold() == 10
set_count = buttons.hold_threshold(255)
assert set_count == 255
assert buttons.hold_threshold() == 255
# Test out of range values
with pytest.raises(PySproutError) as execinfo:
buttons.hold_threshold(9)
assert 'Parameter out of range' in execinfo.value.message
with pytest.raises(PySproutError) as execinfo:
buttons.hold_threshold(256)
assert 'Parameter out of range' in execinfo.value.message
with pytest.raises(PySproutError) as execinfo:
buttons.hold_threshold(random.randint(-10, 8))
assert 'Parameter out of range' in execinfo.value.message
with pytest.raises(PySproutError) as execinfo:
buttons.hold_threshold(random.randint(257, 500))
assert 'Parameter out of range' in execinfo.value.message
# Test invalid values
with pytest.raises(PySproutError) as execinfo:
buttons.hold_threshold('bad')
assert 'Invalid parameter' in execinfo.value.message
with pytest.raises(PySproutError) as execinfo:
buttons.hold_threshold({})
assert 'Invalid parameter' in execinfo.value.message
# Reset the original value
set_count = buttons.hold_threshold(orig_count)
assert set_count == orig_count
assert buttons.hold_threshold() == orig_count
def test_factory_default(get_buttons):
"""
Tests the sbuttons' factory_default method.
"""
buttons = get_buttons
buttons.factory_default()
assert buttons.hold_threshold() == 121
assert buttons.led_on_off_rate() == 2
assert buttons.led_pulse_rate() == 4
for led in SButtons.ButtonID:
state = buttons.led_state(led)
assert state['color'] == SButtons.LEDColor.white_orange
assert state['mode'] == SButtons.LEDMode.off
def test_temperatures(get_buttons):
"""
Tests the sbuttons' temperatures method.
"""
buttons = get_buttons
temperatures = buttons.temperatures()
info = buttons.info()
check_system_types.check_TemperatureInfoList(temperatures, [info])
def callback(method, params):
"""
This callback method is registered to receive notifications from SoHal
as part of the notifications test. For each notification, hippy calls this
method from a new thread. To ensure thread safety, this method acquires
the condition lock and then appends the notification to the end of the
notifications list.
"""
condition.acquire()
notifications.append((method, params))
condition.notify()
condition.release()
def get_notification():
"""
This is a helper method used by test_notifications. This method returns
a notification off of the notifications list (and removes that notice
from the list). If the list is empty, it waits for up to 2 seconds to
receive a notification.
"""
condition.acquire()
if not notifications:
ret = condition.wait(2)
if not ret:
condition.release()
raise TimeoutError("Timed out while waiting for notification")
notice = notifications.pop(0)
condition.release()
return notice
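# The callback/get_notification pair above implements a small producer/consumer
# hand-off over a condition variable: the producer appends under the lock and
# notifies, while the consumer waits (with a timeout) until an item is available.
# A minimal, self-contained sketch of the same pattern follows; the names used
# here (produce, consume, _queue, _cond) are illustrative and are not part of
# this test suite.
import threading

_cond = threading.Condition()
_queue = []


def produce(item):
    """Producer side: append an item and wake up a waiting consumer."""
    with _cond:
        _queue.append(item)
        _cond.notify()


def consume(timeout=2.0):
    """Consumer side: pop the oldest item, waiting up to ``timeout`` seconds."""
    with _cond:
        if not _cond.wait_for(lambda: _queue, timeout=timeout):
            raise TimeoutError("Timed out while waiting for an item")
        return _queue.pop(0)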
def test_notifications(get_buttons):
"""
This method tests the sbuttons.on_*** notifications received from SoHal.
"""
buttons = get_buttons
val = buttons.subscribe(callback)
assert isinstance(val, int)
assert val == 1
name = buttons._object_name
# Notifications are never sent as '@0' even if we sent the command with @0
if '@0' in name:
name = 'sbuttons'
# TODO(EB) We'll need a manual test for on_button_press, on_suspend,
# on_resume, on_device_connected, and on_device_disconnected
buttons.close()
notification = get_notification()
assert notification == ('{}.on_open_count'.format(name), 0)
notification = get_notification()
assert notification == ('{}.on_close'.format(name), None)
buttons.open()
notification = get_notification()
assert notification == ('{}.on_open'.format(name), None)
notification = get_notification()
assert notification == ('{}.on_open_count'.format(name), 1)
buttons.hold_threshold(50)
notification = get_notification()
assert notification == ('{}.on_hold_threshold'.format(name), 50)
buttons.led_on_off_rate(15)
notification = get_notification()
assert notification == ('{}.on_led_on_off_rate'.format(name), 15)
buttons.led_pulse_rate(12)
notification = get_notification()
assert notification == ('{}.on_led_pulse_rate'.format(name), 12)
button = SButtons.ButtonID.center
state = {'color': SButtons.LEDColor.orange, 'mode': SButtons.LEDMode.pulse}
buttons.led_state(button, state)
notification = get_notification()
assert notification == ('{}.on_led_state'.format(name), [button, state])
buttons.factory_default()
notification = get_notification()
assert notification == ('{}.on_factory_default'.format(name), None)
val = buttons.unsubscribe()
assert isinstance(val, int)
assert val == 0
# Now make sure we aren't getting notification callbacks anymore...
buttons.hold_threshold(30)
with pytest.raises(TimeoutError) as execinfo:
notification = get_notification()
assert 'Timed out while waiting for notification' in execinfo.value.args[0]
# Verify hippy raises errors if we call subscribe with invalid parameters
with pytest.raises(PySproutError) as execinfo:
buttons.subscribe('string')
assert 'Invalid parameter' in execinfo.value.message
with pytest.raises(PySproutError) as execinfo:
buttons.subscribe(buttons)
assert 'Invalid parameter' in execinfo.value.message
with pytest.raises(PySproutError) as execinfo:
buttons.subscribe({})
assert 'Invalid parameter' in execinfo.value.message
with pytest.raises(PySproutError) as execinfo:
buttons.subscribe(3)
assert 'Invalid parameter' in execinfo.value.message
| 35.646186
| 104
| 0.685944
|
d81d562f8581415bce053924c15537c5ea9784f3
| 3,853
|
py
|
Python
|
data/eos/Tomida+Hori_2016/hydrostatic2.py
|
applejwjcat/dispatch
|
4fad06ee952de181f6c51b91f179d6396bdfb333
|
[
"BSD-3-Clause"
] | null | null | null |
data/eos/Tomida+Hori_2016/hydrostatic2.py
|
applejwjcat/dispatch
|
4fad06ee952de181f6c51b91f179d6396bdfb333
|
[
"BSD-3-Clause"
] | null | null | null |
data/eos/Tomida+Hori_2016/hydrostatic2.py
|
applejwjcat/dispatch
|
4fad06ee952de181f6c51b91f179d6396bdfb333
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 12 16:54:41 2018
@author: Aake
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as pl
import EOS
from scaling import scaling,cgs
#%% Void object
class void():
pass
evol=void()
#%% Soft gravity
sc=scaling()
m_planet=5.0
a_planet=1.0
def force(r,rsm):
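# Radial gravitational acceleration from a planet of m_planet Earth masses:
# Newtonian outside the smoothing radius rsm, softened inside it so that the
# force goes smoothly to zero at r=0.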
if r>rsm:
f=cgs.grav*cgs.m_earth*m_planet/r**2
else:
f=cgs.grav*cgs.m_earth*m_planet/rsm**2*(4.*(r/rsm)-3.*(r/rsm)**2)
return f
#%%
title='EOS = Tomida & Hori'
#title='EOS = ideal gas gamma=1.4'
pl.figure(1); pl.clf()
a_planet=1.0 # orbital radius
r_start=1.00 # start of integration, in units of R_Hill
T_start=200 # disk temperature
d_start=1e-10 # disk density
dlnd=0.02
dlnd=0.05
dlnd=0.1
T=None
fsm=0.0
r=1.0
#masses=(0.1,0.2,0.4,0.6,0.8,1.0)
#masses=(0.4,1.0)
masses=[1.]
evol.mass=[0.0]
evol.temp=[0.0]
for m_planet in masses:
r_p=cgs.r_earth*m_planet**(1./3.)
r_n=r_p
r_H=a_planet*cgs.au*(cgs.m_earth*m_planet/(3.*cgs.m_sun))**(1./3.)
xlabel='r/r_H'
for i in range(1,2):
dd=[]
TT=[]
for T_start in (100.,200.,300.):
rsm=fsm*r_H
root='m={:3.1f}'.format(m_planet)
if i==0:
eos=EOS.eos_i(mu=2.35)
file=open(root+"_i.atm","w")
else:
eos=EOS.eos_t()
file=open(root+"_t.atm","w")
file.write('Pressure Temperature\n')
d1=d_start
T1=T_start
P1=eos.pressure(T1,d1)
gamma1=eos.gamma(T1,d1)
r1=r_start*r_H
#r1=243*cgs.r_earth
def vdrag(r,d):
t_stop=3e7*1e-12/d
return force(r,rsm)*t_stop/1e5
vd=[vdrag(r1,d1)]
if T is not None:
Tp=T
rp=r
d=[d1]; P=[P1]; T=[T1]; r=[r1/r_n]; gamma=[gamma1]; dm=[0.]
n=0
tau=0.0
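# Inward integration of the envelope: each step raises the density by dlnd,
# updates the temperature adiabatically via the EOS gamma (dlnT = (gamma-1)*dlnd),
# and advances the radius from hydrostatic equilibrium (dlnr = -dlnP/g),
# stopping at the planet radius r_p.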
while (r1>r_p):
r0=r1
d0=d1
P0=P1
T0=T1
gamma0=eos.gamma(T0,d0)
g0=force(r0,rsm)*r0*d0/P0
d1=d0*np.exp(dlnd)
for iter in range(5):
gamma1=eos.gamma(T1,d1)
gam=0.5*(gamma0+gamma1)
dlnT=dlnd*(gam-1.0)
T1=T0*np.exp(dlnT)
P1=eos.pressure(T1,d1)
f1=force(r1,rsm)
g1=f1*r1*d1/P1
g=0.5*(g0+g1)
dlnP=np.log(P1/P0)
dlnr=-dlnP/g
r1=r0*np.exp(dlnr)
r.append(r1/r_n); d.append(d1); P.append(P1); T.append(T1); gamma.append(gamma1)
dm.append(0.5*(d1+d0)*4.*np.pi*(0.5*(r0+r1))**2*(r0-r1))
n+=1
tau+=(r0-r1)*(d1+d0)/2.0
vd.append(vdrag(r1,d1))
#print(n,r1/cgs.r_earth,d1,P1,T1,gamma1,vd[-1])
print('{:4d} {:12.3e} {:15.5e} {:13.2e}'.format(n,r1/cgs.r_earth,T1,f1))
file.write('{:15.5e} {:15.5e}\n'.format(P1,T1))
file.close()
dm[0]=dm[1]
pl.loglog(P,T)
pl.loglog(P[-1],T[-1],'o')
pl.xlabel('P')
pl.ylabel('T')
pl.tight_layout()
pl.draw()
pl.pause(0.001)
dd.append(T_start)
TT.append(T1)
#%%
pl.figure(1)
pl.clf()
pl.title('T(P) for disk temperature = 100, 200, 300 K')
pl.xlabel('P [cgs]')
pl.savefig('T-P disk temperature dependence')
#%%
pl.figure(2)
pl.clf()
pl.plot(dd,TT,'-o')
pl.xlabel('disk T')
pl.ylabel('surface temperature')
pl.ylim(2600,3600)
pl.title('M = 1.0, Tomida & Hori EOS');
pl.savefig('bottom T dependence on disk temperature')
| 26.756944
| 96
| 0.481962
|
60345a2227c94fa4efef517f51201ec125421281
| 1,408
|
py
|
Python
|
saefportal/analyzer/migrations/0004_auto_20200828_1100.py
|
harry-consulting/SAEF1
|
055d6e492ba76f90e3248b9da2985fdfe0c6b430
|
[
"BSD-2-Clause"
] | 4
|
2020-12-16T13:14:26.000Z
|
2022-03-26T08:54:12.000Z
|
saefportal/analyzer/migrations/0004_auto_20200828_1100.py
|
harry-consulting/SAEF1
|
055d6e492ba76f90e3248b9da2985fdfe0c6b430
|
[
"BSD-2-Clause"
] | 1
|
2022-03-26T09:09:04.000Z
|
2022-03-26T09:09:04.000Z
|
saefportal/analyzer/migrations/0004_auto_20200828_1100.py
|
harry-consulting/SAEF
|
12ef43bbcc3178b8a988e21c1bef035881cf6e6d
|
[
"BSD-2-Clause"
] | 1
|
2020-12-16T13:20:17.000Z
|
2020-12-16T13:20:17.000Z
|
# Generated by Django 3.0.3 on 2020-08-28 09:00
import analyzer.enums
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('analyzer', '0003_auto_20200827_1504'),
]
operations = [
migrations.AlterField(
model_name='analyzesession',
name='analyzer_type',
field=models.CharField(choices=[('ANALYZE DATASET', 'ANALYZE_DATASET'), ('ANALYZE JOB', 'ANALYZE_JOB'), ('ANALYZE APPLICATION', 'ANALYZE_APPLICATION'), ('ANALYZE DATASET HISTORY', 'ANALYZE_DATASET_HISTORY'), ('ANALYZE JOB HISTORY', 'ANALYZE_JOB_HISTORY'), ('ANALYZE APPLICATION HISTORY', 'ANALYZE_APPLICATION_HISTORY'), ('EXTERNAL', 'EXTERNAL')], default=analyzer.enums.AnalyzerTask['ANALYZE_DATASET'], max_length=32),
),
migrations.AlterField(
model_name='ratiocolumn',
name='dataset_ratio',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='analyzer.ActualDatasetProfile'),
),
migrations.AlterField(
model_name='ratiocount',
name='dataset_ratio',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='analyzer.ActualDatasetProfile'),
),
migrations.DeleteModel(
name='DatasetComparisonRatio',
),
]
| 41.411765
| 430
| 0.666903
|
c12e36d69300e63805eb5720971687a167117c2a
| 15,469
|
py
|
Python
|
holdit/__main__.py
|
caltechlibrary/holdit
|
474165764e3514303dfd118d1beb0b6570fb6e13
|
[
"BSD-3-Clause"
] | 2
|
2019-01-31T21:47:13.000Z
|
2020-11-18T04:28:58.000Z
|
holdit/__main__.py
|
caltechlibrary/holdit
|
474165764e3514303dfd118d1beb0b6570fb6e13
|
[
"BSD-3-Clause"
] | 4
|
2018-10-04T17:56:48.000Z
|
2019-01-10T03:20:13.000Z
|
holdit/__main__.py
|
caltechlibrary/holdit
|
474165764e3514303dfd118d1beb0b6570fb6e13
|
[
"BSD-3-Clause"
] | null | null | null |
'''
__main__: main command-line interface to Hold It!
Hold It! generates a printable Word document containing recent hold requests and
also updates the relevant Google spreadsheet used for tracking requests.
By default, Hold It! uses a GUI dialog to get the user's Caltech access login
name and password. If the -G option is given (/G on Windows), it will not
use a GUI dialog, and will instead use the operating system's
keyring/keychain functionality to get a user name and password. If the
information does not exist from a previous run of Hold It!, it will query the
user interactively for the user name and password, and (unless the -K or /K
argument is given) store them in the user's keyring/keychain so that it does
not have to ask again in the future. It is also possible to supply the
information directly on the command line using the -u and -p options (or /u
and /p on Windows), but this is discouraged because it is insecure on
multiuser computer systems.
To reset the user name and password (e.g., if a mistake was made the last
time and the wrong credentials were stored in the keyring/keychain system),
use the -R (or /R on Windows) command-line argument to a command. This
argument will make Hold It! query for the user name and password again even if
an entry already exists in the keyring or keychain.
By default, Hold It! looks for a .docx file named "template.docx" in the
directory where Hold It! is located, and uses that as the template for record
printing. If given the -t option followed by a file name (/t on Windows), it
will look for the named file instead. If it is not given an explicit
template file and it cannot find a file "template.docx", Hold It! will use a
built-in default template file.
By default, Hold It! will also open the Google spreadsheet used by the
Circulation staff to track hold requests. This action is inhibited if given
the -S option (/S on Windows). The Google spreadsheet is always updated in
any case.
Hold It! will write the output to a file named "holds_print_list.docx" in the
user's Desktop directory, unless the -o option (/o on Windows) is given with
an explicit file path to use instead.
If given the -V option (/V on Windows), this program will print version
information and exit without doing anything else.
Authors
-------
Michael Hucka <mhucka@caltech.edu> -- Caltech Library
Copyright
---------
Copyright (c) 2018 by the California Institute of Technology. This code is
open-source software released under a 3-clause BSD license. Please see the
file "LICENSE" for more information.
'''
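# A few illustrative invocations of the command-line interface described above.
# (The template/output file names and the user name shown here are made-up
# examples, not values shipped with Hold It!)
#
#   python3 -m holdit                              # GUI mode, default template and output
#   python3 -m holdit -G -u jdoe                   # no GUI; prompts for the password and stores it in the keyring
#   python3 -m holdit -t mytemplate.docx -o holds.docx -S
#   python3 -m holdit -V                           # print version information and exit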
from docxtpl import DocxTemplate
import os
import os.path as path
import plac
import sys
import time
from threading import Thread
import traceback
import holdit
from holdit.control import HoldItControlGUI, HoldItControlCLI
from holdit.access import AccessHandlerGUI, AccessHandlerCLI
from holdit.progress import ProgressIndicatorGUI, ProgressIndicatorCLI
from holdit.messages import MessageHandlerGUI, MessageHandlerCLI
from holdit.config import Config
from holdit.records import records_diff, records_filter
from holdit.tind import records_from_tind
from holdit.google_sheet import records_from_google, update_google, open_google
from holdit.generate import printable_doc
from holdit.network import network_available
from holdit.files import readable, writable, open_file, rename_existing, file_in_use
from holdit.files import desktop_path, module_path, holdit_path, delete_existing
from holdit.exceptions import *
from holdit.debug import set_debug, log
# The following is for fixing blurry fonts and controls in wxPython on Windows,
# based on the solution by Nairen Zheng posted to Stack Overflow on
# 2019-01-18: https://stackoverflow.com/a/54247018/743730.
if sys.platform.startswith('win'):
import ctypes
try:
ctypes.windll.shcore.SetProcessDpiAwareness(1)
except:
pass
# Main program.
# ......................................................................
@plac.annotations(
pswd = ('Caltech access user password', 'option', 'p'),
user = ('Caltech access user name', 'option', 'u'),
output = ('write the output to the file "O"', 'option', 'o'),
template = ('use file "F" as the TIND record print template', 'option', 't'),
debug = ('turn on debugging (console only)', 'flag', 'D'),
no_color = ('do not color-code terminal output (default: do)', 'flag', 'C'),
no_gui = ('do not start the GUI interface (default: do)', 'flag', 'G'),
no_keyring = ('do not use a keyring (default: do)', 'flag', 'K'),
no_sheet = ('do not open the spreadsheet (default: open it)', 'flag', 'S'),
reset = ('reset keyring-stored user name and password', 'flag', 'R'),
version = ('print version info and exit', 'flag', 'V'),
)
def main(user = 'U', pswd = 'P', output='O', template='F',
no_color=False, no_gui=False, no_keyring=False, no_sheet=False,
reset=False, debug=False, version=False):
'''Generates a printable Word document containing recent hold requests and
also updates the relevant Google spreadsheet used for tracking requests.
By default, Hold It! uses a GUI dialog to get the user's Caltech access login
name and password. If the -G option is given (/G on Windows), it will not
use a GUI dialog, and will instead use the operating system's
keyring/keychain functionality to get a user name and password. If the
information does not exist from a previous run of Hold It!, it will query the
user interactively for the user name and password, and (unless the -K or /K
argument is given) store them in the user's keyring/keychain so that it does
not have to ask again in the future. It is also possible to supply the
information directly on the command line using the -u and -p options (or /u
and /p on Windows), but this is discouraged because it is insecure on
multiuser computer systems.
To reset the user name and password (e.g., if a mistake was made the last
time and the wrong credentials were stored in the keyring/keychain system),
use the -R (or /R on Windows) command-line argument to a command. This
argument will make Hold It! query for the user name and password again even if
an entry already exists in the keyring or keychain.
By default, Hold It! looks for a .docx file named "template.docx" in the
directory where Hold It! is located, and uses that as the template for record
printing. If given the -t option followed by a file name (/t on Windows), it
will look for the named file instead. If it is not given an explicit
template file and it cannot find a file "template.docx", Hold It! will use a
built-in default template file.
By default, Hold It! will also open the Google spreadsheet used by the
Circulation staff to track hold requests. This action is inhibited if given
the -S option (/S on Windows). The Google spreadsheet is always updated in
any case.
Hold It! will write the output to a file named "holds_print_list.docx" in the
user's Desktop directory, unless the -o option (/o on Windows) is given with
an explicit file path to use instead.
If given the -V option (/V on Windows), this program will print version
information and exit without doing anything else.
'''
# Our defaults are to do things like color the output, which means the
# command line flags make more sense as negated values (e.g., "no-color").
# However, dealing with negated variables in our code is confusing, so:
use_color = not no_color
use_keyring = not no_keyring
use_gui = not no_gui
view_sheet = not no_sheet
# We use default values that provide more intuitive help text printed by
# plac. Rewrite the values to things we actually use.
if user == 'U':
user = None
if pswd == 'P':
pswd = None
if template == 'F':
template = None
if output == 'O':
output = None
# Process the version argument first, because it causes an early exit.
if version:
print('{} version {}'.format(holdit.__title__, holdit.__version__))
print('Author: {}'.format(holdit.__author__))
print('URL: {}'.format(holdit.__url__))
print('License: {}'.format(holdit.__license__))
sys.exit()
# Configure debug logging if it's turned on.
if debug:
set_debug(True)
# Switch between different ways of getting information from/to the user.
if use_gui:
controller = HoldItControlGUI()
accesser = AccessHandlerGUI(user, pswd)
notifier = MessageHandlerGUI()
tracer = ProgressIndicatorGUI()
else:
controller = HoldItControlCLI()
accesser = AccessHandlerCLI(user, pswd, use_keyring, reset)
notifier = MessageHandlerCLI(use_color)
tracer = ProgressIndicatorCLI(use_color)
# Start the worker thread.
if __debug__: log('Starting main body thread')
controller.start(MainBody(template, output, view_sheet, debug,
controller, tracer, accesser, notifier))
class MainBody(Thread):
'''Main body of Hold It! implemented as a Python thread.'''
def __init__(self, template, output, view_sheet, debug,
controller, tracer, accesser, notifier):
'''Initializes main thread object but does not start the thread.'''
Thread.__init__(self, name = "MainBody")
self._template = template
self._output = output
self._view_sheet = view_sheet
self._debug = debug
self._controller = controller
self._tracer = tracer
self._accesser = accesser
self._notifier = notifier
if controller.is_gui:
# Only make this a daemon thread when using the GUI; for CLI, it
# must not be a daemon thread or else Hold It! exits immediately.
self.daemon = True
def run(self):
# Set shortcut variables for better code readability below.
template = self._template
output = self._output
view_sheet = self._view_sheet
debug = self._debug
controller = self._controller
accesser = self._accesser
notifier = self._notifier
tracer = self._tracer
# Preliminary sanity checks. Do this here because we need the notifier
# object to be initialized based on whether we're using GUI or CLI.
tracer.start('Performing initial checks')
if not network_available():
notifier.fatal('No network connection.')
# Let's do this thing.
try:
config = Config(path.join(module_path(), "holdit.ini"))
# The default template is expected to be inside the Hold It module.
# If the user supplies a template, we use it instead.
tracer.update('Getting output template')
template_file = config.get('holdit', 'template')
template_file = path.abspath(path.join(module_path(), template_file))
if template:
temp = path.abspath(template)
if readable(temp):
if __debug__: log('Using user-supplied template "{}"'.format(temp))
template_file = temp
else:
notifier.warn('File "{}" not readable -- using default.'.format(template))
else:
# Check for "template.docx" in the Hold It installation dir.
temp = path.abspath(path.join(holdit_path(), "template.docx"))
if readable(temp):
if __debug__: log('Using template found at "{}"'.format(temp))
template_file = temp
# Sanity check against possible screwups in creating the Hold It! app.
# Do them here so that we can fail early if we know we can't finish.
if not readable(template_file):
notifier.fatal('Template doc file "{}" not readable.'.format(template_file))
sys.exit()
if not writable(desktop_path()):
notifier.fatal('Output folder "{}" not writable.'.format(desktop_path()))
sys.exit()
# Get the data.
spreadsheet_id = config.get('holdit', 'spreadsheet_id')
tracer.update('Connecting to TIND')
tind_records = records_from_tind(accesser, notifier, tracer)
tracer.update('Connecting to Google')
google_records = records_from_google(spreadsheet_id, accesser.user, notifier)
missing_records = records_diff(google_records, tind_records)
new_records = list(filter(records_filter('all'), missing_records))
if __debug__: log('diff + filter => {} records'.format(len(new_records)))
if len(new_records) > 0:
# Update the spreadsheet with new records.
tracer.update('Updating Google spreadsheet')
update_google(spreadsheet_id, new_records, accesser.user, notifier)
# Write a printable report.
tracer.update('Generating printable document')
if not output:
output = path.join(desktop_path(), "holds_print_list.docx")
if path.exists(output):
rename_existing(output)
if file_in_use(output):
details = '{} appears to be open in another program'.format(output)
notifier.warn('Cannot write Word doc -- is it still open?', details)
else:
result = printable_doc(new_records, template_file)
result.save(output)
tracer.update('Opening Word document for printing')
open_file(output)
else:
tracer.update('No new hold requests were found in TIND.')
# Open the spreadsheet too, if requested.
if isinstance(notifier, MessageHandlerGUI):
if notifier.yes_no('Open the tracking spreadsheet?'):
open_google(spreadsheet_id)
elif view_sheet:
open_google(spreadsheet_id)
except (KeyboardInterrupt, UserCancelled) as err:
tracer.stop('Quitting.')
controller.stop()
except ServiceFailure:
tracer.stop('Stopping due to a problem connecting to services')
controller.stop()
except Exception as err:
if debug:
import pdb; pdb.set_trace()
tracer.stop('Stopping due to error')
notifier.fatal(holdit.__title__ + ' encountered an error',
str(err) + '\n' + traceback.format_exc())
controller.stop()
else:
tracer.stop('Done')
controller.stop()
# On Windows, we want the command-line args to use slash instead of hyphen.
if sys.platform.startswith('win'):
main.prefix_chars = '/'
# Main entry point.
# ......................................................................
# The following allows users to invoke this using "python3 -m holdit".
if __name__ == '__main__':
plac.call(main)
# For Emacs users
# ......................................................................
# Local Variables:
# mode: python
# python-indent-offset: 4
# End:
| 44.451149
| 94
| 0.654793
|
16dbf9b74077f3825358d3a49b9cb272806b8cd3
| 1,256
|
py
|
Python
|
build/catkin_generated/generate_cached_setup.py
|
Louis-AD-git/racecar_ws
|
3c5cb561d1aee11d80a7f3847e0334e93f345513
|
[
"MIT"
] | null | null | null |
build/catkin_generated/generate_cached_setup.py
|
Louis-AD-git/racecar_ws
|
3c5cb561d1aee11d80a7f3847e0334e93f345513
|
[
"MIT"
] | null | null | null |
build/catkin_generated/generate_cached_setup.py
|
Louis-AD-git/racecar_ws
|
3c5cb561d1aee11d80a7f3847e0334e93f345513
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/melodic/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/melodic/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in '/opt/ros/melodic'.split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/lzh/racecar_ws/devel/env.sh')
output_filename = '/home/lzh/racecar_ws/build/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
# print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| 40.516129
| 102
| 0.730096
|
79c0a6a7d5470ddd65878a50ceaf3c677e4f2d0d
| 1,835
|
py
|
Python
|
decorators_lab/solution/decorators.py
|
jeremyosborne/examples-python
|
5900b3a4f47d59de0a32d3257a8b90a44e80fdcd
|
[
"MIT"
] | null | null | null |
decorators_lab/solution/decorators.py
|
jeremyosborne/examples-python
|
5900b3a4f47d59de0a32d3257a8b90a44e80fdcd
|
[
"MIT"
] | null | null | null |
decorators_lab/solution/decorators.py
|
jeremyosborne/examples-python
|
5900b3a4f47d59de0a32d3257a8b90a44e80fdcd
|
[
"MIT"
] | null | null | null |
'''
>>> data = '{"username": "oscar", "password": "trashcan", "account": 1234, "amount": 12.03}'
>>> deposit(data)
'OK'
>>> data = '{"username": "oscar", "password": "trash", "account": 1234, "amount": 14.98}'
>>> deposit(data)
'Invalid Password'
>>> data = '{"username": "oscar", "password": "trashcan", "account": 1234, "amount": 4.12}'
>>> withdraw(data)
'OK'
>>> data = '{"username": "oscar", "password": "trashcan", "account": 1235, "amount": 2.54}'
>>> withdraw(data)
'Invalid Account'
>>> data = '{"username": "oscar", "password": "trashcan", "account": 1234}'
>>> balance(data)
'7.91'
>>> data = '{"username": "oscar", "password": "trashcan"}'
>>> balance(data)
'No Account Number Provided'
Hint: that's json data
'''
import json
from decimal import *
account = {
"username": "oscar",
"password": "trashcan",
"account": 1234,
"balance": Decimal("0.00"),
}
def data_parser(f):
def parse(j):
trans = json.loads(j)
return f(trans)
return parse
def validate(f):
def validate(trans):
if not "account" in trans:
return 'No Account Number Provided'
elif trans["password"] != account["password"]:
return 'Invalid Password'
elif trans["account"] != account["account"]:
return 'Invalid Account'
else:
return f(trans)
return validate
@data_parser
@validate
def deposit(transaction):
global account
account["balance"] += Decimal(str(transaction["amount"]))
return 'OK'
@data_parser
@validate
def withdraw(transaction):
global account
account["balance"] -= Decimal(str(transaction["amount"]))
return 'OK'
@data_parser
@validate
def balance(transaction):
return str(account["balance"])
if __name__ == '__main__':
import doctest
doctest.testmod(verbose=True)
| 21.337209
| 92
| 0.608174
|
d053fb347f94455a77946d5626da7366884130e7
| 256
|
py
|
Python
|
Python3/scripts-python3/desafios/mundo01/desafio020.py
|
Joey-Resende/Estudando-Python
|
d38212dd6753b35d4f8d2aae79ec41f92cf16d58
|
[
"MIT"
] | null | null | null |
Python3/scripts-python3/desafios/mundo01/desafio020.py
|
Joey-Resende/Estudando-Python
|
d38212dd6753b35d4f8d2aae79ec41f92cf16d58
|
[
"MIT"
] | null | null | null |
Python3/scripts-python3/desafios/mundo01/desafio020.py
|
Joey-Resende/Estudando-Python
|
d38212dd6753b35d4f8d2aae79ec41f92cf16d58
|
[
"MIT"
] | null | null | null |
from random import shuffle
a1 = str(input('Primeiro aluno: '))
a2 = str(input('Segundo aluno: '))
a3 = str(input('Terceiro aluno: '))
a4 = str(input('Quarto aluno: '))
lista = [a1, a2, a3, a4]
shuffle(lista)
print(f'A ordem de apresentação será {lista}')
| 25.6
| 46
| 0.671875
|
c56f7f2bf7ffdef07b6c5dbfe8256ee6529636ad
| 2,199
|
py
|
Python
|
bot.py
|
polyzer/telegram-bot-football-players-prediction
|
6962daa38e4dbd898c26091abede97be52778108
|
[
"MIT"
] | null | null | null |
bot.py
|
polyzer/telegram-bot-football-players-prediction
|
6962daa38e4dbd898c26091abede97be52778108
|
[
"MIT"
] | null | null | null |
bot.py
|
polyzer/telegram-bot-football-players-prediction
|
6962daa38e4dbd898c26091abede97be52778108
|
[
"MIT"
] | null | null | null |
import telebot
import tensorflow as tf
from tensorflow.keras.models import load_model
import keras
import cv2
import skimage.transform
import re
import json
import decorator
import os
from flask import Flask, request
api_key = "1045675720:AAGV3UhR4Ks4mkV-x1ZTfjF8et0Iudp3hbk"
bot = telebot.TeleBot(api_key)
server = Flask(__name__)
@decorator.decorator
def errLog(func, *args, **kwargs):
result = None
try:
result = func(*args, **kwargs)
except Exception as e:
print(e.__repr__())
return result
@bot.message_handler(commands=['start'])
def start_message(message):
bot.send_message(message.chat.id, 'Привет, ты написал мне /start А теперь пришли мне boxid футболиста, а я попробую угадать его label.')
@errLog
def processPhotoMessage(message):
print('message.photo =', message.photo)
fileID = message.photo[-1].file_id
print('fileID =', fileID)
file = bot.get_file(fileID)
print('file.file_path =', file.file_path)
downloaded_file = bot.download_file(file.file_path)
# downloaded_file = cv2.fromarray(downloaded_file)
# img = cv2.imdecode(downloaded_file, 1)
with open("image.png", 'wb') as new_file:
new_file.write(downloaded_file)
img = cv2.imread("image.png")
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
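# Resize to the model's expected 83x45 RGB input and add a batch dimension before predicting.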
img = skimage.transform.resize(img, (83, 45))
img = img.reshape(1, img.shape[0], img.shape[1], img.shape[2])
label = model.predict_classes(img)[0]
bot.send_message(message.chat.id, str(label))
print(f"Label {label} was sent!")
@bot.message_handler(content_types=['photo'])
def photo(message):
processPhotoMessage(message)
@server.route('/' + api_key, methods=['POST'])
def getMessage():
bot.process_new_updates([telebot.types.Update.de_json(request.stream.read().decode("utf-8"))])
return "!", 200
@server.route("/")
def webhook():
bot.remove_webhook()
bot.set_webhook(url='https://stormy-brook-78919.herokuapp.com/' + api_key)
return "!", 200
if __name__ == '__main__':
model = load_model("model_100.h5")
print("it's started")
server.debug = True
server.run(host="0.0.0.0", port=int(os.environ.get('PORT', 5000)))
#bot.polling()
| 30.123288
| 140
| 0.699409
|
cef34af9d3906846bff89729e02c0addd5a5210b
| 5,785
|
py
|
Python
|
praw/models/listing/mixins/base.py
|
NedJunk/praw
|
dd75d91e5574f1499cbef445dd68eb71445629df
|
[
"BSD-2-Clause"
] | null | null | null |
praw/models/listing/mixins/base.py
|
NedJunk/praw
|
dd75d91e5574f1499cbef445dd68eb71445629df
|
[
"BSD-2-Clause"
] | null | null | null |
praw/models/listing/mixins/base.py
|
NedJunk/praw
|
dd75d91e5574f1499cbef445dd68eb71445629df
|
[
"BSD-2-Clause"
] | null | null | null |
"""Provide the BaseListingMixin class."""
from typing import Any, Dict, Iterator, Union
from urllib.parse import urljoin
from ....util import _deprecate_args
from ...base import PRAWBase
from ..generator import ListingGenerator
class BaseListingMixin(PRAWBase):
"""Adds minimum set of methods that apply to all listing objects."""
VALID_TIME_FILTERS = {"all", "day", "hour", "month", "week", "year"}
@staticmethod
def _validate_time_filter(time_filter):
"""Validate ``time_filter``.
:raises: :py:class:`ValueError` if ``time_filter`` is not valid.
"""
if time_filter not in BaseListingMixin.VALID_TIME_FILTERS:
valid_time_filters = ", ".join(BaseListingMixin.VALID_TIME_FILTERS)
raise ValueError(f"time_filter must be one of: {valid_time_filters}")
def _prepare(self, *, arguments, sort):
"""Fix for :class:`.Redditor` methods that use a query param rather than subpath."""
if self.__dict__.get("_listing_use_sort"):
self._safely_add_arguments(arguments=arguments, key="params", sort=sort)
return self._path
return urljoin(self._path, sort)
@_deprecate_args("time_filter")
def controversial(
self,
*,
time_filter: str = "all",
**generator_kwargs: Union[str, int, Dict[str, str]],
) -> Iterator[Any]:
"""Return a :class:`.ListingGenerator` for controversial items.
:param time_filter: Can be one of: ``"all"``, ``"day"``, ``"hour"``,
``"month"``, ``"week"``, or ``"year"`` (default: ``"all"``).
:raises: :py:class:`ValueError` if ``time_filter`` is invalid.
Additional keyword arguments are passed in the initialization of
:class:`.ListingGenerator`.
This method can be used like:
.. code-block:: python
reddit.domain("imgur.com").controversial(time_filter="week")
reddit.multireddit(redditor="samuraisam", name="programming").controversial(
time_filter="day"
)
reddit.redditor("spez").controversial(time_filter="month")
reddit.redditor("spez").comments.controversial(time_filter="year")
reddit.redditor("spez").submissions.controversial(time_filter="all")
reddit.subreddit("all").controversial(time_filter="hour")
"""
self._validate_time_filter(time_filter)
self._safely_add_arguments(
arguments=generator_kwargs, key="params", t=time_filter
)
url = self._prepare(arguments=generator_kwargs, sort="controversial")
return ListingGenerator(self._reddit, url, **generator_kwargs)
def hot(self, **generator_kwargs: Union[str, int, Dict[str, str]]) -> Iterator[Any]:
"""Return a :class:`.ListingGenerator` for hot items.
Additional keyword arguments are passed in the initialization of
:class:`.ListingGenerator`.
This method can be used like:
.. code-block:: python
reddit.domain("imgur.com").hot()
reddit.multireddit(redditor="samuraisam", name="programming").hot()
reddit.redditor("spez").hot()
reddit.redditor("spez").comments.hot()
reddit.redditor("spez").submissions.hot()
reddit.subreddit("all").hot()
"""
generator_kwargs.setdefault("params", {})
url = self._prepare(arguments=generator_kwargs, sort="hot")
return ListingGenerator(self._reddit, url, **generator_kwargs)
def new(self, **generator_kwargs: Union[str, int, Dict[str, str]]) -> Iterator[Any]:
"""Return a :class:`.ListingGenerator` for new items.
Additional keyword arguments are passed in the initialization of
:class:`.ListingGenerator`.
This method can be used like:
.. code-block:: python
reddit.domain("imgur.com").new()
reddit.multireddit(redditor="samuraisam", name="programming").new()
reddit.redditor("spez").new()
reddit.redditor("spez").comments.new()
reddit.redditor("spez").submissions.new()
reddit.subreddit("all").new()
"""
generator_kwargs.setdefault("params", {})
url = self._prepare(arguments=generator_kwargs, sort="new")
return ListingGenerator(self._reddit, url, **generator_kwargs)
@_deprecate_args("time_filter")
def top(
self,
*,
time_filter: str = "all",
**generator_kwargs: Union[str, int, Dict[str, str]],
) -> Iterator[Any]:
"""Return a :class:`.ListingGenerator` for top items.
:param time_filter: Can be one of: ``"all"``, ``"day"``, ``"hour"``,
``"month"``, ``"week"``, or ``"year"`` (default: ``"all"``).
:raises: :py:class:`ValueError` if ``time_filter`` is invalid.
Additional keyword arguments are passed in the initialization of
:class:`.ListingGenerator`.
This method can be used like:
.. code-block:: python
reddit.domain("imgur.com").top(time_filter="week")
reddit.multireddit(redditor="samuraisam", name="programming").top(time_filter="day")
reddit.redditor("spez").top(time_filter="month")
reddit.redditor("spez").comments.top(time_filter="year")
reddit.redditor("spez").submissions.top(time_filter="all")
reddit.subreddit("all").top(time_filter="hour")
"""
self._validate_time_filter(time_filter)
self._safely_add_arguments(
arguments=generator_kwargs, key="params", t=time_filter
)
url = self._prepare(arguments=generator_kwargs, sort="top")
return ListingGenerator(self._reddit, url, **generator_kwargs)
| 38.566667
| 96
| 0.623336
|
339eff7b937ab870972ef1b0fa9b35f66a854025
| 819
|
py
|
Python
|
pyopenproject/business/services/command/root/find.py
|
webu/pyopenproject
|
40b2cb9fe0fa3f89bc0fe2a3be323422d9ecf966
|
[
"MIT"
] | 5
|
2021-02-25T15:54:28.000Z
|
2021-04-22T15:43:36.000Z
|
pyopenproject/business/services/command/root/find.py
|
webu/pyopenproject
|
40b2cb9fe0fa3f89bc0fe2a3be323422d9ecf966
|
[
"MIT"
] | 7
|
2021-03-15T16:26:23.000Z
|
2022-03-16T13:45:18.000Z
|
pyopenproject/business/services/command/root/find.py
|
webu/pyopenproject
|
40b2cb9fe0fa3f89bc0fe2a3be323422d9ecf966
|
[
"MIT"
] | 6
|
2021-06-18T18:59:11.000Z
|
2022-03-27T04:58:52.000Z
|
from pyopenproject.api_connection.exceptions.request_exception import RequestError
from pyopenproject.api_connection.requests.get_request import GetRequest
from pyopenproject.business.exception.business_error import BusinessError
from pyopenproject.business.services.command.root.root_command import RootCommand
from pyopenproject.model.root import Root
class Find(RootCommand):
def __init__(self, connection):
"""Constructor for class Find, from RootCommand
:param connection: The connection data
"""
super().__init__(connection)
def execute(self):
try:
json_obj = GetRequest(self.connection, f"{self.CONTEXT}").execute()
return Root(json_obj)
except RequestError as re:
raise BusinessError("Error finding root") from re
| 35.608696
| 82
| 0.737485
|
93202c2043a98b8b46f7238e1f83a70f2fa90207
| 9,910
|
py
|
Python
|
notebooks-text-format/matrix_factorization_recommender.py
|
arpitvaghela/probml-notebooks
|
32ecb309dd474b989fd1c6ce4ad6dab7a25bbead
|
[
"MIT"
] | 166
|
2021-07-16T17:33:09.000Z
|
2022-03-30T03:35:34.000Z
|
notebooks-text-format/matrix_factorization_recommender.py
|
arpitvaghela/probml-notebooks
|
32ecb309dd474b989fd1c6ce4ad6dab7a25bbead
|
[
"MIT"
] | 29
|
2021-07-21T16:31:51.000Z
|
2022-03-31T19:50:13.000Z
|
notebooks-text-format/matrix_factorization_recommender.py
|
arpitvaghela/probml-notebooks
|
32ecb309dd474b989fd1c6ce4ad6dab7a25bbead
|
[
"MIT"
] | 48
|
2021-07-17T08:26:18.000Z
|
2022-03-31T03:36:18.000Z
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.3
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Nirzu97/pyprobml/blob/matrix-factorization/notebooks/matrix_factorization_recommender.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="NTsyA3nxgIhT"
# # Matrix Factorization for Movie Lens Recommendations
#
# This notebook is based on code from Nick Becker
#
# https://github.com/beckernick/matrix_factorization_recommenders/blob/master/matrix_factorization_recommender.ipynb
#
#
#
#
#
#
# + [markdown] id="nf5GiG3YgIhd"
# # Setting Up the Ratings Data
#
# We read the data directly from MovieLens website, since they don't allow redistribution. We want to include the metadata (movie titles, etc), not just the ratings matrix.
#
# + id="aH_UwaAsh1LP"
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
# + colab={"base_uri": "https://localhost:8080/"} id="0Pa5k76tYztd" outputId="15e6753a-4b12-4459-fcbe-87262c71c2b7"
# !wget http://files.grouplens.org/datasets/movielens/ml-100k.zip
# !ls
# !unzip ml-100k
folder = 'ml-100k'
# + colab={"base_uri": "https://localhost:8080/"} id="THfvnkzah3nv" outputId="e7310704-fb63-49bd-cf4b-ab568d65532e"
# !wget http://files.grouplens.org/datasets/movielens/ml-1m.zip
# !unzip ml-1m
# !ls
folder = 'ml-1m'
# + id="J_zij7tJgIhd"
ratings_list = [ [int(x) for x in i.strip().split("::")] for i in open(os.path.join(folder,'ratings.dat'), 'r').readlines()]
users_list = [i.strip().split("::") for i in open(os.path.join(folder, 'users.dat'), 'r').readlines()]
movies_list = [i.strip().split("::") for i in open(os.path.join(folder, 'movies.dat'), 'r', encoding="latin-1").readlines()]
# + id="R8JnjoDVgIhe"
ratings_df = pd.DataFrame(ratings_list, columns = ['UserID', 'MovieID', 'Rating', 'Timestamp'], dtype = int)
movies_df = pd.DataFrame(movies_list, columns = ['MovieID', 'Title', 'Genres'])
movies_df['MovieID'] = movies_df['MovieID'].apply(pd.to_numeric)
# + id="L06ZLb4CgIhf" colab={"base_uri": "https://localhost:8080/", "height": 196} outputId="21273c16-64a4-4ef7-ae6b-bc2544284b6c"
movies_df.head()
# + colab={"base_uri": "https://localhost:8080/"} id="Tv9rqPfoxvXo" outputId="70b15305-7226-4008-e4f8-13e0c06117e0"
def get_movie_name(movies_df, movie_id_str):
ndx = (movies_df['MovieID']==int(movie_id_str))
name = movies_df['Title'][ndx].to_numpy()[0]
return name
print(get_movie_name(movies_df, 1))
print(get_movie_name(movies_df, "527"))
# + colab={"base_uri": "https://localhost:8080/"} id="mrqetJo14NEe" outputId="f7276962-607a-47c0-a6d2-4322a4dab187"
def get_movie_genres(movies_df, movie_id_str):
ndx = (movies_df['MovieID']==int(movie_id_str))
name = movies_df['Genres'][ndx].to_numpy()[0]
return name
print(get_movie_genres(movies_df, 1))
print(get_movie_genres(movies_df, "527"))
# + id="a3fua44igIhg" colab={"base_uri": "https://localhost:8080/", "height": 196} outputId="ee59b580-a2fd-4917-d7fa-93c70b2d71af"
ratings_df.head()
# + [markdown] id="Qmf6YmHEgIhh"
# These look good, but I want the format of my ratings matrix to be one row per user and one column per movie. I'll `pivot` `ratings_df` to get that and call the new variable `R_df`.
# + id="Jmysfzc4gIhh" colab={"base_uri": "https://localhost:8080/", "height": 275} outputId="600d38df-73df-4fbb-db65-b2cfcd2d62f1"
R_df = ratings_df.pivot(index = 'UserID', columns ='MovieID', values = 'Rating').fillna(0)
R_df.head()
# + [markdown] id="h_4z9YWTgIhh"
# The last thing I need to do is de-mean the data (normalize by each user's mean) and convert it from a dataframe to a numpy array.
# + id="k3GGGqwAgIhi" colab={"base_uri": "https://localhost:8080/"} outputId="7d350a7c-0d61-432c-fdfc-2db708b046eb"
R = R_df.to_numpy()
user_ratings_mean = np.mean(R, axis = 1)
R_demeaned = R - user_ratings_mean.reshape(-1, 1)
print(R.shape)
print(np.count_nonzero(R))
# + [markdown] id="ktEjpdh2gIhi"
# # Singular Value Decomposition
#
# Scipy and Numpy both have functions to do the singular value decomposition. I'm going to use the Scipy function `svds` because it lets me choose how many latent factors I want to use to approximate the original ratings matrix (instead of having to truncate it after).
# + id="DMFgd5IIgIhi"
from scipy.sparse.linalg import svds
U, sigma, Vt = svds(R_demeaned, k = 50)
sigma = np.diag(sigma)
# + id="arTEARPGgIhj" colab={"base_uri": "https://localhost:8080/"} outputId="6576c695-c993-4843-8dfd-2b429e3d66b4"
latents = [10, 20, 50]
errors = []
for latent_dim in latents:
U, sigma, Vt = svds(R_demeaned, k = latent_dim)
sigma = np.diag(sigma)
Rpred = np.dot(np.dot(U, sigma), Vt) + user_ratings_mean.reshape(-1, 1)
Rpred[Rpred < 0] = 0
Rpred[Rpred > 5] = 5
err = (np.sqrt(np.sum(np.power(R - Rpred, 2))))
errors.append(err)
print(errors)
# + [markdown] id="bhBscFmXgIhk"
# # Making Predictions from the Decomposed Matrices
#
# I now have everything I need to make movie ratings predictions for every user. I can do it all at once by following the math and multiplying $U$, $\Sigma$, and $V^{T}$ back together to get the rank $k=50$ approximation of $R$.
#
# I also need to add the user means back to get the actual star ratings prediction.
# + id="gQyqTbUCgIhk"
all_user_predicted_ratings = np.dot(np.dot(U, sigma), Vt) + user_ratings_mean.reshape(-1, 1)
# + [markdown] id="1bZkdk_GgIhk"
# # Making Movie Recommendations
# Finally, it's time. With the predictions matrix for every user, I can build a function to recommend movies for any user. All I need to do is return the movies with the highest predicted rating that the specified user hasn't already rated. Though I didn't actually use any explicit movie content features (such as genre or title), I'll merge in that information to get a more complete picture of the recommendations.
#
# I'll also return the list of movies the user has already rated, for the sake of comparison.
# + id="NWmGciBegIhl" colab={"base_uri": "https://localhost:8080/", "height": 245} outputId="577f2bdc-214d-4b22-e62f-2ea9aecbd126"
preds_df = pd.DataFrame(all_user_predicted_ratings, columns = R_df.columns)
preds_df.head()
# + id="ggAv-Y_GgIhl"
def recommend_movies(preds_df, userID, movies_df, original_ratings_df, num_recommendations=5):
# Get and sort the user's predictions
user_row_number = userID - 1 # UserID starts at 1, not 0
sorted_user_predictions = preds_df.iloc[user_row_number].sort_values(ascending=False) # UserID starts at 1
# Get the user's data and merge in the movie information.
user_data = original_ratings_df[original_ratings_df.UserID == (userID)]
user_full = (user_data.merge(movies_df, how = 'left', left_on = 'MovieID', right_on = 'MovieID').
sort_values(['Rating'], ascending=False)
)
print('User {0} has already rated {1} movies.'.format(userID, user_full.shape[0]))
print('Recommending highest {0} predicted ratings movies not already rated.'.format(num_recommendations))
# Recommend the highest predicted rating movies that the user hasn't seen yet.
recommendations = (movies_df[~movies_df['MovieID'].isin(user_full['MovieID'])].
merge(pd.DataFrame(sorted_user_predictions).reset_index(), how = 'left',
left_on = 'MovieID',
right_on = 'MovieID').
rename(columns = {user_row_number: 'Predictions'}).
sort_values('Predictions', ascending = False).
iloc[:num_recommendations, :-1]
)
return user_full, recommendations
# + id="T6wmnxuTgIhl" colab={"base_uri": "https://localhost:8080/"} outputId="2a4d693e-7497-4200-af26-9282fd9b7266"
already_rated, predictions = recommend_movies(preds_df, 837, movies_df, ratings_df, 10)
# + [markdown] id="XdIpIY9ZgIhm"
# So, how'd I do?
# + id="PfP2cSPMgIhm" colab={"base_uri": "https://localhost:8080/", "height": 345} outputId="e28e4c9e-6ac3-4e64-bab4-5de77931b6fc"
already_rated.head(10)
# + colab={"base_uri": "https://localhost:8080/", "height": 345} id="7uNLhyK3Z95t" outputId="a385ec2b-e18b-4bd3-cc0e-1336654bc3d1"
df = already_rated[['MovieID', 'Title', 'Genres']].copy()
df.head(10)
# + id="eFx8wgwYgIhn" colab={"base_uri": "https://localhost:8080/", "height": 345} outputId="ed30c0d3-685e-4f39-cb48-73e0efba0108"
predictions
# + [markdown] id="u2ZnPxdzgIhn"
# Pretty cool! These look like pretty good recommendations. It's also good to see that, though I didn't actually use the genre of the movie as a feature, the truncated matrix factorization features "picked up" on the underlying tastes and preferences of the user. I've recommended some film-noirs, crime, drama, and war movies - all of which were genres of some of this user's top rated movies.
# + [markdown] id="fKyoDci9tu8K"
# # Visualizing true and predicted ratings matrix
# + colab={"base_uri": "https://localhost:8080/"} id="46qng2bFwYXf" outputId="3cb85f4a-9ef5-493c-bb8d-73d8f44e5658"
Rpred = all_user_predicted_ratings
Rpred[Rpred < 0] = 0
Rpred[Rpred > 5] = 5
print(np.linalg.norm(R - Rpred, ord='fro'))
print(np.sqrt(np.sum(np.power(R - Rpred, 2))))
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="pSk8UdyetzUk" outputId="421a5b15-c691-4464-db0f-155a902e67bc"
import matplotlib.pyplot as plt
nusers = 20
nitems = 20
plt.figure(figsize=(10,10))
plt.imshow(R[:nusers, :nitems], cmap='jet')
plt.xlabel('item')
plt.ylabel('user')
plt.title('True ratings')
plt.colorbar()
plt.figure(figsize=(10,10))
plt.imshow(Rpred[:nusers, :nitems], cmap='jet')
plt.xlabel('item')
plt.ylabel('user')
plt.title('Predicted ratings')
plt.colorbar()
| 40.950413
| 421
| 0.711806
|
9190b033f4412087f4ac2dec131c53671df74e7b
| 26,551
|
py
|
Python
|
tests/components/script/test_init.py
|
basicpail/core
|
5cc54618c5af3f75c08314bf2375cc7ac40d2b7e
|
[
"Apache-2.0"
] | 5
|
2020-10-08T12:59:44.000Z
|
2021-12-28T06:46:25.000Z
|
tests/components/script/test_init.py
|
basicpail/core
|
5cc54618c5af3f75c08314bf2375cc7ac40d2b7e
|
[
"Apache-2.0"
] | 75
|
2020-08-05T07:22:42.000Z
|
2022-03-23T21:54:57.000Z
|
tests/components/script/test_init.py
|
winning1120xx/home-assistant
|
53d4c0ce2d374b5e97bbdc37742656c27adf8eea
|
[
"Apache-2.0"
] | 11
|
2020-12-16T13:48:14.000Z
|
2022-02-01T00:28:05.000Z
|
"""The tests for the Script component."""
# pylint: disable=protected-access
import asyncio
import unittest
from unittest.mock import Mock, patch
import pytest
from homeassistant.components import logbook, script
from homeassistant.components.script import DOMAIN, EVENT_SCRIPT_STARTED
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_NAME,
SERVICE_RELOAD,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
)
from homeassistant.core import (
Context,
CoreState,
HomeAssistant,
State,
callback,
split_entity_id,
)
from homeassistant.exceptions import ServiceNotFound
from homeassistant.helpers import template
from homeassistant.helpers.event import async_track_state_change
from homeassistant.helpers.service import async_get_all_descriptions
from homeassistant.loader import bind_hass
from homeassistant.setup import async_setup_component, setup_component
import homeassistant.util.dt as dt_util
from tests.common import async_mock_service, get_test_home_assistant, mock_restore_cache
from tests.components.logbook.test_init import MockLazyEventPartialState
ENTITY_ID = "script.test"
@bind_hass
def turn_on(hass, entity_id, variables=None, context=None):
"""Turn script on.
This is a legacy helper method. Do not use it for new tests.
"""
_, object_id = split_entity_id(entity_id)
hass.services.call(DOMAIN, object_id, variables, context=context)
@bind_hass
def turn_off(hass, entity_id):
    """Turn script off.
This is a legacy helper method. Do not use it for new tests.
"""
hass.services.call(DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: entity_id})
@bind_hass
def toggle(hass, entity_id):
"""Toggle the script.
This is a legacy helper method. Do not use it for new tests.
"""
hass.services.call(DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: entity_id})
@bind_hass
def reload(hass):
"""Reload script component.
This is a legacy helper method. Do not use it for new tests.
"""
hass.services.call(DOMAIN, SERVICE_RELOAD)
class TestScriptComponent(unittest.TestCase):
"""Test the Script component."""
# pylint: disable=invalid-name
def setUp(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.addCleanup(self.tear_down_cleanup)
def tear_down_cleanup(self):
"""Stop down everything that was started."""
self.hass.stop()
def test_passing_variables(self):
"""Test different ways of passing in variables."""
mock_restore_cache(self.hass, ())
calls = []
context = Context()
@callback
def record_call(service):
"""Add recorded event to set."""
calls.append(service)
self.hass.services.register("test", "script", record_call)
assert setup_component(
self.hass,
"script",
{
"script": {
"test": {
"sequence": {
"service": "test.script",
"data_template": {"hello": "{{ greeting }}"},
}
}
}
},
)
turn_on(self.hass, ENTITY_ID, {"greeting": "world"}, context=context)
self.hass.block_till_done()
assert len(calls) == 1
assert calls[0].context is context
assert calls[0].data["hello"] == "world"
self.hass.services.call(
"script", "test", {"greeting": "universe"}, context=context
)
self.hass.block_till_done()
assert len(calls) == 2
assert calls[1].context is context
assert calls[1].data["hello"] == "universe"
@pytest.mark.parametrize("toggle", [False, True])
async def test_turn_on_off_toggle(hass, toggle):
"""Verify turn_on, turn_off & toggle services."""
event = "test_event"
event_mock = Mock()
hass.bus.async_listen(event, event_mock)
was_on = False
@callback
def state_listener(entity_id, old_state, new_state):
nonlocal was_on
was_on = True
async_track_state_change(hass, ENTITY_ID, state_listener, to_state="on")
if toggle:
turn_off_step = {"service": "script.toggle", "entity_id": ENTITY_ID}
else:
turn_off_step = {"service": "script.turn_off", "entity_id": ENTITY_ID}
assert await async_setup_component(
hass,
"script",
{
"script": {
"test": {
"sequence": [{"event": event}, turn_off_step, {"event": event}]
}
}
},
)
assert not script.is_on(hass, ENTITY_ID)
if toggle:
await hass.services.async_call(
DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: ENTITY_ID}
)
else:
await hass.services.async_call(DOMAIN, split_entity_id(ENTITY_ID)[1])
await hass.async_block_till_done()
assert not script.is_on(hass, ENTITY_ID)
assert was_on
assert event_mock.call_count == 1
invalid_configs = [
{"test": {}},
{"test hello world": {"sequence": [{"event": "bla"}]}},
{"test": {"sequence": {"event": "test_event", "service": "homeassistant.turn_on"}}},
]
@pytest.mark.parametrize("value", invalid_configs)
async def test_setup_with_invalid_configs(hass, value):
"""Test setup with invalid configs."""
assert await async_setup_component(
hass, "script", {"script": value}
), f"Script loaded with wrong config {value}"
assert len(hass.states.async_entity_ids("script")) == 0
@pytest.mark.parametrize("running", ["no", "same", "different"])
async def test_reload_service(hass, running):
"""Verify the reload service."""
event = "test_event"
event_flag = asyncio.Event()
@callback
def event_handler(event):
event_flag.set()
hass.bus.async_listen_once(event, event_handler)
hass.states.async_set("test.script", "off")
assert await async_setup_component(
hass,
"script",
{
"script": {
"test": {
"sequence": [
{"event": event},
{"wait_template": "{{ is_state('test.script', 'on') }}"},
]
}
}
},
)
assert hass.states.get(ENTITY_ID) is not None
assert hass.services.has_service(script.DOMAIN, "test")
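# When 'running' is not 'no', start the script first so the reload happens while
# it is in progress; 'same' vs 'different' controls whether the reloaded config
# reuses the original object_id or replaces it with a new one.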
if running != "no":
_, object_id = split_entity_id(ENTITY_ID)
await hass.services.async_call(DOMAIN, object_id)
await asyncio.wait_for(event_flag.wait(), 1)
assert script.is_on(hass, ENTITY_ID)
object_id = "test" if running == "same" else "test2"
with patch(
"homeassistant.config.load_yaml_config_file",
return_value={"script": {object_id: {"sequence": [{"delay": {"seconds": 5}}]}}},
):
await hass.services.async_call(DOMAIN, SERVICE_RELOAD, blocking=True)
await hass.async_block_till_done()
if running != "same":
assert hass.states.get(ENTITY_ID) is None
assert not hass.services.has_service(script.DOMAIN, "test")
assert hass.states.get("script.test2") is not None
assert hass.services.has_service(script.DOMAIN, "test2")
else:
assert hass.states.get(ENTITY_ID) is not None
assert hass.services.has_service(script.DOMAIN, "test")
async def test_service_descriptions(hass):
"""Test that service descriptions are loaded and reloaded correctly."""
# Test 1: has "description" but no "fields"
assert await async_setup_component(
hass,
"script",
{
"script": {
"test": {
"description": "test description",
"sequence": [{"delay": {"seconds": 5}}],
}
}
},
)
descriptions = await async_get_all_descriptions(hass)
assert descriptions[DOMAIN]["test"]["name"] == "test"
assert descriptions[DOMAIN]["test"]["description"] == "test description"
assert not descriptions[DOMAIN]["test"]["fields"]
# Test 2: has "fields" but no "description"
with patch(
"homeassistant.config.load_yaml_config_file",
return_value={
"script": {
"test": {
"fields": {
"test_param": {
"description": "test_param description",
"example": "test_param example",
}
},
"sequence": [{"delay": {"seconds": 5}}],
}
}
},
):
await hass.services.async_call(DOMAIN, SERVICE_RELOAD, blocking=True)
descriptions = await async_get_all_descriptions(hass)
assert descriptions[script.DOMAIN]["test"]["description"] == ""
assert (
descriptions[script.DOMAIN]["test"]["fields"]["test_param"]["description"]
== "test_param description"
)
assert (
descriptions[script.DOMAIN]["test"]["fields"]["test_param"]["example"]
== "test_param example"
)
# Test 3: has "alias" that will be used as "name"
with patch(
"homeassistant.config.load_yaml_config_file",
return_value={
"script": {
"test_name": {
"alias": "ABC",
"sequence": [{"delay": {"seconds": 5}}],
}
}
},
):
await hass.services.async_call(DOMAIN, SERVICE_RELOAD, blocking=True)
descriptions = await async_get_all_descriptions(hass)
assert descriptions[DOMAIN]["test_name"]["name"] == "ABC"
# Test 4: verify that names from YAML are taken into account as well
assert descriptions[DOMAIN]["turn_on"]["name"] == "Turn on"
async def test_shared_context(hass):
"""Test that the shared context is passed down the chain."""
event = "test_event"
context = Context()
event_mock = Mock()
run_mock = Mock()
hass.bus.async_listen(event, event_mock)
hass.bus.async_listen(EVENT_SCRIPT_STARTED, run_mock)
assert await async_setup_component(
hass, "script", {"script": {"test": {"sequence": [{"event": event}]}}}
)
await hass.services.async_call(
DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: ENTITY_ID}, context=context
)
await hass.async_block_till_done()
assert event_mock.call_count == 1
assert run_mock.call_count == 1
args, kwargs = run_mock.call_args
assert args[0].context == context
# Ensure event data has all attributes set
assert args[0].data.get(ATTR_NAME) == "test"
assert args[0].data.get(ATTR_ENTITY_ID) == "script.test"
# Ensure context carries through the event
args, kwargs = event_mock.call_args
assert args[0].context == context
# Ensure the script state shares the same context
state = hass.states.get("script.test")
assert state is not None
assert state.context == context
async def test_logging_script_error(hass, caplog):
"""Test logging script error."""
assert await async_setup_component(
hass,
"script",
{"script": {"hello": {"sequence": [{"service": "non.existing"}]}}},
)
with pytest.raises(ServiceNotFound) as err:
await hass.services.async_call("script", "hello", blocking=True)
assert err.value.domain == "non"
assert err.value.service == "existing"
assert "Error executing script" in caplog.text
async def test_turning_no_scripts_off(hass):
    """Test that turn_off can be called with no scripts."""
assert await async_setup_component(hass, "script", {})
# Testing it doesn't raise
await hass.services.async_call(
DOMAIN, SERVICE_TURN_OFF, {"entity_id": []}, blocking=True
)
async def test_async_get_descriptions_script(hass):
"""Test async_set_service_schema for the script integration."""
script_config = {
DOMAIN: {
"test1": {"sequence": [{"service": "homeassistant.restart"}]},
"test2": {
"description": "test2",
"fields": {
"param": {
"description": "param_description",
"example": "param_example",
}
},
"sequence": [{"service": "homeassistant.restart"}],
},
}
}
await async_setup_component(hass, DOMAIN, script_config)
descriptions = await hass.helpers.service.async_get_all_descriptions()
assert descriptions[DOMAIN]["test1"]["description"] == ""
assert not descriptions[DOMAIN]["test1"]["fields"]
assert descriptions[DOMAIN]["test2"]["description"] == "test2"
assert (
descriptions[DOMAIN]["test2"]["fields"]["param"]["description"]
== "param_description"
)
assert (
descriptions[DOMAIN]["test2"]["fields"]["param"]["example"] == "param_example"
)
async def test_extraction_functions(hass):
"""Test extraction functions."""
assert await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
"test1": {
"sequence": [
{
"service": "test.script",
"data": {"entity_id": "light.in_both"},
},
{
"service": "test.script",
"data": {"entity_id": "light.in_first"},
},
{
"entity_id": "light.device_in_both",
"domain": "light",
"type": "turn_on",
"device_id": "device-in-both",
},
]
},
"test2": {
"sequence": [
{
"service": "test.script",
"data": {"entity_id": "light.in_both"},
},
{
"condition": "state",
"entity_id": "sensor.condition",
"state": "100",
},
{"scene": "scene.hello"},
{
"entity_id": "light.device_in_both",
"domain": "light",
"type": "turn_on",
"device_id": "device-in-both",
},
{
"entity_id": "light.device_in_last",
"domain": "light",
"type": "turn_on",
"device_id": "device-in-last",
},
],
},
}
},
)
assert set(script.scripts_with_entity(hass, "light.in_both")) == {
"script.test1",
"script.test2",
}
assert set(script.entities_in_script(hass, "script.test1")) == {
"light.in_both",
"light.in_first",
}
assert set(script.scripts_with_device(hass, "device-in-both")) == {
"script.test1",
"script.test2",
}
assert set(script.devices_in_script(hass, "script.test2")) == {
"device-in-both",
"device-in-last",
}
async def test_config_basic(hass):
"""Test passing info in config."""
assert await async_setup_component(
hass,
"script",
{
"script": {
"test_script": {
"alias": "Script Name",
"icon": "mdi:party",
"sequence": [],
}
}
},
)
test_script = hass.states.get("script.test_script")
assert test_script.name == "Script Name"
assert test_script.attributes["icon"] == "mdi:party"
async def test_config_multiple_domains(hass):
"""Test splitting configuration over multiple domains."""
assert await async_setup_component(
hass,
"script",
{
"script": {
"first_script": {
"alias": "Main domain",
"sequence": [],
}
},
"script second": {
"second_script": {
"alias": "Secondary domain",
"sequence": [],
}
},
},
)
test_script = hass.states.get("script.first_script")
assert test_script
assert test_script.name == "Main domain"
test_script = hass.states.get("script.second_script")
assert test_script
assert test_script.name == "Secondary domain"
async def test_logbook_humanify_script_started_event(hass):
"""Test humanifying script started event."""
hass.config.components.add("recorder")
await async_setup_component(hass, DOMAIN, {})
await async_setup_component(hass, "logbook", {})
entity_attr_cache = logbook.EntityAttributeCache(hass)
event1, event2 = list(
logbook.humanify(
hass,
[
MockLazyEventPartialState(
EVENT_SCRIPT_STARTED,
{ATTR_ENTITY_ID: "script.hello", ATTR_NAME: "Hello Script"},
),
MockLazyEventPartialState(
EVENT_SCRIPT_STARTED,
{ATTR_ENTITY_ID: "script.bye", ATTR_NAME: "Bye Script"},
),
],
entity_attr_cache,
{},
)
)
assert event1["name"] == "Hello Script"
assert event1["domain"] == "script"
assert event1["message"] == "started"
assert event1["entity_id"] == "script.hello"
assert event2["name"] == "Bye Script"
assert event2["domain"] == "script"
assert event2["message"] == "started"
assert event2["entity_id"] == "script.bye"
@pytest.mark.parametrize("concurrently", [False, True])
async def test_concurrent_script(hass, concurrently):
"""Test calling script concurrently or not."""
if concurrently:
call_script_2 = {
"service": "script.turn_on",
"data": {"entity_id": "script.script2"},
}
else:
call_script_2 = {"service": "script.script2"}
assert await async_setup_component(
hass,
"script",
{
"script": {
"script1": {
"mode": "parallel",
"sequence": [
call_script_2,
{
"wait_template": "{{ is_state('input_boolean.test1', 'on') }}"
},
{"service": "test.script", "data": {"value": "script1"}},
],
},
"script2": {
"mode": "parallel",
"sequence": [
{"service": "test.script", "data": {"value": "script2a"}},
{
"wait_template": "{{ is_state('input_boolean.test2', 'on') }}"
},
{"service": "test.script", "data": {"value": "script2b"}},
],
},
}
},
)
service_called = asyncio.Event()
service_values = []
async def async_service_handler(service):
nonlocal service_values
service_values.append(service.data.get("value"))
service_called.set()
hass.services.async_register("test", "script", async_service_handler)
hass.states.async_set("input_boolean.test1", "off")
hass.states.async_set("input_boolean.test2", "off")
await hass.services.async_call("script", "script1")
await asyncio.wait_for(service_called.wait(), 1)
service_called.clear()
assert service_values[-1] == "script2a"
assert script.is_on(hass, "script.script1")
assert script.is_on(hass, "script.script2")
if not concurrently:
hass.states.async_set("input_boolean.test2", "on")
await asyncio.wait_for(service_called.wait(), 1)
service_called.clear()
assert service_values[-1] == "script2b"
hass.states.async_set("input_boolean.test1", "on")
await asyncio.wait_for(service_called.wait(), 1)
service_called.clear()
assert service_values[-1] == "script1"
assert concurrently == script.is_on(hass, "script.script2")
if concurrently:
hass.states.async_set("input_boolean.test2", "on")
await asyncio.wait_for(service_called.wait(), 1)
service_called.clear()
assert service_values[-1] == "script2b"
await hass.async_block_till_done()
assert not script.is_on(hass, "script.script1")
assert not script.is_on(hass, "script.script2")
async def test_script_variables(hass, caplog):
"""Test defining scripts."""
assert await async_setup_component(
hass,
"script",
{
"script": {
"script1": {
"variables": {
"this_variable": "{{this.entity_id}}",
"test_var": "from_config",
"templated_config_var": "{{ var_from_service | default('config-default') }}",
},
"sequence": [
{
"service": "test.script",
"data": {
"value": "{{ test_var }}",
"templated_config_var": "{{ templated_config_var }}",
"this_template": "{{this.entity_id}}",
"this_variable": "{{this_variable}}",
},
},
],
},
"script2": {
"variables": {
"test_var": "from_config",
},
"sequence": [
{
"service": "test.script",
"data": {
"value": "{{ test_var }}",
},
},
],
},
"script3": {
"variables": {
"test_var": "{{ break + 1 }}",
},
"sequence": [
{
"service": "test.script",
"data": {
"value": "{{ test_var }}",
},
},
],
},
}
},
)
mock_calls = async_mock_service(hass, "test", "script")
await hass.services.async_call(
"script", "script1", {"var_from_service": "hello"}, blocking=True
)
assert len(mock_calls) == 1
assert mock_calls[0].data["value"] == "from_config"
assert mock_calls[0].data["templated_config_var"] == "hello"
    # Verify `this` is available to all templates
    assert mock_calls[0].data.get("this_template") == "script.script1"
    # Verify `this` is available while the configured variables are rendered
    assert mock_calls[0].data.get("this_variable") == "script.script1"
await hass.services.async_call(
"script", "script1", {"test_var": "from_service"}, blocking=True
)
assert len(mock_calls) == 2
assert mock_calls[1].data["value"] == "from_service"
assert mock_calls[1].data["templated_config_var"] == "config-default"
# Call script with vars but no templates in it
await hass.services.async_call(
"script", "script2", {"test_var": "from_service"}, blocking=True
)
assert len(mock_calls) == 3
assert mock_calls[2].data["value"] == "from_service"
assert "Error rendering variables" not in caplog.text
with pytest.raises(template.TemplateError):
await hass.services.async_call("script", "script3", blocking=True)
assert "Error rendering variables" in caplog.text
assert len(mock_calls) == 3
await hass.services.async_call("script", "script3", {"break": 0}, blocking=True)
assert len(mock_calls) == 4
assert mock_calls[3].data["value"] == 1
async def test_script_this_var_always(hass, caplog):
"""Test script always has reference to this, even with no variabls are configured."""
assert await async_setup_component(
hass,
"script",
{
"script": {
"script1": {
"sequence": [
{
"service": "test.script",
"data": {
"this_template": "{{this.entity_id}}",
},
},
],
},
},
},
)
mock_calls = async_mock_service(hass, "test", "script")
await hass.services.async_call("script", "script1", blocking=True)
assert len(mock_calls) == 1
    # Verify `this` is available to all templates
assert mock_calls[0].data.get("this_template") == "script.script1"
assert "Error rendering variables" not in caplog.text
async def test_script_restore_last_triggered(hass: HomeAssistant) -> None:
"""Test if last triggered is restored on start."""
time = dt_util.utcnow()
mock_restore_cache(
hass,
(
State("script.no_last_triggered", STATE_OFF),
State("script.last_triggered", STATE_OFF, {"last_triggered": time}),
),
)
hass.state = CoreState.starting
assert await async_setup_component(
hass,
"script",
{
"script": {
"no_last_triggered": {
"sequence": [{"delay": {"seconds": 5}}],
},
"last_triggered": {
"sequence": [{"delay": {"seconds": 5}}],
},
},
},
)
state = hass.states.get("script.no_last_triggered")
assert state
assert state.attributes["last_triggered"] is None
state = hass.states.get("script.last_triggered")
assert state
assert state.attributes["last_triggered"] == time
| 31.421302
| 101
| 0.536891
|
7c6230a425fecb11e3b1437d2f4bc015b7fad842
| 1,841
|
py
|
Python
|
scripts/archive/standardize_interwiki.py
|
LeesahMasko/piwikibot
|
024af387ff48c21526ee206541178157d2653ddc
|
[
"MIT"
] | null | null | null |
scripts/archive/standardize_interwiki.py
|
LeesahMasko/piwikibot
|
024af387ff48c21526ee206541178157d2653ddc
|
[
"MIT"
] | 6
|
2021-02-27T03:35:42.000Z
|
2021-03-07T22:17:40.000Z
|
scripts/archive/standardize_interwiki.py
|
LeesahMasko/piwikibot
|
024af387ff48c21526ee206541178157d2653ddc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
"""
Loop over all pages in the home wiki, standardizing the interwiki links.
Parameters:
-start: - Set from what page you want to start
"""
#
# (C) Pywikibot team, 2003-2020
#
# Distributed under the terms of the MIT license.
#
import pywikibot
from pywikibot import i18n, textlib
def main(*args):
"""Process command line arguments and run the script."""
start = '!'
# Load the default parameters and start
for arg in pywikibot.handle_args():
if arg.startswith('-start'):
if len(arg) == 6:
start = pywikibot.input('From what page do you want to start?')
else:
start = arg[7:]
site = pywikibot.Site()
comm = i18n.twtranslate(site, 'standardize_interwiki-comment')
for pl in site.allpages(start):
plname = pl.title()
pywikibot.output('\nLoading {0}...'.format(plname))
try:
oldtext = pl.get()
except pywikibot.IsRedirectPage:
pywikibot.output('{0} is a redirect!'.format(plname))
continue
old = pl.interwiki()
new = {}
for pl2 in old:
new[pl2.site] = pywikibot.Page(pl2)
newtext = textlib.replaceLanguageLinks(oldtext, new, site=site)
if new:
if oldtext != newtext:
pywikibot.showDiff(oldtext, newtext)
# Submit changes
try:
pl.put(newtext, comment=comm)
except pywikibot.LockedPage:
pywikibot.output('{0} is locked'.format(plname))
continue
else:
pywikibot.output('No changes needed.')
continue
else:
pywikibot.output('No interwiki found.')
continue
if __name__ == '__main__':
main()
| 28.765625
| 79
| 0.561108
|
b7f85567aaa55fab499d87ef2c98a85d683780ca
| 2,230
|
py
|
Python
|
Demo/sockets/mcast.py
|
ystk/debian-python3.1
|
6241444a6994140621d1b143a2d6b311b184366a
|
[
"PSF-2.0"
] | 1
|
2021-12-26T22:20:34.000Z
|
2021-12-26T22:20:34.000Z
|
Demo/sockets/mcast.py
|
ystk/debian-python3.1
|
6241444a6994140621d1b143a2d6b311b184366a
|
[
"PSF-2.0"
] | null | null | null |
Demo/sockets/mcast.py
|
ystk/debian-python3.1
|
6241444a6994140621d1b143a2d6b311b184366a
|
[
"PSF-2.0"
] | 2
|
2018-08-06T04:37:38.000Z
|
2022-02-27T18:07:12.000Z
|
#!/usr/bin/env python
#
# Send/receive UDP multicast packets.
# Requires that your OS kernel supports IP multicast.
#
# Usage:
# mcast -s (sender, IPv4)
# mcast -s -6 (sender, IPv6)
# mcast (receivers, IPv4)
# mcast -6 (receivers, IPv6)
MYPORT = 8123
MYGROUP_4 = '225.0.0.250'
MYGROUP_6 = 'ff15:7079:7468:6f6e:6465:6d6f:6d63:6173'
MYTTL = 1 # Increase to reach other networks
import time
import struct
import socket
import sys
def main():
group = MYGROUP_6 if "-6" in sys.argv[1:] else MYGROUP_4
if "-s" in sys.argv[1:]:
sender(group)
else:
receiver(group)
def sender(group):
addrinfo = socket.getaddrinfo(group, None)[0]
s = socket.socket(addrinfo[0], socket.SOCK_DGRAM)
# Set Time-to-live (optional)
ttl_bin = struct.pack('@i', MYTTL)
if addrinfo[0] == socket.AF_INET: # IPv4
s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl_bin)
else:
s.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_HOPS, ttl_bin)
while True:
data = repr(time.time()).encode('utf-8') + b'\0'
s.sendto(data, (addrinfo[4][0], MYPORT))
time.sleep(1)
def receiver(group):
# Look up multicast group address in name server and find out IP version
addrinfo = socket.getaddrinfo(group, None)[0]
# Create a socket
s = socket.socket(addrinfo[0], socket.SOCK_DGRAM)
# Allow multiple copies of this program on one machine
# (not strictly needed)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind it to the port
s.bind(('', MYPORT))
group_bin = socket.inet_pton(addrinfo[0], addrinfo[4][0])
# Join group
if addrinfo[0] == socket.AF_INET: # IPv4
mreq = group_bin + struct.pack('=I', socket.INADDR_ANY)
s.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
else:
mreq = group_bin + struct.pack('@I', 0)
s.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, mreq)
# Loop, printing any data we receive
while True:
data, sender = s.recvfrom(1500)
        while data[-1:] == b'\0': data = data[:-1] # Strip trailing \0's
print(str(sender) + ' ' + repr(data))
if __name__ == '__main__':
main()
| 27.530864
| 78
| 0.645291
|
17b3e687ac48a4380d8c09fd50b9472cf230cedd
| 443
|
py
|
Python
|
term-frequency/wordcounts.py
|
paulowe/python-data-programming
|
96fdb3f888a554ac66e69e1f6958f3e0ef5b1075
|
[
"MIT"
] | null | null | null |
term-frequency/wordcounts.py
|
paulowe/python-data-programming
|
96fdb3f888a554ac66e69e1f6958f3e0ef5b1075
|
[
"MIT"
] | null | null | null |
term-frequency/wordcounts.py
|
paulowe/python-data-programming
|
96fdb3f888a554ac66e69e1f6958f3e0ef5b1075
|
[
"MIT"
] | null | null | null |
import nltk
def wordcount_fn(file_uri):
    # Read the whole file into memory (fine for small text files).
    with open(file_uri, 'r') as text_file:
        text_as_str = text_file.read()
    text_as_list = text_as_str.split()
    # Frequency distribution: word -> number of occurrences.
    counts = nltk.FreqDist(text_as_list)
    for key, val in counts.items():
        print(str(val) + " " + str(key))
    return counts
print(f"TF (Frequency Distribution) for each word in your file: {wordcount_fn('./textfile.txt')}")
| 26.058824
| 98
| 0.656885
|
0d6b8ea6736798e60c4456c431a0b217fd3789ca
| 85
|
py
|
Python
|
.ipynb_checkpoints/1_read-checkpoint.py
|
fadamsyah/LearnOpenCV
|
bd872e12d32720f09469d119695c2f564d209378
|
[
"MIT"
] | null | null | null |
.ipynb_checkpoints/1_read-checkpoint.py
|
fadamsyah/LearnOpenCV
|
bd872e12d32720f09469d119695c2f564d209378
|
[
"MIT"
] | null | null | null |
.ipynb_checkpoints/1_read-checkpoint.py
|
fadamsyah/LearnOpenCV
|
bd872e12d32720f09469d119695c2f564d209378
|
[
"MIT"
] | null | null | null |
import cv2 as cv
# Read an image from disk and display it in a window.
img = cv.imread('Resources/Photos/cat1.jpg')
cv.imshow('Cat', img)
# Wait for a key press so the window does not close immediately.
cv.waitKey(0)
| 17
| 44
| 0.705882
|
69319a024b67f42dbda4089c08d5e315d8dfcfe7
| 59,501
|
py
|
Python
|
test/functional/test_framework/mininode.py
|
adnetcoin/adnetcoin
|
bf95a2432f82d7d1f07842d1a4cba96736e96abd
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/mininode.py
|
adnetcoin/adnetcoin
|
bf95a2432f82d7d1f07842d1a4cba96736e96abd
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/mininode.py
|
adnetcoin/adnetcoin
|
bf95a2432f82d7d1f07842d1a4cba96736e96abd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Adnetcoin P2P network half-a-node.
This python code was modified from ArtForz' public domain half-a-node, as
found in the mini-node branch of http://github.com/jgarzik/pynode.
NodeConn: an object which manages p2p connectivity to a adnetcoin node
NodeConnCB: a base class that describes the interface for receiving
callbacks with network messages from a NodeConn
CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
data structures that should map to corresponding structures in
adnetcoin/primitives
msg_block, msg_tx, msg_headers, etc.:
data structures that represent network messages
ser_*, deser_*: functions that handle serialization/deserialization
"""
import asyncore
from codecs import encode
from collections import defaultdict
import copy
import hashlib
from io import BytesIO
import logging
import random
import socket
import struct
import sys
import time
from threading import RLock, Thread
from test_framework.siphash import siphash256
from test_framework.util import hex_str_to_bytes, bytes_to_hex_str
BIP0031_VERSION = 60000
MY_VERSION = 70014 # past bip-31 for ping/pong
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37)
MAX_INV_SZ = 50000
MAX_BLOCK_BASE_SIZE = 1000000
COIN = 100000000 # 1 adnet in satoshis
NODE_NETWORK = (1 << 0)
NODE_GETUTXO = (1 << 1)
NODE_BLOOM = (1 << 2)
NODE_WITNESS = (1 << 3)
NODE_UNSUPPORTED_SERVICE_BIT_5 = (1 << 5)
NODE_UNSUPPORTED_SERVICE_BIT_7 = (1 << 7)
ADNET_REGTEST_HARDFORK_HEIGHT = 3000
logger = logging.getLogger("TestFramework.mininode")
# Keep our own socket map for asyncore, so that we can track disconnects
# ourselves (to workaround an issue with closing an asyncore socket when
# using select)
mininode_socket_map = dict()
# One lock for synchronizing all data access between the networking thread (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# NodeConn acquires this lock whenever delivering a message to a NodeConnCB,
# and whenever adding anything to the send buffer (in send_message()). This
# lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the NodeConnCB or NodeConn.
mininode_lock = RLock()
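# Typical usage in test code, e.g. with a NodeConnCB instance `node`:
#     with mininode_lock:
#         last_block_message = node.last_message.get("block")
# which keeps reads of shared state consistent with the networking thread.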
# Serialization/deserialization tools
def sha256(s):
return hashlib.new('sha256', s).digest()
def ripemd160(s):
return hashlib.new('ripemd160', s).digest()
def hash256(s):
return sha256(sha256(s))
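# hash256 is the double SHA-256 used throughout the Bitcoin-style wire format
# for txids, block hashes and message checksums.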
def ser_compact_size(l):
r = b""
if l < 253:
r = struct.pack("B", l)
elif l < 0x10000:
r = struct.pack("<BH", 253, l)
elif l < 0x100000000:
r = struct.pack("<BI", 254, l)
else:
r = struct.pack("<BQ", 255, l)
return r
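# CompactSize encoding: values below 253 take a single byte; larger values get
# a 0xFD/0xFE/0xFF prefix followed by 2, 4 or 8 little-endian bytes. For
# example, ser_compact_size(3) == b'\x03' and
# ser_compact_size(515) == b'\xfd\x03\x02'.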
def deser_compact_size(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return nit
def deser_string(f):
nit = deser_compact_size(f)
return f.read(nit)
def ser_string(s):
return ser_compact_size(len(s)) + s
def deser_uint256(f):
r = 0
for i in range(8):
t = struct.unpack("<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = b""
for i in range(8):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
def uint256_from_str(s):
r = 0
t = struct.unpack("<IIIIIIII", s[:32])
for i in range(8):
r += t[i] << (i * 32)
return r
def uint256_from_compact(c):
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFF) << (8 * (nbytes - 3))
return v
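# The compact ("nBits") form stores a size byte and a 3-byte mantissa, e.g.
# uint256_from_compact(0x1d00ffff) == 0xffff << 208, the classic Bitcoin
# minimum-difficulty target.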
def deser_vector(f, c):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = c()
t.deserialize(f)
r.append(t)
return r
# ser_function_name: Allow for an alternate serialization function on the
# entries in the vector (we use this for serializing the vector of transactions
# for a witness block).
def ser_vector(l, ser_function_name=None):
r = ser_compact_size(len(l))
for i in l:
if ser_function_name:
r += getattr(i, ser_function_name)()
else:
r += i.serialize()
return r
def deser_uint256_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_uint256(f)
r.append(t)
return r
def ser_uint256_vector(l):
r = ser_compact_size(len(l))
for i in l:
r += ser_uint256(i)
return r
def deser_string_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_string(f)
r.append(t)
return r
def ser_string_vector(l):
r = ser_compact_size(len(l))
for sv in l:
r += ser_string(sv)
return r
def deser_int_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = struct.unpack("<i", f.read(4))[0]
r.append(t)
return r
def ser_int_vector(l):
r = ser_compact_size(len(l))
for i in l:
r += struct.pack("<i", i)
return r
def deser_byte_vector(f):
return deser_string(f)
def ser_byte_vector(l):
return ser_string(l)
# Deserialize from a hex string representation (eg from RPC)
def FromHex(obj, hex_string):
obj.deserialize(BytesIO(hex_str_to_bytes(hex_string)))
return obj
# Convert a binary-serializable object to hex (eg for submission via RPC)
def ToHex(obj):
return bytes_to_hex_str(obj.serialize())
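# Round-tripping with RPC typically looks like, e.g.:
#     tx = FromHex(CTransaction(), node.getrawtransaction(txid))
#     node.sendrawtransaction(ToHex(tx))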
# Objects that map to adnetcoind objects, which can be serialized/deserialized
class CAddress(object):
def __init__(self):
self.nServices = 1
self.pchReserved = b"\x00" * 10 + b"\xff" * 2
self.ip = "0.0.0.0"
self.port = 0
def deserialize(self, f):
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nServices)
r += self.pchReserved
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
self.ip, self.port)
MSG_WITNESS_FLAG = 1<<30
class CInv(object):
typemap = {
0: "Error",
1: "TX",
2: "Block",
1|MSG_WITNESS_FLAG: "WitnessTx",
2|MSG_WITNESS_FLAG : "WitnessBlock",
4: "CompactBlock"
}
def __init__(self, t=0, h=0):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<i", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.type)
r += ser_uint256(self.hash)
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
class CBlockLocator(object):
def __init__(self):
self.nVersion = MY_VERSION
self.vHave = []
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vHave = deser_uint256_vector(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256_vector(self.vHave)
return r
def __repr__(self):
return "CBlockLocator(nVersion=%i vHave=%s)" \
% (self.nVersion, repr(self.vHave))
class COutPoint(object):
def __init__(self, hash=0, n=0):
self.hash = hash
self.n = n
def deserialize(self, f):
self.hash = deser_uint256(f)
self.n = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += ser_uint256(self.hash)
r += struct.pack("<I", self.n)
return r
def __repr__(self):
return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn(object):
def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
if outpoint is None:
self.prevout = COutPoint()
else:
self.prevout = outpoint
self.scriptSig = scriptSig
self.nSequence = nSequence
def deserialize(self, f):
self.prevout = COutPoint()
self.prevout.deserialize(f)
self.scriptSig = deser_string(f)
self.nSequence = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += self.prevout.serialize()
r += ser_string(self.scriptSig)
r += struct.pack("<I", self.nSequence)
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (repr(self.prevout), bytes_to_hex_str(self.scriptSig),
self.nSequence)
class CTxOut(object):
def __init__(self, nValue=0, scriptPubKey=b""):
self.nValue = nValue
self.scriptPubKey = scriptPubKey
def deserialize(self, f):
self.nValue = struct.unpack("<q", f.read(8))[0]
self.scriptPubKey = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<q", self.nValue)
r += ser_string(self.scriptPubKey)
return r
def __repr__(self):
return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
% (self.nValue // COIN, self.nValue % COIN,
bytes_to_hex_str(self.scriptPubKey))
class CScriptWitness(object):
def __init__(self):
# stack is a vector of strings
self.stack = []
def __repr__(self):
return "CScriptWitness(%s)" % \
(",".join([bytes_to_hex_str(x) for x in self.stack]))
def is_null(self):
if self.stack:
return False
return True
class CTxInWitness(object):
def __init__(self):
self.scriptWitness = CScriptWitness()
def deserialize(self, f):
self.scriptWitness.stack = deser_string_vector(f)
def serialize(self):
return ser_string_vector(self.scriptWitness.stack)
def __repr__(self):
return repr(self.scriptWitness)
def is_null(self):
return self.scriptWitness.is_null()
class CTxWitness(object):
def __init__(self):
self.vtxinwit = []
def deserialize(self, f):
for i in range(len(self.vtxinwit)):
self.vtxinwit[i].deserialize(f)
def serialize(self):
r = b""
# This is different than the usual vector serialization --
# we omit the length of the vector, which is required to be
# the same length as the transaction's vin vector.
for x in self.vtxinwit:
r += x.serialize()
return r
def __repr__(self):
return "CTxWitness(%s)" % \
(';'.join([repr(x) for x in self.vtxinwit]))
def is_null(self):
for x in self.vtxinwit:
if not x.is_null():
return False
return True
class CTransaction(object):
def __init__(self, tx=None):
if tx is None:
self.nVersion = 1
self.vin = []
self.vout = []
self.wit = CTxWitness()
self.nLockTime = 0
self.sha256 = None
self.hash = None
else:
self.nVersion = tx.nVersion
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.sha256 = tx.sha256
self.hash = tx.hash
self.wit = copy.deepcopy(tx.wit)
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vin = deser_vector(f, CTxIn)
flags = 0
if len(self.vin) == 0:
flags = struct.unpack("<B", f.read(1))[0]
# Not sure why flags can't be zero, but this
# matches the implementation in adnetcoind
if (flags != 0):
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
else:
self.vout = deser_vector(f, CTxOut)
if flags != 0:
self.wit.vtxinwit = [CTxInWitness() for i in range(len(self.vin))]
self.wit.deserialize(f)
self.nLockTime = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize_without_witness(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
r += struct.pack("<I", self.nLockTime)
return r
# Only serialize with witness when explicitly called for
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
if (len(self.wit.vtxinwit) != len(self.vin)):
# vtxinwit must have the same length as vin
self.wit.vtxinwit = self.wit.vtxinwit[:len(self.vin)]
for i in range(len(self.wit.vtxinwit), len(self.vin)):
self.wit.vtxinwit.append(CTxInWitness())
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
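    # With witness data present this emits the BIP 144 layout: a zero-length
    # vin marker, a flags byte, then vin/vout, the per-input witnesses and
    # nLockTime.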
# Regular serialization is without witness -- must explicitly
# call serialize_with_witness to include witness data.
def serialize(self):
return self.serialize_without_witness()
# Recalculate the txid (transaction hash without witness)
def rehash(self):
self.sha256 = None
self.calc_sha256()
# We will only cache the serialization without witness in
# self.sha256 and self.hash -- those are expected to be the txid.
def calc_sha256(self, with_witness=False):
if with_witness:
# Don't cache the result, just return it
return uint256_from_str(hash256(self.serialize_with_witness()))
if self.sha256 is None:
self.sha256 = uint256_from_str(hash256(self.serialize_without_witness()))
self.hash = encode(hash256(self.serialize())[::-1], 'hex_codec').decode('ascii')
def is_valid(self):
self.calc_sha256()
for tout in self.vout:
if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
return False
return True
def __repr__(self):
return "CTransaction(nVersion=%i vin=%s vout=%s wit=%s nLockTime=%i)" \
% (self.nVersion, repr(self.vin), repr(self.vout), repr(self.wit), self.nLockTime)
class CBlockHeader(object):
def __init__(self, header=None):
if header is None:
self.set_null()
else:
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.nHeight = header.nHeight
self.nReserved = copy.copy(header.nReserved)
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
self.nSolution = header.nSolution
self.sha256 = header.sha256
self.hash = header.hash
self.calc_sha256()
def set_null(self):
self.nVersion = 1
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.nHeight = 0
self.nReserved = [0] * 7
self.nTime = 0
self.nBits = 0
self.nNonce = 0
self.sha256 = None
self.hash = None
self.nSolution = b""
def deserialize(self, f, legacy=True):
if legacy:
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = struct.unpack("<I", f.read(4))[0]
self.nHeight = 0
self.nReserved = [0] * 7
self.nSolution = b""
else:
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nHeight = struct.unpack("<I", f.read(4))[0]
self.nReserved = [struct.unpack("<I", f.read(4))[0] for _ in range(7)]
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = deser_uint256(f)
self.nSolution = deser_byte_vector(f)
self.sha256 = None
self.hash = None
def serialize_header(self, legacy=True):
r = b""
if legacy:
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce & 0xFFFFFFFF)
return r
else:
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nHeight)
for i in range(7):
r += struct.pack("<I", self.nReserved[i])
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += ser_uint256(self.nNonce)
r += ser_byte_vector(self.nSolution)
return r
def serialize(self, legacy=True):
return self.serialize_header(legacy=legacy)
def calc_sha256(self):
if self.sha256 is None:
if self.nHeight < ADNET_REGTEST_HARDFORK_HEIGHT:
r = self.serialize_header(legacy=True)
else:
r = self.serialize_header(legacy=False)
self.sha256 = uint256_from_str(hash256(r))
self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii')
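    # Blocks below ADNET_REGTEST_HARDFORK_HEIGHT hash the legacy 80-byte
    # Bitcoin-style header; later blocks hash the extended header carrying
    # nHeight, the reserved words and the Equihash solution.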
def rehash(self):
self.sha256 = None
self.calc_sha256()
return self.sha256
def __repr__(self):
return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nHeight=%d nTime=%s nBits=%08x nNonce=%08x)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot, self.nHeight,
time.ctime(self.nTime), self.nBits, self.nNonce)
class CBlock(CBlockHeader):
def __init__(self, header=None):
super(CBlock, self).__init__(header)
self.vtx = []
def deserialize(self, f, legacy=True):
super(CBlock, self).deserialize(f, legacy=legacy)
self.vtx = deser_vector(f, CTransaction)
def serialize(self, with_witness=False):
r = b""
r += super(CBlock, self).serialize()
if with_witness:
r += ser_vector(self.vtx, "serialize_with_witness")
else:
r += ser_vector(self.vtx)
return r
# Calculate the merkle root given a vector of transaction hashes
@classmethod
def get_merkle_root(cls, hashes):
while len(hashes) > 1:
newhashes = []
for i in range(0, len(hashes), 2):
i2 = min(i+1, len(hashes)-1)
newhashes.append(hash256(hashes[i] + hashes[i2]))
hashes = newhashes
return uint256_from_str(hashes[0])
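    # Note the odd-count case: min(i+1, len(hashes)-1) pairs the last hash
    # with itself, matching Bitcoin's merkle tree construction.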
def calc_merkle_root(self):
hashes = []
for tx in self.vtx:
tx.calc_sha256()
hashes.append(ser_uint256(tx.sha256))
return self.get_merkle_root(hashes)
def calc_witness_merkle_root(self):
# For witness root purposes, the hash of the
# coinbase, with witness, is defined to be 0...0
hashes = [ser_uint256(0)]
for tx in self.vtx[1:]:
# Calculate the hashes with witness data
hashes.append(ser_uint256(tx.calc_sha256(True)))
return self.get_merkle_root(hashes)
def is_valid(self):
# TODO(h4x3rotab): Not implemented for Equihash.
self.calc_sha256()
target = uint256_from_compact(self.nBits)
if self.sha256 > target:
return False
for tx in self.vtx:
if not tx.is_valid():
return False
if self.calc_merkle_root() != self.hashMerkleRoot:
return False
return True
def solve(self):
# TODO(h4x3rotab): Not implemented for Equihash.
self.rehash()
target = uint256_from_compact(self.nBits)
while self.sha256 > target:
self.nNonce += 1
self.rehash()
def __repr__(self):
return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nHeight=%d nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot, self.nHeight,
time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
class CUnsignedAlert(object):
def __init__(self):
self.nVersion = 1
self.nRelayUntil = 0
self.nExpiration = 0
self.nID = 0
self.nCancel = 0
self.setCancel = []
self.nMinVer = 0
self.nMaxVer = 0
self.setSubVer = []
self.nPriority = 0
self.strComment = b""
self.strStatusBar = b""
self.strReserved = b""
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.nRelayUntil = struct.unpack("<q", f.read(8))[0]
self.nExpiration = struct.unpack("<q", f.read(8))[0]
self.nID = struct.unpack("<i", f.read(4))[0]
self.nCancel = struct.unpack("<i", f.read(4))[0]
self.setCancel = deser_int_vector(f)
self.nMinVer = struct.unpack("<i", f.read(4))[0]
self.nMaxVer = struct.unpack("<i", f.read(4))[0]
self.setSubVer = deser_string_vector(f)
self.nPriority = struct.unpack("<i", f.read(4))[0]
self.strComment = deser_string(f)
self.strStatusBar = deser_string(f)
self.strReserved = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<q", self.nRelayUntil)
r += struct.pack("<q", self.nExpiration)
r += struct.pack("<i", self.nID)
r += struct.pack("<i", self.nCancel)
r += ser_int_vector(self.setCancel)
r += struct.pack("<i", self.nMinVer)
r += struct.pack("<i", self.nMaxVer)
r += ser_string_vector(self.setSubVer)
r += struct.pack("<i", self.nPriority)
r += ser_string(self.strComment)
r += ser_string(self.strStatusBar)
r += ser_string(self.strReserved)
return r
def __repr__(self):
return "CUnsignedAlert(nVersion %d, nRelayUntil %d, nExpiration %d, nID %d, nCancel %d, nMinVer %d, nMaxVer %d, nPriority %d, strComment %s, strStatusBar %s, strReserved %s)" \
% (self.nVersion, self.nRelayUntil, self.nExpiration, self.nID,
self.nCancel, self.nMinVer, self.nMaxVer, self.nPriority,
self.strComment, self.strStatusBar, self.strReserved)
class CAlert(object):
def __init__(self):
self.vchMsg = b""
self.vchSig = b""
def deserialize(self, f):
self.vchMsg = deser_string(f)
self.vchSig = deser_string(f)
def serialize(self):
r = b""
r += ser_string(self.vchMsg)
r += ser_string(self.vchSig)
return r
def __repr__(self):
return "CAlert(vchMsg.sz %d, vchSig.sz %d)" \
% (len(self.vchMsg), len(self.vchSig))
class PrefilledTransaction(object):
def __init__(self, index=0, tx = None):
self.index = index
self.tx = tx
def deserialize(self, f):
self.index = deser_compact_size(f)
self.tx = CTransaction()
self.tx.deserialize(f)
def serialize(self, with_witness=False):
r = b""
r += ser_compact_size(self.index)
if with_witness:
r += self.tx.serialize_with_witness()
else:
r += self.tx.serialize_without_witness()
return r
def serialize_with_witness(self):
return self.serialize(with_witness=True)
def __repr__(self):
return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx))
# This is what we send on the wire, in a cmpctblock message.
class P2PHeaderAndShortIDs(object):
def __init__(self):
self.header = CBlockHeader()
self.nonce = 0
self.shortids_length = 0
self.shortids = []
self.prefilled_txn_length = 0
self.prefilled_txn = []
def deserialize(self, f):
self.header.deserialize(f)
self.nonce = struct.unpack("<Q", f.read(8))[0]
self.shortids_length = deser_compact_size(f)
for i in range(self.shortids_length):
# shortids are defined to be 6 bytes in the spec, so append
# two zero bytes and read it in as an 8-byte number
self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
self.prefilled_txn = deser_vector(f, PrefilledTransaction)
self.prefilled_txn_length = len(self.prefilled_txn)
# When using version 2 compact blocks, we must serialize with_witness.
def serialize(self, with_witness=False):
r = b""
r += self.header.serialize()
r += struct.pack("<Q", self.nonce)
r += ser_compact_size(self.shortids_length)
for x in self.shortids:
# We only want the first 6 bytes
r += struct.pack("<Q", x)[0:6]
if with_witness:
r += ser_vector(self.prefilled_txn, "serialize_with_witness")
else:
r += ser_vector(self.prefilled_txn)
return r
def __repr__(self):
return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn))
# P2P version of the above that will use witness serialization (for compact
# block version 2)
class P2PHeaderAndShortWitnessIDs(P2PHeaderAndShortIDs):
def serialize(self):
return super(P2PHeaderAndShortWitnessIDs, self).serialize(with_witness=True)
# Calculate the BIP 152-compact blocks shortid for a given transaction hash
def calculate_shortid(k0, k1, tx_hash):
expected_shortid = siphash256(k0, k1, tx_hash)
expected_shortid &= 0x0000ffffffffffff
return expected_shortid
# This version gets rid of the array lengths, and reinterprets the differential
# encoding into indices that can be used for lookup.
class HeaderAndShortIDs(object):
def __init__(self, p2pheaders_and_shortids = None):
self.header = CBlockHeader()
self.nonce = 0
self.shortids = []
self.prefilled_txn = []
self.use_witness = False
if p2pheaders_and_shortids != None:
self.header = p2pheaders_and_shortids.header
self.nonce = p2pheaders_and_shortids.nonce
self.shortids = p2pheaders_and_shortids.shortids
last_index = -1
for x in p2pheaders_and_shortids.prefilled_txn:
self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx))
last_index = self.prefilled_txn[-1].index
def to_p2p(self):
if self.use_witness:
ret = P2PHeaderAndShortWitnessIDs()
else:
ret = P2PHeaderAndShortIDs()
ret.header = self.header
ret.nonce = self.nonce
ret.shortids_length = len(self.shortids)
ret.shortids = self.shortids
ret.prefilled_txn_length = len(self.prefilled_txn)
ret.prefilled_txn = []
last_index = -1
for x in self.prefilled_txn:
ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx))
last_index = x.index
return ret
def get_siphash_keys(self):
header_nonce = self.header.serialize()
header_nonce += struct.pack("<Q", self.nonce)
hash_header_nonce_as_str = sha256(header_nonce)
key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
return [ key0, key1 ]
# Version 2 compact blocks use wtxid in shortids (rather than txid)
def initialize_from_block(self, block, nonce=0, prefill_list = [0], use_witness = False):
self.header = CBlockHeader(block)
self.nonce = nonce
self.prefilled_txn = [ PrefilledTransaction(i, block.vtx[i]) for i in prefill_list ]
self.shortids = []
self.use_witness = use_witness
[k0, k1] = self.get_siphash_keys()
for i in range(len(block.vtx)):
if i not in prefill_list:
tx_hash = block.vtx[i].sha256
if use_witness:
tx_hash = block.vtx[i].calc_sha256(with_witness=True)
self.shortids.append(calculate_shortid(k0, k1, tx_hash))
def __repr__(self):
return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))
class BlockTransactionsRequest(object):
def __init__(self, blockhash=0, indexes = None):
self.blockhash = blockhash
self.indexes = indexes if indexes != None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
indexes_length = deser_compact_size(f)
for i in range(indexes_length):
self.indexes.append(deser_compact_size(f))
def serialize(self):
r = b""
r += ser_uint256(self.blockhash)
r += ser_compact_size(len(self.indexes))
for x in self.indexes:
r += ser_compact_size(x)
return r
# helper to set the differentially encoded indexes from absolute ones
def from_absolute(self, absolute_indexes):
self.indexes = []
last_index = -1
for x in absolute_indexes:
self.indexes.append(x-last_index-1)
last_index = x
def to_absolute(self):
absolute_indexes = []
last_index = -1
for x in self.indexes:
absolute_indexes.append(x+last_index+1)
last_index = absolute_indexes[-1]
return absolute_indexes
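    # Example of the differential encoding: absolute indexes [0, 3, 5] are
    # sent on the wire as [0, 2, 1] (each entry is the gap to the previous
    # index minus one).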
def __repr__(self):
return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))
class BlockTransactions(object):
def __init__(self, blockhash=0, transactions = None):
self.blockhash = blockhash
self.transactions = transactions if transactions != None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
self.transactions = deser_vector(f, CTransaction)
def serialize(self, with_witness=False):
r = b""
r += ser_uint256(self.blockhash)
if with_witness:
r += ser_vector(self.transactions, "serialize_with_witness")
else:
r += ser_vector(self.transactions)
return r
def __repr__(self):
return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))
# Objects that correspond to messages on the wire
class msg_version(object):
command = b"version"
def __init__(self):
self.nVersion = MY_VERSION
self.nServices = 1
self.nTime = int(time.time())
self.addrTo = CAddress()
self.addrFrom = CAddress()
self.nNonce = random.getrandbits(64)
self.strSubVer = MY_SUBVERSION
self.nStartingHeight = -1
self.nRelay = MY_RELAY
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
if self.nVersion == 10300:
self.nVersion = 300
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.nTime = struct.unpack("<q", f.read(8))[0]
self.addrTo = CAddress()
self.addrTo.deserialize(f)
if self.nVersion >= 106:
self.addrFrom = CAddress()
self.addrFrom.deserialize(f)
self.nNonce = struct.unpack("<Q", f.read(8))[0]
self.strSubVer = deser_string(f)
else:
self.addrFrom = None
self.nNonce = None
self.strSubVer = None
self.nStartingHeight = None
if self.nVersion >= 209:
self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
else:
self.nStartingHeight = None
if self.nVersion >= 70001:
# Relay field is optional for version 70001 onwards
try:
self.nRelay = struct.unpack("<b", f.read(1))[0]
except:
self.nRelay = 0
else:
self.nRelay = 0
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<Q", self.nServices)
r += struct.pack("<q", self.nTime)
r += self.addrTo.serialize()
r += self.addrFrom.serialize()
r += struct.pack("<Q", self.nNonce)
r += ser_string(self.strSubVer)
r += struct.pack("<i", self.nStartingHeight)
r += struct.pack("<b", self.nRelay)
return r
def __repr__(self):
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i nRelay=%i)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
repr(self.addrTo), repr(self.addrFrom), self.nNonce,
self.strSubVer, self.nStartingHeight, self.nRelay)
class msg_verack(object):
command = b"verack"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_verack()"
class msg_addr(object):
command = b"addr"
def __init__(self):
self.addrs = []
def deserialize(self, f):
self.addrs = deser_vector(f, CAddress)
def serialize(self):
return ser_vector(self.addrs)
def __repr__(self):
return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_alert(object):
command = b"alert"
def __init__(self):
self.alert = CAlert()
def deserialize(self, f):
self.alert = CAlert()
self.alert.deserialize(f)
def serialize(self):
r = b""
r += self.alert.serialize()
return r
def __repr__(self):
return "msg_alert(alert=%s)" % (repr(self.alert), )
class msg_inv(object):
command = b"inv"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata(object):
command = b"getdata"
def __init__(self, inv=None):
self.inv = inv if inv != None else []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks(object):
command = b"getblocks"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
class msg_tx(object):
command = b"tx"
def __init__(self, tx=CTransaction()):
self.tx = tx
def deserialize(self, f):
self.tx.deserialize(f)
def serialize(self):
return self.tx.serialize_without_witness()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_witness_tx(msg_tx):
def serialize(self):
return self.tx.serialize_with_witness()
class msg_block(object):
command = b"block"
def __init__(self, block=None):
if block is None:
self.block = CBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
def serialize(self):
return self.block.serialize()
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
# for cases where a user needs tighter control over what is sent over the wire
# note that the user must supply the name of the command, and the data
class msg_generic(object):
def __init__(self, command, data=None):
self.command = command
self.data = data
def serialize(self):
return self.data
def __repr__(self):
return "msg_generic()"
class msg_witness_block(msg_block):
def serialize(self):
r = self.block.serialize(with_witness=True)
return r
class msg_getaddr(object):
command = b"getaddr"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_getaddr()"
class msg_ping_prebip31(object):
command = b"ping"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_ping() (pre-bip31)"
class msg_ping(object):
command = b"ping"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong(object):
command = b"pong"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool(object):
command = b"mempool"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_mempool()"
class msg_sendheaders(object):
command = b"sendheaders"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders(object):
command = b"getheaders"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
# headers message has
# <count> <vector of block headers>
class msg_headers(object):
command = b"headers"
def __init__(self):
self.headers = []
def deserialize(self, f):
# comment in adnetcoind indicates these should be deserialized as blocks
blocks = deser_vector(f, CBlock)
for x in blocks:
self.headers.append(CBlockHeader(x))
def serialize(self):
blocks = [CBlock(x) for x in self.headers]
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject(object):
command = b"reject"
REJECT_MALFORMED = 1
def __init__(self):
self.message = b""
self.code = 0
self.reason = b""
self.data = 0
def deserialize(self, f):
self.message = deser_string(f)
self.code = struct.unpack("<B", f.read(1))[0]
self.reason = deser_string(f)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
self.data = deser_uint256(f)
def serialize(self):
r = ser_string(self.message)
r += struct.pack("<B", self.code)
r += ser_string(self.reason)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
r += ser_uint256(self.data)
return r
def __repr__(self):
return "msg_reject: %s %d %s [%064x]" \
% (self.message, self.code, self.reason, self.data)
# Helper function
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf')):
if attempts == float('inf') and timeout == float('inf'):
timeout = 60
attempt = 0
elapsed = 0
while attempt < attempts and elapsed < timeout:
with mininode_lock:
if predicate():
return True
attempt += 1
elapsed += 0.05
time.sleep(0.05)
return False
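# wait_until polls the predicate every 50 ms while holding mininode_lock,
# e.g. wait_until(lambda: node.connected, timeout=30).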
class msg_feefilter(object):
command = b"feefilter"
def __init__(self, feerate=0):
self.feerate = feerate
def deserialize(self, f):
self.feerate = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.feerate)
return r
def __repr__(self):
return "msg_feefilter(feerate=%08x)" % self.feerate
class msg_sendcmpct(object):
command = b"sendcmpct"
def __init__(self):
self.announce = False
self.version = 1
def deserialize(self, f):
self.announce = struct.unpack("<?", f.read(1))[0]
self.version = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<?", self.announce)
r += struct.pack("<Q", self.version)
return r
def __repr__(self):
return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version)
class msg_cmpctblock(object):
command = b"cmpctblock"
def __init__(self, header_and_shortids = None):
self.header_and_shortids = header_and_shortids
def deserialize(self, f):
self.header_and_shortids = P2PHeaderAndShortIDs()
self.header_and_shortids.deserialize(f)
def serialize(self):
r = b""
r += self.header_and_shortids.serialize()
return r
def __repr__(self):
return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)
class msg_getblocktxn(object):
command = b"getblocktxn"
def __init__(self):
self.block_txn_request = None
def deserialize(self, f):
self.block_txn_request = BlockTransactionsRequest()
self.block_txn_request.deserialize(f)
def serialize(self):
r = b""
r += self.block_txn_request.serialize()
return r
def __repr__(self):
return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request))
class msg_blocktxn(object):
command = b"blocktxn"
def __init__(self):
self.block_transactions = BlockTransactions()
def deserialize(self, f):
self.block_transactions.deserialize(f)
def serialize(self):
r = b""
r += self.block_transactions.serialize()
return r
def __repr__(self):
return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions))
class msg_witness_blocktxn(msg_blocktxn):
def serialize(self):
r = b""
r += self.block_transactions.serialize(with_witness=True)
return r
class NodeConnCB(object):
"""Callback and helper functions for P2P connection to a adnetcoind node.
Individual testcases should subclass this and override the on_* methods
if they want to alter message handling behaviour.
"""
def __init__(self):
# Track whether we have a P2P connection open to the node
self.connected = False
self.connection = None
# Track number of messages of each type received and the most recent
# message of each type
self.message_count = defaultdict(int)
self.last_message = {}
# A count of the number of ping messages we've sent to the node
self.ping_counter = 1
# deliver_sleep_time is helpful for debugging race conditions in p2p
# tests; it causes message delivery to sleep for the specified time
# before acquiring the global lock and delivering the next message.
self.deliver_sleep_time = None
# Remember the services our peer has advertised
self.peer_services = None
# Message receiving methods
def deliver(self, conn, message):
"""Receive message and dispatch message to appropriate callback.
We keep a count of how many of each message type has been received
and the most recent message of each type.
Optionally waits for deliver_sleep_time before dispatching message.
"""
deliver_sleep = self.get_deliver_sleep_time()
if deliver_sleep is not None:
time.sleep(deliver_sleep)
with mininode_lock:
try:
command = message.command.decode('ascii')
self.message_count[command] += 1
self.last_message[command] = message
getattr(self, 'on_' + command)(conn, message)
except:
print("ERROR delivering %s (%s)" % (repr(message),
sys.exc_info()[0]))
def set_deliver_sleep_time(self, value):
with mininode_lock:
self.deliver_sleep_time = value
def get_deliver_sleep_time(self):
with mininode_lock:
return self.deliver_sleep_time
# Callback methods. Can be overridden by subclasses in individual test
# cases to provide custom message handling behaviour.
def on_open(self, conn):
self.connected = True
def on_close(self, conn):
self.connected = False
self.connection = None
def on_addr(self, conn, message): pass
def on_alert(self, conn, message): pass
def on_block(self, conn, message): pass
def on_blocktxn(self, conn, message): pass
def on_cmpctblock(self, conn, message): pass
def on_feefilter(self, conn, message): pass
def on_getaddr(self, conn, message): pass
def on_getblocks(self, conn, message): pass
def on_getblocktxn(self, conn, message): pass
def on_getdata(self, conn, message): pass
def on_getheaders(self, conn, message): pass
def on_headers(self, conn, message): pass
def on_mempool(self, conn): pass
def on_pong(self, conn, message): pass
def on_reject(self, conn, message): pass
def on_sendcmpct(self, conn, message): pass
def on_sendheaders(self, conn, message): pass
def on_tx(self, conn, message): pass
def on_inv(self, conn, message):
want = msg_getdata()
for i in message.inv:
if i.type != 0:
want.inv.append(i)
if len(want.inv):
conn.send_message(want)
def on_ping(self, conn, message):
if conn.ver_send > BIP0031_VERSION:
conn.send_message(msg_pong(message.nonce))
def on_verack(self, conn, message):
conn.ver_recv = conn.ver_send
self.verack_received = True
def on_version(self, conn, message):
if message.nVersion >= 209:
conn.send_message(msg_verack())
conn.ver_send = min(MY_VERSION, message.nVersion)
if message.nVersion < 209:
conn.ver_recv = conn.ver_send
conn.nServices = message.nServices
# Connection helper methods
def add_connection(self, conn):
self.connection = conn
def wait_for_disconnect(self, timeout=60):
test_function = lambda: not self.connected
assert wait_until(test_function, timeout=timeout)
# Message receiving helper methods
def wait_for_block(self, blockhash, timeout=60):
test_function = lambda: self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash
assert wait_until(test_function, timeout=timeout)
def wait_for_getdata(self, timeout=60):
test_function = lambda: self.last_message.get("getdata")
assert wait_until(test_function, timeout=timeout)
def wait_for_getheaders(self, timeout=60):
test_function = lambda: self.last_message.get("getheaders")
assert wait_until(test_function, timeout=timeout)
def wait_for_inv(self, expected_inv, timeout=60):
"""Waits for an INV message and checks that the first inv object in the message was as expected."""
if len(expected_inv) > 1:
raise NotImplementedError("wait_for_inv() will only verify the first inv object")
test_function = lambda: self.last_message.get("inv") and \
self.last_message["inv"].inv[0].type == expected_inv[0].type and \
self.last_message["inv"].inv[0].hash == expected_inv[0].hash
assert wait_until(test_function, timeout=timeout)
def wait_for_verack(self, timeout=60):
test_function = lambda: self.message_count["verack"]
assert wait_until(test_function, timeout=timeout)
# Message sending helper functions
def send_message(self, message):
if self.connection:
self.connection.send_message(message)
else:
logger.error("Cannot send message. No connection to node!")
def send_and_ping(self, message):
self.send_message(message)
self.sync_with_ping()
# Sync up with the node
def sync_with_ping(self, timeout=60):
self.send_message(msg_ping(nonce=self.ping_counter))
test_function = lambda: self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter
assert wait_until(test_function, timeout=timeout)
self.ping_counter += 1
return True
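# Hedged sketch (editor addition, not part of the original framework): an
# individual test case would typically subclass NodeConnCB, override one of
# the on_* callbacks above and rely on the wait_for_*/sync_with_ping helpers.
# The class name and the seen_tx attribute are illustrative assumptions.
class ExampleTestNode(NodeConnCB):
    def __init__(self):
        super().__init__()
        self.seen_tx = []  # hypothetical bookkeeping of received transactions
    def on_tx(self, conn, message):
        # every tx message dispatched by deliver() ends up here
        self.seen_tx.append(message.tx)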
# The actual NodeConn class
# This class provides an interface for a p2p connection to a specified node
class NodeConn(asyncore.dispatcher):
messagemap = {
b"version": msg_version,
b"verack": msg_verack,
b"addr": msg_addr,
b"alert": msg_alert,
b"inv": msg_inv,
b"getdata": msg_getdata,
b"getblocks": msg_getblocks,
b"tx": msg_tx,
b"block": msg_block,
b"getaddr": msg_getaddr,
b"ping": msg_ping,
b"pong": msg_pong,
b"headers": msg_headers,
b"getheaders": msg_getheaders,
b"reject": msg_reject,
b"mempool": msg_mempool,
b"feefilter": msg_feefilter,
b"sendheaders": msg_sendheaders,
b"sendcmpct": msg_sendcmpct,
b"cmpctblock": msg_cmpctblock,
b"getblocktxn": msg_getblocktxn,
b"blocktxn": msg_blocktxn
}
MAGIC_BYTES = {
"mainnet": b"\xf9\xbe\xb4\xd9", # mainnet
"testnet3": b"\x0b\x11\x09\x07", # testnet3
"regtest": b"\xfa\xbf\xb5\xda", # regtest
}
def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", services=NODE_NETWORK, send_version=True):
asyncore.dispatcher.__init__(self, map=mininode_socket_map)
self.dstaddr = dstaddr
self.dstport = dstport
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.sendbuf = b""
self.recvbuf = b""
self.ver_send = 209
self.ver_recv = 209
self.last_sent = 0
self.state = "connecting"
self.network = net
self.cb = callback
self.disconnect = False
self.nServices = 0
if send_version:
# stuff version msg into sendbuf
vt = msg_version()
vt.nServices = services
vt.addrTo.ip = self.dstaddr
vt.addrTo.port = self.dstport
vt.addrFrom.ip = "0.0.0.0"
vt.addrFrom.port = 0
self.send_message(vt, True)
logger.info('Connecting to Adnetcoin Node: %s:%d' % (self.dstaddr, self.dstport))
try:
self.connect((dstaddr, dstport))
except:
self.handle_close()
self.rpc = rpc
def handle_connect(self):
if self.state != "connected":
logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport))
self.state = "connected"
self.cb.on_open(self)
def handle_close(self):
logger.debug("Closing connection to: %s:%d" % (self.dstaddr, self.dstport))
self.state = "closed"
self.recvbuf = b""
self.sendbuf = b""
try:
self.close()
except:
pass
self.cb.on_close(self)
def handle_read(self):
try:
t = self.recv(8192)
if len(t) > 0:
self.recvbuf += t
self.got_data()
except:
pass
def readable(self):
return True
def writable(self):
with mininode_lock:
pre_connection = self.state == "connecting"
length = len(self.sendbuf)
return (length > 0 or pre_connection)
def handle_write(self):
with mininode_lock:
# asyncore does not expose socket connection, only the first read/write
# event, thus we must check connection manually here to know when we
# actually connect
if self.state == "connecting":
self.handle_connect()
if not self.writable():
return
try:
sent = self.send(self.sendbuf)
except:
self.handle_close()
return
self.sendbuf = self.sendbuf[sent:]
def got_data(self):
try:
while True:
if len(self.recvbuf) < 4:
return
if self.recvbuf[:4] != self.MAGIC_BYTES[self.network]:
raise ValueError("got garbage %s" % repr(self.recvbuf))
if self.ver_recv < 209:
if len(self.recvbuf) < 4 + 12 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = None
if len(self.recvbuf) < 4 + 12 + 4 + msglen:
return
msg = self.recvbuf[4+12+4:4+12+4+msglen]
self.recvbuf = self.recvbuf[4+12+4+msglen:]
else:
if len(self.recvbuf) < 4 + 12 + 4 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = self.recvbuf[4+12+4:4+12+4+4]
if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
return
msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
th = sha256(msg)
h = sha256(th)
if checksum != h[:4]:
raise ValueError("got bad checksum " + repr(self.recvbuf))
self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
if command in self.messagemap:
f = BytesIO(msg)
t = self.messagemap[command]()
t.deserialize(f)
self.got_message(t)
else:
logger.warning("Received unknown command from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, command, repr(msg)))
except Exception as e:
            logger.exception('got_data: %s', repr(e))
def send_message(self, message, pushbuf=False):
if self.state != "connected" and not pushbuf:
raise IOError('Not connected, no pushbuf')
self._log_message("send", message)
command = message.command
data = message.serialize()
tmsg = self.MAGIC_BYTES[self.network]
tmsg += command
tmsg += b"\x00" * (12 - len(command))
tmsg += struct.pack("<I", len(data))
if self.ver_send >= 209:
th = sha256(data)
h = sha256(th)
tmsg += h[:4]
tmsg += data
with mininode_lock:
self.sendbuf += tmsg
self.last_sent = time.time()
def got_message(self, message):
if message.command == b"version":
if message.nVersion <= BIP0031_VERSION:
self.messagemap[b'ping'] = msg_ping_prebip31
if self.last_sent + 30 * 60 < time.time():
self.send_message(self.messagemap[b'ping']())
self._log_message("receive", message)
self.cb.deliver(self, message)
def _log_message(self, direction, msg):
if direction == "send":
log_message = "Send message to "
elif direction == "receive":
log_message = "Received message from "
log_message += "%s:%d: %s" % (self.dstaddr, self.dstport, repr(msg)[:500])
if len(log_message) > 500:
log_message += "... (msg truncated)"
logger.debug(log_message)
def disconnect_node(self):
self.disconnect = True
class NetworkThread(Thread):
def run(self):
while mininode_socket_map:
# We check for whether to disconnect outside of the asyncore
            # loop to work around the behavior of asyncore when using
# select
disconnected = []
for fd, obj in mininode_socket_map.items():
if obj.disconnect:
disconnected.append(obj)
[ obj.handle_close() for obj in disconnected ]
asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1)
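# Hedged usage sketch (editor addition): wiring a callback object to a node,
# following the NodeConn constructor signature above; host, port and the rpc
# handle are placeholders.
#
#   node = ExampleTestNode()                      # any NodeConnCB subclass
#   conn = NodeConn('127.0.0.1', 18444, rpc, node)
#   node.add_connection(conn)
#   NetworkThread().start()
#   node.wait_for_verack()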
# An exception we can raise if we detect a potential disconnect
# (p2p or rpc) before the test is complete
class EarlyDisconnectError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
| 31.152356
| 262
| 0.596511
|
cc6ece647c295aea078fd5fa118c09d24ecbd5b4
| 554
|
py
|
Python
|
Desafio 82.py
|
MoomenEltelbany/PythonDesafios
|
aa2f44d3104cf3607f58dc42c2f8fc8023f128de
|
[
"MIT"
] | null | null | null |
Desafio 82.py
|
MoomenEltelbany/PythonDesafios
|
aa2f44d3104cf3607f58dc42c2f8fc8023f128de
|
[
"MIT"
] | null | null | null |
Desafio 82.py
|
MoomenEltelbany/PythonDesafios
|
aa2f44d3104cf3607f58dc42c2f8fc8023f128de
|
[
"MIT"
] | null | null | null |
num = []
numpar = []
numimpar = []
while True:
n = int(input('Digite um valor: '))
num.append(n)
respos = str(input('Quer continuar: [S/N] ')).strip().upper()[0]
while respos not in 'SN':
respos = str(input('Opção inválida..Quer continuar: [S/N] ')).strip().upper()[0]
if respos == 'N':
break
print(f'A lista completa é {num}')
for c in num:
if c % 2 == 0:
numpar.append(c)
else:
numimpar.append(c)
print(f'A lista de números pares é {numpar}')
print(f'A lista de números ímpares é {numimpar}')
| 29.157895
| 88
| 0.583032
|
ecf7fce5f83a7672e652de32bdeabb4572aa601c
| 4,119
|
py
|
Python
|
vmtkScripts/vmtksurfacearrayoperation.py
|
michelebucelli/vmtk
|
738bd1d152e8836847ab4d75f7e8360bd574e724
|
[
"Apache-2.0"
] | 217
|
2015-01-05T19:08:30.000Z
|
2022-03-31T12:14:59.000Z
|
vmtkScripts/vmtksurfacearrayoperation.py
|
mrp089/vmtk
|
64675f598e31bc6be3d4fba903fb59bf1394f492
|
[
"Apache-2.0"
] | 226
|
2015-03-31T07:16:06.000Z
|
2022-03-01T14:59:30.000Z
|
vmtkScripts/vmtksurfacearrayoperation.py
|
mrp089/vmtk
|
64675f598e31bc6be3d4fba903fb59bf1394f492
|
[
"Apache-2.0"
] | 132
|
2015-02-16T11:38:34.000Z
|
2022-03-18T04:38:45.000Z
|
#!/usr/bin/env python
## Program: VMTK
## Module: $RCSfile: vmtksurfacearrayoperation.py,v $
## Language: Python
## Date: $Date: 2005/09/14 09:49:59 $
## Version: $Revision: 1.7 $
## Copyright (c) Luca Antiga, David Steinman. All rights reserved.
## See LICENSE file for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.
from __future__ import absolute_import #NEEDS TO STAY AS TOP LEVEL MODULE FOR Py2-3 COMPATIBILITY
import vtk
from vmtk import vtkvmtk
import sys
from vmtk import pypes
class vmtkSurfaceArrayOperation(pypes.pypeScript):
def __init__(self):
pypes.pypeScript.__init__(self)
self.Surface = None
self.Surface2 = None
self.InputArrayName = None
self.Input2ArrayName = None
self.ResultArrayName = "Result"
self.Constant = 0.0
self.Operation = 'add'
self.SetScriptName('vmtksurfacearrayoperation')
self.SetScriptDoc('perform an operation between arrays of two surfaces and store result in the first surface')
self.SetInputMembers([
['Surface','i','vtkPolyData',1,'','the input surface','vmtksurfacereader'],
['Surface2','i2','vtkPolyData',1,'','the second input surface','vmtksurfacereader'],
['Operation','operation','str',1,'["multiplybyc","addc","add","subtract","multiply","min","max"]','the operation to be performed on the array; multiplybyc and addc only require the first input Surface to be specified'],
['Constant','constant','float',1,'','the value of the constant for multiplybyc and addc'],
['InputArrayName','iarray','str',1,'','the name of the array on the first surface'],
['Input2ArrayName','i2array','str',1,'','the name of the array on the second surface; if unspecified, InputArrayName is used'],
['ResultArrayName','resultarray','str',1,'','the name of the array where the result of the operation is stored']
])
self.SetOutputMembers([
['Surface','o','vtkPolyData',1,'','the output surface','vmtksurfacewriter']
])
def Execute(self):
        if self.Surface is None:
            self.PrintError('Error: No Surface.')
        if self.Surface2 is None and self.Operation not in ["multiplybyc", "addc"]:
self.Surface2 = self.Surface
array1 = self.Surface.GetPointData().GetArray(self.InputArrayName)
array2 = None
if self.Operation not in ["multiplybyc", "addc"]:
if self.Input2ArrayName:
array2 = self.Surface2.GetPointData().GetArray(self.Input2ArrayName)
else:
array2 = self.Surface2.GetPointData().GetArray(self.InputArrayName)
resultArray = vtk.vtkDoubleArray()
resultArray.DeepCopy(array1)
resultArray.SetName(self.ResultArrayName)
for i in range(array1.GetNumberOfTuples()):
value1 = array1.GetTuple1(i)
value2 = None
if array2:
value2 = array2.GetTuple1(i)
resultValue = 0.0
if self.Operation == "multiplybyc":
resultValue = value1 * self.Constant
elif self.Operation == "addc":
resultValue = value1 + self.Constant
elif self.Operation == "add":
resultValue = value1 + value2
elif self.Operation == "subtract":
resultValue = value1 - value2
elif self.Operation == "multiply":
resultValue = value1 * value2
elif self.Operation == "min":
resultValue = min(value1, value2)
elif self.Operation == "max":
resultValue = max(value1, value2)
resultArray.SetValue(i,resultValue)
self.Surface.GetPointData().AddArray(resultArray)
if __name__=='__main__':
main = pypes.pypeMain()
main.Arguments = sys.argv
main.Execute()
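# Hedged usage sketch (editor addition): the script can also be driven
# programmatically; the vmtkscripts import path is an assumption, while the
# member names below all come from this file.
#
#   from vmtk import vmtkscripts
#   op = vmtkscripts.vmtkSurfaceArrayOperation()
#   op.Surface = surf1            # vtkPolyData placeholders
#   op.Surface2 = surf2
#   op.InputArrayName = 'Thickness'
#   op.Operation = 'add'
#   op.Execute()                  # adds the 'Result' array to op.Surface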
| 38.858491
| 231
| 0.621267
|
38a2b0a0da4a63833529771e715eeb32452f1fdf
| 2,986
|
py
|
Python
|
file_date.py
|
scienceopen/filedate-histogram
|
5a075be229d39a8c623c0ae3eb0f553843282e68
|
[
"MIT"
] | null | null | null |
file_date.py
|
scienceopen/filedate-histogram
|
5a075be229d39a8c623c0ae3eb0f553843282e68
|
[
"MIT"
] | 1
|
2019-11-19T19:05:57.000Z
|
2019-12-10T10:02:54.000Z
|
file_date.py
|
scivision/filedate-histogram
|
5a075be229d39a8c623c0ae3eb0f553843282e68
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
Plots histogram of dates of files in directory.
Works for Hugo, Jekyll and Git.
"""
from pathlib import Path
from datetime import datetime
import yaml
import re
import shutil
import typing
import subprocess
import logging
try:
import pandas
from matplotlib.pyplot import show
except ImportError:
pandas = show = None
GIT = shutil.which("git")
use_git = GIT is not None
def file_date():
from argparse import ArgumentParser
p = ArgumentParser()
p.add_argument("path", help="path to filename to analyze")
p.add_argument("-e", "--ext", help="file extension to analyze", default=".md")
p.add_argument("-v", "--verbose", help="print method used to get date", action="store_true")
p = p.parse_args()
if p.verbose:
logging.basicConfig(level=logging.DEBUG)
dates = filedates(p.path, p.ext)
if pandas is None:
return
# http://stackoverflow.com/questions/27365467/python-pandas-plot-histogram-of-dates
ds = pandas.Series(dates)
bins = ds.groupby([ds.dt.year, ds.dt.month]).count()
if show is None:
return
bins.plot(kind="bar")
show()
def filedates(path: Path, ext: str) -> typing.Iterator[datetime]:
root = Path(path).expanduser()
if not root.is_dir():
raise NotADirectoryError(root)
use_header = True
use_filename = True
for file in root.glob(f"*{ext}"):
if use_header:
date = get_markdown_date(file)
logging.debug(f"header {file} {date}")
if date is not None:
yield date
continue
if use_filename:
try:
logging.debug(f"filename {file}")
yield datetime.strptime(file.name[:10], "%Y-%m-%d")
continue
except ValueError:
pass
if use_git:
date = get_gitcommit_date(file)
logging.debug(f"git {file} {date}")
if date is not None:
yield date
continue
logging.debug(f"stat {file}")
yield datetime.utcfromtimestamp(file.stat().st_mtime)
def get_markdown_date(path: Path) -> typing.Optional[datetime]:
content = path.read_text(errors="ignore")
pat = re.compile(r"^-{3}\s*\n([\S\s]+?)\n-{3}\s*\n([\S\s]+)")
mat = pat.search(content)
if mat:
meta = yaml.load(mat.groups()[0], Loader=yaml.BaseLoader)
if "date" in meta:
return datetime.strptime(meta["date"][:10], "%Y-%m-%d")
return None
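# Hedged example (editor addition): get_markdown_date() expects Hugo/Jekyll-style
# YAML front matter; the helper below writes such a post and parses its date.
def _front_matter_example(tmp_dir: Path) -> datetime:
    post = tmp_dir / "example.md"  # hypothetical post
    post.write_text("---\ndate: 2021-03-05\ntitle: Example\n---\nbody\n")
    return get_markdown_date(post)  # -> datetime(2021, 3, 5)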
def get_gitcommit_date(path: Path) -> typing.Optional[datetime]:
if not path.is_file():
return None
cmd = [GIT, "-C", str(path.parent), "log", "-1", "--format=%cd", "--date=iso", path.name]
datestr = subprocess.run(cmd, universal_newlines=True, stdout=subprocess.PIPE)
try:
date = datetime.strptime(datestr.stdout[:10], "%Y-%m-%d")
except ValueError:
date = None
return date
if __name__ == "__main__":
file_date()
| 25.521368
| 96
| 0.602813
|
f4ad1a6fdb03860182a7170406b362ab5de2ec11
| 20,812
|
py
|
Python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2021_02_01/aio/operations/_web_application_firewall_policies_operations.py
|
praveenkuttappan/azure-sdk-for-python
|
4b79413667b7539750a6c7dde15737013a3d4bd5
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2021_02_01/aio/operations/_web_application_firewall_policies_operations.py
|
v-xuto/azure-sdk-for-python
|
9c6296d22094c5ede410bc83749e8df8694ccacc
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2021_02_01/aio/operations/_web_application_firewall_policies_operations.py
|
v-xuto/azure-sdk-for-python
|
9c6296d22094c5ede410bc83749e8df8694ccacc
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class WebApplicationFirewallPoliciesOperations:
"""WebApplicationFirewallPoliciesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2021_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.WebApplicationFirewallPolicyListResult"]:
"""Lists all of the protection policies within a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either WebApplicationFirewallPolicyListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2021_02_01.models.WebApplicationFirewallPolicyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WebApplicationFirewallPolicyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('WebApplicationFirewallPolicyListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies'} # type: ignore
def list_all(
self,
**kwargs: Any
) -> AsyncIterable["_models.WebApplicationFirewallPolicyListResult"]:
"""Gets all the WAF policies in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either WebApplicationFirewallPolicyListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2021_02_01.models.WebApplicationFirewallPolicyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WebApplicationFirewallPolicyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('WebApplicationFirewallPolicyListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies'} # type: ignore
async def get(
self,
resource_group_name: str,
policy_name: str,
**kwargs: Any
) -> "_models.WebApplicationFirewallPolicy":
"""Retrieve protection policy with specified name within a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param policy_name: The name of the policy.
:type policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: WebApplicationFirewallPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_02_01.models.WebApplicationFirewallPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WebApplicationFirewallPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'policyName': self._serialize.url("policy_name", policy_name, 'str', max_length=128, min_length=0),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('WebApplicationFirewallPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}'} # type: ignore
async def create_or_update(
self,
resource_group_name: str,
policy_name: str,
parameters: "_models.WebApplicationFirewallPolicy",
**kwargs: Any
) -> "_models.WebApplicationFirewallPolicy":
"""Creates or update policy with specified rule set name within a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param policy_name: The name of the policy.
:type policy_name: str
:param parameters: Policy to be created.
:type parameters: ~azure.mgmt.network.v2021_02_01.models.WebApplicationFirewallPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:return: WebApplicationFirewallPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_02_01.models.WebApplicationFirewallPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WebApplicationFirewallPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'policyName': self._serialize.url("policy_name", policy_name, 'str', max_length=128, min_length=0),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'WebApplicationFirewallPolicy')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('WebApplicationFirewallPolicy', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('WebApplicationFirewallPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
policy_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'policyName': self._serialize.url("policy_name", policy_name, 'str', max_length=128, min_length=0),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
policy_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes Policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param policy_name: The name of the policy.
:type policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
policy_name=policy_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'policyName': self._serialize.url("policy_name", policy_name, 'str', max_length=128, min_length=0),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}'} # type: ignore
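# Hedged usage sketch (editor addition): these operations are normally reached
# through the generated management client rather than instantiated directly.
# The client class and attribute names below follow the usual azure-mgmt-network
# pattern and are assumptions, not taken from this file:
#
#   async with NetworkManagementClient(credential, subscription_id) as client:
#       async for policy in client.web_application_firewall_policies.list("my-rg"):
#           print(policy.name)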
| 49.908873
| 215
| 0.672785
|
e2102bd0266e822f4bbf2eba7d66e02c18793d60
| 554
|
py
|
Python
|
The Pythonic Temple/citizen.py
|
Shivam0808/TwilioQuestSolution
|
2a265d68f8ff3d8c34ef4661c94bb128baaf8d1f
|
[
"MIT"
] | null | null | null |
The Pythonic Temple/citizen.py
|
Shivam0808/TwilioQuestSolution
|
2a265d68f8ff3d8c34ef4661c94bb128baaf8d1f
|
[
"MIT"
] | null | null | null |
The Pythonic Temple/citizen.py
|
Shivam0808/TwilioQuestSolution
|
2a265d68f8ff3d8c34ef4661c94bb128baaf8d1f
|
[
"MIT"
] | null | null | null |
from distutils.command.build_scripts import first_line_re
class Citizen:
"this class is to describe a citizen of the City of Python"
def __init__(self, first_name, last_name):
self.first_name = first_name #this is an instance variable
self.last_name = last_name
def full_name(self): #This is an instance method
return self.first_name + " " + self.last_name #This took the 2 instance variables and put them together
greeting = "For the glory of Python!"
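# Hedged usage sketch (editor addition) exercising the class above; the names
# are illustrative.
if __name__ == "__main__":
    ada = Citizen("Ada", "Lovelace")
    print(ada.full_name())   # -> "Ada Lovelace"
    print(Citizen.greeting)  # class variable shared by every citizen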
| 25.181818
| 113
| 0.648014
|
989291f2dad3a8b7b952de77688317d17f49b8d0
| 11,631
|
py
|
Python
|
operations/mpi_rendering.py
|
vincentfung13/MINE
|
ef16cd83ae99c70f7970a583c533016c63587373
|
[
"MIT"
] | 191
|
2021-07-30T06:11:28.000Z
|
2022-03-31T13:45:51.000Z
|
operations/mpi_rendering.py
|
FreemanG/MINE
|
ef16cd83ae99c70f7970a583c533016c63587373
|
[
"MIT"
] | 20
|
2021-08-24T16:05:11.000Z
|
2022-03-28T11:58:47.000Z
|
operations/mpi_rendering.py
|
FreemanG/MINE
|
ef16cd83ae99c70f7970a583c533016c63587373
|
[
"MIT"
] | 21
|
2021-07-30T06:49:52.000Z
|
2022-03-07T05:42:44.000Z
|
import torch
from operations.homography_sampler import HomographySample
from operations.rendering_utils import transform_G_xyz, sample_pdf, gather_pixel_by_pxpy
def render(rgb_BS3HW, sigma_BS1HW, xyz_BS3HW, use_alpha=False, is_bg_depth_inf=False):
if not use_alpha:
imgs_syn, depth_syn, blend_weights, weights = plane_volume_rendering(
rgb_BS3HW,
sigma_BS1HW,
xyz_BS3HW,
is_bg_depth_inf
)
else:
imgs_syn, weights = alpha_composition(sigma_BS1HW, rgb_BS3HW)
depth_syn, _ = alpha_composition(sigma_BS1HW, xyz_BS3HW[:, :, 2:])
# No rgb blending with alpha composition
blend_weights = torch.zeros_like(rgb_BS3HW).cuda()
return imgs_syn, depth_syn, blend_weights, weights
def alpha_composition(alpha_BK1HW, value_BKCHW):
"""
composition equation from 'Single-View View Synthesis with Multiplane Images'
K is the number of planes, k=0 means the nearest plane, k=K-1 means the farthest plane
:param alpha_BK1HW: alpha at each of the K planes
:param value_BKCHW: rgb/disparity at each of the K planes
:return:
"""
B, K, _, H, W = alpha_BK1HW.size()
alpha_comp_cumprod = torch.cumprod(1 - alpha_BK1HW, dim=1) # BxKx1xHxW
preserve_ratio = torch.cat((torch.ones((B, 1, 1, H, W), dtype=alpha_BK1HW.dtype, device=alpha_BK1HW.device),
alpha_comp_cumprod[:, 0:K-1, :, :, :]), dim=1) # BxKx1xHxW
weights = alpha_BK1HW * preserve_ratio # BxKx1xHxW
value_composed = torch.sum(value_BKCHW * weights, dim=1, keepdim=False) # Bx3xHxW
return value_composed, weights
def plane_volume_rendering(rgb_BS3HW, sigma_BS1HW, xyz_BS3HW, is_bg_depth_inf):
B, S, _, H, W = sigma_BS1HW.size()
xyz_diff_BS3HW = xyz_BS3HW[:, 1:, :, :, :] - xyz_BS3HW[:, 0:-1, :, :, :] # Bx(S-1)x3xHxW
xyz_dist_BS1HW = torch.norm(xyz_diff_BS3HW, dim=2, keepdim=True) # Bx(S-1)x1xHxW
xyz_dist_BS1HW = torch.cat((xyz_dist_BS1HW,
torch.full((B, 1, 1, H, W),
fill_value=1e3,
dtype=xyz_BS3HW.dtype,
device=xyz_BS3HW.device)),
dim=1) # BxSx3xHxW
transparency = torch.exp(-sigma_BS1HW * xyz_dist_BS1HW) # BxSx1xHxW
alpha = 1 - transparency # BxSx1xHxW
# add small eps to avoid zero transparency_acc
# pytorch.cumprod is like: [a, b, c] -> [a, a*b, a*b*c], we need to modify it to [1, a, a*b]
transparency_acc = torch.cumprod(transparency + 1e-6, dim=1) # BxSx1xHxW
transparency_acc = torch.cat((torch.ones((B, 1, 1, H, W), dtype=transparency.dtype, device=transparency.device),
transparency_acc[:, 0:-1, :, :, :]),
dim=1) # BxSx1xHxW
weights = transparency_acc * alpha # BxSx1xHxW
rgb_out, depth_out = weighted_sum_mpi(rgb_BS3HW, xyz_BS3HW, weights, is_bg_depth_inf)
return rgb_out, depth_out, transparency_acc, weights
def weighted_sum_mpi(rgb_BS3HW, xyz_BS3HW, weights, is_bg_depth_inf):
weights_sum = torch.sum(weights, dim=1, keepdim=False) # Bx1xHxW
rgb_out = torch.sum(weights * rgb_BS3HW, dim=1, keepdim=False) # Bx3xHxW
if is_bg_depth_inf:
# for dtu dataset, set large depth if weight_sum is small
depth_out = torch.sum(weights * xyz_BS3HW[:, :, 2:, :, :], dim=1, keepdim=False) \
+ (1 - weights_sum) * 1000
else:
depth_out = torch.sum(weights * xyz_BS3HW[:, :, 2:, :, :], dim=1, keepdim=False) \
/ (weights_sum + 1e-5) # Bx1xHxW
return rgb_out, depth_out
def get_xyz_from_depth(meshgrid_homo,
depth,
K_inv):
"""
:param meshgrid_homo: 3xHxW
:param depth: Bx1xHxW
:param K_inv: Bx3x3
:return:
"""
H, W = meshgrid_homo.size(1), meshgrid_homo.size(2)
B, _, H_d, W_d = depth.size()
assert H==H_d, W==W_d
# 3xHxW -> Bx3xHxW
meshgrid_src_homo = meshgrid_homo.unsqueeze(0).repeat(B, 1, 1, 1)
meshgrid_src_homo_B3N = meshgrid_src_homo.reshape(B, 3, -1)
xyz_src = torch.matmul(K_inv, meshgrid_src_homo_B3N) # Bx3xHW
xyz_src = xyz_src.reshape(B, 3, H, W) * depth # Bx3xHxW
return xyz_src
def disparity_consistency_src_to_tgt(meshgrid_homo, K_src_inv, disparity_src,
G_tgt_src, K_tgt, disparity_tgt):
"""
    :param meshgrid_homo: 3xHxW
    :param K_src_inv: Bx3x3
    :param disparity_src: Bx1xHxW
:param G_tgt_src: Bx4x4
:param K_tgt: Bx3x3
:param disparity_tgt: Bx1xHxW
:return:
"""
B, _, H, W = disparity_src.size()
depth_src = torch.reciprocal(disparity_src)
xyz_src_B3N = get_xyz_from_depth(meshgrid_homo, depth_src, K_src_inv).view(B, 3, H*W)
xyz_tgt_B3N = transform_G_xyz(G_tgt_src, xyz_src_B3N, is_return_homo=False)
K_xyz_tgt_B3N = torch.matmul(K_tgt, xyz_tgt_B3N)
pxpy_tgt_B2N = K_xyz_tgt_B3N[:, 0:2, :] / K_xyz_tgt_B3N[:, 2:, :] # Bx2xN
pxpy_tgt_mask = torch.logical_and(
torch.logical_and(pxpy_tgt_B2N[:, 0:1, :] >= 0,
pxpy_tgt_B2N[:, 0:1, :] <= W - 1),
torch.logical_and(pxpy_tgt_B2N[:, 1:2, :] >= 0,
pxpy_tgt_B2N[:, 1:2, :] <= H - 1)
) # B1N
disparity_src = torch.reciprocal(xyz_tgt_B3N[:, 2:, :]) # Bx1xN
disparity_tgt = gather_pixel_by_pxpy(disparity_tgt, pxpy_tgt_B2N) # Bx1xN
depth_diff = torch.abs(disparity_src - disparity_tgt)
return torch.mean(depth_diff[pxpy_tgt_mask])
def get_src_xyz_from_plane_disparity(meshgrid_src_homo,
mpi_disparity_src,
K_src_inv):
"""
:param meshgrid_src_homo: 3xHxW
:param mpi_disparity_src: BxS
:param K_src_inv: Bx3x3
:return:
"""
B, S = mpi_disparity_src.size()
H, W = meshgrid_src_homo.size(1), meshgrid_src_homo.size(2)
mpi_depth_src = torch.reciprocal(mpi_disparity_src) # BxS
K_src_inv_Bs33 = K_src_inv.unsqueeze(1).repeat(1, S, 1, 1).reshape(B * S, 3, 3)
# 3xHxW -> BxSx3xHxW
meshgrid_src_homo = meshgrid_src_homo.unsqueeze(0).unsqueeze(1).repeat(B, S, 1, 1, 1)
meshgrid_src_homo_Bs3N = meshgrid_src_homo.reshape(B * S, 3, -1)
xyz_src = torch.matmul(K_src_inv_Bs33, meshgrid_src_homo_Bs3N) # BSx3xHW
xyz_src = xyz_src.reshape(B, S, 3, H * W) * mpi_depth_src.unsqueeze(2).unsqueeze(3) # BxSx3xHW
xyz_src_BS3HW = xyz_src.reshape(B, S, 3, H, W)
return xyz_src_BS3HW
def get_tgt_xyz_from_plane_disparity(xyz_src_BS3HW,
G_tgt_src):
"""
:param xyz_src_BS3HW: BxSx3xHxW
:param G_tgt_src: Bx4x4
:return:
"""
B, S, _, H, W = xyz_src_BS3HW.size()
G_tgt_src_Bs33 = G_tgt_src.unsqueeze(1).repeat(1, S, 1, 1).reshape(B*S, 4, 4)
xyz_tgt = transform_G_xyz(G_tgt_src_Bs33, xyz_src_BS3HW.reshape(B*S, 3, H*W)) # Bsx3xHW
xyz_tgt_BS3HW = xyz_tgt.reshape(B, S, 3, H, W) # BxSx3xHxW
return xyz_tgt_BS3HW
def render_tgt_rgb_depth(H_sampler: HomographySample,
mpi_rgb_src,
mpi_sigma_src,
mpi_disparity_src,
xyz_tgt_BS3HW,
G_tgt_src,
K_src_inv, K_tgt,
use_alpha=False,
is_bg_depth_inf=False):
"""
:param H_sampler:
:param mpi_rgb_src: BxSx3xHxW
:param mpi_sigma_src: BxSx1xHxW
:param mpi_disparity_src: BxS
:param xyz_tgt_BS3HW: BxSx3xHxW
:param G_tgt_src: Bx4x4
:param K_src_inv: Bx3x3
:param K_tgt: Bx3x3
:return:
"""
B, S, _, H, W = mpi_rgb_src.size()
mpi_depth_src = torch.reciprocal(mpi_disparity_src) # BxS
# note that here we concat the mpi_src with xyz_tgt, because H_sampler will sample them for tgt frame
# mpi_src is the same in whatever frame, but xyz has to be in tgt frame
mpi_xyz_src = torch.cat((mpi_rgb_src, mpi_sigma_src, xyz_tgt_BS3HW), dim=2) # BxSx(3+1+3)xHxW
# homography warping of mpi_src into tgt frame
G_tgt_src_Bs44 = G_tgt_src.unsqueeze(1).repeat(1, S, 1, 1).contiguous().reshape(B*S, 4, 4) # Bsx4x4
K_src_inv_Bs33 = K_src_inv.unsqueeze(1).repeat(1, S, 1, 1).contiguous().reshape(B*S, 3, 3) # Bsx3x3
K_tgt_Bs33 = K_tgt.unsqueeze(1).repeat(1, S, 1, 1).contiguous().reshape(B*S, 3, 3) # Bsx3x3
# BsxCxHxW, BsxHxW
tgt_mpi_xyz_BsCHW, tgt_mask_BsHW = H_sampler.sample(mpi_xyz_src.view(B*S, 7, H, W),
mpi_depth_src.view(B*S),
G_tgt_src_Bs44,
K_src_inv_Bs33,
K_tgt_Bs33)
# mpi composition
tgt_mpi_xyz = tgt_mpi_xyz_BsCHW.view(B, S, 7, H, W)
tgt_rgb_BS3HW = tgt_mpi_xyz[:, :, 0:3, :, :]
tgt_sigma_BS1HW = tgt_mpi_xyz[:, :, 3:4, :, :]
tgt_xyz_BS3HW = tgt_mpi_xyz[:, :, 4:, :, :]
tgt_mask_BSHW = tgt_mask_BsHW.view(B, S, H, W)
tgt_mask_BSHW = torch.where(tgt_mask_BSHW,
torch.ones((B, S, H, W), dtype=torch.float32, device=mpi_rgb_src.device),
torch.zeros((B, S, H, W), dtype=torch.float32, device=mpi_rgb_src.device))
# Bx3xHxW, Bx1xHxW, Bx1xHxW
tgt_z_BS1HW = tgt_xyz_BS3HW[:, :, -1:]
tgt_sigma_BS1HW = torch.where(tgt_z_BS1HW >= 0,
tgt_sigma_BS1HW,
torch.zeros_like(tgt_sigma_BS1HW, device=tgt_sigma_BS1HW.device))
tgt_rgb_syn, tgt_depth_syn, _, _ = render(tgt_rgb_BS3HW, tgt_sigma_BS1HW, tgt_xyz_BS3HW,
use_alpha=use_alpha,
is_bg_depth_inf=is_bg_depth_inf)
tgt_mask = torch.sum(tgt_mask_BSHW, dim=1, keepdim=True) # Bx1xHxW
return tgt_rgb_syn, tgt_depth_syn, tgt_mask
def predict_mpi_coarse_to_fine(mpi_predictor, src_imgs, xyz_src_BS3HW_coarse,
disparity_coarse_src, S_fine, is_bg_depth_inf):
if S_fine > 0:
with torch.no_grad():
# predict coarse mpi
mpi_coarse_src_list = mpi_predictor(src_imgs, disparity_coarse_src) # BxS_coarsex4xHxW
mpi_coarse_rgb_src = mpi_coarse_src_list[0][:, :, 0:3, :, :] # BxSx1xHxW
mpi_coarse_sigma_src = mpi_coarse_src_list[0][:, :, 3:, :, :] # BxSx1xHxW
_, _, _, weights = plane_volume_rendering(
mpi_coarse_rgb_src,
mpi_coarse_sigma_src,
xyz_src_BS3HW_coarse,
is_bg_depth_inf
)
weights = weights.mean((2, 3, 4)).unsqueeze(1).unsqueeze(2)
# sample fine disparity
disparity_fine_src = sample_pdf(disparity_coarse_src.unsqueeze(1).unsqueeze(2), weights, S_fine)
disparity_fine_src = disparity_fine_src.squeeze(2).squeeze(1)
# assemble coarse and fine disparity
disparity_all_src = torch.cat((disparity_coarse_src, disparity_fine_src), dim=1) # Bx(S_coarse + S_fine)
disparity_all_src, _ = torch.sort(disparity_all_src, dim=1, descending=True)
mpi_all_src_list = mpi_predictor(src_imgs, disparity_all_src) # BxS_coarsex4xHxW
return mpi_all_src_list, disparity_all_src
else:
mpi_coarse_src_list = mpi_predictor(src_imgs, disparity_coarse_src) # BxS_coarsex4xHxW
return mpi_coarse_src_list, disparity_coarse_src
| 42.761029
| 116
| 0.613361
|
2581bc68e76a073a60fa53f989150b3f2e571373
| 594
|
py
|
Python
|
1294E.py
|
julianferres/Codeforces
|
ac80292a4d53b8078fc1a85e91db353c489555d9
|
[
"MIT"
] | 4
|
2020-01-31T15:49:25.000Z
|
2020-07-07T11:44:03.000Z
|
1294E.py
|
julianferres/CodeForces
|
14e8369e82a2403094183d6f7824201f681c9f65
|
[
"MIT"
] | null | null | null |
1294E.py
|
julianferres/CodeForces
|
14e8369e82a2403094183d6f7824201f681c9f65
|
[
"MIT"
] | null | null | null |
n, m = map(int, input().split())
def solve(actual, target):
#print(actual, target)
rots = {i: 0 for i in range(n)}
for i, x in enumerate(actual):
if x in target:
rots[(i-target[x]) % n] -= 1
# print(rots)
return min([n+i+rots[i] for i in range(n)])
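# Hedged worked example (editor addition): rots[i] is minus the number of cells
# already correct after i upward cyclic shifts of the column, so using i shifts
# costs i + (n + rots[i]) operations in total. For n = 2 with the whole column
# off by one shift, rots[1] = -2 and the minimum cost is 2 + 1 - 2 = 1.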
a = []
for _ in range(n):
b = [int(x) for x in input().split()]
a.append(b)
for i in range(n):
for j in range(m):
a[i][j] -= 1
ans = 0
for j in range(m):
target = {m*i+j: i for i in range(n)}
actual = [a[i][j] for i in range(n)]
ans += solve(actual, target)
print(ans)
| 22
| 47
| 0.52862
|
f2685aaeb014edcfaef67f4467c20c9e7c8303fe
| 6,111
|
bzl
|
Python
|
generated_api_shadow/bazel/repository_locations.bzl
|
Y0Username/envoy
|
9dd8ae3c69a9917d97a886ed03e1c010dcd9b098
|
[
"Apache-2.0"
] | 1
|
2021-07-03T18:53:39.000Z
|
2021-07-03T18:53:39.000Z
|
generated_api_shadow/bazel/repository_locations.bzl
|
Y0Username/envoy
|
9dd8ae3c69a9917d97a886ed03e1c010dcd9b098
|
[
"Apache-2.0"
] | 184
|
2021-04-19T09:34:37.000Z
|
2022-03-31T15:14:40.000Z
|
generated_api_shadow/bazel/repository_locations.bzl
|
Y0Username/envoy
|
9dd8ae3c69a9917d97a886ed03e1c010dcd9b098
|
[
"Apache-2.0"
] | null | null | null |
# This should match the schema defined in external_deps.bzl.
REPOSITORY_LOCATIONS_SPEC = dict(
bazel_skylib = dict(
project_name = "bazel-skylib",
project_desc = "Common useful functions and rules for Bazel",
project_url = "https://github.com/bazelbuild/bazel-skylib",
version = "1.0.3",
sha256 = "1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c",
release_date = "2020-08-27",
urls = ["https://github.com/bazelbuild/bazel-skylib/releases/download/{version}/bazel-skylib-{version}.tar.gz"],
use_category = ["api"],
),
com_envoyproxy_protoc_gen_validate = dict(
project_name = "protoc-gen-validate (PGV)",
project_desc = "protoc plugin to generate polyglot message validators",
project_url = "https://github.com/envoyproxy/protoc-gen-validate",
version = "0.6.1",
sha256 = "c695fc5a2e5a1b52904cd8a58ce7a1c3a80f7f50719496fd606e551685c01101",
release_date = "2021-04-26",
strip_prefix = "protoc-gen-validate-{version}",
urls = ["https://github.com/envoyproxy/protoc-gen-validate/archive/v{version}.tar.gz"],
use_category = ["api"],
implied_untracked_deps = [
"com_github_iancoleman_strcase",
"com_github_lyft_protoc_gen_star",
"com_github_spf13_afero",
"org_golang_google_genproto",
"org_golang_x_text",
],
),
com_github_bazelbuild_buildtools = dict(
project_name = "Bazel build tools",
project_desc = "Developer tools for working with Google's bazel buildtool.",
project_url = "https://github.com/bazelbuild/buildtools",
version = "4.0.1",
sha256 = "c28eef4d30ba1a195c6837acf6c75a4034981f5b4002dda3c5aa6e48ce023cf1",
release_date = "2021-03-01",
strip_prefix = "buildtools-{version}",
urls = ["https://github.com/bazelbuild/buildtools/archive/{version}.tar.gz"],
use_category = ["api"],
),
com_github_cncf_udpa = dict(
project_name = "xDS API",
project_desc = "xDS API Working Group (xDS-WG)",
project_url = "https://github.com/cncf/xds",
# During the UDPA -> xDS migration, we aren't working with releases.
version = "b88cc788a63e5b38ee334a2e702c67901355ae2c",
sha256 = "3220df8564f217665b6e17776569c5f748178c2b9cbf83bb55a13ddc0a3738f0",
release_date = "2021-03-23",
strip_prefix = "xds-{version}",
urls = ["https://github.com/cncf/xds/archive/{version}.tar.gz"],
use_category = ["api"],
),
com_github_openzipkin_zipkinapi = dict(
project_name = "Zipkin API",
project_desc = "Zipkin's language independent model and HTTP Api Definitions",
project_url = "https://github.com/openzipkin/zipkin-api",
version = "1.0.0",
sha256 = "6c8ee2014cf0746ba452e5f2c01f038df60e85eb2d910b226f9aa27ddc0e44cf",
release_date = "2020-11-22",
strip_prefix = "zipkin-api-{version}",
urls = ["https://github.com/openzipkin/zipkin-api/archive/{version}.tar.gz"],
use_category = ["api"],
),
com_google_googleapis = dict(
# TODO(dio): Consider writing a Starlark macro for importing Google API proto.
project_name = "Google APIs",
project_desc = "Public interface definitions of Google APIs",
project_url = "https://github.com/googleapis/googleapis",
version = "82944da21578a53b74e547774cf62ed31a05b841",
sha256 = "a45019af4d3290f02eaeb1ce10990166978c807cb33a9692141a076ba46d1405",
release_date = "2019-12-02",
strip_prefix = "googleapis-{version}",
urls = ["https://github.com/googleapis/googleapis/archive/{version}.tar.gz"],
use_category = ["api"],
),
opencensus_proto = dict(
project_name = "OpenCensus Proto",
project_desc = "Language Independent Interface Types For OpenCensus",
project_url = "https://github.com/census-instrumentation/opencensus-proto",
version = "0.3.0",
sha256 = "b7e13f0b4259e80c3070b583c2f39e53153085a6918718b1c710caf7037572b0",
release_date = "2020-07-21",
strip_prefix = "opencensus-proto-{version}/src",
urls = ["https://github.com/census-instrumentation/opencensus-proto/archive/v{version}.tar.gz"],
use_category = ["api"],
),
prometheus_metrics_model = dict(
project_name = "Prometheus client model",
project_desc = "Data model artifacts for Prometheus",
project_url = "https://github.com/prometheus/client_model",
version = "0255a22d35ad5661ef7aa89c95fdf5dfd685283f",
sha256 = "a83fd26a80c5f9b82d1231448141a148c1d7a0c8f581ddf49fdbd8c1545e5661",
release_date = "2021-01-16",
strip_prefix = "client_model-{version}",
urls = ["https://github.com/prometheus/client_model/archive/{version}.tar.gz"],
use_category = ["api"],
),
rules_proto = dict(
project_name = "Protobuf Rules for Bazel",
project_desc = "Protocol buffer rules for Bazel",
project_url = "https://github.com/bazelbuild/rules_proto",
version = "f7a30f6f80006b591fa7c437fe5a951eb10bcbcf",
sha256 = "9fc210a34f0f9e7cc31598d109b5d069ef44911a82f507d5a88716db171615a8",
release_date = "2021-02-09",
strip_prefix = "rules_proto-{version}",
urls = ["https://github.com/bazelbuild/rules_proto/archive/{version}.tar.gz"],
use_category = ["api"],
),
opentelemetry_proto = dict(
project_name = "OpenTelemetry Proto",
project_desc = "Language Independent Interface Types For OpenTelemetry",
project_url = "https://github.com/open-telemetry/opentelemetry-proto",
version = "0.9.0",
sha256 = "9ec38ab51eedbd7601979b0eda962cf37bc8a4dc35fcef604801e463f01dcc00",
release_date = "2021-05-12",
strip_prefix = "opentelemetry-proto-{version}",
urls = ["https://github.com/open-telemetry/opentelemetry-proto/archive/v{version}.tar.gz"],
use_category = ["api"],
),
)
| 50.090164
| 120
| 0.664048
|
6675b8f956718dcf59ced1e287b7e8fc11b75806
| 2,437
|
py
|
Python
|
cogs/fun.py
|
uthree/GeneralBot
|
4204d4ba2b7df4c68bda3caa7cee368c37bdf2e7
|
[
"MIT"
] | null | null | null |
cogs/fun.py
|
uthree/GeneralBot
|
4204d4ba2b7df4c68bda3caa7cee368c37bdf2e7
|
[
"MIT"
] | null | null | null |
cogs/fun.py
|
uthree/GeneralBot
|
4204d4ba2b7df4c68bda3caa7cee368c37bdf2e7
|
[
"MIT"
] | 1
|
2021-08-13T07:35:34.000Z
|
2021-08-13T07:35:34.000Z
|
from discord.ext import commands
import discord as discord
import random
# Fun (joke/meme commands) cog
class Fun(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(aliases=["sintyoku"])
async def sinchoku(self,ctx):
links = [
"https://cdn.discordapp.com/attachments/708828790796845189/787409537883439134/alicedoudesuka-300x168.jpg",
"https://cdn.discordapp.com/attachments/787413181277798400/787413202958417920/images-6.jpeg",
"https://cdn.discordapp.com/attachments/787413181277798400/787413219085647872/images-5.jpeg",
"https://cdn.discordapp.com/attachments/787413181277798400/787413268872429568/images.jpeg",
"https://cdn.discordapp.com/attachments/787413181277798400/787413301776089118/main.pngcompresstrue.png",
"https://cdn.discordapp.com/attachments/787413181277798400/787413247686737930/images-3.jpeg"
]
await ctx.send(random.choice(links))
@commands.command(aliases=["sintyoku_dame"])
async def sinchoku_dame(self,ctx):
links = [
"https://ghippos.net/image/blog/20131008_1.jpg",
"https://livedoor.blogimg.jp/tank_make/imgs/a/9/a996bd7c.jpg",
"https://d2dcan0armyq93.cloudfront.net/photo/odai/400/8a1b02cea77695724af97b596cbc5acc_400.jpg"
]
await ctx.send(random.choice(links))
@commands.command()
async def krsw(self,ctx):
images = [
"https://cdn.discordapp.com/attachments/859753134729199666/875924516616957982/KRSW.png", # 無能弁護士
"https://cdn.discordapp.com/attachments/859753134729199666/875927011154088016/HSGW.png", # 長谷川亮太
"https://cdn.discordapp.com/attachments/859753134729199666/875926898838999050/JEX.jpg" # ペルソナメガネ
]
await ctx.send(random.choice(images))
@commands.command()
async def pakason(self,ctx):
pakasong_txt = open('./cogs/pakason.txt', 'r')
pakasong_list = pakasong_txt.readlines()
pakasong = [
pakasong_list[0],
pakasong_list[1],
pakasong_list[2],
pakasong_list[3]
]
await ctx.send(random.choice(pakasong))
pakasong_txt.close()
def setup(bot):
bot.add_cog(Fun(bot))
| 42.017241
| 122
| 0.625359
|
6150bdf9e94064fef835942e95b9e6e70d3ea7eb
| 7,645
|
py
|
Python
|
tools/mxnet/common/fit.py
|
feifeibear/dlbench
|
978b034e9c34e6aaa38782bb1e4a2cea0c01d0f9
|
[
"MIT"
] | 181
|
2017-01-29T23:50:25.000Z
|
2022-01-15T13:43:17.000Z
|
tools/mxnet/common/fit.py
|
waltersharpWEI/dlbenchData
|
8f551ead44141aa1f678b1a32724d07ccbf4db5b
|
[
"MIT"
] | 33
|
2017-01-30T00:50:24.000Z
|
2020-12-19T07:00:56.000Z
|
tools/mxnet/common/fit.py
|
waltersharpWEI/dlbenchData
|
8f551ead44141aa1f678b1a32724d07ccbf4db5b
|
[
"MIT"
] | 54
|
2017-01-30T21:04:27.000Z
|
2020-07-09T03:26:56.000Z
|
import mxnet as mx
import logging
import os
import time
def _get_lr_scheduler(args, kv):
if 'lr_factor' not in args or args.lr_factor >= 1:
return (args.lr, None)
epoch_size = args.num_examples / args.batch_size
if 'dist' in args.kv_store:
epoch_size /= kv.num_workers
begin_epoch = args.load_epoch if args.load_epoch else 0
step_epochs = [int(l) for l in args.lr_step_epochs.split(',')]
lr = args.lr
for s in step_epochs:
if begin_epoch >= s:
lr *= args.lr_factor
if lr != args.lr:
logging.info('Adjust learning rate to %e for epoch %d' %(lr, begin_epoch))
steps = [epoch_size * (x-begin_epoch) for x in step_epochs if x-begin_epoch > 0]
return (lr, mx.lr_scheduler.MultiFactorScheduler(step=steps, factor=args.lr_factor))
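# Hedged worked example (editor addition): with --lr 0.1 --lr-factor 0.1
# --lr-step-epochs 30,60 and no --load-epoch, the scheduler above lowers the
# rate tenfold after 30*epoch_size and 60*epoch_size updates; resuming from
# --load-epoch 40 instead starts at lr = 0.01 and keeps only the step at
# epoch 60 (20*epoch_size updates after the resume).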
def _load_model(args, rank=0):
if 'load_epoch' not in args or args.load_epoch is None:
return (None, None, None)
assert args.model_prefix is not None
model_prefix = args.model_prefix
if rank > 0 and os.path.exists("%s-%d-symbol.json" % (model_prefix, rank)):
model_prefix += "-%d" % (rank)
sym, arg_params, aux_params = mx.model.load_checkpoint(
model_prefix, args.load_epoch)
logging.info('Loaded model %s_%04d.params', model_prefix, args.load_epoch)
return (sym, arg_params, aux_params)
def _save_model(args, rank=0):
if args.model_prefix is None:
return None
dst_dir = os.path.dirname(args.model_prefix)
if not os.path.isdir(dst_dir):
os.mkdir(dst_dir)
return mx.callback.do_checkpoint(args.model_prefix if rank == 0 else "%s-%d" % (
args.model_prefix, rank))
def add_fit_args(parser):
"""
parser : argparse.ArgumentParser
return a parser added with args required by fit
"""
train = parser.add_argument_group('Training', 'model training')
train.add_argument('--network', type=str,
help='the neural network to use')
train.add_argument('--num-layers', type=int,
help='number of layers in the neural network, required by some networks such as resnet')
train.add_argument('--gpus', type=str,
help='list of gpus to run, e.g. 0 or 0,2,5. empty means using cpu')
train.add_argument('--kv-store', type=str, default='device',
help='key-value store type')
train.add_argument('--num-epochs', type=int, default=100,
help='max num of epochs')
train.add_argument('--lr', type=float, default=0.1,
help='initial learning rate')
train.add_argument('--lr-factor', type=float, default=None,
help='the ratio to reduce lr on each step')
train.add_argument('--lr-step-epochs', type=str,
help='the epochs to reduce the lr, e.g. 30,60')
train.add_argument('--optimizer', type=str, default='sgd',
help='the optimizer type')
train.add_argument('--mom', type=float, default=0.9,
help='momentum for sgd')
train.add_argument('--num-nodes', type=int, default=1,
help='number of machines used for training')
train.add_argument('--wd', type=float, default=0.00001,
help='weight decay for sgd')
train.add_argument('--batch-size', type=int, default=128,
help='the batch size')
train.add_argument('--disp-batches', type=int, default=20,
help='show progress for every n batches')
train.add_argument('--model-prefix', type=str,
help='model prefix')
parser.add_argument('--monitor', dest='monitor', type=int, default=0,
help='log network parameters every N iters if larger than 0')
train.add_argument('--load-epoch', type=int,
help='load the model on an epoch using the model-load-prefix')
train.add_argument('--top-k', type=int, default=0,
help='report the top-k accuracy. 0 means no report.')
train.add_argument('--test-io', type=int, default=0,
help='1 means test reading speed without training')
return train
def fit(args, network, data_loader, init=None, **kwargs):
"""
train a model
args : argparse returns
    network : the symbol definition of the neural network
data_loader : function that returns the train and val data iterators
"""
# kvstore
kv = mx.kvstore.create(args.kv_store)
# logging
head = '%(asctime)-15s Node[' + str(kv.rank) + '] %(message)s'
logging.basicConfig(level=logging.DEBUG, format=head)
logging.info('start with arguments %s', args)
# data iterators
(train, val) = data_loader(args, kv)
if args.test_io:
tic = time.time()
for i, batch in enumerate(train):
for j in batch.data:
j.wait_to_read()
if (i+1) % args.disp_batches == 0:
logging.info('Batch [%d]\tSpeed: %.2f samples/sec' % (
i, args.disp_batches*args.batch_size/(time.time()-tic)))
tic = time.time()
return
# load model
if 'arg_params' in kwargs and 'aux_params' in kwargs:
arg_params = kwargs['arg_params']
aux_params = kwargs['aux_params']
else:
sym, arg_params, aux_params = _load_model(args, kv.rank)
if sym is not None:
assert sym.tojson() == network.tojson()
# save model
checkpoint = _save_model(args, kv.rank)
# devices for training
    devs = mx.cpu() if args.gpus is None or args.gpus == '' else [
mx.gpu(int(i)) for i in args.gpus.split(',')]
# learning rate
lr, lr_scheduler = _get_lr_scheduler(args, kv)
# create model
model = mx.mod.Module(
context = devs,
symbol = network
)
    optimizer_params = {
'learning_rate': lr,
'momentum' : args.mom,
'wd' : args.wd,
'lr_scheduler': lr_scheduler}
monitor = mx.mon.Monitor(args.monitor, pattern=".*") if args.monitor > 0 else None
if init is None:
initializer = mx.initializer.Normal(sigma=0.01)
#initializer = mx.initializer.Xavier( rnd_type='gaussian', factor_type="in", magnitude=2)
# initializer = mx.init.Xavier(factor_type="in", magnitude=2.34),
else:
initializer = init
# evaluation metrices
eval_metrics = ['accuracy', 'ce']
if args.top_k > 0:
eval_metrics.append(mx.metric.create('top_k_accuracy', top_k=args.top_k))
# callbacks that run after each batch
args.disp_batches = int((args.num_examples-args.batch_size)/args.batch_size) - 1
batch_end_callbacks = [mx.callback.Speedometer(args.batch_size, args.disp_batches)]
if 'batch_end_callback' in kwargs:
cbs = kwargs['batch_end_callback']
batch_end_callbacks += cbs if isinstance(cbs, list) else [cbs]
# run
model.fit(train,
begin_epoch = args.load_epoch if args.load_epoch else 0,
num_epoch = args.num_epochs,
# eval_data = val,
eval_metric = eval_metrics,
kvstore = kv,
optimizer = args.optimizer,
optimizer_params = optimizer_params,
initializer = initializer,
arg_params = arg_params,
aux_params = aux_params,
batch_end_callback = batch_end_callbacks,
epoch_end_callback = checkpoint,
allow_missing = True,
monitor = monitor)
| 40.664894
| 111
| 0.608764
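A minimal, hypothetical smoke test of the add_fit_args() helper in the fit.py record above; it exercises only argument parsing (assuming mxnet is installed and the file is importable as fit), and the flag values are examples rather than recommendations.

import argparse
import fit  # the tools/mxnet/common/fit.py module above

parser = argparse.ArgumentParser(description='train an example network')
fit.add_fit_args(parser)
args = parser.parse_args(['--network', 'resnet', '--num-layers', '50',
                          '--gpus', '0,1', '--lr-step-epochs', '30,60'])
print(args.network, args.batch_size, args.lr, args.kv_store)  # resnet 128 0.1 device
# fit.fit(args, network_symbol, data_loader) would then launch training, where
# network_symbol is an mx.sym graph and data_loader returns (train, val) iterators.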
|
11602b0a020af8b87b4cc9327c2c6bfcb1e1845e
| 2,079
|
py
|
Python
|
challenge/agoda_cancellation_estimator.py
|
ranitoukhy/IML.HUJI
|
156e67422311847d9f40efa36fb2020da3dd0087
|
[
"MIT"
] | null | null | null |
challenge/agoda_cancellation_estimator.py
|
ranitoukhy/IML.HUJI
|
156e67422311847d9f40efa36fb2020da3dd0087
|
[
"MIT"
] | null | null | null |
challenge/agoda_cancellation_estimator.py
|
ranitoukhy/IML.HUJI
|
156e67422311847d9f40efa36fb2020da3dd0087
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from typing import NoReturn
from IMLearn.base import BaseEstimator
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
class AgodaCancellationEstimator(BaseEstimator):
"""
An estimator for solving the Agoda Cancellation challenge
"""
def __init__(self) -> AgodaCancellationEstimator:
"""
Instantiate an estimator for solving the Agoda Cancellation challenge
Parameters
----------
Attributes
----------
"""
super().__init__()
self.model = None
def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:
"""
Fit an estimator for given samples
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data to fit an estimator for
y : ndarray of shape (n_samples, )
Responses of input data to fit to
Notes
-----
"""
self.model = LogisticRegression().fit(X, y)
#self.model = KNeighborsClassifier(5).fit(X,y)
def _predict(self, X: np.ndarray) -> np.ndarray:
"""
Predict responses for given samples using fitted estimator
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data to predict responses for
Returns
-------
responses : ndarray of shape (n_samples, )
Predicted responses of given samples
"""
return self.model.predict(X)
def _loss(self, X: np.ndarray, y: np.ndarray) -> float:
"""
Evaluate performance under loss function
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Test samples
y : ndarray of shape (n_samples, )
True labels of test samples
Returns
-------
loss : float
Performance under loss function
"""
return (self.predict(X) == y).sum() / len(X)
| 25.666667
| 77
| 0.577201
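A toy driver for the AgodaCancellationEstimator above, assuming the course's BaseEstimator exposes public fit/predict/loss wrappers around the underscored methods; the features and labels are synthetic, and note that _loss as written reports the fraction of correct predictions (accuracy) rather than a conventional loss value.

import numpy as np
from challenge.agoda_cancellation_estimator import AgodaCancellationEstimator  # assumed import path

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 4))                   # 200 bookings, 4 made-up numeric features
y = (X[:, 0] + 0.5 * X[:, 1] > 0).astype(int)   # synthetic cancellation labels

est = AgodaCancellationEstimator()
est.fit(X, y)                 # dispatches to _fit(), which trains the LogisticRegression
print(est.predict(X[:5]))     # predicted 0/1 cancellation flags
print(est.loss(X, y))         # fraction of correct predictions on the same data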
|
084df0b54fbbc8ce4f1d747c892a5203142abe75
| 2,721
|
py
|
Python
|
pymic/transform/flip.py
|
vincentme/PyMIC
|
5cbbca7d0a19232be647086d4686ceea523f45ee
|
[
"Apache-2.0"
] | null | null | null |
pymic/transform/flip.py
|
vincentme/PyMIC
|
5cbbca7d0a19232be647086d4686ceea523f45ee
|
[
"Apache-2.0"
] | null | null | null |
pymic/transform/flip.py
|
vincentme/PyMIC
|
5cbbca7d0a19232be647086d4686ceea523f45ee
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import print_function, division
import torch
import json
import math
import random
import numpy as np
from scipy import ndimage
from pymic.transform.abstract_transform import AbstractTransform
from pymic.util.image_process import *
class RandomFlip(AbstractTransform):
""" random flip the image (shape [C, D, H, W] or [C, H, W]) """
def __init__(self, params):
"""
flip_depth (bool) : random flip along depth axis or not, only used for 3D images
flip_height (bool): random flip along height axis or not
flip_width (bool) : random flip along width axis or not
"""
super(RandomFlip, self).__init__(params)
self.flip_depth = params['RandomFlip_flip_depth'.lower()]
self.flip_height = params['RandomFlip_flip_height'.lower()]
self.flip_width = params['RandomFlip_flip_width'.lower()]
self.inverse = params['RandomFlip_inverse'.lower()]
def __call__(self, sample):
image = sample['image']
input_shape = image.shape
input_dim = len(input_shape) - 1
flip_axis = []
if(self.flip_width):
if(random.random() > 0.5):
flip_axis.append(-1)
if(self.flip_height):
if(random.random() > 0.5):
flip_axis.append(-2)
if(input_dim == 3 and self.flip_depth):
if(random.random() > 0.5):
flip_axis.append(-3)
sample['RandomFlip_Param'] = json.dumps(flip_axis)
if(len(flip_axis) > 0):
# use .copy() to avoid negative strides of numpy array
# current pytorch does not support negative strides
image_t = np.flip(image, flip_axis).copy()
sample['image'] = image_t
if('label' in sample and self.task == 'segmentation'):
sample['label'] = np.flip(sample['label'] , flip_axis).copy()
if('pixel_weight' in sample and self.task == 'segmentation'):
sample['pixel_weight'] = np.flip(sample['pixel_weight'] , flip_axis).copy()
return sample
def inverse_transform_for_prediction(self, sample):
''' flip sample['predict'] (5D or 4D) to the original direction.
flip_axis is a list as saved in __call__().'''
if(isinstance(sample['RandomFlip_Param'], list) or \
isinstance(sample['RandomFlip_Param'], tuple)):
flip_axis = json.loads(sample['RandomFlip_Param'][0])
else:
flip_axis = json.loads(sample['RandomFlip_Param'])
if(len(flip_axis) > 0):
sample['predict'] = np.flip(sample['predict'] , flip_axis).copy()
return sample
| 41.227273
| 92
| 0.6086
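A short sketch of configuring and applying the RandomFlip transform above; the lower-cased keys mirror the params[...] lookups in __init__, the 'task' entry is assumed to be consumed by AbstractTransform, and the arrays are random stand-ins.

import numpy as np
from pymic.transform.flip import RandomFlip

params = {
    'randomflip_flip_depth':  True,   # only honoured for 3D ([C, D, H, W]) inputs
    'randomflip_flip_height': True,
    'randomflip_flip_width':  True,
    'randomflip_inverse':     True,
    'task': 'segmentation',           # assumed to be read by AbstractTransform
}
transform = RandomFlip(params)

sample = {
    'image': np.random.rand(1, 16, 64, 64).astype(np.float32),
    'label': np.random.randint(0, 2, (1, 16, 64, 64)),
}
flipped = transform(sample)
print(flipped['RandomFlip_Param'])   # JSON-encoded list of the axes that were flipped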
|
ee31c8bd8d074717ba1b1d5fd094ad2d7ebfff16
| 9,532
|
py
|
Python
|
banana-split/banana-split.py
|
timwedde/banana-split
|
f129db7527b52206058727e8849d70f9a73f03ea
|
[
"MIT"
] | 7
|
2020-03-31T15:25:54.000Z
|
2022-03-22T22:22:39.000Z
|
banana-split/banana-split.py
|
timwedde/banana-split
|
f129db7527b52206058727e8849d70f9a73f03ea
|
[
"MIT"
] | 4
|
2018-10-25T17:50:29.000Z
|
2021-05-13T11:11:50.000Z
|
banana-split/banana-split.py
|
timwedde/banana-split
|
f129db7527b52206058727e8849d70f9a73f03ea
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
### System ###
import re
import sys
import time
import shutil
import argparse
from glob import glob
from tqdm import tqdm
from os import mkdir, remove, listdir
from os.path import join, dirname, exists
from signal import signal, SIGINT, SIG_IGN
from multiprocessing import cpu_count, Pool
### py-midicsv ###
import py_midicsv
### Local ###
from patterns import *
from scanner import scan
def check(args):
if not exists(args.input_dir):
print("Input directory does not exist!")
sys.exit(1)
if exists(args.output_dir):
if listdir(args.output_dir):
print("The output contains data. Do you want to overwrite it?")
result = input("[y]es/[n]o: ").lower()
if not result in ["y", "yes"]:
print("Aborted")
sys.exit(0)
shutil.rmtree(args.output_dir)
mkdir(args.output_dir)
def __midi_to_csv(file_in, file_out):
with open(file_out, "w") as f:
csv_string = py_midicsv.midi_to_csv(file_in)
f.write("\n".join(csv_string))
def __csv_to_midi(file_in, file_out):
with open(file_in, "r") as f:
midi_object = py_midicsv.csv_to_midi(f.readlines())
with open(file_out, "wb") as f:
midi_writer = py_midicsv.FileWriter(f)
midi_writer.write(midi_object)
def midi_to_csv(data):
args, file = data
folder = join(args.output_dir, file["name"])
csv_file = join(folder, "{}_full.csv".format(file["name"]))
mkdir(folder)
try:
__midi_to_csv(file["path"], csv_file)
except:
shutil.rmtree(folder)
if args.verbose:
return "Could not convert '{}'".format(file["name"])
def csv_to_midi(file):
midi_file = join(dirname(file["path"]), "{}.mid".format(file["name"]))
try:
__csv_to_midi(file["path"], midi_file)
except:
if args.verbose:
return "An error occurred while converting '{}' in folder {}".format(file["name"], dirname(file["path"]))
def list_channels(file):
channels = set()
with open(file["path"], "r", encoding="latin-1") as f:
for line in f:
m = channel_pattern.match(line)
if m:
channels.add(m.group(1))
return channels
def split_channel(file, data, channel):
file_out = join(dirname(file["path"]), "channel_{}.csv".format(channel))
with open(file_out, "w") as f:
for line in data:
if comment_pattern.match(line):
continue
m = channel_pattern.match(line)
if m:
if m.group(1) == channel:
f.write(line)
else:
if not lyric_pattern.match(line): # skip lyrics
f.write(line)
def split_channels(file, channels):
file_in = file["path"]
data = open(file_in, "r", encoding="latin-1").readlines()
for channel in channels:
split_channel(file, data, channel)
def extract_channels(file):
channels = list_channels(file)
split_channels(file, channels)
def transpose(file):
data = []
with open(file["path"], "r", encoding="latin-1") as f:
for line in f:
m = note_pattern.match(line)
if m:
if m.group(2) != drum_channel:
note = int(m.group(5)) + args.offset
if note < 0:
note = 0
data.append(re.sub(note_pattern, "\\1, \\2, \\3, \\4, {}, \\6".format(note), line))
else:
data.append(line)
else:
data.append(line)
with open(file["path"], "w", encoding="latin-1") as f:
for line in data:
f.write(line)
def check_channel(file):
file_in = file["path"]
data = open(file_in, "r", encoding="latin-1").readlines()
for line in data:
if note_pattern.match(line):
return
remove(file_in)
def list_tracks(file):
tracks = set()
with open(file["path"], "r", encoding="latin-1") as f:
for line in f:
m = track_pattern.match(line)
if m:
tracks.add(m.group(1))
return tracks
def split_track(file, data, track):
folder = join(dirname(file["path"]), file["name"])
file_out = join(folder, "track_{}.csv".format(track))
if not exists(folder):
mkdir(folder)
with open(file_out, "w") as f:
for line in data:
if comment_pattern.match(line): # skip comments
continue
m = track_pattern.match(line)
if m:
if m.group(1) == track:
f.write(line)
else:
if not lyric_pattern.match(line): # skip lyrics
f.write(line)
def split_tracks(file, tracks):
file_in = file["path"]
channel_target = join(dirname(file["path"]), file["name"], "{}.csv".format(file["name"]))
data = open(file_in, "r", encoding="latin-1").readlines()
for track in tracks:
split_track(file, data, track)
shutil.move(file["path"], channel_target)
def extract_tracks(file):
tracks = list_tracks(file)
split_tracks(file, tracks)
def clean(file):
data = open(file["path"], "r", encoding="latin-1").readlines()
with open(file["path"], "w") as f:
for line in data:
if comment_pattern.match(line) or unknown_event_pattern.match(line) or sequencer_specific_pattern.match(line):
continue
f.write(line)
# TODO: load the file to be processed into RAM once, instead of reading it from disk multiple times to improve performance
# Remove dependency of development version of python-midi (python3 branch)
# (pip install https://github.com/vishnubob/python-midi/archive/feature/python3.zip)
def main(args):
with tqdm(total=(6 if args.keep else 7), unit="step") as bar:
tqdm.write("Converting input data...")
files = scan(args.input_dir, "*.mid")
files = [(args, file) for file in files]
for e in tqdm(worker_pool.imap_unordered(midi_to_csv, files), total=len(files), unit="files"):
if e:
tqdm.write(e)
bar.update(1)
tqdm.write("Cleaning input data...")
files = scan(args.output_dir, "**/*_full.csv")
for e in tqdm(worker_pool.imap_unordered(clean, files), total=len(files), unit="files"):
if e:
tqdm.write(e)
bar.update(1)
tqdm.write("Splitting channels...")
files = scan(args.output_dir, "**/*_full.csv")
for e in tqdm(worker_pool.imap_unordered(extract_channels, files), total=len(files), unit="files"):
if e:
tqdm.write(e)
bar.update(1)
tqdm.write("Removing empty channels...")
files = scan(args.output_dir, "**/channel_*.csv", True)
for e in tqdm(worker_pool.imap_unordered(check_channel, files), total=len(files), unit="files"):
if e:
tqdm.write(e)
bar.update(1)
tqdm.write("Splitting tracks...")
files = scan(args.output_dir, "**/channel_*.csv", True)
for e in tqdm(worker_pool.imap_unordered(extract_tracks, files), total=len(files), unit="files"):
if e:
tqdm.write(e)
bar.update(1)
tqdm.write("Converting output data...")
files = scan(args.output_dir, "**/channel_*.csv", True)
files += scan(args.output_dir, "**/track_*.csv", True)
for e in tqdm(worker_pool.imap_unordered(csv_to_midi, files), total=len(files), unit="files"):
if e:
tqdm.write(e)
bar.update(1)
if not args.keep:
tqdm.write("Removing intermediary artifacts...")
files = scan(args.output_dir, "**/*.csv", True)
files = [f["path"] for f in files]
for e in tqdm(worker_pool.imap_unordered(remove, files), total=len(files), unit="files"):
if e:
tqdm.write(e)
bar.update(1)
tqdm.write("Finished processing")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Split MIDI files into channels and tracks.")
parser.add_argument("-i", "--input", type=str, dest="input_dir", required=True,
metavar="dir", help="(required) The folder containing the input data")
parser.add_argument("-o", "--output", type=str, dest="output_dir", required=True,
metavar="dir", help="(required) The folder containing the output data")
parser.add_argument("-t", "--threads", type=int, dest="num_threads", default=cpu_count(),
metavar="N", help="The amount of threads to use (default: {})".format(cpu_count()))
parser.add_argument("-k", "--keep", dest="keep", action="store_true",
help="When given, will keep the intermediary product of each file (.csv)")
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true",
help="When given, will produce more verbose output for debugging purposes")
args = parser.parse_args()
original_sigint_handler = signal(SIGINT, SIG_IGN)
worker_pool = Pool(args.num_threads)
signal(SIGINT, original_sigint_handler)
check(args)
try:
main(args)
except KeyboardInterrupt:
print("Received SIGINT, terminating...")
worker_pool.terminate()
else:
worker_pool.close()
worker_pool.join()
| 33.801418
| 122
| 0.587075
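Outside the CLI above (invoked roughly as python banana-split.py -i midi_in -o midi_out), the same py_midicsv round trip used by __midi_to_csv/__csv_to_midi can be exercised directly; the file names below are placeholders.

import py_midicsv

# MIDI -> CSV: returns a list of CSV text rows
csv_rows = py_midicsv.midi_to_csv("song.mid")
with open("song.csv", "w") as f:
    f.write("\n".join(csv_rows))

# CSV -> MIDI: parse the rows back and write a new file
with open("song.csv", "r") as f:
    midi_object = py_midicsv.csv_to_midi(f.readlines())
with open("song_roundtrip.mid", "wb") as f:
    py_midicsv.FileWriter(f).write(midi_object)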
|
3e6652683f1ad0fa77f5e86134f041ec0ab7792a
| 2,217
|
py
|
Python
|
tests/qtoggleserver/core/expressions/conftest.py
|
DigitEgal/qtoggleserver
|
54b6ac53742af9529fd349d4fc207b0dc8a38d3b
|
[
"Apache-2.0"
] | 12
|
2020-07-26T05:49:25.000Z
|
2022-01-08T21:50:44.000Z
|
tests/qtoggleserver/core/expressions/conftest.py
|
DigitEgal/qtoggleserver
|
54b6ac53742af9529fd349d4fc207b0dc8a38d3b
|
[
"Apache-2.0"
] | 8
|
2020-04-30T18:40:18.000Z
|
2020-11-08T21:09:35.000Z
|
tests/qtoggleserver/core/expressions/conftest.py
|
DigitEgal/qtoggleserver
|
54b6ac53742af9529fd349d4fc207b0dc8a38d3b
|
[
"Apache-2.0"
] | 2
|
2020-02-14T02:52:13.000Z
|
2021-04-21T05:13:07.000Z
|
import pytest
from qtoggleserver.core.expressions import literalvalues
@pytest.fixture(scope='session')
def literal_false():
return literalvalues.LiteralValue(False, 'false')
@pytest.fixture(scope='session')
def literal_true():
return literalvalues.LiteralValue(True, 'true')
@pytest.fixture(scope='session')
def literal_zero():
return literalvalues.LiteralValue(0, '0')
@pytest.fixture(scope='session')
def literal_zero_point_five():
return literalvalues.LiteralValue(0.5, '0.5')
@pytest.fixture(scope='session')
def literal_one():
return literalvalues.LiteralValue(1, '1')
@pytest.fixture(scope='session')
def literal_two():
return literalvalues.LiteralValue(2, '2')
@pytest.fixture(scope='session')
def literal_three():
return literalvalues.LiteralValue(3, '3')
@pytest.fixture(scope='session')
def literal_pi():
return literalvalues.LiteralValue(3.14159, '3.14159')
@pytest.fixture(scope='session')
def literal_ten():
return literalvalues.LiteralValue(10, '10')
@pytest.fixture(scope='session')
def literal_ten_point_fifty_one():
return literalvalues.LiteralValue(10.51, '10.51')
@pytest.fixture(scope='session')
def literal_sixteen():
return literalvalues.LiteralValue(16, '16')
@pytest.fixture(scope='session')
def literal_one_hundred():
return literalvalues.LiteralValue(100, '100')
@pytest.fixture(scope='session')
def literal_two_hundreds():
return literalvalues.LiteralValue(200, '200')
@pytest.fixture(scope='session')
def literal_one_thousand():
return literalvalues.LiteralValue(1000, '1000')
@pytest.fixture(scope='session')
def literal_minus_one():
return literalvalues.LiteralValue(-1, '-1')
@pytest.fixture(scope='session')
def literal_minus_two():
return literalvalues.LiteralValue(-2, '-2')
@pytest.fixture(scope='session')
def literal_minus_pi():
return literalvalues.LiteralValue(-3.14159, '-3.14159')
@pytest.fixture(scope='session')
def literal_minus_ten_point_fifty_one():
return literalvalues.LiteralValue(-10.51, '-10.51')
@pytest.fixture(scope='session')
def literal_dummy_timestamp(dummy_timestamp):
return literalvalues.LiteralValue(dummy_timestamp, str(dummy_timestamp))
| 22.17
| 76
| 0.747406
|
f22d5351fa47bbf8e0095e9da9abb445c0f45208
| 5,149
|
py
|
Python
|
slider-agent/src/main/python/agent/shell.py
|
turningme/incubator-retired-slider
|
1d4f519d763210f46e327338be72efa99e65cb5d
|
[
"Apache-2.0"
] | 60
|
2015-01-05T10:51:11.000Z
|
2018-12-15T03:48:09.000Z
|
slider-agent/src/main/python/agent/shell.py
|
turningme/incubator-retired-slider
|
1d4f519d763210f46e327338be72efa99e65cb5d
|
[
"Apache-2.0"
] | null | null | null |
slider-agent/src/main/python/agent/shell.py
|
turningme/incubator-retired-slider
|
1d4f519d763210f46e327338be72efa99e65cb5d
|
[
"Apache-2.0"
] | 87
|
2015-01-14T05:14:15.000Z
|
2018-12-25T14:14:56.000Z
|
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import logging
import os
import signal
import subprocess
import threading
import platform
from process_utils import get_flat_process_tree, kill_pids, wait_for_entire_process_tree_death, \
get_processes_running, get_command_by_pid
if platform.system() != "Windows":
try:
import pwd
except ImportError:
import winpwd as pwd
global serverTracker
serverTracker = {}
logger = logging.getLogger()
shellRunner = None
threadLocal = threading.local()
tempFiles = []
def noteTempFile(filename):
tempFiles.append(filename)
def getTempFiles():
return tempFiles
class _dict_to_object:
def __init__(self, entries):
self.__dict__.update(entries)
def __getitem__(self, item):
return self.__dict__[item]
# windows specific code
def _kill_process_with_children_windows(parent_pid):
shellRunner().run(["taskkill", "/T", "/PID", "{0}".format(parent_pid)])
class shellRunnerWindows:
# Run any command
def run(self, script, user=None):
logger.warn("user argument ignored on windows")
code = 0
if not isinstance(script, list):
cmd = " "
cmd = cmd.join(script)
else:
cmd = script
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=False)
out, err = p.communicate()
code = p.wait()
logger.debug("Exitcode for %s is %d" % (cmd, code))
return {'exitCode': code, 'output': out, 'error': err}
def runPowershell(self, file=None, script_block=None, args=[]):
logger.warn("user argument ignored on windows")
code = 0
cmd = None
if file:
cmd = ['powershell', '-WindowStyle', 'Hidden', '-File', file] + args
elif script_block:
cmd = ['powershell', '-WindowStyle', 'Hidden', '-Command', script_block] + args
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=False)
out, err = p.communicate()
code = p.wait()
logger.debug("Exitcode for %s is %d" % (cmd, code))
return _dict_to_object({'exitCode': code, 'output': out, 'error': err})
#linux specific code
def _kill_process_with_children_linux(parent_pid):
"""
Kills process tree starting from a given pid.
:param parent_pid: head of tree
:param graceful_kill_delays: map <command name, custom delay between SIGTERM and SIGKILL>
:return:
"""
pids = get_flat_process_tree(parent_pid)
logger.info("Process tree: %s" % ','.join(pids))
try:
kill_pids(pids, signal.SIGTERM)
except Exception, e:
logger.warn("Failed to kill PID %d" % parent_pid)
logger.warn("Reported error: " + repr(e))
wait_for_entire_process_tree_death(pids)
try:
running_processes = get_processes_running(pids)
if running_processes:
process_names = map(lambda x: get_command_by_pid(x), running_processes)
logger.warn("These PIDs %s did not die after SIGTERM, sending SIGKILL. Exact commands to be killed:\n %s" %
(", ".join(running_processes), "\n".join(process_names)))
kill_pids(running_processes, signal.SIGKILL)
except Exception, e:
logger.error("Failed to send SIGKILL to PID %d. Process exited?" % parent_pid)
logger.error("Reported error: " + repr(e))
def _changeUid():
try:
os.setuid(threadLocal.uid)
except Exception:
logger.warn("can not switch user for running command.")
class shellRunnerLinux:
# Run any command
def run(self, script, user=None):
try:
if user != None:
user = pwd.getpwnam(user)[2]
else:
user = os.getuid()
threadLocal.uid = user
except Exception:
logger.warn("can not switch user for RUN_COMMAND.")
code = 0
cmd = " "
cmd = cmd.join(script)
p = subprocess.Popen(cmd, preexec_fn=_changeUid, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True, close_fds=True)
out, err = p.communicate()
code = p.wait()
logger.debug("Exitcode for %s is %d" % (cmd, code))
return {'exitCode': code, 'output': out, 'error': err}
def kill_process_with_children(parent_pid):
if platform.system() == "Windows":
_kill_process_with_children_windows(parent_pid)
else:
_kill_process_with_children_linux(parent_pid)
if platform.system() == "Windows":
shellRunner = shellRunnerWindows
else:
shellRunner = shellRunnerLinux
| 31.981366
| 115
| 0.689066
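A brief sketch of driving the runner classes above (the module itself targets Python 2); run() returns a plain dict, and the command and pid below are illustrative.

import shell  # the slider-agent module above, with its process_utils dependency available

runner = shell.shellRunner()            # resolves to shellRunnerLinux or shellRunnerWindows
result = runner.run(["echo", "hello"])  # {'exitCode': ..., 'output': ..., 'error': ...}
if result['exitCode'] != 0:
    print(result['error'])

# Terminate a whole process tree: SIGTERM first, SIGKILL for stragglers on Linux.
shell.kill_process_with_children(12345)  # example pid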
|
ac26e6819735de755fc895f321077fd6d0bb6fdb
| 11,670
|
py
|
Python
|
tensorflow/python/keras/engine/base_preprocessing_layer_test.py
|
plopresti/tensorflow
|
8b0c84d30d957596cbb3bcac9245e114c3f0b65b
|
[
"Apache-2.0"
] | 2
|
2019-06-28T17:43:04.000Z
|
2019-06-28T17:43:07.000Z
|
tensorflow/python/keras/engine/base_preprocessing_layer_test.py
|
plopresti/tensorflow
|
8b0c84d30d957596cbb3bcac9245e114c3f0b65b
|
[
"Apache-2.0"
] | 8
|
2019-07-08T10:09:18.000Z
|
2019-09-26T20:55:43.000Z
|
tensorflow/python/keras/engine/base_preprocessing_layer_test.py
|
plopresti/tensorflow
|
8b0c84d30d957596cbb3bcac9245e114c3f0b65b
|
[
"Apache-2.0"
] | 1
|
2021-02-27T07:40:01.000Z
|
2021-02-27T07:40:01.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras' base preprocessing layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras.engine import base_preprocessing_layer
from tensorflow.python.keras.engine import base_preprocessing_layer_v1
from tensorflow.python.ops import init_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
# Define a test-only implementation of CombinerPreprocessingLayer to validate
# its correctness directly.
class AddingPreprocessingLayer(
base_preprocessing_layer.CombinerPreprocessingLayer):
_SUM_NAME = "sum"
def __init__(self, **kwargs):
super(AddingPreprocessingLayer, self).__init__(
combiner=self.AddingCombiner(), **kwargs)
def build(self, input_shape):
super(AddingPreprocessingLayer, self).build(input_shape)
self._sum = self._add_state_variable(
name=self._SUM_NAME,
shape=(1,),
dtype=dtypes.float32,
initializer=init_ops.zeros_initializer)
def set_total(self, sum_value):
"""This is an example of how a subclass would implement a direct setter.
These methods should generally just create a dict mapping the correct names
to the relevant passed values, and call self._set_state_variables() with the
dict of data.
Args:
sum_value: The total to set.
"""
self._set_state_variables({self._SUM_NAME: [sum_value]})
def call(self, inputs):
return inputs + self._sum
# Define a Combiner for this layer class.
class AddingCombiner(base_preprocessing_layer.Combiner):
def compute(self, batch_values, accumulator=None):
"""Compute a step in this computation, returning a new accumulator."""
new_accumulator = 0 if batch_values is None else np.sum(batch_values)
if accumulator is None:
return new_accumulator
else:
return self.merge([accumulator, new_accumulator])
def merge(self, accumulators):
"""Merge several accumulators to a single accumulator."""
# Combine accumulators and return the result.
result = accumulators[0]
for accumulator in accumulators[1:]:
result = np.sum([np.sum(result), np.sum(accumulator)])
return result
def extract(self, accumulator):
"""Convert an accumulator into a dict of output values."""
# We have to add an additional dimension here because the weight shape
# is (1,) not None.
return {AddingPreprocessingLayer._SUM_NAME: [accumulator]}
def restore(self, output):
"""Create an accumulator based on 'output'."""
# There is no special internal state here, so we just return the relevant
# internal value. We take the [0] value here because the weight itself
# is of the shape (1,) and we want the scalar contained inside it.
return output[AddingPreprocessingLayer._SUM_NAME][0]
def serialize(self, accumulator):
"""Serialize an accumulator for a remote call."""
return compat.as_bytes(json.dumps(accumulator))
def deserialize(self, encoded_accumulator):
"""Deserialize an accumulator received from 'serialize()'."""
return json.loads(compat.as_text(encoded_accumulator))
class AddingPreprocessingLayerV1(
AddingPreprocessingLayer,
base_preprocessing_layer_v1.CombinerPreprocessingLayer):
pass
def get_layer():
if context.executing_eagerly():
return AddingPreprocessingLayer()
else:
return AddingPreprocessingLayerV1()
@keras_parameterized.run_all_keras_modes
class PreprocessingLayerTest(keras_parameterized.TestCase):
def test_adapt_list_fails(self):
"""Test that non-Dataset/Numpy inputs cause a reasonable error."""
input_dataset = [1, 2, 3, 4, 5]
layer = get_layer()
with self.assertRaisesRegex(ValueError, ".*a Dataset or a Numpy.*"):
layer.adapt(input_dataset)
def test_adapt_infinite_dataset_fails(self):
"""Test that preproc layers fail if an infinite dataset is passed."""
input_dataset = dataset_ops.Dataset.from_tensor_slices(
np.array([[1], [2], [3], [4], [5], [0]])).repeat()
layer = get_layer()
with self.assertRaisesRegex(ValueError, ".*infinite number of elements.*"):
layer.adapt(input_dataset)
def test_pre_build_injected_update_with_no_build_fails(self):
"""Test external update injection before build() is called fails."""
input_dataset = np.array([1, 2, 3, 4, 5])
layer = get_layer()
combiner = layer._combiner
updates = combiner.extract(combiner.compute(input_dataset))
with self.assertRaisesRegex(RuntimeError, ".*called after build.*"):
layer._set_state_variables(updates)
def test_setter_update(self):
"""Test the prototyped setter method."""
input_data = keras.Input(shape=(1,))
layer = get_layer()
output = layer(input_data)
model = keras.Model(input_data, output)
layer.set_total(15)
self.assertAllEqual([[16], [17], [18]], model.predict([1, 2, 3]))
def test_pre_build_adapt_update_numpy(self):
"""Test that preproc layers can adapt() before build() is called."""
input_dataset = np.array([1, 2, 3, 4, 5])
layer = get_layer()
layer.adapt(input_dataset)
input_data = keras.Input(shape=(1,))
output = layer(input_data)
model = keras.Model(input_data, output)
self.assertAllEqual([[16], [17], [18]], model.predict([1, 2, 3]))
def test_post_build_adapt_update_numpy(self):
"""Test that preproc layers can adapt() after build() is called."""
input_dataset = np.array([1, 2, 3, 4, 5])
input_data = keras.Input(shape=(1,))
layer = get_layer()
output = layer(input_data)
model = keras.Model(input_data, output)
layer.adapt(input_dataset)
self.assertAllEqual([[16], [17], [18]], model.predict([1, 2, 3]))
def test_pre_build_injected_update(self):
"""Test external update injection before build() is called."""
input_dataset = np.array([1, 2, 3, 4, 5])
layer = get_layer()
combiner = layer._combiner
updates = combiner.extract(combiner.compute(input_dataset))
layer.build((1,))
layer._set_state_variables(updates)
input_data = keras.Input(shape=(1,))
output = layer(input_data)
model = keras.Model(input_data, output)
self.assertAllEqual([[16], [17], [18]], model.predict([1, 2, 3]))
def test_post_build_injected_update(self):
"""Test external update injection after build() is called."""
input_dataset = np.array([1, 2, 3, 4, 5])
input_data = keras.Input(shape=(1,))
layer = get_layer()
output = layer(input_data)
model = keras.Model(input_data, output)
combiner = layer._combiner
updates = combiner.extract(combiner.compute(input_dataset))
layer._set_state_variables(updates)
self.assertAllEqual([[16], [17], [18]], model.predict([1, 2, 3]))
def test_pre_build_adapt_update_dataset(self):
"""Test that preproc layers can adapt() before build() is called."""
input_dataset = dataset_ops.Dataset.from_tensor_slices(
np.array([[1], [2], [3], [4], [5], [0]]))
layer = get_layer()
layer.adapt(input_dataset)
input_data = keras.Input(shape=(1,))
output = layer(input_data)
model = keras.Model(input_data, output)
self.assertAllEqual([[16], [17], [18]], model.predict([1, 2, 3]))
def test_post_build_adapt_update_dataset(self):
"""Test that preproc layers can adapt() after build() is called."""
input_dataset = dataset_ops.Dataset.from_tensor_slices(
np.array([[1], [2], [3], [4], [5], [0]]))
input_data = keras.Input(shape=(1,))
layer = get_layer()
output = layer(input_data)
model = keras.Model(input_data, output)
layer.adapt(input_dataset)
self.assertAllEqual([[16], [17], [18]], model.predict([1, 2, 3]))
def test_further_tuning(self):
"""Test that models can be tuned with multiple calls to 'adapt'."""
input_dataset = np.array([1, 2, 3, 4, 5])
layer = get_layer()
layer.adapt(input_dataset)
input_data = keras.Input(shape=(1,))
output = layer(input_data)
model = keras.Model(input_data, output)
self.assertAllEqual([[16], [17], [18]], model.predict([1, 2, 3]))
layer.adapt(np.array([1, 2]), reset_state=False)
self.assertAllEqual([[19], [20], [21]], model.predict([1, 2, 3]))
def test_further_tuning_post_injection(self):
"""Test that models can be tuned with multiple calls to 'adapt'."""
input_dataset = np.array([1, 2, 3, 4, 5])
layer = get_layer()
input_data = keras.Input(shape=(1,))
output = layer(input_data)
model = keras.Model(input_data, output)
combiner = layer._combiner
updates = combiner.extract(combiner.compute(input_dataset))
layer._set_state_variables(updates)
self.assertAllEqual([[16], [17], [18]], model.predict([1, 2, 3]))
layer.adapt(np.array([1, 2]), reset_state=False)
self.assertAllEqual([[19], [20], [21]], model.predict([1, 2, 3]))
def test_weight_based_state_transfer(self):
"""Test that preproc layers can transfer state via get/set weights.."""
def get_model():
input_data = keras.Input(shape=(1,))
layer = get_layer()
output = layer(input_data)
return (keras.Model(input_data, output), layer)
input_dataset = np.array([1, 2, 3, 4, 5])
model, layer = get_model()
layer.adapt(input_dataset)
self.assertAllEqual([[16], [17], [18]], model.predict([1, 2, 3]))
# Create a new model and verify it has no state carryover.
weights = model.get_weights()
model_2, _ = get_model()
self.assertAllEqual([[1], [2], [3]], model_2.predict([1, 2, 3]))
# Transfer state from model to model_2 via get/set weights.
model_2.set_weights(weights)
self.assertAllEqual([[16], [17], [18]], model_2.predict([1, 2, 3]))
def test_weight_based_state_transfer_with_further_tuning(self):
"""Test that transferred state can be used to further tune a model.."""
def get_model():
input_data = keras.Input(shape=(1,))
layer = get_layer()
output = layer(input_data)
return (keras.Model(input_data, output), layer)
input_dataset = np.array([1, 2, 3, 4, 5])
model, layer = get_model()
layer.adapt(input_dataset)
self.assertAllEqual([[16], [17], [18]], model.predict([1, 2, 3]))
# Transfer state from model to model_2 via get/set weights.
weights = model.get_weights()
model_2, layer_2 = get_model()
model_2.set_weights(weights)
# Further adapt this layer based on the transferred weights.
layer_2.adapt(np.array([1, 2]), reset_state=False)
self.assertAllEqual([[19], [20], [21]], model_2.predict([1, 2, 3]))
if __name__ == "__main__":
test.main()
| 34.835821
| 80
| 0.688003
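The AddingCombiner defined in the test file above can also be exercised on its own, which makes the compute/merge/extract/restore contract easy to see; the batch values are arbitrary and the class is assumed to be in scope (e.g. inside the same module).

import numpy as np

combiner = AddingPreprocessingLayer.AddingCombiner()
acc = combiner.compute(np.array([1, 2, 3]))           # running sum -> 6
acc = combiner.compute(np.array([4, 5]), acc)         # accumulate  -> 15
merged = combiner.merge([acc, combiner.compute(np.array([10]))])  # -> 25

weights = combiner.extract(merged)                    # {'sum': [25]}, matching the layer weight
restored = combiner.restore(weights)                  # -> 25
print(weights, restored)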
|
e63c94670b9bf8a67224ad0c655103e180099a4e
| 161
|
py
|
Python
|
pythoscope/__init__.py
|
jmikedupont2/pythoscope
|
58a1149f204897e8f789d93ee7e49b6db0bd346f
|
[
"MIT"
] | 2
|
2020-04-06T11:02:46.000Z
|
2020-05-14T18:37:04.000Z
|
pythoscope/__init__.py
|
jmikedupont2/pythoscope
|
58a1149f204897e8f789d93ee7e49b6db0bd346f
|
[
"MIT"
] | null | null | null |
pythoscope/__init__.py
|
jmikedupont2/pythoscope
|
58a1149f204897e8f789d93ee7e49b6db0bd346f
|
[
"MIT"
] | null | null | null |
import sys
#from importlib import reload
#reload(sys)
#sys.setdefaultencoding("utf-8")
from .cmdline import main, __version__
from .snippet import start, stop
| 17.888889
| 38
| 0.782609
|
b953806efd9a2a4c0e0eeab7503bb41eb686ba64
| 1,841
|
py
|
Python
|
SignLanguageRecognitionLearning/landmarkDetection.py
|
JanBinkowski/SignLanguageRecognition
|
9c111a77301aafd3d480fffbe73e6b86d65eaead
|
[
"MIT"
] | 4
|
2021-11-13T20:40:26.000Z
|
2021-12-30T18:25:51.000Z
|
SignLanguageRecognitionLearning/landmarkDetection.py
|
JanBinkowski/SignLanguageRecognition
|
9c111a77301aafd3d480fffbe73e6b86d65eaead
|
[
"MIT"
] | null | null | null |
SignLanguageRecognitionLearning/landmarkDetection.py
|
JanBinkowski/SignLanguageRecognition
|
9c111a77301aafd3d480fffbe73e6b86d65eaead
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
import mediapipe as mp
import cv2.cv2
import time
import os
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense
DATA_PATH = os.path.join('../MP_DATA')
DATA_PATH_VIDEO = os.path.join('../MP_VIDEOS')
actions = np.array(open('C://Users//JanBinkowski//Desktop//SignLanguageRecognition//classes.txt', 'r').read().split('\n'))
number_of_sequences = 100
every_sequence_length = 30
mpHolistic = mp.solutions.holistic
mpDrawing = mp.solutions.drawing_utils
def mediapipeDetection(image_param, model):
image = cv2.cvtColor(image_param, cv2.COLOR_BGR2RGB)
image.flags.writeable = False
results = model.process(image)
image.flags.writeable = True
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
return image, results
def drawLandmarks(image, results):
mpDrawing.draw_landmarks(image, results.left_hand_landmarks, mpHolistic.HAND_CONNECTIONS,
mpDrawing.DrawingSpec(color=(81, 23, 11), thickness=2, circle_radius=4),
mpDrawing.DrawingSpec(color=(81, 45, 122), thickness=2, circle_radius=2))
mpDrawing.draw_landmarks(image, results.right_hand_landmarks, mpHolistic.HAND_CONNECTIONS,
mpDrawing.DrawingSpec(color=(122, 23, 77), thickness=2, circle_radius=4),
mpDrawing.DrawingSpec(color=(122, 45, 249), thickness=2, circle_radius=2))
def extractKeypoints(results):
lh = np.array([[res.x, res.y, res.z] for res in results.left_hand_landmarks.landmark]).flatten() if results.left_hand_landmarks else np.zeros(21*3)
rh = np.array([[res.x, res.y, res.z] for res in results.right_hand_landmarks.landmark]).flatten() if results.right_hand_landmarks else np.zeros(21*3)
return np.concatenate([lh, rh])
| 43.833333
| 153
| 0.718088
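A compact capture loop showing how the helpers above are typically combined, assuming they are in scope (same module) and a webcam is available at index 0; the window name and confidence thresholds are examples.

import cv2
import mediapipe as mp

mpHolistic = mp.solutions.holistic
cap = cv2.VideoCapture(0)
with mpHolistic.Holistic(min_detection_confidence=0.5, min_tracking_confidence=0.5) as holistic:
    while cap.isOpened():
        ok, frame = cap.read()
        if not ok:
            break
        image, results = mediapipeDetection(frame, holistic)
        drawLandmarks(image, results)
        keypoints = extractKeypoints(results)   # 126-dim vector: 2 hands x 21 landmarks x (x, y, z)
        cv2.imshow('landmarks', image)
        if cv2.waitKey(10) & 0xFF == ord('q'):
            break
cap.release()
cv2.destroyAllWindows()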
|
c193cf543ab0209bfa058ded5d760fba5602ed42
| 899
|
py
|
Python
|
shop/cart/modifiers/rebate_modifiers.py
|
ionata-archive/django-shop
|
9197d71c038bd9b236e405fe8031993c09cf1514
|
[
"BSD-3-Clause"
] | null | null | null |
shop/cart/modifiers/rebate_modifiers.py
|
ionata-archive/django-shop
|
9197d71c038bd9b236e405fe8031993c09cf1514
|
[
"BSD-3-Clause"
] | null | null | null |
shop/cart/modifiers/rebate_modifiers.py
|
ionata-archive/django-shop
|
9197d71c038bd9b236e405fe8031993c09cf1514
|
[
"BSD-3-Clause"
] | null | null | null |
#-*- coding: utf-8 -*-
from decimal import Decimal
from shop.cart.cart_modifiers_base import BaseCartModifier
class BulkRebateModifier(BaseCartModifier):
def add_extra_cart_item_price_field(self, cart_item):
"""
Add a rebate to a line item depending on the quantity ordered:
This serves as an example mass rebate modifier: if you buy more than
5 items of the same kind, you get 10% off the bunch
>>> cart_item.extra_price_fields.update({'Rebate': Decimal('10.0')})
"""
REBATE_PERCENTAGE = Decimal('10')
NUMBER_OF_ITEMS_TO_TRIGGER_REBATE = 5
if cart_item.quantity >= NUMBER_OF_ITEMS_TO_TRIGGER_REBATE:
rebate = (REBATE_PERCENTAGE/100) * cart_item.line_subtotal
to_append = ('Rebate', -rebate)
cart_item.extra_price_fields.append(to_append)
return cart_item
| 40.863636
| 76
| 0.670745
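The rebate arithmetic above is easy to check in isolation; the stand-in cart item below is only a stub exposing the two attributes the modifier reads, not a real django-shop model, and the subtotal is an example.

from decimal import Decimal
from shop.cart.modifiers.rebate_modifiers import BulkRebateModifier  # requires django-shop on the path

class FakeCartItem(object):
    def __init__(self, quantity, line_subtotal):
        self.quantity = quantity
        self.line_subtotal = line_subtotal
        self.extra_price_fields = []

item = FakeCartItem(quantity=6, line_subtotal=Decimal('120.00'))
BulkRebateModifier().add_extra_cart_item_price_field(item)   # assumes the base class needs no args
print(item.extra_price_fields)   # [('Rebate', Decimal('-12.000'))] -- 10% off the line subtotal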
|
1e38ba3cc300a35717372d1d3204931ba401e76f
| 14,100
|
py
|
Python
|
isi_sdk_8_1_1/isi_sdk_8_1_1/models/providers_krb5_krb5_item.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 24
|
2018-06-22T14:13:23.000Z
|
2022-03-23T01:21:26.000Z
|
isi_sdk_8_1_1/isi_sdk_8_1_1/models/providers_krb5_krb5_item.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 46
|
2018-04-30T13:28:22.000Z
|
2022-03-21T21:11:07.000Z
|
isi_sdk_8_1_1/isi_sdk_8_1_1/models/providers_krb5_krb5_item.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 29
|
2018-06-19T00:14:04.000Z
|
2022-02-08T17:51:19.000Z
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 6
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from isi_sdk_8_1_1.models.providers_krb5_id_params_keytab_entry import ProvidersKrb5IdParamsKeytabEntry # noqa: F401,E501
class ProvidersKrb5Krb5Item(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'groupnet': 'str',
'id': 'str',
'keytab_entries': 'list[ProvidersKrb5IdParamsKeytabEntry]',
'keytab_file': 'str',
'manual_keying': 'bool',
'name': 'str',
'realm': 'str',
'recommended_spns': 'list[str]',
'status': 'str',
'system': 'bool',
'user': 'str'
}
attribute_map = {
'groupnet': 'groupnet',
'id': 'id',
'keytab_entries': 'keytab_entries',
'keytab_file': 'keytab_file',
'manual_keying': 'manual_keying',
'name': 'name',
'realm': 'realm',
'recommended_spns': 'recommended_spns',
'status': 'status',
'system': 'system',
'user': 'user'
}
def __init__(self, groupnet=None, id=None, keytab_entries=None, keytab_file=None, manual_keying=None, name=None, realm=None, recommended_spns=None, status=None, system=None, user=None): # noqa: E501
"""ProvidersKrb5Krb5Item - a model defined in Swagger""" # noqa: E501
self._groupnet = None
self._id = None
self._keytab_entries = None
self._keytab_file = None
self._manual_keying = None
self._name = None
self._realm = None
self._recommended_spns = None
self._status = None
self._system = None
self._user = None
self.discriminator = None
if groupnet is not None:
self.groupnet = groupnet
if id is not None:
self.id = id
if keytab_entries is not None:
self.keytab_entries = keytab_entries
if keytab_file is not None:
self.keytab_file = keytab_file
if manual_keying is not None:
self.manual_keying = manual_keying
if name is not None:
self.name = name
if realm is not None:
self.realm = realm
if recommended_spns is not None:
self.recommended_spns = recommended_spns
if status is not None:
self.status = status
if system is not None:
self.system = system
if user is not None:
self.user = user
@property
def groupnet(self):
"""Gets the groupnet of this ProvidersKrb5Krb5Item. # noqa: E501
Groupnet identifier. # noqa: E501
:return: The groupnet of this ProvidersKrb5Krb5Item. # noqa: E501
:rtype: str
"""
return self._groupnet
@groupnet.setter
def groupnet(self, groupnet):
"""Sets the groupnet of this ProvidersKrb5Krb5Item.
Groupnet identifier. # noqa: E501
:param groupnet: The groupnet of this ProvidersKrb5Krb5Item. # noqa: E501
:type: str
"""
if groupnet is not None and len(groupnet) > 255:
raise ValueError("Invalid value for `groupnet`, length must be less than or equal to `255`") # noqa: E501
if groupnet is not None and len(groupnet) < 0:
raise ValueError("Invalid value for `groupnet`, length must be greater than or equal to `0`") # noqa: E501
self._groupnet = groupnet
@property
def id(self):
"""Gets the id of this ProvidersKrb5Krb5Item. # noqa: E501
Specifies the Kerberos provider ID. # noqa: E501
:return: The id of this ProvidersKrb5Krb5Item. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ProvidersKrb5Krb5Item.
Specifies the Kerberos provider ID. # noqa: E501
:param id: The id of this ProvidersKrb5Krb5Item. # noqa: E501
:type: str
"""
if id is not None and len(id) > 255:
raise ValueError("Invalid value for `id`, length must be less than or equal to `255`") # noqa: E501
if id is not None and len(id) < 0:
raise ValueError("Invalid value for `id`, length must be greater than or equal to `0`") # noqa: E501
self._id = id
@property
def keytab_entries(self):
"""Gets the keytab_entries of this ProvidersKrb5Krb5Item. # noqa: E501
Specifies the key information for the Kerberos SPNs. # noqa: E501
:return: The keytab_entries of this ProvidersKrb5Krb5Item. # noqa: E501
:rtype: list[ProvidersKrb5IdParamsKeytabEntry]
"""
return self._keytab_entries
@keytab_entries.setter
def keytab_entries(self, keytab_entries):
"""Sets the keytab_entries of this ProvidersKrb5Krb5Item.
Specifies the key information for the Kerberos SPNs. # noqa: E501
:param keytab_entries: The keytab_entries of this ProvidersKrb5Krb5Item. # noqa: E501
:type: list[ProvidersKrb5IdParamsKeytabEntry]
"""
self._keytab_entries = keytab_entries
@property
def keytab_file(self):
"""Gets the keytab_file of this ProvidersKrb5Krb5Item. # noqa: E501
Specifies the path to a keytab file to import. # noqa: E501
:return: The keytab_file of this ProvidersKrb5Krb5Item. # noqa: E501
:rtype: str
"""
return self._keytab_file
@keytab_file.setter
def keytab_file(self, keytab_file):
"""Sets the keytab_file of this ProvidersKrb5Krb5Item.
Specifies the path to a keytab file to import. # noqa: E501
:param keytab_file: The keytab_file of this ProvidersKrb5Krb5Item. # noqa: E501
:type: str
"""
if keytab_file is not None and len(keytab_file) > 4096:
raise ValueError("Invalid value for `keytab_file`, length must be less than or equal to `4096`") # noqa: E501
if keytab_file is not None and len(keytab_file) < 0:
raise ValueError("Invalid value for `keytab_file`, length must be greater than or equal to `0`") # noqa: E501
self._keytab_file = keytab_file
@property
def manual_keying(self):
"""Gets the manual_keying of this ProvidersKrb5Krb5Item. # noqa: E501
If true, keys are managed manually. If false, keys are managed through kadmin. # noqa: E501
:return: The manual_keying of this ProvidersKrb5Krb5Item. # noqa: E501
:rtype: bool
"""
return self._manual_keying
@manual_keying.setter
def manual_keying(self, manual_keying):
"""Sets the manual_keying of this ProvidersKrb5Krb5Item.
If true, keys are managed manually. If false, keys are managed through kadmin. # noqa: E501
:param manual_keying: The manual_keying of this ProvidersKrb5Krb5Item. # noqa: E501
:type: bool
"""
self._manual_keying = manual_keying
@property
def name(self):
"""Gets the name of this ProvidersKrb5Krb5Item. # noqa: E501
Specifies the Kerberos provider name. # noqa: E501
:return: The name of this ProvidersKrb5Krb5Item. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ProvidersKrb5Krb5Item.
Specifies the Kerberos provider name. # noqa: E501
:param name: The name of this ProvidersKrb5Krb5Item. # noqa: E501
:type: str
"""
if name is not None and len(name) > 255:
raise ValueError("Invalid value for `name`, length must be less than or equal to `255`") # noqa: E501
if name is not None and len(name) < 0:
raise ValueError("Invalid value for `name`, length must be greater than or equal to `0`") # noqa: E501
self._name = name
@property
def realm(self):
"""Gets the realm of this ProvidersKrb5Krb5Item. # noqa: E501
Specifies the name of realm. # noqa: E501
:return: The realm of this ProvidersKrb5Krb5Item. # noqa: E501
:rtype: str
"""
return self._realm
@realm.setter
def realm(self, realm):
"""Sets the realm of this ProvidersKrb5Krb5Item.
Specifies the name of realm. # noqa: E501
:param realm: The realm of this ProvidersKrb5Krb5Item. # noqa: E501
:type: str
"""
if realm is not None and len(realm) > 255:
raise ValueError("Invalid value for `realm`, length must be less than or equal to `255`") # noqa: E501
if realm is not None and len(realm) < 0:
raise ValueError("Invalid value for `realm`, length must be greater than or equal to `0`") # noqa: E501
self._realm = realm
@property
def recommended_spns(self):
"""Gets the recommended_spns of this ProvidersKrb5Krb5Item. # noqa: E501
Specifies the recommended SPNs. # noqa: E501
:return: The recommended_spns of this ProvidersKrb5Krb5Item. # noqa: E501
:rtype: list[str]
"""
return self._recommended_spns
@recommended_spns.setter
def recommended_spns(self, recommended_spns):
"""Sets the recommended_spns of this ProvidersKrb5Krb5Item.
Specifies the recommended SPNs. # noqa: E501
:param recommended_spns: The recommended_spns of this ProvidersKrb5Krb5Item. # noqa: E501
:type: list[str]
"""
self._recommended_spns = recommended_spns
@property
def status(self):
"""Gets the status of this ProvidersKrb5Krb5Item. # noqa: E501
Specifies the status of the provider. # noqa: E501
:return: The status of this ProvidersKrb5Krb5Item. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this ProvidersKrb5Krb5Item.
Specifies the status of the provider. # noqa: E501
:param status: The status of this ProvidersKrb5Krb5Item. # noqa: E501
:type: str
"""
if status is not None and len(status) > 255:
raise ValueError("Invalid value for `status`, length must be less than or equal to `255`") # noqa: E501
if status is not None and len(status) < 0:
raise ValueError("Invalid value for `status`, length must be greater than or equal to `0`") # noqa: E501
self._status = status
@property
def system(self):
"""Gets the system of this ProvidersKrb5Krb5Item. # noqa: E501
If true, indicates that this provider instance was created by OneFS and cannot be removed # noqa: E501
:return: The system of this ProvidersKrb5Krb5Item. # noqa: E501
:rtype: bool
"""
return self._system
@system.setter
def system(self, system):
"""Sets the system of this ProvidersKrb5Krb5Item.
If true, indicates that this provider instance was created by OneFS and cannot be removed # noqa: E501
:param system: The system of this ProvidersKrb5Krb5Item. # noqa: E501
:type: bool
"""
self._system = system
@property
def user(self):
"""Gets the user of this ProvidersKrb5Krb5Item. # noqa: E501
Specifies the name of the user that performs kadmin tasks. # noqa: E501
:return: The user of this ProvidersKrb5Krb5Item. # noqa: E501
:rtype: str
"""
return self._user
@user.setter
def user(self, user):
"""Sets the user of this ProvidersKrb5Krb5Item.
Specifies the name of the user that performs kadmin tasks. # noqa: E501
:param user: The user of this ProvidersKrb5Krb5Item. # noqa: E501
:type: str
"""
if user is not None and len(user) > 255:
raise ValueError("Invalid value for `user`, length must be less than or equal to `255`") # noqa: E501
if user is not None and len(user) < 0:
raise ValueError("Invalid value for `user`, length must be greater than or equal to `0`") # noqa: E501
self._user = user
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ProvidersKrb5Krb5Item):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 33.176471
| 203
| 0.609645
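A short sketch of constructing the generated model above and serializing it back to a dict; the field values are examples only.

from isi_sdk_8_1_1.models.providers_krb5_krb5_item import ProvidersKrb5Krb5Item

item = ProvidersKrb5Krb5Item(
    groupnet='groupnet0',
    name='EXAMPLE.COM-provider',
    realm='EXAMPLE.COM',
    manual_keying=False,
    user='kadmin-user',
)
print(item.to_dict())   # plain dict keyed by the swagger attribute names; unset fields stay None
print(item == ProvidersKrb5Krb5Item(realm='EXAMPLE.COM'))   # False: __eq__ compares every field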
|
27b222b6028081725f25826a990984fb5c12a0ad
| 1,398
|
py
|
Python
|
socket-server.py
|
edlane/python-debug-harness
|
45a98df0d9b2ba5164209c2a873f68f3bae96402
|
[
"Apache-2.0"
] | null | null | null |
socket-server.py
|
edlane/python-debug-harness
|
45a98df0d9b2ba5164209c2a873f68f3bae96402
|
[
"Apache-2.0"
] | null | null | null |
socket-server.py
|
edlane/python-debug-harness
|
45a98df0d9b2ba5164209c2a873f68f3bae96402
|
[
"Apache-2.0"
] | null | null | null |
import sys
import socket
import threading
import SocketServer
class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler):
def handle(self):
data = self.request.recv(1024)
cur_thread = threading.current_thread()
response = "{}: {}".format(cur_thread.name, data)
self.request.sendall(response)
class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
pass
def client(ip, port, message):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((ip, port))
try:
sock.sendall(message)
response = sock.recv(1024)
# print "Received: {}".format(response)
finally:
sock.close()
if __name__ == "__main__":
# Port 0 means to select an arbitrary unused port
HOST, PORT = "localhost", 0
server = ThreadedTCPServer((HOST, PORT), ThreadedTCPRequestHandler)
ip, port = server.server_address
# Start a thread with the server -- that thread will then start one
# more thread for each request
server_thread = threading.Thread(target=server.serve_forever)
# Exit the server thread when the main thread terminates
server_thread.daemon = True
server_thread.start()
print "Server loop running in thread:", server_thread.name
for x in xrange(0, int(sys.argv[1])):
client(ip, port, "Hello World " + str(x))
server.shutdown()
| 30.391304
| 77
| 0.690987
|
f17e40ed6efff60d12f36143476fb6deb83f6f8c
| 1,103
|
py
|
Python
|
SimGeneral/Debugging/test/runSimDigiDumper_cfg.py
|
gputtley/cmssw
|
c1ef8454804e4ebea8b65f59c4a952a6c94fde3b
|
[
"Apache-2.0"
] | 3
|
2018-08-24T19:10:26.000Z
|
2019-02-19T11:45:32.000Z
|
SimGeneral/Debugging/test/runSimDigiDumper_cfg.py
|
gputtley/cmssw
|
c1ef8454804e4ebea8b65f59c4a952a6c94fde3b
|
[
"Apache-2.0"
] | 26
|
2018-10-30T12:47:58.000Z
|
2022-03-29T08:39:00.000Z
|
SimGeneral/Debugging/test/runSimDigiDumper_cfg.py
|
p2l1pfp/cmssw
|
9bda22bf33ecf18dd19a3af2b3a8cbdb1de556a9
|
[
"Apache-2.0"
] | 5
|
2018-08-21T16:37:52.000Z
|
2020-01-09T13:33:17.000Z
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("SimDigiDump")
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.load("SimGeneral.HepPDTESSource.pythiapdt_cfi")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:myfile.root')
)
process.prod = cms.EDAnalyzer("SimDigiDumper",
MuCSCStripSrc = cms.InputTag("simMuonCSCDigis","MuonCSCStripDigi"),
MuDTSrc = cms.InputTag("simMuonDTDigis"),
HCalDigi = cms.InputTag("simHcalDigis"),
ZdcDigi = cms.InputTag("simHcalUnsuppressedDigis"),
MuCSCWireSrc = cms.InputTag("simMuonCSCDigis","MuonCSCWireDigi"),
ECalEESrc = cms.InputTag("simEcalDigis","eeDigis"),
SiStripSrc = cms.InputTag("simSiStripDigis","ZeroSuppressed"),
SiPxlSrc = cms.InputTag("simSiPixelDigis"),
ECalEBSrc = cms.InputTag("simEcalDigis","ebDigis"),
ECalESSrc = cms.InputTag("simEcalPreshowerDigis"),
MuRPCSrc = cms.InputTag("simMuonRPCDigis")
)
process.p1 = cms.Path(process.prod)
| 34.46875
| 79
| 0.726201
|
cb08ae274a5819afe9e7dd6d360926696844e503
| 5,042
|
py
|
Python
|
nlpsandbox/exceptions.py
|
Sage-Bionetworks/nlp-sandbox-client
|
e51720b35ca3413ccee71b9cdc223ce3578fe0fd
|
[
"Apache-2.0"
] | 3
|
2021-06-15T16:36:10.000Z
|
2021-11-15T01:44:46.000Z
|
nlpsandbox/exceptions.py
|
nlpsandbox/nlpsandbox-client
|
8cba4f65ff2c06cbef7dc50f45b0aec9b8ee0476
|
[
"Apache-2.0"
] | 165
|
2020-11-23T00:36:40.000Z
|
2022-03-24T00:53:59.000Z
|
nlpsandbox/exceptions.py
|
data2health/nlp-sandbox-evaluation
|
e51720b35ca3413ccee71b9cdc223ce3578fe0fd
|
[
"Apache-2.0"
] | 3
|
2020-12-11T00:04:13.000Z
|
2022-01-03T16:59:10.000Z
|
"""
NLP Sandbox API
NLP Sandbox REST API # noqa: E501
The version of the OpenAPI document: 1.2.0
Contact: team@nlpsandbox.io
Generated by: https://openapi-generator.tech
"""
class OpenApiException(Exception):
"""The base exception class for all OpenAPIExceptions"""
class ApiTypeError(OpenApiException, TypeError):
def __init__(self, msg, path_to_item=None, valid_classes=None,
key_type=None):
""" Raises an exception for TypeErrors
Args:
msg (str): the exception message
Keyword Args:
            path_to_item (list): a list of keys and indices to get to the
current_item
None if unset
valid_classes (tuple): the primitive classes that current item
should be an instance of
None if unset
key_type (bool): False if our value is a value in a dict
True if it is a key in a dict
False if our item is an item in a list
None if unset
"""
self.path_to_item = path_to_item
self.valid_classes = valid_classes
self.key_type = key_type
full_msg = msg
if path_to_item:
full_msg = "{0} at {1}".format(msg, render_path(path_to_item))
super(ApiTypeError, self).__init__(full_msg)
class ApiValueError(OpenApiException, ValueError):
def __init__(self, msg, path_to_item=None):
"""
Args:
msg (str): the exception message
Keyword Args:
path_to_item (list) the path to the exception in the
received_data dict. None if unset
"""
self.path_to_item = path_to_item
full_msg = msg
if path_to_item:
full_msg = "{0} at {1}".format(msg, render_path(path_to_item))
super(ApiValueError, self).__init__(full_msg)
class ApiAttributeError(OpenApiException, AttributeError):
def __init__(self, msg, path_to_item=None):
"""
Raised when an attribute reference or assignment fails.
Args:
msg (str): the exception message
Keyword Args:
path_to_item (None/list) the path to the exception in the
received_data dict
"""
self.path_to_item = path_to_item
full_msg = msg
if path_to_item:
full_msg = "{0} at {1}".format(msg, render_path(path_to_item))
super(ApiAttributeError, self).__init__(full_msg)
class ApiKeyError(OpenApiException, KeyError):
def __init__(self, msg, path_to_item=None):
"""
Args:
msg (str): the exception message
Keyword Args:
path_to_item (None/list) the path to the exception in the
received_data dict
"""
self.path_to_item = path_to_item
full_msg = msg
if path_to_item:
full_msg = "{0} at {1}".format(msg, render_path(path_to_item))
super(ApiKeyError, self).__init__(full_msg)
class ApiException(OpenApiException):
def __init__(self, status=None, reason=None, http_resp=None):
if http_resp:
self.status = http_resp.status
self.reason = http_resp.reason
self.body = http_resp.data
self.headers = http_resp.getheaders()
else:
self.status = status
self.reason = reason
self.body = None
self.headers = None
def __str__(self):
"""Custom error messages for exception"""
error_message = "({0})\n"\
"Reason: {1}\n".format(self.status, self.reason)
if self.headers:
error_message += "HTTP response headers: {0}\n".format(
self.headers)
if self.body:
error_message += "HTTP response body: {0}\n".format(self.body)
return error_message
class NotFoundException(ApiException):
def __init__(self, status=None, reason=None, http_resp=None):
super(NotFoundException, self).__init__(status, reason, http_resp)
class UnauthorizedException(ApiException):
def __init__(self, status=None, reason=None, http_resp=None):
super(UnauthorizedException, self).__init__(status, reason, http_resp)
class ForbiddenException(ApiException):
def __init__(self, status=None, reason=None, http_resp=None):
super(ForbiddenException, self).__init__(status, reason, http_resp)
class ServiceException(ApiException):
def __init__(self, status=None, reason=None, http_resp=None):
super(ServiceException, self).__init__(status, reason, http_resp)
def render_path(path_to_item):
"""Returns a string representation of a path"""
result = ""
for pth in path_to_item:
if isinstance(pth, int):
result += "[{0}]".format(pth)
else:
result += "['{0}']".format(pth)
return result
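# Illustrative usage sketch (not part of the generated module): render_path
# turns a path of dict keys and list indices into a subscript-style string,
# which the exception classes append to their messages.
if __name__ == "__main__":
    path = ["pets", 0, "name"]  # hypothetical path into received data
    print(render_path(path))    # -> ['pets'][0]['name']
    try:
        raise ApiValueError("invalid value", path_to_item=path)
    except ApiValueError as exc:
        print(exc)              # -> invalid value at ['pets'][0]['name']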
| 31.31677
| 78
| 0.599564
|
8fb2af576f4d78fa3c8f6c22b27c2e6e6aba133a
| 5,921
|
py
|
Python
|
cart_venv/Lib/site-packages/tensorflow_estimator/_api/v2/estimator/__init__.py
|
juice1000/Synchronous-vs-Asynchronous-Learning-Tensorflow-
|
654be60f7986ac9bb7ce1d080ddee377c3389f93
|
[
"MIT"
] | 2
|
2019-08-04T20:28:14.000Z
|
2019-10-27T23:26:42.000Z
|
cart_venv/Lib/site-packages/tensorflow_estimator/_api/v2/estimator/__init__.py
|
juice1000/Synchronous-vs-Asynchronous-Learning-Tensorflow-
|
654be60f7986ac9bb7ce1d080ddee377c3389f93
|
[
"MIT"
] | null | null | null |
cart_venv/Lib/site-packages/tensorflow_estimator/_api/v2/estimator/__init__.py
|
juice1000/Synchronous-vs-Asynchronous-Learning-Tensorflow-
|
654be60f7986ac9bb7ce1d080ddee377c3389f93
|
[
"MIT"
] | 1
|
2020-11-04T03:16:29.000Z
|
2020-11-04T03:16:29.000Z
|
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Estimator: High level tools for working with models.
"""
from __future__ import print_function as _print_function
import sys as _sys
from tensorflow_estimator._api.v2.estimator import experimental
from tensorflow_estimator._api.v2.estimator import export
from tensorflow_estimator.python.estimator.canned.baseline import BaselineClassifierV2 as BaselineClassifier
from tensorflow_estimator.python.estimator.canned.baseline import BaselineEstimatorV2 as BaselineEstimator
from tensorflow_estimator.python.estimator.canned.baseline import BaselineRegressorV2 as BaselineRegressor
from tensorflow_estimator.python.estimator.canned.baseline import ModeKeys
from tensorflow_estimator.python.estimator.canned.boosted_trees import BoostedTreesClassifier
from tensorflow_estimator.python.estimator.canned.boosted_trees import BoostedTreesEstimator
from tensorflow_estimator.python.estimator.canned.boosted_trees import BoostedTreesRegressor
from tensorflow_estimator.python.estimator.canned.dnn import DNNClassifierV2 as DNNClassifier
from tensorflow_estimator.python.estimator.canned.dnn import DNNEstimatorV2 as DNNEstimator
from tensorflow_estimator.python.estimator.canned.dnn import DNNRegressorV2 as DNNRegressor
from tensorflow_estimator.python.estimator.canned.dnn_linear_combined import DNNLinearCombinedClassifierV2 as DNNLinearCombinedClassifier
from tensorflow_estimator.python.estimator.canned.dnn_linear_combined import DNNLinearCombinedEstimatorV2 as DNNLinearCombinedEstimator
from tensorflow_estimator.python.estimator.canned.dnn_linear_combined import DNNLinearCombinedRegressorV2 as DNNLinearCombinedRegressor
from tensorflow_estimator.python.estimator.canned.linear import LinearClassifierV2 as LinearClassifier
from tensorflow_estimator.python.estimator.canned.linear import LinearEstimatorV2 as LinearEstimator
from tensorflow_estimator.python.estimator.canned.linear import LinearRegressorV2 as LinearRegressor
from tensorflow_estimator.python.estimator.canned.parsing_utils import classifier_parse_example_spec_v2 as classifier_parse_example_spec
from tensorflow_estimator.python.estimator.canned.parsing_utils import regressor_parse_example_spec_v2 as regressor_parse_example_spec
from tensorflow_estimator.python.estimator.estimator import EstimatorV2 as Estimator
from tensorflow_estimator.python.estimator.estimator import VocabInfo
from tensorflow_estimator.python.estimator.estimator import WarmStartSettings
from tensorflow_estimator.python.estimator.exporter import BestExporter
from tensorflow_estimator.python.estimator.exporter import Exporter
from tensorflow_estimator.python.estimator.exporter import FinalExporter
from tensorflow_estimator.python.estimator.exporter import LatestExporter
from tensorflow_estimator.python.estimator.extenders import add_metrics
from tensorflow_estimator.python.estimator.head.base_head import Head
from tensorflow_estimator.python.estimator.head.binary_class_head import BinaryClassHead
from tensorflow_estimator.python.estimator.head.multi_class_head import MultiClassHead
from tensorflow_estimator.python.estimator.head.multi_head import MultiHead
from tensorflow_estimator.python.estimator.head.multi_label_head import MultiLabelHead
from tensorflow_estimator.python.estimator.head.regression_head import LogisticRegressionHead
from tensorflow_estimator.python.estimator.head.regression_head import PoissonRegressionHead
from tensorflow_estimator.python.estimator.head.regression_head import RegressionHead
from tensorflow_estimator.python.estimator.hooks.basic_session_run_hooks import CheckpointSaverHook
from tensorflow_estimator.python.estimator.hooks.basic_session_run_hooks import CheckpointSaverListener
from tensorflow_estimator.python.estimator.hooks.basic_session_run_hooks import FeedFnHook
from tensorflow_estimator.python.estimator.hooks.basic_session_run_hooks import FinalOpsHook
from tensorflow_estimator.python.estimator.hooks.basic_session_run_hooks import GlobalStepWaiterHook
from tensorflow_estimator.python.estimator.hooks.basic_session_run_hooks import LoggingTensorHook
from tensorflow_estimator.python.estimator.hooks.basic_session_run_hooks import NanLossDuringTrainingError
from tensorflow_estimator.python.estimator.hooks.basic_session_run_hooks import NanTensorHook
from tensorflow_estimator.python.estimator.hooks.basic_session_run_hooks import ProfilerHook
from tensorflow_estimator.python.estimator.hooks.basic_session_run_hooks import SecondOrStepTimer
from tensorflow_estimator.python.estimator.hooks.basic_session_run_hooks import StepCounterHook
from tensorflow_estimator.python.estimator.hooks.basic_session_run_hooks import StopAtStepHook
from tensorflow_estimator.python.estimator.hooks.basic_session_run_hooks import SummarySaverHook
from tensorflow_estimator.python.estimator.hooks.session_run_hook import SessionRunArgs
from tensorflow_estimator.python.estimator.hooks.session_run_hook import SessionRunContext
from tensorflow_estimator.python.estimator.hooks.session_run_hook import SessionRunHook
from tensorflow_estimator.python.estimator.hooks.session_run_hook import SessionRunValues
from tensorflow_estimator.python.estimator.model_fn import EstimatorSpec
from tensorflow_estimator.python.estimator.run_config import RunConfig
from tensorflow_estimator.python.estimator.training import EvalSpec
from tensorflow_estimator.python.estimator.training import TrainSpec
from tensorflow_estimator.python.estimator.training import train_and_evaluate
del _print_function
from tensorflow.python.util import module_wrapper as _module_wrapper
if not isinstance(_sys.modules[__name__], _module_wrapper.TFModuleWrapper):
_sys.modules[__name__] = _module_wrapper.TFModuleWrapper(
_sys.modules[__name__], "estimator", public_apis=None, deprecation=False,
has_lite=False)
| 76.896104
| 137
| 0.898497
|
97b30df027910dc253773d37c64f3ab44defc5b4
| 484
|
py
|
Python
|
src/third_party/snappy.py
|
spencerjackson/mongo
|
51c46e71c9f310fc91168c0945ffa6cfc00d380b
|
[
"Apache-2.0"
] | null | null | null |
src/third_party/snappy.py
|
spencerjackson/mongo
|
51c46e71c9f310fc91168c0945ffa6cfc00d380b
|
[
"Apache-2.0"
] | null | null | null |
src/third_party/snappy.py
|
spencerjackson/mongo
|
51c46e71c9f310fc91168c0945ffa6cfc00d380b
|
[
"Apache-2.0"
] | null | null | null |
def configure( env , fileLists , options ):
myenv = env.Clone()
if not options["windows"]:
myenv.Append(CPPFLAGS=" -Wno-sign-compare -Wno-unused-function ") #snappy doesn't compile cleanly
files = ["$BUILD_DIR/third_party/snappy/snappy.cc", "$BUILD_DIR/third_party/snappy/snappy-sinksource.cc"]
fileLists["serverOnlyFiles"] += [ myenv.Object(f) for f in files ]
def configureSystem( env , fileLists , options ):
configure( env , fileLists , options )
| 34.571429
| 109
| 0.690083
|
dba01925051ab301755dd4a749b5f493af6ff92e
| 10,344
|
py
|
Python
|
src/settings.py
|
rdococ/lykos
|
fec1b361a6a539c997366a6536b2fcfbcfb1953a
|
[
"BSD-2-Clause"
] | null | null | null |
src/settings.py
|
rdococ/lykos
|
fec1b361a6a539c997366a6536b2fcfbcfb1953a
|
[
"BSD-2-Clause"
] | null | null | null |
src/settings.py
|
rdococ/lykos
|
fec1b361a6a539c997366a6536b2fcfbcfb1953a
|
[
"BSD-2-Clause"
] | null | null | null |
import fnmatch
import re
import threading
from collections import OrderedDict
LANGUAGE = 'en'
MINIMUM_WAIT = 60
EXTRA_WAIT = 30
EXTRA_WAIT_JOIN = 0 # Add this many seconds to the waiting time for each !join
WAIT_AFTER_JOIN = 25 # Wait at least this many seconds after the last join
# token bucket for the IRC client; 1 token = 1 message sent to IRC
# Run the bot with --lagtest to receive settings recommendations for this
IRC_TB_INIT = 23 # initial number of tokens
IRC_TB_DELAY = 1.73 # wait time between adding tokens
IRC_TB_BURST = 23 # maximum number of tokens that can be accumulated
# !wait uses a token bucket
WAIT_TB_INIT = 2 # initial number of tokens
WAIT_TB_DELAY = 240 # wait time between adding tokens
WAIT_TB_BURST = 3 # maximum number of tokens that can be accumulated
STATS_RATE_LIMIT = 60
VOTES_RATE_LIMIT = 60
ADMINS_RATE_LIMIT = 300
GSTATS_RATE_LIMIT = 0
PSTATS_RATE_LIMIT = 0
RSTATS_RATE_LIMIT = 0
TIME_RATE_LIMIT = 10
START_RATE_LIMIT = 10 # (per-user)
WAIT_RATE_LIMIT = 10 # (per-user)
GOAT_RATE_LIMIT = 300 # (per-user)
MIN_PLAYERS = 4
MAX_PLAYERS = 24
NIGHT_TIME_LIMIT = 120
NIGHT_TIME_WARN = 90 # should be less than NIGHT_TIME_LIMIT
DAY_TIME_LIMIT = 720
DAY_TIME_WARN = 600 # should be less than DAY_TIME_LIMIT
JOIN_TIME_LIMIT = 3600
# May only be set if the above are also set
SHORT_DAY_PLAYERS = 6 # Number of players left to have a short day
SHORT_DAY_LIMIT = 520
SHORT_DAY_WARN = 400
# If time lord dies, the timers get set to this instead (60s day, 30s night)
TIME_LORD_DAY_LIMIT = 60
TIME_LORD_DAY_WARN = 45
TIME_LORD_NIGHT_LIMIT = 30
TIME_LORD_NIGHT_WARN = 0
KILL_IDLE_TIME = 300
WARN_IDLE_TIME = 180
PM_WARN_IDLE_TIME = 240
PART_GRACE_TIME = 30
QUIT_GRACE_TIME = 60
ACC_GRACE_TIME = 30
START_QUIT_DELAY = 10
# controls how many people it does in one /msg; only works for messages that are the same
MAX_PRIVMSG_TARGETS = 4
# how many mode values can be specified at once; used only as fallback
MODELIMIT = 3
QUIET_DEAD_PLAYERS = False
DEVOICE_DURING_NIGHT = False
ALWAYS_PM_ROLE = False
QUIET_MODE = "q" # "q" or "b"
QUIET_PREFIX = "" # "" or "~q:"
ACCOUNT_PREFIX = "$a:" # "$a:" or "~a:"
# The bot will automatically toggle those modes of people joining
AUTO_TOGGLE_MODES = ""
DEFAULT_EXPIRY = "30d"
LEAVE_PENALTY = 1
LEAVE_EXPIRY = "30d"
IDLE_PENALTY = 1
IDLE_EXPIRY = "30d"
PART_PENALTY = 1
PART_EXPIRY = "30d"
ACC_PENALTY = 1
ACC_EXPIRY = "30d"
# Give penalties if idling night.
# All other penalties take precedence over night penalties; only one penalty will be given per game.
NIGHT_IDLE_PENALTY = 1
NIGHT_IDLE_EXPIRY = "14d"
# If True, disallows adding stasis via !fstasis (requires warnings instead)
RESTRICT_FSTASIS = True
# The formatting of this sucks, sorry. This is used to automatically apply sanctions to warning levels
# When a user crosses from below the min threshold to min or above points, the listed sanctions apply
# Sanctions also apply while moving within the same threshold bracket (such as from min to max)
# Valid sanctions are deny, stasis, scalestasis, and tempban
# Scalestasis applies stasis equal to the formula ax^2 + bx + c, where x is the number of warning points
# Tempban number can either be a duration (ending in d, h, or m) or a number meaning it expires when
# warning points fall below that threshold.
AUTO_SANCTION = (
#min max sanctions
(6, 10, {"stasis": 1}),
(11, 15, {"scalestasis": (0, 1, -8)}),
(16, 16, {"tempban": 8})
)
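# Worked example (illustrative, not a configuration change): in the (11, 15)
# bracket above, scalestasis (a, b, c) = (0, 1, -8) applies a*x**2 + b*x + c
# stasis for x warning points, so a player reaching x = 12 would receive
# 0*144 + 1*12 - 8 = 4 stasis.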
# Send a message to deadchat or wolfchat when a user spectates them
SPECTATE_NOTICE = True
# Whether to include which user is doing the spectating in the message
SPECTATE_NOTICE_USER = False
# The following is a bitfield, and they can be mixed together
# Defaults to none of these, can be changed on a per-game-mode basis
RESTRICT_WOLFCHAT = 0x00
### DO NOT CHANGE THESE!
### They are for easier code interpretation/modification
RW_DISABLE_NIGHT = 0x01 # Disable during night (commands are still relayed)
RW_DISABLE_DAY = 0x02 # Disable during day (commands are still relayed)
RW_ONLY_KILL_CMD = 0x04 # Only relay kill commands when wolfchat is disabled
RW_ONLY_SAME_CMD = 0x08 # Only relay commands to other people who have access to the same command
RW_WOLVES_ONLY_CHAT = 0x10 # Non-wolves cannot participate in wolfchat (commands still relayed as applicable)
RW_NO_INTERACTION = 0x20 # Do not relay commands to/from non-wolves regardless of other settings
RW_REM_NON_WOLVES = 0x40 # Remove non-wolves from wolfchat entirely (can be killed, do not count towards wolf win condition, do not show in wolflist, etc.)
RW_TRAITOR_NON_WOLF = 0x80 # Consider traitor as a non-wolf for the purposes of the above restrictions (if unset, traitor is treated the same as wolf cub)
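# Illustrative combination (hypothetical, not a shipped default): a game mode
# that disables wolfchat at night and, while disabled, only relays kill
# commands, would set:
#     RESTRICT_WOLFCHAT = RW_DISABLE_NIGHT | RW_ONLY_KILL_CMD   # 0x01 | 0x04 == 0x05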
ENABLE_DEADCHAT = True # dead players can communicate with each other
ABSTAIN_ENABLED = True # whether village can !abstain in order to not vote anyone during day
LIMIT_ABSTAIN = True # if true, village will be limited to successfully !abstaining a vote only once
SELF_LYNCH_ALLOWED = True
HIDDEN_TRAITOR = True
HIDDEN_AMNESIAC = False # amnesiac still shows as amnesiac if killed even after turning
HIDDEN_CLONE = False
GUARDIAN_ANGEL_CAN_GUARD_SELF = True
START_WITH_DAY = False
ROLE_REVEAL = "on" # on/off/team - what role information is shown on death
STATS_TYPE = "default" # default/accurate/team/disabled - what role information is shown when doing !stats
START_VOTES_SCALE = 0.3
START_VOTES_MAX = 4
# Debug mode settings, whether or not timers and stasis should apply during debug mode
DISABLE_DEBUG_MODE_TIMERS = True
DISABLE_DEBUG_MODE_TIME_LORD = False
DISABLE_DEBUG_MODE_REAPER = True
DISABLE_DEBUG_MODE_STASIS = True
DEBUG_MODE_NOTHROW_MESSAGES = True
# number of bullets a gunner role gets when the role is assigned or swapped in
SHOTS_MULTIPLIER = {
"gunner": 0.12,
"sharpshooter": 0.06,
"wolf gunner": 0.06
}
# hit, miss, and headshot chances for each gunner role (explode = 1 - hit - miss)
GUN_CHANCES = {
"gunner": (15/20, 4/20, 4/20), # 75% hit, 20% miss, 5% explode, 20% headshot
"sharpshooter": (1, 0, 1), # 100% hit, 0% miss, 0% explode, 100% headshot
"wolf gunner": (14/20, 6/20, 12/20) # 70% hit, 30% miss, 0% explode, 60% headshot
}
# modifier applied to regular gun chances if the user is also drunk
DRUNK_GUN_CHANCES = (-5/20, 4/20, -3/20) # -25% hit, +20% miss, +5% explode, -15% headshot
DRUNK_SHOTS_MULTIPLIER = 3
GUNNER_KILLS_WOLF_AT_NIGHT_CHANCE = 1/4
# at night, the wolf can steal 1 bullet from the victim and become a wolf gunner
# (will always be 1 bullet regardless of SHOTS_MULTIPLIER setting for wolf gunner above)
WOLF_STEALS_GUN = True
GUARDIAN_ANGEL_DIES_CHANCE = 0
BODYGUARD_DIES_CHANCE = 0
DETECTIVE_REVEALED_CHANCE = 2/5
FALLEN_ANGEL_KILLS_GUARDIAN_ANGEL_CHANCE = 1/2
AMNESIAC_NIGHTS = 3 # amnesiac gets to know their actual role on this night
DOCTOR_IMMUNIZATION_MULTIPLIER = 0.135 # ceil(num_players * multiplier) = number of immunizations
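# Example (illustrative): with the 0.135 multiplier above, a 12-player game
# gives ceil(12 * 0.135) = ceil(1.62) = 2 doctor immunizations.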
GAME_MODES = {}
GAME_PHASES = ("night", "day") # all phases that constitute "in game", game modes can extend this with custom phases
# IP address to bind to before connecting, or empty string to use OS default
BINDHOST = ""
# Disable CPRIVMSG/CNOTICE -- some ircds implicitly treat regular PRIVMSG and NOTICE as such, and support more
# targets per message the normal way than with the explicit command
DISABLE_CPRIVMSG = False
SSL_VERIFY = True
SSL_CERTFP = ()
# Tracking Mozilla's "intermediate" compatibility list -- https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29
SSL_CIPHERS = ( # single string split over multiple lines - lack of commas intentional
"ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-"
"SHA384:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-"
"SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-"
"AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES256-SHA:ECDHE-ECDSA-DES-CBC3-SHA:ECDHE-RSA-DES-CBC3-"
"SHA:EDH-RSA-DES-CBC3-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:DES-CBC3-SHA:!DSS"
)
SSL_CERTFILE = None
SSL_KEYFILE = None
NICKSERV = "NickServ"
NICKSERV_IDENTIFY_COMMAND = "IDENTIFY {account} {password}"
NICKSERV_GHOST_COMMAND = "GHOST {nick}"
NICKSERV_RELEASE_COMMAND = "RELEASE {nick}"
NICKSERV_REGAIN_COMMAND = "REGAIN {nick}"
CHANSERV = "ChanServ"
CHANSERV_OP_COMMAND = "OP {channel}"
GUEST_NICK_PATTERN = r"^Guest\d+$|^\d|away.+|.+away"
LOG_CHANNEL = "" # Log !fwarns to this channel, if set
LOG_PREFIX = "" # Message prefix for LOG_CHANNEL
DEV_CHANNEL = ""
DEV_PREFIX = ""
# Data collection settings. lykos will send details about errors that happen to the lykos developers,
# these settings control how much data is sent. Please see https://werewolf.chat/dc for more information.
# These settings additionally impacts what data is written to the error log.
TRACEBACK_VERBOSITY = 2 # 0 = no locals at all, 1 = innermost frame's locals, 2 = all locals
USER_DATA_LEVEL = 0 # 0 = fully anonymize users, 1 = expose nick only, 2 = expose full hostmask, account, and channel membership
CHANNEL_DATA_LEVEL = 0 # 0 = fully anonymize channels, 1 = expose channel name
# How often to ping the server (in seconds) to detect unclean disconnection
SERVER_PING_INTERVAL = 120
# The default role can be anything, but HIDDEN_ROLE must be either "villager" or "cultist";
# hidden roles are informed they are HIDDEN_ROLE (ergo that role should not have any abilities),
# and win with that role's team. Seer sees all non-safe and non-cursed roles as HIDDEN_ROLE.
DEFAULT_ROLE = "villager"
HIDDEN_ROLE = "villager"
# Roles listed here cannot be used in !fgame roles=blah.
DISABLED_ROLES = frozenset()
# Game modes that cannot be randomly picked or voted for
DISABLED_GAMEMODES = frozenset()
# Commands listed here cannot be used by anyone (even admins/owners)
DISABLED_COMMANDS = frozenset()
GIF_CHANCE = 1/50
ALL_FLAGS = frozenset("AaDdFgjmNpSsw")
GRAVEYARD_LOCK = threading.RLock()
WARNING_LOCK = threading.RLock()
# vim: set sw=4 expandtab:
| 42.393443
| 157
| 0.763631
|
c3c8771817c94c8d52c3a981908a7fa7762951f9
| 274
|
py
|
Python
|
se34euca/runtest_view_page.py
|
eucalyptus/se34euca
|
af5da36754fccca84b7f260ba7605b8fdc30fa55
|
[
"BSD-2-Clause"
] | 8
|
2015-01-08T21:06:08.000Z
|
2019-10-26T13:17:16.000Z
|
se34euca/runtest_view_page.py
|
eucalyptus/se34euca
|
af5da36754fccca84b7f260ba7605b8fdc30fa55
|
[
"BSD-2-Clause"
] | null | null | null |
se34euca/runtest_view_page.py
|
eucalyptus/se34euca
|
af5da36754fccca84b7f260ba7605b8fdc30fa55
|
[
"BSD-2-Clause"
] | 7
|
2016-08-31T07:02:21.000Z
|
2020-07-18T00:10:36.000Z
|
#!/usr/bin/python
import se34euca
from se34euca.testcase.testcase_view_page import testcase_view_page
class ViewPage(se34euca.TestRunner):
testcase = "check_login_and_logout"
testclass = testcase_view_page
if __name__ == "__main__":
ViewPage().start_test()
| 19.571429
| 67
| 0.773723
|
8facf76fded2920425e3cd61578bb981c09fe709
| 1,794
|
py
|
Python
|
aerosandbox/visualization/plotly.py
|
raihaan123/AeroSandbox
|
1e7c78f04b066415f671237a4833ba98901bb9ec
|
[
"MIT"
] | 322
|
2019-05-29T20:40:04.000Z
|
2022-03-29T12:46:45.000Z
|
aerosandbox/visualization/plotly.py
|
raihaan123/AeroSandbox
|
1e7c78f04b066415f671237a4833ba98901bb9ec
|
[
"MIT"
] | 55
|
2019-07-14T09:52:59.000Z
|
2022-03-28T16:02:21.000Z
|
aerosandbox/visualization/plotly.py
|
raihaan123/AeroSandbox
|
1e7c78f04b066415f671237a4833ba98901bb9ec
|
[
"MIT"
] | 68
|
2019-06-02T09:57:26.000Z
|
2022-03-28T15:03:47.000Z
|
import plotly.express as px
import plotly.graph_objects as go
import plotly.io as pio
import aerosandbox.numpy as np
# Set the rendering to happen in browser
pio.renderers.default = "browser"
def spy(
matrix,
show=True,
):
"""
Plots the sparsity pattern of a matrix.
:param matrix: The matrix to plot the sparsity pattern of. [2D ndarray or CasADi array]
:param show: Whether or not to show the sparsity plot. [boolean]
:return: The figure to be plotted [go.Figure]
"""
try:
matrix = matrix.toarray()
except:
pass
abs_m = np.abs(matrix)
sparsity_pattern = abs_m >= 1e-16
matrix[sparsity_pattern] = np.log10(abs_m[sparsity_pattern] + 1e-16)
j_index_map, i_index_map = np.meshgrid(np.arange(matrix.shape[1]), np.arange(matrix.shape[0]))
i_index = i_index_map[sparsity_pattern]
j_index = j_index_map[sparsity_pattern]
    # val = matrix[sparsity_pattern]  # unused: overridden below so every nonzero shares one colour
    val = np.ones_like(i_index)
fig = go.Figure(
data=go.Heatmap(
y=i_index,
x=j_index,
z=val,
# type='heatmap',
colorscale='RdBu',
showscale=False,
),
)
fig.update_layout(
plot_bgcolor="black",
xaxis=dict(showgrid=False, zeroline=False),
yaxis=dict(showgrid=False, zeroline=False, autorange="reversed", scaleanchor="x", scaleratio=1),
width=800,
height=800 * (matrix.shape[0] / matrix.shape[1]),
)
if show:
fig.show()
return fig
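# Illustrative usage (not part of the original module; assumes a plain ndarray
# or a SciPy-style sparse matrix with .toarray()):
#     import numpy
#     fig = spy(numpy.eye(50), show=False)   # build the heatmap without opening a browser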
def plot_point_cloud(
p # type: np.ndarray
):
"""
Plots an Nx3 point cloud with Plotly
:param p: An Nx3 array of points to be plotted.
:return: None
"""
p = np.array(p)
px.scatter_3d(x=p[:, 0], y=p[:, 1], z=p[:, 2]).show()
| 27.6
| 104
| 0.61427
|
5a77156150e573812b9dcfb2554339685692d6f3
| 2,544
|
py
|
Python
|
app/app.py
|
paulrinckens/han_for_doc_classification
|
2131e383f8bb779ae33ca658de9f9f66391bea74
|
[
"MIT"
] | 3
|
2020-05-01T10:59:40.000Z
|
2020-12-01T14:41:19.000Z
|
app/app.py
|
paulrinckens/han_for_doc_classification
|
2131e383f8bb779ae33ca658de9f9f66391bea74
|
[
"MIT"
] | 3
|
2020-11-13T18:48:49.000Z
|
2022-02-10T01:55:04.000Z
|
app/app.py
|
paulrinckens/han_for_doc_classification
|
2131e383f8bb779ae33ca658de9f9f66391bea74
|
[
"MIT"
] | 1
|
2020-06-04T08:00:23.000Z
|
2020-06-04T08:00:23.000Z
|
import base64
from io import BytesIO
import matplotlib.colors
import numpy as np
from fastapi import FastAPI, Request
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from han.HAN import HAN
app = FastAPI()
app.mount("/app/static", StaticFiles(directory="app/static"), name="static")
templates = Jinja2Templates(directory="app/templates")
han = HAN()
han.load_model(model_dir="./models",
model_filename="han-10kGNAD.h5",
tokenizer_filename="tokenizer.pickle")
class_names = ['Etat', 'Inland', 'International', 'Kultur', 'Panorama', 'Sport',
'Web', 'Wirtschaft', 'Wissenschaft']
@app.get("/predict")
async def predict(text: str):
probs = han.predict([text])[0]
return dict({class_names[i]: float(probs[i]) for i in range(len(probs))})
@app.get("/visualize")
async def visualize(request: Request, text: str):
probs, attentions = han.predict_and_visualize_attention(text)
prediction = class_names[np.argmax(probs)]
fig = Figure(figsize=(5, 2), tight_layout=True)
ax = fig.add_subplot(1, 1, 1)
y_pos = np.arange(len(class_names))
confidences = [probs[i] for i in range(len(class_names))]
ax.barh(y_pos, confidences, align='center')
ax.set_yticks(y_pos)
ax.set_yticklabels(class_names)
ax.invert_yaxis() # labels read top-to-bottom
ax.set_xlabel('Confidence')
output = BytesIO()
FigureCanvas(fig).print_png(output)
out = base64.b64encode(output.getvalue()).decode()
# weigh word attentions by sqrt of sentence attention
for sent in attentions:
for word in sent[1]:
word[0] *= np.sqrt(sent[0])
return templates.TemplateResponse("han_template.html",
{"request": request,
"prediction": prediction,
"attentions": attentions,
"red_background_hex": red_background_hex,
"blue_background_hex": blue_background_hex,
"plot_url": out})
def scale_color_h_hex(c_h, scale):
return matplotlib.colors.to_hex(
matplotlib.colors.hsv_to_rgb((c_h, scale, 1)))
def red_background_hex(scale):
return scale_color_h_hex(0, scale)
def blue_background_hex(scale):
return scale_color_h_hex(0.625, scale)
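# Illustrative note (not original code): scale_color_h_hex keeps hue and value
# fixed and uses the attention weight as HSV saturation, so a larger weight
# gives a more saturated colour; e.g. red_background_hex(0.0) == "#ffffff"
# (white) and red_background_hex(1.0) == "#ff0000" (pure red).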
| 31.8
| 82
| 0.648585
|
25dc4c3d3dc1d7545e66b30fe1e7895ef3503121
| 9,803
|
py
|
Python
|
util/tracklist.py
|
MeKLiN2/TinyChatBot
|
606c792c2390ef40bfb9ed9277bbe638e1248342
|
[
"MIT"
] | 18
|
2017-10-25T20:14:26.000Z
|
2022-01-16T05:56:31.000Z
|
util/tracklist.py
|
MeKLiN2/TinyChatBot
|
606c792c2390ef40bfb9ed9277bbe638e1248342
|
[
"MIT"
] | 26
|
2017-10-25T17:58:44.000Z
|
2019-07-26T06:01:08.000Z
|
util/tracklist.py
|
MeKLiN2/TinyChatBot
|
606c792c2390ef40bfb9ed9277bbe638e1248342
|
[
"MIT"
] | 23
|
2018-01-12T01:50:23.000Z
|
2022-03-22T02:32:56.000Z
|
import time
class Track:
""" A class representing a track. """
def __init__(self, nick=None, **kwargs):
self.owner = nick
self.rq_time = time.time()
self.id = kwargs.get('video_id')
self.type = kwargs.get('type')
self.title = kwargs.get('video_title')
self.time = float(kwargs.get('video_time', 0))
self.image = kwargs.get('image')
self.start_time = 0
self.pause_time = 0
class PlayList:
""" Class to do various playlist operation with. """
def __init__(self):
self.track_list = []
self.track_index = 0
self.current_track = None
self.is_paused = False
@property
def track(self):
"""
Returns the current track in the track list.
:return: The current Track or None if no track is being timed.
:rtype: Track | None
"""
return self.current_track
@property
def current_index(self):
"""
Return the current track list index.
:return: The current index of the track list.
:rtype: int
"""
return self.track_index
@property
def last_index(self):
"""
Return the last index of the track list.
:return: The last index in the track list.
:rtype: int
"""
if len(self.track_list) == 0:
return 0
else:
return len(self.track_list) - 1
@property
def has_active_track(self):
"""
Check if the track list has a active track based on time and pause state.
:return: True if active else False.
:rtype: bool
"""
if self.is_paused:
return True
if self.elapsed == 0:
return False
if self.elapsed > 0:
return True
return False
@property
def elapsed(self):
"""
Returns the current track elapsed time.
:return: The elapsed track time in seconds.
:rtype: int | float
"""
if self.current_track is not None:
if self.is_paused:
return self.current_track.pause_time
elapsed = time.time() - self.current_track.start_time
if elapsed > self.current_track.time:
return 0
return elapsed
return 0
@property
def remaining(self):
"""
Returns the current track remaining time.
:return: The remaining time in seconds.
:rtype: int | float
"""
if self.current_track is not None:
time_left = self.current_track.time - self.elapsed
return time_left
return 0
@property
def next_track(self):
"""
Returns the next track in the track list
:return: The next Track in the track list or None if the track list is empty.
:rtype: Track | None
"""
if len(self.track_list) > 0:
            if self.track_index < len(self.track_list):  # i.e. <= self.last_index
next_track = self.track_list[self.track_index]
self.current_track = next_track
self.current_track.start_time = time.time()
self.track_index += 1
return next_track
return None
@property
def is_last_track(self):
"""
Check if the track list is at the last index.
:return: True if last track list index, else False. None if the track list is empty.
:rtype: bool | None
"""
if len(self.track_list) > 0:
if self.track_index >= len(self.track_list):
return True
return False
return None
@property
def queue(self):
"""
Return the queue of the track list.
:return: The track list length and the remaining tracks.
:rtype: tuple
"""
if len(self.track_list) > 0:
remaining = len(self.track_list) - self.track_index
queue = (len(self.track_list), remaining)
return queue
def start(self, owner, track):
"""
Start a track to be timed.
:param owner: The nick of the user who started the track.
:type owner: str
:param track: The track data.
:type track: dict
:return: The track as a Track.
:rtype: Track
"""
if self.is_paused:
self.is_paused = False
self.current_track = Track(owner, **track)
self.current_track.start_time = time.time()
return self.current_track
def play(self, offset):
"""
Play or search a track.
:param offset: The time in seconds to start playing from.
:type offset: int | float
:return: The remaining track time in seconds.
:rtype: int | float
"""
if self.is_paused:
self.is_paused = False
self.current_track.start_time = time.time() - offset
return self.remaining
    def replay(self):  # TODO: check if this is working correctly.
"""
Replay(re-time) the current track.
:return: The current track.
:rtype: Track
"""
if self.is_paused:
self.is_paused = False
self.current_track.start_time = time.time()
return self.current_track
def pause(self, offset=0):
"""
Pause a track.
:param offset: The time in seconds to pause the track at.
:type offset: int | float
"""
self.is_paused = True
if offset != 0:
self.current_track.pause_time = offset
else:
self.current_track.pause_time = time.time() - self.current_track.start_time
def stop(self):
""" Stop a track. """
self.is_paused = False
self.current_track.start_time = 0
self.current_track.pause_time = 0
def add(self, owner, track):
"""
Add a track to the track list.
:param owner: The nick name of the user adding the track.
:type owner: str
:param track: The track data.
:type track: dict
:return: The track as Track.
:rtype: Track
"""
if track is not None:
_track = Track(owner, **track)
self.track_list.append(_track)
return _track
def add_list(self, owner, tracks):
"""
Add a list of track data to the track list.
:param owner: The nick name of the user adding the tracks.
:type owner: str
:param tracks: A list of track data.
:type tracks: list
"""
if len(tracks) > 0:
for track in tracks:
self.add(owner, track)
def clear(self):
"""
Clear the track list for all items.
:return: True if cleared successfully, else False.
:rtype: bool
"""
if len(self.track_list) > 0:
self.track_list[:] = []
self.track_index = 0
return True
return False
def get_tracks(self, amount=5, from_index=True):
"""
Get a list of Track's from the track list.
:param amount: The amount of Track's to get.
:type amount: int
:param from_index: Get Track's from the current track list index.
:type from_index: bool
:return: A list of Track's.
:rtype: list
"""
start_index = 0
result = list()
if len(self.track_list) > 0:
if from_index:
start_index = self.track_index
ic = 0
for i in range(start_index, len(self.track_list)):
if ic <= amount - 1:
_track = (i, self.track_list[i])
result.append(_track)
ic += 1
return result
def next_track_info(self, jump=0):
"""
Get the next Track object in the track list.
:param jump: Instead of getting the next track, use this to jump in the track list.
:type jump: int
:return: The index of the Track and the Track.
:rtype: tuple | None
"""
if jump != 0:
if self.track_index + jump < len(self.track_list):
return self.track_index + jump, self.track_list[self.track_index + jump]
elif self.track_index < len(self.track_list):
return self.track_index, self.track_list[self.track_index]
def delete(self, indexes, by_range=False):
"""
Delete track list items by index.
:param indexes: A list of indexes to delete.
:type indexes: list
:param by_range: Delete a range of indexes.
:type by_range: bool
:return: A dictionary containing information about the delete operation.
:rtype: dict | None
"""
tracks = list(self.track_list)
deleted_indexes = []
for i in sorted(indexes, reverse=True):
if self.track_index <= i < len(self.track_list):
del self.track_list[i]
deleted_indexes.append(str(i))
deleted_indexes.reverse()
if len(deleted_indexes) > 0:
_result = dict()
if by_range:
_result['from'] = deleted_indexes[0]
_result['to'] = deleted_indexes[-1]
            elif len(deleted_indexes) == 1:
_result['track_title'] = tracks[int(deleted_indexes[0])].title
_result['deleted_indexes'] = deleted_indexes
_result['deleted_indexes_len'] = len(deleted_indexes)
return _result
return None
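# Minimal usage sketch (illustrative; the dict keys mirror what Track expects:
# video_id, type, video_title, video_time, image):
if __name__ == "__main__":
    pl = PlayList()
    pl.add("someuser", {"video_id": "abc123", "type": "youtube",
                        "video_title": "demo", "video_time": 212.0})
    track = pl.next_track             # starts timing and advances the index
    print(track.title, pl.remaining)  # -> demo ~212.0
    print(pl.queue)                   # -> (1, 0): one track queued, none left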
| 30.538941
| 92
| 0.545955
|
7caf390e85f91d2dfcb09449a9d71db842eab585
| 8,887
|
py
|
Python
|
steveBot.py
|
PikaBlue107/steve-content-warning
|
1157824a96163987afb2750a045a1fde27e8a2cb
|
[
"MIT"
] | 1
|
2021-02-15T17:46:34.000Z
|
2021-02-15T17:46:34.000Z
|
steveBot.py
|
PikaBlue107/steve-content-warning
|
1157824a96163987afb2750a045a1fde27e8a2cb
|
[
"MIT"
] | null | null | null |
steveBot.py
|
PikaBlue107/steve-content-warning
|
1157824a96163987afb2750a045a1fde27e8a2cb
|
[
"MIT"
] | null | null | null |
#Discord
import discord
from discord.ext import commands
from discord import errors
#Error handling
from pickle import UnpicklingError
from json import JSONDecodeError
#My files
from steveIO import SteveIO
from history import History
AUTH_LOC = "auth.json"
PACKAGE_LOC = "package.json"
GUILDS_LOC = "guilds.pickle"
#Create IO Object for loading and saving data
io = SteveIO(auth_loc=AUTH_LOC, package_loc=PACKAGE_LOC, guilds_loc=GUILDS_LOC)
#Error codes
SUCCESS = 0
AUTH_MISSING = 1
AUTH_CORRUPT = 2
PACKAGE_MISSING = 3
PACKAGE_CORRUPT = 4
GUILDS_CORRUPT = 5
BAD_LOGIN = 6
try:
auth_string = io.loadAuth()
except FileNotFoundError:
print("auth.json not found at filepath '" + AUTH_LOC + "'. Exiting...")
exit(AUTH_MISSING)
except JSONDecodeError:
print(AUTH_LOC + " is unreadable by JSON. Please fix manually or by retrieving a new auth.json file. Exiting...")
exit(AUTH_CORRUPT)
try:
package = io.loadPackage()
except FileNotFoundError:
print("package.json not found at filepath '" + PACKAGE_LOC + "'. Exiting...")
exit(PACKAGE_MISSING)
except JSONDecodeError:
print(PACKAGE_LOC + " is unreadable by JSON. Please fix manually or by retrieving a new package.json file. Exiting...")
exit(PACKAGE_CORRUPT)
try:
guilds = io.loadGuilds()
except UnpicklingError:
print(GUILDS_LOC + " is unreadable by Python Pickle. Remove guilds.pickle from destination '" + GUILDS_LOC + "' and then restart to generate a blank file. Exiting...")
exit(GUILDS_CORRUPT)
#Declare variables
BOT_NAME = package["name"]
BOT_VERSION = package["version"]
BOT_DESC = package["description"]
BOT_MAIN = package["main"]
BOT_AUTHOR = package["author"]
BOT_DEPENDENCIES = package["dependencies"]
BOT_ID = 605836370749030490
TESTING_ID = 607087546333265920
OWNER_ID = 138461123899949057
BOT_COMMAND_PREFIX='>'
cur_ctx = None
#Initialize
bot = commands.Bot(command_prefix=BOT_COMMAND_PREFIX, description = BOT_DESC, owner_id = OWNER_ID)
def start():
try:
bot.run(auth_string)
except errors.LoginFailure:
print("Login unsuccessful. Please provide a new login token in auth.json. Exiting...")
exit(BAD_LOGIN)
#Utility functions
def register_guild(guild_id):
print(guild_id)
print(type(guild_id))
guilds[int(guild_id)] = {
"whitelist": {},
"blacklist": {},
"whitelist_enable": False,
"filters": {},
"bot_channel": None
}
print(guilds)
io.save(guilds)
def update_guild(guild):
guild["bot_channel"] = None
async def pp(str):
print(str)
await cur_ctx.send(str)
def make_history(ctx, safeword=None):
return History(time=ctx.message.created_at, user=ctx.author.id, nick=ctx.author.display_name, safeword=safeword)
#possibly never to be used
def remove_guild(guild_id):
    guilds.pop(guild_id)
io.save(guilds)
def parse_channel(ctx, argument):
channel = ctx.channel
channel_id = channel.id
if argument is None:
return channel
elif argument.startswith("<#"):
channel_id = int(argument.strip('<').strip('#').strip('>'))
for channel in ctx.guild.channels:
if channel.id == channel_id:
return channel
else:
for channel in ctx.guild.channels:
if channel.name == argument:
return channel
#Channel not found.
return None
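# Illustrative behaviour note (not original code): parse_channel accepts either
# no argument (falls back to ctx.channel), a raw channel mention such as
# "<#12345>" (hypothetical ID; the digits are extracted and matched by id), or
# a bare channel name matched against ctx.guild.channels; it returns None when
# nothing matches.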
# def is_not_me(): #TODO get working
# print("hello?")
# def predicate(ctx):
# print(ctx.message.author.id, BOT_ID)
# return ctx.message.author.id != BOT_ID
# print("hi")
# return commands.check(predicate)
#TODO override close
#async def close(self, return=ret):
#COMMANDS
@bot.command()
async def ping(ctx):
await ctx.send('pong')
@bot.command()
async def takecaresteve(ctx):
await ctx.send('you too')
@bot.command() #TODO delete_after?
async def seelist(ctx, confirm=None):
global cur_ctx
cur_ctx = ctx
guild_id = ctx.guild.id
if guild_id in guilds.keys():
#TODO are you sure?
filters = guilds[guild_id]["filters"]
await pp(filters)
else:
await pp("No data for server '" + ctx.guild.name + "'")
@bot.command() #TODO delete_after?
async def add(ctx, keyword):
global cur_ctx
cur_ctx = ctx
guild_id = ctx.guild.id
if guild_id not in guilds:
register_guild(guild_id)
guilds[guild_id]["filters"][keyword] = make_history(ctx)
io.save(guilds)
await pp("Added keyword {} to filter list".format(keyword))
@bot.command() #TODO delete_after?
async def remove(ctx, keyword):
global cur_ctx
cur_ctx = ctx
guild_id = ctx.guild.id
if guild_id not in guilds:
await pp("No data for server '" + ctx.guild.name + "'")
try:
guilds[guild_id]["filters"].pop(keyword)
await pp("Successfully removed word from filter list.")
except ValueError:
await pp("Word not found in filter list.")
async def change_list(ctx, white, argument=None):
async def whitelist_on():
guilds[guild_id]["whitelist_enable"] = True
await pp("Whitelist mode enabled, blacklist off.")
async def blacklist_on():
guilds[guild_id]["whitelist_enable"] = False
await pp("Blacklist mode enabled, whitelist off.")
async def register_channel():
channel = parse_channel(ctx, argument)
if channel is not None:
guilds[guild_id][string][channel.id] = history
await pp('#' + channel.name + " added to " + string + ".")
else:
await pp("Unable to process command. Type a #channel-name, channel-name, or a setting such as 'on', 'off', 'toggle', or 'print'")
global cur_ctx
cur_ctx = ctx
guild_id = ctx.guild.id
if guild_id not in guilds:
register_guild(guild_id)
channel_id = ctx.channel.id
history = make_history(ctx)
string = "whitelist" if white else "blacklist"
#">whitelist". Targets the current channel
if argument is None:
await register_channel()
if argument == "print": #User wants to print the list of whitelists/blacklists
print_list = []
for channel_id in guilds[guild_id][string]:
for channel in ctx.guild.channels:
if channel.id == channel_id:
print_list.append('#'+channel.name)
break
await pp(print_list)
elif argument.lower() == "on": await whitelist_on() if white else await blacklist_on()
elif argument.lower() == "off": await blacklist_on() if white else await whitelist_on()
elif argument.lower() == "toggle":
if guilds[guild_id]["whitelist_enable"]: await blacklist_on()
else: await whitelist_on()
#">whitelist testing, >whitelist #testing". Searches for the specified channel through the current guild.
else:
await register_channel()
io.save(guilds)
@bot.command()
async def whitelist(ctx, argument=None):
await change_list(ctx=ctx, white=True, argument=argument)
@bot.command()
async def blacklist(ctx, argument=None):
await change_list(ctx=ctx, white=False, argument=argument)
@bot.listen("on_message")
async def filter_message(msg):
#TODO turn into decorations
channel = msg.channel
guild_id = channel.guild.id
if msg.author.bot: return
if channel.guild.id not in guilds: return
if msg.content.startswith(BOT_COMMAND_PREFIX): return
print(guilds[guild_id]["whitelist_enable"])
print(list(guilds[guild_id]["whitelist"].keys()))
print(list(guilds[guild_id]["blacklist"].keys()))
if guilds[guild_id]["whitelist_enable"]:
if channel.id not in list(guilds[guild_id]["whitelist"].keys()): return
else:
if channel.id in list(guilds[guild_id]["blacklist"].keys()): return
    #Nested function, inserts spoiler tags ||content|| around filter words
def generate_message(str, filterIndex):
new = ""
index = 0
for filter in filterIndex:
new = new + str[index:filter[0]] + "||" + str[filter[0]:filter[1]] + "||"
index = filter[1]
new = new + str[index:]
print(new)
return new
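    # Worked example (illustrative): with msg.content "I saw a spider today"
    # and a single filter "spider", filterIndex is [(8, 14)] and
    # generate_message returns "I saw a ||spider|| today".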
#Scan for filter words
filterIndex = []
for filter in guilds[guild_id]["filters"]:
index = msg.content.upper().find(filter.upper())
if index > -1: filterIndex.append((index, index + len(filter)))
if len(filterIndex) > 0: #Filter words found, enact filter protocol
#Sort filtered words from the beginning of the message to the end
filterIndex.sort()
#Generate a reply message using generate_message nested function
reply = generate_message(msg.content, filterIndex)
#Get the user's profile picture and nickname
path_user = msg.author.avatar_url
nickname = msg.author.display_name
#Create an Embed object with the user's filtered post
embed = discord.Embed(
description=reply,
color=0xecce8b
)
embed.set_author(name=nickname, icon_url=path_user)
#Send message back to channel
await channel.send(embed=embed)
print(reply)
#Delete user's message
await msg.delete()
# @bot.command()
# async def botchannel(ctx):
# @bot.command()
# async def botchannel(ctx, channel):
@bot.command()
async def shutdown(ctx):
print("shutdown")
with open("tmp.txt", "w") as tmp:
tmp.write("false")
io.save(guilds)
await bot.logout()
@bot.command()
async def restart(ctx):
with open("tmp.txt", "w") as tmp:
tmp.write("true")
io.save(guilds)
await bot.logout()
# @self.event
# async def on_ready():
# for guild_id in list(guilds.keys()):
start()
| 25.98538
| 168
| 0.723529
|
cd2ad2828933005aac9f03b488bb44c0b4207c23
| 189
|
py
|
Python
|
test_tfrecordfile.py
|
travers-rhodes/multi_object_datasets
|
60077353045365a1b614dc4a4400521bfba46a58
|
[
"Apache-2.0"
] | null | null | null |
test_tfrecordfile.py
|
travers-rhodes/multi_object_datasets
|
60077353045365a1b614dc4a4400521bfba46a58
|
[
"Apache-2.0"
] | null | null | null |
test_tfrecordfile.py
|
travers-rhodes/multi_object_datasets
|
60077353045365a1b614dc4a4400521bfba46a58
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
import dots as d
dat = d.dataset("6_dots.tfrecords")
i = 0
for el in dat:
i+=1
if i > 3:
break
print(el)
| 14.538462
| 37
| 0.640212
|
4091393d902ac9ae64cc0f16f55114d8b93ec046
| 17,016
|
py
|
Python
|
option_b.py
|
wrosecrans/colormap
|
0b6a3b7e4caa5df72e7bad8ba196acfbbe5e5946
|
[
"CC0-1.0"
] | 231
|
2015-06-03T01:28:13.000Z
|
2022-03-27T02:02:42.000Z
|
option_b.py
|
CatarinaL/colormap
|
bc549477db0c12b54a5928087552ad2cf274980f
|
[
"CC0-1.0"
] | 10
|
2015-06-06T23:06:06.000Z
|
2019-10-25T20:10:48.000Z
|
option_b.py
|
CatarinaL/colormap
|
bc549477db0c12b54a5928087552ad2cf274980f
|
[
"CC0-1.0"
] | 97
|
2015-06-04T00:46:34.000Z
|
2022-01-23T17:37:24.000Z
|
from matplotlib.colors import LinearSegmentedColormap
from numpy import nan, inf
# Used to reconstruct the colormap in viscm
parameters = {'xp': [0.62912107182941668, 5.9459613496072166, 109.50795214009284, -21.828857638709152, -6.5743399496759416],
'yp': [-9.0711805555555429, -75.1519097222222, 50.040035674319967, 52.94104954493605, 24.552723501963708],
'min_JK': 1.1436170212765937,
'max_JK': 98.21808510638297}
cm_data = [[ 1.46159096e-03, 4.66127766e-04, 1.38655200e-02],
[ 2.26726368e-03, 1.26992553e-03, 1.85703520e-02],
[ 3.29899092e-03, 2.24934863e-03, 2.42390508e-02],
[ 4.54690615e-03, 3.39180156e-03, 3.09092475e-02],
[ 6.00552565e-03, 4.69194561e-03, 3.85578980e-02],
[ 7.67578856e-03, 6.13611626e-03, 4.68360336e-02],
[ 9.56051094e-03, 7.71344131e-03, 5.51430756e-02],
[ 1.16634769e-02, 9.41675403e-03, 6.34598080e-02],
[ 1.39950388e-02, 1.12247138e-02, 7.18616890e-02],
[ 1.65605595e-02, 1.31362262e-02, 8.02817951e-02],
[ 1.93732295e-02, 1.51325789e-02, 8.87668094e-02],
[ 2.24468865e-02, 1.71991484e-02, 9.73274383e-02],
[ 2.57927373e-02, 1.93306298e-02, 1.05929835e-01],
[ 2.94324251e-02, 2.15030771e-02, 1.14621328e-01],
[ 3.33852235e-02, 2.37024271e-02, 1.23397286e-01],
[ 3.76684211e-02, 2.59207864e-02, 1.32232108e-01],
[ 4.22525554e-02, 2.81385015e-02, 1.41140519e-01],
[ 4.69146287e-02, 3.03236129e-02, 1.50163867e-01],
[ 5.16437624e-02, 3.24736172e-02, 1.59254277e-01],
[ 5.64491009e-02, 3.45691867e-02, 1.68413539e-01],
[ 6.13397200e-02, 3.65900213e-02, 1.77642172e-01],
[ 6.63312620e-02, 3.85036268e-02, 1.86961588e-01],
[ 7.14289181e-02, 4.02939095e-02, 1.96353558e-01],
[ 7.66367560e-02, 4.19053329e-02, 2.05798788e-01],
[ 8.19620773e-02, 4.33278666e-02, 2.15289113e-01],
[ 8.74113897e-02, 4.45561662e-02, 2.24813479e-01],
[ 9.29901526e-02, 4.55829503e-02, 2.34357604e-01],
[ 9.87024972e-02, 4.64018731e-02, 2.43903700e-01],
[ 1.04550936e-01, 4.70080541e-02, 2.53430300e-01],
[ 1.10536084e-01, 4.73986708e-02, 2.62912235e-01],
[ 1.16656423e-01, 4.75735920e-02, 2.72320803e-01],
[ 1.22908126e-01, 4.75360183e-02, 2.81624170e-01],
[ 1.29284984e-01, 4.72930838e-02, 2.90788012e-01],
[ 1.35778450e-01, 4.68563678e-02, 2.99776404e-01],
[ 1.42377819e-01, 4.62422566e-02, 3.08552910e-01],
[ 1.49072957e-01, 4.54676444e-02, 3.17085139e-01],
[ 1.55849711e-01, 4.45588056e-02, 3.25338414e-01],
[ 1.62688939e-01, 4.35542881e-02, 3.33276678e-01],
[ 1.69575148e-01, 4.24893149e-02, 3.40874188e-01],
[ 1.76493202e-01, 4.14017089e-02, 3.48110606e-01],
[ 1.83428775e-01, 4.03288858e-02, 3.54971391e-01],
[ 1.90367453e-01, 3.93088888e-02, 3.61446945e-01],
[ 1.97297425e-01, 3.84001825e-02, 3.67534629e-01],
[ 2.04209298e-01, 3.76322609e-02, 3.73237557e-01],
[ 2.11095463e-01, 3.70296488e-02, 3.78563264e-01],
[ 2.17948648e-01, 3.66146049e-02, 3.83522415e-01],
[ 2.24762908e-01, 3.64049901e-02, 3.88128944e-01],
[ 2.31538148e-01, 3.64052511e-02, 3.92400150e-01],
[ 2.38272961e-01, 3.66209949e-02, 3.96353388e-01],
[ 2.44966911e-01, 3.70545017e-02, 4.00006615e-01],
[ 2.51620354e-01, 3.77052832e-02, 4.03377897e-01],
[ 2.58234265e-01, 3.85706153e-02, 4.06485031e-01],
[ 2.64809649e-01, 3.96468666e-02, 4.09345373e-01],
[ 2.71346664e-01, 4.09215821e-02, 4.11976086e-01],
[ 2.77849829e-01, 4.23528741e-02, 4.14392106e-01],
[ 2.84321318e-01, 4.39325787e-02, 4.16607861e-01],
[ 2.90763373e-01, 4.56437598e-02, 4.18636756e-01],
[ 2.97178251e-01, 4.74700293e-02, 4.20491164e-01],
[ 3.03568182e-01, 4.93958927e-02, 4.22182449e-01],
[ 3.09935342e-01, 5.14069729e-02, 4.23720999e-01],
[ 3.16281835e-01, 5.34901321e-02, 4.25116277e-01],
[ 3.22609671e-01, 5.56335178e-02, 4.26376869e-01],
[ 3.28920763e-01, 5.78265505e-02, 4.27510546e-01],
[ 3.35216916e-01, 6.00598734e-02, 4.28524320e-01],
[ 3.41499828e-01, 6.23252772e-02, 4.29424503e-01],
[ 3.47771086e-01, 6.46156100e-02, 4.30216765e-01],
[ 3.54032169e-01, 6.69246832e-02, 4.30906186e-01],
[ 3.60284449e-01, 6.92471753e-02, 4.31497309e-01],
[ 3.66529195e-01, 7.15785403e-02, 4.31994185e-01],
[ 3.72767575e-01, 7.39149211e-02, 4.32400419e-01],
[ 3.79000659e-01, 7.62530701e-02, 4.32719214e-01],
[ 3.85228383e-01, 7.85914864e-02, 4.32954973e-01],
[ 3.91452659e-01, 8.09267058e-02, 4.33108763e-01],
[ 3.97674379e-01, 8.32568129e-02, 4.33182647e-01],
[ 4.03894278e-01, 8.55803445e-02, 4.33178526e-01],
[ 4.10113015e-01, 8.78961593e-02, 4.33098056e-01],
[ 4.16331169e-01, 9.02033992e-02, 4.32942678e-01],
[ 4.22549249e-01, 9.25014543e-02, 4.32713635e-01],
[ 4.28767696e-01, 9.47899342e-02, 4.32411996e-01],
[ 4.34986885e-01, 9.70686417e-02, 4.32038673e-01],
[ 4.41207124e-01, 9.93375510e-02, 4.31594438e-01],
[ 4.47428382e-01, 1.01597079e-01, 4.31080497e-01],
[ 4.53650614e-01, 1.03847716e-01, 4.30497898e-01],
[ 4.59874623e-01, 1.06089165e-01, 4.29845789e-01],
[ 4.66100494e-01, 1.08321923e-01, 4.29124507e-01],
[ 4.72328255e-01, 1.10546584e-01, 4.28334320e-01],
[ 4.78557889e-01, 1.12763831e-01, 4.27475431e-01],
[ 4.84789325e-01, 1.14974430e-01, 4.26547991e-01],
[ 4.91022448e-01, 1.17179219e-01, 4.25552106e-01],
[ 4.97257069e-01, 1.19379132e-01, 4.24487908e-01],
[ 5.03492698e-01, 1.21575414e-01, 4.23356110e-01],
[ 5.09729541e-01, 1.23768654e-01, 4.22155676e-01],
[ 5.15967304e-01, 1.25959947e-01, 4.20886594e-01],
[ 5.22205646e-01, 1.28150439e-01, 4.19548848e-01],
[ 5.28444192e-01, 1.30341324e-01, 4.18142411e-01],
[ 5.34682523e-01, 1.32533845e-01, 4.16667258e-01],
[ 5.40920186e-01, 1.34729286e-01, 4.15123366e-01],
[ 5.47156706e-01, 1.36928959e-01, 4.13510662e-01],
[ 5.53391649e-01, 1.39134147e-01, 4.11828882e-01],
[ 5.59624442e-01, 1.41346265e-01, 4.10078028e-01],
[ 5.65854477e-01, 1.43566769e-01, 4.08258132e-01],
[ 5.72081108e-01, 1.45797150e-01, 4.06369246e-01],
[ 5.78303656e-01, 1.48038934e-01, 4.04411444e-01],
[ 5.84521407e-01, 1.50293679e-01, 4.02384829e-01],
[ 5.90733615e-01, 1.52562977e-01, 4.00289528e-01],
[ 5.96939751e-01, 1.54848232e-01, 3.98124897e-01],
[ 6.03138930e-01, 1.57151161e-01, 3.95891308e-01],
[ 6.09330184e-01, 1.59473549e-01, 3.93589349e-01],
[ 6.15512627e-01, 1.61817111e-01, 3.91219295e-01],
[ 6.21685340e-01, 1.64183582e-01, 3.88781456e-01],
[ 6.27847374e-01, 1.66574724e-01, 3.86276180e-01],
[ 6.33997746e-01, 1.68992314e-01, 3.83703854e-01],
[ 6.40135447e-01, 1.71438150e-01, 3.81064906e-01],
[ 6.46259648e-01, 1.73913876e-01, 3.78358969e-01],
[ 6.52369348e-01, 1.76421271e-01, 3.75586209e-01],
[ 6.58463166e-01, 1.78962399e-01, 3.72748214e-01],
[ 6.64539964e-01, 1.81539111e-01, 3.69845599e-01],
[ 6.70598572e-01, 1.84153268e-01, 3.66879025e-01],
[ 6.76637795e-01, 1.86806728e-01, 3.63849195e-01],
[ 6.82656407e-01, 1.89501352e-01, 3.60756856e-01],
[ 6.88653158e-01, 1.92238994e-01, 3.57602797e-01],
[ 6.94626769e-01, 1.95021500e-01, 3.54387853e-01],
[ 7.00575937e-01, 1.97850703e-01, 3.51112900e-01],
[ 7.06499709e-01, 2.00728196e-01, 3.47776863e-01],
[ 7.12396345e-01, 2.03656029e-01, 3.44382594e-01],
[ 7.18264447e-01, 2.06635993e-01, 3.40931208e-01],
[ 7.24102613e-01, 2.09669834e-01, 3.37423766e-01],
[ 7.29909422e-01, 2.12759270e-01, 3.33861367e-01],
[ 7.35683432e-01, 2.15905976e-01, 3.30245147e-01],
[ 7.41423185e-01, 2.19111589e-01, 3.26576275e-01],
[ 7.47127207e-01, 2.22377697e-01, 3.22855952e-01],
[ 7.52794009e-01, 2.25705837e-01, 3.19085410e-01],
[ 7.58422090e-01, 2.29097492e-01, 3.15265910e-01],
[ 7.64009940e-01, 2.32554083e-01, 3.11398734e-01],
[ 7.69556038e-01, 2.36076967e-01, 3.07485188e-01],
[ 7.75058888e-01, 2.39667435e-01, 3.03526312e-01],
[ 7.80517023e-01, 2.43326720e-01, 2.99522665e-01],
[ 7.85928794e-01, 2.47055968e-01, 2.95476756e-01],
[ 7.91292674e-01, 2.50856232e-01, 2.91389943e-01],
[ 7.96607144e-01, 2.54728485e-01, 2.87263585e-01],
[ 8.01870689e-01, 2.58673610e-01, 2.83099033e-01],
[ 8.07081807e-01, 2.62692401e-01, 2.78897629e-01],
[ 8.12239008e-01, 2.66785558e-01, 2.74660698e-01],
[ 8.17340818e-01, 2.70953688e-01, 2.70389545e-01],
[ 8.22385784e-01, 2.75197300e-01, 2.66085445e-01],
[ 8.27372474e-01, 2.79516805e-01, 2.61749643e-01],
[ 8.32299481e-01, 2.83912516e-01, 2.57383341e-01],
[ 8.37165425e-01, 2.88384647e-01, 2.52987700e-01],
[ 8.41968959e-01, 2.92933312e-01, 2.48563825e-01],
[ 8.46708768e-01, 2.97558528e-01, 2.44112767e-01],
[ 8.51383572e-01, 3.02260213e-01, 2.39635512e-01],
[ 8.55992130e-01, 3.07038188e-01, 2.35132978e-01],
[ 8.60533241e-01, 3.11892183e-01, 2.30606009e-01],
[ 8.65005747e-01, 3.16821833e-01, 2.26055368e-01],
[ 8.69408534e-01, 3.21826685e-01, 2.21481734e-01],
[ 8.73740530e-01, 3.26906201e-01, 2.16885699e-01],
[ 8.78000715e-01, 3.32059760e-01, 2.12267762e-01],
[ 8.82188112e-01, 3.37286663e-01, 2.07628326e-01],
[ 8.86301795e-01, 3.42586137e-01, 2.02967696e-01],
[ 8.90340885e-01, 3.47957340e-01, 1.98286080e-01],
[ 8.94304553e-01, 3.53399363e-01, 1.93583583e-01],
[ 8.98192017e-01, 3.58911240e-01, 1.88860212e-01],
[ 9.02002544e-01, 3.64491949e-01, 1.84115876e-01],
[ 9.05735448e-01, 3.70140419e-01, 1.79350388e-01],
[ 9.09390090e-01, 3.75855533e-01, 1.74563472e-01],
[ 9.12965874e-01, 3.81636138e-01, 1.69754764e-01],
[ 9.16462251e-01, 3.87481044e-01, 1.64923826e-01],
[ 9.19878710e-01, 3.93389034e-01, 1.60070152e-01],
[ 9.23214783e-01, 3.99358867e-01, 1.55193185e-01],
[ 9.26470039e-01, 4.05389282e-01, 1.50292329e-01],
[ 9.29644083e-01, 4.11479007e-01, 1.45366973e-01],
[ 9.32736555e-01, 4.17626756e-01, 1.40416519e-01],
[ 9.35747126e-01, 4.23831237e-01, 1.35440416e-01],
[ 9.38675494e-01, 4.30091162e-01, 1.30438175e-01],
[ 9.41521384e-01, 4.36405243e-01, 1.25409440e-01],
[ 9.44284543e-01, 4.42772199e-01, 1.20354038e-01],
[ 9.46964741e-01, 4.49190757e-01, 1.15272059e-01],
[ 9.49561766e-01, 4.55659658e-01, 1.10163947e-01],
[ 9.52075421e-01, 4.62177656e-01, 1.05030614e-01],
[ 9.54505523e-01, 4.68743522e-01, 9.98735931e-02],
[ 9.56851903e-01, 4.75356048e-01, 9.46952268e-02],
[ 9.59114397e-01, 4.82014044e-01, 8.94989073e-02],
[ 9.61292850e-01, 4.88716345e-01, 8.42893891e-02],
[ 9.63387110e-01, 4.95461806e-01, 7.90731907e-02],
[ 9.65397031e-01, 5.02249309e-01, 7.38591143e-02],
[ 9.67322465e-01, 5.09077761e-01, 6.86589199e-02],
[ 9.69163264e-01, 5.15946092e-01, 6.34881971e-02],
[ 9.70919277e-01, 5.22853259e-01, 5.83674890e-02],
[ 9.72590351e-01, 5.29798246e-01, 5.33237243e-02],
[ 9.74176327e-01, 5.36780059e-01, 4.83920090e-02],
[ 9.75677038e-01, 5.43797733e-01, 4.36177922e-02],
[ 9.77092313e-01, 5.50850323e-01, 3.90500131e-02],
[ 9.78421971e-01, 5.57936911e-01, 3.49306227e-02],
[ 9.79665824e-01, 5.65056600e-01, 3.14091591e-02],
[ 9.80823673e-01, 5.72208516e-01, 2.85075931e-02],
[ 9.81895311e-01, 5.79391803e-01, 2.62497353e-02],
[ 9.82880522e-01, 5.86605627e-01, 2.46613416e-02],
[ 9.83779081e-01, 5.93849168e-01, 2.37702263e-02],
[ 9.84590755e-01, 6.01121626e-01, 2.36063833e-02],
[ 9.85315301e-01, 6.08422211e-01, 2.42021174e-02],
[ 9.85952471e-01, 6.15750147e-01, 2.55921853e-02],
[ 9.86502013e-01, 6.23104667e-01, 2.78139496e-02],
[ 9.86963670e-01, 6.30485011e-01, 3.09075459e-02],
[ 9.87337182e-01, 6.37890424e-01, 3.49160639e-02],
[ 9.87622296e-01, 6.45320152e-01, 3.98857472e-02],
[ 9.87818759e-01, 6.52773439e-01, 4.55808037e-02],
[ 9.87926330e-01, 6.60249526e-01, 5.17503867e-02],
[ 9.87944783e-01, 6.67747641e-01, 5.83286889e-02],
[ 9.87873910e-01, 6.75267000e-01, 6.52570167e-02],
[ 9.87713535e-01, 6.82806802e-01, 7.24892330e-02],
[ 9.87463516e-01, 6.90366218e-01, 7.99897176e-02],
[ 9.87123759e-01, 6.97944391e-01, 8.77314215e-02],
[ 9.86694229e-01, 7.05540424e-01, 9.56941797e-02],
[ 9.86174970e-01, 7.13153375e-01, 1.03863324e-01],
[ 9.85565739e-01, 7.20782460e-01, 1.12228756e-01],
[ 9.84865203e-01, 7.28427497e-01, 1.20784651e-01],
[ 9.84075129e-01, 7.36086521e-01, 1.29526579e-01],
[ 9.83195992e-01, 7.43758326e-01, 1.38453063e-01],
[ 9.82228463e-01, 7.51441596e-01, 1.47564573e-01],
[ 9.81173457e-01, 7.59134892e-01, 1.56863224e-01],
[ 9.80032178e-01, 7.66836624e-01, 1.66352544e-01],
[ 9.78806183e-01, 7.74545028e-01, 1.76037298e-01],
[ 9.77497453e-01, 7.82258138e-01, 1.85923357e-01],
[ 9.76108474e-01, 7.89973753e-01, 1.96017589e-01],
[ 9.74637842e-01, 7.97691563e-01, 2.06331925e-01],
[ 9.73087939e-01, 8.05409333e-01, 2.16876839e-01],
[ 9.71467822e-01, 8.13121725e-01, 2.27658046e-01],
[ 9.69783146e-01, 8.20825143e-01, 2.38685942e-01],
[ 9.68040817e-01, 8.28515491e-01, 2.49971582e-01],
[ 9.66242589e-01, 8.36190976e-01, 2.61533898e-01],
[ 9.64393924e-01, 8.43848069e-01, 2.73391112e-01],
[ 9.62516656e-01, 8.51476340e-01, 2.85545675e-01],
[ 9.60625545e-01, 8.59068716e-01, 2.98010219e-01],
[ 9.58720088e-01, 8.66624355e-01, 3.10820466e-01],
[ 9.56834075e-01, 8.74128569e-01, 3.23973947e-01],
[ 9.54997177e-01, 8.81568926e-01, 3.37475479e-01],
[ 9.53215092e-01, 8.88942277e-01, 3.51368713e-01],
[ 9.51546225e-01, 8.96225909e-01, 3.65627005e-01],
[ 9.50018481e-01, 9.03409063e-01, 3.80271225e-01],
[ 9.48683391e-01, 9.10472964e-01, 3.95289169e-01],
[ 9.47594362e-01, 9.17399053e-01, 4.10665194e-01],
[ 9.46809163e-01, 9.24168246e-01, 4.26373236e-01],
[ 9.46391536e-01, 9.30760752e-01, 4.42367495e-01],
[ 9.46402951e-01, 9.37158971e-01, 4.58591507e-01],
[ 9.46902568e-01, 9.43347775e-01, 4.74969778e-01],
[ 9.47936825e-01, 9.49317522e-01, 4.91426053e-01],
[ 9.49544830e-01, 9.55062900e-01, 5.07859649e-01],
[ 9.51740304e-01, 9.60586693e-01, 5.24203026e-01],
[ 9.54529281e-01, 9.65895868e-01, 5.40360752e-01],
[ 9.57896053e-01, 9.71003330e-01, 5.56275090e-01],
[ 9.61812020e-01, 9.75924241e-01, 5.71925382e-01],
[ 9.66248822e-01, 9.80678193e-01, 5.87205773e-01],
[ 9.71161622e-01, 9.85282161e-01, 6.02154330e-01],
[ 9.76510983e-01, 9.89753437e-01, 6.16760413e-01],
[ 9.82257307e-01, 9.94108844e-01, 6.31017009e-01],
[ 9.88362068e-01, 9.98364143e-01, 6.44924005e-01]]
test_cm = LinearSegmentedColormap.from_list(__file__, cm_data)
if __name__ == "__main__":
import matplotlib.pyplot as plt
import numpy as np
try:
from viscm import viscm
viscm(test_cm)
except ImportError:
print("viscm not found, falling back on simple display")
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',
cmap=test_cm)
plt.show()
| 60.127208
| 124
| 0.57728
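As a brief, hedged illustration of how a colormap generated this way can be reused elsewhere (this is not part of the original file), the sketch below registers a small stand-in colormap by name. It assumes matplotlib 3.6+ for matplotlib.colormaps.register, and the cm_data rows here are placeholders rather than the full table above.
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import LinearSegmentedColormap
# Placeholder RGB rows standing in for the long cm_data table above.
cm_data = [[0.0, 0.0, 0.3], [0.2, 0.5, 0.7], [1.0, 0.9, 0.6]]
demo_cm = LinearSegmentedColormap.from_list("demo_cm", cm_data)
# Register so the colormap can be referenced by name (matplotlib >= 3.6).
matplotlib.colormaps.register(demo_cm, name="demo_cm")
plt.imshow(np.random.rand(16, 16), cmap="demo_cm")
plt.colorbar()
plt.show()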
|
563500829c4d069d25d29e122217af76c93ab51d
| 174
|
py
|
Python
|
api/Note_test.py
|
gracejiang/note-sharing
|
3785ab4f3f6f805e8cd0f6556c2b0faf6ed6d8da
|
[
"MIT"
] | null | null | null |
api/Note_test.py
|
gracejiang/note-sharing
|
3785ab4f3f6f805e8cd0f6556c2b0faf6ed6d8da
|
[
"MIT"
] | null | null | null |
api/Note_test.py
|
gracejiang/note-sharing
|
3785ab4f3f6f805e8cd0f6556c2b0faf6ed6d8da
|
[
"MIT"
] | 1
|
2021-03-02T12:34:11.000Z
|
2021-03-02T12:34:11.000Z
|
import json
from Note import Note
n = Note("Friction", "introduction to friction", "UC Berkeley", 0, True, False, False, "https://google.com", 'lec.pdf')
print(n.toJSON())
| 24.857143
| 119
| 0.689655
|
668f9483105f239e5932fa588b65e6cc516b7c06
| 1,009
|
py
|
Python
|
bireme_tpcc_test/compare.py
|
lingfengzui/bireme
|
96aed5fe42ee2e0679d66e8a5518f48af19c9b42
|
[
"Apache-2.0"
] | 133
|
2017-09-13T02:32:56.000Z
|
2022-03-11T02:38:33.000Z
|
bireme_tpcc_test/compare.py
|
mgbin088/bireme
|
9cfc128230e7a718394132d9a51ec7d1019d08be
|
[
"Apache-2.0"
] | 84
|
2017-09-18T07:03:06.000Z
|
2021-03-26T06:35:14.000Z
|
bireme_tpcc_test/compare.py
|
mgbin088/bireme
|
9cfc128230e7a718394132d9a51ec7d1019d08be
|
[
"Apache-2.0"
] | 55
|
2017-09-13T03:28:22.000Z
|
2021-04-21T08:13:19.000Z
|
import os
import sys
from sqlCheckSum import sqlCheckSum

try:
    table = sys.argv[2]
    key = sys.argv[3:]
    if sys.argv[1] == "mysql":
        mysqlIP = os.environ['MYSQL_IP']
        mysqlPort = 3306
        mysqlUser = os.environ['MYSQL_USER']
        mysqlPasswd = os.environ['MYSQL_PASSWD']
        mysqlDB = os.environ['MYSQL_DB']
        # print is a function in Python 3
        print("MD5 for table " + table + " in MySQL is " +
              sqlCheckSum("mysql", mysqlIP, mysqlPort, mysqlUser, mysqlPasswd, mysqlDB, table, *key))
    elif sys.argv[1] == "hashdata":
        pgIP = os.environ['PG_IP']
        pgPort = 5432
        pgUser = os.environ['PG_USER']
        pgPasswd = os.environ['PG_PASSWD']
        pgDB = os.environ['PG_DB']
        print("MD5 for table " + table + " in Hashdata is " +
              sqlCheckSum("postgres", pgIP, pgPort, pgUser, pgPasswd, pgDB, table, *key))
except KeyError as e:
    print("Environment variable " + str(e) + " is not found.")
except Exception as e:
    print(e)
| 32.548387
| 145
| 0.603568
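The script above delegates the actual hashing to an external sqlCheckSum helper. Purely as a hypothetical sketch of the general table-checksum idea (not the bireme implementation), hashing a table deterministically over a DB-API cursor might look roughly like this; the function name and SQL handling are assumptions made for illustration.
import hashlib

def table_checksum(cursor, table, *key_columns):
    # Order rows by the key columns so the digest is deterministic,
    # then fold each row's repr into a single MD5. Not safe for
    # untrusted table/column names; illustration only.
    order_by = ", ".join(key_columns)
    cursor.execute(f"SELECT * FROM {table} ORDER BY {order_by}")
    digest = hashlib.md5()
    for row in cursor.fetchall():
        digest.update(repr(row).encode("utf-8"))
    return digest.hexdigest()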
|
94ccda15af46f159402fbd830a40f27b6522faaa
| 847
|
py
|
Python
|
web/tests/utils/test_predict_queue.py
|
salathegroup/crowdbreaks-streamer
|
3019b0f8d48bd6b4419bdaa02d0463ea6617b496
|
[
"MIT"
] | 6
|
2019-03-25T17:44:42.000Z
|
2020-10-28T18:45:27.000Z
|
web/tests/utils/test_predict_queue.py
|
salathegroup/crowdbreaks-streamer
|
3019b0f8d48bd6b4419bdaa02d0463ea6617b496
|
[
"MIT"
] | null | null | null |
web/tests/utils/test_predict_queue.py
|
salathegroup/crowdbreaks-streamer
|
3019b0f8d48bd6b4419bdaa02d0463ea6617b496
|
[
"MIT"
] | null | null | null |
import pytest
class TestPredictQueue:
def test_push(self, predict_queue, tweet):
d = {'id': str(tweet['id']), 'text': tweet['text']}
predict_queue.push(d)
assert len(predict_queue) == 1
def test_pop_all(self, predict_queue, tweet, retweet):
d1 = {'id': str(tweet['id']), 'text': tweet['text']}
d2 = {'id': str(retweet['id']), 'text': retweet['text']}
predict_queue.multi_push([d1, d2])
assert len(predict_queue) == 2
resp = predict_queue.pop_all()
assert len(resp) == 2
assert resp[0]['text'] == tweet['text']
assert resp[1]['text'] == retweet['text']
if __name__ == "__main__":
# if running outside of docker, make sure redis is running on localhost
import os; os.environ["REDIS_HOST"] = "localhost"
pytest.main(['-s', '-m', 'focus'])
| 36.826087
| 75
| 0.59268
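The tests above rely on predict_queue, tweet, and retweet fixtures defined elsewhere in the project's conftest. A minimal, hypothetical stand-in consistent with the interface the tests exercise (push, multi_push, pop_all, len) could look like the following; the real project backs the queue with Redis, and the class and field values here are assumptions.
import pytest

class _ListQueue:
    # In-memory stand-in exposing the same interface the tests use.
    def __init__(self):
        self._items = []
    def push(self, item):
        self._items.append(item)
    def multi_push(self, items):
        self._items.extend(items)
    def pop_all(self):
        items, self._items = self._items, []
        return items
    def __len__(self):
        return len(self._items)

@pytest.fixture
def predict_queue():
    return _ListQueue()

@pytest.fixture
def tweet():
    return {'id': 1, 'text': 'original tweet'}

@pytest.fixture
def retweet():
    return {'id': 2, 'text': 'RT original tweet'}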
|
2b2993665fdfa4018243b61056eb2f4a0b88e524
| 2,660
|
py
|
Python
|
jointly/types.py
|
hpi-dhc/jointly
|
b56fca228b2705cf795ae453cd1d77c0567f099e
|
[
"MIT"
] | 7
|
2020-10-14T11:57:35.000Z
|
2021-12-28T11:32:45.000Z
|
jointly/types.py
|
hpi-dhc/jointly
|
b56fca228b2705cf795ae453cd1d77c0567f099e
|
[
"MIT"
] | 5
|
2021-08-18T09:04:16.000Z
|
2021-12-27T19:24:24.000Z
|
jointly/types.py
|
hpi-dhc/jointly
|
b56fca228b2705cf795ae453cd1d77c0567f099e
|
[
"MIT"
] | 1
|
2021-05-06T07:57:38.000Z
|
2021-05-06T07:57:38.000Z
|
from typing import Dict, Union, List
import pandas as pd
SourceDict = Dict[str, Dict[str, Union[str, pd.DataFrame, float, pd.Timedelta, None]]]
"""
A dictionary of dictionaries.
Each entry defines an input sensor, and points to a dictionary with the keys ``data`` and ``ref_column``.
``data`` is a pandas ``DataFrame`` with a ``DateTimeIndex``.
``ref_column`` specifies the column within ``data`` which should be used to extract synchronization points, e.g., shakes.
"""
SynchronizationPoint = Dict[str, pd.Timestamp]
"""
A dictionary describing a synchronization point, e.g., a shake.
A synchronization point has a start and an end, and thus the properties ``start`` and ``end``.
"""
SynchronizationPair = Dict[str, SynchronizationPoint]
"""
A dictionary containing both the first and the second synchronization point of a signal.
Two points are required to calculate the distance in between them.
Properties are ``first`` and ``second``.
"""
SyncPairs = Dict[str, SynchronizationPair]
"""
A dictionary that contains SynchronizationPair instances for a number of sources.
"""
SyncPairTimeshift = Dict[str, pd.Timedelta]
"""Timeshift for a single sync pair, i.e., the shift required to synchronize one pair to the reference signal"""
ResultTableSpec = Dict[str, Dict[str, List[str]]]
"""
Specification for saving the synchronized results in separate files, with each root key defining a target file.
The second level defines the columns which should be saved from each source file into the given target file.
This can be used to separate the input files into files containing only a single sensor type, e.g., to extract the
PPG signal from two different sensors into a single file.
Example:
.. code:: python
{
'ACC': {
'Faros': ['Accelerometer_X', 'Accelerometer_Y', 'Accelerometer_Z'],
'Empatica': ['acc_x', 'acc_y', 'acc_z'],
'Everion': ['accx_data', 'accy_data', 'accz_data'],
},
'PPG': {
'Empatica': ['bvp'],
'Everion': ['blood_pulse_wave', 'led2_data', 'led3_data'],
},
'EDA': {
'Empatica': ['eda'],
'Everion': ['gsr_electrode'],
},
'ECG': {
'Faros': ['ECG'],
},
'TEMP': {
'Empatica': ['temp'],
'Everion': ['temperature_object'],
},
'HR': {
'Empatica': ['hr'],
'Everion': ['heart_rate', 'heart_rate_quality'],
},
'IBI': {
'Faros': ['HRV'],
'Empatica': ['ibi'],
'Everion': ['inter_pulse_interval', 'inter_pulse_interval_deviation'],
}
}
"""
| 32.839506
| 121
| 0.631203
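Following the docstrings above, a small illustrative example of building a SourceDict might look like this; the sensor name, column name, and sampling rate are assumptions made up for the example.
import numpy as np
import pandas as pd

# DataFrame with a DateTimeIndex, as required for the ``data`` entry.
index = pd.date_range("2021-01-01", periods=1000, freq="10ms")
acc = pd.DataFrame({"acc_x": np.random.randn(1000)}, index=index)

sources = {
    "Empatica": {
        "data": acc,            # sensor recording
        "ref_column": "acc_x",  # column used to detect synchronization shakes
    },
}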
|
4070b2a5d9921417d2852365e13eb781b48b2cc7
| 237
|
py
|
Python
|
tests/django_app/conftest.py
|
achaidaris/django-ansible
|
d0f37ac05b2037e5d7077dc271e72bd02676ddd9
|
[
"MIT"
] | 19
|
2016-10-14T12:31:15.000Z
|
2021-05-08T08:31:01.000Z
|
tests/django_app/conftest.py
|
achaidaris/django-ansible
|
d0f37ac05b2037e5d7077dc271e72bd02676ddd9
|
[
"MIT"
] | 29
|
2016-11-15T15:38:59.000Z
|
2020-06-05T18:17:30.000Z
|
tests/django_app/conftest.py
|
achaidaris/django-ansible
|
d0f37ac05b2037e5d7077dc271e72bd02676ddd9
|
[
"MIT"
] | 9
|
2016-10-14T13:04:21.000Z
|
2021-04-17T16:24:34.000Z
|
import pytest
from django_app.web import factories as web_factories
from pytest_factoryboy import register
# enable database for all tests
@pytest.fixture(autouse=True)
def enable_db(db):
pass
register(web_factories.UserFactory)
| 18.230769
| 53
| 0.814346
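Since pytest-factoryboy's register(web_factories.UserFactory) exposes user_factory and user fixtures automatically, a test module next to this conftest could use them roughly as follows; the username field is an assumption about the factory's definition.
def test_user_factory_creates_user(user_factory):
    # user_factory is the registered factory class; calling it persists a user
    # because the autouse enable_db fixture above enables database access.
    user = user_factory(username="alice")  # assumes the factory defines `username`
    assert user.pk is not None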
|
03c97e3c4e7a9d3cabccb3c34f411e9164abf008
| 44
|
py
|
Python
|
tests/scruples/baselines/test_metrics.py
|
allenai/scruples
|
9a43459c507e57d89ab8442a4f3985cedecb8710
|
[
"Apache-2.0"
] | 29
|
2020-05-09T10:55:45.000Z
|
2022-03-28T16:18:02.000Z
|
tests/scruples/baselines/test_metrics.py
|
allenai/scruples
|
9a43459c507e57d89ab8442a4f3985cedecb8710
|
[
"Apache-2.0"
] | null | null | null |
tests/scruples/baselines/test_metrics.py
|
allenai/scruples
|
9a43459c507e57d89ab8442a4f3985cedecb8710
|
[
"Apache-2.0"
] | 6
|
2020-10-05T12:24:28.000Z
|
2021-12-06T19:51:06.000Z
|
"""Tests for scruples.baselines.metrics."""
| 22
| 43
| 0.727273
|
6bf81b127adc730c9ac3cf992c4e1b9d99303467
| 1,892
|
py
|
Python
|
pypulseq_cest/simulate.py
|
KerstinHut/pypulseq-cest
|
a81fba9b66de0bb4b32f3df26c2c1d15967948ca
|
[
"MIT"
] | 2
|
2021-05-08T21:25:52.000Z
|
2021-05-08T21:26:33.000Z
|
pypulseq_cest/simulate.py
|
KerstinHut/pypulseq-cest
|
a81fba9b66de0bb4b32f3df26c2c1d15967948ca
|
[
"MIT"
] | 13
|
2021-02-14T08:40:17.000Z
|
2021-10-18T13:16:18.000Z
|
pypulseq_cest/simulate.py
|
KerstinHut/pypulseq-cest
|
a81fba9b66de0bb4b32f3df26c2c1d15967948ca
|
[
"MIT"
] | 2
|
2021-02-27T20:27:18.000Z
|
2021-04-07T14:17:13.000Z
|
"""
simulate.py
"""
from typing import Union
from pathlib import Path
from pySimPulseqSBB import SimPulseqSBB, SimulationParameters
from pypulseq_cest.parser import parse_params, get_zspec
from bmctool.set_params import load_params
from bmctool.utils.eval import plot_z
def simulate(config_file: Union[str, Path],
seq_file: Union[str, Path],
show_plot: bool = False,
**kwargs) \
-> SimulationParameters:
"""
Function to run a pySimPulseqSBB based simulation for given seq and config files.
:param config_file: Path of the config file (can be of type str or Path)
:param seq_file: Path of the seq file (can be of type str or Path)
:param show_plot: flag to switch plotting option on/off
"""
# load the parameters
sp = load_params(config_file)
# parse for C++ handling
sim_params = parse_params(sp=sp, seq_file=seq_file)
# run the simulation
SimPulseqSBB(sim_params, str(seq_file))
# retrieve the calculated magnetization
m_out = sim_params.GetFinalMagnetizationVectors()
if show_plot:
if 'offsets' in kwargs:
offsets = kwargs.pop('offsets')
_, mz = get_zspec(m_out=m_out, sp=sp, seq_file=seq_file)
else:
offsets, mz = get_zspec(m_out=m_out, sp=sp, seq_file=seq_file)
plot_z(mz=mz,
offsets=offsets,
**kwargs)
return sim_params
def sim_example():
"""
Function to run an example simulation.
"""
seq_file = Path(__file__).parent / 'example_library' / 'seq_example.seq'
config_file = Path(__file__).parent / 'example_library' / 'config_example.yaml'
simulate(config_file=config_file,
seq_file=seq_file,
show_plot=True,
normalize=True,
title='Example spectrum')
if __name__ == '__main__':
sim_example()
| 28.238806
| 85
| 0.657505
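A minimal usage sketch for the simulate() function defined above; the file names are placeholders that must point to an existing .yaml config and .seq file, and the extra keyword arguments are forwarded to plot_z just as in sim_example().
from pathlib import Path
from pypulseq_cest.simulate import simulate

sim = simulate(config_file=Path("my_config.yaml"),   # placeholder path
               seq_file=Path("my_sequence.seq"),     # placeholder path
               show_plot=True,
               normalize=True,                       # passed through to plot_z
               title="My Z-spectrum")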
|