Dataset schema. Each record below is one source file: a metadata block, the file content, and a line-statistics summary.

| column | dtype | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 value |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64, nullable | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string, nullable | length 24 |
| max_stars_repo_stars_event_max_datetime | string, nullable | length 24 |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64, nullable | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string, nullable | length 24 |
| max_issues_repo_issues_event_max_datetime | string, nullable | length 24 |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64, nullable | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string, nullable | length 24 |
| max_forks_repo_forks_event_max_datetime | string, nullable | length 24 |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
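The records below follow this schema, one file per record. As a hedged illustration only, a dataset with this layout can be inspected with the Hugging Face `datasets` library; the dataset name here is a placeholder, not the real identifier of this dump:

```python
# Hypothetical: stream a dataset with the schema above and peek at one record's metadata.
from datasets import load_dataset

ds = load_dataset("some-org/python-source-files", split="train", streaming=True)  # placeholder name
row = next(iter(ds))
print(row["max_stars_repo_path"], row["size"], row["max_stars_repo_licenses"])
print(row["content"][:120])
```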
Record 1: mesonbuild/scripts/gettext.py
- hexsha 64c228b63b5583ac0af742fb24d220034b91b3b1, size 5,040 bytes, ext py, lang Python
- max_stars: repo loongarch64/meson @ 673aff3595d04a82d431dc4903c95227481565ca, licenses ["Apache-2.0"], max_stars_count null, star events null
- max_issues: repo loongarch64/meson @ 673aff3595d04a82d431dc4903c95227481565ca, licenses ["Apache-2.0"], max_issues_count null, issue events null
- max_forks: repo loongarch64/meson @ 673aff3595d04a82d431dc4903c95227481565ca, licenses ["Apache-2.0"], max_forks_count 1, fork events 2020-12-07T15:37:35.000Z to 2020-12-07T15:37:35.000Z

Content:
# Copyright 2016 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import shutil
import argparse
import subprocess
from . import destdir_join
import typing as T

parser = argparse.ArgumentParser()
parser.add_argument('command')
parser.add_argument('--pkgname', default='')
parser.add_argument('--datadirs', default='')
parser.add_argument('--langs', default='')
parser.add_argument('--localedir', default='')
parser.add_argument('--subdir', default='')
parser.add_argument('--extra-args', default='')

def read_linguas(src_sub: str) -> T.List[str]:
    # Syntax of this file is documented here:
    # https://www.gnu.org/software/gettext/manual/html_node/po_002fLINGUAS.html
    linguas = os.path.join(src_sub, 'LINGUAS')
    try:
        langs = []
        with open(linguas) as f:
            for line in f:
                line = line.strip()
                if line and not line.startswith('#'):
                    langs += line.split()
        return langs
    except (FileNotFoundError, PermissionError):
        print('Could not find file LINGUAS in {}'.format(src_sub))
        return []

def run_potgen(src_sub: str, pkgname: str, datadirs: str, args: T.List[str]) -> int:
    listfile = os.path.join(src_sub, 'POTFILES.in')
    if not os.path.exists(listfile):
        listfile = os.path.join(src_sub, 'POTFILES')
        if not os.path.exists(listfile):
            print('Could not find file POTFILES in %s' % src_sub)
            return 1

    child_env = os.environ.copy()
    if datadirs:
        child_env['GETTEXTDATADIRS'] = datadirs

    ofile = os.path.join(src_sub, pkgname + '.pot')
    return subprocess.call(['xgettext', '--package-name=' + pkgname, '-p', src_sub, '-f', listfile,
                            '-D', os.environ['MESON_SOURCE_ROOT'], '-k_', '-o', ofile] + args,
                           env=child_env)

def gen_gmo(src_sub: str, bld_sub: str, langs: T.List[str]) -> int:
    for l in langs:
        subprocess.check_call(['msgfmt', os.path.join(src_sub, l + '.po'),
                               '-o', os.path.join(bld_sub, l + '.gmo')])
    return 0

def update_po(src_sub: str, pkgname: str, langs: T.List[str]) -> int:
    potfile = os.path.join(src_sub, pkgname + '.pot')
    for l in langs:
        pofile = os.path.join(src_sub, l + '.po')
        if os.path.exists(pofile):
            subprocess.check_call(['msgmerge', '-q', '-o', pofile, pofile, potfile])
        else:
            subprocess.check_call(['msginit', '--input', potfile, '--output-file', pofile, '--locale', l, '--no-translator'])
    return 0

def do_install(src_sub: str, bld_sub: str, dest: str, pkgname: str, langs: T.List[str]) -> int:
    for l in langs:
        srcfile = os.path.join(bld_sub, l + '.gmo')
        outfile = os.path.join(dest, l, 'LC_MESSAGES',
                               pkgname + '.mo')
        tempfile = outfile + '.tmp'
        os.makedirs(os.path.dirname(outfile), exist_ok=True)
        shutil.copy2(srcfile, tempfile)
        os.replace(tempfile, outfile)
        if not os.getenv('MESON_INSTALL_QUIET', False):
            print('Installing %s to %s' % (srcfile, outfile))
    return 0

def run(args: T.List[str]) -> int:
    options = parser.parse_args(args)
    subcmd = options.command
    langs = options.langs.split('@@') if options.langs else None
    extra_args = options.extra_args.split('@@') if options.extra_args else []

    subdir = os.environ.get('MESON_SUBDIR', '')
    if options.subdir:
        subdir = options.subdir
    src_sub = os.path.join(os.environ['MESON_SOURCE_ROOT'], subdir)
    bld_sub = os.path.join(os.environ['MESON_BUILD_ROOT'], subdir)

    if not langs:
        langs = read_linguas(src_sub)

    if subcmd == 'pot':
        return run_potgen(src_sub, options.pkgname, options.datadirs, extra_args)
    elif subcmd == 'gen_gmo':
        return gen_gmo(src_sub, bld_sub, langs)
    elif subcmd == 'update_po':
        if run_potgen(src_sub, options.pkgname, options.datadirs, extra_args) != 0:
            return 1
        return update_po(src_sub, options.pkgname, langs)
    elif subcmd == 'install':
        destdir = os.environ.get('DESTDIR', '')
        dest = destdir_join(destdir, os.path.join(os.environ['MESON_INSTALL_PREFIX'],
                                                  options.localedir))
        if gen_gmo(src_sub, bld_sub, langs) != 0:
            return 1
        do_install(src_sub, bld_sub, dest, options.pkgname, langs)
    else:
        print('Unknown subcommand.')
        return 1
    return 0
File statistics for mesonbuild/scripts/gettext.py: avg_line_length 40, max_line_length 125, alphanum_fraction 0.626389.
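A minimal invocation sketch for the script above. Meson normally runs this module itself, so the environment variables, package name, and subdirectory below are hypothetical stand-ins:

```python
# Hypothetical: regenerate the POT file the way Meson's "pot" target would.
import os

os.environ.setdefault('MESON_SOURCE_ROOT', '/path/to/source')  # normally set by Meson
os.environ.setdefault('MESON_BUILD_ROOT', '/path/to/build')    # normally set by Meson

from mesonbuild.scripts import gettext as gettext_script

exit_code = gettext_script.run(['pot', '--pkgname=myapp', '--subdir=po'])
print('xgettext returned', exit_code)
```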
Record 2: backend/pharmacy/api/views/__init__.py
- hexsha 388612e98d0613abc9fdf403f4d8b0d3f785cc4e, size 484 bytes, ext py, lang Python
- max_stars: repo FreakStar03/MedAlthea @ c11f6239baba9dc9514fc9b88adabfcab05f3d99, licenses ["BSD-3-Clause"], max_stars_count 4, star events 2022-01-28T13:05:07.000Z to 2022-01-31T12:24:56.000Z
- max_issues: repo FreakStar03/MedAlthea @ c11f6239baba9dc9514fc9b88adabfcab05f3d99, licenses ["BSD-3-Clause"], max_issues_count 6, issue events 2022-01-30T11:53:31.000Z to 2022-02-02T06:17:30.000Z
- max_forks: repo FreakStar03/MedAlthea @ c11f6239baba9dc9514fc9b88adabfcab05f3d99, licenses ["BSD-3-Clause"], max_forks_count 3, fork events 2022-01-28T13:41:03.000Z to 2022-01-30T12:23:11.000Z

Content:
# pylint: disable=missing-module-docstring
#
# Copyright (C) 2022 by YadavGulshan@Github, < https://github.com/YadavGulshan >.
#
# This file is part of < https://github.com/Yadavgulshan/pharmaService > project,
# and is released under the "BSD 3-Clause License Agreement".
# Please see < https://github.com/YadavGulshan/pharmaService/blob/master/LICENCE >
#
# All rights reserved.
from .medical import Medical
from .medicine import MedicineClass
from .userActions import UserAction
File statistics for backend/pharmacy/api/views/__init__.py: avg_line_length 32.266667, max_line_length 82, alphanum_fraction 0.768595.
Record 3: quick_email/builder.py
- hexsha ab57134ffe6fb986ac5e03e42879e3df291303d7, size 3,932 bytes, ext py, lang Python
- max_stars: repo murrple-1/send-email-python @ c32463a785376daf75575e967acd57b1e3cd5587, licenses ["MIT"], max_stars_count 1, star events 2018-04-19T13:10:55.000Z to 2018-04-19T13:10:55.000Z
- max_issues: repo murrple-1/quick-email-python @ c32463a785376daf75575e967acd57b1e3cd5587, licenses ["MIT"], max_issues_count null, issue events null
- max_forks: repo murrple-1/quick-email-python @ c32463a785376daf75575e967acd57b1e3cd5587, licenses ["MIT"], max_forks_count null, fork events null

Content:
import mimetypes

import six

from email import encoders
from email.mime.audio import MIMEAudio
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.utils import COMMASPACE, parseaddr, formataddr


class Attachment(object):
    def __init__(self, filename, bytes_):
        self.filename = filename
        self.bytes = bytes_


def build_msg(send_from, subject, send_to=None, send_cc=None, send_bcc=None, plain_text=None, html_text=None, attachment_list=None, inline_attachment_dict=None):
    assert send_to or send_cc or send_bcc, u'At least one of send_to, send_cc, or send_bcc must exist'
    assert plain_text or html_text, u'At least one of plain_text or html_text must exist'

    msg = MIMEMultipart(u'mixed')
    msg[u'Subject'] = subject
    msg[u'From'] = to_address_string(send_from)
    if send_to:
        msg[u'To'] = to_address_string_list(send_to)
    if send_cc:
        msg[u'CC'] = to_address_string_list(send_cc)
    if send_bcc:
        msg[u'BCC'] = to_address_string_list(send_bcc)

    text_msg = MIMEMultipart(u'alternative')
    if plain_text:
        if isinstance(plain_text, six.text_type):
            text_msg.attach(MIMEText(plain_text, u'plain', u'utf-8'))
        else:
            text_msg.attach(MIMEText(plain_text.decode(), u'plain'))
    if html_text:
        if isinstance(html_text, six.text_type):
            text_msg.attach(MIMEText(html_text, u'html', u'utf-8'))
        else:
            text_msg.attach(MIMEText(html_text.decode(), u'html'))
    msg.attach(text_msg)

    if attachment_list is not None:
        for attachment in attachment_list:
            type, encoding = mimetypes.guess_type(attachment.filename)
            if type is None or encoding is not None:
                type = u'application/octet-stream'
            main_type, sub_type = type.split(u'/', 1)
            part = None
            if main_type == u'text':
                part = MIMEText(attachment.bytes.decode(), sub_type)
            elif main_type == u'image':
                part = MIMEImage(attachment.bytes, sub_type)
            elif main_type == u'audio':
                part = MIMEAudio(attachment.bytes, sub_type)
            else:
                part = MIMEBase(main_type, sub_type)
                part.set_payload(attachment.bytes)
                encoders.encode_base64(part)
            part.add_header(u'Content-Disposition',
                            u'attachment', filename=attachment.filename)
            msg.attach(part)

    if inline_attachment_dict is not None:
        for content_id, attachment in six.iteritems(inline_attachment_dict):
            type, encoding = mimetypes.guess_type(attachment.filename)
            if type is None or encoding is not None:
                type = u'application/octet-stream'
            main_type, sub_type = type.split('/', 1)
            part = None
            if main_type == u'image':
                part = MIMEImage(attachment.bytes, sub_type)
            else:
                raise RuntimeError(u'inline attachment must be an \'image\'')
            part.add_header(u'Content-Disposition', u'inline',
                            filename=attachment.filename)
            part.add_header(
                u'Content-ID', u'<{content_id}>'.format(content_id=content_id))
            part.add_header(u'X-Attachment-Id', content_id)
            msg.attach(part)

    return msg


def to_address_string(address):
    if isinstance(address, six.string_types):
        return formataddr(parseaddr(address))
    else:
        return formataddr(address)


def to_address_string_list(addresses):
    if isinstance(addresses, six.string_types) or type(addresses) is tuple:
        return to_address_string(addresses)
    else:
        return COMMASPACE.join(to_address_string(addr) for addr in addresses)
File statistics for quick_email/builder.py: avg_line_length 34.491228, max_line_length 161, alphanum_fraction 0.64293.
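A hedged usage sketch for build_msg above; the addresses, subject, and attachment contents are invented for illustration:

```python
# Hypothetical: assemble a multipart message with one text attachment.
from quick_email import builder

msg = builder.build_msg(
    send_from=u'Sender <sender@example.com>',
    subject=u'Hello',
    send_to=[u'Alice <alice@example.com>', u'bob@example.com'],
    plain_text=u'Hi there',
    html_text=u'<p>Hi there</p>',
    attachment_list=[builder.Attachment(u'notes.txt', b'some bytes')],
)
print(msg[u'To'])              # Alice <alice@example.com>, bob@example.com
print(msg.as_string()[:120])   # raw MIME output
```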
Record 4: labml_nn/normalization/weight_standardization/experiment.py
- hexsha 22e64c40e5585649a3a7f9dbecd216028c5aa85c, size 2,233 bytes, ext py, lang Python
- max_stars: repo dongfangyixi/annotated_deep_learning_paper_implementations @ d4b82dd2d3ff3230b060a2c8332f35158397cfdb, licenses ["MIT"], max_stars_count 1, star events 2021-05-23T23:37:09.000Z to 2021-05-23T23:37:09.000Z
- max_issues: repo sachdevkartik/nn @ a63f8f307ad5eefbcb537c69c63cea15a6208fb9, licenses ["MIT"], max_issues_count null, issue events null
- max_forks: repo sachdevkartik/nn @ a63f8f307ad5eefbcb537c69c63cea15a6208fb9, licenses ["MIT"], max_forks_count 2, fork events 2021-06-16T05:56:35.000Z to 2021-10-19T07:33:44.000Z

Content:
"""
---
title: CIFAR10 Experiment to try Weight Standardization and Batch-Channel Normalization
summary: >
This trains is a VGG net that uses weight standardization and batch-channel normalization
to classify CIFAR10 images.
---
# CIFAR10 Experiment to try Weight Standardization and Batch-Channel Normalization
"""
import torch.nn as nn
from labml import experiment
from labml.configs import option
from labml_helpers.module import Module
from labml_nn.experiments.cifar10 import CIFAR10Configs
from labml_nn.normalization.batch_channel_norm import BatchChannelNorm
from labml_nn.normalization.weight_standardization.conv2d import Conv2d
class Model(Module):
"""
### Model
A VGG model that use [Weight Standardization](./index.html) and
[Batch-Channel Normalization](../batch_channel_norm/index.html).
"""
def __init__(self):
super().__init__()
layers = []
in_channels = 3
for block in [[64, 64], [128, 128], [256, 256, 256], [512, 512, 512], [512, 512, 512]]:
for channels in block:
layers += [Conv2d(in_channels, channels, kernel_size=3, padding=1),
BatchChannelNorm(channels, 32),
nn.ReLU(inplace=True)]
in_channels = channels
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
layers += [nn.AvgPool2d(kernel_size=1, stride=1)]
self.layers = nn.Sequential(*layers)
self.fc = nn.Linear(512, 10)
def __call__(self, x):
x = self.layers(x)
x = x.view(x.shape[0], -1)
return self.fc(x)
@option(CIFAR10Configs.model)
def model(c: CIFAR10Configs):
"""
### Create model
"""
return Model().to(c.device)
def main():
# Create experiment
experiment.create(name='cifar10', comment='weight standardization')
# Create configurations
conf = CIFAR10Configs()
# Load configurations
experiment.configs(conf, {
'optimizer.optimizer': 'Adam',
'optimizer.learning_rate': 2.5e-4,
'train_batch_size': 64,
})
# Start the experiment and run the training loop
with experiment.start():
conf.run()
#
if __name__ == '__main__':
main()
File statistics for labml_nn/normalization/weight_standardization/experiment.py: avg_line_length 29, max_line_length 95, alphanum_fraction 0.647112.
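A quick shape check of the Model defined above (assumes labml_nn and its dependencies are installed; the batch size is arbitrary):

```python
# Hypothetical sanity check: a CIFAR-10 sized batch maps to 10 class scores.
import torch
from labml_nn.normalization.weight_standardization.experiment import Model

model = Model()
out = model(torch.randn(4, 3, 32, 32))   # five max-pool stages: 32 -> 16 -> 8 -> 4 -> 2 -> 1
print(out.shape)                         # torch.Size([4, 10])
```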
Record 5: 1047.py
- hexsha a9220e82329e5f66f6f32ef37040654614064f68, size 258 bytes, ext py, lang Python
- max_stars: repo Valarr/Uri @ 807de771b14b0e60d44b23835ad9ee7423c83471, licenses ["MIT"], max_stars_count null, star events null
- max_issues: repo Valarr/Uri @ 807de771b14b0e60d44b23835ad9ee7423c83471, licenses ["MIT"], max_issues_count null, issue events null
- max_forks: repo Valarr/Uri @ 807de771b14b0e60d44b23835ad9ee7423c83471, licenses ["MIT"], max_forks_count null, fork events null

Content:
a, d, c ,e = map(int, input().split())
tot = e-d
b = c-a
if(b<0):
    b = 24+(b)
if(tot<0):
    tot=60+(tot)
    b-=1
if(a==c and d==e):
    print("O JOGO DUROU 24 HORA(S) E 0 MINUTO(S)")
else:
    print("O JOGO DUROU",b,"HORA(S) E",abs(tot),"MINUTO(S)")
File statistics for 1047.py: avg_line_length 19.846154, max_line_length 60, alphanum_fraction 0.507752.
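To make the variable naming above concrete: the four inputs are start hour, start minute, end hour, end minute. For the made-up input `7 8 9 10` (a game from 7:08 to 9:10), b = 9 - 7 = 2 and tot = 10 - 8 = 2, neither value is negative, and the start and end times differ, so the script prints `O JOGO DUROU 2 HORA(S) E 2 MINUTO(S)`.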
Record 6: utils/common.py
- hexsha 937a19131003f6264a14fb427d28f70f038a7235, size 3,173 bytes, ext py, lang Python
- max_stars: repo magicaltoast/reddit-attractiveness-dataset @ 4aa127cb64596b88577e06fe3f5e855423e1d83d, licenses ["MIT"], max_stars_count 2, star events 2022-02-17T21:40:30.000Z to 2022-02-18T00:52:37.000Z
- max_issues: repo magicaltoast/reddit-attractiveness-dataset @ 4aa127cb64596b88577e06fe3f5e855423e1d83d, licenses ["MIT"], max_issues_count null, issue events null
- max_forks: repo magicaltoast/reddit-attractiveness-dataset @ 4aa127cb64596b88577e06fe3f5e855423e1d83d, licenses ["MIT"], max_forks_count null, fork events null

Content:
from psaw import PushshiftAPI
from datetime import datetime
from queue import Queue
from tqdm.auto import tqdm
from threading import Thread
from praw import Reddit
import re
import requests
import numpy as np
import cv2
import json
from pathlib import Path

from config import REDDIT_AUTH

img_ext = set([".png", ".jpg", ".jpeg"])

r = Reddit(
    **REDDIT_AUTH
)

api = PushshiftAPI(r)

DONE = object()


def worker(queue, subreddit, limit, time_cache):
    time_cache.mkdir(exist_ok=True)
    file_path = time_cache / subreddit
    if file_path.exists():
        with open(file_path) as f:
            lines = f.readlines()
            before = int(lines[-2].strip())
            print(before)
    else:
        before = int(datetime.now().timestamp())

    with open(file_path, "a") as f:
        while True:
            for _ in range(10):
                try:
                    posts = api.search_submissions(
                        before=before, subreddit=subreddit, limit=limit
                    )
                    break
                except Exception as E:
                    print(E)
                    continue

            first = next(posts, None)
            if not first:
                queue.put(DONE)
                return

            queue.put(first)
            f.write(str(int(first.created_utc)) + "\n")
            f.flush()

            for post in posts:
                queue.put(post)
                last = post

            before = int(last.created_utc)


def iter_posts(subreddit, queue_size, time_cache=Path("time-cache/")):
    q = Queue(queue_size)
    t = Thread(
        target=worker,
        args=[q, subreddit, 500, time_cache],
    )
    t.start()

    p = tqdm(desc=subreddit)
    while (item := q.get()) is not DONE:
        yield item
        p.update()
        p.set_description(f"{subreddit} {datetime.fromtimestamp(item.created_utc)}")


def load_data(path):
    if path.exists():
        with open(path) as f:
            return json.load(f)
    return {}


class NumpyEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        if isinstance(obj, np.float32):
            return float(obj)
        if isinstance(obj, np.int64):
            return int(obj)
        return json.JSONEncoder.default(self, obj)


def save_data(path, obj):
    with open(path, "w") as f:
        json.dump(obj, f, cls=NumpyEncoder)


def download_img_from_url(url):
    r = requests.get(url, stream=True).raw
    img = np.asarray(bytearray(r.read()), np.uint8)
    img = cv2.imdecode(img, cv2.IMREAD_COLOR)
    if img is not None:
        return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)


def download_single_img(post):
    if Path(post.url).suffix not in img_ext:
        return
    return download_img_from_url(post.url)


def download_multiple_images(post):
    image = download_single_img(post)
    if image is not None:
        return [(image, post.url)]

    images = []
    for media in getattr(post, "media_metadata", {}).values():
        url = media["p"][-1]["u"]
        images.append((download_img_from_url(url), url))
    return images
File statistics for utils/common.py: avg_line_length 24.221374, max_line_length 84, alphanum_fraction 0.587142.
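A small round-trip sketch for the JSON helpers above. Note that importing this module builds a Reddit client at import time, so config.REDDIT_AUTH must be available; the file name below is made up:

```python
# Hypothetical: NumpyEncoder lets numpy values pass through json.dump as plain JSON types.
from pathlib import Path
import numpy as np
from utils.common import save_data, load_data

path = Path("scores.json")
save_data(path, {"embedding": np.arange(3, dtype=np.float32), "count": np.int64(7)})
print(load_data(path))   # {'embedding': [0.0, 1.0, 2.0], 'count': 7}
```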
Record 7: src/oci/devops/models/update_oke_helm_chart_deploy_stage_details.py
- hexsha 62240657bebacdf2f10e51f406f51c5af92de47b, size 11,679 bytes, ext py, lang Python
- max_stars: repo pabs3/oci-python-sdk @ 437ba18ce39af2d1090e277c4bb8750c89f83021, licenses ["Apache-2.0", "BSD-3-Clause"], max_stars_count null, star events null
- max_issues: repo pabs3/oci-python-sdk @ 437ba18ce39af2d1090e277c4bb8750c89f83021, licenses ["Apache-2.0", "BSD-3-Clause"], max_issues_count null, issue events null
- max_forks: repo pabs3/oci-python-sdk @ 437ba18ce39af2d1090e277c4bb8750c89f83021, licenses ["Apache-2.0", "BSD-3-Clause"], max_forks_count null, fork events null

Content:
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .update_deploy_stage_details import UpdateDeployStageDetails
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class UpdateOkeHelmChartDeployStageDetails(UpdateDeployStageDetails):
"""
Specifies the Kubernetes cluster deployment stage.
"""
def __init__(self, **kwargs):
"""
Initializes a new UpdateOkeHelmChartDeployStageDetails object with values from keyword arguments. The default value of the :py:attr:`~oci.devops.models.UpdateOkeHelmChartDeployStageDetails.deploy_stage_type` attribute
of this class is ``OKE_HELM_CHART_DEPLOYMENT`` and it should not be changed.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param description:
The value to assign to the description property of this UpdateOkeHelmChartDeployStageDetails.
:type description: str
:param display_name:
The value to assign to the display_name property of this UpdateOkeHelmChartDeployStageDetails.
:type display_name: str
:param deploy_stage_type:
The value to assign to the deploy_stage_type property of this UpdateOkeHelmChartDeployStageDetails.
:type deploy_stage_type: str
:param deploy_stage_predecessor_collection:
The value to assign to the deploy_stage_predecessor_collection property of this UpdateOkeHelmChartDeployStageDetails.
:type deploy_stage_predecessor_collection: oci.devops.models.DeployStagePredecessorCollection
:param freeform_tags:
The value to assign to the freeform_tags property of this UpdateOkeHelmChartDeployStageDetails.
:type freeform_tags: dict(str, str)
:param defined_tags:
The value to assign to the defined_tags property of this UpdateOkeHelmChartDeployStageDetails.
:type defined_tags: dict(str, dict(str, object))
:param oke_cluster_deploy_environment_id:
The value to assign to the oke_cluster_deploy_environment_id property of this UpdateOkeHelmChartDeployStageDetails.
:type oke_cluster_deploy_environment_id: str
:param helm_chart_deploy_artifact_id:
The value to assign to the helm_chart_deploy_artifact_id property of this UpdateOkeHelmChartDeployStageDetails.
:type helm_chart_deploy_artifact_id: str
:param values_artifact_ids:
The value to assign to the values_artifact_ids property of this UpdateOkeHelmChartDeployStageDetails.
:type values_artifact_ids: list[str]
:param release_name:
The value to assign to the release_name property of this UpdateOkeHelmChartDeployStageDetails.
:type release_name: str
:param namespace:
The value to assign to the namespace property of this UpdateOkeHelmChartDeployStageDetails.
:type namespace: str
:param timeout_in_seconds:
The value to assign to the timeout_in_seconds property of this UpdateOkeHelmChartDeployStageDetails.
:type timeout_in_seconds: int
:param rollback_policy:
The value to assign to the rollback_policy property of this UpdateOkeHelmChartDeployStageDetails.
:type rollback_policy: oci.devops.models.DeployStageRollbackPolicy
"""
self.swagger_types = {
'description': 'str',
'display_name': 'str',
'deploy_stage_type': 'str',
'deploy_stage_predecessor_collection': 'DeployStagePredecessorCollection',
'freeform_tags': 'dict(str, str)',
'defined_tags': 'dict(str, dict(str, object))',
'oke_cluster_deploy_environment_id': 'str',
'helm_chart_deploy_artifact_id': 'str',
'values_artifact_ids': 'list[str]',
'release_name': 'str',
'namespace': 'str',
'timeout_in_seconds': 'int',
'rollback_policy': 'DeployStageRollbackPolicy'
}
self.attribute_map = {
'description': 'description',
'display_name': 'displayName',
'deploy_stage_type': 'deployStageType',
'deploy_stage_predecessor_collection': 'deployStagePredecessorCollection',
'freeform_tags': 'freeformTags',
'defined_tags': 'definedTags',
'oke_cluster_deploy_environment_id': 'okeClusterDeployEnvironmentId',
'helm_chart_deploy_artifact_id': 'helmChartDeployArtifactId',
'values_artifact_ids': 'valuesArtifactIds',
'release_name': 'releaseName',
'namespace': 'namespace',
'timeout_in_seconds': 'timeoutInSeconds',
'rollback_policy': 'rollbackPolicy'
}
self._description = None
self._display_name = None
self._deploy_stage_type = None
self._deploy_stage_predecessor_collection = None
self._freeform_tags = None
self._defined_tags = None
self._oke_cluster_deploy_environment_id = None
self._helm_chart_deploy_artifact_id = None
self._values_artifact_ids = None
self._release_name = None
self._namespace = None
self._timeout_in_seconds = None
self._rollback_policy = None
self._deploy_stage_type = 'OKE_HELM_CHART_DEPLOYMENT'
@property
def oke_cluster_deploy_environment_id(self):
"""
Gets the oke_cluster_deploy_environment_id of this UpdateOkeHelmChartDeployStageDetails.
Kubernetes cluster environment OCID for deployment.
:return: The oke_cluster_deploy_environment_id of this UpdateOkeHelmChartDeployStageDetails.
:rtype: str
"""
return self._oke_cluster_deploy_environment_id
@oke_cluster_deploy_environment_id.setter
def oke_cluster_deploy_environment_id(self, oke_cluster_deploy_environment_id):
"""
Sets the oke_cluster_deploy_environment_id of this UpdateOkeHelmChartDeployStageDetails.
Kubernetes cluster environment OCID for deployment.
:param oke_cluster_deploy_environment_id: The oke_cluster_deploy_environment_id of this UpdateOkeHelmChartDeployStageDetails.
:type: str
"""
self._oke_cluster_deploy_environment_id = oke_cluster_deploy_environment_id
@property
def helm_chart_deploy_artifact_id(self):
"""
Gets the helm_chart_deploy_artifact_id of this UpdateOkeHelmChartDeployStageDetails.
Helm chart artifact OCID.
:return: The helm_chart_deploy_artifact_id of this UpdateOkeHelmChartDeployStageDetails.
:rtype: str
"""
return self._helm_chart_deploy_artifact_id
@helm_chart_deploy_artifact_id.setter
def helm_chart_deploy_artifact_id(self, helm_chart_deploy_artifact_id):
"""
Sets the helm_chart_deploy_artifact_id of this UpdateOkeHelmChartDeployStageDetails.
Helm chart artifact OCID.
:param helm_chart_deploy_artifact_id: The helm_chart_deploy_artifact_id of this UpdateOkeHelmChartDeployStageDetails.
:type: str
"""
self._helm_chart_deploy_artifact_id = helm_chart_deploy_artifact_id
@property
def values_artifact_ids(self):
"""
Gets the values_artifact_ids of this UpdateOkeHelmChartDeployStageDetails.
List of values.yaml file artifact OCIDs.
:return: The values_artifact_ids of this UpdateOkeHelmChartDeployStageDetails.
:rtype: list[str]
"""
return self._values_artifact_ids
@values_artifact_ids.setter
def values_artifact_ids(self, values_artifact_ids):
"""
Sets the values_artifact_ids of this UpdateOkeHelmChartDeployStageDetails.
List of values.yaml file artifact OCIDs.
:param values_artifact_ids: The values_artifact_ids of this UpdateOkeHelmChartDeployStageDetails.
:type: list[str]
"""
self._values_artifact_ids = values_artifact_ids
@property
def release_name(self):
"""
Gets the release_name of this UpdateOkeHelmChartDeployStageDetails.
Name of the Helm chart release.
:return: The release_name of this UpdateOkeHelmChartDeployStageDetails.
:rtype: str
"""
return self._release_name
@release_name.setter
def release_name(self, release_name):
"""
Sets the release_name of this UpdateOkeHelmChartDeployStageDetails.
Name of the Helm chart release.
:param release_name: The release_name of this UpdateOkeHelmChartDeployStageDetails.
:type: str
"""
self._release_name = release_name
@property
def namespace(self):
"""
Gets the namespace of this UpdateOkeHelmChartDeployStageDetails.
Default namespace to be used for Kubernetes deployment when not specified in the manifest.
:return: The namespace of this UpdateOkeHelmChartDeployStageDetails.
:rtype: str
"""
return self._namespace
@namespace.setter
def namespace(self, namespace):
"""
Sets the namespace of this UpdateOkeHelmChartDeployStageDetails.
Default namespace to be used for Kubernetes deployment when not specified in the manifest.
:param namespace: The namespace of this UpdateOkeHelmChartDeployStageDetails.
:type: str
"""
self._namespace = namespace
@property
def timeout_in_seconds(self):
"""
Gets the timeout_in_seconds of this UpdateOkeHelmChartDeployStageDetails.
Time to wait for execution of a helm stage. Defaults to 300 seconds.
:return: The timeout_in_seconds of this UpdateOkeHelmChartDeployStageDetails.
:rtype: int
"""
return self._timeout_in_seconds
@timeout_in_seconds.setter
def timeout_in_seconds(self, timeout_in_seconds):
"""
Sets the timeout_in_seconds of this UpdateOkeHelmChartDeployStageDetails.
Time to wait for execution of a helm stage. Defaults to 300 seconds.
:param timeout_in_seconds: The timeout_in_seconds of this UpdateOkeHelmChartDeployStageDetails.
:type: int
"""
self._timeout_in_seconds = timeout_in_seconds
@property
def rollback_policy(self):
"""
Gets the rollback_policy of this UpdateOkeHelmChartDeployStageDetails.
:return: The rollback_policy of this UpdateOkeHelmChartDeployStageDetails.
:rtype: oci.devops.models.DeployStageRollbackPolicy
"""
return self._rollback_policy
@rollback_policy.setter
def rollback_policy(self, rollback_policy):
"""
Sets the rollback_policy of this UpdateOkeHelmChartDeployStageDetails.
:param rollback_policy: The rollback_policy of this UpdateOkeHelmChartDeployStageDetails.
:type: oci.devops.models.DeployStageRollbackPolicy
"""
self._rollback_policy = rollback_policy
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
File statistics for src/oci/devops/models/update_oke_helm_chart_deploy_stage_details.py: avg_line_length 39.323232, max_line_length 245, alphanum_fraction 0.706567.
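A hedged construction sketch based on the keyword arguments documented above; the field values are placeholders, not real OCIDs or stage settings:

```python
# Hypothetical: populate a few fields of the update-stage model and print it.
from oci.devops.models import UpdateOkeHelmChartDeployStageDetails

details = UpdateOkeHelmChartDeployStageDetails(
    display_name='deploy-helm-chart',   # placeholder values
    release_name='my-release',
    namespace='default',
    timeout_in_seconds=600,
)
print(details)   # __repr__ renders the model as a formatted dict
```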
Record 8: celloracle_streamlit/streamlit_perturb_simulation_app/__init__.py
- hexsha b46c7dcf3d3c5ddbba90e21666ffbac8af4a2f4e, size 74 bytes, ext py, lang Python
- max_stars: repo morris-lab/CellOracle_streamlit @ bf784215cafc5f826a9457def897dc22526bad8c, licenses ["Apache-2.0"], max_stars_count null, star events null
- max_issues: repo morris-lab/CellOracle_streamlit @ bf784215cafc5f826a9457def897dc22526bad8c, licenses ["Apache-2.0"], max_issues_count null, issue events null
- max_forks: repo morris-lab/CellOracle_streamlit @ bf784215cafc5f826a9457def897dc22526bad8c, licenses ["Apache-2.0"], max_forks_count null, fork events null

Content:
# -*- coding: utf-8 -*-
#from . import motif_analysis
__all__ = []
File statistics for celloracle_streamlit/streamlit_perturb_simulation_app/__init__.py: avg_line_length 6.727273, max_line_length 29, alphanum_fraction 0.554054.
Record 9: examples/scripts/image_classification/run_enas.py
- hexsha 03cfdffcf6e5e07138d772292a9527dab0bf3e14, size 3,395 bytes, ext py, lang Python
- max_stars: repo FeynmanDNA/singa-auto @ e96982adc689335a323a5a32d03b23942e01d09f, licenses ["Apache-2.0"], max_stars_count 1, star events 2020-05-11T02:19:47.000Z to 2020-05-11T02:19:47.000Z
- max_issues: repo FeynmanDNA/singa-auto @ e96982adc689335a323a5a32d03b23942e01d09f, licenses ["Apache-2.0"], max_issues_count null, issue events null
- max_forks: repo FeynmanDNA/singa-auto @ e96982adc689335a323a5a32d03b23942e01d09f, licenses ["Apache-2.0"], max_forks_count null, fork events null

Content:
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

import os
import argparse
from pprint import pprint

from singa_auto.client import Client
from singa_auto.config import SUPERADMIN_EMAIL
from singa_auto.constants import BudgetOption, ModelDependency
from examples.scripts.utils import gen_id
from examples.datasets.image_files.load_cifar10 import load_cifar10


def run_enas(client, train_dataset_path, val_dataset_path, gpus, hours):
    '''
    Conducts training of model `TfEnas` on the CIFAR-10 dataset for IMAGE_CLASSIFICATION.
    Demonstrates architecture tuning with ENAS on SINGA-Auto.
    '''
    task = 'IMAGE_CLASSIFICATION'

    app_id = gen_id()
    app = 'cifar10_enas_{}'.format(app_id)
    model_name = 'TfEnas_{}'.format(app_id)

    print('Preprocessing datasets...')
    load_cifar10(train_dataset_path, val_dataset_path)

    print('Creating & uploading datasets onto SINGA-Auto...')
    train_dataset = client.create_dataset('{}_train'.format(app), task, train_dataset_path)
    pprint(train_dataset)
    val_dataset = client.create_dataset('{}_val'.format(app), task, val_dataset_path)
    pprint(val_dataset)

    print('Creating model...')
    model = client.create_model(
        name=model_name,
        task='IMAGE_CLASSIFICATION',
        model_file_path='examples/models/image_classification/TfEnas.py',
        model_class='TfEnas',
        dependencies={ModelDependency.TENSORFLOW: '1.12.0'}
    )
    pprint(model)

    print('Creating train job...')
    budget = {
        BudgetOption.TIME_HOURS: hours,
        BudgetOption.GPU_COUNT: gpus
    }
    train_job = client.create_train_job(app, task, train_dataset['id'], val_dataset['id'], budget, models=[model['id']])
    pprint(train_job)

    print('Monitor the train job on SINGA-Auto Web Admin')

    # TODO: Evaluate on test dataset?


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--email', type=str, default=SUPERADMIN_EMAIL, help='Email of user')
    parser.add_argument('--password', type=str, default=os.environ.get('SUPERADMIN_PASSWORD'), help='Password of user')
    parser.add_argument('--gpus', type=int, default=0, help='How many GPUs to use')
    parser.add_argument('--hours', type=float, default=24, help='How long the train job should run for (in hours)')
    out_train_dataset_path = 'data/cifar10_train.zip'
    out_val_dataset_path = 'data/cifar10_val.zip'
    (args, _) = parser.parse_known_args()

    # Initialize client
    client = Client()
    client.login(email=args.email, password=args.password)

    run_enas(client, out_train_dataset_path, out_val_dataset_path, args.gpus, args.hours)
File statistics for examples/scripts/image_classification/run_enas.py: avg_line_length 38.579545, max_line_length 120, alphanum_fraction 0.731075.
Record 10: mindspore/python/mindspore/dataset/text/validators.py
- hexsha 220fd3ec782e50072ef214b833c5317f2f6d038a, size 22,865 bytes, ext py, lang Python
- max_stars: repo zimo-geek/mindspore @ 665ec683d4af85c71b2a1f0d6829356f2bc0e1ff, licenses ["Apache-2.0"], max_stars_count 1, star events 2021-12-27T13:42:29.000Z to 2021-12-27T13:42:29.000Z
- max_issues: repo zimo-geek/mindspore @ 665ec683d4af85c71b2a1f0d6829356f2bc0e1ff, licenses ["Apache-2.0"], max_issues_count null, issue events null
- max_forks: repo zimo-geek/mindspore @ 665ec683d4af85c71b2a1f0d6829356f2bc0e1ff, licenses ["Apache-2.0"], max_forks_count null, fork events null

Content:
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
validators for text ops
"""
from functools import wraps
import mindspore._c_dataengine as cde
import mindspore.common.dtype as mstype
from mindspore._c_expression import typing
import mindspore.dataset.text as text
from ..core.validator_helpers import parse_user_args, type_check, type_check_list, check_uint32, \
INT32_MAX, check_value, check_positive, check_pos_int32, check_filename, check_non_negative_int32
def check_unique_list_of_words(words, arg_name):
"""Check that words is a list and each element is a str without any duplication"""
type_check(words, (list,), arg_name)
words_set = set()
for word in words:
type_check(word, (str,), arg_name)
if word in words_set:
raise ValueError(arg_name + " contains duplicate word: " + word + ".")
words_set.add(word)
return words_set
def check_lookup(method):
"""A wrapper that wraps a parameter checker to the original function."""
@wraps(method)
def new_method(self, *args, **kwargs):
[vocab, unknown_token, data_type], _ = parse_user_args(method, *args, **kwargs)
if unknown_token is not None:
type_check(unknown_token, (str,), "unknown_token")
type_check(vocab, (text.Vocab,), "vocab is not an instance of text.Vocab.")
type_check(vocab.c_vocab, (cde.Vocab,), "vocab.c_vocab is not an instance of cde.Vocab.")
type_check(data_type, (typing.Type,), "data_type")
return method(self, *args, **kwargs)
return new_method
def check_from_file(method):
"""A wrapper that wraps a parameter checker to the original function."""
@wraps(method)
def new_method(self, *args, **kwargs):
[file_path, delimiter, vocab_size, special_tokens, special_first], _ = parse_user_args(method, *args,
**kwargs)
if special_tokens is not None:
check_unique_list_of_words(special_tokens, "special_tokens")
type_check_list([file_path, delimiter], (str,), ["file_path", "delimiter"])
if vocab_size is not None:
check_positive(vocab_size, "vocab_size")
type_check(special_first, (bool,), special_first)
return method(self, *args, **kwargs)
return new_method
def check_vocab(method):
"""A wrapper that wraps a parameter checker to the original function."""
@wraps(method)
def new_method(self, *args, **kwargs):
[vocab], _ = parse_user_args(method, *args, **kwargs)
if not isinstance(vocab, cde.Vocab):
type_error = "Input vocab is not an instance of cde.Vocab, got type {0}. ".format(type(vocab))
suggestion = "Use Vocab.from_dataset(), Vocab.from_list(), Vocab.from_file() or Vocab.from_dict() " \
"to build a vocab."
raise TypeError(type_error + suggestion)
return method(self, *args, **kwargs)
return new_method
def check_tokens_to_ids(method):
"""A wrapper that wraps a parameter checker to the original function."""
@wraps(method)
def new_method(self, *args, **kwargs):
[tokens], _ = parse_user_args(method, *args, **kwargs)
type_check(tokens, (str, list), "tokens")
if isinstance(tokens, list):
param_names = ["tokens[{0}]".format(i) for i in range(len(tokens))]
type_check_list(tokens, (str,), param_names)
return method(self, *args, **kwargs)
return new_method
def check_ids_to_tokens(method):
"""A wrapper that wraps a parameter checker to the original function."""
@wraps(method)
def new_method(self, *args, **kwargs):
[ids], _ = parse_user_args(method, *args, **kwargs)
type_check(ids, (int, list), "ids")
if isinstance(ids, list):
param_names = ["ids[{0}]".format(i) for i in range(len(ids))]
type_check_list(ids, (int,), param_names)
return method(self, *args, **kwargs)
return new_method
def check_from_list(method):
"""A wrapper that wraps a parameter checker to the original function."""
@wraps(method)
def new_method(self, *args, **kwargs):
[word_list, special_tokens, special_first], _ = parse_user_args(method, *args, **kwargs)
word_set = check_unique_list_of_words(word_list, "word_list")
if special_tokens is not None:
token_set = check_unique_list_of_words(special_tokens, "special_tokens")
intersect = word_set.intersection(token_set)
if intersect != set():
raise ValueError("special_tokens and word_list contain duplicate word :" + str(intersect) + ".")
type_check(special_first, (bool,), "special_first")
return method(self, *args, **kwargs)
return new_method
def check_from_dict(method):
"""A wrapper that wraps a parameter checker to the original function."""
@wraps(method)
def new_method(self, *args, **kwargs):
[word_dict], _ = parse_user_args(method, *args, **kwargs)
type_check(word_dict, (dict,), "word_dict")
for word, word_id in word_dict.items():
type_check(word, (str,), "word")
type_check(word_id, (int,), "word_id")
check_value(word_id, (0, INT32_MAX), "word_id")
return method(self, *args, **kwargs)
return new_method
def check_jieba_init(method):
"""Wrapper method to check the parameters of jieba init."""
@wraps(method)
def new_method(self, *args, **kwargs):
[hmm_path, mp_path, _, with_offsets], _ = parse_user_args(method, *args, **kwargs)
if hmm_path is None:
raise ValueError("The dict of HMMSegment in cppjieba is not provided.")
if not isinstance(hmm_path, str):
raise TypeError("Wrong input type for hmm_path, should be string.")
if mp_path is None:
raise ValueError("The dict of MPSegment in cppjieba is not provided.")
if not isinstance(mp_path, str):
raise TypeError("Wrong input type for mp_path, should be string.")
if not isinstance(with_offsets, bool):
raise TypeError("Wrong input type for with_offsets, should be boolean.")
return method(self, *args, **kwargs)
return new_method
def check_jieba_add_word(method):
"""Wrapper method to check the parameters of jieba add word."""
@wraps(method)
def new_method(self, *args, **kwargs):
[word, freq], _ = parse_user_args(method, *args, **kwargs)
if word is None:
raise ValueError("word is not provided.")
if freq is not None:
check_uint32(freq)
return method(self, *args, **kwargs)
return new_method
def check_jieba_add_dict(method):
"""Wrapper method to check the parameters of add dict."""
@wraps(method)
def new_method(self, *args, **kwargs):
parse_user_args(method, *args, **kwargs)
return method(self, *args, **kwargs)
return new_method
def check_with_offsets(method):
"""Wrapper method to check if with_offsets is the only one parameter."""
@wraps(method)
def new_method(self, *args, **kwargs):
[with_offsets], _ = parse_user_args(method, *args, **kwargs)
if not isinstance(with_offsets, bool):
raise TypeError("Wrong input type for with_offsets, should be boolean.")
return method(self, *args, **kwargs)
return new_method
def check_unicode_script_tokenizer(method):
"""Wrapper method to check the parameter of UnicodeScriptTokenizer."""
@wraps(method)
def new_method(self, *args, **kwargs):
[keep_whitespace, with_offsets], _ = parse_user_args(method, *args, **kwargs)
if not isinstance(keep_whitespace, bool):
raise TypeError("Wrong input type for keep_whitespace, should be boolean.")
if not isinstance(with_offsets, bool):
raise TypeError("Wrong input type for with_offsets, should be boolean.")
return method(self, *args, **kwargs)
return new_method
def check_wordpiece_tokenizer(method):
"""Wrapper method to check the parameter of WordpieceTokenizer."""
@wraps(method)
def new_method(self, *args, **kwargs):
[vocab, suffix_indicator, max_bytes_per_token, unknown_token, with_offsets], _ = \
parse_user_args(method, *args, **kwargs)
if vocab is None:
raise ValueError("vocab is not provided.")
if not isinstance(vocab, text.Vocab):
raise TypeError("Wrong input type for vocab, should be text.Vocab object.")
if not isinstance(suffix_indicator, str):
raise TypeError("Wrong input type for suffix_indicator, should be string.")
if not isinstance(unknown_token, str):
raise TypeError("Wrong input type for unknown_token, should be string.")
if not isinstance(with_offsets, bool):
raise TypeError("Wrong input type for with_offsets, should be boolean.")
check_uint32(max_bytes_per_token)
return method(self, *args, **kwargs)
return new_method
def check_regex_replace(method):
"""Wrapper method to check the parameter of RegexReplace."""
@wraps(method)
def new_method(self, *args, **kwargs):
[pattern, replace, replace_all], _ = parse_user_args(method, *args, **kwargs)
type_check(pattern, (str,), "pattern")
type_check(replace, (str,), "replace")
type_check(replace_all, (bool,), "replace_all")
return method(self, *args, **kwargs)
return new_method
def check_regex_tokenizer(method):
"""Wrapper method to check the parameter of RegexTokenizer."""
@wraps(method)
def new_method(self, *args, **kwargs):
[delim_pattern, keep_delim_pattern, with_offsets], _ = parse_user_args(method, *args, **kwargs)
if delim_pattern is None:
raise ValueError("delim_pattern is not provided.")
if not isinstance(delim_pattern, str):
raise TypeError("Wrong input type for delim_pattern, should be string.")
if not isinstance(keep_delim_pattern, str):
raise TypeError("Wrong input type for keep_delim_pattern, should be string.")
if not isinstance(with_offsets, bool):
raise TypeError("Wrong input type for with_offsets, should be boolean.")
return method(self, *args, **kwargs)
return new_method
def check_basic_tokenizer(method):
"""Wrapper method to check the parameter of RegexTokenizer."""
@wraps(method)
def new_method(self, *args, **kwargs):
[lower_case, keep_whitespace, _, preserve_unused, with_offsets], _ = \
parse_user_args(method, *args, **kwargs)
if not isinstance(lower_case, bool):
raise TypeError("Wrong input type for lower_case, should be boolean.")
if not isinstance(keep_whitespace, bool):
raise TypeError("Wrong input type for keep_whitespace, should be boolean.")
if not isinstance(preserve_unused, bool):
raise TypeError("Wrong input type for preserve_unused_token, should be boolean.")
if not isinstance(with_offsets, bool):
raise TypeError("Wrong input type for with_offsets, should be boolean.")
return method(self, *args, **kwargs)
return new_method
def check_bert_tokenizer(method):
"""Wrapper method to check the parameter of BertTokenizer."""
@wraps(method)
def new_method(self, *args, **kwargs):
[vocab, suffix_indicator, max_bytes_per_token, unknown_token, lower_case, keep_whitespace, _,
preserve_unused_token, with_offsets], _ = parse_user_args(method, *args, **kwargs)
if vocab is None:
raise ValueError("vacab is not provided.")
if not isinstance(vocab, text.Vocab):
raise TypeError("Wrong input type for vocab, should be text.Vocab object.")
if not isinstance(suffix_indicator, str):
raise TypeError("Wrong input type for suffix_indicator, should be string.")
if not isinstance(max_bytes_per_token, int):
raise TypeError("Wrong input type for max_bytes_per_token, should be int.")
check_uint32(max_bytes_per_token)
if not isinstance(unknown_token, str):
raise TypeError("Wrong input type for unknown_token, should be string.")
if not isinstance(lower_case, bool):
raise TypeError("Wrong input type for lower_case, should be boolean.")
if not isinstance(keep_whitespace, bool):
raise TypeError("Wrong input type for keep_whitespace, should be boolean.")
if not isinstance(preserve_unused_token, bool):
raise TypeError("Wrong input type for preserve_unused_token, should be boolean.")
if not isinstance(with_offsets, bool):
raise TypeError("Wrong input type for with_offsets, should be boolean.")
return method(self, *args, **kwargs)
return new_method
def check_from_dataset(method):
"""A wrapper that wraps a parameter checker to the original function."""
@wraps(method)
def new_method(self, *args, **kwargs):
[_, columns, freq_range, top_k, special_tokens, special_first], _ = parse_user_args(method, *args,
**kwargs)
if columns is not None:
if not isinstance(columns, list):
columns = [columns]
type_check_list(columns, (str,), "col")
if freq_range is not None:
type_check(freq_range, (tuple,), "freq_range")
if len(freq_range) != 2:
raise ValueError("freq_range needs to be a tuple of 2 element.")
for num in freq_range:
if num is not None and (not isinstance(num, int)):
raise ValueError(
"freq_range needs to be either None or a tuple of 2 integers or an int and a None.")
if isinstance(freq_range[0], int) and isinstance(freq_range[1], int):
if freq_range[0] > freq_range[1] or freq_range[0] < 0:
raise ValueError("frequency range [a,b] should be 0 <= a <= b (a,b are inclusive).")
type_check(top_k, (int, type(None)), "top_k")
if isinstance(top_k, int):
check_positive(top_k, "top_k")
type_check(special_first, (bool,), "special_first")
if special_tokens is not None:
check_unique_list_of_words(special_tokens, "special_tokens")
return method(self, *args, **kwargs)
return new_method
def check_slidingwindow(method):
"""A wrapper that wraps a parameter checker to the original function(sliding window operation)."""
@wraps(method)
def new_method(self, *args, **kwargs):
[width, axis], _ = parse_user_args(method, *args, **kwargs)
check_pos_int32(width, "width")
type_check(axis, (int,), "axis")
return method(self, *args, **kwargs)
return new_method
def check_ngram(method):
"""A wrapper that wraps a parameter checker to the original function."""
@wraps(method)
def new_method(self, *args, **kwargs):
[n, left_pad, right_pad, separator], _ = parse_user_args(method, *args, **kwargs)
if isinstance(n, int):
n = [n]
if not (isinstance(n, list) and n != []):
raise ValueError("n needs to be a non-empty list of positive integers.")
for i, gram in enumerate(n):
type_check(gram, (int,), "gram[{0}]".format(i))
check_positive(gram, "gram_{}".format(i))
if not (isinstance(left_pad, tuple) and len(left_pad) == 2 and isinstance(left_pad[0], str) and isinstance(
left_pad[1], int)):
raise ValueError("left_pad needs to be a tuple of (str, int) str is pad token and int is pad_width.")
if not (isinstance(right_pad, tuple) and len(right_pad) == 2 and isinstance(right_pad[0], str) and isinstance(
right_pad[1], int)):
raise ValueError("right_pad needs to be a tuple of (str, int) str is pad token and int is pad_width.")
if not (left_pad[1] >= 0 and right_pad[1] >= 0):
raise ValueError("padding width need to be positive numbers.")
type_check(separator, (str,), "separator")
kwargs["n"] = n
kwargs["left_pad"] = left_pad
kwargs["right_pad"] = right_pad
kwargs["separator"] = separator
return method(self, **kwargs)
return new_method
def check_pair_truncate(method):
"""Wrapper method to check the parameters of number of pair truncate."""
@wraps(method)
def new_method(self, *args, **kwargs):
parse_user_args(method, *args, **kwargs)
return method(self, *args, **kwargs)
return new_method
def check_to_number(method):
"""A wrapper that wraps a parameter check to the original function (ToNumber)."""
@wraps(method)
def new_method(self, *args, **kwargs):
[data_type], _ = parse_user_args(method, *args, **kwargs)
type_check(data_type, (typing.Type,), "data_type")
if data_type not in mstype.number_type:
raise TypeError("data_type: " + str(data_type) + " is not numeric data type.")
return method(self, *args, **kwargs)
return new_method
def check_python_tokenizer(method):
"""A wrapper that wraps a parameter check to the original function (PythonTokenizer)."""
@wraps(method)
def new_method(self, *args, **kwargs):
[tokenizer], _ = parse_user_args(method, *args, **kwargs)
if not callable(tokenizer):
raise TypeError("tokenizer is not a callable Python function.")
return method(self, *args, **kwargs)
return new_method
def check_from_dataset_sentencepiece(method):
"""A wrapper that wraps a parameter checker to the original function (from_dataset)."""
@wraps(method)
def new_method(self, *args, **kwargs):
[_, col_names, vocab_size, character_coverage, model_type, params], _ = parse_user_args(method, *args, **kwargs)
if col_names is not None:
type_check_list(col_names, (str,), "col_names")
if vocab_size is not None:
check_uint32(vocab_size, "vocab_size")
else:
raise TypeError("vocab_size must be provided.")
if character_coverage is not None:
type_check(character_coverage, (float,), "character_coverage")
if model_type is not None:
from .utils import SentencePieceModel
type_check(model_type, (str, SentencePieceModel), "model_type")
if params is not None:
type_check(params, (dict,), "params")
return method(self, *args, **kwargs)
return new_method
def check_from_file_sentencepiece(method):
"""A wrapper that wraps a parameter checker to the original function (from_file)."""
@wraps(method)
def new_method(self, *args, **kwargs):
[file_path, vocab_size, character_coverage, model_type, params], _ = parse_user_args(method, *args, **kwargs)
if file_path is not None:
type_check(file_path, (list,), "file_path")
if vocab_size is not None:
check_uint32(vocab_size, "vocab_size")
if character_coverage is not None:
type_check(character_coverage, (float,), "character_coverage")
if model_type is not None:
from .utils import SentencePieceModel
type_check(model_type, (str, SentencePieceModel), "model_type")
if params is not None:
type_check(params, (dict,), "params")
return method(self, *args, **kwargs)
return new_method
def check_save_model(method):
"""A wrapper that wraps a parameter checker to the original function (save_model)."""
@wraps(method)
def new_method(self, *args, **kwargs):
[vocab, path, filename], _ = parse_user_args(method, *args, **kwargs)
if vocab is not None:
type_check(vocab, (cde.SentencePieceVocab,), "vocab")
if path is not None:
type_check(path, (str,), "path")
if filename is not None:
type_check(filename, (str,), "filename")
return method(self, *args, **kwargs)
return new_method
def check_sentence_piece_tokenizer(method):
"""A wrapper that wraps a parameter checker to the original function."""
from .utils import SPieceTokenizerOutType
@wraps(method)
def new_method(self, *args, **kwargs):
[mode, out_type], _ = parse_user_args(method, *args, **kwargs)
type_check(mode, (str, cde.SentencePieceVocab), "mode is not an instance of str or cde.SentencePieceVocab.")
type_check(out_type, (SPieceTokenizerOutType,), "out_type is not an instance of SPieceTokenizerOutType")
return method(self, *args, **kwargs)
return new_method
def check_from_file_vectors(method):
"""A wrapper that wraps a parameter checker to from_file of class Vectors."""
@wraps(method)
def new_method(self, *args, **kwargs):
[file_path, max_vectors], _ = parse_user_args(method, *args, **kwargs)
type_check(file_path, (str,), "file_path")
check_filename(file_path)
if max_vectors is not None:
type_check(max_vectors, (int,), "max_vectors")
check_non_negative_int32(max_vectors, "max_vectors")
return method(self, *args, **kwargs)
return new_method
def check_to_vectors(method):
"""A wrapper that wraps a parameter checker to ToVectors."""
@wraps(method)
def new_method(self, *args, **kwargs):
[vectors, unk_init, lower_case_backup], _ = parse_user_args(method, *args, **kwargs)
type_check(vectors, (cde.Vectors,), "vectors")
if unk_init is not None:
type_check(unk_init, (list, tuple), "unk_init")
for i, value in enumerate(unk_init):
type_check(value, (int, float), "unk_init[{0}]".format(i))
type_check(lower_case_backup, (bool,), "lower_case_backup")
return method(self, *args, **kwargs)
return new_method
File statistics for mindspore/python/mindspore/dataset/text/validators.py: avg_line_length 36.819646, max_line_length 120, alphanum_fraction 0.645834.
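A direct-use sketch of the plain helper that several of the wrappers above build on (assumes MindSpore is installed; the word lists are made up):

```python
# Hypothetical: check_unique_list_of_words validates types and rejects duplicates.
from mindspore.dataset.text.validators import check_unique_list_of_words

print(check_unique_list_of_words(["hello", "world"], "word_list"))   # {'hello', 'world'}

try:
    check_unique_list_of_words(["hello", "hello"], "word_list")
except ValueError as err:
    print(err)   # word_list contains duplicate word: hello.
```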
Record 11: ovm/drivers/driver.py
- hexsha ef88e9106c9629a8f3b5c709a97328e073057876, size 267 bytes, ext py, lang Python
- max_stars: repo lightcode/OVM @ 3c6c3528ef851f65d4bd75cafb8738c54fba7b6f, licenses ["MIT"], max_stars_count 1, star events 2018-03-20T14:54:10.000Z to 2018-03-20T14:54:10.000Z
- max_issues: repo lightcode/OVM @ 3c6c3528ef851f65d4bd75cafb8738c54fba7b6f, licenses ["MIT"], max_issues_count null, issue events null
- max_forks: repo lightcode/OVM @ 3c6c3528ef851f65d4bd75cafb8738c54fba7b6f, licenses ["MIT"], max_forks_count null, fork events null

Content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

from six import with_metaclass
from abc import ABCMeta


class Driver(with_metaclass(ABCMeta)):
    def __init__(self):
        self._params = {}

    def set_params(self, **kargs):
        self._params.update(kargs)
File statistics for ovm/drivers/driver.py: avg_line_length 16.6875, max_line_length 38, alphanum_fraction 0.655431.
Record 12: ci/fetch_pysteps_data.py
- hexsha 43391eb4478e68b79d333eb94c4a69b27598c657, size 746 bytes, ext py, lang Python
- max_stars: repo alexanderhucheerful/pysteps @ efa52506bb9aff5076dc52311b72df872b09045f, licenses ["BSD-3-Clause"], max_stars_count 1, star events 2021-08-25T03:07:07.000Z to 2021-08-25T03:07:07.000Z
- max_issues: repo alexanderhucheerful/pysteps @ efa52506bb9aff5076dc52311b72df872b09045f, licenses ["BSD-3-Clause"], max_issues_count null, issue events null
- max_forks: repo alexanderhucheerful/pysteps @ efa52506bb9aff5076dc52311b72df872b09045f, licenses ["BSD-3-Clause"], max_forks_count null, fork events null

Content:
# -*- coding: utf-8 -*-
"""
Script used to install the pysteps data in a test environment and set a pystepsrc
configuration file that points to that data.

The test data is downloaded in the `PYSTEPS_DATA_DIR` environmental variable.

After this script is run, the `PYSTEPSRC` environmental variable should be set to
PYSTEPSRC=$PYSTEPS_DATA_DIR/pystepsrc for pysteps to use that configuration file.
"""

import os

from pysteps.datasets import create_default_pystepsrc, download_pysteps_data

tox_test_data_dir = os.environ['PYSTEPS_DATA_DIR']

download_pysteps_data(tox_test_data_dir, force=True)

create_default_pystepsrc(tox_test_data_dir,
                         config_dir=tox_test_data_dir,
                         file_name="pystepsrc")
File statistics for ci/fetch_pysteps_data.py: avg_line_length 32.434783, max_line_length 81, alphanum_fraction 0.761394.
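A short follow-up sketch matching the docstring above: export PYSTEPSRC so that a later pysteps import picks up the configuration file the script wrote. The path is derived from the same environment variable the script itself uses:

```python
# Hypothetical: point PYSTEPSRC at the freshly written configuration file
# (run this before importing pysteps in the test environment).
import os

os.environ['PYSTEPSRC'] = os.path.join(os.environ['PYSTEPS_DATA_DIR'], 'pystepsrc')
print('PYSTEPSRC ->', os.environ['PYSTEPSRC'])
```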
Record 13: test/serde/serde_helpers.py
- hexsha 682e16267f2664af558799467d83e69ff7e6a9a4, size 56,323 bytes, ext py, lang Python
- max_stars: repo ntuyoyo0/PySyft @ 15a0609e95d529ddbb33aa8199ba64cc645a9f8b, licenses ["Apache-2.0"], max_stars_count 1, star events 2020-08-14T05:46:52.000Z to 2020-08-14T05:46:52.000Z
- max_issues: repo ntuyoyo0/PySyft @ 15a0609e95d529ddbb33aa8199ba64cc645a9f8b, licenses ["Apache-2.0"], max_issues_count null, issue events null
- max_forks: repo ntuyoyo0/PySyft @ 15a0609e95d529ddbb33aa8199ba64cc645a9f8b, licenses ["Apache-2.0"], max_forks_count null, fork events null

Content (truncated):
from collections import OrderedDict
import pytest
import numpy
import torch
from functools import partial
import traceback
import io
import syft
from syft.serde import msgpack
# Make dict of type codes
CODE = OrderedDict()
for cls, simplifier in msgpack.serde.simplifiers.items():
CODE[cls] = simplifier[0]
FORCED_CODE = OrderedDict()
for cls, simplifier in msgpack.serde.forced_full_simplifiers.items():
FORCED_CODE[cls] = simplifier[0]
########################################################################
# Functions that return list of serde samples in the following format:
# [
# {
# "value": original_value,
# "simplified": simplified_value,
# "cmp_detailed": custom_detailed_values_comparison_function, # optional
# "cmp_simplified": custom_simplified_values_comparison_function, # optional
# "framework": None or torch, # optional, affects tensor serialization strategy
# "forced": (bool), # optional, enables forced full simplification
# },
# ...
# ]
########################################################################
########################################################################
# Native types.
########################################################################
# None
def make_none(**kwargs):
return [{"value": None}]
# Dict.
def make_dict(**kwargs):
return [
{
"value": {1: "hello", 2: "world"},
"simplified": (
CODE[dict],
(
(1, (CODE[str], (b"hello",))), # [not simplified tuple] # key # value
(2, (CODE[str], (b"world",))),
),
),
},
{
"value": {"hello": "world"},
"simplified": (
CODE[dict],
(
( # [not simplified tuple]
(CODE[str], (b"hello",)), # key
(CODE[str], (b"world",)), # value
),
),
),
},
{"value": {}, "simplified": (CODE[dict], tuple())},
]
# List.
def make_list(**kwargs):
return [
{
"value": ["hello", "world"],
"simplified": (
CODE[list],
((CODE[str], (b"hello",)), (CODE[str], (b"world",))), # item
),
},
{"value": ["hello"], "simplified": (CODE[list], ((CODE[str], (b"hello",)),))}, # item
{"value": [], "simplified": (CODE[list], tuple())},
# Tests that forced full simplify should return just simplified object if it doesn't have full simplifier
{
"forced": True,
"value": ["hello"],
"simplified": (CODE[list], ((CODE[str], (b"hello",)),)), # item
},
]
# Tuple.
def make_tuple(**kwargs):
return [
{
"value": ("hello", "world"),
"simplified": (CODE[tuple], ((CODE[str], (b"hello",)), (CODE[str], (b"world",)))),
},
{"value": ("hello",), "simplified": (CODE[tuple], ((CODE[str], (b"hello",)),))},
{"value": tuple(), "simplified": (CODE[tuple], tuple())},
]
# Set.
def make_set(**kwargs):
def compare_simplified(actual, expected):
"""When set is simplified and converted to tuple, elements order in tuple is random
We compare tuples as sets because the set order is undefined"""
assert actual[0] == expected[0]
assert set(actual[1]) == set(expected[1])
return True
return [
{
"value": {"hello", "world"},
"simplified": (CODE[set], ((CODE[str], (b"world",)), (CODE[str], (b"hello",)))),
"cmp_simplified": compare_simplified,
},
{"value": {"hello"}, "simplified": (CODE[set], ((CODE[str], (b"hello",)),))},
{"value": set([]), "simplified": (CODE[set], tuple())},
]
# Slice.
def make_slice(**kwargs):
return [
{"value": slice(10, 20, 30), "simplified": (CODE[slice], (10, 20, 30))},
{"value": slice(10, 20), "simplified": (CODE[slice], (10, 20, None))},
{"value": slice(10), "simplified": (CODE[slice], (None, 10, None))},
]
# Range.
def make_range(**kwargs):
return [
{"value": range(1, 3, 4), "simplified": (CODE[range], (1, 3, 4))},
{"value": range(1, 3), "simplified": (CODE[range], (1, 3, 1))},
]
# String.
def make_str(**kwargs):
return [
{"value": "a string", "simplified": (CODE[str], (b"a string",))},
{"value": "", "simplified": (CODE[str], (b"",))},
]
# Int.
def make_int(**kwargs):
return [
{"value": 5, "simplified": 5},
# Tests that forced full simplify should return just simplified object if it doesn't have full simplifier
{"forced": True, "value": 5, "simplified": 5},
]
# Float.
def make_float(**kwargs):
return [{"value": 5.1, "simplified": 5.1}]
# Ellipsis.
def make_ellipsis(**kwargs):
return [{"value": ..., "simplified": (CODE[type(Ellipsis)], (b"",))}]
########################################################################
# Numpy.
########################################################################
# numpy.ndarray
def make_numpy_ndarray(**kwargs):
np_array = numpy.random.random((2, 2))
def compare(detailed, original):
"""Compare numpy arrays"""
assert numpy.array_equal(detailed, original)
return True
return [
{
"value": np_array,
"simplified": (
CODE[type(np_array)],
(
np_array.tobytes(), # (bytes) serialized bin
(CODE[tuple], (2, 2)), # (tuple) shape
(CODE[str], (b"float64",)), # (str) dtype.name
),
),
"cmp_detailed": compare,
}
]
# numpy.float32, numpy.float64, numpy.int32, numpy.int64
def make_numpy_number(dtype, **kwargs):
num = numpy.array([2.2], dtype=dtype)[0]
return [
{
"value": num,
"simplified": (
CODE[dtype],
(
num.tobytes(), # (bytes)
(CODE[str], (num.dtype.name.encode("utf-8"),)), # (str) dtype.name
),
),
}
]
########################################################################
# PyTorch.
########################################################################
# Utility functions.
def compare_modules(detailed, original):
"""Compare ScriptModule instances"""
input = torch.randn(10, 3)
    # NOTE: after serde, TopLevelTracedModule or jit.ScriptFunction becomes a
    # ScriptModule (that's what torch.jit.load returns in the detail function)
assert isinstance(detailed, torch.jit.ScriptModule)
# Code changes after torch.jit.load(): function becomes `forward` method
if type(original) != torch.jit.ScriptFunction:
assert detailed.code == original.code
# model outputs match
assert detailed(input).equal(original(input))
return True
def save_to_buffer(tensor) -> bytes:
"""Serializes a pytorch tensor to binary"""
binary_stream = io.BytesIO()
torch.save(tensor, binary_stream)
return binary_stream.getvalue()
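# Sketch of the inverse helper (an assumption, not defined in the original helpers):
# tensors serialized this way can be recovered from the bytes roughly like this.
def _load_from_buffer_sketch(binary: bytes):
    """Deserializes a pytorch tensor from binary"""
    return torch.load(io.BytesIO(binary))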
# torch.device
def make_torch_device(**kwargs):
torch_device = torch.device("cpu")
return [
{
"value": torch_device,
"simplified": (CODE[type(torch_device)], ((CODE[str], (b"cpu",)),)), # (str) device
}
]
# torch.jit.ScriptModule
def make_torch_scriptmodule(**kwargs):
class ScriptModule(torch.jit.ScriptModule):
def __init__(self):
super(ScriptModule, self).__init__()
@torch.jit.script_method
def forward(self, x): # pragma: no cover
return x + 2
sm = ScriptModule()
return [
{
"value": sm,
"simplified": (
CODE[torch.jit.ScriptModule],
(sm.save_to_buffer(),), # (bytes) serialized torchscript
),
"cmp_detailed": compare_modules,
}
]
# torch.jit.ScriptFunction
def make_torch_scriptfunction(**kwargs):
@torch.jit.script
def func(x): # pragma: no cover
return x + 2
return [
{
"value": func,
"simplified": (
CODE[torch.jit.ScriptFunction],
(func.save_to_buffer(),), # (bytes) serialized torchscript
),
"cmp_detailed": compare_modules,
}
]
# torch.memory_format
def make_torch_memoryformat(**kwargs):
memory_format = torch.preserve_format
return [{"value": memory_format, "simplified": (CODE[torch.memory_format], 3)}]
# torch.jit.TopLevelTracedModule
# NOTE: if the model is created inside the function, it will be serialized differently depending on the context
class TopLevelTraceModel(torch.nn.Module):
def __init__(self):
super(TopLevelTraceModel, self).__init__()
self.w1 = torch.nn.Parameter(torch.randn(3, 1), requires_grad=True)
self.b1 = torch.nn.Parameter(torch.randn(1), requires_grad=True)
def forward(self, x):
x = x @ self.w1 + self.b1
return x
topLevelTraceModel = TopLevelTraceModel()
def make_torch_topleveltracedmodule(**kwargs):
tm = torch.jit.trace(topLevelTraceModel, torch.randn(10, 3))
return [
{
"value": tm,
"simplified": (
CODE[torch.jit.TopLevelTracedModule],
(tm.save_to_buffer(),), # (bytes) serialized torchscript
),
"cmp_detailed": compare_modules,
}
]
# torch.nn.parameter.Parameter
def make_torch_parameter(**kwargs):
param = torch.nn.Parameter(torch.randn(3, 3), requires_grad=True)
def compare(detailed, original):
assert type(detailed) == torch.nn.Parameter
assert detailed.data.equal(original.data)
assert detailed.id == original.id
assert detailed.requires_grad == original.requires_grad
return True
return [
{
"value": param,
"simplified": (
CODE[torch.nn.Parameter],
(
param.id, # (int) id
msgpack.serde._simplify(syft.hook.local_worker, param.data), # (Tensor) data
param.requires_grad, # (bool) requires_grad
None,
),
),
"cmp_detailed": compare,
}
]
# torch.Tensor
def make_torch_tensor(**kwargs):
tensor = torch.randn(3, 3)
tensor.tag("tag1")
tensor.describe("desc")
def compare(detailed, original):
assert type(detailed) == torch.Tensor
assert detailed.data.equal(original.data)
assert detailed.id == original.id
assert detailed.requires_grad == original.requires_grad
assert detailed.tags == original.tags
assert detailed.description == original.description
return True
return [
# Default pytorch tensor serialization strategy
{
"value": tensor,
"simplified": (
CODE[torch.Tensor],
(
tensor.id, # (int) id
save_to_buffer(tensor), # (bytes) serialized tensor
None, # (AbstractTensor) chain
None, # (AbstractTensor) grad_chain
(CODE[set], ((CODE[str], (b"tag1",)),)), # (set of str) tags
(CODE[str], (b"desc",)), # (str) description
(CODE[str], (b"torch",)), # (str) framework
),
),
"cmp_detailed": compare,
},
# "All" tensor serialization strategy
{
"framework": None,
"value": tensor,
"simplified": (
CODE[torch.Tensor],
(
tensor.id, # (int) id
(
CODE[tuple],
( # serialized tensor
(CODE[tuple], (3, 3)), # tensor.shape
(CODE[str], (b"float32",)), # tensor.dtype
(
CODE[list],
tuple(tensor.flatten().tolist()),
), # tensor contents as flat list
),
),
None, # (AbstractTensor) chain
None, # (AbstractTensor) grad_chain
(CODE[set], ((CODE[str], (b"tag1",)),)), # (set of str) tags
(CODE[str], (b"desc",)), # (str) description
(CODE[str], (b"all",)), # (str) framework
),
),
"cmp_detailed": compare,
},
]
# torch.Size
def make_torch_size(**kwargs):
return [
{
"value": torch.randn(3, 3).size(),
"simplified": (CODE[torch.Size], (3, 3)), # (int) *shape
}
]
########################################################################
# PySyft.
########################################################################
# Utility functions
def compare_operations(detailed, original):
"""Compare 2 Operation's"""
assert len(detailed) == len(original)
for i, detailed_op in enumerate(detailed):
original_op = original[i]
compare_placeholders_list(original_op.cmd_args, detailed_op.cmd_args)
# return_ids is not a list (why?)
compare_placeholders_list([original_op.return_ids], [detailed_op.return_ids])
assert original_op.cmd_name == detailed_op.cmd_name
assert original_op.cmd_kwargs == detailed_op.cmd_kwargs
return True
def compare_placeholders_list(detailed, original):
"""Compare 2 lists of placeholders"""
assert len(detailed) == len(original)
for i, detailed_ph in enumerate(detailed):
original_ph = original[i]
assert detailed_ph.id == original_ph.id
assert detailed_ph.tags == original_ph.tags
assert detailed_ph.description == original_ph.description
return True
def compare_placeholders_dict(detailed, original):
"""Compare 2 dicts of placeholders"""
assert len(detailed) == len(original)
for key, detailed_ph in detailed.items():
original_ph = original[key]
assert detailed_ph.id == original_ph.id
assert detailed_ph.tags == original_ph.tags
assert detailed_ph.description == original_ph.description
return True
# AdditiveSharingTensor
def make_additivesharingtensor(**kwargs):
workers = kwargs["workers"]
alice, bob, james = workers["alice"], workers["bob"], workers["james"]
tensor = torch.tensor([[3.1, 4.3]]).fix_prec().share(alice, bob, crypto_provider=james)
ast = tensor.child.child
def compare(detailed, original):
assert (
type(detailed)
== syft.frameworks.torch.tensors.interpreters.additive_shared.AdditiveSharingTensor
)
assert detailed.id == original.id
assert detailed.field == original.field
assert detailed.child.keys() == original.child.keys()
return True
return [
{
"value": ast,
"simplified": (
CODE[
syft.frameworks.torch.tensors.interpreters.additive_shared.AdditiveSharingTensor
],
(
ast.id, # (int or str) id
ast.field, # (int) field
(CODE[str], (ast.crypto_provider.id.encode("utf-8"),)), # (str) worker_id
msgpack.serde._simplify(
syft.hook.local_worker, ast.child
), # (dict of AbstractTensor) simplified chain
),
),
"cmp_detailed": compare,
}
]
# FixedPrecisionTensor
def make_fixedprecisiontensor(**kwargs):
workers = kwargs["workers"]
alice, bob, james = workers["alice"], workers["bob"], workers["james"]
t = torch.tensor([[3.1, 4.3]])
fpt_tensor = t.fix_prec(base=12, precision_fractional=5).share(
alice, bob, crypto_provider=james
)
fpt = fpt_tensor.child
fpt.tag("tag1")
fpt.describe("desc")
# AdditiveSharingTensor.simplify sets garbage_collect_data=False on child tensors during simplify
# This changes tensors' internal state in chain and is required to pass the test
msgpack.serde._simplify(syft.hook.local_worker, fpt)
def compare(detailed, original):
assert (
type(detailed)
== syft.frameworks.torch.tensors.interpreters.precision.FixedPrecisionTensor
)
assert detailed.id == original.id
assert detailed.field == original.field
assert detailed.base == original.base
assert detailed.precision_fractional == original.precision_fractional
assert detailed.kappa == original.kappa
assert detailed.tags == original.tags
assert detailed.description == original.description
return True
return [
{
"value": fpt,
"simplified": (
CODE[syft.frameworks.torch.tensors.interpreters.precision.FixedPrecisionTensor],
(
fpt.id, # (int or str) id
fpt.field, # (int) field
12, # (int) base
5, # (int) precision_fractional
fpt.kappa, # (int) kappa
(CODE[set], ((CODE[str], (b"tag1",)),)), # (set of str) tags
(CODE[str], (b"desc",)), # (str) description
msgpack.serde._simplify(
syft.hook.local_worker, fpt.child
), # (AbstractTensor) chain
),
),
"cmp_detailed": compare,
}
]
# CRTPrecisionTensor
def make_crtprecisiontensor(**kwargs):
workers = kwargs["workers"]
alice, bob, james = workers["alice"], workers["bob"], workers["james"]
t = torch.tensor([[3.1, 4.3]])
cpt = t.fix_prec(storage="crt").share(alice, bob, crypto_provider=james).child
# AdditiveSharingTensor.simplify sets garbage_collect_data=False on child tensors during simplify
# This changes tensors' internal state in chain and is required to pass the test
msgpack.serde._simplify(syft.hook.local_worker, cpt)
def compare(detailed, original):
assert (
type(detailed)
== syft.frameworks.torch.tensors.interpreters.crt_precision.CRTPrecisionTensor
)
assert detailed.id == original.id
assert detailed.base == original.base
assert detailed.precision_fractional == original.precision_fractional
return True
return [
{
"value": cpt,
"simplified": (
CODE[syft.frameworks.torch.tensors.interpreters.crt_precision.CRTPrecisionTensor],
(
cpt.id, # (int) id
cpt.base, # (int) base
cpt.precision_fractional, # (int) precision_fractional
msgpack.serde._simplify(
syft.hook.local_worker, cpt.child
), # (dict of AbstractTensor) simplified chain
),
),
"cmp_detailed": compare,
}
]
# LoggingTensor
def make_loggingtensor(**kwargs):
t = torch.randn(3, 3)
lt = syft.frameworks.torch.tensors.decorators.logging.LoggingTensor().on(t).child
def compare(detailed, original):
assert type(detailed) == syft.frameworks.torch.tensors.decorators.logging.LoggingTensor
assert detailed.id == original.id
assert detailed.child.equal(original.child)
return True
return [
{
"value": lt,
"simplified": (
CODE[syft.frameworks.torch.tensors.decorators.logging.LoggingTensor],
(
lt.id, # (int or str) id
msgpack.serde._simplify(
syft.hook.local_worker, lt.child
), # (AbstractTensor) chain
),
),
"cmp_detailed": compare,
}
]
# syft.generic.pointers.multi_pointer.MultiPointerTensor
def make_multipointertensor(**kwargs):
workers = kwargs["workers"]
alice, bob = workers["alice"], workers["bob"]
t = torch.randn(3, 3)
mpt = t.send(alice, bob).child
def compare(detailed, original):
assert type(detailed) == syft.generic.pointers.multi_pointer.MultiPointerTensor
assert detailed.id == original.id
assert detailed.child.keys() == original.child.keys()
return True
return [
{
"value": mpt,
"simplified": (
CODE[syft.generic.pointers.multi_pointer.MultiPointerTensor],
(
mpt.id, # (int or str) id
msgpack.serde._simplify(syft.hook.local_worker, mpt.child), # (dict)
),
),
"cmp_detailed": compare,
}
]
# syft.execution.plan.Plan
def make_plan(**kwargs):
# Function to plan
@syft.func2plan([torch.Size((3,))])
def plan(x):
x = x + x
y = torch.abs(x)
return x
# Model to plan
class Net(syft.Plan):
def __init__(self):
super(Net, self).__init__()
self.fc1 = torch.nn.Linear(3, 3)
self.fc2 = torch.nn.Linear(3, 2)
def forward(self, x):
x = torch.nn.functional.relu(self.fc1(x))
x = self.fc2(x)
return torch.nn.functional.log_softmax(x, dim=0)
with syft.hook.local_worker.registration_enabled():
model_plan = Net()
model_plan.build(torch.tensor([1.0, 2.0, 3.0]))
def compare(detailed, original):
assert type(detailed) == syft.execution.plan.Plan
assert detailed.id == original.id
compare_placeholders_dict(detailed.placeholders, original.placeholders)
compare_operations(detailed.operations, original.operations)
# State
compare_placeholders_list(
detailed.state.state_placeholders, original.state.state_placeholders
)
assert detailed.include_state == original.include_state
assert detailed.is_built == original.is_built
compare_placeholders_dict(detailed.placeholders, original.placeholders)
assert detailed.name == original.name
assert detailed.tags == original.tags
assert detailed.description == original.description
with syft.hook.local_worker.registration_enabled():
t = torch.tensor([1.1, -2, 3])
res1 = detailed(t)
res2 = original(t)
assert res1.equal(res2)
return True
return [
{
"value": plan,
"simplified": (
CODE[syft.execution.plan.Plan],
(
plan.id, # (int or str) id
msgpack.serde._simplify(syft.hook.local_worker, plan.operations),
msgpack.serde._simplify(syft.hook.local_worker, plan.state), # (State)
plan.include_state, # (bool) include_state
plan.is_built, # (bool) is_built
msgpack.serde._simplify(syft.hook.local_worker, plan.name), # (str) name
msgpack.serde._simplify(syft.hook.local_worker, plan.tags), # (set of str) tags
msgpack.serde._simplify(
syft.hook.local_worker, plan.description
), # (str) description
# (PlaceHolder) placeholders
msgpack.serde._simplify(syft.hook.local_worker, plan.placeholders),
),
),
"cmp_detailed": compare,
},
{
"value": model_plan,
"simplified": (
CODE[syft.execution.plan.Plan],
(
model_plan.id, # (int or str) id
msgpack.serde._simplify(syft.hook.local_worker, model_plan.operations),
msgpack.serde._simplify(syft.hook.local_worker, model_plan.state), # (State)
model_plan.include_state, # (bool) include_state
model_plan.is_built, # (bool) is_built
msgpack.serde._simplify(syft.hook.local_worker, model_plan.name), # (str) name
msgpack.serde._simplify(syft.hook.local_worker, model_plan.tags), # (list) tags
msgpack.serde._simplify(
syft.hook.local_worker, model_plan.description
), # (str) description
# (PlaceHolder) placeholders
msgpack.serde._simplify(syft.hook.local_worker, model_plan.placeholders),
),
),
"cmp_detailed": compare,
},
]
# State
def make_state(**kwargs):
me = kwargs["workers"]["me"]
t1, t2 = torch.randn(3, 3), torch.randn(3, 3)
p1, p2 = syft.PlaceHolder(), syft.PlaceHolder()
p1.tag("state1"), p2.tag("state2")
p1.instantiate(t1), p2.instantiate(t2)
state = syft.execution.state.State(owner=me, state_placeholders=[p1, p2])
def compare(detailed, original):
assert type(detailed) == syft.execution.state.State
compare_placeholders_list(detailed.state_placeholders, original.state_placeholders)
for i in range(len(original.tensors())):
assert detailed.tensors()[i].equal(original.tensors()[i])
return True
return [
{
"value": state,
"simplified": (
CODE[syft.execution.state.State],
(
(
CODE[list],
( # (list) state_placeholders
msgpack.serde._simplify(syft.hook.local_worker, p1),
msgpack.serde._simplify(syft.hook.local_worker, p2),
),
),
(
CODE[list],
( # (list) tensors
msgpack.serde._simplify(syft.hook.local_worker, t1),
msgpack.serde._simplify(syft.hook.local_worker, t2),
),
),
),
),
"cmp_detailed": compare,
}
]
# Protocol
def make_protocol(**kwargs):
me = kwargs["workers"]["me"]
@syft.func2plan([torch.Size((1, 3))])
def plan(x):
x = x + x
x = torch.abs(x)
return x
with me.registration_enabled():
me.register_obj(plan)
protocol = syft.execution.protocol.Protocol([("me", plan), ("me", plan)])
protocol.tag("aaa")
protocol.describe("desc")
def compare(detailed, original):
assert type(detailed) == syft.execution.protocol.Protocol
assert detailed.id == original.id
assert detailed.tags == original.tags
assert detailed.description == original.description
assert detailed.plans == original.plans
assert detailed.owner == original.owner
assert detailed.workers_resolved == original.workers_resolved
return True
return [
{
"value": protocol,
"simplified": (
CODE[syft.execution.protocol.Protocol],
(
protocol.id, # (int)
(CODE[set], ((CODE[str], (b"aaa",)),)), # (set of strings) tags
(CODE[str], (b"desc",)), # (str) description
(
CODE[list], # (list) plans reference
(
# (tuple) reference: worker_id (int/str), plan_id (int/str)
(CODE[tuple], ((CODE[str], (b"me",)), plan.id)),
(CODE[tuple], ((CODE[str], (b"me",)), plan.id)),
),
),
False, # (bool) workers_resolved
),
),
"cmp_detailed": compare,
}
]
# syft.generic.pointers.pointer_tensor.PointerTensor
def make_pointertensor(**kwargs):
alice = kwargs["workers"]["alice"]
tensor = torch.randn(3, 3)
ptr = tensor.send(alice).child
def compare(detailed, original):
assert type(detailed) == syft.generic.pointers.pointer_tensor.PointerTensor
assert detailed.id == original.id
assert detailed.id_at_location == original.id_at_location
assert detailed.location == original.location
assert detailed.point_to_attr == original.point_to_attr
        # Not testing garbage collect data as we are always setting it to False at the receiver end
# irrespective of its initial value
assert detailed.garbage_collect_data == original.garbage_collect_data
assert detailed.get().equal(tensor)
return True
return [
{
"value": ptr,
"simplified": (
CODE[syft.generic.pointers.pointer_tensor.PointerTensor],
(
ptr.id, # (int or str) id
ptr.id_at_location, # (int or str) id_at_location
(CODE[str], (b"alice",)), # (str) worker_id
None, # (str) point_to_attr
(CODE[torch.Size], (3, 3)), # (torch.Size) _shape
True, # (bool) garbage_collect_data
ptr.tags,
ptr.description,
),
),
"cmp_detailed": compare,
}
]
# syft.generic.pointers.pointer_plan.PointerPlan
def make_pointerplan(**kwargs):
alice, me = kwargs["workers"]["alice"], kwargs["workers"]["me"]
@syft.func2plan([torch.Size((1, 3))])
def plan(x):
x = x + x
x = torch.abs(x)
return x
plan.send(alice)
ptr = me.request_search([plan.id], location=alice)[0]
def compare(detailed, original):
assert type(detailed) == syft.generic.pointers.pointer_plan.PointerPlan
assert detailed.id == original.id
assert detailed.id_at_location == original.id_at_location
assert detailed.location == original.location
assert detailed.garbage_collect_data == original.garbage_collect_data
# execute
t = torch.randn(3, 3).send(alice)
assert detailed(t).get().equal(original(t).get())
return True
return [
{
"value": ptr,
"simplified": (
CODE[syft.generic.pointers.pointer_plan.PointerPlan],
(
ptr.id, # (int) id
ptr.id_at_location, # (int) id_at_location
(CODE[str], (b"alice",)), # (str) worker_id
False, # (bool) garbage_collect_data
),
),
"cmp_detailed": compare,
}
]
# syft.generic.pointers.pointer_protocol.PointerProtocol
def make_pointerprotocol(**kwargs):
alice, me = kwargs["workers"]["alice"], kwargs["workers"]["me"]
@syft.func2plan([torch.Size((1, 3))])
def plan(x):
x = x + x
x = torch.abs(x)
return x
protocol = syft.execution.protocol.Protocol(
[("worker1", plan), ("worker2", plan)], tags=["aaa", "bbb"], description="desc"
)
protocol.send(alice)
ptr = me.request_search([protocol.id], location=alice)[0]
def compare(detailed, original):
assert type(detailed) == syft.generic.pointers.pointer_protocol.PointerProtocol
assert detailed.id == original.id
assert detailed.id_at_location == original.id_at_location
assert detailed.location == original.location
assert detailed.garbage_collect_data == original.garbage_collect_data
return True
return [
{
"value": ptr,
"simplified": (
CODE[syft.generic.pointers.pointer_protocol.PointerProtocol],
(
ptr.id, # (int or str) id
ptr.id_at_location, # (int) id_at_location
(CODE[str], (b"alice",)), # (str) location.id
False, # (bool) garbage_collect_data
),
),
"cmp_detailed": compare,
}
]
# syft.generic.pointers.object_wrapper.ObjectWrapper
def make_objectwrapper(**kwargs):
obj = torch.randn(3, 3)
wrapper = syft.generic.pointers.object_wrapper.ObjectWrapper(obj, id=123)
def compare(detailed, original):
assert type(detailed) == syft.generic.pointers.object_wrapper.ObjectWrapper
assert detailed.id == original.id
# tensors
assert detailed.obj.equal(original.obj)
return True
return [
{
"value": wrapper,
"simplified": (
CODE[syft.generic.pointers.object_wrapper.ObjectWrapper],
(
123, # (int) id
msgpack.serde._simplify(syft.hook.local_worker, obj), # (Any) obj
),
),
"cmp_detailed": compare,
}
]
# syft.generic.pointers.object_pointer.ObjectPointer
def make_objectpointer(**kwargs):
alice = kwargs["workers"]["alice"]
obj = torch.randn(3, 3)
obj_ptr = obj.send(alice)
ptr = syft.generic.pointers.object_pointer.ObjectPointer.create_pointer(obj, alice, obj.id)
def compare(detailed, original):
assert type(detailed) == syft.generic.pointers.object_pointer.ObjectPointer
assert detailed.id == original.id
assert detailed.id_at_location == original.id_at_location
assert detailed.location == original.location
assert detailed.point_to_attr == original.point_to_attr
assert detailed.garbage_collect_data == original.garbage_collect_data
return True
return [
{
"value": ptr,
"simplified": (
CODE[syft.generic.pointers.object_pointer.ObjectPointer],
(
ptr.id, # (int or str) id
ptr.id_at_location, # (int or str) id
(CODE[str], (b"alice",)), # (str) location.id
None, # (str) point_to_attr
True, # (bool) garbage_collect_data
),
),
"cmp_detailed": compare,
}
]
# syft.generic.string.String
def make_string(**kwargs):
def compare_simplified(actual, expected):
"""This is a custom comparison functino.
The reason for using this is that when set is that tags are use. Tags are sets.
When sets are simplified and converted to tuple, elements order in tuple is random
We compare tuples as sets because the set order is undefined.
This function is inspired by the one with the same name defined above in `make_set`.
"""
assert actual[0] == expected[0]
assert actual[1][0] == expected[1][0]
assert actual[1][1] == expected[1][1]
assert actual[1][2][0] == expected[1][2][0]
assert set(actual[1][2][1]) == set(expected[1][2][1])
assert actual[1][3] == expected[1][3]
return True
return [
{
"value": syft.generic.string.String(
"Hello World", id=1234, tags=set(["tag1", "tag2"]), description="description"
),
"simplified": (
CODE[syft.generic.string.String],
(
(CODE[str], (b"Hello World",)),
1234,
(CODE[set], ((CODE[str], (b"tag1",)), (CODE[str], (b"tag2",)))),
(CODE[str], (b"description",)),
),
),
"cmp_simplified": compare_simplified,
}
]
# syft.federated.train_config.TrainConfig
def make_trainconfig(**kwargs):
class Model(torch.jit.ScriptModule):
def __init__(self):
super(Model, self).__init__()
self.w1 = torch.nn.Parameter(torch.randn(10, 1), requires_grad=True)
self.b1 = torch.nn.Parameter(torch.randn(1), requires_grad=True)
@torch.jit.script_method
def forward(self, x): # pragma: no cover
x = x @ self.w1 + self.b1
return x
class Loss(torch.jit.ScriptModule):
def __init__(self):
super(Loss, self).__init__()
@torch.jit.script_method
def forward(self, pred, target): # pragma: no cover
return ((target.view(pred.shape).float() - pred.float()) ** 2).mean()
loss = Loss()
model = Model()
conf = syft.federated.train_config.TrainConfig(
model=model, loss_fn=loss, batch_size=2, optimizer="SGD", optimizer_args={"lr": 0.1}
)
def compare(detailed, original):
assert type(detailed) == syft.federated.train_config.TrainConfig
assert detailed.id == original.id
assert detailed._model_id == original._model_id
assert detailed._loss_fn_id == original._loss_fn_id
assert detailed.batch_size == original.batch_size
assert detailed.epochs == original.epochs
assert detailed.optimizer == original.optimizer
assert detailed.optimizer_args == original.optimizer_args
assert detailed.max_nr_batches == original.max_nr_batches
assert detailed.shuffle == original.shuffle
return True
return [
{
"value": conf,
"simplified": (
CODE[syft.federated.train_config.TrainConfig],
(
None, # (int) _model_id
None, # (int) _loss_fn_id
2, # (int) batch_size
1, # (int) epochs
(CODE[str], (b"SGD",)), # (str) optimizer
(CODE[dict], (((CODE[str], (b"lr",)), 0.1),)), # (dict) optimizer_args
conf.id, # (int or str)
-1, # (int) max_nr_batches
True, # (bool) shuffle
),
),
"cmp_detailed": compare,
}
]
# syft.workers.base.BaseWorker
def make_baseworker(**kwargs):
bob = kwargs["workers"]["bob"]
t = torch.rand(3, 3)
with bob.registration_enabled():
bob.register_obj(t)
def compare(detailed, original):
assert isinstance(detailed, syft.workers.base.BaseWorker)
assert detailed.id == original.id
return True
return [
{
"value": bob,
"simplified": (
CODE[syft.workers.base.BaseWorker],
((CODE[str], (b"bob",)),), # id (str)
),
"cmp_detailed": compare,
},
# Forced simplification
{
"forced": True,
"value": bob,
"simplified": (
FORCED_CODE[syft.workers.base.BaseWorker],
(
(CODE[str], (b"bob",)), # id (str)
msgpack.serde._simplify(
syft.hook.local_worker, bob._objects
), # (dict) _objects
True, # (bool) auto_add
),
),
"cmp_detailed": compare,
},
]
# syft.frameworks.torch.tensors.interpreters.autograd.AutogradTensor
def make_autogradtensor(**kwargs):
t = torch.tensor([1, 2, 3])
agt = syft.frameworks.torch.tensors.interpreters.autograd.AutogradTensor().on(t).child
agt.tag("aaa")
agt.describe("desc")
def compare(detailed, original):
assert type(detailed) == syft.frameworks.torch.tensors.interpreters.autograd.AutogradTensor
assert detailed.owner == original.owner
assert detailed.id == original.id
assert detailed.child.equal(original.child)
assert detailed.requires_grad == original.requires_grad
assert detailed.preinitialize_grad == original.preinitialize_grad
assert detailed.grad_fn == original.grad_fn
assert detailed.tags == original.tags
assert detailed.description == original.description
return True
return [
{
"value": agt,
"simplified": (
CODE[syft.frameworks.torch.tensors.interpreters.autograd.AutogradTensor],
(
None, # owner
agt.id, # (int)
msgpack.serde._simplify(
syft.hook.local_worker, agt.child
), # (AbstractTensor) chain
True, # (bool) requires_grad
False, # (bool) preinitialize_grad
None, # [always None, ignored in constructor] grad_fn
(CODE[set], ((CODE[str], (b"aaa",)),)), # (set of str) tags
(CODE[str], (b"desc",)), # (str) description
),
),
"cmp_detailed": compare,
}
]
# syft.frameworks.torch.tensors.interpreters.private.PrivateTensor
def make_privatetensor(**kwargs):
t = torch.tensor([1, 2, 3])
pt = t.private_tensor(allowed_users=("test",))
pt.tag("tag1")
pt.describe("private")
pt = pt.child
def compare(detailed, original):
assert type(detailed) == syft.frameworks.torch.tensors.interpreters.private.PrivateTensor
assert detailed.id == original.id
assert detailed.allowed_users == original.allowed_users
assert detailed.tags == original.tags
assert detailed.description == original.description
assert detailed.child.equal(original.child)
return True
return [
{
"value": pt,
"simplified": (
CODE[syft.frameworks.torch.tensors.interpreters.private.PrivateTensor],
(
pt.id, # (int or str) id
(CODE[tuple], ((CODE[str], (b"test",)),)), # (tuple of ?) allowed_users
(CODE[set], ((CODE[str], (b"tag1",)),)), # (set of str) tags
(CODE[str], (b"private",)), # (str) description
msgpack.serde._simplify(syft.hook.local_worker, t), # (AbstractTensor) chain
),
),
"cmp_detailed": compare,
}
]
# syft.frameworks.torch.tensors.interpreters.PlaceHolder
def make_placeholder(**kwargs):
ph = syft.frameworks.torch.tensors.interpreters.placeholder.PlaceHolder()
ph.tag("tag1")
ph.describe("just a placeholder")
def compare(detailed, original):
assert type(detailed) == syft.frameworks.torch.tensors.interpreters.placeholder.PlaceHolder
assert detailed.id == original.id
assert detailed.tags == original.tags
assert detailed.description == original.description
return True
return [
{
"value": ph,
"simplified": (
CODE[syft.frameworks.torch.tensors.interpreters.placeholder.PlaceHolder],
(
ph.id, # (int) id
(CODE[set], ((CODE[str], (b"tag1",)),)), # (set of str) tags
(CODE[str], (b"just a placeholder",)), # (str) description
),
),
"cmp_detailed": compare,
}
]
# Message
def make_message(**kwargs):
def compare(detailed, original):
assert type(detailed) == syft.messaging.message.Message
assert detailed.contents == original.contents
return True
return [
{
"value": syft.messaging.message.Message([1, 2, 3]),
"simplified": (
CODE[syft.messaging.message.Message],
((CODE[list], (1, 2, 3)),), # (Any) simplified content
),
"cmp_detailed": compare,
},
{
"value": syft.messaging.message.Message((1, 2, 3)),
"simplified": (
CODE[syft.messaging.message.Message],
((CODE[tuple], (1, 2, 3)),), # (Any) simplified content
),
"cmp_detailed": compare,
},
]
# syft.messaging.message.Operation
def make_operation(**kwargs):
bob = kwargs["workers"]["bob"]
bob.log_msgs = True
x = torch.tensor([1, 2, 3, 4]).send(bob)
y = x * 2
op1 = bob._get_msg(-1)
a = torch.tensor([[1, 2], [3, 4]]).send(bob)
b = a.sum(1, keepdim=True)
op2 = bob._get_msg(-1)
bob.log_msgs = False
def compare(detailed, original):
detailed_msg = (
detailed.cmd_name,
detailed.cmd_owner,
detailed.cmd_args,
detailed.cmd_kwargs,
)
original_msg = (
original.cmd_name,
original.cmd_owner,
original.cmd_args,
original.cmd_kwargs,
)
assert type(detailed) == syft.messaging.message.Operation
for i in range(len(original_msg)):
if type(original_msg[i]) != torch.Tensor:
assert detailed_msg[i] == original_msg[i]
else:
assert detailed_msg[i].equal(original_msg[i])
assert detailed.return_ids == original.return_ids
return True
message1 = (op1.cmd_name, op1.cmd_owner, op1.cmd_args, op1.cmd_kwargs)
message2 = (op2.cmd_name, op2.cmd_owner, op2.cmd_args, op2.cmd_kwargs)
return [
{
"value": op1,
"simplified": (
CODE[syft.messaging.message.Operation],
(
msgpack.serde._simplify(syft.hook.local_worker, message1), # (Any) message
(CODE[tuple], (op1.return_ids[0],)), # (tuple) return_ids
),
),
"cmp_detailed": compare,
},
{
"value": op2,
"simplified": (
CODE[syft.messaging.message.Operation],
(
msgpack.serde._simplify(syft.hook.local_worker, message2), # (Any) message
(CODE[tuple], (op2.return_ids[0],)), # (tuple) return_ids
),
),
"cmp_detailed": compare,
},
]
# syft.messaging.message.ObjectMessage
def make_objectmessage(**kwargs):
bob = kwargs["workers"]["bob"]
bob.log_msgs = True
x = torch.tensor([1, 2, 3, 4]).send(bob)
obj = bob._get_msg(-1)
bob.log_msgs = False
def compare(detailed, original):
assert type(detailed) == syft.messaging.message.ObjectMessage
# torch tensors
assert detailed.contents.equal(original.contents)
return True
return [
{
"value": obj,
"simplified": (
CODE[syft.messaging.message.ObjectMessage],
(
msgpack.serde._simplify(
syft.hook.local_worker, obj.contents
), # (Any) simplified contents
),
),
"cmp_detailed": compare,
}
]
# ObjectRequestMessage
def make_objectrequestmessage(**kwargs):
bob = kwargs["workers"]["bob"]
bob.log_msgs = True
x = torch.tensor([1, 2, 3, 4]).send(bob)
x.get()
obj_req = bob._get_msg(-1)
bob.log_msgs = False
def compare(detailed, original):
assert type(detailed) == syft.messaging.message.ObjectRequestMessage
assert detailed.contents == original.contents
return True
return [
{
"value": obj_req,
"simplified": (
CODE[syft.messaging.message.ObjectRequestMessage],
(
msgpack.serde._simplify(
syft.hook.local_worker, obj_req.contents
), # (Any) simplified contents
),
),
"cmp_detailed": compare,
}
]
# IsNoneMessage
def make_isnonemessage(**kwargs):
bob = kwargs["workers"]["bob"]
bob.log_msgs = True
t = torch.tensor([1, 2, 3, 4])
x = t.send(bob)
x.child.is_none()
nm = bob._get_msg(-1)
bob.log_msgs = False
def compare(detailed, original):
assert type(detailed) == syft.messaging.message.IsNoneMessage
# torch tensors
assert detailed.contents.equal(original.contents)
return True
return [
{
"value": nm,
"simplified": (
CODE[syft.messaging.message.IsNoneMessage],
(
msgpack.serde._simplify(
syft.hook.local_worker, nm.contents
), # (Any) simplified contents
),
),
"cmp_detailed": compare,
}
]
# GetShapeMessage
def make_getshapemessage(**kwargs):
bob = kwargs["workers"]["bob"]
bob.log_msgs = True
t = torch.tensor([1, 2, 3, 4])
x = t.send(bob)
z = x + x
s = z.shape
shape_message = bob._get_msg(-1)
bob.log_msgs = False
def compare(detailed, original):
assert type(detailed) == syft.messaging.message.GetShapeMessage
# torch tensor
assert detailed.contents.equal(original.contents)
return True
return [
{
"value": shape_message,
"simplified": (
CODE[syft.messaging.message.GetShapeMessage],
(
msgpack.serde._simplify(
syft.hook.local_worker, shape_message.contents
), # (Any) simplified contents
),
),
"cmp_detailed": compare,
}
]
# ForceObjectDeleteMessage
def make_forceobjectdeletemessage(**kwargs):
bob = kwargs["workers"]["bob"]
bob.log_msgs = True
t = torch.tensor([1, 2, 3, 4])
id = t.id
x = t.send(bob)
del x
del_message = bob._get_msg(-1)
bob.log_msgs = False
def compare(detailed, original):
assert type(detailed) == syft.messaging.message.ForceObjectDeleteMessage
assert detailed.contents == original.contents
return True
return [
{
"value": del_message,
"simplified": (
CODE[syft.messaging.message.ForceObjectDeleteMessage],
(id,), # (int) id
),
"cmp_detailed": compare,
}
]
# SearchMessage
def make_searchmessage(**kwargs):
search_message = syft.messaging.message.SearchMessage([1, "test", 3])
def compare(detailed, original):
assert type(detailed) == syft.messaging.message.SearchMessage
assert detailed.contents == original.contents
return True
return [
{
"value": search_message,
"simplified": (
CODE[syft.messaging.message.SearchMessage],
((CODE[list], (1, (CODE[str], (b"test",)), 3)),), # (Any) message
),
"cmp_detailed": compare,
}
]
# PlanCommandMessage
def make_plancommandmessage(**kwargs):
bob = kwargs["workers"]["bob"]
bob.log_msgs = True
@syft.func2plan(args_shape=[(1,)])
def plan(data):
return data * 3
plan.send(bob)
plan.owner.fetch_plan(plan.id, bob)
fetch_plan_cmd = bob._get_msg(-1)
bob.log_msgs = False
def compare(detailed, original):
assert type(detailed) == syft.messaging.message.PlanCommandMessage
assert detailed.contents == original.contents
return True
return [
{
"value": fetch_plan_cmd,
"simplified": (
CODE[syft.messaging.message.PlanCommandMessage],
(
(CODE[str], (b"fetch_plan",)), # (str) command
(CODE[tuple], (plan.id, False)), # (tuple) args
),
),
"cmp_detailed": compare,
}
]
# syft.exceptions.GetNotPermittedError
def make_getnotpermittederror(**kwargs):
try:
raise syft.exceptions.GetNotPermittedError()
except syft.exceptions.GetNotPermittedError as e:
err = e
def compare(detailed, original):
assert type(detailed) == syft.exceptions.GetNotPermittedError
assert (
traceback.format_tb(detailed.__traceback__)[-1]
== traceback.format_tb(original.__traceback__)[-1]
)
return True
return [
{
"value": err,
"simplified": (
CODE[syft.exceptions.GetNotPermittedError],
(
(CODE[str], (b"GetNotPermittedError",)), # (str) __name__
msgpack.serde._simplify(
syft.hook.local_worker,
"Traceback (most recent call last):\n"
+ "".join(traceback.format_tb(err.__traceback__)),
), # (str) traceback
(CODE[dict], tuple()), # (dict) attributes
),
),
"cmp_detailed": compare,
}
]
# syft.exceptions.ResponseSignatureError
def make_responsesignatureerror(**kwargs):
try:
raise syft.exceptions.ResponseSignatureError()
except syft.exceptions.ResponseSignatureError as e:
err = e
def compare(detailed, original):
assert type(detailed) == syft.exceptions.ResponseSignatureError
assert (
traceback.format_tb(detailed.__traceback__)[-1]
== traceback.format_tb(original.__traceback__)[-1]
)
assert detailed.get_attributes() == original.get_attributes()
return True
return [
{
"value": err,
"simplified": (
CODE[syft.exceptions.ResponseSignatureError],
(
(CODE[str], (b"ResponseSignatureError",)), # (str) __name__
msgpack.serde._simplify(
syft.hook.local_worker,
"Traceback (most recent call last):\n"
+ "".join(traceback.format_tb(err.__traceback__)),
), # (str) traceback
msgpack.serde._simplify(
syft.hook.local_worker, err.get_attributes()
), # (dict) attributes
),
),
"cmp_detailed": compare,
}
]
# syft.frameworks.torch.tensors.interpreters.gradients_core.GradFunc
def make_gradfn(**kwargs):
alice, bob = kwargs["workers"]["alice"], kwargs["workers"]["bob"]
t = torch.tensor([1, 2, 3])
x_share = t.share(alice, bob, requires_grad=True)
y_share = t.share(alice, bob, requires_grad=True)
z_share = x_share + y_share # AddBackward
# This is bad. We should find something robust
x_share.child.child.set_garbage_collect_data(False)
y_share.child.child.set_garbage_collect_data(False)
grad_fn = z_share.child.grad_fn
def compare(detailed, original):
assert isinstance(
detailed, syft.frameworks.torch.tensors.interpreters.gradients_core.GradFunc
)
assert detailed.__class__.__name__ == original.__class__.__name__
# This block only works only for syft tensor attributes
for detailed_attr, original_attr in zip(detailed._attributes, original._attributes):
assert detailed_attr.__class__.__name__ == original_attr.__class__.__name__
assert detailed_attr.get().equal(t)
return True
return [
{
"value": grad_fn,
"simplified": (
CODE[syft.frameworks.torch.tensors.interpreters.gradients_core.GradFunc],
(
CODE[list],
(
(CODE[str], (b"AddBackward",)),
msgpack.serde._simplify(syft.hook.local_worker, x_share.child),
msgpack.serde._simplify(syft.hook.local_worker, y_share.child),
),
),
),
"cmp_detailed": compare,
}
]
| 33.111699
| 113
| 0.535909
|
afa46cdcc5d3923089edd378ff4d22eeabc32083
| 2,154
|
py
|
Python
|
openstack_dashboard/dashboards/project/network_topology/utils.py
|
hashsos/hashcloudos-horizon
|
0cc080ca6777e4a1dac5cbcc6143202baddab176
|
[
"Apache-2.0"
] | 37
|
2018-10-30T02:47:24.000Z
|
2021-12-04T10:29:40.000Z
|
openstack_dashboard/dashboards/project/network_topology/utils.py
|
hashsos/hashcloudos-horizon
|
0cc080ca6777e4a1dac5cbcc6143202baddab176
|
[
"Apache-2.0"
] | null | null | null |
openstack_dashboard/dashboards/project/network_topology/utils.py
|
hashsos/hashcloudos-horizon
|
0cc080ca6777e4a1dac5cbcc6143202baddab176
|
[
"Apache-2.0"
] | 35
|
2018-11-26T03:36:31.000Z
|
2021-12-04T10:29:41.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from openstack_dashboard.api import base
from openstack_dashboard import policy
from openstack_dashboard.usage import quotas
def _quota_exceeded(request, quota):
usages = quotas.tenant_quota_usages(request, targets=(quota, ))
available = usages.get(quota, {}).get('available', 1)
return available <= 0
def get_context(request, context=None):
"""Returns common context data for network topology views."""
if context is None:
context = {}
network_config = getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {})
context['launch_instance_allowed'] = policy.check(
(("compute", "os_compute_api:servers:create"),), request)
context['instance_quota_exceeded'] = _quota_exceeded(request, 'instances')
context['create_network_allowed'] = policy.check(
(("network", "create_network"),), request)
context['network_quota_exceeded'] = _quota_exceeded(request, 'networks')
context['create_router_allowed'] = (
network_config.get('enable_router', True) and
policy.check((("network", "create_router"),), request))
context['router_quota_exceeded'] = _quota_exceeded(request, 'routers')
context['console_type'] = getattr(settings, 'CONSOLE_TYPE', 'AUTO')
context['show_ng_launch'] = (
base.is_service_enabled(request, 'compute') and
getattr(settings, 'LAUNCH_INSTANCE_NG_ENABLED', True))
context['show_legacy_launch'] = (
base.is_service_enabled(request, 'compute') and
getattr(settings, 'LAUNCH_INSTANCE_LEGACY_ENABLED', False))
return context
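# Hedged usage sketch: a hypothetical topology view could merge this common context into
# its own template context; the view class and template name below are illustrative only,
# not part of this module.
from django.views import generic
class _ExampleTopologyView(generic.TemplateView):
    template_name = 'project/network_topology/index.html'
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        return get_context(self.request, context)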
| 42.235294
| 78
| 0.721448
|
5e144f276bb508b943e604379e0ab18fdc435e65
| 5,197
|
py
|
Python
|
huaweicloud-sdk-iec/huaweicloudsdkiec/v1/model/volumes_attached.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 64
|
2020-06-12T07:05:07.000Z
|
2022-03-30T03:32:50.000Z
|
huaweicloud-sdk-iec/huaweicloudsdkiec/v1/model/volumes_attached.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 11
|
2020-07-06T07:56:54.000Z
|
2022-01-11T11:14:40.000Z
|
huaweicloud-sdk-iec/huaweicloudsdkiec/v1/model/volumes_attached.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 24
|
2020-06-08T11:42:13.000Z
|
2022-03-04T06:44:08.000Z
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class VolumesAttached:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'id': 'str',
'boot_index': 'str',
'delete_on_termination': 'str',
'device': 'str'
}
attribute_map = {
'id': 'id',
'boot_index': 'bootIndex',
'delete_on_termination': 'delete_on_termination',
'device': 'device'
}
def __init__(self, id=None, boot_index=None, delete_on_termination=None, device=None):
"""VolumesAttached - a model defined in huaweicloud sdk"""
self._id = None
self._boot_index = None
self._delete_on_termination = None
self._device = None
self.discriminator = None
if id is not None:
self.id = id
if boot_index is not None:
self.boot_index = boot_index
if delete_on_termination is not None:
self.delete_on_termination = delete_on_termination
if device is not None:
self.device = device
@property
def id(self):
"""Gets the id of this VolumesAttached.
        Disk ID, in UUID format.
:return: The id of this VolumesAttached.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this VolumesAttached.
        Disk ID, in UUID format.
:param id: The id of this VolumesAttached.
:type: str
"""
self._id = id
@property
def boot_index(self):
"""Gets the boot_index of this VolumesAttached.
        Boot flag. - "0" indicates the system disk - a non-"0" value indicates a data disk.
:return: The boot_index of this VolumesAttached.
:rtype: str
"""
return self._boot_index
@boot_index.setter
def boot_index(self, boot_index):
"""Sets the boot_index of this VolumesAttached.
        Boot flag. - "0" indicates the system disk - a non-"0" value indicates a data disk.
:param boot_index: The boot_index of this VolumesAttached.
:type: str
"""
self._boot_index = boot_index
@property
def delete_on_termination(self):
"""Gets the delete_on_termination of this VolumesAttached.
        Whether the disk is also deleted when the edge instance is deleted. - true: yes - false: no
:return: The delete_on_termination of this VolumesAttached.
:rtype: str
"""
return self._delete_on_termination
@delete_on_termination.setter
def delete_on_termination(self, delete_on_termination):
"""Sets the delete_on_termination of this VolumesAttached.
        Whether the disk is also deleted when the edge instance is deleted. - true: yes - false: no
:param delete_on_termination: The delete_on_termination of this VolumesAttached.
:type: str
"""
self._delete_on_termination = delete_on_termination
@property
def device(self):
"""Gets the device of this VolumesAttached.
        Device name under which the disk is attached, i.e. the disk mount point.
:return: The device of this VolumesAttached.
:rtype: str
"""
return self._device
@device.setter
def device(self, device):
"""Sets the device of this VolumesAttached.
        Device name under which the disk is attached, i.e. the disk mount point.
:param device: The device of this VolumesAttached.
:type: str
"""
self._device = device
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, VolumesAttached):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
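# Illustrative usage sketch; the field values below are made-up placeholders.
if __name__ == "__main__":
    example = VolumesAttached(id="11111111-2222-3333-4444-555555555555", boot_index="0",
                              delete_on_termination="true", device="/dev/vda")
    print(example.to_dict())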
| 26.515306
| 90
| 0.567058
|
4a17b17f55cc70462f3c6da0e9a412dda1d90844
| 617
|
py
|
Python
|
datasource_service/tests/test_main.py
|
airavata-courses/DCoders
|
cbe19a286f7e28abb031d0fa7f3576275b8999e6
|
[
"Apache-2.0"
] | 1
|
2022-02-03T08:41:39.000Z
|
2022-02-03T08:41:39.000Z
|
datasource_service/tests/test_main.py
|
airavata-courses/DCoders
|
cbe19a286f7e28abb031d0fa7f3576275b8999e6
|
[
"Apache-2.0"
] | 23
|
2022-01-24T04:51:36.000Z
|
2022-03-08T19:58:17.000Z
|
datasource_service/tests/test_main.py
|
airavata-courses/DCoders
|
cbe19a286f7e28abb031d0fa7f3576275b8999e6
|
[
"Apache-2.0"
] | null | null | null |
""" Pytest for the GET api { Mock the nexrad object so that it doesn't download the radar object for the
unit testing } """
from fastapi.testclient import TestClient
from datasource_service.main import app
test_client = TestClient(app)
def test_nexrad_data():
""" Test GET api """
response = test_client.get('/api/v1/2013/05/31/KTLX')
assert response.status_code == 200
def test_nexrad_error():
""" Test wrong station input """
response = test_client.get('/api/v1/2013/05/31/KTL')
assert response.status_code == 404
assert response.text == '{"detail":"Radar station is not found"}'
| 28.045455
| 104
| 0.705024
|
17956ed53b45276c7ce8a9c12d6469a7c6d4f9b3
| 386
|
py
|
Python
|
app_portfolio_contact/models.py
|
MichaelDoctor/Portfolio
|
41d9104ef6d34f8eb146230b19038b445351c713
|
[
"MIT"
] | null | null | null |
app_portfolio_contact/models.py
|
MichaelDoctor/Portfolio
|
41d9104ef6d34f8eb146230b19038b445351c713
|
[
"MIT"
] | 4
|
2021-06-09T18:02:18.000Z
|
2022-01-13T03:06:24.000Z
|
app_portfolio_contact/models.py
|
MichaelDoctor/Portfolio
|
41d9104ef6d34f8eb146230b19038b445351c713
|
[
"MIT"
] | null | null | null |
from django.db import models
from datetime import datetime
class Contact(models.Model):
name = models.CharField(max_length=150)
email = models.CharField(max_length=100)
subject = models.CharField(max_length=100)
message = models.TextField(blank=True)
contact_date = models.DateTimeField(default=datetime.now, blank=True)
    def __str__(self):
        return self.email
| 27.571429
| 73
| 0.738342
|
65b98c72acfc11fa334c61d425f16112e48a4d4b
| 6,419
|
py
|
Python
|
proxy/core/acceptor/pool.py
|
sakurai-youhei/proxy.py
|
88af8e2babc08c0ad158de35a5a0351571832388
|
[
"BSD-3-Clause"
] | 1,891
|
2015-01-17T13:53:36.000Z
|
2022-03-31T20:24:58.000Z
|
proxy/core/acceptor/pool.py
|
yg-park/proxy.py
|
b7b10de2a8f447d201bbb1414e39aa4bc4dcaf9f
|
[
"BSD-3-Clause"
] | 909
|
2015-03-04T06:27:37.000Z
|
2022-03-31T04:26:16.000Z
|
proxy/core/acceptor/pool.py
|
yg-park/proxy.py
|
b7b10de2a8f447d201bbb1414e39aa4bc4dcaf9f
|
[
"BSD-3-Clause"
] | 531
|
2015-02-05T16:43:01.000Z
|
2022-03-31T11:49:21.000Z
|
# -*- coding: utf-8 -*-
"""
proxy.py
~~~~~~~~
⚡⚡⚡ Fast, Lightweight, Pluggable, TLS interception capable proxy server focused on
Network monitoring, controls & Application development, testing, debugging.
:copyright: (c) 2013-present by Abhinav Singh and contributors.
:license: BSD, see LICENSE for more details.
"""
import argparse
import logging
import multiprocessing
import socket
import threading
from multiprocessing import connection
from multiprocessing.reduction import send_handle
from typing import List, Optional, Type
from .acceptor import Acceptor
from .work import Work
from ..event import EventQueue, EventDispatcher
from ...common.flag import flags
from ...common.constants import DEFAULT_BACKLOG, DEFAULT_ENABLE_EVENTS
from ...common.constants import DEFAULT_IPV6_HOSTNAME, DEFAULT_NUM_WORKERS, DEFAULT_PORT
logger = logging.getLogger(__name__)
# Lock shared by worker processes
LOCK = multiprocessing.Lock()
flags.add_argument(
'--backlog',
type=int,
default=DEFAULT_BACKLOG,
help='Default: 100. Maximum number of pending connections to proxy server')
flags.add_argument(
'--enable-events',
action='store_true',
default=DEFAULT_ENABLE_EVENTS,
help='Default: False. Enables core to dispatch lifecycle events. '
'Plugins can be used to subscribe for core events.'
)
flags.add_argument(
'--hostname',
type=str,
default=str(DEFAULT_IPV6_HOSTNAME),
help='Default: ::1. Server IP address.')
flags.add_argument(
'--port', type=int, default=DEFAULT_PORT,
help='Default: 8899. Server port.')
flags.add_argument(
'--num-workers',
type=int,
default=DEFAULT_NUM_WORKERS,
help='Defaults to number of CPU cores.')
class AcceptorPool:
"""AcceptorPool.
Pre-spawns worker processes to utilize all cores available on the system.
A server socket is initialized and dispatched over a pipe to these workers.
    Each worker process then accepts new client connections.
Example usage:
pool = AcceptorPool(flags=..., work_klass=...)
try:
pool.setup()
while True:
time.sleep(1)
finally:
pool.shutdown()
`work_klass` must implement `work.Work` class.
    Optionally, AcceptorPool also initializes a global event queue.
It is a multiprocess safe queue which can be used to build pubsub patterns
for message sharing or signaling within proxy.py.
"""
def __init__(self, flags: argparse.Namespace,
work_klass: Type[Work]) -> None:
self.flags = flags
self.socket: Optional[socket.socket] = None
self.acceptors: List[Acceptor] = []
self.work_queues: List[connection.Connection] = []
self.work_klass = work_klass
self.event_queue: Optional[EventQueue] = None
self.event_dispatcher: Optional[EventDispatcher] = None
self.event_dispatcher_thread: Optional[threading.Thread] = None
self.event_dispatcher_shutdown: Optional[threading.Event] = None
self.manager: Optional[multiprocessing.managers.SyncManager] = None
if self.flags.enable_events:
self.manager = multiprocessing.Manager()
self.event_queue = EventQueue(self.manager.Queue())
def listen(self) -> None:
self.socket = socket.socket(self.flags.family, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind((str(self.flags.hostname), self.flags.port))
self.socket.listen(self.flags.backlog)
self.socket.setblocking(False)
logger.info(
'Listening on %s:%d' %
(self.flags.hostname, self.flags.port))
def start_workers(self) -> None:
"""Start worker processes."""
for acceptor_id in range(self.flags.num_workers):
work_queue = multiprocessing.Pipe()
acceptor = Acceptor(
idd=acceptor_id,
work_queue=work_queue[1],
flags=self.flags,
work_klass=self.work_klass,
lock=LOCK,
event_queue=self.event_queue,
)
acceptor.start()
logger.debug(
'Started acceptor#%d process %d',
acceptor_id,
acceptor.pid)
self.acceptors.append(acceptor)
self.work_queues.append(work_queue[0])
logger.info('Started %d workers' % self.flags.num_workers)
def start_event_dispatcher(self) -> None:
self.event_dispatcher_shutdown = threading.Event()
assert self.event_dispatcher_shutdown
assert self.event_queue
self.event_dispatcher = EventDispatcher(
shutdown=self.event_dispatcher_shutdown,
event_queue=self.event_queue
)
self.event_dispatcher_thread = threading.Thread(
target=self.event_dispatcher.run
)
self.event_dispatcher_thread.start()
logger.debug('Thread ID: %d', self.event_dispatcher_thread.ident)
def shutdown(self) -> None:
logger.info('Shutting down %d workers' % self.flags.num_workers)
for acceptor in self.acceptors:
acceptor.running.set()
if self.flags.enable_events:
assert self.event_dispatcher_shutdown
assert self.event_dispatcher_thread
self.event_dispatcher_shutdown.set()
self.event_dispatcher_thread.join()
logger.debug(
'Shutdown of global event dispatcher thread %d successful',
self.event_dispatcher_thread.ident)
for acceptor in self.acceptors:
acceptor.join()
logger.debug('Acceptors shutdown')
def setup(self) -> None:
"""Listen on port, setup workers and pass server socket to workers."""
self.listen()
if self.flags.enable_events:
logger.info('Core Event enabled')
self.start_event_dispatcher()
self.start_workers()
# Send server socket to all acceptor processes.
assert self.socket is not None
for index in range(self.flags.num_workers):
send_handle(
self.work_queues[index],
self.socket.fileno(),
self.acceptors[index].pid
)
self.work_queues[index].close()
self.socket.close()
| 34.510753
| 88
| 0.651503
|
f148ca3a9a323b038fd8f550d618063c2d934dc3
| 1,265
|
py
|
Python
|
arcfire/core/settings/test.py
|
allanberry/arcfire
|
c41bad3ae7792406e169f9f7acd02f7e52467cbe
|
[
"MIT"
] | null | null | null |
arcfire/core/settings/test.py
|
allanberry/arcfire
|
c41bad3ae7792406e169f9f7acd02f7e52467cbe
|
[
"MIT"
] | 38
|
2015-10-21T19:10:36.000Z
|
2015-12-18T11:57:12.000Z
|
arcfire/core/settings/test.py
|
allanberry/arcfire
|
c41bad3ae7792406e169f9f7acd02f7e52467cbe
|
[
"MIT"
] | null | null | null |
from .base import *
ALLOWED_HOSTS = [
'localhost',
'127.0.0.1',
'0.0.0.0'
]
DEBUG = True
# If debug is enabled, Compressor is turned off; this will manually activate
# it. This is important to provide Django tags (like 'static') to JS files.
# COMPRESS_ENABLED = False
# However, in development, we had better override the JSMinFilter
# COMPRESS_JS_FILTERS = [
# 'compressor.filters.template.TemplateFilter',
# # 'compressor.filters.jsmin.JSMinFilter',
# ]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'arcfire_test',
'USER': 'aljabear',
'PASSWORD': secret['DATABASE_PASSWORD'],
'HOST': '127.0.0.1',
# 'PORT': '5432', #(using default)
# 'TEST': {
# 'NAME': 'arcfire_test'
# }
}
}
STATIC_URL = '/static/'
STATIC_ROOT = PROJ_DIR.child('static')
MEDIA_URL = '/media/'
MEDIA_ROOT = PROJ_DIR.child('media').child('test')
MEDIA_URL = 'http://localhost:1917/media/'
UPLOAD_ROOT = MEDIA_ROOT + 'uploads/'
#email
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025
#EMAIL_HOST_USER = '<mailbox>'
#EMAIL_HOST_PASSWORD = '<password>'
#DEFAULT_FROM_EMAIL = '<address>'
#SERVER_EMAIL = '<address>'
| 23.425926 | 78 | 0.616601 |
64517f2d79950d88b7b8bd3f77ad88c15c43f0a1 | 559 | py | Python | continuous_control/tests/unit/unit_test_suites.py | kadysongbb/dr-trpo | c4d994944226dccc74609377c0e1e0589c84acba | ["MIT"] | 4 | 2021-05-07T00:59:50.000Z | 2022-01-15T01:43:09.000Z | continuous_control/tests/unit/unit_test_suites.py | kadysongbb/dr-trpo | c4d994944226dccc74609377c0e1e0589c84acba | ["MIT"] | null | null | null | continuous_control/tests/unit/unit_test_suites.py | kadysongbb/dr-trpo | c4d994944226dccc74609377c0e1e0589c84acba | ["MIT"] | 2 | 2020-07-13T18:46:52.000Z | 2020-07-17T22:03:11.000Z |
import os
import unittest
import tests.unit.GAC.networks as test_networks
import tests.unit.GAC.helpers as helpers
def create_and_run_test_suite(test_module):
print('Running tests from: ' + str(test_module.__file__))
loader = unittest.TestLoader()
suite = unittest.TestSuite()
suite.addTests(loader.loadTestsFromModule(test_module))
unittest.TextTestRunner(verbosity=1).run(suite)
print('')
def main():
create_and_run_test_suite(test_networks)
create_and_run_test_suite(helpers)
if __name__ == '__main__':
main()
| 20.703704 | 61 | 0.747764 |
c6663c476bbc083670301d0b8ac3af5c54b27feb | 128 | py | Python | projet-2021/iads/__init__.py | tripincloud/Machine-Learning-Google-Play-Store | acd5189dd194190d2877180fa9ae5e14ec1d9bda | ["MIT"] | null | null | null | projet-2021/iads/__init__.py | tripincloud/Machine-Learning-Google-Play-Store | acd5189dd194190d2877180fa9ae5e14ec1d9bda | ["MIT"] | null | null | null | projet-2021/iads/__init__.py | tripincloud/Machine-Learning-Google-Play-Store | acd5189dd194190d2877180fa9ae5e14ec1d9bda | ["MIT"] | 1 | 2022-03-07T17:13:47.000Z | 2022-03-07T17:13:47.000Z |
# -*- coding: utf-8 -*-
"""
Package: iads
File: __init__.py
Year: LU3IN026 - semester 2 - 2020-2021, Sorbonne Université
"""
| 14.222222 | 61 | 0.648438 |
36de06e30ead5fb40685b6aa38604eef8b563195 | 95 | py | Python | tests/sanity_test.py | cmccandless/homectl | 338b09583b9817a11bf55df7c4db6143b43d74f9 | ["MIT"] | 7 | 2018-04-20T03:43:43.000Z | 2021-11-18T23:13:17.000Z | tests/sanity_test.py | lifeModder19135/vstask | 1c256618c6b3ed0cc4c1ac50b6eca1a23881f9c5 | ["MIT"] | 3 | 2018-10-16T23:39:11.000Z | 2020-06-09T17:16:32.000Z | tests/sanity_test.py | cmccandless/easy_getch | fdd23f308cdd830d599c522a3b6d56ade3cf76c2 | ["MIT"] | 4 | 2020-11-02T02:21:51.000Z | 2022-02-23T07:21:47.000Z |
import unittest
class SanityTest(unittest.TestCase):
def test_sanity(self):
pass
| 13.571429 | 36 | 0.705263 |
6c206db21e0c0e9ee1a70471d4f949a6e41e1232 | 712 | py | Python | s02/method/handler.py | hiparker/study-python | 262f3f8f22f886e83c3bd19b7326e92257ead556 | ["Apache-2.0"] | 1 | 2021-01-07T14:29:34.000Z | 2021-01-07T14:29:34.000Z | s02/method/handler.py | hiparker/study-python | 262f3f8f22f886e83c3bd19b7326e92257ead556 | ["Apache-2.0"] | null | null | null | s02/method/handler.py | hiparker/study-python | 262f3f8f22f886e83c3bd19b7326e92257ead556 | ["Apache-2.0"] | null | null | null |
"""
处理方法
"""
# 处理打印
def println(str):
print(str)
# 验证是否非法
def inputConditionValidata(condition, inputTup):
# 非法判断
try:
inputNumT = int(condition)
# 参数非法 -- 为空
if inputNumT is None:
return False
# 输入参数不包含
ifExist = inputNumT in inputTup
if not ifExist:
return False
return True
except BaseException as error:
return False
# 比较输赢
def compareValidata(cval, pval):
if (pval == 0 and cval == 2) or (pval == 1 and cval == 0) \
or (pval == 2 and cval == 1):
println("---- 电脑赢了!")
elif pval == cval:
println("---- 平局!")
else:
println("---- 你赢了!")
println("")
| 19.243243 | 63 | 0.519663 |
f1fea52ae670eb331346c917133f091f04bab89e | 3,295 | py | Python | clean_res_helper.py | hcq0618/Android-CleanResource | c65a4fc0ab6ec6b9e49ac88cb5208c1f35b643bf | ["MIT"] | 9 | 2017-06-23T06:38:37.000Z | 2021-09-26T13:18:10.000Z | clean_res_helper.py | hcq0618/Android-CleanResource | c65a4fc0ab6ec6b9e49ac88cb5208c1f35b643bf | ["MIT"] | null | null | null | clean_res_helper.py | hcq0618/Android-CleanResource | c65a4fc0ab6ec6b9e49ac88cb5208c1f35b643bf | ["MIT"] | 1 | 2018-09-26T12:36:32.000Z | 2018-09-26T12:36:32.000Z |
#!/usr/local/bin/python
# -*-coding:utf-8-*-
import json
import os
import sys
class ConfigReader:
def __init__(self):
config_path = os.path.abspath('.') + os.sep + "clean_res_config.json"
if not os.path.exists(config_path):
print "Warning:there is no file - " + config_path
sys.exit()
f = None
try:
if os.path.exists(config_path):
f = open(config_path, 'r')
self.config = json.load(f)
finally:
if f:
f.close()
@staticmethod
def encode_path(path):
if path:
return path.encode('gbk')
else:
return path
def get_path(self, path_key):
paths = None
if self.config and 'paths' in self.config:
paths = self.config['paths']
if paths and path_key in paths:
path = paths[path_key]
if isinstance(path, list):
path_list = path
for index in range(len(path_list)):
path_list[index] = self.encode_path(path_list[index])
return path_list
else:
return self.encode_path(path)
return None
def get_params(self, params_key):
params = None
if self.config and 'params' in self.config:
            params = self.config['params']
if params and params_key in params:
return params[params_key]
return None
def get_project_path(self):
return self.get_path('projectPath')
def get_lib_paths(self):
return self.get_path('libPaths')
def get_lint_path(self):
return self.get_path('lintPath')
def get_matrix_json_result_path(self):
return self.get_path('matrixJsonResultPath')
def get_keep_file_path_keys(self):
return self.get_params('keepFilePathKeys')
def get_module_name(self):
return self.get_params('moduleName')
class Utils:
def __init__(self):
return
    # Get the file size in KB, rounded to two decimal places
@staticmethod
def get_file_size(file_path):
if os.path.exists(file_path):
file_size = os.path.getsize(file_path)
file_size = file_size / float(1024)
return round(file_size, 2)
    # Whether the path is one that should be ignored (kept)
@staticmethod
def is_keep_file_path(keep_file_path_keys, file_path):
if keep_file_path_keys:
for key in keep_file_path_keys:
if os.sep + key in file_path:
# print key
return True
return False
@staticmethod
def print_result(unused_file_count, unused_file_total_size, delete_file_count, delete_file_total_size):
print "Total unused resources count is %s and total size is %s kb" % \
(str(unused_file_count), str(unused_file_total_size))
print "Total deleted file resources count is %s and total size is %s kb" % \
(str(delete_file_count), str(delete_file_total_size))
@staticmethod
def delete_file(file_path):
if os.path.exists(file_path):
os.remove(file_path)
print file_path + " was deleted!"
return True
else:
print file_path + " is not exists"
return False
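# Illustrative sketch only (not part of the original file): the shape of
# clean_res_config.json implied by the accessors above. The key names are the
# ones the code reads; all values here are placeholders.
_EXAMPLE_CONFIG = {
    "paths": {
        "projectPath": "D:/work/MyApp",
        "libPaths": ["D:/work/LibA", "D:/work/LibB"],
        "lintPath": "D:/android-sdk/tools/bin/lint.bat",
        "matrixJsonResultPath": "D:/work/matrix_result.json"
    },
    "params": {
        "keepFilePathKeys": ["keep_res", "third_party"],
        "moduleName": "app"
    }
}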
| 28.162393 | 107 | 0.58786 |
598eb2a3e5b0ab7aee2ef0a496b4a7b7450221f1 | 377 | py | Python | test/test_coordinates.py | simonpf/gprof_nn | c00fb88266d665e82fd872a77768293945abf301 | ["MIT"] | 1 | 2021-08-05T04:00:53.000Z | 2021-08-05T04:00:53.000Z | test/test_coordinates.py | simonpf/gprof_nn | c00fb88266d665e82fd872a77768293945abf301 | ["MIT"] | null | null | null | test/test_coordinates.py | simonpf/gprof_nn | c00fb88266d665e82fd872a77768293945abf301 | ["MIT"] | null | null | null |
"""
Test for the coordinates submodule.
"""
import numpy as np
from gprof_nn.coordinates import latlon_to_ecef
def test_latlon_to_ecef():
lons = [0, 90, 180, 270, 360]
lats = [0, 0, 0, 0, 0]
x, y, z = latlon_to_ecef(lons, lats)
assert np.all(np.isclose(x[[1, 3]], 0.0))
assert np.all(np.isclose(y[[0, 2, 4]], 0.0))
assert np.all(np.isclose(z, 0.0))
| 22.176471 | 48 | 0.62069 |
dc1a10f06992e81f44bc7fe33f6b0d854ab2c7ac | 194,456 | py | Python | lbry/lbry/extras/daemon/Daemon.py | ju-sh/lbry-sdk | 077281a77a82cfdfca0ec02922d4e582233bae43 | ["MIT"] | null | null | null | lbry/lbry/extras/daemon/Daemon.py | ju-sh/lbry-sdk | 077281a77a82cfdfca0ec02922d4e582233bae43 | ["MIT"] | null | null | null | lbry/lbry/extras/daemon/Daemon.py | ju-sh/lbry-sdk | 077281a77a82cfdfca0ec02922d4e582233bae43 | ["MIT"] | null | null | null |
import os
import asyncio
import logging
import json
import time
import inspect
import typing
import base58
import random
import ecdsa
import hashlib
from urllib.parse import urlencode, quote
from typing import Callable, Optional, List
from binascii import hexlify, unhexlify
from traceback import format_exc
from aiohttp import web
from functools import wraps, partial
from google.protobuf.message import DecodeError
from torba.client.wallet import Wallet
from torba.client.baseaccount import SingleKey, HierarchicalDeterministic
from lbry import utils
from lbry.conf import Config, Setting
from lbry.blob.blob_file import is_valid_blobhash, BlobBuffer
from lbry.blob_exchange.downloader import download_blob
from lbry.dht.peer import make_kademlia_peer
from lbry.error import DownloadSDTimeout, ComponentsNotStarted
from lbry.error import NullFundsError, NegativeFundsError, ComponentStartConditionNotMet
from lbry.extras import system_info
from lbry.extras.daemon import analytics
from lbry.extras.daemon.Components import WALLET_COMPONENT, DATABASE_COMPONENT, DHT_COMPONENT, BLOB_COMPONENT
from lbry.extras.daemon.Components import STREAM_MANAGER_COMPONENT
from lbry.extras.daemon.Components import EXCHANGE_RATE_MANAGER_COMPONENT, UPNP_COMPONENT
from lbry.extras.daemon.ComponentManager import RequiredCondition
from lbry.extras.daemon.ComponentManager import ComponentManager
from lbry.extras.daemon.json_response_encoder import JSONResponseEncoder
from lbry.extras.daemon import comment_client
from lbry.extras.daemon.undecorated import undecorated
from lbry.wallet.transaction import Transaction, Output, Input
from lbry.wallet.account import Account as LBCAccount
from lbry.wallet.dewies import dewies_to_lbc, lbc_to_dewies
from lbry.schema.claim import Claim
from lbry.schema.url import URL
if typing.TYPE_CHECKING:
from lbry.blob.blob_manager import BlobManager
from lbry.dht.node import Node
from lbry.extras.daemon.Components import UPnPComponent
from lbry.extras.daemon.exchange_rate_manager import ExchangeRateManager
from lbry.extras.daemon.storage import SQLiteStorage
from lbry.wallet.manager import LbryWalletManager
from lbry.wallet.ledger import MainNetLedger
from lbry.stream.stream_manager import StreamManager
log = logging.getLogger(__name__)
def requires(*components, **conditions):
if conditions and ["conditions"] != list(conditions.keys()):
raise SyntaxError("invalid conditions argument")
condition_names = conditions.get("conditions", [])
def _wrap(fn):
@wraps(fn)
def _inner(*args, **kwargs):
component_manager = args[0].component_manager
for condition_name in condition_names:
condition_result, err_msg = component_manager.evaluate_condition(condition_name)
if not condition_result:
raise ComponentStartConditionNotMet(err_msg)
if not component_manager.all_components_running(*components):
raise ComponentsNotStarted("the following required components have not yet started: "
"%s" % json.dumps(components))
return fn(*args, **kwargs)
return _inner
return _wrap
def deprecated(new_command=None):
def _deprecated_wrapper(f):
f.new_command = new_command
f._deprecated = True
return f
return _deprecated_wrapper
INITIALIZING_CODE = 'initializing'
# TODO: make this consistent with the stages in Downloader.py
DOWNLOAD_METADATA_CODE = 'downloading_metadata'
DOWNLOAD_TIMEOUT_CODE = 'timeout'
DOWNLOAD_RUNNING_CODE = 'running'
DOWNLOAD_STOPPED_CODE = 'stopped'
STREAM_STAGES = [
(INITIALIZING_CODE, 'Initializing'),
(DOWNLOAD_METADATA_CODE, 'Downloading metadata'),
(DOWNLOAD_RUNNING_CODE, 'Started %s, got %s/%s blobs, stream status: %s'),
(DOWNLOAD_STOPPED_CODE, 'Paused stream'),
(DOWNLOAD_TIMEOUT_CODE, 'Stream timed out')
]
CONNECTION_STATUS_CONNECTED = 'connected'
CONNECTION_STATUS_NETWORK = 'network_connection'
CONNECTION_MESSAGES = {
CONNECTION_STATUS_CONNECTED: 'No connection problems detected',
CONNECTION_STATUS_NETWORK: "Your internet connection appears to have been interrupted",
}
SHORT_ID_LEN = 20
MAX_UPDATE_FEE_ESTIMATE = 0.3
def encode_pagination_doc(items):
return {
"page": "Page number of the current items.",
"page_size": "Number of items to show on a page.",
"total_pages": "Total number of pages.",
"total_items": "Total number of items.",
"items": [items],
}
async def maybe_paginate(get_records: Callable, get_record_count: Callable,
page: Optional[int], page_size: Optional[int], **constraints):
if None not in (page, page_size):
constraints.update({
"offset": page_size * (page - 1),
"limit": page_size
})
total_items = await get_record_count(**constraints)
return {
"items": await get_records(**constraints),
"total_pages": int((total_items + (page_size - 1)) / page_size),
"total_items": total_items,
"page": page, "page_size": page_size
}
return await get_records(**constraints)
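# Worked example (illustrative, not from the original module): with page=3,
# page_size=20 and 45 matching records, maybe_paginate() queries with
# offset=40 and limit=20, and reports total_pages = int((45 + 19) / 20) == 3.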
def sort_claim_results(claims):
claims.sort(key=lambda d: (d['height'], d['name'], d['claim_id'], d['txid'], d['nout']))
DHT_HAS_CONTACTS = "dht_has_contacts"
WALLET_IS_UNLOCKED = "wallet_is_unlocked"
class DHTHasContacts(RequiredCondition):
name = DHT_HAS_CONTACTS
component = DHT_COMPONENT
message = "your node is not connected to the dht"
@staticmethod
def evaluate(component):
return len(component.contacts) > 0
class WalletIsUnlocked(RequiredCondition):
name = WALLET_IS_UNLOCKED
component = WALLET_COMPONENT
message = "your wallet is locked"
@staticmethod
def evaluate(component):
return not component.check_locked()
class JSONRPCError:
# http://www.jsonrpc.org/specification#error_object
CODE_PARSE_ERROR = -32700 # Invalid JSON. Error while parsing the JSON text.
CODE_INVALID_REQUEST = -32600 # The JSON sent is not a valid Request object.
CODE_METHOD_NOT_FOUND = -32601 # The method does not exist / is not available.
CODE_INVALID_PARAMS = -32602 # Invalid method parameter(s).
    CODE_INTERNAL_ERROR = -32603  # Internal JSON-RPC error (roughly the JSON-RPC analogue of an HTTP 500)
    CODE_APPLICATION_ERROR = -32500  # Generic application-level error
CODE_AUTHENTICATION_ERROR = -32501 # Authentication failed
MESSAGES = {
CODE_PARSE_ERROR: "Parse Error. Data is not valid JSON.",
CODE_INVALID_REQUEST: "JSON data is not a valid Request",
CODE_METHOD_NOT_FOUND: "Method Not Found",
CODE_INVALID_PARAMS: "Invalid Params",
CODE_INTERNAL_ERROR: "Internal Error",
CODE_AUTHENTICATION_ERROR: "Authentication Failed",
}
HTTP_CODES = {
CODE_INVALID_REQUEST: 400,
CODE_PARSE_ERROR: 400,
CODE_INVALID_PARAMS: 400,
CODE_METHOD_NOT_FOUND: 404,
CODE_INTERNAL_ERROR: 500,
CODE_APPLICATION_ERROR: 500,
CODE_AUTHENTICATION_ERROR: 401,
}
def __init__(self, message, code=CODE_APPLICATION_ERROR, traceback=None, data=None):
assert isinstance(code, int), "'code' must be an int"
assert (data is None or isinstance(data, dict)), "'data' must be None or a dict"
self.code = code
if message is None:
message = self.MESSAGES[code] if code in self.MESSAGES else "API Error"
self.message = message
self.data = {} if data is None else data
self.traceback = []
if traceback is not None:
self.traceback = trace_lines = traceback.split("\n")
for i, t in enumerate(trace_lines):
if "--- <exception caught here> ---" in t:
if len(trace_lines) > i + 1:
self.traceback = [j for j in trace_lines[i + 1:] if j]
break
def to_dict(self):
return {
'code': self.code,
'message': self.message,
'data': self.traceback
}
@classmethod
def create_from_exception(cls, message, code=CODE_APPLICATION_ERROR, traceback=None):
return cls(message, code=code, traceback=traceback)
class UnknownAPIMethodError(Exception):
pass
def jsonrpc_dumps_pretty(obj, **kwargs):
if isinstance(obj, JSONRPCError):
data = {"jsonrpc": "2.0", "error": obj.to_dict()}
else:
data = {"jsonrpc": "2.0", "result": obj}
return json.dumps(data, cls=JSONResponseEncoder, sort_keys=True, indent=2, **kwargs) + "\n"
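# Illustrative output shape (not from the original module): for a plain result
# such as {"ok": True}, jsonrpc_dumps_pretty() produces a sorted, 2-space
# indented envelope of the form
#   {
#     "jsonrpc": "2.0",
#     "result": {
#       "ok": true
#     }
#   }
# followed by a trailing newline; JSONRPCError instances are emitted under an
# "error" key instead.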
def trap(err, *to_trap):
err.trap(*to_trap)
class JSONRPCServerType(type):
def __new__(mcs, name, bases, newattrs):
klass = type.__new__(mcs, name, bases, newattrs)
klass.callable_methods = {}
klass.deprecated_methods = {}
for methodname in dir(klass):
if methodname.startswith("jsonrpc_"):
method = getattr(klass, methodname)
if not hasattr(method, '_deprecated'):
klass.callable_methods.update({methodname.split("jsonrpc_")[1]: method})
else:
klass.deprecated_methods.update({methodname.split("jsonrpc_")[1]: method})
return klass
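# Illustrative effect of the metaclass above (not part of the original source):
# a subclass that defines `jsonrpc_get` ends up with callable_methods["get"],
# while a method that also carries the @deprecated(...) marker is collected
# into deprecated_methods instead.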
class Daemon(metaclass=JSONRPCServerType):
"""
LBRYnet daemon, a jsonrpc interface to lbry functions
"""
def __init__(self, conf: Config, component_manager: typing.Optional[ComponentManager] = None):
self.conf = conf
self._node_id = None
self._installation_id = None
self.session_id = base58.b58encode(utils.generate_id()).decode()
self.analytics_manager = analytics.AnalyticsManager(conf, self.installation_id, self.session_id)
self.component_manager = component_manager or ComponentManager(
conf, analytics_manager=self.analytics_manager,
skip_components=conf.components_to_skip or []
)
self.component_startup_task = None
        self._connection_status: typing.Tuple[float, bool] = (self.component_manager.loop.time(), False)
logging.getLogger('aiohttp.access').setLevel(logging.WARN)
rpc_app = web.Application()
rpc_app.router.add_get('/lbryapi', self.handle_old_jsonrpc)
rpc_app.router.add_post('/lbryapi', self.handle_old_jsonrpc)
rpc_app.router.add_post('/', self.handle_old_jsonrpc)
self.rpc_runner = web.AppRunner(rpc_app)
streaming_app = web.Application()
streaming_app.router.add_get('/get/{claim_name}', self.handle_stream_get_request)
streaming_app.router.add_get('/get/{claim_name}/{claim_id}', self.handle_stream_get_request)
streaming_app.router.add_get('/stream/{sd_hash}', self.handle_stream_range_request)
self.streaming_runner = web.AppRunner(streaming_app)
@property
def dht_node(self) -> typing.Optional['Node']:
return self.component_manager.get_component(DHT_COMPONENT)
@property
def wallet_manager(self) -> typing.Optional['LbryWalletManager']:
return self.component_manager.get_component(WALLET_COMPONENT)
@property
def storage(self) -> typing.Optional['SQLiteStorage']:
return self.component_manager.get_component(DATABASE_COMPONENT)
@property
def stream_manager(self) -> typing.Optional['StreamManager']:
return self.component_manager.get_component(STREAM_MANAGER_COMPONENT)
@property
def exchange_rate_manager(self) -> typing.Optional['ExchangeRateManager']:
return self.component_manager.get_component(EXCHANGE_RATE_MANAGER_COMPONENT)
@property
def blob_manager(self) -> typing.Optional['BlobManager']:
return self.component_manager.get_component(BLOB_COMPONENT)
@property
def upnp(self) -> typing.Optional['UPnPComponent']:
return self.component_manager.get_component(UPNP_COMPONENT)
@classmethod
def get_api_definitions(cls):
prefix = 'jsonrpc_'
not_grouped = ['routing_table_get']
api = {
'groups': {
group_name[:-len('_DOC')].lower(): getattr(cls, group_name).strip()
for group_name in dir(cls) if group_name.endswith('_DOC')
},
'commands': {}
}
for jsonrpc_method in dir(cls):
if jsonrpc_method.startswith(prefix):
full_name = jsonrpc_method[len(prefix):]
method = getattr(cls, jsonrpc_method)
if full_name in not_grouped:
name_parts = [full_name]
else:
name_parts = full_name.split('_', 1)
if len(name_parts) == 1:
group = None
name, = name_parts
elif len(name_parts) == 2:
group, name = name_parts
assert group in api['groups'], \
f"Group {group} does not have doc string for command {full_name}."
else:
raise NameError(f'Could not parse method name: {jsonrpc_method}')
api['commands'][full_name] = {
'api_method_name': full_name,
'name': name,
'group': group,
'doc': method.__doc__,
'method': method,
}
if hasattr(method, '_deprecated'):
api['commands'][full_name]['replaced_by'] = method.new_command
for command in api['commands'].values():
if 'replaced_by' in command:
command['replaced_by'] = api['commands'][command['replaced_by']]
return api
@property
def db_revision_file_path(self):
return os.path.join(self.conf.data_dir, 'db_revision')
@property
def installation_id(self):
install_id_filename = os.path.join(self.conf.data_dir, "install_id")
if not self._installation_id:
if os.path.isfile(install_id_filename):
with open(install_id_filename, "r") as install_id_file:
self._installation_id = str(install_id_file.read()).strip()
if not self._installation_id:
self._installation_id = base58.b58encode(utils.generate_id()).decode()
with open(install_id_filename, "w") as install_id_file:
install_id_file.write(self._installation_id)
return self._installation_id
def ensure_data_dir(self):
if not os.path.isdir(self.conf.data_dir):
os.makedirs(self.conf.data_dir)
if not os.path.isdir(os.path.join(self.conf.data_dir, "blobfiles")):
os.makedirs(os.path.join(self.conf.data_dir, "blobfiles"))
return self.conf.data_dir
def ensure_wallet_dir(self):
if not os.path.isdir(self.conf.wallet_dir):
os.makedirs(self.conf.wallet_dir)
def ensure_download_dir(self):
if not os.path.isdir(self.conf.download_dir):
os.makedirs(self.conf.download_dir)
async def update_connection_status(self):
connected = await utils.async_check_connection()
if connected and not self._connection_status[1]:
log.info("detected internet connection is working")
elif not connected and self._connection_status[1]:
log.warning("detected internet connection was lost")
self._connection_status = (self.component_manager.loop.time(), connected)
async def get_connection_status(self) -> str:
if self._connection_status[0] + 300 > self.component_manager.loop.time():
if not self._connection_status[1]:
await self.update_connection_status()
else:
await self.update_connection_status()
return CONNECTION_STATUS_CONNECTED if self._connection_status[1] else CONNECTION_STATUS_NETWORK
async def start(self):
log.info("Starting LBRYNet Daemon")
log.debug("Settings: %s", json.dumps(self.conf.settings_dict, indent=2))
log.info("Platform: %s", json.dumps(system_info.get_platform(), indent=2))
await self.analytics_manager.send_server_startup()
await self.rpc_runner.setup()
await self.streaming_runner.setup()
try:
rpc_site = web.TCPSite(self.rpc_runner, self.conf.api_host, self.conf.api_port, shutdown_timeout=.5)
await rpc_site.start()
log.info('RPC server listening on TCP %s:%i', *rpc_site._server.sockets[0].getsockname()[:2])
except OSError as e:
log.error('RPC server failed to bind TCP %s:%i', self.conf.api_host, self.conf.api_port)
await self.analytics_manager.send_server_startup_error(str(e))
raise SystemExit()
try:
streaming_site = web.TCPSite(self.streaming_runner, self.conf.streaming_host, self.conf.streaming_port,
shutdown_timeout=.5)
await streaming_site.start()
log.info('media server listening on TCP %s:%i', *streaming_site._server.sockets[0].getsockname()[:2])
except OSError as e:
log.error('media server failed to bind TCP %s:%i', self.conf.streaming_host, self.conf.streaming_port)
await self.analytics_manager.send_server_startup_error(str(e))
raise SystemExit()
try:
await self.initialize()
except asyncio.CancelledError:
log.info("shutting down before finished starting")
await self.analytics_manager.send_server_startup_error("shutting down before finished starting")
raise
except Exception as e:
await self.analytics_manager.send_server_startup_error(str(e))
log.exception('Failed to start lbrynet')
raise SystemExit()
await self.analytics_manager.send_server_startup_success()
async def initialize(self):
self.ensure_data_dir()
self.ensure_wallet_dir()
self.ensure_download_dir()
if not self.analytics_manager.is_started:
await self.analytics_manager.start()
self.component_startup_task = asyncio.create_task(self.component_manager.start())
await self.component_startup_task
async def stop(self, shutdown_runner=True):
if self.component_startup_task is not None:
if self.component_startup_task.done():
await self.component_manager.stop()
else:
self.component_startup_task.cancel()
log.info("stopped api components")
if shutdown_runner:
await self.rpc_runner.shutdown()
await self.streaming_runner.shutdown()
await self.rpc_runner.cleanup()
await self.streaming_runner.cleanup()
log.info("stopped api server")
if self.analytics_manager.is_started:
self.analytics_manager.stop()
log.info("finished shutting down")
async def handle_old_jsonrpc(self, request):
data = await request.json()
include_protobuf = data.get('params', {}).pop('include_protobuf', False)
result = await self._process_rpc_call(data)
ledger = None
if 'wallet' in self.component_manager.get_components_status():
# self.ledger only available if wallet component is not skipped
ledger = self.ledger
try:
encoded_result = jsonrpc_dumps_pretty(
result, ledger=ledger, include_protobuf=include_protobuf)
        except Exception:
log.exception('Failed to encode JSON RPC result:')
encoded_result = jsonrpc_dumps_pretty(JSONRPCError(
'After successfully executing the command, failed to encode result for JSON RPC response.',
JSONRPCError.CODE_APPLICATION_ERROR, format_exc()
), ledger=ledger)
return web.Response(
text=encoded_result,
content_type='application/json'
)
async def handle_stream_get_request(self, request: web.Request):
if not self.conf.streaming_get:
log.warning("streaming_get is disabled, rejecting request")
raise web.HTTPForbidden()
name_and_claim_id = request.path.split("/get/")[1]
if "/" not in name_and_claim_id:
uri = f"lbry://{name_and_claim_id}"
else:
name, claim_id = name_and_claim_id.split("/")
uri = f"lbry://{name}#{claim_id}"
if not self.stream_manager.started.is_set():
await self.stream_manager.started.wait()
stream = await self.jsonrpc_get(uri)
if isinstance(stream, dict):
raise web.HTTPServerError(text=stream['error'])
raise web.HTTPFound(f"/stream/{stream.sd_hash}")
async def handle_stream_range_request(self, request: web.Request):
try:
return await self._handle_stream_range_request(request)
except web.HTTPException as err:
log.warning("http code during /stream range request: %s", err)
raise err
except asyncio.CancelledError:
# if not excepted here, it would bubble up the error to the console. every time you closed
# a running tab, you'd get this error in the console
log.debug("/stream range request cancelled")
except Exception:
log.exception("error handling /stream range request")
raise
finally:
log.debug("finished handling /stream range request")
async def _handle_stream_range_request(self, request: web.Request):
sd_hash = request.path.split("/stream/")[1]
if not self.stream_manager.started.is_set():
await self.stream_manager.started.wait()
if sd_hash not in self.stream_manager.streams:
return web.HTTPNotFound()
return await self.stream_manager.stream_partial_content(request, sd_hash)
async def _process_rpc_call(self, data):
args = data.get('params', {})
try:
function_name = data['method']
except KeyError:
return JSONRPCError(
"Missing 'method' value in request.", JSONRPCError.CODE_METHOD_NOT_FOUND
)
try:
fn = self._get_jsonrpc_method(function_name)
except UnknownAPIMethodError:
return JSONRPCError(
f"Invalid method requested: {function_name}.", JSONRPCError.CODE_METHOD_NOT_FOUND
)
if args in ([{}], []):
_args, _kwargs = (), {}
elif isinstance(args, dict):
_args, _kwargs = (), args
elif len(args) == 1 and isinstance(args[0], dict):
# TODO: this is for backwards compatibility. Remove this once API and UI are updated
# TODO: also delete EMPTY_PARAMS then
_args, _kwargs = (), args[0]
elif len(args) == 2 and isinstance(args[0], list) and isinstance(args[1], dict):
_args, _kwargs = args
else:
return JSONRPCError(
f"Invalid parameters format.", JSONRPCError.CODE_INVALID_PARAMS
)
params_error, erroneous_params = self._check_params(fn, _args, _kwargs)
if params_error is not None:
params_error_message = '{} for {} command: {}'.format(
params_error, function_name, ', '.join(erroneous_params)
)
log.warning(params_error_message)
return JSONRPCError(
params_error_message, JSONRPCError.CODE_INVALID_PARAMS
)
try:
result = fn(self, *_args, **_kwargs)
if asyncio.iscoroutine(result):
result = await result
return result
except asyncio.CancelledError:
log.info("cancelled API call for: %s", function_name)
raise
except Exception as e: # pylint: disable=broad-except
log.exception("error handling api request")
return JSONRPCError(
f"Error calling {function_name} with args {args}\n" + str(e),
JSONRPCError.CODE_APPLICATION_ERROR,
format_exc()
)
def _verify_method_is_callable(self, function_path):
if function_path not in self.callable_methods:
raise UnknownAPIMethodError(function_path)
def _get_jsonrpc_method(self, function_path):
if function_path in self.deprecated_methods:
new_command = self.deprecated_methods[function_path].new_command
log.warning('API function \"%s\" is deprecated, please update to use \"%s\"',
function_path, new_command)
function_path = new_command
self._verify_method_is_callable(function_path)
return self.callable_methods.get(function_path)
@staticmethod
def _check_params(function, args_tup, args_dict):
argspec = inspect.getfullargspec(undecorated(function))
num_optional_params = 0 if argspec.defaults is None else len(argspec.defaults)
duplicate_params = [
duplicate_param
for duplicate_param in argspec.args[1:len(args_tup) + 1]
if duplicate_param in args_dict
]
if duplicate_params:
return 'Duplicate parameters', duplicate_params
missing_required_params = [
required_param
for required_param in argspec.args[len(args_tup) + 1:-num_optional_params]
if required_param not in args_dict
]
if len(missing_required_params):
return 'Missing required parameters', missing_required_params
extraneous_params = [] if argspec.varkw is not None else [
extra_param
for extra_param in args_dict
if extra_param not in argspec.args[1:]
]
if len(extraneous_params):
return 'Extraneous parameters', extraneous_params
return None, None
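    # Worked example (illustrative; `jsonrpc_example` is a hypothetical method):
    # for `def jsonrpc_example(self, uri, timeout=None)`, _check_params() with
    # args_tup=("lbry://a",) and args_dict={"uri": "b"} returns
    # ('Duplicate parameters', ['uri']); with an empty args_tup and args_dict it
    # returns ('Missing required parameters', ['uri']).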
@property
def ledger(self) -> Optional['MainNetLedger']:
try:
return self.wallet_manager.default_account.ledger
except AttributeError:
return None
async def get_est_cost_from_uri(self, uri: str) -> typing.Optional[float]:
"""
Resolve a name and return the estimated stream cost
"""
resolved = await self.resolve(uri)
if resolved:
claim_response = resolved[uri]
else:
claim_response = None
if claim_response and 'claim' in claim_response:
if 'value' in claim_response['claim'] and claim_response['claim']['value'] is not None:
claim_value = Claim.from_bytes(claim_response['claim']['value'])
if not claim_value.stream.has_fee:
return 0.0
return round(
self.exchange_rate_manager.convert_currency(
claim_value.stream.fee.currency, "LBC", claim_value.stream.fee.amount
), 5
)
else:
log.warning("Failed to estimate cost for %s", uri)
############################################################################
# #
# JSON-RPC API methods start here #
# #
############################################################################
def jsonrpc_stop(self):
"""
Stop lbrynet API server.
Usage:
stop
Options:
None
Returns:
(string) Shutdown message
"""
def shutdown():
raise web.GracefulExit()
log.info("Shutting down lbrynet daemon")
asyncio.get_event_loop().call_later(0, shutdown)
return "Shutting down"
async def jsonrpc_status(self):
"""
Get daemon status
Usage:
status
Options:
None
Returns:
(dict) lbrynet-daemon status
{
'installation_id': (str) installation id - base58,
'is_running': (bool),
'skipped_components': (list) [names of skipped components (str)],
'startup_status': { Does not include components which have been skipped
'blob_manager': (bool),
'blockchain_headers': (bool),
'database': (bool),
'dht': (bool),
'exchange_rate_manager': (bool),
'hash_announcer': (bool),
'peer_protocol_server': (bool),
'stream_manager': (bool),
'upnp': (bool),
'wallet': (bool),
},
'connection_status': {
'code': (str) connection status code,
'message': (str) connection status message
},
'blockchain_headers': {
'downloading_headers': (bool),
'download_progress': (float) 0-100.0
},
'wallet': {
'blocks': (int) local blockchain height,
'blocks_behind': (int) remote_height - local_height,
'best_blockhash': (str) block hash of most recent block,
'is_encrypted': (bool),
'is_locked': (bool),
'connected_servers': (list) [
{
'host': (str) server hostname,
'port': (int) server port,
'latency': (int) milliseconds
}
],
},
'dht': {
'node_id': (str) lbry dht node id - hex encoded,
'peers_in_routing_table': (int) the number of peers in the routing table,
},
'blob_manager': {
'finished_blobs': (int) number of finished blobs in the blob manager,
'connections': {
'incoming_bps': {
<source ip and tcp port>: (int) bytes per second received,
},
'outgoing_bps': {
<destination ip and tcp port>: (int) bytes per second sent,
},
'total_outgoing_mps': (float) megabytes per second sent,
'total_incoming_mps': (float) megabytes per second received,
'time': (float) timestamp
}
},
'hash_announcer': {
'announce_queue_size': (int) number of blobs currently queued to be announced
},
'stream_manager': {
'managed_files': (int) count of files in the stream manager,
},
'upnp': {
'aioupnp_version': (str),
'redirects': {
<TCP | UDP>: (int) external_port,
},
'gateway': (str) manufacturer and model,
'dht_redirect_set': (bool),
'peer_redirect_set': (bool),
'external_ip': (str) external ip address,
}
}
"""
connection_code = await self.get_connection_status()
response = {
'installation_id': self.installation_id,
'is_running': all(self.component_manager.get_components_status().values()),
'skipped_components': self.component_manager.skip_components,
'startup_status': self.component_manager.get_components_status(),
'connection_status': {
'code': connection_code,
'message': CONNECTION_MESSAGES[connection_code],
},
}
for component in self.component_manager.components:
status = await component.get_status()
if status:
response[component.component_name] = status
return response
def jsonrpc_version(self):
"""
Get lbrynet API server version information
Usage:
version
Options:
None
Returns:
(dict) Dictionary of lbry version information
{
'processor': (str) processor type,
'python_version': (str) python version,
'platform': (str) platform string,
'os_release': (str) os release string,
'os_system': (str) os name,
'lbrynet_version': (str) lbrynet version,
'torba_version': (str) torba version,
'build': (str) "dev" | "qa" | "rc" | "release",
}
"""
platform_info = system_info.get_platform()
log.info("Get version info: " + json.dumps(platform_info))
return platform_info
@requires(WALLET_COMPONENT)
async def jsonrpc_resolve(self, urls: typing.Union[str, list]):
"""
Get the claim that a URL refers to.
Usage:
resolve <urls>...
Options:
--urls=<urls> : (str, list) one or more urls to resolve
Returns:
Dictionary of results, keyed by url
'<url>': {
If a resolution error occurs:
'error': Error message
If the url resolves to a channel or a claim in a channel:
'certificate': {
'address': (str) claim address,
'amount': (float) claim amount,
'effective_amount': (float) claim amount including supports,
'claim_id': (str) claim id,
'claim_sequence': (int) claim sequence number (or -1 if unknown),
'decoded_claim': (bool) whether or not the claim value was decoded,
'height': (int) claim height,
'confirmations': (int) claim depth,
'timestamp': (int) timestamp of the block that included this claim tx,
'has_signature': (bool) included if decoded_claim
'name': (str) claim name,
'permanent_url': (str) permanent url of the certificate claim,
                        'supports': (list) list of supports [{'txid': (str) txid,
'nout': (int) nout,
'amount': (float) amount}],
'txid': (str) claim txid,
'nout': (str) claim nout,
'signature_is_valid': (bool), included if has_signature,
'value': ClaimDict if decoded, otherwise hex string
}
If the url resolves to a channel:
'claims_in_channel': (int) number of claims in the channel,
If the url resolves to a claim:
'claim': {
'address': (str) claim address,
'amount': (float) claim amount,
'effective_amount': (float) claim amount including supports,
'claim_id': (str) claim id,
'claim_sequence': (int) claim sequence number (or -1 if unknown),
'decoded_claim': (bool) whether or not the claim value was decoded,
'height': (int) claim height,
'depth': (int) claim depth,
'has_signature': (bool) included if decoded_claim
'name': (str) claim name,
'permanent_url': (str) permanent url of the claim,
'channel_name': (str) channel name if claim is in a channel
                        'supports': (list) list of supports [{'txid': (str) txid,
'nout': (int) nout,
'amount': (float) amount}]
'txid': (str) claim txid,
'nout': (str) claim nout,
'signature_is_valid': (bool), included if has_signature,
'value': ClaimDict if decoded, otherwise hex string
}
}
"""
if isinstance(urls, str):
urls = [urls]
results = {}
valid_urls = set()
for u in urls:
try:
URL.parse(u)
valid_urls.add(u)
except ValueError:
results[u] = {"error": f"{u} is not a valid url"}
resolved = await self.resolve(list(valid_urls))
for resolved_uri in resolved:
results[resolved_uri] = resolved[resolved_uri] if resolved[resolved_uri] is not None else \
{"error": f"{resolved_uri} did not resolve to a claim"}
return results
@requires(WALLET_COMPONENT, EXCHANGE_RATE_MANAGER_COMPONENT, BLOB_COMPONENT, DATABASE_COMPONENT,
STREAM_MANAGER_COMPONENT,
conditions=[WALLET_IS_UNLOCKED])
async def jsonrpc_get(self, uri, file_name=None, download_directory=None, timeout=None, save_file=None):
"""
Download stream from a LBRY name.
Usage:
get <uri> [<file_name> | --file_name=<file_name>]
[<download_directory> | --download_directory=<download_directory>] [<timeout> | --timeout=<timeout>]
[--save_file=<save_file>]
Options:
--uri=<uri> : (str) uri of the content to download
--file_name=<file_name> : (str) specified name for the downloaded file, overrides the stream file name
--download_directory=<download_directory> : (str) full path to the directory to download into
--timeout=<timeout> : (int) download timeout in number of seconds
--save_file=<save_file> : (bool) save the file to the downloads directory
Returns: {File}
"""
if download_directory and not os.path.isdir(download_directory):
return {"error": f"specified download directory \"{download_directory}\" does not exist"}
try:
stream = await self.stream_manager.download_stream_from_uri(
uri, self.exchange_rate_manager, timeout, file_name, download_directory, save_file=save_file
)
if not stream:
raise DownloadSDTimeout(uri)
except Exception as e:
log.warning("Error downloading %s: %s", uri, str(e))
return {"error": str(e)}
return stream
SETTINGS_DOC = """
Settings management.
"""
def jsonrpc_settings_get(self):
"""
Get daemon settings
Usage:
settings_get
Options:
None
Returns:
(dict) Dictionary of daemon settings
See ADJUSTABLE_SETTINGS in lbry/conf.py for full list of settings
"""
return self.conf.settings_dict
def jsonrpc_settings_set(self, key, value):
"""
Set daemon settings
Usage:
settings_set (<key>) (<value>)
Options:
None
Returns:
(dict) Updated dictionary of daemon settings
"""
with self.conf.update_config() as c:
attr: Setting = getattr(type(c), key)
cleaned = attr.deserialize(value)
setattr(c, key, cleaned)
return {key: cleaned}
PREFERENCE_DOC = """
Preferences management.
"""
def jsonrpc_preference_get(self, key=None, account_id=None, wallet_id=None):
"""
        Get the preference value for a key, or all values if no key is passed in.
Usage:
preference_get [<key>] [--wallet_id=<wallet_id>]
Options:
--key=<key> : (str) key associated with value
--account_id=<account_id> : (str) id of the account containing value
--wallet_id=<wallet_id> : (str) restrict operation to specific wallet
Returns:
(dict) Dictionary of preference(s)
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
account = wallet.get_account_or_default(account_id)
if key:
if key in account.preferences:
return {key: account.preferences[key]}
return
return account.preferences
def jsonrpc_preference_set(self, key, value, account_id=None, wallet_id=None):
"""
Set preferences
Usage:
preference_set (<key>) (<value>) [--wallet_id=<wallet_id>]
Options:
--key=<key> : (str) key associated with value
            --value=<value>           : (str) value to set for the key
--account_id=<account_id> : (str) id of the account containing value
--wallet_id=<wallet_id> : (str) restrict operation to specific wallet
Returns:
(dict) Dictionary with key/value of new preference
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
account = wallet.get_account_or_default(account_id)
if value and isinstance(value, str) and value[0] in ('[', '{'):
value = json.loads(value)
account.preferences[key] = value
account.modified_on = time.time()
wallet.save()
return {key: value}
WALLET_DOC = """
Create, modify and inspect wallets.
"""
@requires("wallet")
def jsonrpc_wallet_list(self, wallet_id=None):
"""
List wallets.
Usage:
wallet_list [--wallet_id=<wallet_id>]
Options:
--wallet_id=<wallet_id> : (str) show specific wallet only
Returns: {List[Wallet]}
"""
if wallet_id:
return [self.wallet_manager.get_wallet_or_error(wallet_id)]
return self.wallet_manager.wallets
@requires("wallet")
async def jsonrpc_wallet_create(
self, wallet_id, skip_on_startup=False, create_account=False, single_key=False):
"""
Create a new wallet.
Usage:
wallet_create (<wallet_id> | --wallet_id=<wallet_id>) [--skip_on_startup]
[--create_account] [--single_key]
Options:
--wallet_id=<wallet_id> : (str) wallet file name
--skip_on_startup : (bool) don't add wallet to daemon_settings.yml
--create_account : (bool) generates the default account
--single_key : (bool) used with --create_account, creates single-key account
Returns: {Wallet}
"""
wallet_path = os.path.join(self.conf.wallet_dir, 'wallets', wallet_id)
for wallet in self.wallet_manager.wallets:
if wallet.id == wallet_id:
raise Exception(f"Wallet at path '{wallet_path}' already exists and is loaded.")
if os.path.exists(wallet_path):
raise Exception(f"Wallet at path '{wallet_path}' already exists, use 'wallet_add' to load wallet.")
wallet = self.wallet_manager.import_wallet(wallet_path)
if not wallet.accounts and create_account:
account = LBCAccount.generate(
self.ledger, wallet, address_generator={
'name': SingleKey.name if single_key else HierarchicalDeterministic.name
}
)
if self.ledger.network.is_connected:
await self.ledger.subscribe_account(account)
wallet.save()
if not skip_on_startup:
with self.conf.update_config() as c:
c.wallets += [wallet_id]
return wallet
@requires("wallet")
async def jsonrpc_wallet_add(self, wallet_id):
"""
Add existing wallet.
Usage:
wallet_add (<wallet_id> | --wallet_id=<wallet_id>)
Options:
--wallet_id=<wallet_id> : (str) wallet file name
Returns: {Wallet}
"""
wallet_path = os.path.join(self.conf.wallet_dir, 'wallets', wallet_id)
for wallet in self.wallet_manager.wallets:
if wallet.id == wallet_id:
raise Exception(f"Wallet at path '{wallet_path}' is already loaded.")
if not os.path.exists(wallet_path):
raise Exception(f"Wallet at path '{wallet_path}' was not found.")
return self.wallet_manager.import_wallet(wallet_path)
@requires("wallet")
def jsonrpc_wallet_remove(self, wallet_id):
"""
Remove an existing wallet.
Usage:
wallet_remove (<wallet_id> | --wallet_id=<wallet_id>)
Options:
--wallet_id=<wallet_id> : (str) name of wallet to remove
Returns: {Wallet}
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
self.wallet_manager.wallets.remove(wallet)
return wallet
ACCOUNT_DOC = """
Create, modify and inspect wallet accounts.
"""
@requires("wallet")
def jsonrpc_account_list(self, account_id=None, wallet_id=None, confirmations=0,
include_claims=False, show_seed=False):
"""
List details of all of the accounts or a specific account.
Usage:
account_list [<account_id>] [--wallet_id=<wallet_id>]
[--confirmations=<confirmations>]
[--include_claims] [--show_seed]
Options:
--account_id=<account_id> : (str) If provided only the balance for this
account will be given
--wallet_id=<wallet_id> : (str) accounts in specific wallet
--confirmations=<confirmations> : (int) required confirmations (default: 0)
            --include_claims          : (bool) include claims, requires that an
LBC account is specified (default: false)
--show_seed : (bool) show the seed for the account
Returns: {List[Account]}
"""
kwargs = {
'confirmations': confirmations,
'show_seed': show_seed
}
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
if account_id:
return wallet.get_account_or_error(account_id).get_details(**kwargs)
else:
return wallet.get_detailed_accounts(**kwargs)
@requires("wallet")
async def jsonrpc_account_balance(self, account_id=None, wallet_id=None, confirmations=0, reserved_subtotals=False):
"""
Return the balance of an account
Usage:
            account_balance [<account_id>] [--wallet_id=<wallet_id>]
[<confirmations> | --confirmations=<confirmations>] [--reserved_subtotals]
Options:
--account_id=<account_id> : (str) If provided only the balance for this
account will be given. Otherwise default account.
--wallet_id=<wallet_id> : (str) balance for specific wallet
--confirmations=<confirmations> : (int) Only include transactions with this many
confirmed blocks.
--reserved_subtotals : (bool) Include detailed reserved balances on
claims, tips and supports.
Returns:
(decimal) amount of lbry credits in wallet
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
account = wallet.get_account_or_default(account_id)
return await account.get_granular_balances(confirmations=confirmations, reserved_subtotals=reserved_subtotals)
@requires("wallet")
async def jsonrpc_account_add(
self, account_name, wallet_id=None, single_key=False,
seed=None, private_key=None, public_key=None):
"""
Add a previously created account from a seed, private key or public key (read-only).
Specify --single_key for single address or vanity address accounts.
Usage:
account_add (<account_name> | --account_name=<account_name>)
(--seed=<seed> | --private_key=<private_key> | --public_key=<public_key>)
[--single_key] [--wallet_id=<wallet_id>]
Options:
--account_name=<account_name> : (str) name of the account to add
--seed=<seed> : (str) seed to generate new account from
--private_key=<private_key> : (str) private key for new account
--public_key=<public_key> : (str) public key for new account
--single_key : (bool) create single key account, default is multi-key
--wallet_id=<wallet_id> : (str) restrict operation to specific wallet
Returns: {Account}
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
account = LBCAccount.from_dict(
self.ledger, wallet, {
'name': account_name,
'seed': seed,
'private_key': private_key,
'public_key': public_key,
'address_generator': {
'name': SingleKey.name if single_key else HierarchicalDeterministic.name
}
}
)
wallet.save()
if self.ledger.network.is_connected:
await self.ledger.subscribe_account(account)
return account
@requires("wallet")
async def jsonrpc_account_create(self, account_name, single_key=False, wallet_id=None):
"""
Create a new account. Specify --single_key if you want to use
the same address for all transactions (not recommended).
Usage:
account_create (<account_name> | --account_name=<account_name>)
[--single_key] [--wallet_id=<wallet_id>]
Options:
--account_name=<account_name> : (str) name of the account to create
--single_key : (bool) create single key account, default is multi-key
--wallet_id=<wallet_id> : (str) restrict operation to specific wallet
Returns: {Account}
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
account = LBCAccount.generate(
self.ledger, wallet, account_name, {
'name': SingleKey.name if single_key else HierarchicalDeterministic.name
}
)
wallet.save()
if self.ledger.network.is_connected:
await self.ledger.subscribe_account(account)
return account
@requires("wallet")
def jsonrpc_account_remove(self, account_id, wallet_id=None):
"""
Remove an existing account.
Usage:
account_remove (<account_id> | --account_id=<account_id>) [--wallet_id=<wallet_id>]
Options:
--account_id=<account_id> : (str) id of the account to remove
--wallet_id=<wallet_id> : (str) restrict operation to specific wallet
Returns: {Account}
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
account = wallet.get_account_or_error(account_id)
wallet.accounts.remove(account)
wallet.save()
return account
@requires("wallet")
def jsonrpc_account_set(
self, account_id, wallet_id=None, default=False, new_name=None,
change_gap=None, change_max_uses=None, receiving_gap=None, receiving_max_uses=None):
"""
Change various settings on an account.
Usage:
account_set (<account_id> | --account_id=<account_id>) [--wallet_id=<wallet_id>]
[--default] [--new_name=<new_name>]
[--change_gap=<change_gap>] [--change_max_uses=<change_max_uses>]
[--receiving_gap=<receiving_gap>] [--receiving_max_uses=<receiving_max_uses>]
Options:
--account_id=<account_id> : (str) id of the account to change
--wallet_id=<wallet_id> : (str) restrict operation to specific wallet
--default : (bool) make this account the default
--new_name=<new_name> : (str) new name for the account
--receiving_gap=<receiving_gap> : (int) set the gap for receiving addresses
--receiving_max_uses=<receiving_max_uses> : (int) set the maximum number of times to
use a receiving address
--change_gap=<change_gap> : (int) set the gap for change addresses
--change_max_uses=<change_max_uses> : (int) set the maximum number of times to
use a change address
Returns: {Account}
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
account = wallet.get_account_or_error(account_id)
change_made = False
if account.receiving.name == HierarchicalDeterministic.name:
address_changes = {
'change': {'gap': change_gap, 'maximum_uses_per_address': change_max_uses},
'receiving': {'gap': receiving_gap, 'maximum_uses_per_address': receiving_max_uses},
}
for chain_name in address_changes:
chain = getattr(account, chain_name)
for attr, value in address_changes[chain_name].items():
if value is not None:
setattr(chain, attr, value)
change_made = True
if new_name is not None:
account.name = new_name
change_made = True
if default:
wallet.accounts.remove(account)
wallet.accounts.insert(0, account)
change_made = True
if change_made:
account.modified_on = time.time()
wallet.save()
return account
@requires(WALLET_COMPONENT)
def jsonrpc_account_unlock(self, password, account_id=None, wallet_id=None):
"""
Unlock an encrypted account
Usage:
account_unlock (<password> | --password=<password>)
[<account_id> | --account_id=<account_id>]
[--wallet_id=<wallet_id>]
Options:
--password=<password> : (str) password to use for unlocking
--account_id=<account_id> : (str) id for the account to unlock, unlocks default account
if not provided
--wallet_id=<wallet_id> : (str) restrict operation to specific wallet
Returns:
(bool) true if account is unlocked, otherwise false
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
return self.wallet_manager.unlock_account(
password, wallet.get_account_or_default(account_id)
)
@requires(WALLET_COMPONENT)
def jsonrpc_account_lock(self, account_id=None, wallet_id=None):
"""
Lock an unlocked account
Usage:
account_lock [<account_id> | --account_id=<account_id>] [--wallet_id=<wallet_id>]
Options:
--account_id=<account_id> : (str) id for the account to lock
--wallet_id=<wallet_id> : (str) restrict operation to specific wallet
Returns:
(bool) true if account is locked, otherwise false
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
return self.wallet_manager.lock_account(
wallet.get_account_or_default(account_id)
)
@requires(WALLET_COMPONENT, conditions=[WALLET_IS_UNLOCKED])
def jsonrpc_account_decrypt(self, account_id=None, wallet_id=None):
"""
        Decrypt an encrypted account; this will remove the wallet password. The account must be unlocked to decrypt it.
Usage:
account_decrypt [<account_id> | --account_id=<account_id>] [--wallet_id=<wallet_id>]
Options:
--account_id=<account_id> : (str) id for the account to decrypt
--wallet_id=<wallet_id> : (str) restrict operation to specific wallet
Returns:
            (bool) true if the account was decrypted, otherwise false
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
return self.wallet_manager.decrypt_account(
wallet.get_account_or_default(account_id)
)
@requires(WALLET_COMPONENT, conditions=[WALLET_IS_UNLOCKED])
def jsonrpc_account_encrypt(self, new_password, account_id=None, wallet_id=None):
"""
Encrypt an unencrypted account with a password
Usage:
account_encrypt (<new_password> | --new_password=<new_password>)
[<account_id> | --account_id=<account_id>]
[--wallet_id=<wallet_id>]
Options:
--new_password=<new_password> : (str) password to encrypt account
--account_id=<account_id> : (str) id for the account to encrypt, encrypts
default account if not provided
--wallet_id=<wallet_id> : (str) restrict operation to specific wallet
Returns:
            (bool) true if the account was encrypted, otherwise false
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
return self.wallet_manager.encrypt_account(
new_password, wallet.get_account_or_default(account_id)
)
@requires("wallet")
def jsonrpc_account_max_address_gap(self, account_id, wallet_id=None):
"""
        Finds ranges of consecutive addresses that are unused and returns the length
        of the longest such range, for both the change and receiving address chains.
        This is useful for figuring out ideal values for the 'receiving_gap' and
        'change_gap' account settings.
Usage:
account_max_address_gap (<account_id> | --account_id=<account_id>)
[--wallet_id=<wallet_id>]
Options:
--account_id=<account_id> : (str) account for which to get max gaps
--wallet_id=<wallet_id> : (str) restrict operation to specific wallet
Returns:
(map) maximum gap for change and receiving addresses
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
return wallet.get_account_or_error(account_id).get_max_gap()
@requires("wallet")
def jsonrpc_account_fund(self, to_account=None, from_account=None, amount='0.0',
everything=False, outputs=1, broadcast=False, wallet_id=None):
"""
Transfer some amount (or --everything) to an account from another
account (can be the same account). Amounts are interpreted as LBC.
You can also spread the transfer across a number of --outputs (cannot
be used together with --everything).
Usage:
account_fund [<to_account> | --to_account=<to_account>]
[<from_account> | --from_account=<from_account>]
(<amount> | --amount=<amount> | --everything)
[<outputs> | --outputs=<outputs>] [--wallet_id=<wallet_id>]
[--broadcast]
Options:
--to_account=<to_account> : (str) send to this account
--from_account=<from_account> : (str) spend from this account
            --amount=<amount>              : (str) the amount of LBC to transfer
--everything : (bool) transfer everything (excluding claims), default: false.
--outputs=<outputs> : (int) split payment across many outputs, default: 1.
--wallet_id=<wallet_id> : (str) limit operation to specific wallet.
--broadcast : (bool) actually broadcast the transaction, default: false.
Returns: {Transaction}
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
to_account = wallet.get_account_or_default(to_account)
from_account = wallet.get_account_or_default(from_account)
amount = self.get_dewies_or_error('amount', amount) if amount else None
if not isinstance(outputs, int):
raise ValueError("--outputs must be an integer.")
if everything and outputs > 1:
raise ValueError("Using --everything along with --outputs is not supported.")
return from_account.fund(
to_account=to_account, amount=amount, everything=everything,
outputs=outputs, broadcast=broadcast
)
@requires(WALLET_COMPONENT, conditions=[WALLET_IS_UNLOCKED])
async def jsonrpc_account_send(self, amount, addresses, account_id=None, wallet_id=None, preview=False):
"""
Send the same number of credits to multiple addresses.
Usage:
account_send <amount> <addresses>... [--account_id=<account_id>] [--wallet_id=<wallet_id>] [--preview]
Options:
--account_id=<account_id> : (str) account to fund the transaction
--wallet_id=<wallet_id> : (str) restrict operation to specific wallet
--preview : (bool) do not broadcast the transaction
Returns: {Transaction}
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
if account_id:
account = wallet.get_account_or_error(account_id)
accounts = [account]
else:
account = wallet.default_account
accounts = wallet.accounts
amount = self.get_dewies_or_error("amount", amount)
if not amount:
raise NullFundsError
if amount < 0:
raise NegativeFundsError()
if addresses and not isinstance(addresses, list):
addresses = [addresses]
outputs = []
for address in addresses:
self.valid_address_or_error(address)
outputs.append(
Output.pay_pubkey_hash(
amount, self.ledger.address_to_hash160(address)
)
)
tx = await Transaction.create(
[], outputs, accounts, account
)
if not preview:
await self.ledger.broadcast(tx)
await self.analytics_manager.send_credits_sent()
else:
await self.ledger.release_tx(tx)
return tx
SYNC_DOC = """
Wallet synchronization.
"""
@requires("wallet")
def jsonrpc_sync_hash(self, wallet_id=None):
"""
Deterministic hash of the wallet.
Usage:
sync_hash [<wallet_id> | --wallet_id=<wallet_id>]
Options:
--wallet_id=<wallet_id> : (str) wallet for which to generate hash
Returns:
(str) sha256 hash of wallet
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
return hexlify(wallet.hash).decode()
@requires("wallet")
def jsonrpc_sync_apply(self, password, data=None, encrypt_password=None, wallet_id=None):
"""
Apply incoming synchronization data, if provided, and then produce a sync hash and
an encrypted wallet.
Usage:
sync_apply <password> [--data=<data>] [--encrypt-password=<encrypt_password>]
[--wallet_id=<wallet_id>]
Options:
--password=<password> : (str) password to decrypt incoming and encrypt outgoing data
--data=<data> : (str) incoming sync data, if any
--encrypt-password=<encrypt_password> : (str) password to encrypt outgoing data if different
from the decrypt password, used during password changes
--wallet_id=<wallet_id> : (str) wallet being sync'ed
Returns:
(map) sync hash and data
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
if data is not None:
decrypted_data = Wallet.unpack(password, data)
for account_data in decrypted_data['accounts']:
_, _, pubkey = LBCAccount.keys_from_dict(self.ledger, account_data)
account_id = pubkey.address
local_match = None
for local_account in wallet.accounts:
if account_id == local_account.id:
local_match = local_account
break
if local_match is not None:
local_match.apply(account_data)
else:
new_account = LBCAccount.from_dict(self.ledger, wallet, account_data)
if self.ledger.network.is_connected:
asyncio.create_task(self.ledger.subscribe_account(new_account))
wallet.save()
encrypted = wallet.pack(encrypt_password or password)
return {
'hash': self.jsonrpc_sync_hash(wallet_id),
'data': encrypted.decode()
}
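# A hedged client-side round trip for the two sync_* calls above (assumes the
# same local JSON-RPC endpoint as elsewhere in this API; 'hunter2' is a
# placeholder password):
#
#     import requests
#
#     def rpc(method, **params):
#         return requests.post('http://localhost:5279',
#                              json={'method': method, 'params': params}).json()
#
#     local_hash = rpc('sync_hash')
#     packed = rpc('sync_apply', password='hunter2')
#     # the returned map carries 'hash' and 'data' (see Returns above); ship
#     # 'data' to another device and feed it back in with:
#     #     rpc('sync_apply', password='hunter2', data=received_data)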
ADDRESS_DOC = """
List, generate and verify addresses.
"""
@requires(WALLET_COMPONENT)
async def jsonrpc_address_is_mine(self, address, account_id=None, wallet_id=None):
"""
Checks if an address is associated with the current wallet.
Usage:
address_is_mine (<address> | --address=<address>)
[<account_id> | --account_id=<account_id>] [--wallet_id=<wallet_id>]
Options:
--address=<address> : (str) address to check
--account_id=<account_id> : (str) id of the account to use
--wallet_id=<wallet_id> : (str) restrict operation to specific wallet
Returns:
(bool) true, if address is associated with current wallet
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
account = wallet.get_account_or_default(account_id)
match = await self.ledger.db.get_address(address=address, accounts=[account])
if match is not None:
return True
return False
@requires(WALLET_COMPONENT)
def jsonrpc_address_list(self, address=None, account_id=None, wallet_id=None, page=None, page_size=None):
"""
List account addresses or details of single address.
Usage:
address_list [--address=<address>] [--account_id=<account_id>] [--wallet_id=<wallet_id>]
[--page=<page>] [--page_size=<page_size>]
Options:
--address=<address> : (str) just show details for single address
--account_id=<account_id> : (str) id of the account to use
--wallet_id=<wallet_id> : (str) restrict operation to specific wallet
--page=<page> : (int) page to return during paginating
--page_size=<page_size> : (int) number of items on page during pagination
Returns: {Paginated[Address]}
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
constraints = {
'cols': ('address', 'account', 'used_times', 'pubkey', 'chain_code', 'n', 'depth')
}
if address:
constraints['address'] = address
if account_id:
constraints['accounts'] = [wallet.get_account_or_error(account_id)]
else:
constraints['accounts'] = wallet.accounts
return maybe_paginate(
self.ledger.get_addresses,
self.ledger.get_address_count,
page, page_size, **constraints
)
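# Pagination sketch for the *_list methods that go through maybe_paginate, such
# as address_list above: walk pages until a short page comes back (hedged:
# assumes the same local endpoint and that the paginated payload exposes an
# 'items' list, as claim_search builds further below).
#
#     import requests
#
#     rpc = lambda m, **p: requests.post(
#         'http://localhost:5279', json={'method': m, 'params': p}).json()
#
#     def iter_addresses(page_size=50):
#         page = 1
#         while True:
#             items = rpc('address_list', page=page, page_size=page_size).get('items', [])
#             yield from items
#             if len(items) < page_size:
#                 break
#             page += 1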
@requires(WALLET_COMPONENT)
def jsonrpc_address_unused(self, account_id=None, wallet_id=None):
"""
Return an address containing no balance; a new address will be
created if none exists.
Usage:
address_unused [--account_id=<account_id>] [--wallet_id=<wallet_id>]
Options:
--account_id=<account_id> : (str) id of the account to use
--wallet_id=<wallet_id> : (str) restrict operation to specific wallet
Returns: {Address}
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
return wallet.get_account_or_default(account_id).receiving.get_or_create_usable_address()
FILE_DOC = """
File management.
"""
@requires(STREAM_MANAGER_COMPONENT)
def jsonrpc_file_list(self, sort=None, reverse=False, comparison=None, **kwargs):
"""
List files limited by optional filters
Usage:
file_list [--sd_hash=<sd_hash>] [--file_name=<file_name>] [--stream_hash=<stream_hash>]
[--rowid=<rowid>] [--claim_id=<claim_id>] [--outpoint=<outpoint>] [--txid=<txid>] [--nout=<nout>]
[--channel_claim_id=<channel_claim_id>] [--channel_name=<channel_name>]
[--claim_name=<claim_name>] [--blobs_in_stream=<blobs_in_stream>]
[--blobs_remaining=<blobs_remaining>] [--sort=<sort_by>]
[--comparison=<comparison>] [--full_status=<full_status>] [--reverse]
Options:
--sd_hash=<sd_hash> : (str) get file with matching sd hash
--file_name=<file_name> : (str) get file with matching file name in the
downloads folder
--stream_hash=<stream_hash> : (str) get file with matching stream hash
--rowid=<rowid> : (int) get file with matching row id
--claim_id=<claim_id> : (str) get file with matching claim id
--outpoint=<outpoint> : (str) get file with matching claim outpoint
--txid=<txid> : (str) get file with matching claim txid
--nout=<nout> : (int) get file with matching claim nout
--channel_claim_id=<channel_claim_id> : (str) get file with matching channel claim id
--channel_name=<channel_name> : (str) get file with matching channel name
--claim_name=<claim_name> : (str) get file with matching claim name
--blobs_in_stream=<blobs_in_stream>   : (int) get file with matching blobs in stream
--blobs_remaining=<blobs_remaining> : (int) amount of remaining blobs to download
--sort=<sort_by>                      : (str) field to sort by (one of the above filter fields)
--comparison=<comparison>             : (str) logical comparison, (eq | ne | g | ge | l | le)
--reverse                             : (bool) reverse the sort order
Returns: {List[File]}
"""
sort = sort or 'rowid'
comparison = comparison or 'eq'
return self.stream_manager.get_filtered_streams(
sort, reverse, comparison, **kwargs
)
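# One hedged reading of the filter/sort/comparison options above: ask for files
# with more than zero blobs remaining, ordered by blobs_remaining (assumes the
# usual local endpoint; parameter names come straight from the Options block,
# the comparison semantics are an assumption based on that docstring):
#
#     import requests
#
#     resp = requests.post('http://localhost:5279', json={
#         'method': 'file_list',
#         'params': {
#             'blobs_remaining': 0,
#             'comparison': 'g',        # "greater than", per the docstring
#             'sort': 'blobs_remaining',
#             'reverse': True,
#         }
#     }).json()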
@requires(STREAM_MANAGER_COMPONENT)
async def jsonrpc_file_set_status(self, status, **kwargs):
"""
Start or stop downloading a file
Usage:
file_set_status (<status> | --status=<status>) [--sd_hash=<sd_hash>]
[--file_name=<file_name>] [--stream_hash=<stream_hash>] [--rowid=<rowid>]
Options:
--status=<status> : (str) one of "start" or "stop"
--sd_hash=<sd_hash> : (str) set status of file with matching sd hash
--file_name=<file_name> : (str) set status of file with matching file name in the
downloads folder
--stream_hash=<stream_hash> : (str) set status of file with matching stream hash
--rowid=<rowid> : (int) set status of file with matching row id
Returns:
(str) Confirmation message
"""
if status not in ['start', 'stop']:
raise Exception('Status must be "start" or "stop".')
streams = self.stream_manager.get_filtered_streams(**kwargs)
if not streams:
raise Exception(f'Unable to find a file for {kwargs}')
stream = streams[0]
if status == 'start' and not stream.running:
await stream.save_file(node=self.stream_manager.node)
msg = "Resumed download"
elif status == 'stop' and stream.running:
await stream.stop()
msg = "Stopped download"
else:
msg = (
"File was already being downloaded" if status == 'start'
else "File was already stopped"
)
return msg
@requires(STREAM_MANAGER_COMPONENT)
async def jsonrpc_file_delete(self, delete_from_download_dir=False, delete_all=False, **kwargs):
"""
Delete a LBRY file
Usage:
file_delete [--delete_from_download_dir] [--delete_all] [--sd_hash=<sd_hash>] [--file_name=<file_name>]
[--stream_hash=<stream_hash>] [--rowid=<rowid>] [--claim_id=<claim_id>] [--txid=<txid>]
[--nout=<nout>] [--claim_name=<claim_name>] [--channel_claim_id=<channel_claim_id>]
[--channel_name=<channel_name>]
Options:
--delete_from_download_dir : (bool) delete file from download directory,
instead of just deleting blobs
--delete_all : (bool) if there are multiple matching files,
allow the deletion of multiple files.
Otherwise do not delete anything.
--sd_hash=<sd_hash> : (str) delete by file sd hash
--file_name=<file_name> : (str) delete by file name in downloads folder
--stream_hash=<stream_hash> : (str) delete by file stream hash
--rowid=<rowid> : (int) delete by file row id
--claim_id=<claim_id> : (str) delete by file claim id
--txid=<txid> : (str) delete by file claim txid
--nout=<nout> : (int) delete by file claim nout
--claim_name=<claim_name> : (str) delete by file claim name
--channel_claim_id=<channel_claim_id> : (str) delete by file channel claim id
--channel_name=<channel_name> : (str) delete by file channel claim name
Returns:
(bool) true if deletion was successful
"""
streams = self.stream_manager.get_filtered_streams(**kwargs)
if len(streams) > 1:
if not delete_all:
log.warning("There are %i files to delete, use narrower filters to select one",
len(streams))
return False
else:
log.warning("Deleting %i files",
len(streams))
if not streams:
log.warning("There is no file to delete")
return False
else:
for stream in streams:
message = f"Deleted file {stream.file_name}"
await self.stream_manager.delete_stream(stream, delete_file=delete_from_download_dir)
log.info(message)
result = True
return result
@requires(STREAM_MANAGER_COMPONENT)
async def jsonrpc_file_save(self, file_name=None, download_directory=None, **kwargs):
"""
Start saving a file to disk.
Usage:
file_save [--file_name=<file_name>] [--download_directory=<download_directory>] [--sd_hash=<sd_hash>]
[--stream_hash=<stream_hash>] [--rowid=<rowid>] [--claim_id=<claim_id>] [--txid=<txid>]
[--nout=<nout>] [--claim_name=<claim_name>] [--channel_claim_id=<channel_claim_id>]
[--channel_name=<channel_name>]
Options:
--file_name=<file_name> : (str) file name to save to
--download_directory=<download_directory> : (str) directory to save into
--sd_hash=<sd_hash> : (str) save file with matching sd hash
--stream_hash=<stream_hash> : (str) save file with matching stream hash
--rowid=<rowid> : (int) save file with matching row id
--claim_id=<claim_id> : (str) save file with matching claim id
--txid=<txid> : (str) save file with matching claim txid
--nout=<nout> : (int) save file with matching claim nout
--claim_name=<claim_name> : (str) save file with matching claim name
--channel_claim_id=<channel_claim_id> : (str) save file with matching channel claim id
--channel_name=<channel_name> : (str) save file with matching channel claim name
Returns: {File}
"""
streams = self.stream_manager.get_filtered_streams(**kwargs)
if len(streams) > 1:
log.warning("There are %i matching files, use narrower filters to select one", len(streams))
return False
if not streams:
log.warning("There is no file to save")
return False
stream = streams[0]
await stream.save_file(file_name, download_directory)
return stream
CLAIM_DOC = """
List and search all types of claims.
"""
@requires(WALLET_COMPONENT)
def jsonrpc_claim_list(self, account_id=None, wallet_id=None, page=None, page_size=None):
"""
List my stream and channel claims.
Usage:
claim_list [<account_id> | --account_id=<account_id>] [--wallet_id=<wallet_id>]
[--page=<page>] [--page_size=<page_size>]
Options:
--account_id=<account_id> : (str) id of the account to query
--wallet_id=<wallet_id> : (str) restrict results to specific wallet
--page=<page> : (int) page to return during paginating
--page_size=<page_size> : (int) number of items on page during pagination
Returns: {Paginated[Output]}
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
if account_id:
account: LBCAccount = wallet.get_account_or_error(account_id)
claims = account.get_claims
claim_count = account.get_claim_count
else:
claims = partial(self.ledger.get_claims, wallet=wallet, accounts=wallet.accounts)
claim_count = partial(self.ledger.get_claim_count, wallet=wallet, accounts=wallet.accounts)
return maybe_paginate(claims, claim_count, page, page_size)
@requires(WALLET_COMPONENT)
async def jsonrpc_claim_search(self, **kwargs):
"""
Search for stream and channel claims on the blockchain.
Arguments marked with "supports equality constraints" allow prepending the
value with an equality constraint such as '>', '>=', '<' and '<=',
e.g. --height=">400000" limits results to claims above block height 400,000.
Usage:
claim_search [<name> | --name=<name>] [--claim_id=<claim_id>] [--txid=<txid>] [--nout=<nout>]
[--channel=<channel> |
[[--channel_ids=<channel_ids>...] [--not_channel_ids=<not_channel_ids>...]]]
[--has_channel_signature] [--valid_channel_signature | --invalid_channel_signature]
[--is_controlling] [--release_time=<release_time>] [--public_key_id=<public_key_id>]
[--timestamp=<timestamp>] [--creation_timestamp=<creation_timestamp>]
[--height=<height>] [--creation_height=<creation_height>]
[--activation_height=<activation_height>] [--expiration_height=<expiration_height>]
[--amount=<amount>] [--effective_amount=<effective_amount>]
[--support_amount=<support_amount>] [--trending_group=<trending_group>]
[--trending_mixed=<trending_mixed>] [--trending_local=<trending_local>]
[--trending_global=<trending_global>]
[--claim_type=<claim_type>] [--stream_types=<stream_types>...] [--media_types=<media_types>...]
[--fee_currency=<fee_currency>] [--fee_amount=<fee_amount>]
[--any_tags=<any_tags>...] [--all_tags=<all_tags>...] [--not_tags=<not_tags>...]
[--any_languages=<any_languages>...] [--all_languages=<all_languages>...]
[--not_languages=<not_languages>...]
[--any_locations=<any_locations>...] [--all_locations=<all_locations>...]
[--not_locations=<not_locations>...]
[--order_by=<order_by>...] [--no_totals] [--page=<page>] [--page_size=<page_size>]
Options:
--name=<name> : (str) claim name (normalized)
--claim_id=<claim_id> : (str) full or partial claim id
--txid=<txid> : (str) transaction id
--nout=<nout> : (str) position in the transaction
--channel=<channel> : (str) claims signed by this channel (argument is
a URL which automatically gets resolved),
see --channel_ids if you need to filter by
multiple channels at the same time,
includes claims with invalid signatures,
use in conjunction with --valid_channel_signature
--channel_ids=<channel_ids> : (list) claims signed by any of these channels
(arguments must be claim ids of the channels),
includes claims with invalid signatures,
implies --has_channel_signature,
use in conjunction with --valid_channel_signature
--not_channel_ids=<not_channel_ids>: (list) exclude claims signed by any of these channels
(arguments must be claim ids of the channels)
--has_channel_signature : (bool) claims with a channel signature (valid or invalid)
--valid_channel_signature : (bool) claims with a valid channel signature or no signature,
use in conjunction with --has_channel_signature to
only get claims with valid signatures
--invalid_channel_signature : (bool) claims with invalid channel signature or no signature,
use in conjunction with --has_channel_signature to
only get claims with invalid signatures
--is_controlling : (bool) winning claims of their respective name
--public_key_id=<public_key_id> : (str) only return channels having this public key id, this is
the same key as used in the wallet file to map
channel certificate private keys: {'public_key_id': 'private key'}
--height=<height> : (int) last updated block height (supports equality constraints)
--timestamp=<timestamp> : (int) last updated timestamp (supports equality constraints)
--creation_height=<creation_height> : (int) created at block height (supports equality constraints)
--creation_timestamp=<creation_timestamp>: (int) created at timestamp (supports equality constraints)
--activation_height=<activation_height> : (int) height at which claim starts competing for name
(supports equality constraints)
--expiration_height=<expiration_height> : (int) height at which claim will expire
(supports equality constraints)
--release_time=<release_time> : (int) limit to claims self-described as having been
released to the public on or after this UTC
timestamp, when claim does not provide
a release time the publish time is used instead
(supports equality constraints)
--amount=<amount> : (int) limit by claim value (supports equality constraints)
--support_amount=<support_amount>: (int) limit by supports and tips received (supports
equality constraints)
--effective_amount=<effective_amount>: (int) limit by total value (initial claim value plus
all tips and supports received), this amount is
blank until claim has reached activation height
(supports equality constraints)
--trending_group=<trending_group>: (int) group numbers 1 through 4 representing the
trending groups of the content: 4 means
content is trending globally and independently,
3 means content is not trending globally but is
trending independently (locally), 2 means it is
trending globally but not independently and 1
means it's not trending globally or locally
(supports equality constraints)
--trending_mixed=<trending_mixed>: (int) trending amount taken from the global or local
value depending on the trending group:
4 - global value, 3 - local value, 2 - global
value, 1 - local value (supports equality
constraints)
--trending_local=<trending_local>: (int) trending value calculated relative only to
the individual contents past history (supports
equality constraints)
--trending_global=<trending_global>: (int) trending value calculated relative to all
trending content globally (supports
equality constraints)
--claim_type=<claim_type> : (str) filter by 'channel', 'stream' or 'unknown'
--stream_types=<stream_types> : (list) filter by 'video', 'image', 'document', etc
--media_types=<media_types> : (list) filter by 'video/mp4', 'image/png', etc
--fee_currency=<fee_currency> : (string) specify fee currency: LBC, BTC, USD
--fee_amount=<fee_amount> : (decimal) content download fee (supports equality constraints)
--any_tags=<any_tags> : (list) find claims containing any of the tags
--all_tags=<all_tags> : (list) find claims containing every tag
--not_tags=<not_tags> : (list) find claims not containing any of these tags
--any_languages=<any_languages> : (list) find claims containing any of the languages
--all_languages=<all_languages> : (list) find claims containing every language
--not_languages=<not_languages> : (list) find claims not containing any of these languages
--any_locations=<any_locations> : (list) find claims containing any of the locations
--all_locations=<all_locations> : (list) find claims containing every location
--not_locations=<not_locations> : (list) find claims not containing any of these locations
--page=<page> : (int) page to return during paginating
--page_size=<page_size> : (int) number of items on page during pagination
--order_by=<order_by>          : (list) field to order by; defaults to descending order, prepend
                                        ^ to the field name for ascending order, e.g. '^amount';
available fields: 'name', 'height', 'release_time',
'publish_time', 'amount', 'effective_amount',
'support_amount', 'trending_group', 'trending_mixed',
'trending_local', 'trending_global', 'activation_height'
--no_totals : (bool) do not calculate the total number of pages and items in result set
(significant performance boost)
Returns: {Paginated[Output]}
"""
if kwargs.pop('valid_channel_signature', False):
kwargs['signature_valid'] = 1
if kwargs.pop('invalid_channel_signature', False):
kwargs['signature_valid'] = 0
page_num, page_size = abs(kwargs.pop('page', 1)), min(abs(kwargs.pop('page_size', 10)), 50)
kwargs.update({'offset': page_size * (page_num - 1), 'limit': page_size})
txos, offset, total = await self.ledger.claim_search(**kwargs)
result = {"items": txos, "page": page_num, "page_size": page_size}
if not kwargs.pop('no_totals', False):
result['total_pages'] = int((total + (page_size - 1)) / page_size)
result['total_items'] = total
return result
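# Two small sketches for the claim_search docstring above.
#
# 1) Equality constraints are plain string prefixes on the value, e.g. claims
#    tagged 'science' above block height 400000, ordered by ascending name
#    (illustrative parameters only):
#
#     params = {'any_tags': ['science'], 'height': '>400000', 'order_by': ['^name']}
#
# 2) The total_pages arithmetic at the end of the method is ceiling division:
#
#     import math
#     total, page_size = 105, 10
#     assert int((total + (page_size - 1)) / page_size) == math.ceil(total / page_size) == 11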
CHANNEL_DOC = """
Create, update, abandon and list your channel claims.
"""
@deprecated('channel_create')
def jsonrpc_channel_new(self):
""" deprecated """
@requires(WALLET_COMPONENT, conditions=[WALLET_IS_UNLOCKED])
async def jsonrpc_channel_create(
self, name, bid, allow_duplicate_name=False, account_id=None, wallet_id=None,
claim_address=None, funding_account_ids=None, preview=False, blocking=False, **kwargs):
"""
Create a new channel by generating a channel private key and establishing an '@' prefixed claim.
Usage:
channel_create (<name> | --name=<name>) (<bid> | --bid=<bid>)
[--allow_duplicate_name=<allow_duplicate_name>]
[--title=<title>] [--description=<description>] [--email=<email>]
[--website_url=<website_url>] [--featured=<featured>...]
[--tags=<tags>...] [--languages=<languages>...] [--locations=<locations>...]
[--thumbnail_url=<thumbnail_url>] [--cover_url=<cover_url>]
[--account_id=<account_id>] [--wallet_id=<wallet_id>]
[--claim_address=<claim_address>] [--funding_account_ids=<funding_account_ids>...]
[--preview] [--blocking]
Options:
--name=<name> : (str) name of the channel prefixed with '@'
--bid=<bid> : (decimal) amount to back the claim
--allow_duplicate_name=<allow_duplicate_name> : (bool) create new channel even if one already exists with
given name. default: false.
--title=<title> : (str) title of the publication
--description=<description> : (str) description of the publication
--email=<email> : (str) email of channel owner
--website_url=<website_url> : (str) website url
--featured=<featured> : (list) claim_ids of featured content in channel
--tags=<tags> : (list) content tags
--languages=<languages> : (list) languages used by the channel,
using RFC 5646 format, eg:
for English `--languages=en`
for Spanish (Spain) `--languages=es-ES`
for Spanish (Mexican) `--languages=es-MX`
for Chinese (Simplified) `--languages=zh-Hans`
for Chinese (Traditional) `--languages=zh-Hant`
--locations=<locations> : (list) locations of the channel, consisting of 2 letter
`country` code and a `state`, `city` and a postal
`code` along with a `latitude` and `longitude`.
for JSON RPC: pass a dictionary with aforementioned
attributes as keys, eg:
...
"locations": [{'country': 'US', 'state': 'NH'}]
...
for command line: pass a colon delimited list
with values in the following order:
"COUNTRY:STATE:CITY:CODE:LATITUDE:LONGITUDE"
making sure to include colon for blank values, for
example to provide only the city:
... --locations="::Manchester"
with all values set:
... --locations="US:NH:Manchester:03101:42.990605:-71.460989"
optionally, you can just pass the "LATITUDE:LONGITUDE":
... --locations="42.990605:-71.460989"
finally, you can also pass JSON string of dictionary
on the command line as you would via JSON RPC
... --locations="{'country': 'US', 'state': 'NH'}"
--thumbnail_url=<thumbnail_url>: (str) thumbnail url
--cover_url=<cover_url> : (str) url of cover image
--account_id=<account_id> : (str) account to use for holding the transaction
--wallet_id=<wallet_id> : (str) restrict operation to specific wallet
--funding_account_ids=<funding_account_ids>: (list) ids of accounts to fund this transaction
--claim_address=<claim_address>: (str) address where the channel is sent to, if not specified
it will be determined automatically from the account
--preview : (bool) do not broadcast the transaction
--blocking : (bool) wait until transaction is in mempool
Returns: {Transaction}
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
account = wallet.get_account_or_default(account_id)
funding_accounts = wallet.get_accounts_or_all(funding_account_ids)
self.valid_channel_name_or_error(name)
amount = self.get_dewies_or_error('bid', bid, positive_value=True)
claim_address = await self.get_receiving_address(claim_address, account)
existing_channels = await self.ledger.get_channels(accounts=wallet.accounts, claim_name=name)
if len(existing_channels) > 0:
if not allow_duplicate_name:
raise Exception(
f"You already have a channel under the name '{name}'. "
f"Use --allow-duplicate-name flag to override."
)
claim = Claim()
claim.channel.update(**kwargs)
tx = await Transaction.claim_create(
name, claim, amount, claim_address, funding_accounts, funding_accounts[0]
)
txo = tx.outputs[0]
txo.generate_channel_private_key()
if not preview:
await tx.sign(funding_accounts)
account.add_channel_private_key(txo.private_key)
wallet.save()
await self.broadcast_or_release(tx, blocking)
await self.storage.save_claims([self._old_get_temp_claim_info(
tx, txo, claim_address, claim, name, dewies_to_lbc(amount)
)])
await self.analytics_manager.send_new_channel()
else:
await account.ledger.release_tx(tx)
return tx
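# A hedged sketch of the colon-delimited --locations format documented above
# (an illustrative helper only; the daemon's own parser may differ in details):
#
#     def parse_location(value: str) -> dict:
#         keys = ('country', 'state', 'city', 'code', 'latitude', 'longitude')
#         parts = value.split(':')
#         if len(parts) == 2:               # bare "LATITUDE:LONGITUDE" form
#             parts = ['', '', '', ''] + parts
#         return {k: v for k, v in zip(keys, parts) if v}
#
#     assert parse_location('::Manchester') == {'city': 'Manchester'}
#     assert parse_location('42.990605:-71.460989') == {
#         'latitude': '42.990605', 'longitude': '-71.460989'}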
@requires(WALLET_COMPONENT, conditions=[WALLET_IS_UNLOCKED])
async def jsonrpc_channel_update(
self, claim_id, bid=None, account_id=None, wallet_id=None, claim_address=None,
funding_account_ids=None, new_signing_key=False, preview=False,
blocking=False, replace=False, **kwargs):
"""
Update an existing channel claim.
Usage:
channel_update (<claim_id> | --claim_id=<claim_id>) [<bid> | --bid=<bid>]
[--title=<title>] [--description=<description>] [--email=<email>]
[--website_url=<website_url>]
[--featured=<featured>...] [--clear_featured]
[--tags=<tags>...] [--clear_tags]
[--languages=<languages>...] [--clear_languages]
[--locations=<locations>...] [--clear_locations]
[--thumbnail_url=<thumbnail_url>] [--cover_url=<cover_url>]
[--account_id=<account_id>] [--wallet_id=<wallet_id>]
[--claim_address=<claim_address>] [--new_signing_key]
[--funding_account_ids=<funding_account_ids>...]
[--preview] [--blocking] [--replace]
Options:
--claim_id=<claim_id> : (str) claim_id of the channel to update
--bid=<bid> : (decimal) amount to back the claim
--title=<title> : (str) title of the publication
--description=<description> : (str) description of the publication
--email=<email> : (str) email of channel owner
--website_url=<website_url> : (str) website url
--featured=<featured> : (list) claim_ids of featured content in channel
--clear_featured : (bool) clear existing featured content (prior to adding new ones)
--tags=<tags> : (list) add content tags
--clear_tags : (bool) clear existing tags (prior to adding new ones)
--languages=<languages> : (list) languages used by the channel,
using RFC 5646 format, eg:
for English `--languages=en`
for Spanish (Spain) `--languages=es-ES`
for Spanish (Mexican) `--languages=es-MX`
for Chinese (Simplified) `--languages=zh-Hans`
for Chinese (Traditional) `--languages=zh-Hant`
--clear_languages : (bool) clear existing languages (prior to adding new ones)
--locations=<locations> : (list) locations of the channel, consisting of 2 letter
`country` code and a `state`, `city` and a postal
`code` along with a `latitude` and `longitude`.
for JSON RPC: pass a dictionary with aforementioned
attributes as keys, eg:
...
"locations": [{'country': 'US', 'state': 'NH'}]
...
for command line: pass a colon delimited list
with values in the following order:
"COUNTRY:STATE:CITY:CODE:LATITUDE:LONGITUDE"
making sure to include colon for blank values, for
example to provide only the city:
... --locations="::Manchester"
with all values set:
... --locations="US:NH:Manchester:03101:42.990605:-71.460989"
optionally, you can just pass the "LATITUDE:LONGITUDE":
... --locations="42.990605:-71.460989"
finally, you can also pass JSON string of dictionary
on the command line as you would via JSON RPC
... --locations="{'country': 'US', 'state': 'NH'}"
--clear_locations : (bool) clear existing locations (prior to adding new ones)
--thumbnail_url=<thumbnail_url>: (str) thumbnail url
--cover_url=<cover_url> : (str) url of cover image
--account_id=<account_id> : (str) account in which to look for channel (default: all)
--wallet_id=<wallet_id> : (str) restrict operation to specific wallet
--funding_account_ids=<funding_account_ids>: (list) ids of accounts to fund this transaction
--claim_address=<claim_address>: (str) address where the channel is sent
--new_signing_key : (bool) generate a new signing key, will invalidate all previous publishes
--preview : (bool) do not broadcast the transaction
--blocking : (bool) wait until transaction is in mempool
--replace : (bool) instead of modifying specific values on
the channel, this will clear all existing values
and only save passed in values, useful for form
submissions where all values are always set
Returns: {Transaction}
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
funding_accounts = wallet.get_accounts_or_all(funding_account_ids)
if account_id:
account = wallet.get_account_or_error(account_id)
accounts = [account]
else:
account = wallet.default_account
accounts = wallet.accounts
existing_channels = await self.ledger.get_claims(
wallet=wallet, accounts=accounts, claim_id=claim_id
)
if len(existing_channels) != 1:
account_ids = ', '.join(f"'{account.id}'" for account in accounts)
raise Exception(
f"Can't find the channel '{claim_id}' in account(s) {account_ids}."
)
old_txo = existing_channels[0]
if not old_txo.claim.is_channel:
raise Exception(
f"A claim with id '{claim_id}' was found but it is not a channel."
)
if bid is not None:
amount = self.get_dewies_or_error('bid', bid, positive_value=True)
else:
amount = old_txo.amount
if claim_address is not None:
self.valid_address_or_error(claim_address)
else:
claim_address = old_txo.get_address(account.ledger)
if replace:
claim = Claim()
claim.channel.public_key_bytes = old_txo.claim.channel.public_key_bytes
else:
claim = Claim.from_bytes(old_txo.claim.to_bytes())
claim.channel.update(**kwargs)
tx = await Transaction.claim_update(
old_txo, claim, amount, claim_address, funding_accounts, funding_accounts[0]
)
new_txo = tx.outputs[0]
if new_signing_key:
new_txo.generate_channel_private_key()
else:
new_txo.private_key = old_txo.private_key
new_txo.script.generate()
if not preview:
await tx.sign(funding_accounts)
account.add_channel_private_key(new_txo.private_key)
wallet.save()
await self.broadcast_or_release(tx, blocking)
await self.storage.save_claims([self._old_get_temp_claim_info(
tx, new_txo, claim_address, new_txo.claim, new_txo.claim_name, dewies_to_lbc(amount)
)])
await self.analytics_manager.send_new_channel()
else:
await account.ledger.release_tx(tx)
return tx
@requires(WALLET_COMPONENT, conditions=[WALLET_IS_UNLOCKED])
async def jsonrpc_channel_abandon(
self, claim_id=None, txid=None, nout=None, account_id=None, wallet_id=None,
preview=False, blocking=True):
"""
Abandon one of my channel claims.
Usage:
channel_abandon [<claim_id> | --claim_id=<claim_id>]
[<txid> | --txid=<txid>] [<nout> | --nout=<nout>]
[--account_id=<account_id>] [--wallet_id=<wallet_id>]
[--preview] [--blocking]
Options:
--claim_id=<claim_id> : (str) claim_id of the claim to abandon
--txid=<txid> : (str) txid of the claim to abandon
--nout=<nout> : (int) nout of the claim to abandon
--account_id=<account_id> : (str) id of the account to use
--wallet_id=<wallet_id> : (str) restrict operation to specific wallet
--preview : (bool) do not broadcast the transaction
--blocking : (bool) wait until abandon is in mempool
Returns: {Transaction}
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
if account_id:
account = wallet.get_account_or_error(account_id)
accounts = [account]
else:
account = wallet.default_account
accounts = wallet.accounts
if txid is not None and nout is not None:
claims = await self.ledger.get_claims(
wallet=wallet, accounts=accounts, **{'txo.txid': txid, 'txo.position': nout}
)
elif claim_id is not None:
claims = await self.ledger.get_claims(
wallet=wallet, accounts=accounts, claim_id=claim_id
)
else:
raise Exception('Must specify claim_id, or txid and nout')
if not claims:
raise Exception('No claim found for the specified claim_id or txid:nout')
tx = await Transaction.create(
[Input.spend(txo) for txo in claims], [], [account], account
)
if not preview:
await self.broadcast_or_release(tx, blocking)
await self.analytics_manager.send_claim_action('abandon')
else:
await account.ledger.release_tx(tx)
return tx
@requires(WALLET_COMPONENT)
def jsonrpc_channel_list(self, account_id=None, wallet_id=None, page=None, page_size=None):
"""
List my channel claims.
Usage:
channel_list [<account_id> | --account_id=<account_id>] [--wallet_id=<wallet_id>]
[--page=<page>] [--page_size=<page_size>]
Options:
--account_id=<account_id> : (str) id of the account to use
--wallet_id=<wallet_id> : (str) restrict results to specific wallet
--page=<page> : (int) page to return during paginating
--page_size=<page_size> : (int) number of items on page during pagination
Returns: {Paginated[Output]}
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
if account_id:
account: LBCAccount = wallet.get_account_or_error(account_id)
channels = account.get_channels
channel_count = account.get_channel_count
else:
channels = partial(self.ledger.get_channels, wallet=wallet, accounts=wallet.accounts)
channel_count = partial(self.ledger.get_channel_count, wallet=wallet, accounts=wallet.accounts)
return maybe_paginate(channels, channel_count, page, page_size)
@requires(WALLET_COMPONENT)
async def jsonrpc_channel_export(self, channel_id=None, channel_name=None, account_id=None, wallet_id=None):
"""
Export channel private key.
Usage:
channel_export (<channel_id> | --channel_id=<channel_id> | --channel_name=<channel_name>)
[--account_id=<account_id>...] [--wallet_id=<wallet_id>]
Options:
--channel_id=<channel_id> : (str) claim id of channel to export
--channel_name=<channel_name> : (str) name of channel to export
--account_id=<account_id> : (str) one or more account ids for accounts
to look in for channels, defaults to
all accounts.
--wallet_id=<wallet_id> : (str) restrict operation to specific wallet
Returns:
(str) serialized channel private key
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
channel = await self.get_channel_or_error(wallet, account_id, channel_id, channel_name, for_signing=True)
address = channel.get_address(self.ledger)
public_key = await self.ledger.get_public_key_for_address(wallet, address)
if not public_key:
raise Exception("Can't find public key for address holding the channel.")
export = {
'name': channel.claim_name,
'channel_id': channel.claim_id,
'holding_address': address,
'holding_public_key': public_key.extended_key_string(),
'signing_private_key': channel.private_key.to_pem().decode()
}
return base58.b58encode(json.dumps(export, separators=(',', ':')))
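# The export above is just base58(json.dumps({...})), so a client can inspect
# it without the daemon (uses the same base58 package this module relies on;
# the placeholder string stands in for whatever channel_export returned):
#
#     import json, base58
#
#     exported = '...'  # value returned by channel_export
#     info = json.loads(base58.b58decode(exported))
#     print(info['name'], info['channel_id'], info['holding_address'])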
@requires(WALLET_COMPONENT)
async def jsonrpc_channel_import(self, channel_data, wallet_id=None):
"""
Import serialized channel private key (to allow signing new streams to the channel)
Usage:
channel_import (<channel_data> | --channel_data=<channel_data>) [--wallet_id=<wallet_id>]
Options:
--channel_data=<channel_data> : (str) serialized channel, as exported by channel export
--wallet_id=<wallet_id> : (str) import into specific wallet
Returns:
(dict) Result dictionary
"""
decoded = base58.b58decode(channel_data)
data = json.loads(decoded)
channel_private_key = ecdsa.SigningKey.from_pem(
data['signing_private_key'], hashfunc=hashlib.sha256
)
public_key_der = channel_private_key.get_verifying_key().to_der()
# check that the holding_address hasn't changed since the export was made
holding_address = data['holding_address']
channels, _, _ = await self.ledger.claim_search(
public_key_id=self.ledger.public_key_to_address(public_key_der)
)
if channels and channels[0].get_address(self.ledger) != holding_address:
holding_address = channels[0].get_address(self.ledger)
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
account: LBCAccount = await self.ledger.get_account_for_address(wallet, holding_address)
if account:
# Case 1: channel holding address is in one of the accounts we already have
# simply add the certificate to existing account
pass
else:
# Case 2: channel holding address hasn't changed and thus is in the bundled read-only account
# create a single-address holding account to manage the channel
if holding_address == data['holding_address']:
account = LBCAccount.from_dict(self.ledger, wallet, {
'name': f"Holding Account For Channel {data['name']}",
'public_key': data['holding_public_key'],
'address_generator': {'name': 'single-address'}
})
if self.ledger.network.is_connected:
await self.ledger.subscribe_account(account)
await self.ledger._update_tasks.done.wait()
# Case 3: the holding address has changed and we can't create or find an account for it
else:
raise Exception(
"Channel owning account has changed since the channel was exported and "
"it is not an account to which you have access."
)
account.add_channel_private_key(channel_private_key)
wallet.save()
return f"Added channel signing key for {data['name']}."
STREAM_DOC = """
Create, update, abandon, list and inspect your stream claims.
"""
@requires(WALLET_COMPONENT, STREAM_MANAGER_COMPONENT, BLOB_COMPONENT, DATABASE_COMPONENT,
conditions=[WALLET_IS_UNLOCKED])
async def jsonrpc_publish(self, name, **kwargs):
"""
Create or replace a stream claim at a given name (use 'stream create/update' for more control).
Usage:
publish (<name> | --name=<name>) [--bid=<bid>] [--file_path=<file_path>]
[--fee_currency=<fee_currency>] [--fee_amount=<fee_amount>] [--fee_address=<fee_address>]
[--title=<title>] [--description=<description>] [--author=<author>]
[--tags=<tags>...] [--languages=<languages>...] [--locations=<locations>...]
[--license=<license>] [--license_url=<license_url>] [--thumbnail_url=<thumbnail_url>]
[--release_time=<release_time>] [--width=<width>] [--height=<height>] [--duration=<duration>]
[--channel_id=<channel_id> | --channel_name=<channel_name>]
[--channel_account_id=<channel_account_id>...]
[--account_id=<account_id>] [--wallet_id=<wallet_id>]
[--claim_address=<claim_address>] [--funding_account_ids=<funding_account_ids>...]
[--preview] [--blocking]
Options:
--name=<name> : (str) name of the content (can only consist of a-z A-Z 0-9 and -(dash))
--bid=<bid> : (decimal) amount to back the claim
--file_path=<file_path> : (str) path to file to be associated with name.
--fee_currency=<fee_currency> : (string) specify fee currency
--fee_amount=<fee_amount> : (decimal) content download fee
--fee_address=<fee_address> : (str) address where to send fee payments, will use
value from --claim_address if not provided
--title=<title> : (str) title of the publication
--description=<description> : (str) description of the publication
--author=<author> : (str) author of the publication. The usage for this field is not
the same as for channels. The author field is used to credit an author
who is not the publisher and is not represented by the channel. For
example, a pdf file of 'The Odyssey' has an author of 'Homer' but may
be published to a channel such as '@classics', or to no channel at all
--tags=<tags> : (list) add content tags
--languages=<languages> : (list) languages used by the channel,
using RFC 5646 format, eg:
for English `--languages=en`
for Spanish (Spain) `--languages=es-ES`
for Spanish (Mexican) `--languages=es-MX`
for Chinese (Simplified) `--languages=zh-Hans`
for Chinese (Traditional) `--languages=zh-Hant`
--locations=<locations> : (list) locations relevant to the stream, consisting of 2 letter
`country` code and a `state`, `city` and a postal
`code` along with a `latitude` and `longitude`.
for JSON RPC: pass a dictionary with aforementioned
attributes as keys, eg:
...
"locations": [{'country': 'US', 'state': 'NH'}]
...
for command line: pass a colon delimited list
with values in the following order:
"COUNTRY:STATE:CITY:CODE:LATITUDE:LONGITUDE"
making sure to include colon for blank values, for
example to provide only the city:
... --locations="::Manchester"
with all values set:
... --locations="US:NH:Manchester:03101:42.990605:-71.460989"
optionally, you can just pass the "LATITUDE:LONGITUDE":
... --locations="42.990605:-71.460989"
finally, you can also pass JSON string of dictionary
on the command line as you would via JSON RPC
... --locations="{'country': 'US', 'state': 'NH'}"
--license=<license> : (str) publication license
--license_url=<license_url> : (str) publication license url
--thumbnail_url=<thumbnail_url>: (str) thumbnail url
--release_time=<release_time> : (int) original public release of content, seconds since UNIX epoch
--width=<width> : (int) image/video width, automatically calculated from media file
--height=<height> : (int) image/video height, automatically calculated from media file
--duration=<duration> : (int) audio/video duration in seconds, automatically calculated
--channel_id=<channel_id> : (str) claim id of the publisher channel
--channel_name=<channel_name> : (str) name of publisher channel
--channel_account_id=<channel_account_id>: (str) one or more account ids for accounts to look in
for channel certificates, defaults to all accounts.
--account_id=<account_id> : (str) account to use for holding the transaction
--wallet_id=<wallet_id> : (str) restrict operation to specific wallet
--funding_account_ids=<funding_account_ids>: (list) ids of accounts to fund this transaction
--claim_address=<claim_address>: (str) address where the claim is sent to, if not specified
it will be determined automatically from the account
--preview : (bool) do not broadcast the transaction
--blocking : (bool) wait until transaction is in mempool
Returns: {Transaction}
"""
log.info("publishing: name: %s params: %s", name, kwargs)
self.valid_stream_name_or_error(name)
wallet = self.wallet_manager.get_wallet_or_default(kwargs.get('wallet_id'))
account = wallet.get_account_or_default(kwargs.get('account_id'))
claims = await account.get_claims(claim_name=name)
if len(claims) == 0:
if 'bid' not in kwargs:
raise Exception("'bid' is a required argument for new publishes.")
if 'file_path' not in kwargs:
raise Exception("'file_path' is a required argument for new publishes.")
return await self.jsonrpc_stream_create(name, **kwargs)
elif len(claims) == 1:
assert claims[0].claim.is_stream, f"Claim at name '{name}' is not a stream claim."
return await self.jsonrpc_stream_update(claims[0].claim_id, replace=True, **kwargs)
raise Exception(
f"There are {len(claims)} claims for '{name}', please use 'stream update' command "
f"to update a specific stream claim."
)
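# publish() above is a thin dispatcher: zero existing claims at the name means
# stream_create, exactly one means stream_update(replace=True). A hedged
# minimal invocation over the local JSON-RPC endpoint (file path and bid are
# placeholders):
#
#     import requests
#
#     requests.post('http://localhost:5279', json={
#         'method': 'publish',
#         'params': {
#             'name': 'my-first-post',
#             'bid': '0.001',                  # required for a brand new claim
#             'file_path': '/tmp/post.md',     # required for a brand new claim
#             'title': 'My first post',
#         }
#     })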
@requires(WALLET_COMPONENT, STREAM_MANAGER_COMPONENT, BLOB_COMPONENT, DATABASE_COMPONENT,
conditions=[WALLET_IS_UNLOCKED])
async def jsonrpc_stream_create(
self, name, bid, file_path, allow_duplicate_name=False,
channel_id=None, channel_name=None, channel_account_id=None,
account_id=None, wallet_id=None, claim_address=None, funding_account_ids=None,
preview=False, blocking=False, **kwargs):
"""
Make a new stream claim and announce the associated file to lbrynet.
Usage:
stream_create (<name> | --name=<name>) (<bid> | --bid=<bid>) (<file_path> | --file_path=<file_path>)
[--allow_duplicate_name=<allow_duplicate_name>]
[--fee_currency=<fee_currency>] [--fee_amount=<fee_amount>] [--fee_address=<fee_address>]
[--title=<title>] [--description=<description>] [--author=<author>]
[--tags=<tags>...] [--languages=<languages>...] [--locations=<locations>...]
[--license=<license>] [--license_url=<license_url>] [--thumbnail_url=<thumbnail_url>]
[--release_time=<release_time>] [--width=<width>] [--height=<height>] [--duration=<duration>]
[--channel_id=<channel_id> | --channel_name=<channel_name>]
[--channel_account_id=<channel_account_id>...]
[--account_id=<account_id>] [--wallet_id=<wallet_id>]
[--claim_address=<claim_address>] [--funding_account_ids=<funding_account_ids>...]
[--preview] [--blocking]
Options:
--name=<name> : (str) name of the content (can only consist of a-z A-Z 0-9 and -(dash))
--bid=<bid> : (decimal) amount to back the claim
--file_path=<file_path> : (str) path to file to be associated with name.
--allow_duplicate_name=<allow_duplicate_name> : (bool) create new claim even if one already exists with
given name. default: false.
--fee_currency=<fee_currency> : (string) specify fee currency
--fee_amount=<fee_amount> : (decimal) content download fee
--fee_address=<fee_address> : (str) address where to send fee payments, will use
value from --claim_address if not provided
--title=<title> : (str) title of the publication
--description=<description> : (str) description of the publication
--author=<author> : (str) author of the publication. The usage for this field is not
the same as for channels. The author field is used to credit an author
who is not the publisher and is not represented by the channel. For
example, a pdf file of 'The Odyssey' has an author of 'Homer' but may
be published to a channel such as '@classics', or to no channel at all
--tags=<tags> : (list) add content tags
--languages=<languages> : (list) languages used by the channel,
using RFC 5646 format, eg:
for English `--languages=en`
for Spanish (Spain) `--languages=es-ES`
for Spanish (Mexican) `--languages=es-MX`
for Chinese (Simplified) `--languages=zh-Hans`
for Chinese (Traditional) `--languages=zh-Hant`
--locations=<locations> : (list) locations relevant to the stream, consisting of 2 letter
`country` code and a `state`, `city` and a postal
`code` along with a `latitude` and `longitude`.
for JSON RPC: pass a dictionary with aforementioned
attributes as keys, eg:
...
"locations": [{'country': 'US', 'state': 'NH'}]
...
for command line: pass a colon delimited list
with values in the following order:
"COUNTRY:STATE:CITY:CODE:LATITUDE:LONGITUDE"
making sure to include colon for blank values, for
example to provide only the city:
... --locations="::Manchester"
with all values set:
... --locations="US:NH:Manchester:03101:42.990605:-71.460989"
optionally, you can just pass the "LATITUDE:LONGITUDE":
... --locations="42.990605:-71.460989"
finally, you can also pass JSON string of dictionary
on the command line as you would via JSON RPC
... --locations="{'country': 'US', 'state': 'NH'}"
--license=<license> : (str) publication license
--license_url=<license_url> : (str) publication license url
--thumbnail_url=<thumbnail_url>: (str) thumbnail url
--release_time=<release_time> : (int) original public release of content, seconds since UNIX epoch
--width=<width> : (int) image/video width, automatically calculated from media file
--height=<height> : (int) image/video height, automatically calculated from media file
--duration=<duration> : (int) audio/video duration in seconds, automatically calculated
--channel_id=<channel_id> : (str) claim id of the publisher channel
--channel_name=<channel_name> : (str) name of the publisher channel
--channel_account_id=<channel_account_id>: (str) one or more account ids for accounts to look in
for channel certificates, defaults to all accounts.
--account_id=<account_id> : (str) account to use for holding the transaction
--wallet_id=<wallet_id> : (str) restrict operation to specific wallet
--funding_account_ids=<funding_account_ids>: (list) ids of accounts to fund this transaction
--claim_address=<claim_address>: (str) address where the claim is sent to, if not specified
it will be determined automatically from the account
--preview : (bool) do not broadcast the transaction
--blocking : (bool) wait until transaction is in mempool
Returns: {Transaction}
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
self.valid_stream_name_or_error(name)
account = wallet.get_account_or_default(account_id)
funding_accounts = wallet.get_accounts_or_all(funding_account_ids)
channel = await self.get_channel_or_none(wallet, channel_account_id, channel_id, channel_name, for_signing=True)
amount = self.get_dewies_or_error('bid', bid, positive_value=True)
claim_address = await self.get_receiving_address(claim_address, account)
kwargs['fee_address'] = self.get_fee_address(kwargs, claim_address)
claims = await account.get_claims(claim_name=name)
if len(claims) > 0:
if not allow_duplicate_name:
raise Exception(
f"You already have a stream claim published under the name '{name}'. "
f"Use --allow-duplicate-name flag to override."
)
claim = Claim()
claim.stream.update(file_path=file_path, sd_hash='0' * 96, **kwargs)
tx = await Transaction.claim_create(
name, claim, amount, claim_address, funding_accounts, funding_accounts[0], channel
)
new_txo = tx.outputs[0]
file_stream = None
if not preview:
file_stream = await self.stream_manager.create_stream(file_path)
claim.stream.source.sd_hash = file_stream.sd_hash
new_txo.script.generate()
if channel:
new_txo.sign(channel)
await tx.sign(funding_accounts)
if not preview:
await self.broadcast_or_release(tx, blocking)
await self.storage.save_claims([self._old_get_temp_claim_info(
tx, new_txo, claim_address, claim, name, dewies_to_lbc(amount)
)])
await self.storage.save_content_claim(file_stream.stream_hash, new_txo.id)
await self.analytics_manager.send_claim_action('publish')
else:
await account.ledger.release_tx(tx)
return tx
@requires(WALLET_COMPONENT, STREAM_MANAGER_COMPONENT, BLOB_COMPONENT, DATABASE_COMPONENT,
conditions=[WALLET_IS_UNLOCKED])
async def jsonrpc_stream_update(
self, claim_id, bid=None, file_path=None,
channel_id=None, channel_name=None, channel_account_id=None, clear_channel=False,
account_id=None, wallet_id=None, claim_address=None, funding_account_ids=None,
preview=False, blocking=False, replace=False, **kwargs):
"""
Update an existing stream claim and if a new file is provided announce it to lbrynet.
Usage:
stream_update (<claim_id> | --claim_id=<claim_id>) [--bid=<bid>] [--file_path=<file_path>]
[--file_name=<file_name>] [--file_size=<file_size>] [--file_hash=<file_hash>]
[--fee_currency=<fee_currency>] [--fee_amount=<fee_amount>]
[--fee_address=<fee_address>] [--clear_fee]
[--title=<title>] [--description=<description>] [--author=<author>]
[--tags=<tags>...] [--clear_tags]
[--languages=<languages>...] [--clear_languages]
[--locations=<locations>...] [--clear_locations]
[--license=<license>] [--license_url=<license_url>] [--thumbnail_url=<thumbnail_url>]
[--release_time=<release_time>] [--width=<width>] [--height=<height>] [--duration=<duration>]
[--channel_id=<channel_id> | --channel_name=<channel_name> | --clear_channel]
[--channel_account_id=<channel_account_id>...]
[--account_id=<account_id>] [--wallet_id=<wallet_id>]
[--claim_address=<claim_address>] [--funding_account_ids=<funding_account_ids>...]
[--preview] [--blocking] [--replace]
Options:
--claim_id=<claim_id> : (str) id of the stream claim to update
--bid=<bid> : (decimal) amount to back the claim
--file_path=<file_path> : (str) path to file to be associated with name.
--file_name=<file_name> : (str) override file name, defaults to name from file_path.
--file_size=<file_size> : (str) override file size, otherwise automatically computed.
--file_hash=<file_hash> : (str) override file hash, otherwise automatically computed.
--fee_currency=<fee_currency> : (string) specify fee currency
--fee_amount=<fee_amount> : (decimal) content download fee
--fee_address=<fee_address> : (str) address where to send fee payments, will use
value from --claim_address if not provided
--clear_fee : (bool) clear previously set fee
--title=<title> : (str) title of the publication
--description=<description> : (str) description of the publication
--author=<author> : (str) author of the publication. The usage for this field is not
the same as for channels. The author field is used to credit an author
who is not the publisher and is not represented by the channel. For
example, a pdf file of 'The Odyssey' has an author of 'Homer' but may
be published to a channel such as '@classics', or to no channel at all
--tags=<tags> : (list) add content tags
--clear_tags : (bool) clear existing tags (prior to adding new ones)
--languages=<languages> : (list) languages used by the channel,
using RFC 5646 format, eg:
for English `--languages=en`
for Spanish (Spain) `--languages=es-ES`
for Spanish (Mexican) `--languages=es-MX`
for Chinese (Simplified) `--languages=zh-Hans`
for Chinese (Traditional) `--languages=zh-Hant`
--clear_languages : (bool) clear existing languages (prior to adding new ones)
--locations=<locations> : (list) locations relevant to the stream, consisting of 2 letter
`country` code and a `state`, `city` and a postal
`code` along with a `latitude` and `longitude`.
for JSON RPC: pass a dictionary with aforementioned
attributes as keys, eg:
...
"locations": [{'country': 'US', 'state': 'NH'}]
...
for command line: pass a colon delimited list
with values in the following order:
"COUNTRY:STATE:CITY:CODE:LATITUDE:LONGITUDE"
making sure to include colon for blank values, for
example to provide only the city:
... --locations="::Manchester"
with all values set:
... --locations="US:NH:Manchester:03101:42.990605:-71.460989"
optionally, you can just pass the "LATITUDE:LONGITUDE":
... --locations="42.990605:-71.460989"
finally, you can also pass JSON string of dictionary
on the command line as you would via JSON RPC
... --locations="{'country': 'US', 'state': 'NH'}"
--clear_locations : (bool) clear existing locations (prior to adding new ones)
--license=<license> : (str) publication license
--license_url=<license_url> : (str) publication license url
--thumbnail_url=<thumbnail_url>: (str) thumbnail url
--release_time=<release_time> : (int) original public release of content, seconds since UNIX epoch
--width=<width> : (int) image/video width, automatically calculated from media file
--height=<height> : (int) image/video height, automatically calculated from media file
--duration=<duration> : (int) audio/video duration in seconds, automatically calculated
--channel_id=<channel_id> : (str) claim id of the publisher channel
--channel_name=<channel_name> : (str) name of the publisher channel
--clear_channel : (bool) remove channel signature
--channel_account_id=<channel_account_id>: (str) one or more account ids for accounts to look in
for channel certificates, defaults to all accounts.
--account_id=<account_id> : (str) account in which to look for stream (default: all)
--wallet_id=<wallet_id> : (str) restrict operation to specific wallet
--funding_account_ids=<funding_account_ids>: (list) ids of accounts to fund this transaction
--claim_address=<claim_address>: (str) address where the claim is sent to, if not specified
it will be determined automatically from the account
--preview : (bool) do not broadcast the transaction
--blocking : (bool) wait until transaction is in mempool
--replace : (bool) instead of modifying specific values on
the stream, this will clear all existing values
and only save passed in values, useful for form
submissions where all values are always set
Returns: {Transaction}
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
funding_accounts = wallet.get_accounts_or_all(funding_account_ids)
if account_id:
account = wallet.get_account_or_error(account_id)
accounts = [account]
else:
account = wallet.default_account
accounts = wallet.accounts
existing_claims = await self.ledger.get_claims(
wallet=wallet, accounts=accounts, claim_id=claim_id
)
if len(existing_claims) != 1:
account_ids = ', '.join(f"'{account.id}'" for account in accounts)
raise Exception(
f"Can't find the stream '{claim_id}' in account(s) {account_ids}."
)
old_txo = existing_claims[0]
if not old_txo.claim.is_stream:
raise Exception(
f"A claim with id '{claim_id}' was found but it is not a stream claim."
)
if bid is not None:
amount = self.get_dewies_or_error('bid', bid, positive_value=True)
else:
amount = old_txo.amount
if claim_address is not None:
self.valid_address_or_error(claim_address)
else:
claim_address = old_txo.get_address(account.ledger)
channel = None
if channel_id or channel_name:
channel = await self.get_channel_or_error(
wallet, channel_account_id, channel_id, channel_name, for_signing=True)
elif old_txo.claim.is_signed and not clear_channel and not replace:
channel = old_txo.channel
fee_address = self.get_fee_address(kwargs, claim_address)
if fee_address:
kwargs['fee_address'] = fee_address
if replace:
claim = Claim()
claim.stream.message.source.CopyFrom(
old_txo.claim.stream.message.source
)
stream_type = old_txo.claim.stream.stream_type
if stream_type:
old_stream_type = getattr(old_txo.claim.stream.message, stream_type)
new_stream_type = getattr(claim.stream.message, stream_type)
new_stream_type.CopyFrom(old_stream_type)
claim.stream.update(file_path=file_path, **kwargs)
else:
claim = Claim.from_bytes(old_txo.claim.to_bytes())
claim.stream.update(file_path=file_path, **kwargs)
tx = await Transaction.claim_update(
old_txo, claim, amount, claim_address, funding_accounts, funding_accounts[0], channel
)
new_txo = tx.outputs[0]
stream_hash = None
if not preview:
old_stream_hash = await self.storage.get_stream_hash_for_sd_hash(old_txo.claim.stream.source.sd_hash)
if file_path is not None:
if old_stream_hash:
stream_to_delete = self.stream_manager.get_stream_by_stream_hash(old_stream_hash)
await self.stream_manager.delete_stream(stream_to_delete, delete_file=False)
file_stream = await self.stream_manager.create_stream(file_path)
new_txo.claim.stream.source.sd_hash = file_stream.sd_hash
new_txo.script.generate()
stream_hash = file_stream.stream_hash
else:
stream_hash = old_stream_hash
if channel:
new_txo.sign(channel)
await tx.sign(funding_accounts)
if not preview:
await self.broadcast_or_release(tx, blocking)
await self.storage.save_claims([self._old_get_temp_claim_info(
tx, new_txo, claim_address, new_txo.claim, new_txo.claim_name, dewies_to_lbc(amount)
)])
if stream_hash:
await self.storage.save_content_claim(stream_hash, new_txo.id)
await self.analytics_manager.send_claim_action('publish')
else:
await account.ledger.release_tx(tx)
return tx
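# The --replace branch above starts from a blank Claim and copies only the
# source (and stream type) forward, while the default branch merges new values
# into the old claim. A hedged client-side contrast (the claim id is a
# placeholder):
#
#     merge_title_only = {'claim_id': 'deadbeef' * 5, 'title': 'New title'}
#     replace_all      = {'claim_id': 'deadbeef' * 5, 'title': 'New title',
#                         'replace': True}   # clears tags, description, fee, ...
#     # either dict would be sent as {'method': 'stream_update', 'params': ...}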
@requires(WALLET_COMPONENT, conditions=[WALLET_IS_UNLOCKED])
async def jsonrpc_stream_abandon(
self, claim_id=None, txid=None, nout=None, account_id=None, wallet_id=None,
preview=False, blocking=False):
"""
Abandon one of my stream claims.
Usage:
stream_abandon [<claim_id> | --claim_id=<claim_id>]
[<txid> | --txid=<txid>] [<nout> | --nout=<nout>]
[--account_id=<account_id>] [--wallet_id=<wallet_id>]
[--preview] [--blocking]
Options:
--claim_id=<claim_id> : (str) claim_id of the claim to abandon
--txid=<txid> : (str) txid of the claim to abandon
--nout=<nout> : (int) nout of the claim to abandon
--account_id=<account_id> : (str) id of the account to use
--wallet_id=<wallet_id> : (str) restrict operation to specific wallet
--preview : (bool) do not broadcast the transaction
--blocking : (bool) wait until abandon is in mempool
Returns: {Transaction}
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
if account_id:
account = wallet.get_account_or_error(account_id)
accounts = [account]
else:
account = wallet.default_account
accounts = wallet.accounts
if txid is not None and nout is not None:
claims = await self.ledger.get_claims(
wallet=wallet, accounts=accounts, **{'txo.txid': txid, 'txo.position': nout}
)
elif claim_id is not None:
claims = await self.ledger.get_claims(
wallet=wallet, accounts=accounts, claim_id=claim_id
)
else:
raise Exception('Must specify claim_id, or txid and nout')
if not claims:
raise Exception('No claim found for the specified claim_id or txid:nout')
tx = await Transaction.create(
[Input.spend(txo) for txo in claims], [], accounts, account
)
if not preview:
await self.broadcast_or_release(tx, blocking)
await self.analytics_manager.send_claim_action('abandon')
else:
await self.ledger.release_tx(tx)
return tx
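# Illustrative only (not part of the original source): previewing an abandon by
# outpoint, e.g.
#   {"method": "stream_abandon", "params": {"txid": "<txid>", "nout": 0, "preview": true}}
# builds and returns the transaction without broadcasting it.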
@requires(WALLET_COMPONENT)
def jsonrpc_stream_list(self, account_id=None, wallet_id=None, page=None, page_size=None):
"""
List my stream claims.
Usage:
stream_list [<account_id> | --account_id=<account_id>] [--wallet_id=<wallet_id>]
[--page=<page>] [--page_size=<page_size>]
Options:
--account_id=<account_id> : (str) id of the account to query
--wallet_id=<wallet_id> : (str) restrict results to specific wallet
--page=<page> : (int) page to return during paginating
--page_size=<page_size> : (int) number of items on page during pagination
Returns: {Paginated[Output]}
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
if account_id:
account: LBCAccount = wallet.get_account_or_error(account_id)
streams = account.get_streams
stream_count = account.get_stream_count
else:
streams = partial(self.ledger.get_streams, wallet=wallet, accounts=wallet.accounts)
stream_count = partial(self.ledger.get_stream_count, wallet=wallet, accounts=wallet.accounts)
return maybe_paginate(streams, stream_count, page, page_size)
@requires(WALLET_COMPONENT, EXCHANGE_RATE_MANAGER_COMPONENT, BLOB_COMPONENT,
DHT_COMPONENT, DATABASE_COMPONENT,
conditions=[WALLET_IS_UNLOCKED])
def jsonrpc_stream_cost_estimate(self, uri):
"""
Get estimated cost for a lbry stream
Usage:
stream_cost_estimate (<uri> | --uri=<uri>)
Options:
--uri=<uri> : (str) uri to use
Returns:
(float) Estimated cost in lbry credits, returns None if uri is not
resolvable
"""
return self.get_est_cost_from_uri(uri)
SUPPORT_DOC = """
Create, list and abandon all types of supports.
"""
@requires(WALLET_COMPONENT, conditions=[WALLET_IS_UNLOCKED])
async def jsonrpc_support_create(
self, claim_id, amount, tip=False, account_id=None, wallet_id=None, funding_account_ids=None,
preview=False, blocking=False):
"""
Create a support or a tip for a name claim.
Usage:
support_create (<claim_id> | --claim_id=<claim_id>) (<amount> | --amount=<amount>)
[--tip] [--account_id=<account_id>] [--wallet_id=<wallet_id>]
[--preview] [--blocking] [--funding_account_ids=<funding_account_ids>...]
Options:
--claim_id=<claim_id> : (str) claim_id of the claim to support
--amount=<amount> : (decimal) amount of support
--tip : (bool) send support to claim owner, default: false.
--account_id=<account_id> : (str) account to use for holding the transaction
--wallet_id=<wallet_id> : (str) restrict operation to specific wallet
--funding_account_ids=<funding_account_ids>: (list) ids of accounts to fund this transaction
--preview : (bool) do not broadcast the transaction
--blocking : (bool) wait until transaction is in mempool
Returns: {Transaction}
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
funding_accounts = wallet.get_accounts_or_all(funding_account_ids)
amount = self.get_dewies_or_error("amount", amount)
claim = await self.ledger.get_claim_by_claim_id(claim_id)
claim_address = claim.get_address(self.ledger)
if not tip:
account = wallet.get_account_or_default(account_id)
claim_address = await account.receiving.get_or_create_usable_address()
tx = await Transaction.support(
claim.claim_name, claim_id, amount, claim_address, funding_accounts, funding_accounts[0]
)
if not preview:
await tx.sign(funding_accounts)
await self.broadcast_or_release(tx, blocking)
await self.storage.save_supports({claim_id: [{
'txid': tx.id,
'nout': tx.position,
'address': claim_address,
'claim_id': claim_id,
'amount': dewies_to_lbc(amount)
}]})
await self.analytics_manager.send_claim_action('new_support')
else:
await self.ledger.release_tx(tx)
return tx
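# Illustrative only (not part of the original source): a tip sends the support amount
# to the claim owner's address, e.g.
#   {"method": "support_create", "params": {"claim_id": "<claim_id>", "amount": "5.0", "tip": true}}
# whereas omitting "tip" holds the support on one of your own receiving addresses.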
@requires(WALLET_COMPONENT)
def jsonrpc_support_list(self, account_id=None, wallet_id=None, page=None, page_size=None):
"""
List supports and tips in my control.
Usage:
support_list [<account_id> | --account_id=<account_id>] [--wallet_id=<wallet_id>]
[--page=<page>] [--page_size=<page_size>]
Options:
--account_id=<account_id> : (str) id of the account to query
--wallet_id=<wallet_id> : (str) restrict results to specific wallet
--page=<page> : (int) page to return during paginating
--page_size=<page_size> : (int) number of items on page during pagination
Returns: {Paginated[Output]}
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
if account_id:
account: LBCAccount = wallet.get_account_or_error(account_id)
supports = account.get_supports
support_count = account.get_support_count
else:
supports = partial(self.ledger.get_supports, wallet=wallet, accounts=wallet.accounts)
support_count = partial(self.ledger.get_support_count, wallet=wallet, accounts=wallet.accounts)
return maybe_paginate(supports, support_count, page, page_size)
@requires(WALLET_COMPONENT, conditions=[WALLET_IS_UNLOCKED])
async def jsonrpc_support_abandon(
self, claim_id=None, txid=None, nout=None, keep=None,
account_id=None, wallet_id=None, preview=False, blocking=False):
"""
Abandon supports, including tips, of a specific claim, optionally
keeping some amount as supports.
Usage:
support_abandon [--claim_id=<claim_id>] [(--txid=<txid> --nout=<nout>)] [--keep=<keep>]
[--account_id=<account_id>] [--wallet_id=<wallet_id>]
[--preview] [--blocking]
Options:
--claim_id=<claim_id> : (str) claim_id of the support to abandon
--txid=<txid> : (str) txid of the claim to abandon
--nout=<nout> : (int) nout of the claim to abandon
--keep=<keep> : (decimal) amount of lbc to keep as support
--account_id=<account_id> : (str) id of the account to use
--wallet_id=<wallet_id> : (str) restrict operation to specific wallet
--preview : (bool) do not broadcast the transaction
--blocking : (bool) wait until abandon is in mempool
Returns: {Transaction}
"""
if account_id:
account = self.get_account_or_error(account_id)
funding_accounts = [account]
get_supports = account.get_supports
else:
funding_accounts = self.ledger.accounts
get_supports = self.ledger.get_supports
if txid is not None and nout is not None:
supports = await get_supports(**{'txo.txid': txid, 'txo.position': nout})
elif claim_id is not None:
supports = await get_supports(claim_id=claim_id)
else:
raise Exception('Must specify claim_id, or txid and nout')
if not supports:
raise Exception('No supports found for the specified claim_id or txid:nout')
if keep is not None:
keep = self.get_dewies_or_error('keep', keep)
else:
keep = 0
outputs = []
if keep > 0:
outputs = [
Output.pay_support_pubkey_hash(
keep, supports[0].claim_name, supports[0].claim_id, supports[0].pubkey_hash
)
]
tx = await Transaction.create(
[Input.spend(txo) for txo in supports], outputs, funding_accounts, funding_accounts[0]
)
if not preview:
await self.broadcast_or_release(tx, blocking)
await self.analytics_manager.send_claim_action('abandon')
else:
await self.ledger.release_tx(tx)
return tx
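# Illustrative only (not part of the original source): abandoning supports while keeping
# part of the amount staked, e.g.
#   {"method": "support_abandon", "params": {"claim_id": "<claim_id>", "keep": "1.0"}}
# spends all matching supports and creates a single new support output worth 1.0 LBC.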
TRANSACTION_DOC = """
Transaction management.
"""
@requires(WALLET_COMPONENT)
def jsonrpc_transaction_list(self, account_id=None, wallet_id=None, page=None, page_size=None):
"""
List transactions belonging to wallet
Usage:
transaction_list [<account_id> | --account_id=<account_id>] [--wallet_id=<wallet_id>]
[--page=<page>] [--page_size=<page_size>]
Options:
--account_id=<account_id> : (str) id of the account to query
--wallet_id=<wallet_id> : (str) restrict results to specific wallet
--page=<page> : (int) page to return during paginating
--page_size=<page_size> : (int) number of items on page during pagination
Returns:
(list) List of transactions
{
"claim_info": (list) claim info if in txn [{
"address": (str) address of claim,
"balance_delta": (float) bid amount,
"amount": (float) claim amount,
"claim_id": (str) claim id,
"claim_name": (str) claim name,
"nout": (int) nout
}],
"abandon_info": (list) abandon info if in txn [{
"address": (str) address of abandoned claim,
"balance_delta": (float) returned amount,
"amount": (float) claim amount,
"claim_id": (str) claim id,
"claim_name": (str) claim name,
"nout": (int) nout
}],
"confirmations": (int) number of confirmations for the txn,
"date": (str) date and time of txn,
"fee": (float) txn fee,
"support_info": (list) support info if in txn [{
"address": (str) address of support,
"balance_delta": (float) support amount,
"amount": (float) support amount,
"claim_id": (str) claim id,
"claim_name": (str) claim name,
"is_tip": (bool),
"nout": (int) nout
}],
"timestamp": (int) timestamp,
"txid": (str) txn id,
"update_info": (list) update info if in txn [{
"address": (str) address of claim,
"balance_delta": (float) credited/debited
"amount": (float) absolute amount,
"claim_id": (str) claim id,
"claim_name": (str) claim name,
"nout": (int) nout
}],
"value": (float) value of txn
}
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
if account_id:
account: LBCAccount = wallet.get_account_or_error(account_id)
transactions = account.get_transaction_history
transaction_count = account.get_transaction_history_count
else:
transactions = partial(
self.ledger.get_transaction_history, wallet=wallet, accounts=wallet.accounts)
transaction_count = partial(
self.ledger.get_transaction_history_count, wallet=wallet, accounts=wallet.accounts)
return maybe_paginate(transactions, transaction_count, page, page_size)
@requires(WALLET_COMPONENT)
def jsonrpc_transaction_show(self, txid):
"""
Get a decoded transaction from a txid
Usage:
transaction_show (<txid> | --txid=<txid>)
Options:
--txid=<txid> : (str) txid of the transaction
Returns: {Transaction}
"""
return self.wallet_manager.get_transaction(txid)
UTXO_DOC = """
Unspent transaction management.
"""
@requires(WALLET_COMPONENT)
def jsonrpc_utxo_list(self, account_id=None, wallet_id=None, page=None, page_size=None):
"""
List unspent transaction outputs
Usage:
utxo_list [<account_id> | --account_id=<account_id>] [--wallet_id=<wallet_id>]
[--page=<page>] [--page_size=<page_size>]
Options:
--account_id=<account_id> : (str) id of the account to query
--wallet_id=<wallet_id> : (str) restrict results to specific wallet
--page=<page> : (int) page to return during paginating
--page_size=<page_size> : (int) number of items on page during pagination
Returns: {Paginated[Output]}
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
if account_id:
account = wallet.get_account_or_error(account_id)
utxos = account.get_utxos
utxo_count = account.get_utxo_count
else:
utxos = partial(self.ledger.get_utxos, wallet=wallet, accounts=wallet.accounts)
utxo_count = partial(self.ledger.get_utxo_count, wallet=wallet, accounts=wallet.accounts)
return maybe_paginate(utxos, utxo_count, page, page_size)
@requires(WALLET_COMPONENT)
def jsonrpc_utxo_release(self, account_id=None, wallet_id=None):
"""
When spending a UTXO it is locally locked to prevent double spends;
occasionally this can result in a UTXO remaining locked even though it was
never actually spent (the transaction failed to broadcast, was not accepted
by the blockchain node, etc.). This command releases the lock
on all UTXOs in your account.
Usage:
utxo_release [<account_id> | --account_id=<account_id>] [--wallet_id=<wallet_id>]
Options:
--account_id=<account_id> : (str) id of the account to query
--wallet_id=<wallet_id> : (str) restrict operation to specific wallet
Returns:
None
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
return wallet.get_account_or_default(account_id).release_all_outputs()
BLOB_DOC = """
Blob management.
"""
@requires(WALLET_COMPONENT, DHT_COMPONENT, BLOB_COMPONENT,
conditions=[WALLET_IS_UNLOCKED])
async def jsonrpc_blob_get(self, blob_hash, timeout=None, read=False):
"""
Download and return a blob
Usage:
blob_get (<blob_hash> | --blob_hash=<blob_hash>) [--timeout=<timeout>] [--read]
Options:
--blob_hash=<blob_hash> : (str) blob hash of the blob to get
--timeout=<timeout> : (int) timeout in number of seconds
Returns:
(str) Success/Fail message or (dict) decoded data
"""
blob = await download_blob(asyncio.get_event_loop(), self.conf, self.blob_manager, self.dht_node, blob_hash)
if read:
with blob.reader_context() as handle:
return handle.read().decode()
elif isinstance(blob, BlobBuffer):
log.warning("manually downloaded blob buffer could have missed garbage collection, clearing it")
blob.delete()
return "Downloaded blob %s" % blob_hash
@requires(BLOB_COMPONENT, DATABASE_COMPONENT)
async def jsonrpc_blob_delete(self, blob_hash):
"""
Delete a blob
Usage:
blob_delete (<blob_hash> | --blob_hash=<blob_hash>)
Options:
--blob_hash=<blob_hash> : (str) blob hash of the blob to delete
Returns:
(str) Success/fail message
"""
if not blob_hash or not is_valid_blobhash(blob_hash):
return f"Invalid blob hash to delete '{blob_hash}'"
streams = self.stream_manager.get_filtered_streams(sd_hash=blob_hash)
if streams:
await self.stream_manager.delete_stream(streams[0])
else:
await self.blob_manager.delete_blobs([blob_hash])
return "Deleted %s" % blob_hash
PEER_DOC = """
DHT / Blob Exchange peer commands.
"""
@requires(DHT_COMPONENT)
async def jsonrpc_peer_list(self, blob_hash, search_bottom_out_limit=None):
"""
Get peers for blob hash
Usage:
peer_list (<blob_hash> | --blob_hash=<blob_hash>)
[<search_bottom_out_limit> | --search_bottom_out_limit=<search_bottom_out_limit>]
Options:
--blob_hash=<blob_hash> : (str) find available peers for this blob hash
--search_bottom_out_limit=<search_bottom_out_limit> : (int) the number of search probes in a row
that don't find any new peers
before giving up and returning
Returns:
(list) List of contact dictionaries {'address': <peer ip>, 'udp_port': <dht port>, 'tcp_port': <peer port>,
'node_id': <peer node id>}
"""
if not is_valid_blobhash(blob_hash):
raise Exception("invalid blob hash")
if search_bottom_out_limit is not None:
search_bottom_out_limit = int(search_bottom_out_limit)
if search_bottom_out_limit <= 0:
raise Exception("invalid bottom out limit")
else:
search_bottom_out_limit = 4
peers = []
peer_q = asyncio.Queue(loop=self.component_manager.loop)
await self.dht_node._value_producer(blob_hash, peer_q)
while not peer_q.empty():
peers.extend(peer_q.get_nowait())
results = [
{
"node_id": hexlify(peer.node_id).decode(),
"address": peer.address,
"udp_port": peer.udp_port,
"tcp_port": peer.tcp_port,
}
for peer in peers
]
return results
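# Illustrative only (not part of the original source): a lookup such as
#   {"method": "peer_list", "params": {"blob_hash": "<blob_hash>"}}
# returns a list of {"address", "udp_port", "tcp_port", "node_id"} dictionaries as
# documented above.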
@requires(DATABASE_COMPONENT)
async def jsonrpc_blob_announce(self, blob_hash=None, stream_hash=None, sd_hash=None):
"""
Announce blobs to the DHT
Usage:
blob_announce (<blob_hash> | --blob_hash=<blob_hash>
| --stream_hash=<stream_hash> | --sd_hash=<sd_hash>)
Options:
--blob_hash=<blob_hash> : (str) announce a blob, specified by blob_hash
--stream_hash=<stream_hash> : (str) announce all blobs associated with
stream_hash
--sd_hash=<sd_hash> : (str) announce all blobs associated with
sd_hash and the sd_hash itself
Returns:
(bool) true if successful
"""
blob_hashes = []
if blob_hash:
blob_hashes.append(blob_hash)
elif stream_hash or sd_hash:
if sd_hash and stream_hash:
raise Exception("either the sd hash or the stream hash should be provided, not both")
if sd_hash:
stream_hash = await self.storage.get_stream_hash_for_sd_hash(sd_hash)
blobs = await self.storage.get_blobs_for_stream(stream_hash, only_completed=True)
blob_hashes.extend(blob.blob_hash for blob in blobs if blob.blob_hash is not None)
else:
raise Exception('single argument must be specified')
await self.storage.should_single_announce_blobs(blob_hashes, immediate=True)
return True
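# Illustrative only (not part of the original source): announcing a whole stream by its
# sd hash, e.g.
#   {"method": "blob_announce", "params": {"sd_hash": "<sd_hash>"}}
# queues the stream's completed blob hashes for immediate re-announcement to the DHT.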
@requires(BLOB_COMPONENT, WALLET_COMPONENT)
async def jsonrpc_blob_list(self, uri=None, stream_hash=None, sd_hash=None, needed=None,
finished=None, page_size=None, page=None):
"""
Returns blob hashes. If no filters are given, returns all blobs known by the blob manager
Usage:
blob_list [--needed] [--finished] [<uri> | --uri=<uri>]
[<stream_hash> | --stream_hash=<stream_hash>]
[<sd_hash> | --sd_hash=<sd_hash>]
[<page_size> | --page_size=<page_size>]
[<page> | --page=<page>]
Options:
--needed : (bool) only return needed blobs
--finished : (bool) only return finished blobs
--uri=<uri> : (str) filter blobs by stream in a uri
--stream_hash=<stream_hash> : (str) filter blobs by stream hash
--sd_hash=<sd_hash> : (str) filter blobs by sd hash
--page_size=<page_size> : (int) results page size
--page=<page> : (int) page of results to return
Returns:
(list) List of blob hashes
"""
if uri or stream_hash or sd_hash:
if uri:
metadata = (await self.resolve(uri))[uri]
sd_hash = utils.get_sd_hash(metadata)
stream_hash = await self.storage.get_stream_hash_for_sd_hash(sd_hash)
elif stream_hash:
sd_hash = await self.storage.get_sd_blob_hash_for_stream(stream_hash)
elif sd_hash:
stream_hash = await self.storage.get_stream_hash_for_sd_hash(sd_hash)
sd_hash = await self.storage.get_sd_blob_hash_for_stream(stream_hash)
if sd_hash:
blobs = [sd_hash]
else:
blobs = []
if stream_hash:
blobs.extend([b.blob_hash for b in (await self.storage.get_blobs_for_stream(stream_hash))[:-1]])
else:
blobs = list(self.blob_manager.completed_blob_hashes)
if needed:
blobs = [blob_hash for blob_hash in blobs if not self.blob_manager.is_blob_verified(blob_hash)]
if finished:
blobs = [blob_hash for blob_hash in blobs if self.blob_manager.is_blob_verified(blob_hash)]
page_size = page_size or len(blobs)
page = page or 0
start_index = page * page_size
stop_index = start_index + page_size
return blobs[start_index:stop_index]
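# Illustrative only (not part of the original source): paging through verified blobs,
#   {"method": "blob_list", "params": {"finished": true, "page": 0, "page_size": 50}}
# note that pages are zero-indexed here ("page" defaults to 0 when omitted).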
@requires(BLOB_COMPONENT)
async def jsonrpc_blob_reflect(self, blob_hashes, reflector_server=None):
"""
Reflects specified blobs
Usage:
blob_reflect (<blob_hashes>...) [--reflector_server=<reflector_server>]
Options:
--reflector_server=<reflector_server> : (str) reflector address
Returns:
(list) reflected blob hashes
"""
raise NotImplementedError()
@requires(BLOB_COMPONENT)
async def jsonrpc_blob_reflect_all(self):
"""
Reflects all saved blobs
Usage:
blob_reflect_all
Options:
None
Returns:
(bool) true if successful
"""
raise NotImplementedError()
@requires(STREAM_MANAGER_COMPONENT)
async def jsonrpc_file_reflect(self, **kwargs):
"""
Reflect all the blobs in a file matching the filter criteria
Usage:
file_reflect [--sd_hash=<sd_hash>] [--file_name=<file_name>]
[--stream_hash=<stream_hash>] [--rowid=<rowid>]
[--reflector=<reflector>]
Options:
--sd_hash=<sd_hash> : (str) get file with matching sd hash
--file_name=<file_name> : (str) get file with matching file name in the
downloads folder
--stream_hash=<stream_hash> : (str) get file with matching stream hash
--rowid=<rowid> : (int) get file with matching row id
--reflector=<reflector> : (str) reflector server, ip address or url
by default choose a server from the config
Returns:
(list) list of blobs reflected
"""
server, port = kwargs.get('server'), kwargs.get('port')
if server and port:
port = int(port)
else:
server, port = random.choice(self.conf.reflector_servers)
reflected = await asyncio.gather(*[
stream.upload_to_reflector(server, port)
for stream in self.stream_manager.get_filtered_streams(**kwargs)
])
total = []
for reflected_for_stream in reflected:
total.extend(reflected_for_stream)
return total
@requires(DHT_COMPONENT)
async def jsonrpc_peer_ping(self, node_id, address, port):
"""
Send a kademlia ping to the specified peer. If address and port are provided the peer is pinged directly;
if they are not provided, the peer is located first.
Usage:
peer_ping (<node_id> | --node_id=<node_id>) (<address> | --address=<address>) (<port> | --port=<port>)
Options:
None
Returns:
(str) pong, or {'error': <error message>} if an error is encountered
"""
peer = None
if node_id and address and port:
peer = make_kademlia_peer(unhexlify(node_id), address, udp_port=int(port))
try:
return await self.dht_node.protocol.get_rpc_peer(peer).ping()
except asyncio.TimeoutError:
return {'error': 'timeout'}
if not peer:
return {'error': 'peer not found'}
@requires(DHT_COMPONENT)
def jsonrpc_routing_table_get(self):
"""
Get DHT routing information
Usage:
routing_table_get
Options:
None
Returns:
(dict) dictionary containing routing and peer information
{
"buckets": {
<bucket index>: [
{
"address": (str) peer address,
"udp_port": (int) peer udp port,
"tcp_port": (int) peer tcp port,
"node_id": (str) peer node id,
}
]
},
"node_id": (str) the local dht node id
}
"""
result = {
'buckets': {}
}
for i in range(len(self.dht_node.protocol.routing_table.buckets)):
result['buckets'][i] = []
for peer in self.dht_node.protocol.routing_table.buckets[i].peers:
host = {
"address": peer.address,
"udp_port": peer.udp_port,
"tcp_port": peer.tcp_port,
"node_id": hexlify(peer.node_id).decode(),
}
result['buckets'][i].append(host)
result['node_id'] = hexlify(self.dht_node.protocol.node_id).decode()
return result
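# Illustrative only (not part of the original source): a parameterless call,
#   {"method": "routing_table_get", "params": {}}
# returns the bucket-by-bucket peer listing and local node id described above.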
COMMENT_DOC = """
View, create and abandon comments.
"""
@requires(WALLET_COMPONENT)
async def jsonrpc_comment_list(self, claim_id, parent_id=None, page=1, page_size=50,
include_replies=True, is_channel_signature_valid=False,
hidden=False, visible=False):
"""
List comments associated with a claim.
Usage:
comment_list (<claim_id> | --claim_id=<claim_id>)
[(--page=<page> --page_size=<page_size>)]
[--parent_id=<parent_id>] [--include_replies]
[--is_channel_signature_valid]
[--visible | --hidden]
Options:
--claim_id=<claim_id>  : (str) The claim on which the comment will be made
--parent_id=<parent_id> : (str) CommentId of a specific thread you'd like to see
--page=<page> : (int) The page you'd like to see in the comment list.
--page_size=<page_size> : (int) The amount of comments that you'd like to retrieve
--include_replies : (bool) Whether or not you want to include replies in list
--is_channel_signature_valid : (bool) Only include comments with valid signatures.
[Warning: Paginated total size will not change, even
if list reduces]
--visible : (bool) Select only Visible Comments
--hidden : (bool) Select only Hidden Comments
Returns:
(dict) Containing the list, and information about the paginated content:
{
"page": "Page number of the current items.",
"page_size": "Number of items to show on a page.",
"total_pages": "Total number of pages.",
"total_items": "Total number of items.",
"items": "A List of dict objects representing comments."
[
{
"comment": (str) The actual string as inputted by the user,
"comment_id": (str) The Comment's unique identifier,
"channel_name": (str) Name of the channel this was posted under, prepended with a '@',
"channel_id": (str) The Channel Claim ID that this comment was posted under,
"signature": (str) The signature of the comment,
"channel_url": (str) Channel's URI in the ClaimTrie,
"parent_id": (str) Comment this is replying to, (None) if this is the root,
"timestamp": (int) The time at which comment was entered into the server at, in nanoseconds.
},
...
]
}
"""
if hidden ^ visible:
result = await comment_client.jsonrpc_post(
self.conf.comment_server,
'get_claim_hidden_comments',
claim_id=claim_id,
hidden=hidden,
page=page,
page_size=page_size
)
else:
result = await comment_client.jsonrpc_post(
self.conf.comment_server,
'get_claim_comments',
claim_id=claim_id,
parent_id=parent_id,
page=page,
page_size=page_size,
top_level=not include_replies
)
for comment in result.get('items', []):
channel_url = comment.get('channel_url')
if not channel_url:
continue
resolve_response = await self.resolve([channel_url])
if isinstance(resolve_response[channel_url], Output):
comment['is_channel_signature_valid'] = comment_client.is_comment_signed_by_channel(
comment, resolve_response[channel_url]
)
else:
comment['is_channel_signature_valid'] = False
if is_channel_signature_valid:
result['items'] = [
c for c in result.get('items', []) if c.get('is_channel_signature_valid', False)
]
return result
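# Illustrative only (not part of the original source): listing only top-level comments
# whose channel signatures verify, e.g.
#   {"method": "comment_list", "params": {"claim_id": "<claim_id>",
#                                         "include_replies": false,
#                                         "is_channel_signature_valid": true}}
# drops items whose signature could not be validated against the resolved channel.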
@requires(WALLET_COMPONENT)
async def jsonrpc_comment_create(self, claim_id, comment, parent_id=None, channel_account_id=None,
channel_name=None, channel_id=None, wallet_id=None):
"""
Create and associate a comment with a claim using your channel identity.
Usage:
comment_create (<comment> | --comment=<comment>)
(<claim_id> | --claim_id=<claim_id>)
[--parent_id=<parent_id>]
[--channel_id=<channel_id>] [--channel_name=<channel_name>]
[--channel_account_id=<channel_account_id>...] [--wallet_id=<wallet_id>]
Options:
--comment=<comment> : (str) Comment to be made, should be at most 2000 characters.
--claim_id=<claim_id> : (str) The ID of the claim to comment on
--parent_id=<parent_id> : (str) The ID of a comment to make a response to
--channel_id=<channel_id> : (str) The ID of the channel you want to post under
--channel_name=<channel_name> : (str) The channel you want to post as, prepend with a '@'
--channel_account_id=<channel_account_id> : (str) one or more account ids for accounts to look in
for channel certificates, defaults to all accounts.
--wallet_id=<wallet_id> : (str) restrict operation to specific wallet
Returns:
(dict) Comment object if successfully made, (None) otherwise
{
"comment": (str) The actual string as inputted by the user,
"comment_id": (str) The Comment's unique identifier,
"channel_name": (str) Name of the channel this was posted under, prepended with a '@',
"channel_id": (str) The Channel Claim ID that this comment was posted under,
"signature": (str) The signature of the comment,
"channel_url": (str) Channel's URI in the ClaimTrie,
"parent_id": (str) Comment this is replying to, (None) if this is the root,
"timestamp": (int) The time at which comment was entered into the server at, in nanoseconds.
}
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
comment_body = {
'comment': comment.strip(),
'claim_id': claim_id,
'parent_id': parent_id,
}
channel = await self.get_channel_or_none(
wallet, channel_account_id, channel_id, channel_name, for_signing=True
)
if channel:
comment_body.update({
'channel_id': channel.claim_id,
'channel_name': channel.claim_name,
})
comment_client.sign_comment(comment_body, channel)
response = await comment_client.jsonrpc_post(self.conf.comment_server, 'create_comment', comment_body)
if 'signature' in response:
response['is_claim_signature_valid'] = comment_client.is_comment_signed_by_channel(response, channel)
return response
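# Illustrative only (not part of the original source): replying to an existing comment
# under a channel identity (the channel name below is made up), e.g.
#   {"method": "comment_create", "params": {"claim_id": "<claim_id>",
#                                           "comment": "thanks!",
#                                           "parent_id": "<comment_id>",
#                                           "channel_name": "@example-channel"}}
# signs the comment body with the channel's key before posting it to the comment server.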
@requires(WALLET_COMPONENT)
async def jsonrpc_comment_abandon(self, comment_id, wallet_id=None):
"""
Abandon a comment published under your channel identity.
Usage:
comment_abandon (<comment_id> | --comment_id=<comment_id>) [--wallet_id=<wallet_id>]
Options:
--comment_id=<comment_id> : (str) The ID of the comment to be abandoned.
--wallet_id=<wallet_id> : (str) restrict operation to specific wallet
Returns:
(dict) Object with the `comment_id` passed in as the key, and a flag indicating if it was abandoned
{
<comment_id> (str): {
"abandoned": (bool)
}
}
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
abandon_comment_body = {'comment_id': comment_id}
channel = await comment_client.jsonrpc_post(
self.conf.comment_server, 'get_channel_from_comment_id', comment_id=comment_id
)
if 'error' in channel:
return {comment_id: {'abandoned': False}}
channel = await self.get_channel_or_none(wallet, None, **channel)
abandon_comment_body.update({
'channel_id': channel.claim_id,
'channel_name': channel.claim_name,
})
comment_client.sign_comment(abandon_comment_body, channel, abandon=True)
return await comment_client.jsonrpc_post(self.conf.comment_server, 'abandon_comment', abandon_comment_body)
@requires(WALLET_COMPONENT)
async def jsonrpc_comment_hide(self, comment_ids: typing.Union[str, list], wallet_id=None):
"""
Hide a comment published to a claim you control.
Usage:
comment_hide <comment_ids>... [--wallet_id=<wallet_id>]
Options:
--comment_ids=<comment_ids> : (str, list) one or more comment_id to hide.
--wallet_id=<wallet_id> : (str) restrict operation to specific wallet
Returns:
(dict) keyed by comment_id, containing success info
'<comment_id>': {
"hidden": (bool) flag indicating if comment_id was hidden
}
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
if isinstance(comment_ids, str):
comment_ids = [comment_ids]
comments = await comment_client.jsonrpc_post(
self.conf.comment_server, 'get_comments_by_id', comment_ids=comment_ids
)
claim_ids = {comment['claim_id'] for comment in comments}
claims = {cid: await self.ledger.get_claim_by_claim_id(claim_id=cid) for cid in claim_ids}
pieces = []
for comment in comments:
claim = claims.get(comment['claim_id'])
if claim:
channel = await self.get_channel_or_none(
wallet,
account_ids=[],
channel_id=claim.channel.claim_id,
channel_name=claim.channel.claim_name,
for_signing=True
)
piece = {'comment_id': comment['comment_id']}
comment_client.sign_comment(piece, channel, abandon=True)
pieces.append(piece)
return await comment_client.jsonrpc_post(self.conf.comment_server, 'hide_comments', pieces=pieces)
async def broadcast_or_release(self, tx, blocking=False):
try:
await self.ledger.broadcast(tx)
if blocking:
await self.ledger.wait(tx)
except:
await self.ledger.release_tx(tx)
raise
def valid_address_or_error(self, address):
try:
assert self.ledger.is_valid_address(address)
except:
raise Exception(f"'{address}' is not a valid address")
@staticmethod
def valid_stream_name_or_error(name: str):
try:
if not name:
raise Exception('Stream name cannot be blank.')
parsed = URL.parse(name)
if parsed.has_channel:
raise Exception(
"Stream names cannot start with '@' symbol. This is reserved for channels claims."
)
if not parsed.has_stream or parsed.stream.name != name:
raise Exception('Stream name has invalid characters.')
except (TypeError, ValueError):
raise Exception("Invalid stream name.")
@staticmethod
def valid_channel_name_or_error(name: str):
try:
if not name:
raise Exception(
"Channel name cannot be blank."
)
parsed = URL.parse(name)
if not parsed.has_channel:
raise Exception("Channel names must start with '@' symbol.")
if parsed.channel.name != name:
raise Exception("Channel name has invalid character")
except (TypeError, ValueError):
raise Exception("Invalid channel name.")
def get_fee_address(self, kwargs: dict, claim_address: str) -> str:
if 'fee_address' in kwargs:
self.valid_address_or_error(kwargs['fee_address'])
return kwargs['fee_address']
if 'fee_currency' in kwargs or 'fee_amount' in kwargs:
return claim_address
async def get_receiving_address(self, address: str, account: Optional[LBCAccount]) -> str:
if address is None and account is not None:
return await account.receiving.get_or_create_usable_address()
self.valid_address_or_error(address)
return address
async def get_channel_or_none(
self, wallet: Wallet, account_ids: List[str], channel_id: str = None,
channel_name: str = None, for_signing: bool = False) -> Output:
if channel_id is not None or channel_name is not None:
return await self.get_channel_or_error(
wallet, account_ids, channel_id, channel_name, for_signing
)
async def get_channel_or_error(
self, wallet: Wallet, account_ids: List[str], channel_id: str = None,
channel_name: str = None, for_signing: bool = False) -> Output:
if channel_id:
key, value = 'id', channel_id
elif channel_name:
key, value = 'name', channel_name
else:
raise ValueError("Couldn't find channel because a channel_id or channel_name was not provided.")
channels = await self.ledger.get_channels(
wallet=wallet, accounts=wallet.get_accounts_or_all(account_ids),
**{f'claim_{key}': value}
)
if len(channels) == 1:
if for_signing and not channels[0].has_private_key:
raise Exception(f"Couldn't find private key for {key} '{value}'. ")
return channels[0]
elif len(channels) > 1:
raise ValueError(
f"Multiple channels found with channel_{key} '{value}', "
f"pass a channel_id to narrow it down."
)
raise ValueError(f"Couldn't find channel with channel_{key} '{value}'.")
@staticmethod
def get_dewies_or_error(argument: str, lbc: str, positive_value=False):
try:
dewies = lbc_to_dewies(lbc)
if positive_value and dewies <= 0:
raise ValueError(f"'{argument}' value must be greater than 0.0")
return dewies
except ValueError as e:
raise ValueError(f"Invalid value for '{argument}': {e.args[0]}")
async def resolve(self, urls):
results = await self.ledger.resolve(urls)
if results:
try:
claims = self.stream_manager._convert_to_old_resolve_output(results)
await self.storage.save_claims_for_resolve([
value for value in claims.values() if 'error' not in value
])
except DecodeError:
pass
return results
def _old_get_temp_claim_info(self, tx, txo, address, claim_dict, name, bid):
return {
"claim_id": txo.claim_id,
"name": name,
"amount": bid,
"address": address,
"txid": tx.id,
"nout": txo.position,
"value": claim_dict,
"height": -1,
"claim_sequence": -1,
}
def loggly_time_string(dt):
formatted_dt = dt.strftime("%Y-%m-%dT%H:%M:%S")
milliseconds = str(round(dt.microsecond * (10.0 ** -5), 3))
return quote(formatted_dt + milliseconds + "Z")
def get_loggly_query_string(installation_id):
base_loggly_search_url = "https://lbry.loggly.com/search#"
now = utils.now()
yesterday = now - utils.timedelta(days=1)
params = {
'terms': 'json.installation_id:{}*'.format(installation_id[:SHORT_ID_LEN]),
'from': loggly_time_string(yesterday),
'to': loggly_time_string(now)
}
data = urlencode(params)
return base_loggly_search_url + data
| 47.672469
| 120
| 0.555025
|
2e482fc9bf05b969026d957e06273747c0f81cf3
| 847
|
py
|
Python
|
venv_py36/Lib/site-packages/PyInstaller/hooks/hook-pydoc.py
|
PeterMoresco/RefriCalcSoft
|
1ed728ef1937fdda248cee19d97b3d13bd98af03
|
[
"MIT"
] | 1
|
2018-09-12T06:30:21.000Z
|
2018-09-12T06:30:21.000Z
|
venv_py36/Lib/site-packages/PyInstaller/hooks/hook-pydoc.py
|
PeterMoresco/RefriCalcSoft
|
1ed728ef1937fdda248cee19d97b3d13bd98af03
|
[
"MIT"
] | 1
|
2018-09-12T06:32:17.000Z
|
2018-09-12T19:03:50.000Z
|
venv/lib/python3.6/site-packages/PyInstaller/hooks/hook-pydoc.py
|
rilakkyuma/tweetdelete
|
5ac4001b2ba7c7d87379e616c93361c2090ed4ae
|
[
"MIT"
] | 2
|
2018-12-29T07:49:59.000Z
|
2020-03-18T02:44:31.000Z
|
#-----------------------------------------------------------------------------
# Copyright (c) 2005-2018, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
"""
The Python 2 module 'pydoc' causes the inclusion of the Tcl/Tk library even in the case
of a simple hello_world script. Most of the time we do not want this behavior.
'pydoc' from Python 3 does not have this dependency.
This hook just removes this implicit dependency on Tcl/Tk.
"""
from PyInstaller.compat import is_py2, modname_tkinter
# Ignore 'Tkinter' to prevent inclusion of Tcl/Tk library.
if is_py2:
excludedimports = [modname_tkinter]
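# Illustrative only (not part of the original hook): 'excludedimports' is the hook
# variable PyInstaller reads to prune modules from the dependency graph, so a
# hypothetical hook that always dropped tkinter on Python 3 might simply be:
#   excludedimports = ['tkinter']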
| 33.88
| 78
| 0.62928
|
66a1fea5f9fbb55747a42dabb0b5bb2b5d891907
| 583
|
py
|
Python
|
P897.py
|
Muntaha-Islam0019/Leetcode-Solutions
|
0bc56ce43a6d8ad10461b69078166a2a5b913e7f
|
[
"MIT"
] | null | null | null |
P897.py
|
Muntaha-Islam0019/Leetcode-Solutions
|
0bc56ce43a6d8ad10461b69078166a2a5b913e7f
|
[
"MIT"
] | null | null | null |
P897.py
|
Muntaha-Islam0019/Leetcode-Solutions
|
0bc56ce43a6d8ad10461b69078166a2a5b913e7f
|
[
"MIT"
] | null | null | null |
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def increasingBST(self, root):
"""
:type root: TreeNode
:rtype: TreeNode
"""
def increasingBSTHelper(root, tail):
if not root:
return tail
result = increasingBSTHelper(root.left, root)
root.left = None
root.right = increasingBSTHelper(root.right, tail)
return result
return increasingBSTHelper(root, None)
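# Illustrative usage (not part of the original submission): flatten a small BST and
# verify the result is an increasing, right-skewed tree.
if __name__ == "__main__":
    root = TreeNode(2)
    root.left = TreeNode(1)
    root.right = TreeNode(3)
    node = Solution().increasingBST(root)
    values = []
    while node:
        assert node.left is None  # every node should have had its left child cleared
        values.append(node.val)
        node = node.right
    print(values)  # expected: [1, 2, 3]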
| 26.5
| 62
| 0.555746
|
c655eed4110d81f441ae2f7440e6a17791bd9285
| 4,085
|
py
|
Python
|
test/functional/feature_minchainwork.py
|
derek-mckinney/GrumpyCat
|
4f3a54396e55f4bd8b94ec0b59756bceb335d457
|
[
"MIT"
] | 431
|
2015-01-21T03:57:18.000Z
|
2022-03-30T17:17:18.000Z
|
test/functional/feature_minchainwork.py
|
derek-mckinney/GrumpyCat
|
4f3a54396e55f4bd8b94ec0b59756bceb335d457
|
[
"MIT"
] | 140
|
2015-02-04T07:15:14.000Z
|
2022-02-07T03:37:28.000Z
|
test/functional/feature_minchainwork.py
|
derek-mckinney/GrumpyCat
|
4f3a54396e55f4bd8b94ec0b59756bceb335d457
|
[
"MIT"
] | 249
|
2015-01-03T19:48:55.000Z
|
2022-02-23T09:46:33.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2009-2019 The Bitcoin Core developers
# Copyright (c) 2014-2019 The DigiByte Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test logic for setting nMinimumChainWork on command line.
Nodes don't consider themselves out of "initial block download" until
their active chain has more work than nMinimumChainWork.
Nodes don't download blocks from a peer unless the peer's best known block
has more work than nMinimumChainWork.
While in initial block download, nodes won't relay blocks to their peers, so
test that this parameter functions as intended by verifying that block relay
only succeeds past a given node once its nMinimumChainWork has been exceeded.
"""
import time
from test_framework.test_framework import DigiByteTestFramework
from test_framework.util import connect_nodes, assert_equal
# 2 hashes required per regtest block (with no difficulty adjustment)
REGTEST_WORK_PER_BLOCK = 2
class MinimumChainWorkTest(DigiByteTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [[], ["-minimumchainwork=0x65"], ["-minimumchainwork=0x65"]]
self.node_min_work = [0, 101, 101]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
# This test relies on the chain setup being:
# node0 <- node1 <- node2
# Before leaving IBD, nodes prefer to download blocks from outbound
# peers, so ensure that we're mining on an outbound peer and testing
# block relay to inbound peers.
self.setup_nodes()
for i in range(self.num_nodes-1):
connect_nodes(self.nodes[i+1], i)
def run_test(self):
# Start building a chain on node0. node2 shouldn't be able to sync until node1's
# minchainwork is exceeded
starting_chain_work = REGTEST_WORK_PER_BLOCK # Genesis block's work
self.log.info("Testing relay across node %d (minChainWork = %d)", 1, self.node_min_work[1])
starting_blockcount = self.nodes[2].getblockcount()
num_blocks_to_generate = int((self.node_min_work[1] - starting_chain_work) / REGTEST_WORK_PER_BLOCK)
self.log.info("Generating %d blocks on node0", num_blocks_to_generate)
hashes = self.nodes[0].generate(num_blocks_to_generate)
self.log.info("Node0 current chain work: %s", self.nodes[0].getblockheader(hashes[-1])['chainwork'])
# Sleep a few seconds and verify that node2 didn't get any new blocks
# or headers. We sleep, rather than sync_blocks(node0, node1) because
# it's reasonable either way for node1 to get the blocks, or not get
# them (since they're below node1's minchainwork).
time.sleep(3)
self.log.info("Verifying node 2 has no more blocks than before")
self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])
# Node2 shouldn't have any new headers yet, because node1 should not
# have relayed anything.
assert_equal(len(self.nodes[2].getchaintips()), 1)
assert_equal(self.nodes[2].getchaintips()[0]['height'], 0)
assert self.nodes[1].getbestblockhash() != self.nodes[0].getbestblockhash()
assert_equal(self.nodes[2].getblockcount(), starting_blockcount)
self.log.info("Generating one more block")
self.nodes[0].generate(1)
self.log.info("Verifying nodes are all synced")
# Because nodes in regtest are all manual connections (eg using
# addnode), node1 should not have disconnected node0. If not for that,
# we'd expect node1 to have disconnected node0 for serving an
# insufficient work chain, in which case we'd need to reconnect them to
# continue the test.
self.sync_all()
self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])
if __name__ == '__main__':
MinimumChainWorkTest().main()
| 43.457447
| 108
| 0.70355
|
94b9d8da4857c0aee72b42480c8c11c8eaac3409
| 13,028
|
py
|
Python
|
etl/parsers/etw/Intel_iaLPSS2_GPIO2.py
|
IMULMUL/etl-parser
|
76b7c046866ce0469cd129ee3f7bb3799b34e271
|
[
"Apache-2.0"
] | 104
|
2020-03-04T14:31:31.000Z
|
2022-03-28T02:59:36.000Z
|
etl/parsers/etw/Intel_iaLPSS2_GPIO2.py
|
IMULMUL/etl-parser
|
76b7c046866ce0469cd129ee3f7bb3799b34e271
|
[
"Apache-2.0"
] | 7
|
2020-04-20T09:18:39.000Z
|
2022-03-19T17:06:19.000Z
|
etl/parsers/etw/Intel_iaLPSS2_GPIO2.py
|
IMULMUL/etl-parser
|
76b7c046866ce0469cd129ee3f7bb3799b34e271
|
[
"Apache-2.0"
] | 16
|
2020-03-05T18:55:59.000Z
|
2022-03-01T10:19:28.000Z
|
# -*- coding: utf-8 -*-
"""
Intel-iaLPSS2-GPIO2
GUID : 63848cff-3ec7-4ddf-8072-5f95e8c8eb98
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1001, version=0)
class Intel_iaLPSS2_GPIO2_1001_0(Etw):
pattern = Struct(
"Message" / WString,
"Status" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1002, version=0)
class Intel_iaLPSS2_GPIO2_1002_0(Etw):
pattern = Struct(
"Message" / WString
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1003, version=0)
class Intel_iaLPSS2_GPIO2_1003_0(Etw):
pattern = Struct(
"Message" / CString
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1004, version=0)
class Intel_iaLPSS2_GPIO2_1004_0(Etw):
pattern = Struct(
"Message" / CString
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1005, version=0)
class Intel_iaLPSS2_GPIO2_1005_0(Etw):
pattern = Struct(
"Message" / CString,
"Status" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1013, version=0)
class Intel_iaLPSS2_GPIO2_1013_0(Etw):
pattern = Struct(
"Status" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1014, version=0)
class Intel_iaLPSS2_GPIO2_1014_0(Etw):
pattern = Struct(
"Status" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1017, version=0)
class Intel_iaLPSS2_GPIO2_1017_0(Etw):
pattern = Struct(
"Status" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1018, version=0)
class Intel_iaLPSS2_GPIO2_1018_0(Etw):
pattern = Struct(
"Status" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1023, version=0)
class Intel_iaLPSS2_GPIO2_1023_0(Etw):
pattern = Struct(
"Status" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1024, version=0)
class Intel_iaLPSS2_GPIO2_1024_0(Etw):
pattern = Struct(
"Status" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1025, version=0)
class Intel_iaLPSS2_GPIO2_1025_0(Etw):
pattern = Struct(
"Status" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1026, version=0)
class Intel_iaLPSS2_GPIO2_1026_0(Etw):
pattern = Struct(
"Instance" / Int32ul,
"Version" / Int32ul,
"Revision" / Int32ul,
"Mode" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1027, version=0)
class Intel_iaLPSS2_GPIO2_1027_0(Etw):
pattern = Struct(
"Instance" / Int32ul,
"Version" / Int32ul,
"Revision" / Int32ul,
"Mode" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1031, version=0)
class Intel_iaLPSS2_GPIO2_1031_0(Etw):
pattern = Struct(
"Message" / CString
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1101, version=0)
class Intel_iaLPSS2_GPIO2_1101_0(Etw):
pattern = Struct(
"BankNo" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1102, version=0)
class Intel_iaLPSS2_GPIO2_1102_0(Etw):
pattern = Struct(
"BankName" / CString,
"PinNo" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1103, version=0)
class Intel_iaLPSS2_GPIO2_1103_0(Etw):
pattern = Struct(
"BankName" / CString,
"PinNo" / Int32ul,
"PinOwnership" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1104, version=0)
class Intel_iaLPSS2_GPIO2_1104_0(Etw):
pattern = Struct(
"BankName" / CString,
"PinNo" / Int32ul,
"PinMode" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1110, version=0)
class Intel_iaLPSS2_GPIO2_1110_0(Etw):
pattern = Struct(
"Status" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1111, version=0)
class Intel_iaLPSS2_GPIO2_1111_0(Etw):
pattern = Struct(
"MBAR" / Int32ul,
"PA" / Int64ul,
"LEN" / Int32ul,
"VA" / Int64ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1112, version=0)
class Intel_iaLPSS2_GPIO2_1112_0(Etw):
pattern = Struct(
"MBAR" / Int32ul,
"PA" / Int64ul,
"LEN" / Int32ul,
"Status" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1113, version=0)
class Intel_iaLPSS2_GPIO2_1113_0(Etw):
pattern = Struct(
"MBAR" / Int32ul,
"PA" / Int64ul,
"LEN" / Int32ul,
"Status" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1114, version=0)
class Intel_iaLPSS2_GPIO2_1114_0(Etw):
pattern = Struct(
"Vector" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1115, version=0)
class Intel_iaLPSS2_GPIO2_1115_0(Etw):
pattern = Struct(
"MBAR_current" / Int32ul,
"MBAR_expected" / Int32ul,
"INT_current" / Int32ul,
"INT_expected" / Int32ul,
"Status" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1121, version=0)
class Intel_iaLPSS2_GPIO2_1121_0(Etw):
pattern = Struct(
"BankName" / CString,
"PinNo" / Int32ul,
"IntMode" / Int32ul,
"IntPolartity" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1122, version=0)
class Intel_iaLPSS2_GPIO2_1122_0(Etw):
pattern = Struct(
"BankName" / CString,
"PinNo" / Int32ul,
"IntMode" / Int32ul,
"IntPolartity" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1123, version=0)
class Intel_iaLPSS2_GPIO2_1123_0(Etw):
pattern = Struct(
"BankName" / CString,
"PinNo" / Int32ul,
"PullMode" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1124, version=0)
class Intel_iaLPSS2_GPIO2_1124_0(Etw):
pattern = Struct(
"BankName" / CString,
"PinNo" / Int32ul,
"PullMode" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1125, version=0)
class Intel_iaLPSS2_GPIO2_1125_0(Etw):
pattern = Struct(
"BankName" / CString,
"PinNo" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1126, version=0)
class Intel_iaLPSS2_GPIO2_1126_0(Etw):
pattern = Struct(
"BankName" / CString,
"PinNo" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1127, version=0)
class Intel_iaLPSS2_GPIO2_1127_0(Etw):
pattern = Struct(
"BankName" / CString,
"MaskSet" / Int32ul,
"MaskRequested" / Int32ul,
"MaskFailed" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1128, version=0)
class Intel_iaLPSS2_GPIO2_1128_0(Etw):
pattern = Struct(
"BankName" / CString,
"PinNo" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1129, version=0)
class Intel_iaLPSS2_GPIO2_1129_0(Etw):
pattern = Struct(
"BankName" / CString,
"Active" / Int32ul,
"ActiveRaw" / Int32ul,
"ActiveMask" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1130, version=0)
class Intel_iaLPSS2_GPIO2_1130_0(Etw):
pattern = Struct(
"BankName" / CString,
"Enabled" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1131, version=0)
class Intel_iaLPSS2_GPIO2_1131_0(Etw):
pattern = Struct(
"BankName" / CString,
"MaskSet" / Int32ul,
"MaskRequested" / Int32ul,
"MaskFailed" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1132, version=0)
class Intel_iaLPSS2_GPIO2_1132_0(Etw):
pattern = Struct(
"BankName" / CString,
"PinNo" / Int32ul,
"PinIoMode" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1133, version=0)
class Intel_iaLPSS2_GPIO2_1133_0(Etw):
pattern = Struct(
"BankName" / CString,
"PinNo" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1134, version=0)
class Intel_iaLPSS2_GPIO2_1134_0(Etw):
pattern = Struct(
"BankName" / CString,
"PinNo" / Int32ul,
"PinState" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1135, version=0)
class Intel_iaLPSS2_GPIO2_1135_0(Etw):
pattern = Struct(
"BankName" / CString,
"PinNo" / Int32ul,
"PinState" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1136, version=0)
class Intel_iaLPSS2_GPIO2_1136_0(Etw):
pattern = Struct(
"BankName" / CString,
"PinNo" / Int32ul,
"PinState" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1137, version=0)
class Intel_iaLPSS2_GPIO2_1137_0(Etw):
pattern = Struct(
"BankName" / CString,
"PinNo" / Int32ul,
"PinIoMode" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1138, version=0)
class Intel_iaLPSS2_GPIO2_1138_0(Etw):
pattern = Struct(
"BankName" / CString,
"PinNo" / Int32ul,
"PinIoMode" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1139, version=0)
class Intel_iaLPSS2_GPIO2_1139_0(Etw):
pattern = Struct(
"BankName" / CString,
"PinNo" / Int32ul,
"PinIoModeCurrent" / Int32ul,
"PinIoModeRequested" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1140, version=0)
class Intel_iaLPSS2_GPIO2_1140_0(Etw):
pattern = Struct(
"BankName" / CString,
"PinNo" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1141, version=0)
class Intel_iaLPSS2_GPIO2_1141_0(Etw):
pattern = Struct(
"BankName" / CString,
"PinNo" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1142, version=0)
class Intel_iaLPSS2_GPIO2_1142_0(Etw):
pattern = Struct(
"BankName" / CString,
"PinNo" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1143, version=0)
class Intel_iaLPSS2_GPIO2_1143_0(Etw):
pattern = Struct(
"BankName" / CString,
"PinNo" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1144, version=0)
class Intel_iaLPSS2_GPIO2_1144_0(Etw):
pattern = Struct(
"BankName" / CString,
"PinNo" / Int32ul,
"PinIoMode" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1150, version=0)
class Intel_iaLPSS2_GPIO2_1150_0(Etw):
pattern = Struct(
"BankName" / CString,
"PinNo" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1151, version=0)
class Intel_iaLPSS2_GPIO2_1151_0(Etw):
pattern = Struct(
"BankName" / CString
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1152, version=0)
class Intel_iaLPSS2_GPIO2_1152_0(Etw):
pattern = Struct(
"BankName" / CString,
"PinNo" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1153, version=0)
class Intel_iaLPSS2_GPIO2_1153_0(Etw):
pattern = Struct(
"BankName" / CString
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1201, version=0)
class Intel_iaLPSS2_GPIO2_1201_0(Etw):
pattern = Struct(
"BankName" / CString,
"PinNo" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1202, version=0)
class Intel_iaLPSS2_GPIO2_1202_0(Etw):
pattern = Struct(
"BankName" / CString,
"PinNo" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1203, version=0)
class Intel_iaLPSS2_GPIO2_1203_0(Etw):
pattern = Struct(
"BankName" / CString,
"PinNo" / Int32ul
)
@declare(guid=guid("63848cff-3ec7-4ddf-8072-5f95e8c8eb98"), event_id=1204, version=0)
class Intel_iaLPSS2_GPIO2_1204_0(Etw):
pattern = Struct(
"BankName" / CString,
"PinNo" / Int32ul
)
| 26.75154
| 123
| 0.664031
|
8837caff6dc2c9b603f8fbb3ff1b11a1dfe33e75
| 3,834
|
py
|
Python
|
lm_eval/tasks/cbt.py
|
JunShern/lm-evaluation-harness
|
84aa15c6e4cb65adf39c2dccf91a799cc7e6440a
|
[
"MIT"
] | 203
|
2021-01-08T16:39:09.000Z
|
2022-03-31T06:03:16.000Z
|
lm_eval/tasks/cbt.py
|
JunShern/lm-evaluation-harness
|
84aa15c6e4cb65adf39c2dccf91a799cc7e6440a
|
[
"MIT"
] | 183
|
2020-12-27T03:41:08.000Z
|
2022-03-19T21:56:53.000Z
|
lm_eval/tasks/cbt.py
|
JunShern/lm-evaluation-harness
|
84aa15c6e4cb65adf39c2dccf91a799cc7e6440a
|
[
"MIT"
] | 73
|
2021-01-05T22:37:01.000Z
|
2022-03-29T10:14:53.000Z
|
import numpy as np
from lm_eval.base import rf
from lm_eval.metrics import mean
from .common import HFTask
class CBTBase(HFTask):
"""The Children’s Book Test (CBT) from the paper:
https://research.fb.com/wp-content/uploads/2016/11/the_goldilocks_principle_reading_children_s_books_with_explicit_memory_representations.pdf
NOTE: This evaluation is based on the (context + query) question-answering variant
used by the Recurrent Language Models described in the aforementioned paper.
See section 4.4.
"""
DATASET_PATH = "cbt"
DATASET_NAME = None
VERSION = 0
def fewshot_description(self):
# TODO: Figure out description.
return ""
def detokenize(self, text):
text = text.replace(" '", "'")
text = text.replace(" \n", "\n")
text = text.replace("\n ", "\n")
text = text.replace(" n't", "n't")
text = text.replace("`` ", '"')
text = text.replace("''", '"')
# punctuation
text = text.replace(" :", ":")
text = text.replace(" ;", ";")
text = text.replace(" !", "!")
text = text.replace(" ?", "?")
text = text.replace(" ,", ",")
text = text.replace(" .", ".")
return text
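# Illustrative only (not part of the original task): the replacements above undo the
# word-level tokenization of the CBT passages, e.g.
#   self.detokenize("She said , `` hello '' .") -> 'She said, "hello".'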
def doc_to_text(self, doc):
passage = " ".join(doc["sentences"])
text = "Passage: " + passage + "\nQuestion: " + doc["question"]
return self.detokenize(text)
def doc_to_target(self, doc):
return ""
def fewshot_examples(self, k, rnd):
assert k == 0, f"CBT is only implemented for the zero-shot setting. Given k={k}."
return super().fewshot_examples(k, rnd)
def construct_requests(self, doc, ctx):
"""Uses RequestFactory to construct Requests and returns an iterable of
Requests which will be sent to the LM.
:param doc:
The document as returned from training_docs, validation_docs, or test_docs.
:param ctx: str
The context string, generated by fewshot_context. This includes the natural
language description, as well as the few shot examples, and the question
part of the document for `doc`.
"""
lls = []
for option in doc["options"]:
# Following Section 4.4 "Recurrent Language Models" in the CBT paper:
# "we rank candidate [option] c based on p(q1 . . . qk−1, c, qk+1 . . . ql)
# rather than simply p(q1 . . . qk−1, c)."
lls.append(rf.loglikelihood("", ctx.replace("XXXXX", option))[0])
return lls
def process_results(self, doc, results):
"""Take a single document and the LM results and evaluates, returning a
dict where keys are the names of submetrics and values are the values of
the metric for that one document
:param doc:
The document as returned from training_docs, validation_docs, or test_docs.
:param results:
The results of the requests created in construct_requests.
"""
gold = doc["options"].index(doc["answer"])
pred = np.argmax(results)
return {
"acc": pred == gold
}
def aggregation(self):
"""
:returns: {str: [float] -> float}
A dictionary where keys are the names of submetrics and values are
functions that aggregate a list of metrics
"""
return {
"acc": mean
}
def higher_is_better(self):
"""
:returns: {str: bool}
A dictionary where keys are the names of submetrics and values are
whether a higher value of the submetric is better
"""
return {
"acc": True
}
class CBTCN(CBTBase):
DATASET_NAME = "CN"
class CBTNE(CBTBase):
DATASET_NAME = "NE"
| 33.631579
| 145
| 0.588419
|
e5026540ae7f67727310455a26be832de3cfaebd
| 6,432
|
py
|
Python
|
devops.py
|
tiholic/django-pgtrigger
|
ef8878115bef1a933405ab86d7f7c117252d970c
|
[
"BSD-3-Clause"
] | 135
|
2020-06-27T14:02:46.000Z
|
2021-05-27T01:07:41.000Z
|
devops.py
|
tiholic/django-pgtrigger
|
ef8878115bef1a933405ab86d7f7c117252d970c
|
[
"BSD-3-Clause"
] | 25
|
2021-06-19T20:28:52.000Z
|
2022-03-22T23:14:59.000Z
|
devops.py
|
tiholic/django-pgtrigger
|
ef8878115bef1a933405ab86d7f7c117252d970c
|
[
"BSD-3-Clause"
] | 8
|
2021-06-12T14:06:30.000Z
|
2022-02-25T15:10:32.000Z
|
#!/usr/bin/env python3
"""
Devops functions for this package. Includes functions for automated
package deployment, changelog generation, and changelog checking.
This script is generated by the template at
https://github.com/Opus10/public-django-app-template
Do not change this script! Any fixes or updates to this script should be made
to https://github.com/Opus10/public-django-app-template
"""
import os
import subprocess
import sys
import tempfile
from packaging import version
CIRCLECI_ENV_VAR = 'CIRCLECI'
class Error(Exception):
"""Base exception for this script"""
class NotOnCircleCIError(Error):
"""Thrown when not running on CircleCI"""
def _check_git_version():
"""Verify git version"""
git_version = _shell_stdout("git --version | rev | cut -f 1 -d' ' | rev")
if version.parse(git_version) < version.parse('2.22.0'):
raise RuntimeError(
f'Must have git version >= 2.22.0 (version = {git_version})'
)
def _shell(
cmd, check=True, stdin=None, stdout=None, stderr=None
): # pragma: no cover
"""Runs a subprocess shell with check=True by default"""
return subprocess.run(
cmd, shell=True, check=check, stdin=stdin, stdout=stdout, stderr=stderr
)
def _shell_stdout(cmd, check=True):
"""Runs a shell command and returns stdout"""
ret = _shell(cmd, stdout=subprocess.PIPE, check=check)
return ret.stdout.decode('utf-8').strip() if ret.stdout else ''
def _configure_git():
"""Configure git name/email and verify git version"""
_check_git_version()
_shell('git config --local user.email "wesleykendall@protonmail.com"')
_shell('git config --local user.name "Opus 10 Devops"')
_shell('git config push.default current')
def _find_latest_tag():
return _shell_stdout('git describe --tags --abbrev=0', check=False)
def _find_sem_ver_update():
"""
Find the semantic version string based on the commit log.
Defaults to returning "patch"
"""
sem_ver = 'patch'
latest_tag = _find_latest_tag()
log_section = f'{latest_tag}..HEAD' if latest_tag else ''
cmd = (
f"git log {log_section} --pretty='%(trailers:key=type,valueonly)'"
" | grep -q {sem_ver_type}"
)
change_types_found = {
change_type: _shell(
cmd.format(sem_ver_type=change_type), check=False
).returncode
== 0
for change_type in ['bug', 'feature', 'api-break']
}
if change_types_found['api-break']:
sem_ver = 'major'
elif change_types_found['bug'] or change_types_found['feature']:
sem_ver = 'minor'
return sem_ver
def _update_package_version():
"""Apply semantic versioning to package based on git commit messages"""
# Obtain the current version
old_version = _shell_stdout('make version')
if old_version == '0.0.0':
old_version = ''
latest_tag = _find_latest_tag()
if old_version and version.parse(old_version) != version.parse(latest_tag):
raise RuntimeError(
f'The latest tag "{latest_tag}" and the current version'
f' "{old_version}" do not match.'
)
# Find out the sem-ver tag to apply
sem_ver = _find_sem_ver_update()
_shell(f'poetry version {sem_ver}')
# Get the new version
new_version = _shell_stdout('make version')
if new_version == old_version:
raise RuntimeError(
f'Version update could not be applied (version = "{old_version}")'
)
return old_version, new_version
def _generate_changelog_and_tag(old_version, new_version):
"""Generates a change log using git-tidy and tags repo"""
# Tag the version temporarily so that changelog generation
# renders properly
_shell(f'git tag -f -a {new_version} -m "Version {new_version}"')
# Generate the full changelog
_shell('git tidy-log > CHANGELOG.md')
# Generate a requirements.txt for readthedocs.org
_shell('echo "poetry" > docs/requirements.txt')
_shell('echo "." >> docs/requirements.txt')
_shell(
'poetry export --dev --without-hashes -f requirements.txt '
'>> docs/requirements.txt'
)
# Add all updated files
_shell('git add pyproject.toml CHANGELOG.md docs/requirements.txt')
    # Use [skip ci] to ensure CircleCI doesn't recursively deploy
_shell(
'git commit --no-verify -m "Release version'
f' {new_version} [skip ci]" -m "Type: trivial"'
)
# Create release notes just for this release so that we can use them in
# the commit message
with tempfile.NamedTemporaryFile() as commit_msg_file:
_shell(f'echo "{new_version}\n" > {commit_msg_file.name}')
tidy_log_args = f'^{old_version} HEAD' if old_version else 'HEAD'
_shell(f'git tidy-log {tidy_log_args} >> {commit_msg_file.name}')
# Update the tag so that it includes the latest release messages and
# the automated commit
_shell(f'git tag -d {new_version}')
_shell(
f'git tag -f -a {new_version} -F {commit_msg_file.name}'
' --cleanup=whitespace'
)
def _publish_to_pypi():
"""
Uses poetry to publish to pypi
"""
if 'PYPI_USERNAME' not in os.environ or 'PYPI_PASSWORD' not in os.environ:
raise RuntimeError('Must set PYPI_USERNAME and PYPI_PASSWORD env vars')
_shell('poetry config http-basic.pypi ${PYPI_USERNAME} ${PYPI_PASSWORD}')
_shell('poetry build')
_shell('poetry publish -vvv -n', stdout=subprocess.PIPE)
def _build_and_push_distribution():
"""
Builds and pushes distribution to PyPI, along with pushing the
tags back to the repo
"""
_publish_to_pypi()
    # Push the code changes after a successful PyPI deploy
_shell('git push --follow-tags')
def deploy():
"""Deploys the package and uploads documentation."""
# Ensure proper environment
if not os.environ.get(CIRCLECI_ENV_VAR): # pragma: no cover
raise NotOnCircleCIError('Must be on CircleCI to run this script')
_configure_git()
old_version, new_version = _update_package_version()
_generate_changelog_and_tag(old_version, new_version)
_build_and_push_distribution()
print(f'Deployment complete. Latest version is {new_version}')
if __name__ == '__main__':
if sys.argv[-1] == 'deploy':
deploy()
else:
raise RuntimeError(f'Invalid subcommand "{sys.argv[-1]}"')
| 30.056075
| 79
| 0.670087
|
ef3dd476a843b9914629c865df3a2c4d708eb27a
| 1,492
|
py
|
Python
|
tank/tflite/albert.py
|
NodLabs/SHARK
|
71f5cfcb30b3e7032c6d1d9f952860ff7769afa0
|
[
"Apache-2.0"
] | 62
|
2022-02-07T22:52:25.000Z
|
2022-03-30T06:35:52.000Z
|
tank/tflite/albert.py
|
NodLabs/SHARK
|
71f5cfcb30b3e7032c6d1d9f952860ff7769afa0
|
[
"Apache-2.0"
] | 2
|
2022-02-22T05:47:45.000Z
|
2022-03-22T05:01:50.000Z
|
tank/tflite/albert.py
|
NodLabs/SHARK
|
71f5cfcb30b3e7032c6d1d9f952860ff7769afa0
|
[
"Apache-2.0"
] | 4
|
2022-02-25T13:09:36.000Z
|
2022-03-21T09:09:51.000Z
|
# RUN: %PYTHON %s
import numpy as np
from shark.shark_importer import SharkImporter
import pytest
model_path = "https://tfhub.dev/tensorflow/lite-model/albert_lite_base/squadv1/1?lite-format=tflite"
# Inputs modified to be useful ALBERT inputs.
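# Sketch of what the three tensors below presumably are (an assumption, not stated
# in the original): token ids (random ints), an attention mask of ones, and segment
# ids of zeros, shaped and typed according to the model's reported input details.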
def generate_inputs(input_details):
for input in input_details:
print(str(input["shape"]), input["dtype"].__name__)
args = []
args.append(
np.random.randint(
low=0,
high=256,
size=input_details[0]["shape"],
dtype=input_details[0]["dtype"],
)
)
args.append(
np.ones(
shape=input_details[1]["shape"], dtype=input_details[1]["dtype"]
)
)
args.append(
np.zeros(
shape=input_details[2]["shape"], dtype=input_details[2]["dtype"]
)
)
return args
if __name__ == "__main__":
my_shark_importer = SharkImporter(
model_path=model_path,
model_type="tflite",
model_source_hub="tfhub",
device="cpu",
dynamic=False,
jit_trace=True,
)
# Case1: Use default inputs
my_shark_importer.compile()
shark_results = my_shark_importer.forward()
# Case2: Use manually set inputs
input_details, output_details = my_shark_importer.get_model_details()
inputs = generate_inputs(input_details) # device_inputs
my_shark_importer.compile(inputs)
shark_results = my_shark_importer.forward(inputs)
# print(shark_results)
| 27.62963
| 100
| 0.642091
|
cfdba5f0ac1fc9774a273d6af38000eaa2338e95
| 764
|
py
|
Python
|
api/resources/subscriber.py
|
odbalogun/areavas-bl
|
bde6696e52cc1b1f780b26803f4071edcc6ca428
|
[
"Apache-2.0"
] | null | null | null |
api/resources/subscriber.py
|
odbalogun/areavas-bl
|
bde6696e52cc1b1f780b26803f4071edcc6ca428
|
[
"Apache-2.0"
] | null | null | null |
api/resources/subscriber.py
|
odbalogun/areavas-bl
|
bde6696e52cc1b1f780b26803f4071edcc6ca428
|
[
"Apache-2.0"
] | null | null | null |
from flask_restful import Resource
from flasgger import swag_from
from api.repositories.subscriber import SubRepository
from api.models import Subscription
from api.utils.extensions import ma
from api.utils.responses import response_success
class SubSchema(ma.SQLAlchemySchema):
class Meta:
fields = ('id', 'msisdn', 'product_id', 'category_id', 'initial_sub_date', 'status', 'subscription_mode',
'last_renewed_date', 'expiry_date')
model = Subscription
class SubSingle(Resource):
@staticmethod
@swag_from('../docs/subscriber/GET.yml')
def get(msisdn):
schema = SubSchema()
sub = schema.dump(SubRepository.get(msisdn))
if sub:
return response_success(sub)
return {}
| 30.56
| 113
| 0.695026
|
71cad7e66b1831bf9d6cb42d161b52520cdc32ac
| 207
|
py
|
Python
|
sdk/keyvault/azure-keyvault-keys/azure/keyvault/keys/aio/__init__.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/keyvault/azure-keyvault-keys/azure/keyvault/keys/aio/__init__.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/keyvault/azure-keyvault-keys/azure/keyvault/keys/aio/__init__.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from ._client import KeyClient
__all__ = ["KeyClient"]
| 25.875
| 38
| 0.47343
|
b490859bf7b42c45eed7f28c19aff78b157b1632
| 1,987
|
py
|
Python
|
tab_rounds/generate_round/generate_round.py
|
Andrew-Talley/mock-trial-tab
|
493b0b843d34c732dec724e8ab51f355835a3f46
|
[
"MIT"
] | 1
|
2020-10-10T20:24:53.000Z
|
2020-10-10T20:24:53.000Z
|
tab_rounds/generate_round/generate_round.py
|
Andrew-Talley/mock-trial-tab
|
493b0b843d34c732dec724e8ab51f355835a3f46
|
[
"MIT"
] | 1
|
2020-10-10T20:30:20.000Z
|
2020-10-10T20:30:20.000Z
|
tab_rounds/generate_round/generate_round.py
|
Andrew-Talley/mock-trial-tab
|
493b0b843d34c732dec724e8ab51f355835a3f46
|
[
"MIT"
] | null | null | null |
import random
from typing import Tuple, List
from itertools import zip_longest, chain
from collections import defaultdict
from tab_rounds.generate_round.resolve_impermissibles import resolve_impermissibles
from tab_rounds.generate_round.utilities import Side, get_order_info_for_team
def snake_array(arr):
def combine_iters(left, right):
joined = chain.from_iterable(zip_longest(left, right))
        filtered = filter(lambda x: x is not None, joined)
return filtered
left = combine_iters(arr[0::4], arr[3::4])
right = combine_iters(arr[1::4], arr[2::4])
return left, right
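# Illustrative example of the snake seeding above: for teams ranked
# [1, 2, 3, 4, 5, 6, 7, 8], snake_array yields left = (1, 4, 5, 8) and
# right = (2, 3, 6, 7), i.e. an alternating 1-2 / 2-1 split.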
def __team_ids__(sub_teams):
return [team.get("id") for team in sub_teams]
def separate_pl_and_def_teams(round_num, teams, coin_flip: str) -> Tuple[List[str], List[str]]:
if (round_num == 2 or round_num == 4):
p_team_list = [team for team in teams if team["needs_side"] == Side.PI]
d_team_list = [team for team in teams if team["needs_side"] == Side.DEF]
p_teams = __team_ids__(p_team_list)
d_teams = __team_ids__(d_team_list)
else:
left_side, right_side = snake_array(teams)
left_ids = __team_ids__(left_side)
right_ids = __team_ids__(right_side)
(p_teams, d_teams) = (left_ids, right_ids) if coin_flip == "Heads" else (right_ids, left_ids)
return [p_teams, d_teams]
def generate_round(round_num, teams, coin_flip: str, r3_coin_flip="Heads"):
teams_copy = teams[:]
random.shuffle(teams_copy)
if (round_num > 1):
teams_copy.sort(
key=lambda team: -team.get("id") if coin_flip == "Heads" else team.get("id")
)
teams_copy.sort(
key=get_order_info_for_team,
reverse=True
)
[p_teams, d_teams] = separate_pl_and_def_teams(round_num, teams_copy, r3_coin_flip)
pairings = [{"p": p, "d": d} for (p, d) in zip(p_teams, d_teams)]
pairings = resolve_impermissibles(pairings, teams, round_num)
return pairings
| 33.677966
| 101
| 0.682436
|
7cbfbb6d7b3b8ce4a3de074a465be0c35e1ee724
| 8,136
|
py
|
Python
|
bottleneck/src/template/move/move_median.py
|
fhal/bottleneck
|
7147ad85fadbc9c6ffccb05224efa7c380ded4ee
|
[
"BSD-2-Clause"
] | 1
|
2015-01-30T19:49:12.000Z
|
2015-01-30T19:49:12.000Z
|
bottleneck/src/template/move/move_median.py
|
fhal/bottleneck
|
7147ad85fadbc9c6ffccb05224efa7c380ded4ee
|
[
"BSD-2-Clause"
] | null | null | null |
bottleneck/src/template/move/move_median.py
|
fhal/bottleneck
|
7147ad85fadbc9c6ffccb05224efa7c380ded4ee
|
[
"BSD-2-Clause"
] | null | null | null |
"move_median template"
from copy import deepcopy
import bottleneck as bn
__all__ = ["move_median"]
FLOAT_DTYPES = [x for x in bn.dtypes if 'float' in x]
INT_DTYPES = [x for x in bn.dtypes if 'int' in x]
# Float dtypes (no axis=None) -----------------------------------------------
floats = {}
floats['dtypes'] = FLOAT_DTYPES
floats['axisNone'] = False
floats['force_output_dtype'] = False
floats['reuse_non_nan_func'] = False
floats['top'] = """
@cython.boundscheck(False)
@cython.wraparound(False)
def NAME_NDIMd_DTYPE_axisAXIS(np.ndarray[np.DTYPE_t, ndim=NDIM] a,
int window):
"Moving median of NDIMd array of dtype=DTYPE along axis=AXIS."
cdef mm_handle *mm
"""
loop = {}
loop[1] = """\
if (window < 1) or (window > nAXIS):
raise ValueError, MOVE_WINDOW_ERR_MSG % (window, nAXIS)
elif (window == 1):
if issubclass(a.dtype.type, np.inexact):
return PyArray_Copy(a)
else:
return a.astype(np.float64)
for iINDEX0 in range(window-1):
y[INDEXALL] = np.nan
mm = mm_new(window)
for iINDEX0 in range(window):
mm_insert_init(mm, a[INDEXALL])
y[INDEXREPLACE|window-1|] = mm_get_median(mm)
for iINDEX0 in range(window, nINDEX0):
mm_update(mm, a[INDEXALL])
y[INDEXALL] = mm_get_median(mm)
mm_free(mm)
return y
"""
loop[2] = """\
if (window < 1) or (window > nAXIS):
raise ValueError, MOVE_WINDOW_ERR_MSG % (window, nAXIS)
elif (window == 1):
if issubclass(a.dtype.type, np.inexact):
return PyArray_Copy(a)
else:
return a.astype(np.float64)
mm = mm_new(window)
for iINDEX0 in range(nINDEX0):
for iINDEX1 in range(window-1):
y[INDEXALL] = np.nan
for iINDEX1 in range(window):
mm_insert_init(mm, a[INDEXALL])
y[INDEXREPLACE|window-1|] = mm_get_median(mm)
for iINDEX1 in range(window, nINDEX1):
mm_update(mm, a[INDEXALL])
y[INDEXALL] = mm_get_median(mm)
mm.n_s = 0
mm.n_l = 0
mm_free(mm)
return y
"""
loop[3] = """\
if (window < 1) or (window > nAXIS):
raise ValueError, MOVE_WINDOW_ERR_MSG % (window, nAXIS)
elif (window == 1):
if issubclass(a.dtype.type, np.inexact):
return PyArray_Copy(a)
else:
return a.astype(np.float64)
mm = mm_new(window)
for iINDEX0 in range(nINDEX0):
for iINDEX1 in range(nINDEX1):
for iINDEX2 in range(window-1):
y[INDEXALL] = np.nan
for iINDEX2 in range(window):
mm_insert_init(mm, a[INDEXALL])
y[INDEXREPLACE|window-1|] = mm_get_median(mm)
for iINDEX2 in range(window, nINDEX2):
mm_update(mm, a[INDEXALL])
y[INDEXALL] = mm_get_median(mm)
mm.n_s = 0
mm.n_l = 0
mm_free(mm)
return y
"""
floats['loop'] = loop
# Int dtypes (no axis=None) ------------------------------------------------
ints = deepcopy(floats)
ints['force_output_dtype'] = 'float64'
ints['dtypes'] = INT_DTYPES
# Slow, unaccelerated ndim/dtype --------------------------------------------
slow = {}
slow['name'] = "move_median"
slow['signature'] = "arr, window"
slow['func'] = "bn.slow.move_median(arr, window, axis=AXIS)"
# Template ------------------------------------------------------------------
move_median = {}
move_median['name'] = 'move_median'
move_median['is_reducing_function'] = False
move_median['cdef_output'] = True
move_median['slow'] = slow
move_median['templates'] = {}
move_median['templates']['float'] = floats
move_median['templates']['int'] = ints
move_median['pyx_file'] = 'move/%sbit/move_median.pyx'
move_median['main'] = '''"move_median auto-generated from template"
cdef extern from "../csrc/move_median.c":
struct _mm_node:
np.npy_uint32 small
np.npy_uint64 idx
np.npy_float64 val
_mm_node *next
ctypedef _mm_node mm_node
struct _mm_handle:
int odd
np.npy_uint64 n_s
np.npy_uint64 n_l
mm_node **s_heap
mm_node **l_heap
mm_node **nodes
mm_node *node_data
mm_node *first
mm_node *last
np.npy_uint64 s_first_leaf
np.npy_uint64 l_first_leaf
ctypedef _mm_handle mm_handle
mm_handle *mm_new(np.npy_uint64 size)
void mm_insert_init(mm_handle *mm, np.npy_float64 val)
void mm_update(mm_handle *mm, np.npy_float64 val)
np.npy_float64 mm_get_median(mm_handle *mm)
void mm_free(mm_handle *mm)
def move_median(arr, int window, int axis=-1):
"""
Moving window median along the specified axis.
    This function is not protected against NaN. Therefore, you may get
unexpected results if the input contains NaN.
Parameters
----------
arr : ndarray
Input array.
window : int
The number of elements in the moving window.
axis : int, optional
The axis over which to perform the moving median. By default the moving
median is taken over the last axis (axis=-1). An axis of None is not
allowed.
Returns
-------
y : ndarray
The moving median of the input array along the specified axis. The output
has the same shape as the input.
Notes
-----
Unexpected results may occur if the input array contains NaN.
Examples
--------
>>> arr = np.array([1.0, 2.0, 3.0, 4.0])
>>> bn.move_median(arr, window=2)
array([ nan, 1.5, 2.5, 3.5])
"""
func, arr = move_median_selector(arr, axis)
return func(arr, window)
def move_median_selector(arr, int axis):
"""
Return move_median function and array that matches `arr` and `axis`.
Under the hood Bottleneck uses a separate Cython function for each
combination of ndim, dtype, and axis. A lot of the overhead in
bn.move_median() is in checking that `axis` is within range, converting
`arr` into an array (if it is not already an array), and selecting the
function to use to calculate the moving median.
    You can get rid of the overhead by using this function to do all of this
    work up front, before you enter an inner loop, for example.
Parameters
----------
arr : array_like
Input array. If `arr` is not an array, a conversion is attempted.
axis : {int, None}
Axis along which the moving median is to be computed.
Returns
-------
func : function
The moving median function that matches the number of dimensions,
dtype, and the axis along which you wish to find the median.
a : ndarray
        If the input array `arr` is not an ndarray, then `a` will contain the
        result of converting `arr` into an ndarray; otherwise a view is
        returned.
Examples
--------
Create a numpy array:
>>> arr = np.array([1.0, 2.0, 3.0, 4.0])
    Obtain the function needed to determine the moving median of `arr` along axis=0:
>>> window, axis = 2, 0
>>> func, a = bn.move.move_median_selector(arr, axis)
>>> func
<built-in function move_median_1d_float64_axis0>
Use the returned function and array to determine the moving median:
>>> func(a, window)
array([ nan, 1.5, 2.5, 3.5])
"""
cdef np.ndarray a
if type(arr) is np.ndarray:
a = arr
else:
a = np.array(arr, copy=False)
cdef int ndim = PyArray_NDIM(a)
cdef int dtype = PyArray_TYPE(a)
if axis < 0:
axis += ndim
cdef tuple key = (ndim, dtype, axis)
try:
func = move_median_dict[key]
except KeyError:
if (axis < 0) or (axis >= ndim):
raise ValueError, "axis(=%d) out of bounds" % axis
try:
func = move_median_slow_dict[axis]
except KeyError:
tup = (str(ndim), str(a.dtype), str(axis))
raise TypeError, "Unsupported ndim/dtype/axis (%s/%s/%s)." % tup
return func, a
'''
| 31.053435
| 81
| 0.590708
|
48a052a7646e6b86413bc2e3d05ebd01c99d5913
| 18,518
|
py
|
Python
|
pyrpm/spec.py
|
bkircher/python-rpm-spec
|
817be80f1b6ad6045893881b8e57ef374c3ea458
|
[
"MIT"
] | 29
|
2017-02-14T11:57:56.000Z
|
2022-01-26T06:24:04.000Z
|
pyrpm/spec.py
|
bkircher/python-rpm-spec
|
817be80f1b6ad6045893881b8e57ef374c3ea458
|
[
"MIT"
] | 39
|
2017-01-21T13:26:43.000Z
|
2021-06-22T10:26:03.000Z
|
pyrpm/spec.py
|
bkircher/python-rpm-spec
|
817be80f1b6ad6045893881b8e57ef374c3ea458
|
[
"MIT"
] | 18
|
2017-01-20T08:38:24.000Z
|
2021-12-15T07:31:48.000Z
|
"""Python module for parsing RPM spec files.
RPMs are built from a package's sources along with a spec file. The spec file controls how the RPM
is built. This module allows you to parse spec files and gives you simple access to various bits of
information contained in the spec file.
Current status: This module does not parse everything in a spec file, only the pieces I needed, so
there is probably still plenty of stuff missing. However, it should not be terribly complicated to
add support for the missing pieces.
"""
import re
import sys
from warnings import warn
from abc import ABCMeta, abstractmethod
from typing import Any, Dict, List, Optional, Union, Tuple, Type, cast
if sys.version_info < (3, 7):
re.Pattern = Any
re.Match = Any
__all__ = ["Spec", "replace_macros", "Package", "warnings_enabled"]
# Set this to True if you want the library to issue warnings during parsing.
warnings_enabled: bool = False
class _Tag(metaclass=ABCMeta):
def __init__(self, name: str, pattern_obj: re.Pattern, attr_type: Type[Any]) -> None:
self.name = name
self.pattern_obj = pattern_obj
self.attr_type = attr_type
def test(self, line: str) -> Optional[re.Match]:
return re.search(self.pattern_obj, line)
def update(self, spec_obj: "Spec", context: Dict[str, Any], match_obj: re.Match, line: str) -> Any:
"""Update given spec object and parse context and return them again.
:param spec_obj: An instance of Spec class
:param context: The parse context
:param match_obj: The re.match object
:param line: The original line
:return: Given updated Spec instance and parse context dictionary.
"""
assert spec_obj
assert context
assert match_obj
assert line
return self.update_impl(spec_obj, context, match_obj, line)
@abstractmethod
def update_impl(self, spec_obj, context, match_obj, line):
pass
@staticmethod
def current_target(spec_obj: "Spec", context: Dict[str, Any]) -> Union["Spec", "Package"]:
target_obj = spec_obj
if context["current_subpackage"] is not None:
target_obj = context["current_subpackage"]
return target_obj
class _NameValue(_Tag):
"""Parse a simple name → value tag."""
def __init__(self, name: str, pattern_obj: re.Pattern, attr_type: Optional[Type[Any]] = None) -> None:
super().__init__(name, pattern_obj, cast(Type[Any], attr_type if attr_type else str))
def update_impl(self, spec_obj: "Spec", context: Dict[str, Any], match_obj: re.Match, line: str) -> Tuple["Spec", dict]:
if self.name == "changelog":
context["current_subpackage"] = None
target_obj = _Tag.current_target(spec_obj, context)
value = match_obj.group(1)
# Sub-packages
if self.name == "name":
spec_obj.packages = []
spec_obj.packages.append(Package(value))
if self.name in ["description", "changelog"]:
context["multiline"] = self.name
else:
setattr(target_obj, self.name, self.attr_type(value))
return spec_obj, context
class _SetterMacroDef(_Tag):
"""Parse global macro definitions."""
def __init__(self, name: str, pattern_obj: re.Pattern) -> None:
super().__init__(name, pattern_obj, str)
def get_namespace(self, spec_obj, context):
raise NotImplementedError()
def update_impl(self, spec_obj: "Spec", context: Dict[str, Any], match_obj: re.Match, line: str) -> Tuple["Spec", dict]:
name, value = match_obj.groups()
setattr(self.get_namespace(spec_obj, context), name, str(value))
return spec_obj, context
class _GlobalMacroDef(_SetterMacroDef):
"""Parse global macro definitions."""
def get_namespace(self, spec_obj: "Spec", context: Dict[str, Any]) -> "Spec":
return spec_obj
class _LocalMacroDef(_SetterMacroDef):
"""Parse define macro definitions."""
def get_namespace(self, spec_obj: "Spec", context: Dict[str, Any]) -> "Spec":
return context["current_subpackage"]
class _MacroDef(_Tag):
"""Parse global macro definitions."""
def __init__(self, name, pattern_obj):
super().__init__(name, pattern_obj, str)
def update_impl(self, spec_obj, context, match_obj, line):
name, value = match_obj.groups()
spec_obj.macros[name] = str(value)
if name not in _tag_names:
# Also make available as attribute of spec object
setattr(spec_obj, name, str(value))
return spec_obj, context
class _List(_Tag):
"""Parse a tag that expands to a list."""
def __init__(self, name: str, pattern_obj: re.Pattern) -> None:
super().__init__(name, pattern_obj, list)
def update_impl(self, spec_obj: "Spec", context: Dict[str, Any], match_obj: re.Match, line: str) -> Tuple["Spec", dict]:
target_obj = _Tag.current_target(spec_obj, context)
if not hasattr(target_obj, self.name):
setattr(target_obj, self.name, list())
value = match_obj.group(1)
if self.name == "packages":
if value == "-n":
subpackage_name = line.rsplit(" ", 1)[-1].rstrip()
else:
subpackage_name = "{}-{}".format(spec_obj.name, value)
package = Package(subpackage_name)
context["current_subpackage"] = package
package.is_subpackage = True
spec_obj.packages.append(package)
elif self.name in [
"build_requires",
"requires",
"conflicts",
"obsoletes",
"provides",
]:
# Macros are valid in requirements
value = replace_macros(value, spec=spec_obj)
# It's also legal to do:
# Requires: a b c
# Requires: b >= 3.1
# Requires: a, b >= 3.1, c
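            # For illustration, "a, b >= 3.1, c" tokenizes to
            # ["a", "b", ">=", "3.1", "c"] and re-joins to ["a", "b >= 3.1", "c"],
            # which then become three Requirement objects.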
# 1. Tokenize
tokens = [val for val in re.split("[\t\n, ]", value) if val != ""]
values: List[str] = []
# 2. Join
add = False
for val in tokens:
if add:
add = False
val = values.pop() + " " + val
elif val in [">=", "!=", ">", "<", "<=", "==", "="]:
add = True # Add next value to this one
val = values.pop() + " " + val
values.append(val)
for val in values:
requirement = Requirement(val)
getattr(target_obj, self.name).append(requirement)
else:
getattr(target_obj, self.name).append(value)
return spec_obj, context
class _ListAndDict(_Tag):
"""Parse a tag that expands to a list and to a dict."""
def __init__(self, name: str, pattern_obj: re.Pattern) -> None:
super().__init__(name, pattern_obj, list)
def update_impl(self, spec_obj: "Spec", context: Dict[str, Any], match_obj: re.Match, line: str) -> Tuple["Spec", dict]:
source_name, value = match_obj.groups()
dictionary = getattr(spec_obj, "{}_dict".format(self.name))
dictionary[source_name] = value
target_obj = _Tag.current_target(spec_obj, context)
getattr(target_obj, self.name).append(value)
return spec_obj, context
class _SplitValue(_NameValue):
"""Parse a (name->value) tag, and at the same time split the tag to a list."""
def __init__(self, name: str, pattern_obj: re.Pattern, sep: str = None) -> None:
super().__init__(name, pattern_obj)
self.name_list = "%s_list" % name
self.sep = sep
def update_impl(self, spec_obj: "Spec", context: Dict[str, Any], match_obj: re.Match, line: str) -> Tuple["Spec", dict]:
super().update_impl(spec_obj, context, match_obj, line)
target_obj = _Tag.current_target(spec_obj, context)
value = getattr(target_obj, self.name)
value = value.split(self.sep)
setattr(target_obj, self.name_list, value)
return spec_obj, context
def re_tag_compile(tag):
return re.compile(tag, re.IGNORECASE)
class _DummyMacroDef(_Tag):
"""Parse global macro definitions."""
def __init__(self, name, pattern_obj):
super().__init__(name, pattern_obj, str)
def update_impl(self, spec_obj, context, _, line):
context["line_processor"] = None
if warnings_enabled:
warn("Unknown macro: " + line)
return spec_obj, context
_tags = [
_NameValue("name", re_tag_compile(r"^Name\s*:\s*(\S+)")),
_NameValue("version", re_tag_compile(r"^Version\s*:\s*(\S+)")),
_NameValue("epoch", re_tag_compile(r"^Epoch\s*:\s*(\S+)")),
_NameValue("release", re_tag_compile(r"^Release\s*:\s*(\S+)")),
_NameValue("summary", re_tag_compile(r"^Summary\s*:\s*(.+)")),
_NameValue("description", re_tag_compile(r"^%description\s*(\S*)")),
_NameValue("changelog", re_tag_compile(r"^%changelog\s*(\S*)")),
_NameValue("license", re_tag_compile(r"^License\s*:\s*(.+)")),
_NameValue("group", re_tag_compile(r"^Group\s*:\s*(.+)")),
_NameValue("url", re_tag_compile(r"^URL\s*:\s*(\S+)")),
_NameValue("buildroot", re_tag_compile(r"^BuildRoot\s*:\s*(\S+)")),
_SplitValue("buildarch", re_tag_compile(r"^BuildArch\s*:\s*(\S+)")),
_SplitValue("excludearch", re_tag_compile(r"^ExcludeArch\s*:\s*(.+)")),
_SplitValue("exclusivearch", re_tag_compile(r"^ExclusiveArch\s*:\s*(.+)")),
_ListAndDict("sources", re_tag_compile(r"^(Source\d*\s*):\s*(.+)")),
_ListAndDict("patches", re_tag_compile(r"^(Patch\d*\s*):\s*(\S+)")),
_List("build_requires", re_tag_compile(r"^BuildRequires\s*:\s*(.+)")),
_List("requires", re_tag_compile(r"^Requires\s*:\s*(.+)")),
_List("conflicts", re_tag_compile(r"^Conflicts\s*:\s*(.+)")),
_List("obsoletes", re_tag_compile(r"^Obsoletes\s*:\s*(.+)")),
_List("provides", re_tag_compile(r"^Provides\s*:\s*(.+)")),
_List("packages", re.compile(r"^%package\s+(\S+)")),
_MacroDef("define", re.compile(r"^%define\s+(\S+)\s+(\S+)")),
_MacroDef("global", re.compile(r"^%global\s+(\S+)\s+(\S+)")),
_DummyMacroDef("dummy", re.compile(r"^%[a-z_]+\b.*$")),
]
_tag_names = [tag.name for tag in _tags]
_macro_pattern = re.compile(r"%{(\S+?)\}")
def _parse(spec_obj: "Spec", context: Dict[str, Any], line: str) -> Any:
for tag in _tags:
match = tag.test(line)
if match:
if "multiline" in context:
context.pop("multiline", None)
return tag.update(spec_obj, context, match, line)
if "multiline" in context:
target_obj = _Tag.current_target(spec_obj, context)
previous_txt = getattr(target_obj, context["multiline"], "")
if previous_txt is None:
previous_txt = ""
setattr(target_obj, context["multiline"], str(previous_txt) + line)
return spec_obj, context
class Requirement:
"""Represents a single requirement or build requirement in an RPM spec file.
Each spec file contains one or more requirements or build requirements.
    For example, consider the following spec file::
Name: foo
Version: 0.1
%description
%{name} is the library that everyone needs.
%package devel
Summary: Header files, libraries and development documentation for %{name}
Group: Development/Libraries
Requires: %{name}%{?_isa} = %{version}-%{release}
BuildRequires: gstreamer%{?_isa} >= 0.1.0
%description devel
This package contains the header files, static libraries, and development
documentation for %{name}. If you like to develop programs using %{name}, you
will need to install %{name}-devel.
This spec file's requirements have a name and either a required or minimum
version.
"""
expr = re.compile(r"(.*?)\s+([<>]=?|=)\s+(\S+)")
def __init__(self, name: str) -> None:
assert isinstance(name, str)
self.line = name
self.name: str
self.operator: Optional[str]
self.version: Optional[str]
match = Requirement.expr.match(name)
if match:
self.name = match.group(1)
self.operator = match.group(2)
self.version = match.group(3)
else:
self.name = name
self.operator = None
self.version = None
def __repr__(self):
return self.line
class Package:
"""Represents a single package in a RPM spec file.
Each spec file describes at least one package and can contain one or more subpackages (described
by the %package directive). For example, consider following spec file::
Name: foo
Version: 0.1
%description
%{name} is the library that everyone needs.
%package devel
Summary: Header files, libraries and development documentation for %{name}
Group: Development/Libraries
Requires: %{name}%{?_isa} = %{version}-%{release}
%description devel
This package contains the header files, static libraries, and development
documentation for %{name}. If you like to develop programs using %{name}, you
will need to install %{name}-devel.
%package -n bar
Summary: A command line client for foo.
License: GPLv2+
%description -n bar
This package contains a command line client for foo.
This spec file will create three packages:
* A package named foo, the base package.
* A package named foo-devel, a subpackage.
* A package named bar, also a subpackage, but without the foo- prefix.
As you can see above, the name of a subpackage normally includes the main package name. When the
-n option is added to the %package directive, the prefix of the base package name is omitted and
a completely new name is used.
"""
def __init__(self, name: str) -> None:
assert isinstance(name, str)
for tag in _tags:
if tag.attr_type is list and tag.name in [
"build_requires",
"requires",
"conflicts",
"obsoletes",
"provides",
]:
setattr(self, tag.name, tag.attr_type())
elif tag.name in [
"description",
]:
setattr(self, tag.name, None)
self.name = name
self.is_subpackage = False
def __repr__(self) -> str:
return "Package('{}')".format(self.name)
class Spec:
"""Represents a single spec file."""
def __init__(self) -> None:
for tag in _tags:
if tag.attr_type is list:
setattr(self, tag.name, tag.attr_type())
else:
setattr(self, tag.name, None)
self.sources_dict: Dict[str, str] = {}
self.patches_dict: Dict[str, str] = {}
self.macros: Dict[str, str] = {}
self.name: Optional[str]
self.packages: List[Package] = []
@property
def packages_dict(self) -> Dict[str, Package]:
"""All packages in this RPM spec as a dictionary.
You can access the individual packages by their package name, e.g.,
git_spec.packages_dict['git-doc']
"""
assert self.packages
return dict(zip([package.name for package in self.packages], self.packages))
@staticmethod
def from_file(filename: str) -> "Spec":
"""Creates a new Spec object from a given file.
:param filename: The path to the spec file.
:return: A new Spec object.
"""
spec = Spec()
with open(filename, "r", encoding="utf-8") as f:
parse_context = {"current_subpackage": None}
for line in f:
spec, parse_context = _parse(spec, parse_context, line)
return spec
@staticmethod
def from_string(string: str) -> "Spec":
"""Creates a new Spec object from a given string.
:param string: The contents of a spec file.
:return: A new Spec object.
"""
spec = Spec()
parse_context = {"current_subpackage": None}
for line in string.splitlines():
spec, parse_context = _parse(spec, parse_context, line)
return spec
def replace_macros(string: str, spec: Spec) -> str:
"""Replace all macros in given string with corresponding values.
For example: a string '%{name}-%{version}.tar.gz' will be transformed to 'foo-2.0.tar.gz'.
    :param string: A string containing macros that you want to be replaced.
    :param spec: A Spec object. Definitions in that spec file will be used to replace macros.
    :return: A string where all macros in the given input are substituted as well as possible.
"""
assert isinstance(spec, Spec)
def _is_conditional(macro: str) -> bool:
return macro.startswith("?") or macro.startswith("!")
def _test_conditional(macro: str) -> bool:
if macro[0] == "?":
return True
if macro[0] == "!":
return False
raise Exception("Given string is not a conditional macro")
def _macro_repl(match):
macro_name = match.group(1)
if _is_conditional(macro_name) and spec:
parts = macro_name[1:].split(sep=":", maxsplit=1)
assert parts
if _test_conditional(macro_name):
if hasattr(spec, parts[0]) or parts[0] in spec.macros:
if len(parts) == 2:
return parts[1]
return spec.macros.get(parts[0], getattr(spec, parts[0], None))
return ""
if not hasattr(spec, parts[0]) and parts[0] not in spec.macros:
if len(parts) == 2:
return parts[1]
return spec.macros.get(parts[0], getattr(spec, parts[0], None))
return ""
if spec:
value = spec.macros.get(macro_name, getattr(spec, macro_name, None))
if value:
return str(value)
return match.string[match.start() : match.end()]
# Recursively expand macros
# Note: If macros are not defined in the spec file, this won't try to
# expand them.
while True:
ret = re.sub(_macro_pattern, _macro_repl, string)
if ret != string:
string = ret
continue
return ret
| 34.742964
| 124
| 0.605249
|
29bca09def29a4f07c17006fe1e0c9532d6ad8b9
| 6,856
|
py
|
Python
|
platform/core/polyaxon/db/models/jobs.py
|
hackerwins/polyaxon
|
ff56a098283ca872abfbaae6ba8abba479ffa394
|
[
"Apache-2.0"
] | null | null | null |
platform/core/polyaxon/db/models/jobs.py
|
hackerwins/polyaxon
|
ff56a098283ca872abfbaae6ba8abba479ffa394
|
[
"Apache-2.0"
] | null | null | null |
platform/core/polyaxon/db/models/jobs.py
|
hackerwins/polyaxon
|
ff56a098283ca872abfbaae6ba8abba479ffa394
|
[
"Apache-2.0"
] | null | null | null |
from typing import Dict, Optional
from hestia.datetime_typing import AwareDT
from django.conf import settings
from django.db import models
from django.utils.functional import cached_property
import auditor
import compiler
from constants.cloning_strategies import CloningStrategy
from constants.k8s_jobs import JOB_NAME, JOB_NAME_FORMAT
from db.models.abstract.backend import BackendModel
from db.models.abstract.datarefs import DataReferenceModel
from db.models.abstract.deleted import DeletedModel
from db.models.abstract.describable import DescribableModel
from db.models.abstract.is_managed import IsManagedModel
from db.models.abstract.job import AbstractJobModel, AbstractJobStatusModel, JobMixin
from db.models.abstract.nameable import NameableModel
from db.models.abstract.node_scheduling import NodeSchedulingModel
from db.models.abstract.outputs import OutputsModel
from db.models.abstract.persistence import PersistenceModel
from db.models.abstract.readme import ReadmeModel
from db.models.abstract.sub_paths import SubPathModel
from db.models.abstract.tag import TagModel
from db.models.unique_names import JOB_UNIQUE_NAME_FORMAT
from db.redis.heartbeat import RedisHeartBeat
from events.registry.job import JOB_RESTARTED
from libs.paths.jobs import get_job_subpath
from schemas import kinds
class Job(AbstractJobModel,
BackendModel,
IsManagedModel,
DataReferenceModel,
OutputsModel,
PersistenceModel,
SubPathModel,
NodeSchedulingModel,
NameableModel,
DescribableModel,
ReadmeModel,
TagModel,
DeletedModel,
JobMixin):
"""A model that represents the configuration for run job."""
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
related_name='+')
project = models.ForeignKey(
'db.Project',
on_delete=models.CASCADE,
related_name='jobs')
content = models.TextField(
null=True,
blank=True,
help_text='The yaml content of the polyaxonfile/specification.')
code_reference = models.ForeignKey(
'db.CodeReference',
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='+')
build_job = models.ForeignKey(
'db.BuildJob',
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='jobs')
original_job = models.ForeignKey(
'self',
on_delete=models.SET_NULL,
null=True,
blank=True,
related_name='clones',
help_text='The original job that was cloned from.')
cloning_strategy = models.CharField(
max_length=16,
blank=True,
null=True,
choices=CloningStrategy.CHOICES)
status = models.OneToOneField(
'db.JobStatus',
related_name='+',
blank=True,
null=True,
editable=True,
on_delete=models.SET_NULL)
class Meta:
app_label = 'db'
unique_together = (('project', 'name'),)
indexes = [
models.Index(fields=['name']),
]
@cached_property
def unique_name(self) -> str:
return JOB_UNIQUE_NAME_FORMAT.format(
project_name=self.project.unique_name,
id=self.id)
@property
def subpath(self) -> str:
return get_job_subpath(job_name=self.unique_name)
@cached_property
def pod_id(self) -> str:
return JOB_NAME_FORMAT.format(name=JOB_NAME, job_uuid=self.uuid.hex)
@cached_property
def specification(self) -> Optional['JobSpecification']:
return compiler.compile(kind=kinds.JOB, values=self.content)
@property
def has_specification(self) -> bool:
return self.content is not None
@property
def is_clone(self) -> bool:
return self.original_job is not None
@property
def original_unique_name(self) -> Optional[str]:
return self.original_job.unique_name if self.original_job else None
@property
def is_restart(self) -> bool:
return self.is_clone and self.cloning_strategy == CloningStrategy.RESTART
@property
def is_resume(self) -> bool:
return self.is_clone and self.cloning_strategy == CloningStrategy.RESUME
@property
def is_copy(self) -> bool:
return self.is_clone and self.cloning_strategy == CloningStrategy.COPY
def _ping_heartbeat(self) -> None:
RedisHeartBeat.job_ping(self.id)
def set_status(self, # pylint:disable=arguments-differ
status: str,
created_at: AwareDT = None,
message: str = None,
traceback: Dict = None,
details: Dict = None) -> bool:
params = {'created_at': created_at} if created_at else {}
return self._set_status(status_model=JobStatus,
status=status,
message=message,
details=details,
**params)
def _clone(self,
cloning_strategy: str,
event_type: str,
user=None,
description: str = None,
content=None,
code_reference=None,
update_code_reference: bool = False) -> 'Job':
if not code_reference and not update_code_reference:
code_reference = self.code_reference
instance = Job.objects.create(
project=self.project,
user=user or self.user,
description=description or self.description,
content=content or self.content,
original_job=self,
cloning_strategy=cloning_strategy,
code_reference=code_reference)
auditor.record(event_type=event_type, instance=instance)
return instance
def restart(self,
user=None,
description: str = None,
content=None,
code_reference=None,
update_code_reference: bool = False) -> 'Job':
return self._clone(cloning_strategy=CloningStrategy.RESTART,
event_type=JOB_RESTARTED,
user=user,
description=description,
content=content,
code_reference=code_reference,
update_code_reference=update_code_reference)
class JobStatus(AbstractJobStatusModel):
"""A model that represents run job status at certain time."""
job = models.ForeignKey(
'db.Job',
on_delete=models.CASCADE,
related_name='statuses')
class Meta(AbstractJobStatusModel.Meta):
app_label = 'db'
verbose_name_plural = 'Job Statuses'
| 33.607843
| 85
| 0.637398
|
789c383a5069f0e0e1c876a26c200c7bd5c2f50e
| 8,117
|
py
|
Python
|
.molecule/default/files/polish/lib/workchain.py
|
azadoks/aiida-core
|
b806b7fef8fc79090deccfe2019b77cb922e0581
|
[
"MIT",
"BSD-3-Clause"
] | 180
|
2019-07-12T07:45:26.000Z
|
2022-03-22T13:16:57.000Z
|
.molecule/default/files/polish/lib/workchain.py
|
azadoks/aiida-core
|
b806b7fef8fc79090deccfe2019b77cb922e0581
|
[
"MIT",
"BSD-3-Clause"
] | 2,466
|
2016-12-24T01:03:52.000Z
|
2019-07-04T13:41:08.000Z
|
.molecule/default/files/polish/lib/workchain.py
|
azadoks/aiida-core
|
b806b7fef8fc79090deccfe2019b77cb922e0581
|
[
"MIT",
"BSD-3-Clause"
] | 88
|
2016-12-23T16:28:00.000Z
|
2019-07-01T15:55:20.000Z
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Functions to dynamically generate a WorkChain from a reversed polish notation expression."""
import collections
import hashlib
import os
from pathlib import Path
from string import Template
from .expression import OPERATORS # pylint: disable=relative-beyond-top-level
INDENTATION_WIDTH = 4
BLOCK_TPL_INI = '{indent:}cls.setup,\n'
BLOCK_TPL_ADD_BASE = """
{indent:}if_(cls.is_positive)(
{indent_inner:}cls.add
{indent:}).else_(
{indent_inner:}cls.subtract
{indent:}),
"""
BLOCK_TPL_ADD_WORK = """
{indent:}if_(cls.is_positive)(
{indent_inner:}cls.add
{indent:}).else_(
{indent_inner:}cls.subtract_calcfunction
{indent:}),
"""
BLOCK_TPL_ADD_CALC = """
{indent:}if_(cls.is_positive)(
{indent_inner:}cls.add_calculation,
{indent_inner:}cls.post_add,
{indent:}).else_(
{indent_inner:}cls.subtract
{indent:}),
"""
BLOCK_TPL_ADD_BOTH = """
{indent:}if_(cls.is_positive)(
{indent_inner:}cls.add_calculation,
{indent_inner:}cls.post_add,
{indent:}).else_(
{indent_inner:}cls.subtract_calcfunction
{indent:}),
"""
BLOCK_TPL_MUL = """
{indent:}cls.pre_iterate,
{indent:}while_(cls.iterate)(
{block:}{indent:}),
"""
BLOCK_TPL_POW = """
{indent:}cls.raise_power,
{indent:}cls.post_raise_power,
"""
BLOCK_TPL_END = """
{indent:}cls.results
"""
def generate_outlines(expression):
"""
For a given expression in Reverse Polish Notation, generate the nested symbolic structure of the outlines.
:param expression: a valid expression
:return: a nested list structure of strings representing the structure of the outlines
"""
stack = collections.deque()
values = []
outline = [['add']]
for part in expression.split():
if part not in OPERATORS.keys():
stack.appendleft(part)
values.append(part)
else:
stack.popleft()
sub_outline = outline[-1]
if part == '+':
sub_outline.append('add')
elif part == '*':
sub_outline = outline.pop()
outline.append([sub_outline])
elif part == '^':
outline.append(['pow'])
for sub_outline in outline:
sub_outline.append('ini')
sub_outline.append('end')
return outline, values
def format_outlines(outlines, use_calculations=False, use_calcfunctions=False):
"""
Given the symbolic structure of the workchain outlines produced by ``generate_outlines``, format the actual
string form of those workchain outlines
:param outlines: the list of symbolic outline structures
:param use_calculations: use CalcJobs for the add operations
:param use_calcfunctions: use calcfunctions for the subtract operations
:return: a list of outline strings
"""
outline_strings = []
for sub_outline in outlines:
outline_string = ''
for instruction in sub_outline:
if instruction == 'ini':
outline_string = BLOCK_TPL_INI.format(indent=format_indent()) + outline_string
elif instruction == 'add':
outline_string = format_block(instruction, 0, use_calculations, use_calcfunctions) + outline_string
elif instruction == 'pow':
outline_string += BLOCK_TPL_POW.format(indent=format_indent())
elif instruction == 'end':
outline_string += BLOCK_TPL_END.format(indent=format_indent())
else:
outline_string += format_block(instruction, 0, use_calculations, use_calcfunctions)
outline_strings.append(outline_string)
return outline_strings
def format_block(instruction, level=0, use_calculations=False, use_calcfunctions=False):
"""
Format the instruction into its proper string form
:param use_calculations: use CalcJobs for the add operations
:param use_calcfunctions: use calcfunctions for the subtract operations
:return: the string representation of the instruction
"""
block = ''
string = ''
if isinstance(instruction, list):
for sub_instruction in instruction:
if sub_instruction == 'add':
block = format_block(sub_instruction, level + 1, use_calculations, use_calcfunctions) + block
else:
block += format_block(sub_instruction, level + 1, use_calculations, use_calcfunctions)
string += BLOCK_TPL_MUL.format(indent=format_indent(level), level=level, block=block)
elif instruction == 'pow':
string += BLOCK_TPL_POW.format(indent=format_indent(level))
elif instruction == 'add':
if use_calculations and use_calcfunctions:
string = BLOCK_TPL_ADD_BOTH.format(indent=format_indent(level), indent_inner=format_indent(level + 1))
elif use_calculations:
string = BLOCK_TPL_ADD_CALC.format(indent=format_indent(level), indent_inner=format_indent(level + 1))
elif use_calcfunctions:
string = BLOCK_TPL_ADD_WORK.format(indent=format_indent(level), indent_inner=format_indent(level + 1))
else:
string = BLOCK_TPL_ADD_BASE.format(indent=format_indent(level), indent_inner=format_indent(level + 1))
return string
def format_indent(level=0, width=INDENTATION_WIDTH):
"""
Format the indentation for the given indentation level and indentation width
:param level: the level of indentation
:param width: the width in spaces of a single indentation
:return: the indentation string
"""
return ' ' * level * width
def write_workchain(outlines, directory=None) -> Path:
"""
    Given a list of string-formatted outlines, write the corresponding workchains to file
:returns: file path
"""
dirpath = os.path.dirname(os.path.realpath(__file__))
template_dir = os.path.join(dirpath, 'template')
template_file_base = os.path.join(template_dir, 'base.tpl')
template_file_workchain = os.path.join(template_dir, 'workchain.tpl')
if directory is None:
directory = os.path.join(dirpath, os.path.pardir, 'polish_workchains')
directory = Path(directory)
directory.mkdir(parents=True, exist_ok=True)
(directory / '__init__.py').touch()
with open(template_file_base, 'r', encoding='utf8') as handle:
template_base = handle.readlines()
with open(template_file_workchain, 'r', encoding='utf8') as handle:
template_workchain = Template(handle.read())
code_strings = []
for line in template_base:
code_strings.append(line)
code_strings.append('\n')
counter = len(outlines) - 1
for outline in outlines:
outline_string = ''
for subline in outline.split('\n'):
outline_string += f'\t\t\t{subline}\n'
if counter == len(outlines) - 1:
child_class = None
else:
child_class = f'Polish{counter + 1:02d}WorkChain'
subs = {
'class_name': f'Polish{counter:02d}WorkChain',
'child_class': child_class,
'outline': outline_string,
}
code_strings.append(template_workchain.substitute(**subs))
code_strings.append('\n\n')
counter -= 1
code_string = '\n'.join(code_strings)
hashed = hashlib.md5(code_string.encode('utf8')).hexdigest()
filepath = directory / f'polish_{hashed}.py'
filepath.write_text(code_string)
return filepath
| 32.468
| 115
| 0.63558
|
f299c98fdfd221c6ad6c1f200b5815119307e5ce
| 19,369
|
py
|
Python
|
tom_education/views.py
|
joesingo/tom_education
|
9bf9ea3d465f83040e4618ce89efbab2a087b2fa
|
[
"MIT"
] | 1
|
2019-07-16T11:20:55.000Z
|
2019-07-16T11:20:55.000Z
|
tom_education/views.py
|
joesingo/tom_education
|
9bf9ea3d465f83040e4618ce89efbab2a087b2fa
|
[
"MIT"
] | 45
|
2019-07-05T08:49:17.000Z
|
2019-09-23T14:02:17.000Z
|
tom_education/views.py
|
joesingo/tom_education
|
9bf9ea3d465f83040e4618ce89efbab2a087b2fa
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass
from datetime import datetime
import json
from typing import Iterable
import csv
import logging
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.exceptions import PermissionDenied
from django.db.utils import IntegrityError
from django.http import JsonResponse, HttpResponseBadRequest, HttpResponseRedirect, Http404, HttpResponse
from django.shortcuts import redirect, reverse
from django.utils.http import urlencode
from django.views.generic import FormView, TemplateView
from django.views.generic.detail import DetailView
from django.views.generic.edit import FormMixin
from tom_dataproducts.models import DataProduct, ObservationRecord, ReducedDatum
from tom_observations.facility import get_service_class
from tom_observations.views import ObservationCreateView
from tom_targets.models import (
Target, GLOBAL_TARGET_FIELDS, REQUIRED_NON_SIDEREAL_FIELDS,
REQUIRED_NON_SIDEREAL_FIELDS_PER_SCHEME
)
from tom_targets.views import TargetDetailView, TargetCreateView, TargetUpdateView
from rest_framework.exceptions import NotFound
from rest_framework.generics import CreateAPIView, ListAPIView, RetrieveAPIView
from rest_framework import serializers
from rest_framework.response import Response
from tom_education.forms import make_templated_form, DataProductActionForm, GalleryForm
from tom_education.models import (
AsyncProcess,
ASYNC_STATUS_CREATED,
ObservationAlert,
ObservationTemplate,
PipelineProcess,
TimelapsePipeline,
)
from tom_education.serializers import (
AsyncProcessSerializer,
ObservationAlertSerializer,
PipelineProcessSerializer,
TargetDetailSerializer,
TimestampField,
)
from tom_education.tasks import run_pipeline, send_task
logger = logging.getLogger(__name__)
class TemplatedObservationCreateView(ObservationCreateView):
supported_facilities = ('LCO',)
def get_form_class(self):
return make_templated_form(super().get_form_class())
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
# Get extra context data from the form object, if applicable
form = context['form']
if hasattr(form, 'get_extra_context'):
context.update(form.get_extra_context())
context['target'] = self.get_target()
return context
def serialize_fields(self, form):
        # TODO: Remove 'groups', which is a QuerySet that we do not use anyway
cleanform = dict(form.cleaned_data)
del cleanform['groups']
return json.dumps(cleanform)
def form_url(self):
"""
Return the URL for this form view for the current facility and target
"""
base = reverse("tom_education:create_obs", kwargs={'facility': self.get_facility()})
return base + '?' + urlencode({'target_id': self.get_target_id()})
def can_create_template(self):
"""
Return True if the current user can create a template for the current
facility, and False otherwise
"""
supported_facility = self.get_facility() in self.supported_facilities
return supported_facility and self.request.user.is_staff
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['form_url'] = self.form_url()
kwargs['show_create'] = self.can_create_template()
return kwargs
def get_initial(self):
initial = super().get_initial()
facility = self.get_facility()
template_id = self.request.GET.get('template_id')
if template_id:
template = ObservationTemplate.objects.filter(
target=self.get_target(),
facility=facility
).get(pk=template_id)
initial.update(json.loads(template.fields))
# Set identifier field to something unique based on the template
id_field = ObservationTemplate.get_identifier_field(facility)
initial[id_field] = template.get_identifier()
# Dates need to be converted to just YYYY-MM-DD to display in the
# widget properly
for field in ObservationTemplate.get_date_fields(facility):
dt = initial[field]
initial[field] = datetime.fromisoformat(dt).strftime('%Y-%m-%d')
return initial
def form_valid(self, form):
facility = self.get_facility()
if self.get_form_class().new_template_action[0] in form.data:
if not self.can_create_template():
raise PermissionDenied()
# Create new template
# TODO: deal with None below
name = form.cleaned_data.get(ObservationTemplate.get_identifier_field(facility))
try:
template = ObservationTemplate.objects.create(
name=name,
target=self.get_target(),
facility=facility,
fields=self.serialize_fields(form)
)
except IntegrityError:
form.add_error(None, 'Template name "{}" already in use'.format(name))
return self.form_invalid(form)
path = template.get_create_url(self.form_url())
return redirect(path)
return super().form_valid(form)
class ActionableTargetDetailView(FormMixin, TargetDetailView):
"""
Extend the target detail view to add a form to select a group of data
products and perform an action on them.
A method `handle_<name>(products, form)` is called to handle the form
submission, where `<name>` is the value of the action field in the form.
"""
form_class = DataProductActionForm
template_name = "tom_targets/target_dataview.html"
def get_success_url(self):
return reverse('tom_targets:detail', kwargs={'pk': self.get_object().pk})
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['target'] = self.get_object()
return kwargs
def get_context_data(self, *args, **kwargs):
self.object = self.get_object()
context = super().get_context_data(*args, **kwargs)
context['dataproducts_form'] = self.get_form()
context['pipeline_names'] = sorted(PipelineProcess.get_available().keys())
context['pipeline_flags'] = {}
for name in context['pipeline_names']:
pipeline_cls = PipelineProcess.get_subclass(name)
if pipeline_cls.flags:
context['pipeline_flags'][name] = pipeline_cls.flags
return context
def post(self, _request, *args, **kwargs):
form = self.get_form()
if form.is_valid():
return self.form_valid(form)
# Form is not rendered in the template, so add form errors as messages
# Note: this discards information about which field each error relates
# to
for err_list in form.errors.values():
for err_msg in err_list:
messages.error(self.request, err_msg)
return self.form_invalid(form)
def form_valid(self, form):
products = form.get_selected_products()
try:
method = getattr(self, 'handle_{}'.format(form.data['action']))
except AttributeError:
return HttpResponseBadRequest('Invalid action \'{}\''.format(form.data['action']))
return method(products, form)
def handle_pipeline(self, products, form):
try:
name = form.data['pipeline_name']
except KeyError:
return HttpResponseBadRequest('No pipeline_name given')
try:
pipeline_cls = PipelineProcess.get_subclass(name)
except KeyError:
return HttpResponseBadRequest("Invalid pipeline name '{}'".format(name))
# Get pipeline-specific flags. Initially set all to False; those
# present in form data will be set to True
flags = {f: False for f in pipeline_cls.flags} if pipeline_cls.flags else {}
for key in form.data:
prefix = 'pipeline_flag_'
if not key.startswith(prefix):
continue
flag = key[len(prefix):]
if flag not in flags:
continue
flags[flag] = True
target = self.get_object()
pipe = pipeline_cls.create_timestamped(target, products, flags)
send_task(run_pipeline, pipe, name)
return JsonResponse({'ok': True})
def handle_view_gallery(self, products, form):
# Redirect to gallery page with product PKs as GET params
product_pks = [str(p.pk) for p in products]
base = reverse('tom_education:gallery')
url = base + '?' + urlencode({'product_pks': ",".join(product_pks)})
return redirect(url)
def handle_delete(self, products, form):
product_pks = [str(p.pk) for p in products]
base = reverse('tom_education:delete_dataproducts')
url = base + '?' + urlencode({'product_pks': ",".join(product_pks)})
return redirect(url)
class GalleryView(FormView):
"""
Show thumbnails for a number of data products and allow the user to add a
selection of them to a data product group
"""
form_class = GalleryForm
template_name = 'tom_education/gallery.html'
def get_pks_string(self):
"""
Return comma separated string of products PKs from GET or POST params
"""
if self.request.method == 'GET':
obj = self.request.GET
else:
obj = self.request.POST
return obj.get('product_pks', '')
def get_products(self, pks_string):
try:
return self._products
except AttributeError:
pass
if pks_string:
pks = pks_string.split(',')
self._products = {DataProduct.objects.get(pk=int(pk)) for pk in pks}
else:
self._products = set([])
return self._products
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['products'] = self.get_products(self.get_pks_string())
return kwargs
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
# Comma separated PK string is required to construct form instance, so
# must be sent in the POST request too. Put it in the context so it
# can be sent as a hidden field
pks_string = self.get_pks_string()
context['product_pks'] = pks_string
products = self.get_products(pks_string)
if products:
context['products'] = products
context['show_form'] = True
else:
messages.error(self.request, 'No data products provided')
return context
def form_valid(self, form):
selected = form.get_selected_products()
group = form.cleaned_data['group']
for product in selected:
product.group.add(group)
product.save()
# Redirect to group detail view
msg = 'Added {} data products to group \'{}\''.format(len(selected), group.name)
messages.success(self.request, msg)
url = reverse('tom_dataproducts:group-detail', kwargs={'pk': group.pk})
return HttpResponseRedirect(url)
class AsyncStatusApi(ListAPIView):
"""
View that finds all AsyncProcess objects associated with a specified Target
and returns the listing in a JSON response
"""
serializer_class = AsyncProcessSerializer
def get_queryset(self):
try:
target = Target.objects.get(pk=self.kwargs['target'])
except Target.DoesNotExist:
raise Http404
return AsyncProcess.objects.filter(target=target).order_by('-created')
def list(self, request, *args, **kwargs):
queryset = self.filter_queryset(self.get_queryset())
serializer = self.get_serializer(queryset, many=True)
timestamp = TimestampField().to_representation(datetime.now())
return Response({'timestamp': timestamp, 'processes': serializer.data})
class PipelineProcessDetailView(DetailView):
model = PipelineProcess
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
if self.object.target:
context['target_url'] = reverse('tom_targets:detail', kwargs={'pk': self.object.target.pk})
return context
class PipelineProcessApi(RetrieveAPIView):
"""
Return information about a PipelineProcess in a JSON response
"""
queryset = PipelineProcess.objects.all()
serializer_class = PipelineProcessSerializer
@dataclass
class TargetDetailApiInfo:
"""
Wrapper object containing a target and its timelapses, for serialization
for the target detail API
"""
target: Target
timelapses: Iterable[TimelapsePipeline]
data: Target
class TargetDetailApiView(RetrieveAPIView):
"""
Return information about a target and its timelapses, and return a JSON
response
"""
serializer_class = TargetDetailSerializer
# Note: we specify a Target queryset to make use of rest_framework methods
# to retrieve Target model from the PK kwarg in URL, but it is NOT a Target
# object that will be serialized
queryset = Target.objects.all()
def get_object(self):
target = super().get_object()
tl_pipelines = TimelapsePipeline.objects.filter(
target=target, group__dataproduct__target__isnull=False,
status=ASYNC_STATUS_CREATED,
process_type='TimelapsePipeline'
).order_by('-terminal_timestamp')
return TargetDetailApiInfo(target=target, timelapses=tl_pipelines, data=target)
class ObservationAlertApiCreateView(CreateAPIView):
"""
Create an ObservationAlert by instantiating an ObservationTemplate for a
given target
"""
throttle_scope = 'observe'
serializer_class = ObservationAlertSerializer
def perform_create(self, serializer):
data = serializer.validated_data
try:
target = Target.objects.get(pk=data['target'])
facility_class = get_service_class(data['facility'])
template = ObservationTemplate.objects.get(
target=target,
name=data['template_name'],
facility=data['facility']
)
except Target.DoesNotExist:
raise NotFound(detail='Target not found.')
except ImportError:
raise NotFound(detail='Facility not found.')
except ObservationTemplate.DoesNotExist:
err = "Template '{}' not found for target '{}' and facility '{}'".format(
data['template_name'], target.name, data['facility']
)
raise NotFound(detail=err)
# Construct form for creating an observation
form_data = {
'target_id': target.pk,
'facility': facility_class.name
}
form_data.update(json.loads(template.fields))
id_field = ObservationTemplate.get_identifier_field(facility_class.name)
form_data[id_field] = template.get_identifier()
form_data.update(data.get('overrides', {}))
form = facility_class.get_form(None)(form_data) # observation type is not relevant to us
if not form.is_valid():
raise serializers.ValidationError(form.errors)
# Submit observation using facility class
observation_ids = facility_class().submit_observation(form.observation_payload())
assert len(observation_ids) == 1, (
'Submission created multiple observation IDs: {}'.format(observation_ids)
)
# Create Observation record and alert
ob = ObservationRecord.objects.create(
target=target,
facility=facility_class.name,
parameters=form.serialize_parameters(),
observation_id=observation_ids[0]
)
ObservationAlert.objects.create(email=data['email'], observation=ob)
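# A minimal sketch of the request body this endpoint appears to accept, inferred
# only from the validated_data keys used above ('target', 'facility',
# 'template_name', 'overrides', 'email'); the concrete values are hypothetical
# placeholders and the authoritative field definitions live in
# ObservationAlertSerializer.
#
# example_alert_payload = {
#     'target': 1,                          # primary key of an existing Target
#     'facility': 'LCO',                    # facility name resolvable by get_service_class()
#     'template_name': 'my-template',       # ObservationTemplate.name for that target/facility
#     'overrides': {'exposure_time': 60},   # optional overrides merged on top of the template fields
#     'email': 'observer@example.com',      # address attached to the created ObservationAlert
# }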
class DataProductDeleteMultipleView(LoginRequiredMixin, TemplateView):
template_name = 'tom_education/dataproduct_confirm_delete_multiple.html'
def get_products(self, pks_string):
pks = pks_string.split(',')
return {DataProduct.objects.get(pk=int(pk)) for pk in pks}
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
if self.request.method == 'GET':
context['next'] = self.request.META.get('HTTP_REFERER', '/')
context['product_pks'] = self.request.GET.get('product_pks', '')
context['to_delete'] = self.get_products(context['product_pks'])
return context
def post(self, request, *args, **kwargs):
prods = self.get_products(self.request.POST.get('product_pks', []))
for prod in prods:
ReducedDatum.objects.filter(data_product=prod).delete()
prod.data.delete()
prod.delete()
messages.success(request, 'Deleted {} data products'.format(len(prods)))
return HttpResponseRedirect(self.request.POST.get('next', '/'))
class NonSiderealFieldsMixin:
"""
Mixin for views which adds information to the template context about the
required fields per scheme for non-sidereal targets. This allows client
side JS to hide fields which are not applicable for the selected scheme.
Relies on the view having a method get_target_type() which returns
Target.SIDEREAL or Target.NON_SIDEREAL
"""
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
if self.get_target_type() == Target.NON_SIDEREAL:
form = self.get_form()
# Build list of base fields that should always be shown, including
# non-model fields declared in the form itself and extra fields
declared = list(form.declared_fields.keys())
extra = list(getattr(form, 'extra_fields', {}).keys())
base = GLOBAL_TARGET_FIELDS + REQUIRED_NON_SIDEREAL_FIELDS + declared + extra
context['non_sidereal_fields'] = json.dumps({
'base_fields': base,
'scheme_fields': REQUIRED_NON_SIDEREAL_FIELDS_PER_SCHEME,
})
return context
class EducationTargetCreateView(NonSiderealFieldsMixin, TargetCreateView):
pass
class EducationTargetUpdateView(NonSiderealFieldsMixin, TargetUpdateView):
def get_target_type(self):
return self.object.type
def photometry_to_csv(request, pk):
# Create the HttpResponse object with the appropriate CSV header.
target = Target.objects.get(pk=pk)
filename = target.name.replace(' ','_').replace('.','_')
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = f'attachment; filename="{filename}.csv"'
rdata = ReducedDatum.objects.filter(target=target, data_type='photometry').order_by('timestamp')
writer = csv.writer(response)
for rdatum in rdata:
try:
vals = json.loads(rdatum.value)
except json.decoder.JSONDecodeError:
logger.warning(f'Could not parse {rdatum.value} of {target.name}')
continue
writer.writerow([rdatum.timestamp.isoformat('T'), vals['magnitude'], vals['error']])
return response
| 38.127953
| 105
| 0.662812
|
15b8b559effaf60eb7c35ae83aca225e34563cb8
| 12,759
|
py
|
Python
|
shunt/shunt/shunt.py
|
velezj/project-manager
|
92e28e5718ca1302f6da0cf8b3d4a3bb5a1a8a72
|
[
"MIT"
] | null | null | null |
shunt/shunt/shunt.py
|
velezj/project-manager
|
92e28e5718ca1302f6da0cf8b3d4a3bb5a1a8a72
|
[
"MIT"
] | null | null | null |
shunt/shunt/shunt.py
|
velezj/project-manager
|
92e28e5718ca1302f6da0cf8b3d4a3bb5a1a8a72
|
[
"MIT"
] | null | null | null |
import shunt.logutils as logutils
logger = logutils.getLogger( __name__ )
import shunt.project_paths as project_paths
import shunt.shuntfile as shuntfile
import pathlib
import os
import os.path
import subprocess
import shutil
import jinja2
##============================================================================
##
# Processes a Shuntfile and materializes all of its views
def materialize_views( shuntfile_path, parents = None ):
logger.info( "Materializing Shuntfile '{0}'".format(
shuntfile_path ) )
# ok, load the shuntfile
sf = shuntfile.load_shuntfile( shuntfile_path )
# validate that the shuntfile has the needed structure
validate_shuntfile( sf )
logger.info( "Shuntfile at '{0}' valid!".format( shuntfile_path ) )
# ensure the materialization directory exists and is empty
materialize_path = project_paths.materialize_path( sf, parents )
if pathlib.Path( materialize_path ).exists():
logger.info( "Emptying previous materialization directory '{0}'".format( materialize_path ) )
_safe_empty_materialize_path( materialize_path )
materialize_path = ensure_path( materialize_path )
# ok, we want to take the template paths and process any which
# need processing.
# This is because we allow git, s3 and other URL-type paths.
logger.info( "Materializing template paths" )
template_paths = project_paths.find_all_template_paths(shuntfile_path)
template_paths = [ _materialize_path( p,
materialize_path )
for p in template_paths ]
logger.info( "Template paths: {0}".format( template_paths ) )
# ok, grab all of the resources and copy them
logger.info( "Copying resources" )
for res in shuntfile.shuntfile_get( sf, ['project','resources'], [] ):
# copy the resource
_copy_resource( template_paths,
materialize_path,
res )
# now, grab all of the wanted views
logger.info( "Materializing views" )
for view in shuntfile.shuntfile_get( sf, ['project','views'], [] ):
# materialize the view
logger.info( "Materialize view '{0}'".format( view ) )
res = _materialize_view(
template_paths,
materialize_path,
view )
logger.info( "Materialized '{0}' to '{1}'".format(
view, res ) )
# ok, now grab all of the subprojects and recurse
logger.info( "Processing subprojects" )
for proj in shuntfile.shuntfile_get( sf, ['project','subprojects'],[]):
new_parents = parents
if new_parents is None:
new_parents = []
new_parents = [ sf ] + new_parents
proj_path = ( pathlib.Path( shuntfile_path ) / ".." / proj / shuntfile.SHUNT_FILENAME ).resolve().as_posix()
logger.info( "Processing Subproject '{0}'".format( proj ) )
materialize_views( proj_path, parents = new_parents )
# done message log
logger.info( "Done Materializing Shutnfile '{0}'".format( shuntfile_path ) )
##============================================================================
##
# Actually materialize a single view.
# This will render the jinja2 template defined by the view and
# output the result into the materialization path directory, creating
# it if need be
def _materialize_view( template_paths,
materialize_path,
view_name ):
# ok, create a jinja2 environment with the given template paths
env = jinja2.Environment(
loader = jinja2.FileSystemLoader( template_paths,
followlinks=True ) )
logger.info( "_materialize_view: jinja2 template paths set to '{0}'".format(
template_paths ) )
# grab the template file using the view name
template = env.get_template( view_name )
# ok, render the template to a file with the view name
mpath = ( pathlib.Path( materialize_path ) / view_name ).resolve().as_posix()
with open( mpath, 'w' ) as f:
f.write( template.render() )
logger.info( " rendered template for view '{1}' into '{0}'".format(
mpath,
view_name ) )
return mpath
##============================================================================
##
# Given a path and a materialize_path,
# materializes the path if needed and returns the resulting local path.
#
# This handles URL/URI style paths by downloading them (recursively)
# into the materialize_path when the destination does not already exist
def _materialize_path( path,
materialize_path,
force=False ):
# string paths are treated as already materialized
if isinstance( path, str ):
return path
# ok, ensure that path is a structure (dictionary)
if not isinstance( path, dict ):
msg = "Invalid path object. Expected string or dictionary but got type={0} '{1}'".format(
type(path),
path )
raise ValueError( msg )
# make sure we at least have a source and destination
if 'source' not in path or 'destination' not in path:
msg = "Malformed path object. Paths need to have 'source' and 'destination' keys defined. Path = '{0}'".format( path )
raise ValueError( msg )
# ok, lookup the source
source = path['source']
if source not in KNOWN_PATH_MATERIALIZATION_SOURCES:
msg = "Invalid path source '{0}'. We don't know how to materialize such a path".format( source )
raise ValueError( msg )
# check if destination already there and not forcing
if ( pathlib.Path( materialize_path ) / path['destination'] ).exists() and not force:
logger.info( "Skipping materialization of path '{0}' because destination exists and not forcing".format( path ) )
return ( pathlib.Path(materialize_path) / path['destination'] ).resolve().as_posix()
# ok, grab the materializer and run it
return KNOWN_PATH_MATERIALIZATION_SOURCES[ source ]( path,
materialize_path,
force = force )
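##============================================================================
##
# A hypothetical example of a path object as consumed by _materialize_path,
# inferred from the keys this module reads ('source', 'destination', 'args').
# The repository URL and directory names below are illustrative placeholders,
# not values taken from any real Shuntfile.
#
# example_git_template_path = {
#     'source': 'git',               # dispatched via KNOWN_PATH_MATERIALIZATION_SOURCES
#     'destination': 'templates',    # path expected under materialize_path after cloning
#     'args': [ 'https://example.com/templates.git', 'templates' ],  # appended to `git clone`
# }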
##============================================================================
##
# Materialize a git path
def _git_materialize_path( path,
materialize_path,
force=False ):
logger.info( "Materializing GIT path: '{0}' into '{1}'".format(
path, materialize_path ) )
donep = subprocess.run( ['git','clone'] + path['args'],
cwd = materialize_path,
check = True )
return (pathlib.Path(materialize_path) / path['destination'] ).resolve().as_posix()
##============================================================================
##
# Materialize an s3 path
def _s3_materialize_path( path,
materialize_path,
force=False ):
logger.info( "Materializing S3 path: '{0}' into '{1}'".format(
path, materialize_path ) )
donep = subprocess.run( ['aws','s3'] + path['args'],
cwd = materialize_path,
check = True )
return (pathlib.Path(materialize_path) / path['destination'] ).resolve().as_posix()
##============================================================================
##
# A mapping from source type to the path materializer function for that source
KNOWN_PATH_MATERIALIZATION_SOURCES = {
'git' : _git_materialize_path,
's3' : _s3_materialize_path,
}
##============================================================================
##
# Validates that a given shuntfile object has the required structure
# Raises error if not valid
def validate_shuntfile( sf ):
pass
##============================================================================
##
# Copy a resource into the materialization path
def _copy_resource( template_paths,
materialize_path,
res ):
# get source path
source_path = None
if isinstance( res, str ):
source_path = pathlib.Path( res )
else:
source_path = res.get( 'source', None )
# resolve the source path to an actual path of a resource that exists
source_path = resolve_path( template_paths,
source_path )
if source_path is None:
msg = "Unable to copy resource '{0}': path does not exists in any of {1}".format( res, template_paths )
raise ValueError( msg )
# get the target path
target_path = None
if isinstance( res, str ):
target_path = res
else:
target_path = res.get( "target", None )
if target_path is None:
msg = "Unable to copy resource '{0}', target path is not defined".format(res)
raise ValueError( msg )
# resolve the target path
if pathlib.Path( target_path ).is_absolute():
target_path = pathlib.Path( materialize_path ).joinpath( pathlib.Path( target_path ).name ).resolve().as_posix()
else:
target_path = pathlib.Path( materialize_path ).joinpath( target_path ).resolve().as_posix()
# ok, copy the file
if pathlib.Path( source_path ).is_dir():
shutil.copytree( source_path,
target_path )
else:
shutil.copyfile( source_path,
target_path )
logger.info( "copied resource '{0}' TO -> '{1}'".format(
source_path,
target_path ) )
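##============================================================================
##
# A hypothetical sketch of the resource entries consumed by _copy_resource,
# inferred from the keys read above ('source', 'target'). A resource may be a
# plain relative path string or a dictionary; the file names are placeholders.
#
# example_resources = [
#     'variables.tf',                                             # string: same relative source and target path
#     { 'source': 'common/backend.tf', 'target': 'backend.tf' },  # dict: copy to a different target name
# ]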
##============================================================================
##
# Given a set of paths and a relative path,
# searches the paths in order and returns the first which includes
# a file or directory at the given relative path
def resolve_path( paths,
relative_path ):
# check arguments for None which results in None
if relative_path is None:
return None
if paths is None:
return None
# if we are given an absolute path, return it if it exists
# otherwise return None
if pathlib.Path( relative_path ).is_absolute():
if pathlib.Path( relative_path ).exists():
return pathlib.Path( relative_path ).resolve().as_posix()
else:
return None
# Ok, check the path relative to the set of paths in order
# If it exists then return it as the path
for p in paths:
if pathlib.Path( p ).joinpath( relative_path ).exists():
return pathlib.Path( p ).joinpath( relative_path ).resolve().as_posix()
return None
##============================================================================
##
# make sure a given directory exists
def ensure_path( p ):
materialize_path = pathlib.Path( p ).resolve()
if not materialize_path.exists():
materialize_path.mkdir( parents=True )
materialize_path = materialize_path.as_posix()
logger.info( " creating directory: '{0}'".format(
materialize_path ) )
return materialize_path
##============================================================================
##============================================================================
##
# *Safely* empty the materialization path.
# This takes care of ensuring that the terraform state does not
# contain any resources, otherwise throws an exception
def _safe_empty_materialize_path( materialize_path ):
# ok, call terraform show and make sure nothing is shown
cmd = [ 'terraform', 'show', "-no-color" ]
donep = subprocess.run( cmd,
cwd = materialize_path,
stdout=subprocess.PIPE,
check = True )
if False and len(donep.stdout.strip()) > 1 and donep.stdout.strip().lower() != b"no state.":
raise RuntimeError( "Cannot not *safely* delete materialization path '{0}'. `terraform show` still returns resources being managed by terraform. Run `terraform destroy` first before trying to apply a shuntfile again! resources still shown = {1}".format( materialize_path, donep.stdout.strip() ) )
shutil.rmtree( materialize_path )
##============================================================================
##============================================================================
##============================================================================
def main():
import logging
logging.basicConfig( level=logging.INFO )
import argparse
parser = argparse.ArgumentParser()
parser.add_argument( 'shuntfile' )
args = parser.parse_args()
sf_path = args.shuntfile
materialize_views( sf_path )
##============================================================================
if __name__ == '__main__':
main()
| 37.090116
| 304
| 0.569715
|
5e4d7fd00cbffb5ba28df5f925664e7ded4cbecf
| 2,293
|
py
|
Python
|
cryptodoge/types/header_block.py
|
grayfallstown-cryptodoge/cryptodoge
|
ffeb5218ce184a56073a5dc0ac5acddba3728bd4
|
[
"Apache-2.0"
] | 10
|
2021-08-21T17:41:51.000Z
|
2022-02-09T04:28:12.000Z
|
cryptodoge/types/header_block.py
|
grayfallstown-cryptodoge/cryptodoge
|
ffeb5218ce184a56073a5dc0ac5acddba3728bd4
|
[
"Apache-2.0"
] | 1
|
2021-12-15T21:23:38.000Z
|
2021-12-15T21:23:38.000Z
|
cryptodoge/types/header_block.py
|
grayfallstown-cryptodoge/cryptodoge
|
ffeb5218ce184a56073a5dc0ac5acddba3728bd4
|
[
"Apache-2.0"
] | 2
|
2021-08-21T18:22:59.000Z
|
2021-12-10T07:12:18.000Z
|
from dataclasses import dataclass
from typing import List, Optional
from cryptodoge.types.blockchain_format.foliage import Foliage, FoliageTransactionBlock, TransactionsInfo
from cryptodoge.types.blockchain_format.reward_chain_block import RewardChainBlock
from cryptodoge.types.blockchain_format.vdf import VDFProof
from cryptodoge.types.end_of_slot_bundle import EndOfSubSlotBundle
from cryptodoge.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class HeaderBlock(Streamable):
# Same as a FullBlock but without TransactionInfo and Generator (but with filter), used by light clients
finished_sub_slots: List[EndOfSubSlotBundle] # If first sb
reward_chain_block: RewardChainBlock # Reward chain trunk data
challenge_chain_sp_proof: Optional[VDFProof] # If not first sp in sub-slot
challenge_chain_ip_proof: VDFProof
reward_chain_sp_proof: Optional[VDFProof] # If not first sp in sub-slot
reward_chain_ip_proof: VDFProof
infused_challenge_chain_ip_proof: Optional[VDFProof] # Iff deficit < 4
foliage: Foliage # Reward chain foliage data
foliage_transaction_block: Optional[FoliageTransactionBlock] # Reward chain foliage data (tx block)
transactions_filter: bytes # Filter for block transactions
transactions_info: Optional[TransactionsInfo] # Reward chain foliage data (tx block additional)
@property
def prev_header_hash(self):
return self.foliage.prev_block_hash
@property
def prev_hash(self):
return self.foliage.prev_block_hash
@property
def height(self):
return self.reward_chain_block.height
@property
def weight(self):
return self.reward_chain_block.weight
@property
def header_hash(self):
return self.foliage.get_hash()
@property
def total_iters(self):
return self.reward_chain_block.total_iters
@property
def log_string(self):
return "block " + str(self.header_hash) + " sb_height " + str(self.height) + " "
@property
def is_transaction_block(self) -> bool:
return self.reward_chain_block.is_transaction_block
@property
def first_in_sub_slot(self) -> bool:
return self.finished_sub_slots is not None and len(self.finished_sub_slots) > 0
| 36.983871
| 108
| 0.758395
|
b078a0d6a355f95b480cbfa4dc9d637fdf591e95
| 18,003
|
py
|
Python
|
temp/module_Test.py
|
awesome-davian/SALT_FrontEnd
|
0c2060945099ab43e612ce72a5ee27f3ebea9719
|
[
"MIT"
] | 4
|
2017-08-16T11:55:25.000Z
|
2020-11-22T09:48:22.000Z
|
temp/module_Test.py
|
awesome-davian/FrontEnd
|
0c2060945099ab43e612ce72a5ee27f3ebea9719
|
[
"MIT"
] | null | null | null |
temp/module_Test.py
|
awesome-davian/FrontEnd
|
0c2060945099ab43e612ce72a5ee27f3ebea9719
|
[
"MIT"
] | null | null | null |
import matlab.engine
import logging
import math
import collections
import constants
import numpy as np
import pymongo
import sys
from nltk import PorterStemmer
import time
from datetime import datetime
from operator import itemgetter
porter_stemmer=PorterStemmer();
sys.path.insert(0, '../')
conn = pymongo.MongoClient("localhost", constants.DEFAULT_MONGODB_PORT);
# dbname = constants.DB_NAME;
# db = conn[dbname];
class TopicModelingModule():
def __init__(self, DB):
import nmf_core
logging.debug('init');
self.nmf = nmf_core
self.db = DB
logging.info("loading Matlab module...")
self.eng = matlab.engine.start_matlab()
self.eng.cd('./matlab/discnmf_code/')
logging.info("Done: loading Matlab module")
logging.info("loading Term-doc matrices")
self.eng.script_init(nargout=0)
logging.info("Done: loading Term-doc matrices")
# self.eng.function_test('mtx_2013_d309_13_2418_5112',nargout=1)
def get_tile_id(self, level, x, y):
pow2 = 1 << level;
tile_id = x * pow2 + y;
return round(tile_id);
def lon_to_x(self, level, lon):
pow2 = 1 << level;
x = (lon + 180.0)/360.0 * pow2;
return x;
def lat_to_y(self, level, lat):
latR = math.radians(lat);
pow2 = 1 << level;
y = (1 - math.log(math.tan(latR) + 1 / math.cos(latR)) / math.pi) / 2 * pow2;
y = pow2 - y;
return y;
def x_to_lon(self, level, x):
pow2 = 1 << level;
lon = (x / pow2 * 360.0) - 180.0;
return lon;
def y_to_lat(self, level, y):
pow2 = 1 << level;
n = -math.pi + (2.0*math.pi*y)/pow2;
lat = math.degrees(math.atan(math.sinh(n)))
return lat;
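# A small worked example of the tile arithmetic above, computed by hand from the
# formulas as written (including the flipped y axis); the numbers are purely
# illustrative:
# at level 2, lon_to_x(2, 0.0) == 2.0 and lat_to_y(2, 0.0) == 2.0,
# so get_tile_id(2, 2, 2) == round(2 * 4 + 2) == 10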
def get_neighbor_ids(self, level, x, y):
neighbor = [];
neighbor.append(self.get_tile_id(level, x+1, y+1));
neighbor.append(self.get_tile_id(level, x+1, y+0));
neighbor.append(self.get_tile_id(level, x+1, y-1));
neighbor.append(self.get_tile_id(level, x+0, y+1));
#neighbor.append(self.get_tile_id(level, x+0, y+0)); --> it's me.
neighbor.append(self.get_tile_id(level, x+0, y-1));
neighbor.append(self.get_tile_id(level, x-1, y+1));
neighbor.append(self.get_tile_id(level, x-1, y+0));
neighbor.append(self.get_tile_id(level, x-1, y-1));
return neighbor;
def get_docs_including_word(self, level, x, y, year, yday, word_list):
doc_lists=collections.OrderedDict()
map_idx_to_word, map_word_to_idx, bag_words, stem_bag_words = self.db.read_voca()
termdoc_dir = constants.MTX_DIR
file_name = 'mtx_' + str(year) + '_d' + str(yday) + '_' + str(level) + '_' + str(x) + '_' + str(y)
word_idxs = []
for w in word_list:
stemmed = porter_stemmer.stem(w)
word_idxs.append(map_word_to_idx[stemmed])
logging.debug('inword: %s, wordidx: %d', stemmed, map_word_to_idx[stemmed])
with open(termdoc_dir + file_name, 'r', encoding='UTF8') as f:
lines=f.readlines()
for line in lines:
v = line.split('\t')
word_idx = int(v[0])
doc_idx = int(v[1])
for w in word_idxs:
if word_idx == w:
doc_lists[doc_idx] = 1
# doc_lists.append(doc_idx)
break;
# # duplicate doc indices could be appended, so the algorithm was changed as above.
# for word in word_list:
# stem_word=porter_stemmer.stem(word)
# for line in lines:
# v = line.split('\t')
# word_idx = int(v[0])
# doc_idx = int(v[1])
# if(map_word_to_idx[stem_word]==word_idx):
# doc_lists.append(doc_idx)
# logging.info(doc_lists)
return doc_lists
def make_sub_term_doc_matrix(self, level, x, y, year, yday, date, include_word_list, exclude_word_list):
map_idx_to_word, map_word_to_idx, bag_words, stem_bag_words = self.db.read_voca()
include_docs = self.get_docs_including_word(level, x, y, year, yday, include_word_list)
exclude_docs = self.get_docs_including_word(level, x, y, year, yday, exclude_word_list)
# term_doc_mtx = self.getTermDocMtx(level, x, y, date)
mtx = self.db.read_spatial_mtx(constants.MTX_DIR, year, yday, level, x, y)
new_tile_mtx=[]
word_idx=0;
for each in mtx:
flag = True
if len(include_docs) > 0 and (each[1] in include_docs):
flag = True
if len(exclude_docs) > 0 and (each[1] in exclude_docs):
flag = False
if flag == True:
new_tile_mtx.append([int(each[0]), int(each[1]), int(each[2]), int(each[3])])
new_tile_mtx = np.array(new_tile_mtx, dtype=np.int32).reshape(int(np.size(new_tile_mtx)/4), 4)
return new_tile_mtx;
def run_topic_modeling(self, mtx_name, xcls_value, num_clusters, num_keywords, include_words, exclude_words):
start_time = time.time()
logging.debug('run_topic_modeling(%s, %d)', mtx_name, xcls_value);
start_time_makeab = time.time()
# A = matlab.double(mtx.tolist());
# logging.debug('mtx size: %d', len(A))
elapsed_time_makeab= time.time() - start_time_makeab
logging.info('run_topic_modeling -make ab Execution time: %.3fms', elapsed_time_makeab)
start_time_function_run = time.time()
map_idx_to_word, map_word_to_idx, bag_words, stem_bag_words = self.db.read_voca()
voca = []
for key, value in map_idx_to_word.items():
temp = [key,value]
# voca.append(temp)
voca.append(value)
#[topics_list] = self.eng.function_run_extm(A, B, xcls_value, voca, constants.DEFAULT_NUM_TOPICS, constants.DEFAULT_NUM_TOP_K, nargout=3);
# [topics_list, w_scores, t_scores, xscore] = self.eng.function_run_extm(A, xcls_value, voca, num_clusters, num_keywords, nargout=4);
exclusiveness_value = xcls_value/5
[topics_list, w_scores, t_scores] = self.eng.function_run_extm_inex(mtx_name, exclusiveness_value, constants.STOP_WORDS, include_words, exclude_words, voca, num_clusters, num_keywords, nargout=3)
logging.debug(topics_list)
logging.debug(w_scores)
logging.debug(t_scores)
if len(topics_list) == 0:
return []
topics = np.asarray(topics_list);
topics = np.reshape(topics, (num_clusters, num_keywords));
word_scores = np.asarray(w_scores);
word_scores = np.reshape(word_scores, (num_clusters, num_keywords));
topic_scores = np.asarray(t_scores[0]);
# logging.debug(topics_list)
# logging.debug(topics)
# logging.debug(w_scores)
# logging.debug(word_scores)
# logging.debug(t_scores)
# logging.debug(topic_scores)
elapsed_time_function_run = time.time() - start_time_function_run
logging.info('run_topic_modeling -function_run_extm Execution time: %.3fms', elapsed_time_function_run)
start_time_replace = time.time()
# find original word and replace
ret_topics = []
topic_id = 0;
for topic in topics:
ret_topic = {}
ret_topic['score'] = topic_scores[topic_id]
ret_words = []
for rank, word in enumerate(topic):
word_score = word_scores[topic_id, rank]
temp_count = 0
temp_word = ''
s_count = 0
res_word = ''
try:
for key, value in stem_bag_words[word].items():
res_word = key
break
except KeyError:
logging.debug('KeyError: %s', word)
continue
word_cnt = bag_words[word]
ret_word = {}
ret_word['word'] = res_word
ret_word['score'] = word_score
ret_word['count'] = word_cnt
ret_words.append(ret_word)
ret_topic['words'] = ret_words
ret_topics.append(ret_topic)
topic_id += 1
elapsed_time_replace= time.time() - start_time_replace
logging.info('run_topic_modeling -replace Execution time: %.3fms', elapsed_time_replace)
return ret_topics
def get_ondemand_topics(self, level, x, y, year, yday, topic_count, word_count, exclusiveness, include_words, exclude_words):
logging.debug('get_ondemand_topics(%d, %d, %d, %d, %d, %d)', level, x, y, year, yday, exclusiveness);
nmtx_name = 'mtx_' + str(year) + '_d' + str(yday) + '_' + str(level) + '_' + str(x) + '_' + str(y)
ondemand_topics=[]
# sub_mtx = self.make_sub_term_doc_matrix(level, x, y, year, yday, include_words, exclude_words)
#mtx = self.db.read_spatial_mtx(constants.MTX_DIR, year, yday, level, x, y)
# ondemand_topics = self.run_topic_modeling(mtx, level, x, y, exclusiveness, topic_count, word_count, include_words, exclude_words)
# nmtx_name = 'mtx_2013_d308_12_1209_2556'
ondemand_topics = self.run_topic_modeling(nmtx_name, exclusiveness, topic_count, word_count, include_words, exclude_words)
return ondemand_topics;
def get_topics(self, level, x, y, topic_count, word_count, include_words, exclude_words, exclusiveness, date):
start_time=time.time()
logging.debug('get_topics(%s, %s, %s)', level, x, y)
tile_id = self.get_tile_id(level, x, y);
# voca_hash= self.db.get_vocabulary_hashmap();
# voca= self.db.get_vocabulary();
# for testing
#exclusiveness = 50;
exclusiveness_local = exclusiveness / 5;
logging.debug('exclusiveness: %f', exclusiveness_local);
result = {};
tile = {};
tile['x'] = x;
tile['y'] = y;
tile['level'] = level;
result['tile'] = tile;
# result['exclusiveness'] = exclusiveness
date = datetime.fromtimestamp(int(int(date)/1000))
year = date.timetuple().tm_year
yday = date.timetuple().tm_yday
# result['exclusiveness_score'] = self.db.get_xscore(level, x, y, year, yday)
topics = []
# if include or exclude exist
logging.info('len(include_words): %d, len(exclude_words): %d',len(include_words), len(exclude_words))
if len(include_words)==0 and len(exclude_words) ==0:
topics = self.db.get_topics(int(level), int(x), int(y), year, yday, topic_count, word_count, exclusiveness);
logging.info('done get_topics')
else:
topics = self.get_ondemand_topics(level, x, y, year, yday, topic_count, word_count, exclusiveness, include_words, exclude_words)
logging.info('done ondemand_topics')
# topics = self.db.get_topics(level, x, y, year, yday, topic_count, word_count, exclusiveness);
result['topic'] = topics;
end_time=time.time() - start_time
logging.info('end of get_topics Execution time: %.3fms' , end_time)
print(result);
return result;
def get_related_docs(self, level, x, y, word, date):
start_time = time.time()
# get docs including the word
date = datetime.fromtimestamp(int(int(date)/1000))
year = date.timetuple().tm_year
day_of_year = date.timetuple().tm_yday
logging.debug('get_related_docs(%s, %s, %s, %s, %d)', level, x, y, word, day_of_year)
s_word = porter_stemmer.stem(word)
word_idx = self.db.get_global_voca_map_word_to_idx()[s_word]
logging.info('word: %s, s_word: %s, word_idx: %d', word, s_word, word_idx)
map_related_docs = self.db.get_related_docs_map()[word_idx]
logging.info('stemmed word: %s, idx: %d', s_word, word_idx)
d = str(constants.DATA_RANGE).split('-')
d[0] = '20'+d[0][0:2] + '-' + d[0][2:4] + '-' + d[0][4:6]
d[1] = '20'+d[1][0:2] + '-' + d[1][2:4] + '-' + d[1][4:6]
logging.debug(d[0])
logging.debug(d[1])
date_format = "%Y-%m-%d"
start_date = datetime.strptime(d[0], date_format).timetuple().tm_yday
end_date = datetime.strptime(d[1], date_format).timetuple().tm_yday if len(d) > 1 else start_date
# start_date = int(d[0])
# end_date = int(d[1]) if len(d) > 1 else int(d[0])
max_compare = max([end_date - day_of_year, day_of_year - start_date])
logging.debug('start date: %d', start_date)
logging.debug('end date: %d', end_date)
logging.debug('max_compare: %d', max_compare)
total_docs = []
# size = 0
# f_size = 0
# docs = self.db.get_related_docs_map()
# for each in docs.items():
# try:
# size += len(each[1][day_of_year])
# for txt in each[1][day_of_year]:
# f_size += txt.count('love')
# if txt.count('love') > 0:
# logging.debug('Found! [%d]: %s', each[0], str(txt).encode('utf-8'))
# except KeyError:
# pass
# logging.debug('size of %d : %d, f_size: %d', day_of_year, size, f_size)
logging.debug('case: %s', day_of_year)
try:
doc_list = map_related_docs[day_of_year]
for doc in doc_list:
d = {}
d['username'] = str(doc[0])
d['created_at'] = int(doc[1])
d['text'] = str(doc[2])
total_docs.append(d)
except KeyError:
pass
logging.debug('len: %s', len(total_docs))
if len(total_docs) < constants.MAX_RELATED_DOCS:
for each in range(1, max_compare+1):
date_cursor = day_of_year - each
if date_cursor < start_date:
continue
else:
logging.debug('case: %s', date_cursor)
try:
doc_list = map_related_docs[date_cursor]
for doc in doc_list:
d = {}
d['username'] = str(doc[0])
d['created_at'] = int(doc[1])
d['text'] = str(doc[2])
total_docs.append(d)
except KeyError:
pass
logging.debug('len: %s', len(total_docs))
if len(total_docs) > constants.MAX_RELATED_DOCS:
break
date_cursor = day_of_year + each
if date_cursor > end_date:
continue
else:
logging.debug('case: %s', date_cursor)
try:
doc_list = map_related_docs[date_cursor]
for doc in doc_list:
d = {}
d['username'] = str(doc[0])
d['created_at'] = int(doc[1])
d['text'] = str(doc[2])
total_docs.append(d)
except KeyError:
pass
logging.debug('len: %s', len(total_docs))
if len(total_docs) > constants.MAX_RELATED_DOCS:
break
total_docs_sorted = sorted(total_docs[:constants.MAX_RELATED_DOCS], key=itemgetter('created_at'), reverse=True)
result = {}
tile = {}
tile['x'] = x
tile['y'] = y
tile['level'] = level
result['tile'] = tile
result['documents'] = total_docs_sorted[:constants.MAX_RELATED_DOCS]
elapsed_time=time.time()-start_time
logging.info('get_related_docs elapsed: %.3fms', elapsed_time)
return result
def getTermDocMtx(self, level, x, y, date):
date = datetime.fromtimestamp(int(int(date)/1000))
year = date.timetuple().tm_year
day_of_year = date.timetuple().tm_yday
tile_name = 'mtx_' + str(year) + '_d' + str(day_of_year) + '_' + str(level) + '_' + str(x) + '_' + str(y)
#neighbor_tile_name = 'docmap_' + str(year) + '_d' + str(day_of_year) + '_' + str(level) + '_' + str(x) + '_' + str(y)
mtx_file = open(constants.MTX_DIR + tile_name, 'r', encoding='UTF8')
#mtx_file = open(constants.MTX_DIR + neighbor_tile_name, 'r', en)
lines = mtx_file.readlines()
tile_mtxs = [];
temp_mtx = []
nmtx = []
for line in lines:
v = line.split('\t')
item = [float(v[0]), float(v[1]), float(v[2])]
temp_mtx = np.append(temp_mtx, item, axis=0)
for nid in range(0, 9):
temp_mtx = []
for each in mtx_dict[nid]:
v = each.split('\t')
item = np.array([float(v[0]), float(v[1]), float(v[2])], dtype=np.double)
temp_mtx = np.append(temp_mtx, item, axis=0)
temp_mtx = np.array(temp_mtx, dtype=np.double).reshape(len(mtx_dict[nid]), 3)
if nid == 0:
mtx = temp_mtx
else:
nmtx.append(temp_mtx)
mtx_file.close()
logging.info("#lines: %s", len(lines))
return mtx, nmtx
def get_tile_detail_info(self, level, x, y, date_from, date_to):
logging.debug('get_tile_detail_info(%s, %s, %s, %s, %s)', level, x, y, date_from, date_to)
date_from = int(date_from)
date_to = int(date_to)
date_intv = 86400000
result = {};
tile = {};
tile['x'] = x;
tile['y'] = y;
tile['level'] = level;
result['tile'] = tile;
time_graph = []
all_topics = []
date_unix = date_from
while True:
if date_unix > date_to:
break
date = datetime.fromtimestamp(int(date_unix/1000))
year = date.timetuple().tm_year
mon = date.timetuple().tm_mon
mday = date.timetuple().tm_mday
yday = date.timetuple().tm_yday
date_unix += date_intv
exclusiveness_score = self.db.get_xscore(level, x, y, year, yday) #fix
if exclusiveness_score > 0.0:
item = {}
item['score'] = exclusiveness_score
item['date'] = datetime(year=year, month=mon, day=mday).strftime("%d-%m-%Y")
time_graph.append(item)
item = {}
item['date'] = datetime(year=year, month=mon, day=mday).strftime("%d-%m-%Y")
topics = []
for xvalue in range(0, 6):
topic = {}
topic['exclusiveness'] = xvalue
topic['topic'] = self.db.get_topics(int(level), int(x), int(y), year, yday, constants.DEFAULT_NUM_TOPICS, constants.DEFAULT_NUM_TOP_K, xvalue)
if len(topic['topic']) > 0:
topics.append(topic)
item['topics'] = topics
if len(item['topics']) > 0:
all_topics.append(item)
result['time_grath'] = time_graph
result['all_topics'] = all_topics
return result;
def get_heatmaps(self, level, x, y, date_from, date_to):
# logging.debug('get_heatmaps(%d, %d, %d, %d, %d)', level, x, y, date_from, date_to)
date_from = int(date_from)
date_to = int(date_to)
date_intv = 86400000
result = []
date_unix = date_from
while True:
if date_unix > date_to:
break
date = datetime.fromtimestamp(int(date_unix/1000))
year = date.timetuple().tm_year
yday = date.timetuple().tm_yday
date_unix += date_intv
# get heatmap list from db
exclusiveness_score = self.db.get_xscore(level, x, y, year, yday)
xcls_scores = {}
tile = {}
tile['x'] = x
tile['y'] = y
tile['level'] = level
xcls_scores['tile'] = tile
xcls_scores['exclusiveness_score'] = []
xcls_score = {}
xcls_score['value'] = exclusiveness_score
date_str = date.strftime("%d-%m-%Y")
# logging.debug('date_str: %s', date_str)
xcls_score['date'] = date_str
xcls_scores['exclusiveness_score'].append(xcls_score)
result.append(xcls_scores)
return result
def get_word_info(self, level, x, y, word):
# TBD
return "word_info";
| 27.911628
| 198
| 0.641004
|
adc84a2d151bda1d2187c9c3eb63cec0abd3168b
| 13,870
|
py
|
Python
|
tests/test_cds.py
|
PhilippeCharlot22/python-onapsdk
|
5f1398d9c344a985e830bba36286f95c46e4807c
|
[
"Apache-2.0"
] | null | null | null |
tests/test_cds.py
|
PhilippeCharlot22/python-onapsdk
|
5f1398d9c344a985e830bba36286f95c46e4807c
|
[
"Apache-2.0"
] | 10
|
2021-09-20T15:42:47.000Z
|
2021-09-23T12:49:51.000Z
|
tests/test_cds.py
|
PhilippeCharlot22/python-onapsdk
|
5f1398d9c344a985e830bba36286f95c46e4807c
|
[
"Apache-2.0"
] | 2
|
2021-09-20T13:53:12.000Z
|
2021-09-21T08:05:58.000Z
|
# SPDX-License-Identifier: Apache-2.0
import json
import os.path
from pathlib import Path
from tempfile import TemporaryDirectory
from unittest.mock import MagicMock, patch, PropertyMock, mock_open
from pytest import raises
from onapsdk.cds.blueprint import Blueprint, Mapping, MappingSet, Workflow
from onapsdk.cds.blueprint_processor import Blueprintprocessor
from onapsdk.cds.cds_element import CdsElement
from onapsdk.cds.data_dictionary import DataDictionary, DataDictionarySet
from onapsdk.exceptions import FileError, ParameterError, RequestError, ValidationError
DD_1 = {
"name": "vf-module-name",
"tags": "vf-module-name",
"data_type": "string",
"description": "vf-module-name",
"entry_schema": "string",
"updatedBy": "Singal, Kapil <ks220y@att.com>",
"definition": {
"tags": "vf-module-name",
"name": "vf-module-name",
"property": {
"description": "vf-module-name",
"type": "string"
},
"updated-by": "Singal, Kapil <ks220y@att.com>",
"sources": {
"input": {
"type": "source-input"
},
"default": {
"type": "source-default",
"properties": {}
}
}
}
}
RAW_DD = {
"tags": "vf-module-name",
"name": "vf-module-name",
"property": {
"description": "vf-module-name",
"type": "string"
},
"updated-by": "Singal, Kapil <ks220y@att.com>",
"sources": {
"input": {
"type": "source-input"
},
"default": {
"type": "source-default",
"properties": {}
}
}
}
vLB_CBA_Python_meta_bytes = b'TOSCA-Meta-File-Version: 1.0.0\nCSAR-Version: 1.0\nCreated-By: PLATANIA, MARCO <platania@research.att.com>\nEntry-Definitions: Definitions/vLB_CDS.json\nTemplate-Tags: vDNS-CDS-test1\nContent-Type: application/vnd.oasis.bpmn\nTemplate-Name: vDNS-CDS-test1\nTemplate-Version: 1.0'
vLB_CBA_Python_base_template_mapping_bytes = b'[\n {\n "name": "service-instance-id",\n "property": {\n "description": "",\n "required": false,\n "type": "string",\n "status": "",\n "constraints": [\n {}\n ],\n "entry_schema": {\n "type": ""\n }\n },\n "input-param": false,\n "dictionary-name": "service-instance-id",\n "dictionary-source": "input",\n "dependencies": [],\n "version": 0\n },\n {\n "name": "vnf-id",\n "property": {\n "description": "",\n "required": false,\n "type": "string",\n "status": "",\n "constraints": [\n {}\n ],\n "entry_schema": {\n "type": ""\n }\n },\n "input-param": false,\n "dictionary-name": "vnf-id",\n "dictionary-source": "input",\n "dependencies": [],\n "version": 0\n },\n {\n "name": "vdns_vf_module_id",\n "property": {\n "description": "",\n "required": false,\n "type": "string",\n "status": "",\n "constraints": [\n {}\n ],\n "entry_schema": {\n "type": ""\n }\n },\n "input-param": false,\n "dictionary-name": "vdns_vf_module_id",\n "dictionary-source": "sdnc",\n "dependencies": [\n\t "service-instance-id",\n "vnf-id"\n ],\n "version": 0\n },\n {\n "name": "vdns_int_private_ip_0",\n "property": {\n "description": "",\n "required": false,\n "type": "string",\n "status": "",\n "constraints": [\n {}\n ],\n "entry_schema": {\n "type": ""\n }\n },\n "input-param": false,\n "dictionary-name": "vdns_int_private_ip_0",\n "dictionary-source": "sdnc",\n "dependencies": [\n "service-instance-id",\n "vnf-id",\n "vdns_vf_module_id"\n ],\n "version": 0\n },\n {\n "name": "vdns_onap_private_ip_0",\n "property": {\n "description": "",\n "required": false,\n "type": "string",\n "status": "",\n "constraints": [\n {}\n ],\n "entry_schema": {\n "type": ""\n }\n },\n "input-param": false,\n "dictionary-name": "vdns_onap_private_ip_0",\n "dictionary-source": "sdnc",\n "dependencies": [\n "service-instance-id",\n "vnf-id",\n "vdns_vf_module_id"\n ],\n "version": 0\n }\n]'
@patch.object(Blueprint, "send_message")
def test_blueprint_enrichment(send_message_mock):
blueprint = Blueprint(b"test cba - it will never work")
blueprint.enrich()
send_message_mock.assert_called_once()
send_message_mock.reset_mock()
send_message_mock.side_effect = RequestError
with raises(RequestError):
blueprint.enrich()
@patch.object(Blueprint, "send_message")
def test_blueprint_publish(send_message_mock):
blueprint = Blueprint(b"test cba - it will never work")
blueprint.publish()
send_message_mock.assert_called_once()
@patch.object(Blueprint, "send_message")
def test_blueprint_deploy(send_message_mock):
blueprint = Blueprint(b"test cba - it will never work")
blueprint.deploy()
send_message_mock.assert_called_once()
def test_blueprint_load_from_file():
with TemporaryDirectory() as tmpdirname:
path = os.path.join(tmpdirname, "test.zip")
with open(path, "wb") as f:
f.write(b"test cba - it will never work")
blueprint = Blueprint.load_from_file(path)
assert blueprint.cba_file_bytes == b"test cba - it will never work"
def test_blueprint_load_from_file_file_error():
with TemporaryDirectory() as tmpdirname, \
patch("__main__.open", new_callable=mock_open) as mo, \
raises(FileError) as exc:
path = os.path.join(tmpdirname, "nonexistent_file.zip")
mo.side_effect = FileNotFoundError
Blueprint.load_from_file(path)
assert exc.type == FileError
def test_blueprint_save():
blueprint = Blueprint(b"test cba - it will never work")
with TemporaryDirectory() as tmpdirname:
path = os.path.join(tmpdirname, "test.zip")
blueprint.save(path)
with open(path, "rb") as f:
assert f.read() == b"test cba - it will never work"
def test_blueprint_read_cba_metadata():
b = Blueprint(b"test cba - it will never work")
with raises(ValidationError) as exc:
b.get_cba_metadata(b"Invalid")
b.get_cba_metadata(b"123: 456")
assert exc.type is ValidationError
cba_metadata = b.get_cba_metadata(vLB_CBA_Python_meta_bytes)
assert cba_metadata.tosca_meta_file_version == "1.0.0"
assert cba_metadata.csar_version == 1.0
assert cba_metadata.created_by == "PLATANIA, MARCO <platania@research.att.com>"
assert cba_metadata.entry_definitions == "Definitions/vLB_CDS.json"
assert cba_metadata.template_name == "vDNS-CDS-test1"
assert cba_metadata.template_version == 1.0
assert cba_metadata.template_tags == "vDNS-CDS-test1"
with open(Path(Path(__file__).resolve().parent, "data/vLB_CBA_Python.zip"), "rb") as cba_file:
b = Blueprint(cba_file.read())
assert b.metadata.tosca_meta_file_version == "1.0.0"
assert b.metadata.csar_version == 1.0
assert b.metadata.created_by == "PLATANIA, MARCO <platania@research.att.com>"
assert b.metadata.entry_definitions == "Definitions/vLB_CDS.json"
assert b.metadata.template_name == "vDNS-CDS-test1"
assert b.metadata.template_version == 1.0
assert b.metadata.template_tags == "vDNS-CDS-test1"
def test_blueprint_get_mappings_from_mapping_file():
b = Blueprint(b"test cba - it will never work")
mappings = list(b.get_mappings_from_mapping_file(vLB_CBA_Python_base_template_mapping_bytes))
assert len(mappings) == 5
mapping = mappings[0]
assert mapping.name == "service-instance-id"
assert mapping.mapping_type == "string"
assert mapping.dictionary_name == "service-instance-id"
assert mapping.dictionary_sources == ["input"]
def test_blueprint_generate_data_dictionary_set():
with open(Path(Path(__file__).resolve().parent, "data/vLB_CBA_Python.zip"), "rb") as cba_file:
b = Blueprint(cba_file.read())
dd_set = b.get_data_dictionaries()
print(dd_set)
@patch.object(CdsElement, "_url", new_callable=PropertyMock)
def test_data_dictionary(cds_element_url_property_mock):
cds_element_url_property_mock.return_value = "http://127.0.0.1"
with raises(ValidationError) as exc:
DataDictionary({})
assert exc.type is ValidationError
dd = DataDictionary({}, fix_schema=False)
assert dd.url == "http://127.0.0.1/api/v1/dictionary"
assert dd.data_dictionary_json == {}
dd = DataDictionary(DD_1)
assert dd.name == DD_1["name"]
@patch.object(DataDictionary, "send_message")
def test_data_dictionary_upload(send_message_mock):
dd = DataDictionary(DD_1)
dd.upload()
send_message_mock.assert_called_once()
@patch.object(DataDictionary, "send_message")
def test_data_dictionary_set(send_message_mock):
dd_set = DataDictionarySet()
dd_set.add(DataDictionary(DD_1))
assert dd_set.length == 1
dd_set.add(DataDictionary(DD_1))
assert dd_set.length == 1
dd_set.add(DataDictionary({"name": "test"}, fix_schema=False))
assert dd_set.length == 2
dd_set.upload()
assert send_message_mock.call_count == 2
def test_data_dictionary_set_save_to_file_load_from_file():
dd = DataDictionarySet()
dd.add(DataDictionary(DD_1))
with TemporaryDirectory() as tmpdirname:
path = os.path.join(tmpdirname, "dd.json")
dd.save_to_file(path)
with open(path, "r") as f:
assert f.read() == json.dumps([dd.data_dictionary_json for dd in dd.dd_set], indent=4)
dd_2 = DataDictionarySet.load_from_file(path)
assert dd.dd_set == dd_2.dd_set
def test_data_dictionary_load_from_file_file_error():
with TemporaryDirectory() as tmpdirname, \
patch("__main__.open", new_callable=mock_open) as mo, \
raises(FileError) as exc:
path = os.path.join(tmpdirname, "nonexistent_file.zip")
mo.side_effect = FileNotFoundError
DataDictionarySet.load_from_file(path)
assert exc.type == FileError
def test_mapping():
m1 = Mapping(name="test",
mapping_type="string",
dictionary_name="test_dictionary_name",
dictionary_sources=["dictionary_source_1"])
m2 = Mapping(name="test", mapping_type="string", dictionary_name="test_dictionary_name", dictionary_sources=["dictionary_source_2"])
assert m1 == m2
m1.merge(m2)
assert sorted(m1.dictionary_sources) == ["dictionary_source_1", "dictionary_source_2"]
m1.merge(m2)
assert sorted(m1.dictionary_sources) == ["dictionary_source_1", "dictionary_source_2"]
def test_mapping_set():
ms = MappingSet()
assert len(ms) == 0
m1 = Mapping(name="test",
mapping_type="string",
dictionary_name="test_dictionary_name",
dictionary_sources=["dictionary_source_1"])
m2 = Mapping(name="test", mapping_type="string", dictionary_name="test_dictionary_name", dictionary_sources=["dictionary_source_2"])
ms.add(m1)
assert len(ms) == 1
ms.add(m2)
assert len(ms) == 1
assert sorted(ms[0].dictionary_sources) == ["dictionary_source_1", "dictionary_source_2"]
def test_blueprint_get_workflows_from_entry_definitions_file():
with open(Path(Path(__file__).resolve().parent, "data/vLB_CBA_Python.zip"), "rb") as cba_file:
b = Blueprint(cba_file.read())
assert len(b.workflows) == 3
workflow = b.workflows[0]
assert len(workflow.steps) == 1
assert workflow.steps[0].name == "resource-assignment"
assert workflow.steps[0].description == "Resource Assign Workflow"
assert workflow.steps[0].target == "resource-assignment"
assert len(workflow.inputs) == 2
assert len(workflow.outputs) == 1
def test_blueprint_get_workflow_by_name():
with open(Path(Path(__file__).resolve().parent, "data/vLB_CBA_Python.zip"), "rb") as cba_file:
b = Blueprint(cba_file.read())
workflow = b.get_workflow_by_name("resource-assignment")
assert workflow.name == "resource-assignment"
workflow = b.get_workflow_by_name("config-assign")
assert workflow.name == "config-assign"
workflow = b.get_workflow_by_name("config-deploy")
assert workflow.name == "config-deploy"
with raises(ParameterError):
b.get_workflow_by_name("non-existing-workflow")
@patch.object(Workflow, "send_message")
def test_workflow_execute(send_message_mock):
metadata = MagicMock(template_name="test", template_version="test")
blueprint = MagicMock(metadata=metadata)
workflow = Workflow("test_workflow", {}, blueprint)
assert len(workflow.steps) == 0
assert len(workflow.inputs) == 0
assert len(workflow.outputs) == 0
workflow.execute({})
send_message_mock.assert_called_once()
def test_data_dictionary_validation():
assert DataDictionary(DD_1).has_valid_schema()
raw_dd = DataDictionary(RAW_DD, fix_schema=False)
assert not raw_dd.has_valid_schema()
raw_dd = DataDictionary(RAW_DD, fix_schema=True)
assert raw_dd.has_valid_schema()
@patch.object(Blueprintprocessor, "send_message")
def test_blueprintprocessor_bootstrap(mock_send_message):
Blueprintprocessor.bootstrap()
mock_send_message.assert_called_once()
assert mock_send_message.call_args[1]["data"] == '{\n "loadModelType" : true,\n "loadResourceDictionary" : true,\n "loadCBA" : true\n}'
mock_send_message.reset_mock()
Blueprintprocessor.bootstrap(load_cba=False, load_model_type=False, load_resource_dictionary=False)
mock_send_message.assert_called_once()
assert mock_send_message.call_args[1]["data"] == '{\n "loadModelType" : false,\n "loadResourceDictionary" : false,\n "loadCBA" : false\n}'
| 42.676923
| 2,358
| 0.659553
|
3fe0576c14ed15713ddeeb82e75e25493b457721
| 9,968
|
py
|
Python
|
ngraph/frontends/neon/saver.py
|
NervanaSystems/ngraph-python
|
ac032c83c7152b615a9ad129d54d350f9d6a2986
|
[
"Apache-2.0"
] | 18
|
2018-03-19T04:16:49.000Z
|
2021-02-08T14:44:58.000Z
|
ngraph/frontends/neon/saver.py
|
rsumner31/ngraph
|
5e5c9bb9f24d95aee190b914dd2d44122fc3be53
|
[
"Apache-2.0"
] | 2
|
2019-04-16T06:41:49.000Z
|
2019-05-06T14:08:13.000Z
|
ngraph/frontends/neon/saver.py
|
rsumner31/ngraph
|
5e5c9bb9f24d95aee190b914dd2d44122fc3be53
|
[
"Apache-2.0"
] | 11
|
2018-06-16T15:59:08.000Z
|
2021-03-06T00:45:30.000Z
|
#!/usr/bin/env python
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from __future__ import division, print_function, absolute_import
from operator import itemgetter
import ngraph as ng
from ngraph.frontends.neon.saverfile import SaverFile
def get_root_ops(computation):
"""
Get list of root Ops from a forest of computation graph
Arguments:
computation (ComputationOp or dict of Ops):
A ComputationOp or dictionary of output Ops of interest.
Returns:
List of root Ops
"""
if isinstance(computation, dict):
computation_keys = tuple(sorted(computation.keys()))
outputs = itemgetter(*computation_keys)(computation)
outputs = [outputs] if len(computation_keys) == 1 else list(outputs)
values = type(outputs)(ng.as_op(output) for output in outputs)
elif isinstance(computation, ng.ComputationOp):
values = computation.values
else:
raise ValueError()
return values
class Saver(object):
def __init__(self):
"""
A class that defines a set of methods to enable weight saving and restoring
Methods:
setup_save: prepare save function for saving all weight variables in
computation
save: saves weight values to named file
setup_restore: prepare restore function for loading weight from file to
weight variables in computation
restore: load weight values to computation
Examples:
... create some_op_graph ...
comp = ng.computation(some_op_graph, "all")
" create saver object
weight_saver = Saver()
with closing(ngt.make_transformer()) as transformer:
func = transformer.add_computation(comp)
" setup save function
weight_saver.setup_save(transformer=transformer, computation=comp)
... some usage of func ...
" call save
weight_saver.save(filename="some_name")
...
with closing(ngt.make_transformer()) as another_transformer:
another_func = restore_transformer.add_computation(comp)
" setup restore
weight_saver.setup_restore(transformer=another_transformer,
computation=comp,
filename="some_name")
" call restore
weight_saver.restore()
... now use another_func with the restored weights ...
"""
self.getter_op_names = None
self.getter = None
self.setter = None
def setup_save(self, transformer, computation):
"""
prepare save function for saving all weight variables in computation
Arguments:
transformer : transformer where the weights are stored
computation (ComputationOp or dict of Ops):
A ComputationOp or dictionary of output Ops of interest.
"""
# collect and return a set of all AssignableTensorOp's
def find_ops(values):
"""
Find and return all weights.
"""
nodes = dict()
frontier = set(values)
visited = set()
def find_op(op_to_add):
"""
find persistent and trainable AssignableTensorOp
"""
if isinstance(op_to_add, ng.TensorValueOp):
tensor = op_to_add.tensor
if isinstance(tensor, ng.AssignableTensorOp):
if tensor.is_persistent:
if tensor.is_constant:
pass
elif tensor.is_placeholder:
pass
else:
try:
prev_op = nodes[tensor.name]
except KeyError:
prev_op = tensor
nodes[tensor.name] = tensor
assert prev_op == tensor
while len(frontier) > 0:
op_to_visit = frontier.pop()
find_op(op_to_visit)
visited.add(op_to_visit)
for arg in op_to_visit.args:
if arg not in visited:
frontier.add(arg)
for arg in op_to_visit.all_deps:
if arg not in visited:
frontier.add(arg)
return nodes
# Traverse computation graph and extract persistent tensors and unique op instance name
save_variables = find_ops(get_root_ops(computation))
self.getter_op_names, ops = zip(*save_variables.items())
self.getter = transformer.computation(ops)
def save(self, filename, compress=False, transformer=None, computation=None):
"""
Save weight values to named file
Arguments:
filename: name of file to be used for saving weights
compress: specify whether to compress the weights
transformer : transformer where the weights are stored
required only if setup_save is not called
computation (ComputationOp or dict of Ops):
A ComputationOp or dictionary of output Ops of interest.
required only if setup_save is not called
"""
if self.getter is None:
self.setup_save(transformer=transformer,
computation=computation)
        tensors = {name: tensor.copy() for name, tensor in zip(self.getter_op_names,
                                                                self.getter())}
# write dictionary to file
savefile = SaverFile(filename)
savefile.write_values(tensors, compress)
def setup_restore(self, transformer, computation, filename):
"""
prepare restore function for loading weight from file to
weight variables in computation
Arguments:
transformer : transformer where the weights will be restored
computation (ComputationOp or dict of Ops):
A ComputationOp or dictionary of output Ops of interest.
filename: name of file with saved weights
"""
def match_ops(tensors, values):
"""
Match weights with tensor values loaded from file
"""
nodes = dict()
frontier = set(values)
visited = set()
def match_op(op_to_add):
"""
Match weight with loaded tensor value
"""
if isinstance(op_to_add, ng.TensorValueOp):
tensor = op_to_add.tensor
if isinstance(tensor, ng.AssignableTensorOp):
if tensor.is_persistent:
if tensor.is_constant:
pass
elif tensor.is_placeholder:
pass
else:
try:
nodes[tensor] = tensors[tensor.name]
except KeyError:
print("Warning: Missing weight in save file: " + tensor.name)
while len(frontier) > 0:
op_to_visit = frontier.pop()
match_op(op_to_visit)
visited.add(op_to_visit)
for arg in op_to_visit.args:
if arg not in visited:
frontier.add(arg)
for arg in op_to_visit.all_deps:
if arg not in visited:
frontier.add(arg)
return nodes
# load weight from file to tensors
savefile = SaverFile(filename)
tensors = savefile.read_values()
nodes = match_ops(tensors, get_root_ops(computation))
restore_ops = []
for op_to_save, op_value in nodes.items():
restore_ops.append(ng.AssignOp(op_to_save, op_value))
self.setter = transformer.computation(restore_ops)
def restore(self, transformer=None, computation=None, filename=None):
"""
load weight values to computation
Arguments:
transformer : transformer where the weights will be restored
required only if setup_restore is not called
computation (ComputationOp or dict of Ops):
A ComputationOp or dictionary of output Ops of interest.
required only if setup_restore is not called
filename: name of file with saved weights
required only if setup_restore is not called
"""
if self.setter is None:
self.setup_restore(transformer=transformer,
computation=computation,
filename=filename)
self.setter()
| 42.417021
| 97
| 0.541734
|
f557e164f78347b2ce7a034f2814a912577a994e
| 19,255
|
py
|
Python
|
projects/WSL/wsl/data/datasets/builtin_meta.py
|
shenyunhang/PDSL
|
ade1e0cf4435c9252b4384100a82bb45b026b7c2
|
[
"Apache-2.0"
] | 6
|
2021-11-03T09:07:39.000Z
|
2022-02-06T15:36:56.000Z
|
projects/WSL/wsl/data/datasets/builtin_meta.py
|
shenyunhang/PDSL
|
ade1e0cf4435c9252b4384100a82bb45b026b7c2
|
[
"Apache-2.0"
] | 2
|
2021-10-17T16:55:59.000Z
|
2022-03-27T02:46:38.000Z
|
projects/WSL/wsl/data/datasets/builtin_meta.py
|
shenyunhang/PDSL
|
ade1e0cf4435c9252b4384100a82bb45b026b7c2
|
[
"Apache-2.0"
] | 4
|
2021-11-03T09:07:41.000Z
|
2022-02-06T15:37:03.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Note:
For your custom dataset, there is no need to hard-code metadata anywhere in the code.
For example, for COCO-format dataset, metadata will be obtained automatically
when calling `load_coco_json`. For other dataset, metadata may also be obtained in other ways
during loading.
However, we hard-coded metadata for a few common dataset here.
The only goal is to allow users who don't have these dataset to use pre-trained models.
Users don't have to download a COCO json (which contains metadata), in order to visualize a
COCO model (with correct class names and colors).
"""
import numpy as np
# All coco categories, together with their nice-looking visualization colors
# It's from https://github.com/cocodataset/panopticapi/blob/master/panoptic_coco_categories.json
COCO_CATEGORIES = [
{"color": [220, 20, 60], "isthing": 1, "id": 1, "name": "person"},
{"color": [119, 11, 32], "isthing": 1, "id": 2, "name": "bicycle"},
{"color": [0, 0, 142], "isthing": 1, "id": 3, "name": "car"},
{"color": [0, 0, 230], "isthing": 1, "id": 4, "name": "motorcycle"},
{"color": [106, 0, 228], "isthing": 1, "id": 5, "name": "airplane"},
{"color": [0, 60, 100], "isthing": 1, "id": 6, "name": "bus"},
{"color": [0, 80, 100], "isthing": 1, "id": 7, "name": "train"},
{"color": [0, 0, 70], "isthing": 1, "id": 8, "name": "truck"},
{"color": [0, 0, 192], "isthing": 1, "id": 9, "name": "boat"},
{"color": [250, 170, 30], "isthing": 1, "id": 10, "name": "traffic light"},
{"color": [100, 170, 30], "isthing": 1, "id": 11, "name": "fire hydrant"},
{"color": [220, 220, 0], "isthing": 1, "id": 13, "name": "stop sign"},
{"color": [175, 116, 175], "isthing": 1, "id": 14, "name": "parking meter"},
{"color": [250, 0, 30], "isthing": 1, "id": 15, "name": "bench"},
{"color": [165, 42, 42], "isthing": 1, "id": 16, "name": "bird"},
{"color": [255, 77, 255], "isthing": 1, "id": 17, "name": "cat"},
{"color": [0, 226, 252], "isthing": 1, "id": 18, "name": "dog"},
{"color": [182, 182, 255], "isthing": 1, "id": 19, "name": "horse"},
{"color": [0, 82, 0], "isthing": 1, "id": 20, "name": "sheep"},
{"color": [120, 166, 157], "isthing": 1, "id": 21, "name": "cow"},
{"color": [110, 76, 0], "isthing": 1, "id": 22, "name": "elephant"},
{"color": [174, 57, 255], "isthing": 1, "id": 23, "name": "bear"},
{"color": [199, 100, 0], "isthing": 1, "id": 24, "name": "zebra"},
{"color": [72, 0, 118], "isthing": 1, "id": 25, "name": "giraffe"},
{"color": [255, 179, 240], "isthing": 1, "id": 27, "name": "backpack"},
{"color": [0, 125, 92], "isthing": 1, "id": 28, "name": "umbrella"},
{"color": [209, 0, 151], "isthing": 1, "id": 31, "name": "handbag"},
{"color": [188, 208, 182], "isthing": 1, "id": 32, "name": "tie"},
{"color": [0, 220, 176], "isthing": 1, "id": 33, "name": "suitcase"},
{"color": [255, 99, 164], "isthing": 1, "id": 34, "name": "frisbee"},
{"color": [92, 0, 73], "isthing": 1, "id": 35, "name": "skis"},
{"color": [133, 129, 255], "isthing": 1, "id": 36, "name": "snowboard"},
{"color": [78, 180, 255], "isthing": 1, "id": 37, "name": "sports ball"},
{"color": [0, 228, 0], "isthing": 1, "id": 38, "name": "kite"},
{"color": [174, 255, 243], "isthing": 1, "id": 39, "name": "baseball bat"},
{"color": [45, 89, 255], "isthing": 1, "id": 40, "name": "baseball glove"},
{"color": [134, 134, 103], "isthing": 1, "id": 41, "name": "skateboard"},
{"color": [145, 148, 174], "isthing": 1, "id": 42, "name": "surfboard"},
{"color": [255, 208, 186], "isthing": 1, "id": 43, "name": "tennis racket"},
{"color": [197, 226, 255], "isthing": 1, "id": 44, "name": "bottle"},
{"color": [171, 134, 1], "isthing": 1, "id": 46, "name": "wine glass"},
{"color": [109, 63, 54], "isthing": 1, "id": 47, "name": "cup"},
{"color": [207, 138, 255], "isthing": 1, "id": 48, "name": "fork"},
{"color": [151, 0, 95], "isthing": 1, "id": 49, "name": "knife"},
{"color": [9, 80, 61], "isthing": 1, "id": 50, "name": "spoon"},
{"color": [84, 105, 51], "isthing": 1, "id": 51, "name": "bowl"},
{"color": [74, 65, 105], "isthing": 1, "id": 52, "name": "banana"},
{"color": [166, 196, 102], "isthing": 1, "id": 53, "name": "apple"},
{"color": [208, 195, 210], "isthing": 1, "id": 54, "name": "sandwich"},
{"color": [255, 109, 65], "isthing": 1, "id": 55, "name": "orange"},
{"color": [0, 143, 149], "isthing": 1, "id": 56, "name": "broccoli"},
{"color": [179, 0, 194], "isthing": 1, "id": 57, "name": "carrot"},
{"color": [209, 99, 106], "isthing": 1, "id": 58, "name": "hot dog"},
{"color": [5, 121, 0], "isthing": 1, "id": 59, "name": "pizza"},
{"color": [227, 255, 205], "isthing": 1, "id": 60, "name": "donut"},
{"color": [147, 186, 208], "isthing": 1, "id": 61, "name": "cake"},
{"color": [153, 69, 1], "isthing": 1, "id": 62, "name": "chair"},
{"color": [3, 95, 161], "isthing": 1, "id": 63, "name": "couch"},
{"color": [163, 255, 0], "isthing": 1, "id": 64, "name": "potted plant"},
{"color": [119, 0, 170], "isthing": 1, "id": 65, "name": "bed"},
{"color": [0, 182, 199], "isthing": 1, "id": 67, "name": "dining table"},
{"color": [0, 165, 120], "isthing": 1, "id": 70, "name": "toilet"},
{"color": [183, 130, 88], "isthing": 1, "id": 72, "name": "tv"},
{"color": [95, 32, 0], "isthing": 1, "id": 73, "name": "laptop"},
{"color": [130, 114, 135], "isthing": 1, "id": 74, "name": "mouse"},
{"color": [110, 129, 133], "isthing": 1, "id": 75, "name": "remote"},
{"color": [166, 74, 118], "isthing": 1, "id": 76, "name": "keyboard"},
{"color": [219, 142, 185], "isthing": 1, "id": 77, "name": "cell phone"},
{"color": [79, 210, 114], "isthing": 1, "id": 78, "name": "microwave"},
{"color": [178, 90, 62], "isthing": 1, "id": 79, "name": "oven"},
{"color": [65, 70, 15], "isthing": 1, "id": 80, "name": "toaster"},
{"color": [127, 167, 115], "isthing": 1, "id": 81, "name": "sink"},
{"color": [59, 105, 106], "isthing": 1, "id": 82, "name": "refrigerator"},
{"color": [142, 108, 45], "isthing": 1, "id": 84, "name": "book"},
{"color": [196, 172, 0], "isthing": 1, "id": 85, "name": "clock"},
{"color": [95, 54, 80], "isthing": 1, "id": 86, "name": "vase"},
{"color": [128, 76, 255], "isthing": 1, "id": 87, "name": "scissors"},
{"color": [201, 57, 1], "isthing": 1, "id": 88, "name": "teddy bear"},
{"color": [246, 0, 122], "isthing": 1, "id": 89, "name": "hair drier"},
{"color": [191, 162, 208], "isthing": 1, "id": 90, "name": "toothbrush"},
{"color": [255, 255, 128], "isthing": 0, "id": 92, "name": "banner"},
{"color": [147, 211, 203], "isthing": 0, "id": 93, "name": "blanket"},
{"color": [150, 100, 100], "isthing": 0, "id": 95, "name": "bridge"},
{"color": [168, 171, 172], "isthing": 0, "id": 100, "name": "cardboard"},
{"color": [146, 112, 198], "isthing": 0, "id": 107, "name": "counter"},
{"color": [210, 170, 100], "isthing": 0, "id": 109, "name": "curtain"},
{"color": [92, 136, 89], "isthing": 0, "id": 112, "name": "door-stuff"},
{"color": [218, 88, 184], "isthing": 0, "id": 118, "name": "floor-wood"},
{"color": [241, 129, 0], "isthing": 0, "id": 119, "name": "flower"},
{"color": [217, 17, 255], "isthing": 0, "id": 122, "name": "fruit"},
{"color": [124, 74, 181], "isthing": 0, "id": 125, "name": "gravel"},
{"color": [70, 70, 70], "isthing": 0, "id": 128, "name": "house"},
{"color": [255, 228, 255], "isthing": 0, "id": 130, "name": "light"},
{"color": [154, 208, 0], "isthing": 0, "id": 133, "name": "mirror-stuff"},
{"color": [193, 0, 92], "isthing": 0, "id": 138, "name": "net"},
{"color": [76, 91, 113], "isthing": 0, "id": 141, "name": "pillow"},
{"color": [255, 180, 195], "isthing": 0, "id": 144, "name": "platform"},
{"color": [106, 154, 176], "isthing": 0, "id": 145, "name": "playingfield"},
{"color": [230, 150, 140], "isthing": 0, "id": 147, "name": "railroad"},
{"color": [60, 143, 255], "isthing": 0, "id": 148, "name": "river"},
{"color": [128, 64, 128], "isthing": 0, "id": 149, "name": "road"},
{"color": [92, 82, 55], "isthing": 0, "id": 151, "name": "roof"},
{"color": [254, 212, 124], "isthing": 0, "id": 154, "name": "sand"},
{"color": [73, 77, 174], "isthing": 0, "id": 155, "name": "sea"},
{"color": [255, 160, 98], "isthing": 0, "id": 156, "name": "shelf"},
{"color": [255, 255, 255], "isthing": 0, "id": 159, "name": "snow"},
{"color": [104, 84, 109], "isthing": 0, "id": 161, "name": "stairs"},
{"color": [169, 164, 131], "isthing": 0, "id": 166, "name": "tent"},
{"color": [225, 199, 255], "isthing": 0, "id": 168, "name": "towel"},
{"color": [137, 54, 74], "isthing": 0, "id": 171, "name": "wall-brick"},
{"color": [135, 158, 223], "isthing": 0, "id": 175, "name": "wall-stone"},
{"color": [7, 246, 231], "isthing": 0, "id": 176, "name": "wall-tile"},
{"color": [107, 255, 200], "isthing": 0, "id": 177, "name": "wall-wood"},
{"color": [58, 41, 149], "isthing": 0, "id": 178, "name": "water-other"},
{"color": [183, 121, 142], "isthing": 0, "id": 180, "name": "window-blind"},
{"color": [255, 73, 97], "isthing": 0, "id": 181, "name": "window-other"},
{"color": [107, 142, 35], "isthing": 0, "id": 184, "name": "tree-merged"},
{"color": [190, 153, 153], "isthing": 0, "id": 185, "name": "fence-merged"},
{"color": [146, 139, 141], "isthing": 0, "id": 186, "name": "ceiling-merged"},
{"color": [70, 130, 180], "isthing": 0, "id": 187, "name": "sky-other-merged"},
{"color": [134, 199, 156], "isthing": 0, "id": 188, "name": "cabinet-merged"},
{"color": [209, 226, 140], "isthing": 0, "id": 189, "name": "table-merged"},
{"color": [96, 36, 108], "isthing": 0, "id": 190, "name": "floor-other-merged"},
{"color": [96, 96, 96], "isthing": 0, "id": 191, "name": "pavement-merged"},
{"color": [64, 170, 64], "isthing": 0, "id": 192, "name": "mountain-merged"},
{"color": [152, 251, 152], "isthing": 0, "id": 193, "name": "grass-merged"},
{"color": [208, 229, 228], "isthing": 0, "id": 194, "name": "dirt-merged"},
{"color": [206, 186, 171], "isthing": 0, "id": 195, "name": "paper-merged"},
{"color": [152, 161, 64], "isthing": 0, "id": 196, "name": "food-other-merged"},
{"color": [116, 112, 0], "isthing": 0, "id": 197, "name": "building-other-merged"},
{"color": [0, 114, 143], "isthing": 0, "id": 198, "name": "rock-merged"},
{"color": [102, 102, 156], "isthing": 0, "id": 199, "name": "wall-other-merged"},
{"color": [250, 141, 255], "isthing": 0, "id": 200, "name": "rug-merged"},
]
VOC_C = 21
def uint82bin(n, count=8):
"""returns the binary of integer n, count refers to amount of bits""" ""
return "".join([str((n >> y) & 1) for y in range(count - 1, -1, -1)])
def labelcolormap(N):
cmap = np.zeros((N, 3), dtype=np.uint8)
for i in range(N):
r = 0
g = 0
b = 0
id = i
for j in range(7):
str_id = uint82bin(id)
r = r ^ (np.uint8(str_id[-1]) << (7 - j))
g = g ^ (np.uint8(str_id[-2]) << (7 - j))
b = b ^ (np.uint8(str_id[-3]) << (7 - j))
id = id >> 3
cmap[i, 0] = r
cmap[i, 1] = g
cmap[i, 2] = b
return cmap
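# A quick sanity check of the bit-interleaving scheme above (values worked out by hand from
# uint82bin/labelcolormap; they match the familiar PASCAL VOC palette used below):
#   uint82bin(1) == '00000001'  -> labelcolormap(N)[1] == [128,   0, 0]   (class id 1, "aeroplane")
#   uint82bin(2) == '00000010'  -> labelcolormap(N)[2] == [  0, 128, 0]   (class id 2, "bicycle")
#   uint82bin(3) == '00000011'  -> labelcolormap(N)[3] == [128, 128, 0]   (class id 3, "bird")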
voc_cmaps = labelcolormap(VOC_C).tolist()
VOC_CATEGORIES = [
{"id": 1, "name": "aeroplane", "isthing": 1, "color": voc_cmaps[1]},
{"id": 2, "name": "bicycle", "isthing": 1, "color": voc_cmaps[2]},
{"id": 3, "name": "bird", "isthing": 1, "color": voc_cmaps[3]},
{"id": 4, "name": "boat", "isthing": 1, "color": voc_cmaps[4]},
{"id": 5, "name": "bottle", "isthing": 1, "color": voc_cmaps[5]},
{"id": 6, "name": "bus", "isthing": 1, "color": voc_cmaps[6]},
{"id": 7, "name": "car", "isthing": 1, "color": voc_cmaps[7]},
{"id": 8, "name": "cat", "isthing": 1, "color": voc_cmaps[8]},
{"id": 9, "name": "chair", "isthing": 1, "color": voc_cmaps[9]},
{"id": 10, "name": "cow", "isthing": 1, "color": voc_cmaps[10]},
{"id": 11, "name": "diningtable", "isthing": 1, "color": voc_cmaps[11]},
{"id": 12, "name": "dog", "isthing": 1, "color": voc_cmaps[12]},
{"id": 13, "name": "horse", "isthing": 1, "color": voc_cmaps[13]},
{"id": 14, "name": "motorbike", "isthing": 1, "color": voc_cmaps[14]},
{"id": 15, "name": "person", "isthing": 1, "color": voc_cmaps[15]},
{"id": 16, "name": "pottedplant", "isthing": 1, "color": voc_cmaps[16]},
{"id": 17, "name": "sheep", "isthing": 1, "color": voc_cmaps[17]},
{"id": 18, "name": "sofa", "isthing": 1, "color": voc_cmaps[18]},
{"id": 19, "name": "train", "isthing": 1, "color": voc_cmaps[19]},
{"id": 20, "name": "tvmonitor", "isthing": 1, "color": voc_cmaps[20]},
{"id": 21, "name": "background", "isthing": 0, "color": [255, 255, 255]},
]
def _get_flickr_coco_meta():
thing_ids = [k["id"] for k in COCO_CATEGORIES if k["isthing"] == 1]
thing_colors = [k["color"] for k in COCO_CATEGORIES if k["isthing"] == 1]
assert len(thing_ids) == 80, len(thing_ids)
# Mapping from the incontiguous COCO category id to an id in [0, 79]
thing_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(thing_ids)}
thing_classes = [k["name"] for k in COCO_CATEGORIES if k["isthing"] == 1]
ret = {
"thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id,
"thing_classes": thing_classes,
"thing_colors": thing_colors,
}
return ret
def _get_flickr_voc_meta():
thing_ids = [k["id"] for k in VOC_CATEGORIES if k["isthing"] == 1]
thing_colors = [k["color"] for k in VOC_CATEGORIES if k["isthing"] == 1]
assert len(thing_ids) == 20, len(thing_ids)
    # Mapping from the incontiguous VOC category id to an id in [0, 19]
thing_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(thing_ids)}
thing_classes = [k["name"] for k in VOC_CATEGORIES if k["isthing"] == 1]
ret = {
"thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id,
"thing_classes": thing_classes,
"thing_colors": thing_colors,
}
return ret
def _get_voc_sbd_instances_meta():
thing_ids = [k["id"] for k in VOC_CATEGORIES if k["isthing"] == 1]
thing_colors = [k["color"] for k in VOC_CATEGORIES if k["isthing"] == 1]
assert len(thing_ids) == 20, len(thing_ids)
    # Mapping from the incontiguous VOC category id to an id in [0, 19]
thing_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(thing_ids)}
thing_classes = [k["name"] for k in VOC_CATEGORIES if k["isthing"] == 1]
ret = {
"thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id,
"thing_classes": thing_classes,
"thing_colors": thing_colors,
}
return ret
def _get_voc_sbd_panoptic_separated_meta():
"""
Returns metadata for "separated" version of the panoptic segmentation dataset.
"""
stuff_ids = [k["id"] for k in VOC_CATEGORIES if k["isthing"] == 0]
assert len(stuff_ids) == 1, len(stuff_ids)
# For semantic segmentation, this mapping maps from contiguous stuff id
    # (in [0, 1], used in models) to ids in the dataset (used for processing results)
# The id 0 is mapped to an extra category "thing".
stuff_dataset_id_to_contiguous_id = {k: i + 1 for i, k in enumerate(stuff_ids)}
# When converting VOC panoptic annotations to semantic annotations
# We label the "thing" category to 0
stuff_dataset_id_to_contiguous_id[0] = 0
    # Names for VOC stuff categories (including "things"); VOC has a single stuff class here
stuff_classes = ["things"] + [
k["name"].replace("-other", "").replace("-merged", "")
for k in VOC_CATEGORIES
if k["isthing"] == 0
]
# NOTE: I randomly picked a color for things
stuff_colors = [[82, 18, 128]] + [k["color"] for k in VOC_CATEGORIES if k["isthing"] == 0]
ret = {
"stuff_dataset_id_to_contiguous_id": stuff_dataset_id_to_contiguous_id,
"stuff_classes": stuff_classes,
"stuff_colors": stuff_colors,
}
ret.update(_get_voc_sbd_instances_meta())
return ret
def _get_builtin_metadata(dataset_name):
if dataset_name == "flickr_voc":
return _get_flickr_voc_meta()
elif dataset_name == "flickr_coco":
return _get_flickr_coco_meta()
elif dataset_name == "voc_2007_train_pgt":
return _get_flickr_voc_meta()
elif dataset_name == "voc_2007_val_pgt":
return _get_flickr_voc_meta()
elif dataset_name == "voc_sbd":
return _get_voc_sbd_instances_meta()
elif dataset_name == "voc_sbd_panoptic_separated":
return _get_voc_sbd_panoptic_separated_meta()
elif dataset_name == "voc_sbd_panoptic_standard":
meta = {}
# The following metadata maps contiguous id from [0, #thing categories +
        # #stuff categories) to their names and colors. We have two replicas of the
        # same name and color under "thing_*" and "stuff_*" because the current
        # visualization function in D2 handles thing and stuff classes differently
# due to some heuristic used in Panoptic FPN. We keep the same naming to
# enable reusing existing visualization functions.
thing_classes = [k["name"] for k in VOC_CATEGORIES]
thing_colors = [k["color"] for k in VOC_CATEGORIES]
stuff_classes = [k["name"] for k in VOC_CATEGORIES]
stuff_colors = [k["color"] for k in VOC_CATEGORIES]
meta["thing_classes"] = thing_classes
meta["thing_colors"] = thing_colors
meta["stuff_classes"] = stuff_classes
meta["stuff_colors"] = stuff_colors
# Convert category id for training:
# category id: like semantic segmentation, it is the class id for each
# pixel. Since there are some classes not used in evaluation, the category
# id is not always contiguous and thus we have two set of category ids:
# - original category id: category id in the original dataset, mainly
# used for evaluation.
# - contiguous category id: [0, #classes), in order to train the linear
# softmax classifier.
thing_dataset_id_to_contiguous_id = {}
stuff_dataset_id_to_contiguous_id = {}
        for i, cat in enumerate(VOC_CATEGORIES):
if cat["isthing"]:
thing_dataset_id_to_contiguous_id[cat["id"]] = i
else:
stuff_dataset_id_to_contiguous_id[cat["id"]] = i
meta["thing_dataset_id_to_contiguous_id"] = thing_dataset_id_to_contiguous_id
meta["stuff_dataset_id_to_contiguous_id"] = stuff_dataset_id_to_contiguous_id
return meta
elif dataset_name == "imagenet_lvis_rare_300_1":
meta = {}
return meta
elif dataset_name == "imagenet_lvis_rare_300_10":
meta = {}
return meta
elif dataset_name == "imagenet_lvis_rare_300_100":
meta = {}
return meta
raise KeyError("No built-in metadata for dataset {}".format(dataset_name))
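# Editor's sketch (not part of the upstream file): how a custom dataset would normally attach
# its metadata at registration time instead of hard-coding it in this module. DatasetCatalog and
# MetadataCatalog are detectron2's standard entry points; the dataset name, loader, classes and
# colors below are purely hypothetical placeholders.
def _example_register_custom_dataset():
    """Illustrative only; never called by this module."""
    from detectron2.data import DatasetCatalog, MetadataCatalog

    def _load_my_dataset():
        # Return a list of dicts in detectron2's standard dataset format.
        return []

    DatasetCatalog.register("my_custom_dataset", _load_my_dataset)
    MetadataCatalog.get("my_custom_dataset").set(
        thing_classes=["cat", "dog"],
        thing_colors=[[255, 77, 255], [0, 226, 252]],
    )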
| 54.85755
| 96
| 0.559283
|
cf7c1a64e90180b352085d35175416bf2b20ccf5
| 2,881
|
py
|
Python
|
taggit/migrations/0002_auto__add_tagtransform.py
|
theatlantic/django-taggit
|
ec6c3a6e8b37f4c60d7bacd10c44462562fc781b
|
[
"BSD-3-Clause"
] | null | null | null |
taggit/migrations/0002_auto__add_tagtransform.py
|
theatlantic/django-taggit
|
ec6c3a6e8b37f4c60d7bacd10c44462562fc781b
|
[
"BSD-3-Clause"
] | null | null | null |
taggit/migrations/0002_auto__add_tagtransform.py
|
theatlantic/django-taggit
|
ec6c3a6e8b37f4c60d7bacd10c44462562fc781b
|
[
"BSD-3-Clause"
] | null | null | null |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'TagTransform'
db.create_table('taggit_tagtransform', (
('type', self.gf('django.db.models.fields.IntegerField')(db_index=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('rule', self.gf('django.db.models.fields.CharField')(unique=True, max_length=100)),
('transform', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal('taggit', ['TagTransform'])
def backwards(self, orm):
# Deleting model 'TagTransform'
db.delete_table('taggit_tagtransform')
models = {
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'taggit.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
},
'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
},
'taggit.tagtransform': {
'Meta': {'object_name': 'TagTransform'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rule': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'transform': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'type': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'})
}
}
complete_apps = ['taggit']
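# Usage note (illustrative, assuming a South-era Django project like the rest of this file):
#   ./manage.py migrate taggit 0002_auto__add_tagtransform
# applies forwards() and creates the taggit_tagtransform table defined above; running the
# migration backwards drops it again via backwards().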
| 49.672414
| 174
| 0.570982
|
b9bc9c09f8176a272168badc1e96a8a490a338c8
| 2,300
|
py
|
Python
|
mindspore/ops/_op_impl/tbe/maximum_grad.py
|
unseenme/mindspore
|
4ba052f0cd9146ac0ccc4880a778706f1b2d0af8
|
[
"Apache-2.0"
] | 7
|
2020-05-24T03:19:26.000Z
|
2020-05-24T03:20:00.000Z
|
mindspore/ops/_op_impl/tbe/maximum_grad.py
|
unseenme/mindspore
|
4ba052f0cd9146ac0ccc4880a778706f1b2d0af8
|
[
"Apache-2.0"
] | null | null | null |
mindspore/ops/_op_impl/tbe/maximum_grad.py
|
unseenme/mindspore
|
4ba052f0cd9146ac0ccc4880a778706f1b2d0af8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""MaximumGrad op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
maximum_grad_op_info = TBERegOp("MaximumGrad") \
.fusion_type("OPAQUE") \
.async_flag(False) \
.binfile_name("maximum_grad.so") \
.compute_cost(10) \
.kernel_name("maximum_grad") \
.partial_flag(True) \
.attr("grad_x", "optional", "bool", "all") \
.attr("grad_y", "optional", "bool", "all") \
.input(0, "grads", False, "required", "all") \
.input(1, "x1", False, "required", "all") \
.input(2, "x2", False, "required", "all") \
.output(0, "y1", False, "required", "all") \
.output(1, "y2", False, "required", "all") \
.dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default, DataType.I32_Default,
DataType.I32_Default) \
.dtype_format(DataType.I32_5HD, DataType.I32_5HD, DataType.I32_5HD, DataType.I32_5HD,
DataType.I32_5HD) \
.dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default,
DataType.F16_Default) \
.dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD,
DataType.F16_5HD) \
.dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default,
DataType.F32_Default) \
.dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD,
DataType.F32_5HD) \
.get_op_info()
@op_info_register(maximum_grad_op_info)
def _maximum_grad_tbe():
"""MaximumGrad TBE register"""
return
| 44.230769
| 105
| 0.668261
|
7500dfb7969f18eba67759b5bfd609d1b09289e6
| 2,673
|
py
|
Python
|
test/unit/ggrc/notification/test_should_receive.py
|
MikalaiMikalalai/ggrc-core
|
f0f83b3638574bb64de474f3b70ed27436ca812a
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2019-01-12T23:46:00.000Z
|
2019-01-12T23:46:00.000Z
|
test/unit/ggrc/notification/test_should_receive.py
|
MikalaiMikalalai/ggrc-core
|
f0f83b3638574bb64de474f3b70ed27436ca812a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
test/unit/ggrc/notification/test_should_receive.py
|
MikalaiMikalalai/ggrc-core
|
f0f83b3638574bb64de474f3b70ed27436ca812a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2020 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Test should_receive function in the notifications.common module
"""
import unittest
from mock import Mock
from ggrc.notifications.common import should_receive
class TestShouldReceive(unittest.TestCase):
"""Test should_receive function"""
def setUp(self):
self.notif = Mock(id=1)
self.no_access_person = Mock(system_wide_role="No Access")
self.default_digest_person = Mock(system_wide_role="Reader",
notification_configs=[])
self.enabled_digest_person = Mock(
system_wide_role="Reader",
notification_configs=[Mock(enable_flag=True,
notif_type="Email_Digest")])
self.disabled_digest_person = Mock(
system_wide_role="Reader",
notification_configs=[Mock(enable_flag=False,
notif_type="Email_Digest")])
def _call_should_receive(self, person, force_notifications):
"""Helper function that calls should_receive and returns it's result"""
if person is None:
user_id = -1
user_cache = {}
else:
user_id = 1
user_cache = {
user_id: person
}
user_data = {
"force_notifications": {
1: force_notifications
},
"user": {
"id": user_id
}
}
return should_receive(self.notif, user_data, user_cache)
def test_invalid_user(self):
"""should_receive returns False when user is invalid"""
res = self._call_should_receive(None, True)
self.assertFalse(res)
def test_no_access_user(self):
"""should_receive returns False when user has No Access"""
res = self._call_should_receive(self.no_access_person, True)
self.assertFalse(res)
def test_default_email_digest(self):
"""should_receive returns True when notification_configs not set"""
res = self._call_should_receive(self.default_digest_person, False)
self.assertTrue(res)
def test_enabled_email_digest_flag(self):
"""should_receive returns True when email_digest flag enabled"""
res = self._call_should_receive(self.enabled_digest_person, False)
self.assertTrue(res)
def test_disabled_email_digest_flag(self):
"""should_receive returns False when email_digest flag disabled"""
res = self._call_should_receive(self.disabled_digest_person, False)
self.assertFalse(res)
def test_force_notifications(self):
"""should_receive returns True when force_notif is set"""
res = self._call_should_receive(self.disabled_digest_person, True)
self.assertTrue(res)
| 33.4125
| 78
| 0.688739
|
ed180ab7f45d33e3f9678977fbb7026c1d80c230
| 5,387
|
py
|
Python
|
nikola/plugins/task/indexes.py
|
jonasstein/nikola
|
9b94d4581f66d72f8ceb3772e458ca66cb7eb869
|
[
"MIT"
] | null | null | null |
nikola/plugins/task/indexes.py
|
jonasstein/nikola
|
9b94d4581f66d72f8ceb3772e458ca66cb7eb869
|
[
"MIT"
] | null | null | null |
nikola/plugins/task/indexes.py
|
jonasstein/nikola
|
9b94d4581f66d72f8ceb3772e458ca66cb7eb869
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright © 2012-2018 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Render the blog's main index."""
from nikola.plugin_categories import Taxonomy
class Indexes(Taxonomy):
"""Classify for the blog's main index."""
name = "classify_indexes"
classification_name = "index"
overview_page_variable_name = None
more_than_one_classifications_per_post = False
has_hierarchy = False
show_list_as_index = True
template_for_single_list = "index.tmpl"
template_for_classification_overview = None
apply_to_posts = True
apply_to_pages = False
omit_empty_classifications = False
path_handler_docstrings = {
'index_index': False,
'index': """Link to a numbered index.
Example:
link://index/3 => /index-3.html""",
'index_atom': """Link to a numbered Atom index.
Example:
link://index_atom/3 => /index-3.atom""",
'index_rss': """A link to the RSS feed path.
Example:
link://rss => /blog/rss.xml""",
}
def set_site(self, site):
"""Set Nikola site."""
# Redirect automatically generated 'index_rss' path handler to 'rss' for compatibility with old rss plugin
site.register_path_handler('rss', lambda name, lang: site.path_handlers['index_rss'](name, lang))
site.path_handlers['rss'].__doc__ = """A link to the RSS feed path.
Example:
link://rss => /blog/rss.xml
""".strip()
return super(Indexes, self).set_site(site)
def get_implicit_classifications(self, lang):
"""Return a list of classification strings which should always appear in posts_per_classification."""
return [""]
def classify(self, post, lang):
"""Classify the given post for the given language."""
return [""]
def get_classification_friendly_name(self, classification, lang, only_last_component=False):
"""Extract a friendly name from the classification."""
return self.site.config["BLOG_TITLE"](lang)
def get_path(self, classification, lang, dest_type='page'):
"""Return a path for the given classification."""
if dest_type == 'rss':
return [
self.site.config['RSS_PATH'](lang),
self.site.config['RSS_FILENAME_BASE'](lang)
], 'auto'
if dest_type == 'feed':
return [
self.site.config['ATOM_PATH'](lang),
self.site.config['ATOM_FILENAME_BASE'](lang)
], 'auto'
page_number = None
if dest_type == 'page':
# Interpret argument as page number
try:
page_number = int(classification)
except (ValueError, TypeError):
pass
return [self.site.config['INDEX_PATH'](lang)], 'always', page_number
def provide_context_and_uptodate(self, classification, lang, node=None):
"""Provide data for the context and the uptodate list for the list of the given classifiation."""
kw = {
"show_untranslated_posts": self.site.config["SHOW_UNTRANSLATED_POSTS"],
}
context = {
"title": self.site.config["INDEXES_TITLE"](lang) or self.site.config["BLOG_TITLE"](lang),
"description": self.site.config["BLOG_DESCRIPTION"](lang),
"pagekind": ["main_index", "index"],
"featured": [p for p in self.site.posts if p.post_status == 'featured' and
(lang in p.translated_to or kw["show_untranslated_posts"])],
}
kw.update(context)
return context, kw
def should_generate_classification_page(self, classification, post_list, lang):
"""Only generates list of posts for classification if this function returns True."""
return not self.site.config["DISABLE_INDEXES"]
def should_generate_atom_for_classification_page(self, classification, post_list, lang):
"""Only generates Atom feed for list of posts for classification if this function returns True."""
return not self.site.config["DISABLE_MAIN_ATOM_FEED"]
def should_generate_rss_for_classification_page(self, classification, post_list, lang):
"""Only generates RSS feed for list of posts for classification if this function returns True."""
return not self.site.config["DISABLE_MAIN_RSS_FEED"]
| 39.036232
| 114
| 0.671802
|
3bb3ef7c1c388359e01fa73af56015e6814389bc
| 2,120
|
py
|
Python
|
src/prpy/perception/perception_helper.py
|
MisoRobotics/prpy
|
afbc8e9c3a8e4f4dc31261782962e12eb4b0ca10
|
[
"BSD-3-Clause"
] | 53
|
2015-02-07T04:58:23.000Z
|
2022-03-07T15:49:07.000Z
|
src/prpy/perception/perception_helper.py
|
MisoRobotics/prpy
|
afbc8e9c3a8e4f4dc31261782962e12eb4b0ca10
|
[
"BSD-3-Clause"
] | 283
|
2015-01-01T17:16:37.000Z
|
2018-05-09T23:51:48.000Z
|
src/prpy/perception/perception_helper.py
|
MisoRobotics/prpy
|
afbc8e9c3a8e4f4dc31261782962e12eb4b0ca10
|
[
"BSD-3-Clause"
] | 28
|
2015-02-20T01:37:38.000Z
|
2021-03-12T07:05:23.000Z
|
#!/usr/bin/env python
# Copyright (c) 2016, Carnegie Mellon University
# All rights reserved.
# Authors: Gilwoo Lee <gilwool@cs.cmu.edu>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of Carnegie Mellon University nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
def RemoveAllObjects(env, robot, kept_bodies=None):
"""
Remove everything from the environment except robot and kept_bodies.
The environment must be locked while calling this method.
@param env OpenRAVE environment to remove objects
@param robot Robot in env to keep
@param kept_bodies Bodies to keep
"""
if kept_bodies is None:
kept_bodies = []
for o in env.GetBodies():
if o != robot and o not in kept_bodies:
env.Remove(o)
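# Usage sketch (illustrative, not part of the original module): the caller is expected to hold
# the environment lock, e.g. with the usual openravepy locking idiom:
#   with env:  # acquires the environment lock for the duration of the block
#       RemoveAllObjects(env, robot, kept_bodies=[table])
# where `table` stands in for any KinBody the caller wants to keep.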
| 45.106383
| 79
| 0.757075
|
1c69bd13b712e4c64ed9e1e288e733666af172aa
| 3,882
|
py
|
Python
|
mmdet/datasets/imagenet.py
|
Nitin-Mane/MMDET
|
7410b25f27c200719482955cb4a8a1c381e67e04
|
[
"Apache-2.0"
] | 2
|
2019-10-15T09:42:33.000Z
|
2020-02-05T11:37:09.000Z
|
mmdet/datasets/imagenet.py
|
Nitin-Mane/MMDET
|
7410b25f27c200719482955cb4a8a1c381e67e04
|
[
"Apache-2.0"
] | null | null | null |
mmdet/datasets/imagenet.py
|
Nitin-Mane/MMDET
|
7410b25f27c200719482955cb4a8a1c381e67e04
|
[
"Apache-2.0"
] | 1
|
2020-07-31T18:42:13.000Z
|
2020-07-31T18:42:13.000Z
|
import numpy as np
from .custom import CustomDataset
from .custom_pair import CustomPairDataset
from .custom_block import CustomBlockDataset
from .registry import DATASETS
@DATASETS.register_module
class ImageNetDETVIDDataset(CustomDataset):
CLASSES = ('airplane','antelope','bear','bicycle','bird','bus',
'car','cattle','dog','domestic_cat','elephant','fox',
'giant_panda','hamster','horse','lion','lizard','monkey',
'motorcycle','rabbit','red_panda','sheep','snake','squirrel',
'tiger','train','turtle','watercraft','whale','zebra')
def __init__(self,*args,**kargs):
super().__init__(*args,**kargs)
self.img_ids = list(range(len(self.img_infos)))
self.cat_ids = list(range(len(self.CLASSES)))
def get_ann_info(self, idx):
ann = self.img_infos[idx]['ann']
# modify type if necessary.
if not isinstance(ann['bboxes'],np.ndarray):
ann['bboxes'] = np.array(ann['bboxes'], dtype=np.float32).reshape(-1, 4)
if not isinstance(ann['labels'], np.ndarray):
ann['labels'] = np.array(ann['labels'], dtype=np.int64)#.reshape(-1, 1)
self.img_infos[idx]['ann']=ann
return ann
@DATASETS.register_module
class ImageNetVIDBlockDataset(CustomBlockDataset):
CLASSES = ('airplane', 'antelope', 'bear', 'bicycle', 'bird', 'bus',
'car', 'cattle', 'dog', 'domestic_cat', 'elephant', 'fox',
'giant_panda', 'hamster', 'horse', 'lion', 'lizard', 'monkey',
'motorcycle', 'rabbit', 'red_panda', 'sheep', 'snake', 'squirrel',
'tiger', 'train', 'turtle', 'watercraft', 'whale', 'zebra')
def __init__(self, *args, **kargs):
super().__init__(*args, **kargs)
self.img_ids = list(range(len(self.img_infos)))
self.cat_ids = list(range(len(self.CLASSES)))
def get_ann_info(self, idx):
ann = self.img_infos[idx]['ann']
# modify type if necessary.
if not isinstance(ann['bboxes'], np.ndarray):
ann['bboxes'] = np.array(ann['bboxes'], dtype=np.float32).reshape(-1, 4)
if not isinstance(ann['labels'], np.ndarray):
ann['labels'] = np.array(ann['labels'], dtype=np.int64) # .reshape(-1, 1)
self.img_infos[idx]['ann'] = ann
return ann
@DATASETS.register_module
class ImageNetVIDPairDataset(CustomPairDataset):
CLASSES = ('airplane', 'antelope', 'bear', 'bicycle', 'bird', 'bus',
'car', 'cattle', 'dog', 'domestic_cat', 'elephant', 'fox',
'giant_panda', 'hamster', 'horse', 'lion', 'lizard', 'monkey',
'motorcycle', 'rabbit', 'red_panda', 'sheep', 'snake', 'squirrel',
'tiger', 'train', 'turtle', 'watercraft', 'whale', 'zebra')
def __init__(self, *args, **kargs):
super().__init__(*args, **kargs)
self.img_ids = list(range(len(self.img_infos)))
self.cat_ids = list(range(len(self.CLASSES)))
def get_ann_info(self, idx):
ann1 = self.img_infos[idx]['ann1']
ann2 = self.img_infos[idx]['ann2']
# modify type if necessary.
if not isinstance(ann1['bboxes'], np.ndarray):
ann1['bboxes'] = np.array(ann1['bboxes'], dtype=np.float32).reshape(-1, 4)
if not isinstance(ann1['labels'], np.ndarray):
ann1['labels'] = np.array(ann1['labels'], dtype=np.int64)
if not isinstance(ann1['trackids'], np.ndarray):
ann1['trackids'] = np.array(ann1['trackids'], dtype=np.int64)
self.img_infos[idx]['ann1'] = ann1
if not isinstance(ann2['bboxes'], np.ndarray):
ann2['bboxes'] = np.array(ann2['bboxes'], dtype=np.float32).reshape(-1, 4)
if not isinstance(ann2['labels'], np.ndarray):
ann2['labels'] = np.array(ann2['labels'], dtype=np.int64)
if not isinstance(ann2['trackids'], np.ndarray):
ann2['trackids'] = np.array(ann2['trackids'], dtype=np.int64)
self.img_infos[idx]['ann2'] = ann2
return ann1, ann2
| 43.133333
| 84
| 0.621587
|
8385c0c07b87e79a22c0d1b1aece4c6028665645
| 447
|
py
|
Python
|
wargames/overthewire-vortex/level9/win.py
|
spchal/pwntools-write-ups
|
dc58c50ef9dcee9ecbf8ba14a6b60f7af355afe1
|
[
"MIT"
] | 456
|
2015-01-07T15:08:46.000Z
|
2022-03-24T10:34:27.000Z
|
wargames/overthewire-vortex/level9/win.py
|
spchal/pwntools-write-ups
|
dc58c50ef9dcee9ecbf8ba14a6b60f7af355afe1
|
[
"MIT"
] | 15
|
2015-03-08T23:51:24.000Z
|
2019-06-22T16:03:03.000Z
|
wargames/overthewire-vortex/level9/win.py
|
spchal/pwntools-write-ups
|
dc58c50ef9dcee9ecbf8ba14a6b60f7af355afe1
|
[
"MIT"
] | 133
|
2015-01-08T19:17:40.000Z
|
2022-02-12T23:00:33.000Z
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from pwn import *
level = 9
host = 'vortex.labs.overthewire.org'
user = 'vortex%i' % level
chal = 'vortex%i' % level
password = args['PASSWORD']
passfile = '/etc/vortex_pass/vortex%i' % (level+1)
binary = '/vortex/%s' % chal
shell = ssh(host=host, user=user, password=password)
password = shell.cat('/var/mail/vortex9')
log.success('Password: %s' % password)
print password
| 24.833333
| 55
| 0.642058
|
1931e3d5ebed11d2b47003411a1aea4fdbb76a2a
| 290
|
py
|
Python
|
rimbaud/articles/models.py
|
dbousfira/rimbaud
|
9be52713360adef64b4134d648a88c9aac9b9d4d
|
[
"MIT"
] | null | null | null |
rimbaud/articles/models.py
|
dbousfira/rimbaud
|
9be52713360adef64b4134d648a88c9aac9b9d4d
|
[
"MIT"
] | null | null | null |
rimbaud/articles/models.py
|
dbousfira/rimbaud
|
9be52713360adef64b4134d648a88c9aac9b9d4d
|
[
"MIT"
] | null | null | null |
from django.db import models
class Article(models.Model):
title = models.CharField(max_length=200)
desc = models.CharField(max_length=200)
article = models.TextField(max_length=5000)
pub_date = models.DateTimeField('date published')
visibility = models.BooleanField()
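# Illustrative usage only (titles and values are hypothetical; requires
# `from django.utils import timezone` in the calling code):
#   Article.objects.create(title="Le Bateau ivre", desc="Poem", article="...",
#                          pub_date=timezone.now(), visibility=True)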
| 29
| 53
| 0.744828
|
cd99f1774a37d89be7a8db3978c6863abd261491
| 10,407
|
py
|
Python
|
tensorflow/python/kernel_tests/matmul_op_test.py
|
aeverall/tensorflow
|
7992bf97711919f56f80bff9e5510cead4ab2095
|
[
"Apache-2.0"
] | 2
|
2018-12-12T23:33:05.000Z
|
2019-02-26T07:20:22.000Z
|
tensorflow/python/kernel_tests/matmul_op_test.py
|
aeverall/tensorflow
|
7992bf97711919f56f80bff9e5510cead4ab2095
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/kernel_tests/matmul_op_test.py
|
aeverall/tensorflow
|
7992bf97711919f56f80bff9e5510cead4ab2095
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matmul."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import operator
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test as test_lib
# TODO(yangzihao): Currently matmul autotuning is disabled by default. Use
# os.environ["TF_MATMUL_AUTOTUNE_ENABLE"] = "1" to enable it.
class MatVecTest(test_lib.TestCase):
"""Simple test for matvec, which is sugar on top of matmul."""
def testTwoByTwoCase(self):
a = np.array([[1, 2], [3, 4]])
b = np.array([5, 6])
c = math_ops.matvec(a, b)
self.assertAllEqual((2,), c.shape)
self.assertAllEqual([5 + 2 * 6, 3 * 5 + 4 * 6], c)
def _AddTest(test, op_name, testcase_name, fn):
test_name = "_".join(["test", op_name, testcase_name])
if hasattr(test, test_name):
raise RuntimeError("Test %s defined more than once" % test_name)
setattr(test, test_name, fn)
def _GetTransposedMatrices(x, x_name, kwargs):
if kwargs["transpose_" + x_name] is True:
return x.T
elif kwargs["adjoint_" + x_name] is True:
return np.conj(x.T)
else:
return x
class MatMulTest(test_lib.TestCase):
pass # Filled in below
def _GetMatMulTest(a_np_, b_np_, use_static_shape_, **kwargs_):
def Test(self):
np_val = np.matrix(a_np_) * np.matrix(b_np_)
use_gpu = True
if a_np_.dtype is np.float16 and (
not test_util.CudaSupportsHalfMatMulAndConv()):
use_gpu = False
print("Built without fp16 matmul support for Cuda, running test on CPU.")
# Transpose and possibly conjugate a_np_ and b_np_ according to the
# attributes such that tf.matmul(effective_a_np, effective_b_np, **kwargs)
# results in a valid matrix multiplication and produces the same result as
# np.matrix(a_np_) * np.matrix(b_np_)
effective_a_np = _GetTransposedMatrices(a_np_, "a", kwargs_)
effective_b_np = _GetTransposedMatrices(b_np_, "b", kwargs_)
with self.cached_session() as sess, test_util.device(use_gpu):
if use_static_shape_:
a = constant_op.constant(effective_a_np)
b = constant_op.constant(effective_b_np)
res = math_ops.matmul(a, b, **kwargs_)
tf_val = self.evaluate(res)
else:
a = array_ops.placeholder(a_np_.dtype)
b = array_ops.placeholder(b_np_.dtype)
res = math_ops.matmul(a, b, **kwargs_)
tf_val = sess.run(res, feed_dict={a: effective_a_np, b: effective_b_np})
self.assertAllCloseAccordingToType(
tf_val,
np_val,
float_rtol=2e-5,
float_atol=2e-5,
half_rtol=0.2,
half_atol=0.2)
return Test
class MatMulGradientTest(test_lib.TestCase):
pass # Will be filled in below.
def _GetMatMulGradientTest(a_np_, b_np_, use_static_shape_, **kwargs_):
def Test(self):
if not use_static_shape_ or a_np_.dtype in (np.int32, np.int64, np.float16):
self.skipTest("Skipping infeasible gradient test.")
# Transpose and possibly conjugate a_np_ and b_np_ according to the
# attributes such that tf.matmul(effective_a_np, effective_b_np, **kwargs)
# results in a valid matrix multiplication and produces the same result as
# np.matrix(a_np_) * np.matrix(b_np_)
effective_a_np = _GetTransposedMatrices(a_np_, "a", kwargs_)
effective_b_np = _GetTransposedMatrices(b_np_, "b", kwargs_)
epsilon = np.finfo(a_np_.dtype).eps
delta = epsilon**(1.0 / 3.0)
tol = 20 * delta
with self.session(), test_util.use_gpu():
theoretical, numerical = gradient_checker_v2.compute_gradient(
lambda x: math_ops.matmul(x, effective_b_np, **kwargs_),
[effective_a_np],
delta=delta)
self.assertAllClose(theoretical, numerical, rtol=tol, atol=tol)
theoretical, numerical = gradient_checker_v2.compute_gradient(
lambda x: math_ops.matmul(effective_a_np, x, **kwargs_),
[effective_b_np],
delta=delta)
self.assertAllClose(theoretical, numerical, rtol=tol, atol=tol)
return Test
class MatMulStatsTest(test_lib.TestCase):
@test_util.run_v1_only("Test requires a Graph and NodeDef inspection")
def testSimpleStatistics(self):
a = variables.Variable(random_ops.random_normal([25, 16]))
b = variables.Variable(random_ops.random_normal([16, 9]))
math_ops.matmul(a, b)
g = ops.get_default_graph()
for op in g.get_operations():
flops = ops.get_stats_for_node_def(g, op.node_def, "flops").value
if op.name == "MatMul":
self.assertEqual(7200, flops)
@test_util.run_v1_only("Test requires a Graph and NodeDef inspection")
def testTransposedStatistics(self):
a = variables.Variable(random_ops.random_normal([16, 25]))
b = variables.Variable(random_ops.random_normal([16, 9]))
math_ops.matmul(a, b, transpose_a=True)
g = ops.get_default_graph()
for op in g.get_operations():
flops = ops.get_stats_for_node_def(g, op.node_def, "flops").value
if op.name == "MatMul":
self.assertEqual(7200, flops)
try:
# @ operator supported since python 3.5.
infix_matmul = operator.matmul
except AttributeError:
# For earlier versions of python, emulate regular behavior.
# Useful to build and test for 3.5+ on earlier versions.
def infix_matmul(x, y): # pylint: disable=invalid-name
try:
r = type(x).__matmul__(x, y)
except AttributeError:
r = NotImplemented
if r is NotImplemented and type(x) is not type(y):
try:
r = type(y).__rmatmul__(y, x)
except AttributeError:
r = NotImplemented
if r is NotImplemented:
raise TypeError("unsupported operand type(s) for @: '{}' and '{}'"
.format(type(x).__name__, type(y).__name__))
return r
class MatMulInfixOperatorTest(test_lib.TestCase):
def testMismatchedShape(self):
with self.assertRaisesRegexp(
Exception, "(Shape must be rank 2 but is rank 1|is not a matrix)"):
infix_matmul(
ops.convert_to_tensor([10.0, 20.0, 30.0]),
ops.convert_to_tensor([[40.0, 50.0], [60.0, 70.0]]))
def testMismatchedDimensions(self):
with self.assertRaisesRegexp(
Exception, "(Dimensions must be equal|Matrix size-incompatible)"):
infix_matmul(
ops.convert_to_tensor([[10.0, 20.0, 30.0]]),
ops.convert_to_tensor([[40.0, 50.0], [60.0, 70.0]]))
@test_util.run_v1_only("Tensor.op is generally not applicable in TF 2")
def testInfixMatmulIsTfMatmul(self):
a = ops.convert_to_tensor([[10.0, 20.0, 30.0]])
b = ops.convert_to_tensor([[40.0, 50.0], [60.0, 70.0], [80.0, 90.0]])
c = infix_matmul(a, b)
self.assertEqual(c.op.type, "MatMul")
def testInfixMatmulDoesDotProduct(self):
a = ops.convert_to_tensor([[10.0, 20.0, 30.0]])
b = ops.convert_to_tensor([[40.0, 50.0], [60.0, 70.0], [80.0, 90.0]])
c = infix_matmul(a, b)
d = math_ops.matmul(a, b)
self.assertAllEqual(c, d)
if __name__ == "__main__":
sizes = [1, 3, 5]
trans_options = [[False, False], [True, False], [False, True]]
for use_static_shape in [False, True]:
for dtype in (np.int32, np.int64, np.float16, np.float32, np.float64,
np.complex64, np.complex128):
if not use_static_shape and (dtype == np.int32 or dtype == np.int64):
# TODO(rmlarsen): Re-enable this test when we have fixed the underlying
# bug in Windows (b/35935459).
continue
for m in sizes:
for n in sizes:
for k in sizes:
# Construct compatible random matrices a_np of size [m, k] and b_np
# of size [k, n].
a_np = np.random.normal(-5, 5, m * k).astype(dtype).reshape([m, k])
if dtype in (np.complex64, np.complex128):
a_np.imag = np.random.normal(-5, 5,
m * k).astype(dtype).reshape([m, k])
b_np = np.random.normal(-5, 5, k * n).astype(dtype).reshape([k, n])
if dtype in (np.complex64, np.complex128):
b_np.imag = np.random.normal(-5, 5,
k * n).astype(dtype).reshape([k, n])
for adjoint_a, transpose_a in trans_options:
for adjoint_b, transpose_b in trans_options:
name = "%s_%s_%s_%s_%s_%s_%s_%s_%s" % (
use_static_shape, dtype.__name__, m, n, k, adjoint_a,
transpose_a, adjoint_b, transpose_b)
_AddTest(MatMulTest, "MatMulTest", name,
_GetMatMulTest(
a_np,
b_np,
use_static_shape,
adjoint_a=adjoint_a,
transpose_a=transpose_a,
adjoint_b=adjoint_b,
transpose_b=transpose_b))
_AddTest(MatMulGradientTest, "MatMulGradientTest", name,
_GetMatMulGradientTest(
a_np,
b_np,
use_static_shape,
adjoint_a=adjoint_a,
transpose_a=transpose_a,
adjoint_b=adjoint_b,
transpose_b=transpose_b))
test_lib.main()
| 38.261029
| 80
| 0.63957
|
686065ddde5554962e76d22150225acc8f942543
| 22,844
|
py
|
Python
|
build/tools/repopick.py
|
aryaman895/vendor_xtended
|
654cf88ad605f34cfc5624b473dd4b098ce9bd2f
|
[
"Apache-2.0"
] | null | null | null |
build/tools/repopick.py
|
aryaman895/vendor_xtended
|
654cf88ad605f34cfc5624b473dd4b098ce9bd2f
|
[
"Apache-2.0"
] | null | null | null |
build/tools/repopick.py
|
aryaman895/vendor_xtended
|
654cf88ad605f34cfc5624b473dd4b098ce9bd2f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright (C) 2013-15 The CyanogenMod Project
# (C) 2017 The LineageOS Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Run repopick.py -h for a description of this utility.
#
from __future__ import print_function
import sys
import json
import os
import subprocess
import re
import argparse
import textwrap
from functools import cmp_to_key
from xml.etree import ElementTree
try:
import requests
except ImportError:
try:
# For python3
import urllib.error
import urllib.request
except ImportError:
# For python2
import imp
import urllib2
urllib = imp.new_module('urllib')
urllib.error = urllib2
urllib.request = urllib2
# cmp() is not available in Python 3, define it manually
# See https://docs.python.org/3.0/whatsnew/3.0.html#ordering-comparisons
def cmp(a, b):
return (a > b) - (a < b)
# Verifies whether pathA is a subdirectory of (or the same as) pathB
def is_subdir(a, b):
a = os.path.realpath(a) + '/'
b = os.path.realpath(b) + '/'
return b == a[:len(b)]
def fetch_query_via_ssh(remote_url, query):
"""Given a remote_url and a query, return the list of changes that fit it
    This function is slightly messy - the ssh api does not return data in the same structure as the HTTP REST API.
    We have to get the data, then transform it to match what we're expecting from the HTTP REST API"""
if remote_url.count(':') == 2:
(uri, userhost, port) = remote_url.split(':')
userhost = userhost[2:]
elif remote_url.count(':') == 1:
(uri, userhost) = remote_url.split(':')
userhost = userhost[2:]
port = 29418
else:
raise Exception('Malformed URI: Expecting ssh://[user@]host[:port]')
out = subprocess.check_output(['ssh', '-x', '-p{0}'.format(port), userhost, 'gerrit', 'query', '--format=JSON --patch-sets --current-patch-set', query])
if not hasattr(out, 'encode'):
out = out.decode()
reviews = []
for line in out.split('\n'):
try:
data = json.loads(line)
# make our data look like the http rest api data
review = {
'branch': data['branch'],
'change_id': data['id'],
'current_revision': data['currentPatchSet']['revision'],
'number': int(data['number']),
'revisions': {patch_set['revision']: {
'_number': int(patch_set['number']),
'fetch': {
'ssh': {
'ref': patch_set['ref'],
'url': 'ssh://{0}:{1}/{2}'.format(userhost, port, data['project'])
}
},
'commit': {
'parents': [{'commit': parent} for parent in patch_set['parents']]
},
} for patch_set in data['patchSets']},
'subject': data['subject'],
'project': data['project'],
'status': data['status']
}
reviews.append(review)
except:
pass
args.quiet or print('Found {0} reviews'.format(len(reviews)))
return reviews
def fetch_query_via_http(remote_url, query):
if "requests" in sys.modules:
auth = None
if os.path.isfile(os.getenv("HOME") + "/.gerritrc"):
f = open(os.getenv("HOME") + "/.gerritrc", "r")
for line in f:
parts = line.rstrip().split("|")
if parts[0] in remote_url:
auth = requests.auth.HTTPBasicAuth(username=parts[1], password=parts[2])
status_code = '-1'
if auth:
url = '{0}/a/changes/?q={1}&o=CURRENT_REVISION&o=ALL_REVISIONS&o=ALL_COMMITS'.format(remote_url, query)
data = requests.get(url, auth=auth)
status_code = str(data.status_code)
if status_code != '200':
            # Authorization failed or returned no usable data; fall back to the unauthenticated endpoint
url = '{0}/changes/?q={1}&o=CURRENT_REVISION&o=ALL_REVISIONS&o=ALL_COMMITS'.format(remote_url, query)
data = requests.get(url)
reviews = json.loads(data.text[5:])
else:
"""Given a query, fetch the change numbers via http"""
url = '{0}/changes/?q={1}&o=CURRENT_REVISION&o=ALL_REVISIONS&o=ALL_COMMITS'.format(remote_url, query)
data = urllib.request.urlopen(url).read().decode('utf-8')
reviews = json.loads(data[5:])
for review in reviews:
review['number'] = review.pop('_number')
return reviews
def fetch_query(remote_url, query):
"""Wrapper for fetch_query_via_proto functions"""
if remote_url[0:3] == 'ssh':
return fetch_query_via_ssh(remote_url, query)
elif remote_url[0:4] == 'http':
return fetch_query_via_http(remote_url, query.replace(' ', '+'))
else:
raise Exception('Gerrit URL should be in the form http[s]://hostname/ or ssh://[user@]host[:port]')
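# Illustrative sketch (not part of the original script): how the normalized
# review data returned by fetch_query() might be consumed. The Gerrit URL and
# change number below are assumptions. Note that fetch_query_via_ssh() also
# reads the module-level `args` parsed in the __main__ block, so this sketch
# assumes the CLI arguments have already been parsed.
def _example_fetch_query_usage():  # pragma: no cover
    reviews = fetch_query('https://review.example.org', 'change:123456')
    for review in reviews:
        # Both the ssh and http transports are normalized to the same
        # HTTP-REST-API-like dictionary shape.
        print(review['number'], review['project'], review['subject'])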
if __name__ == '__main__':
# Default to MSM-Xtended Gerrit
default_gerrit = 'https://review.msmxtended.org'
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description=textwrap.dedent('''\
repopick.py is a utility to simplify the process of cherry picking
patches from MSM-Xtended's Gerrit instance (or any gerrit instance of your choosing)
Given a list of change numbers, repopick will cd into the project path
and cherry pick the latest patch available.
With the --start-branch argument, the user can specify that a branch
should be created before cherry picking. This is useful for
cherry-picking many patches into a common branch which can be easily
        abandoned later (good for testing others' changes).
The --abandon-first argument, when used in conjunction with the
--start-branch option, will cause repopick to abandon the specified
branch in all repos first before performing any cherry picks.'''))
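    # Example invocations (illustrative only; the change numbers, topic and
    # branch names are assumptions) using the options defined below:
    #   repopick.py 123456                     # pick a single change
    #   repopick.py 123456/2                   # pick patch set 2 of that change
    #   repopick.py 123456-123460              # pick an inclusive range of changes
    #   repopick.py -t some-topic -s topic-br  # pick a whole topic onto a new branch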
parser.add_argument('change_number', nargs='*',
help='change number to cherry pick. Use {change number}/{patchset number} to get a specific revision.')
parser.add_argument('-i', '--ignore-missing', action='store_true',
help='do not error out if a patch applies to a missing directory')
parser.add_argument('-s', '--start-branch', nargs=1,
metavar='', help='start the specified branch before cherry picking')
parser.add_argument('-r', '--reset', action='store_true',
help='reset to initial state (abort cherry-pick) if there is a conflict')
parser.add_argument('-a', '--abandon-first', action='store_true',
help='before cherry picking, abandon the branch specified in --start-branch')
parser.add_argument('-b', '--auto-branch', action='store_true',
help='shortcut to "--start-branch auto --abandon-first --ignore-missing"')
parser.add_argument('-q', '--quiet', action='store_true', help='print as little as possible')
parser.add_argument('-v', '--verbose', action='store_true', help='print extra information to aid in debug')
parser.add_argument('-f', '--force', action='store_true', help='force cherry pick even if change is closed')
parser.add_argument('-p', '--pull', action='store_true', help='execute pull instead of cherry-pick')
parser.add_argument('-P', '--path', metavar='', help='use the specified path for the change')
parser.add_argument('-t', '--topic', metavar='', help='pick all commits from a specified topic')
parser.add_argument('-Q', '--query', metavar='', help='pick all commits using the specified query')
parser.add_argument('-g', '--gerrit', default=default_gerrit,
metavar='', help='Gerrit Instance to use. Form proto://[user@]host[:port]')
parser.add_argument('-e', '--exclude', nargs=1,
metavar='', help='exclude a list of commit numbers separated by a ,')
parser.add_argument('-c', '--check-picked', type=int, default=10,
metavar='', help='pass the amount of commits to check for already picked changes')
args = parser.parse_args()
if not args.start_branch and args.abandon_first:
parser.error('if --abandon-first is set, you must also give the branch name with --start-branch')
if args.auto_branch:
args.abandon_first = True
args.ignore_missing = True
if not args.start_branch:
args.start_branch = ['auto']
if args.quiet and args.verbose:
parser.error('--quiet and --verbose cannot be specified together')
if (1 << bool(args.change_number) << bool(args.topic) << bool(args.query)) != 2:
parser.error('One (and only one) of change_number, topic, and query are allowed')
# Change current directory to the top of the tree
if 'ANDROID_BUILD_TOP' in os.environ:
top = os.environ['ANDROID_BUILD_TOP']
if not is_subdir(os.getcwd(), top):
sys.stderr.write('ERROR: You must run this tool from within $ANDROID_BUILD_TOP!\n')
sys.exit(1)
os.chdir(os.environ['ANDROID_BUILD_TOP'])
# Sanity check that we are being run from the top level of the tree
if not os.path.isdir('.repo'):
sys.stderr.write('ERROR: No .repo directory found. Please run this from the top of your tree.\n')
sys.exit(1)
# If --abandon-first is given, abandon the branch before starting
if args.abandon_first:
# Determine if the branch already exists; skip the abandon if it does not
plist = subprocess.check_output(['repo', 'info'])
if not hasattr(plist, 'encode'):
plist = plist.decode()
needs_abandon = False
for pline in plist.splitlines():
matchObj = re.match(r'Local Branches.*\[(.*)\]', pline)
if matchObj:
                local_branches = re.split(r'\s*,\s*', matchObj.group(1))
if any(args.start_branch[0] in s for s in local_branches):
needs_abandon = True
if needs_abandon:
# Perform the abandon only if the branch already exists
if not args.quiet:
print('Abandoning branch: %s' % args.start_branch[0])
subprocess.check_output(['repo', 'abandon', args.start_branch[0]])
if not args.quiet:
print('')
# Get the master manifest from repo
# - convert project name and revision to a path
project_name_to_data = {}
manifest = subprocess.check_output(['repo', 'manifest'])
xml_root = ElementTree.fromstring(manifest)
projects = xml_root.findall('project')
remotes = xml_root.findall('remote')
default_revision = xml_root.findall('default')[0].get('revision')
    # dump project data into a dict of dicts keyed as:
    # {project: {revision: path}}
for project in projects:
name = project.get('name')
# when name and path are equal, "repo manifest" doesn't return a path at all, so fall back to name
path = project.get('path', name)
revision = project.get('upstream')
if revision is None:
for remote in remotes:
if remote.get('name') == project.get('remote'):
revision = remote.get('revision')
if revision is None:
revision = default_revision
if name not in project_name_to_data:
project_name_to_data[name] = {}
revision = revision.split('refs/heads/')[-1]
project_name_to_data[name][revision] = path
# get data on requested changes
reviews = []
change_numbers = []
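    # cmp_reviews (below) orders reviews so that a change whose current revision
    # appears among another change's parents sorts first; unrelated changes fall
    # back to ascending change-number order.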
def cmp_reviews(review_a, review_b):
current_a = review_a['current_revision']
parents_a = [r['commit'] for r in review_a['revisions'][current_a]['commit']['parents']]
current_b = review_b['current_revision']
parents_b = [r['commit'] for r in review_b['revisions'][current_b]['commit']['parents']]
if current_a in parents_b:
return -1
elif current_b in parents_a:
return 1
else:
return cmp(review_a['number'], review_b['number'])
if args.topic:
for t in args.topic:
# Store current topic to process for change_numbers
topic = fetch_query(args.gerrit, 'status:open+topic:{0}'.format(t))
# Append topic to reviews, for later reference
reviews += topic
# Cycle through the current topic to get the change numbers
change_numbers += sorted([str(r['number']) for r in topic], key=int)
if args.query:
reviews = fetch_query(args.gerrit, args.query)
change_numbers = [str(r['number']) for r in sorted(reviews, key=cmp_to_key(cmp_reviews))]
if args.change_number:
change_url_re = re.compile('https?://.+?/([0-9]+(?:/[0-9]+)?)/?')
for c in args.change_number:
change_number = change_url_re.findall(c)
if change_number:
change_numbers.extend(change_number)
elif '-' in c:
templist = c.split('-')
for i in range(int(templist[0]), int(templist[1]) + 1):
change_numbers.append(str(i))
else:
change_numbers.append(c)
reviews = fetch_query(args.gerrit, ' OR '.join('change:{0}'.format(x.split('/')[0]) for x in change_numbers))
# make list of things to actually merge
mergables = []
# If --exclude is given, create the list of commits to ignore
exclude = []
if args.exclude:
exclude = args.exclude[0].split(',')
for change in change_numbers:
patchset = None
if '/' in change:
(change, patchset) = change.split('/')
if change in exclude:
continue
change = int(change)
if patchset:
patchset = int(patchset)
review = next((x for x in reviews if x['number'] == change), None)
if review is None:
print('Change %d not found, skipping' % change)
continue
mergables.append({
'subject': review['subject'],
'project': review['project'].split('/')[1],
'branch': review['branch'],
'change_id': review['change_id'],
'change_number': review['number'],
'status': review['status'],
'fetch': None,
'patchset': review['revisions'][review['current_revision']]['_number'],
})
mergables[-1]['fetch'] = review['revisions'][review['current_revision']]['fetch']
mergables[-1]['id'] = change
if patchset:
try:
mergables[-1]['fetch'] = [review['revisions'][x]['fetch'] for x in review['revisions'] if review['revisions'][x]['_number'] == patchset][0]
mergables[-1]['id'] = '{0}/{1}'.format(change, patchset)
mergables[-1]['patchset'] = patchset
except (IndexError, ValueError):
args.quiet or print('ERROR: The patch set {0}/{1} could not be found, using CURRENT_REVISION instead.'.format(change, patchset))
for item in mergables:
args.quiet or print('Applying change number {0}...'.format(item['id']))
# Check if change is open and exit if it's not, unless -f is specified
if (item['status'] != 'OPEN' and item['status'] != 'NEW' and item['status'] != 'DRAFT') and not args.query:
if args.force:
print('!! Force-picking a closed change !!\n')
else:
print('Change status is ' + item['status'] + '. Skipping the cherry pick.\nUse -f to force this pick.')
continue
# Convert the project name to a project path
# - check that the project path exists
project_path = None
if item['project'] in project_name_to_data and item['branch'] in project_name_to_data[item['project']]:
project_path = project_name_to_data[item['project']][item['branch']]
elif args.path:
project_path = args.path
elif item['project'] in project_name_to_data and len(project_name_to_data[item['project']]) == 1:
local_branch = list(project_name_to_data[item['project']])[0]
project_path = project_name_to_data[item['project']][local_branch]
print('WARNING: Project {0} has a different branch ("{1}" != "{2}")'.format(project_path, local_branch, item['branch']))
elif args.ignore_missing:
print('WARNING: Skipping {0} since there is no project directory for: {1}\n'.format(item['id'], item['project']))
continue
else:
sys.stderr.write('ERROR: For {0}, could not determine the project path for project {1}\n'.format(item['id'], item['project']))
sys.exit(1)
# If --start-branch is given, create the branch (more than once per path is okay; repo ignores gracefully)
if args.start_branch:
subprocess.check_output(['repo', 'start', args.start_branch[0], project_path])
# Determine the maximum commits to check already picked changes
check_picked_count = args.check_picked
branch_commits_count = int(subprocess.check_output(['git', 'rev-list', '--count', 'HEAD'], cwd=project_path))
if branch_commits_count <= check_picked_count:
check_picked_count = branch_commits_count - 1
# Check if change is already picked to HEAD...HEAD~check_picked_count
found_change = False
for i in range(0, check_picked_count):
if subprocess.call(['git', 'cat-file', '-e', 'HEAD~{0}'.format(i)], cwd=project_path, stderr=open(os.devnull, 'wb')):
continue
output = subprocess.check_output(['git', 'show', '-q', 'HEAD~{0}'.format(i)], cwd=project_path)
# make sure we have a string on Python 3
if isinstance(output, bytes):
output = output.decode('utf-8')
output = output.split()
if 'Change-Id:' in output:
head_change_id = ''
for j, t in enumerate(reversed(output)):
if t == 'Change-Id:':
head_change_id = output[len(output) - j]
break
if head_change_id.strip() == item['change_id']:
print('Skipping {0} - already picked in {1} as HEAD~{2}'.format(item['id'], project_path, i))
found_change = True
break
if found_change:
continue
# Print out some useful info
if not args.quiet:
print(u'--> Subject: "{0}"'.format(item['subject']))
print('--> Project path: {0}'.format(project_path))
print('--> Change number: {0} (Patch Set {1})'.format(item['id'], item['patchset']))
if 'anonymous http' in item['fetch']:
method = 'anonymous http'
else:
method = 'ssh'
# Try fetching from GitHub first if using default gerrit
if args.gerrit == default_gerrit:
if args.verbose:
print('Trying to fetch the change from GitHub')
if args.pull:
cmd = ['git pull --no-edit xtended', item['fetch'][method]['ref']]
else:
cmd = ['git fetch xtended', item['fetch'][method]['ref']]
if args.quiet:
cmd.append('--quiet')
else:
print(cmd)
result = subprocess.call([' '.join(cmd)], cwd=project_path, shell=True)
FETCH_HEAD = '{0}/.git/FETCH_HEAD'.format(project_path)
if result != 0 and os.stat(FETCH_HEAD).st_size != 0:
print('ERROR: git command failed')
sys.exit(result)
# Check if it worked
if args.gerrit != default_gerrit or os.stat(FETCH_HEAD).st_size == 0:
# If not using the default gerrit or github failed, fetch from gerrit.
if args.verbose:
if args.gerrit == default_gerrit:
print('Fetching from GitHub didn\'t work, trying to fetch the change from Gerrit')
else:
print('Fetching from {0}'.format(args.gerrit))
if args.pull:
cmd = ['git pull --no-edit', item['fetch'][method]['url'], item['fetch'][method]['ref']]
else:
cmd = ['git fetch', item['fetch'][method]['url'], item['fetch'][method]['ref']]
if args.quiet:
cmd.append('--quiet')
else:
print(cmd)
result = subprocess.call([' '.join(cmd)], cwd=project_path, shell=True)
if result != 0:
print('ERROR: git command failed')
sys.exit(result)
# Perform the cherry-pick
if not args.pull:
cmd = ['git cherry-pick --ff FETCH_HEAD']
if args.quiet:
cmd_out = open(os.devnull, 'wb')
else:
cmd_out = None
result = subprocess.call(cmd, cwd=project_path, shell=True, stdout=cmd_out, stderr=cmd_out)
if result != 0:
cmd = ['git diff-index --quiet HEAD --']
result = subprocess.call(cmd, cwd=project_path, shell=True, stdout=cmd_out, stderr=cmd_out)
if result == 0:
print('WARNING: git command resulted with an empty commit, aborting cherry-pick')
cmd = ['git cherry-pick --abort']
subprocess.call(cmd, cwd=project_path, shell=True, stdout=cmd_out, stderr=cmd_out)
elif args.reset:
print('ERROR: git command failed, aborting cherry-pick')
cmd = ['git cherry-pick --abort']
subprocess.call(cmd, cwd=project_path, shell=True, stdout=cmd_out, stderr=cmd_out)
sys.exit(result)
else:
print('ERROR: git command failed')
sys.exit(result)
if not args.quiet:
print('')
| 45.415507
| 156
| 0.587682
|
dd5d15506f5f50f59919538311551a0e0ff11fb1
| 18,745
|
py
|
Python
|
python/src/nnabla/utils/data_source_implements.py
|
isabella232/nnabla
|
82a3c6fed382f889d1a4a429c696bb8cedf6ce79
|
[
"Apache-2.0"
] | null | null | null |
python/src/nnabla/utils/data_source_implements.py
|
isabella232/nnabla
|
82a3c6fed382f889d1a4a429c696bb8cedf6ce79
|
[
"Apache-2.0"
] | null | null | null |
python/src/nnabla/utils/data_source_implements.py
|
isabella232/nnabla
|
82a3c6fed382f889d1a4a429c696bb8cedf6ce79
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017,2018,2019,2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''data_source_implements
'''
import atexit
import csv
import os
import threading
from collections import OrderedDict
from time import sleep
import numpy
from nnabla.config import nnabla_config
from nnabla.logger import logger
from nnabla.utils.communicator_util import current_communicator
from six.moves import queue
from .data_source import DataSource
from .data_source_loader import FileReader, load
class SimpleDataSource(DataSource):
'''SimpleDataSource
Get data from user defined function.
'''
def _get_data(self, position):
return self._load_func(self._order[position])
def reset(self):
self._indexes = self._rng.permutation(
self._size) if self._shuffle else numpy.arange(self._size)
super(SimpleDataSource, self).reset()
def __init__(self, load_func, num_examples, shuffle=False, rng=None):
super(SimpleDataSource, self).__init__(shuffle=shuffle, rng=rng)
super(SimpleDataSource, self).reset()
self._load_func = load_func
self._size = num_examples
self._variables = ['x' + str(x)
for x in range(len(self._load_func(0)))]
if shuffle:
self._order = list(
self._rng.permutation(list(range(self._size))))
else:
self._order = list(range(self._size))
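# Illustrative sketch (not part of the original module): a user-defined
# load_func receives a sample index and returns that sample as a tuple of
# arrays; SimpleDataSource derives the variable names ('x0', 'x1', ...) from
# the tuple returned for index 0. The shapes and sizes below are assumptions.
def _example_simple_data_source():  # pragma: no cover
    def load_func(index):
        image = numpy.zeros((3, 8, 8), dtype=numpy.float32)
        label = numpy.array([index % 10], dtype=numpy.int32)
        return image, label
    ds = SimpleDataSource(load_func, num_examples=100, shuffle=False)
    return ds.variables  # derived variable names for the two-element tuple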
class CachePrefetcher(object):
def __init__(self, cachedir, variables):
self._lock = threading.Lock()
self._q = queue.Queue()
self.file_name = None
self._cachedir = cachedir
self._variables = variables
self._filereader = FileReader(self._cachedir)
self._current_data = None
self._thread = threading.Thread(target=self._worker)
        self._thread.daemon = True
self._thread.start()
self._closed = False
atexit.register(self.close)
def read_cache(self, file_name, variables):
retry = 1
while True:
if retry > 10:
                logger.log(99, 'read_cache() retry count exceeded; giving up.')
                logger.log(
                    99, 'Cache file {} not found. pid={}'.format(file_name, os.getpid()))
                logger.log(99, 'Fatal error: sending SIGKILL to this process.')
os.kill(os.getpid(), 9)
result = {}
try:
with FileReader(file_name).open(textmode=False) as f:
for v in variables:
result[v] = numpy.load(f, allow_pickle=True)
if set(result.keys()) == set(variables):
break
else:
                    logger.log(
                        99, 'read_cache() failed, retrying ({}/10).'.format(retry))
retry += 1
sleep(0.5)
except:
logger.log(
99, 'Cache file {} not found, retry count {}.'.format(file_name, retry))
retry += 1
sleep(0.5)
return result
def _worker(self):
while True:
sleep(0.001)
cache_file_name = self._q.get()
self._current_data = {}
if cache_file_name is None:
self._q.task_done()
break
self._current_data = self.read_cache(
cache_file_name, self._variables)
self._q.task_done()
def request(self, cache_file_name):
self.file_name = cache_file_name
self._q.put(cache_file_name)
def check_if_hit(self, fn):
self._lock.acquire()
if fn == self.file_name:
self.file_name = None
self._lock.release()
return True
self._lock.release()
return False
def read(self):
self._q.join()
result = self._current_data
self.file_name = None
self._current_data = None
return result
def close(self):
if not self._closed:
self._q.join()
self._q.put(None)
self._q.join()
self._closed = True
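# Illustrative sketch (not part of the original module): the prefetcher
# protocol is request() to enqueue a cache file for the background thread,
# then read() to block until its contents are available. The directory,
# file name and variable names below are assumptions.
def _example_cache_prefetcher():  # pragma: no cover
    prefetcher = CachePrefetcher('/tmp/nnabla_cache', ['x', 'y'])
    prefetcher.request('/tmp/nnabla_cache/cache_000000.npy')
    data = prefetcher.read()  # dict mapping variable name -> numpy array
    prefetcher.close()
    return data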
class CacheReaderWithPrefetch(object):
def __init__(self, cachedir, num_threads, variables):
self._variables = variables
self._cache_prefetchers = [CachePrefetcher(
cachedir, variables) for _ in range(num_threads)]
self._closed = False
atexit.register(self.close)
def open_and_prefetch_cache(self, file_name, file_names_to_prefetch):
cp_file_names = [cf.file_name for cf in self._cache_prefetchers]
# print('cp files', cp_file_names)
result = None
for cf in self._cache_prefetchers:
if cf.check_if_hit(file_name):
result = cf.read()
break
if not result:
# print("no hit", file_name)
result = cf.read_cache(file_name, self._variables)
cp_file_names = [cf.file_name for cf in self._cache_prefetchers]
for i, fn in enumerate(cp_file_names):
if fn and fn not in file_names_to_prefetch:
self._cache_prefetchers[i].read() # waste prefetched cache
# print("wasted", fn)
for fn in file_names_to_prefetch:
if fn not in cp_file_names:
try:
index = cp_file_names.index(None)
cp_file_names[index] = fn
self._cache_prefetchers[index].request(
cp_file_names[index])
except:
continue
return result
def close(self):
if not self._closed:
for cf in self._cache_prefetchers:
cf.close()
self._closed = True
class CacheDataSource(DataSource):
'''
Get data from file cache directly.
'''
def _get_next_data(self, filename, file_names_to_prefetch, retry=1):
if retry > 10:
            logger.log(99, '_get_next_data() retry count exceeded; giving up.')
            raise RuntimeError(
                'Could not load cache file {} after 10 retries.'.format(filename))
if self._cache_type == '.npy':
next_data = self._cache_reader_with_prefetch.open_and_prefetch_cache(
filename, file_names_to_prefetch)
else:
# h5 format
next_data = {}
with self._filereader.open_cache(filename) as cache:
for k, v in cache.items():
next_data[k] = v[()]
if current_communicator():
if set(self._variables) != set(next_data.keys()):
                logger.log(99, '_get_next_data() failed at worker {}; retry {}/10.'.format(
                    current_communicator().rank, retry))
sleep(0.01)
return self._get_next_data(filename, file_names_to_prefetch, retry+1)
return next_data
def _get_data(self, position):
self._position = position
if current_communicator():
try:
filename, index = self._order[position]
except IndexError:
                logger.log(99, '_get_data() failed at worker {}; retrying.'.format(
                    current_communicator().rank))
sleep(0.01)
return self._get_data(position)
else:
filename, index = self._order[position]
if filename != self._current_filename:
file_names_to_prefetch = None
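            # Heuristic look-ahead: stepping through self._order with a stride
            # of _max_length, starting one file-length ahead, yields roughly one
            # (filename, index) entry per upcoming cache file; those filenames
            # are handed to the prefetcher threads via _get_next_data().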
if self._cache_type == ".npy" and self._num_of_threads > 0:
file_names_to_prefetch = [o[0] for o in self._order[position + self._max_length:position + self._max_length *
self._num_of_threads:self._max_length]]
self._current_data = self._get_next_data(
filename, file_names_to_prefetch)
self._current_filename = filename
data = [self._current_data[v][index] for v in self.variables]
if self._normalize:
new_data = []
for d in data:
if d.dtype == numpy.uint8:
d = d.astype(numpy.float32) * (1.0 / 255.0)
elif d.dtype == numpy.uint16:
d = d.astype(numpy.float32) * (1.0 / 65535.0)
new_data.append(d)
data = new_data
return data
def initialize_cache_files(self, filename):
length = -1
with self._filereader.open_cache(filename) as cache:
# Check variables.
if self._variables is None:
self._variables = list(cache.keys())
else:
if current_communicator():
if not set(self._variables) == set(cache.keys()):
logger.log(99, 'Error at worker {} {} {}'.format(
current_communicator().rank, set(self._variables), set(cache.keys())))
                        raise ValueError(
                            'Cache file {} does not contain the expected variables.'.format(filename))
for k, v in cache.items():
if length < 0:
length = len(v)
else:
assert(length == len(v))
self._cache_files.append((filename, length))
logger.info('{} {}'.format(filename, length))
if length > self._max_length:
self._max_length = length
def initialize_cache_files_with_index(self, index_filename):
self._filenames = []
self._cache_files = []
try:
with FileReader(index_filename).open(textmode=True) as f:
reader = csv.reader(f)
for row in reader:
file_name = os.path.join(self._cachedir, row[0])
self._filenames.append(file_name)
length = int(row[1])
self._cache_files.append((file_name, length))
if length > self._max_length:
self._max_length = length
if self._variables is None:
with self._filereader.open_cache(file_name) as cache:
# Check variables.
self._variables = list(cache.keys())
except:
self._filenames = [f for f in self._filereader.listdir() if os.path.splitext(f)[
1].lower() == ".h5"]
for filename in self._filenames:
self.initialize_cache_files(filename)
def initialize_cache_info(self, info_filename):
try:
with FileReader(info_filename).open(textmode=True) as f:
self._variables = []
reader = csv.reader(f)
for row in reader:
self._variables.append(row[0])
self._cache_type = '.npy'
except:
self._cache_type = '.h5'
def __init__(self, cachedir, shuffle=False, rng=None, normalize=False):
super(CacheDataSource, self).__init__(shuffle=shuffle, rng=rng)
self._current_data = {}
self._current_filename = None
self._cachedir = cachedir
self._normalize = normalize
self._filereader = FileReader(self._cachedir)
self._num_of_threads = int(nnabla_config.get(
'DATA_ITERATOR', 'cache_file_cache_num_of_threads'))
self._variables = None
self._generation = -1
self._cache_files = []
self._max_length = 1
info_filename = os.path.join(self._cachedir, "cache_info.csv")
self.initialize_cache_info(info_filename)
index_filename = os.path.join(self._cachedir, "cache_index.csv")
self.initialize_cache_files_with_index(index_filename)
logger.info('{}'.format(len(self._cache_files)))
self._cache_reader_with_prefetch = CacheReaderWithPrefetch(
self._cachedir, self._num_of_threads, self._variables)
self._thread_lock = threading.Lock()
self._original_order = []
for i in range(len(self._cache_files)):
filename, length = self._cache_files[i]
for j in range(length):
self._original_order.append((filename, j))
self.reset()
def close(self):
if hasattr(self, '_cache_reader_with_prefetch') and self._cache_reader_with_prefetch:
self._cache_reader_with_prefetch.close()
self._cache_reader_with_prefetch = None
def reset(self):
with self._thread_lock:
super(CacheDataSource, self).reset()
self._order = []
if self._shuffle:
for i in list(self._rng.permutation(list(range(len(self._cache_files))))):
filename, length = self._cache_files[i]
for j in list(self._rng.permutation(list(range(length)))):
self._order.append((filename, j))
else:
for i in range(len(self._cache_files)):
filename, length = self._cache_files[i]
for j in range(length):
self._order.append((filename, j))
self._current_data = {}
self._current_filename = None
self._size = len(self._order)
self._generation += 1
class CsvDataSource(DataSource):
    '''CsvDataSource

    Get data from a CSV file whose header columns follow the
    "NAME[__INDEX][:LABELNAME]" convention.
    '''
def _remove_comment_cols(self, header, rows):
for col_index in reversed(range(len(header))):
if header[col_index][0] == '#':
del header[col_index]
for row in rows:
del row[col_index]
def _process_header(self, row):
self._variables_dict = OrderedDict()
self._columns = []
for column, column_value in enumerate(row):
# Analyze header "NAME[__INDEX][:LABELNAME]"
# TODO: use regex instead of split....
try:
variable_with_index, label = column_value.split(':', 1)
except:
label = None
variable_with_index = column_value
try:
variable, index = variable_with_index.split('__', 1)
except:
variable = variable_with_index
index = None
self._columns.append((variable, index, label))
if index is None:
self._variables_dict[variable] = {
'label': label, 'value': None}
else:
if variable not in self._variables_dict:
self._variables_dict[variable] = []
self._variables_dict[variable].append(
{'label': label, 'value': None})
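    # Examples of the "NAME[__INDEX][:LABELNAME]" header convention parsed above
    # (illustrative values):
    #   "x:image"    -> variable 'x', no index, label 'image'
    #   "y__0:label" -> variable 'y', index '0', label 'label'
    #   "y__1"       -> variable 'y', index '1', no label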
def _process_row(self, row):
values = OrderedDict()
if len(row) == len(self._columns):
for column, column_value in enumerate(row):
variable, index, label = self._columns[column]
if index is None:
values[variable] = self._get_value(
column_value, is_vector=True)
else:
if variable not in values:
values[variable] = []
values[variable].append(self._get_value(column_value))
return values.values()
def _get_value(self, value, is_vector=False):
try:
if is_vector:
value = [float(value)]
else:
value = float(value)
return value
except ValueError:
pass
ext = (os.path.splitext(value)[1]).lower()
with self._filereader.open(value) as f:
value = load(ext)(f, normalize=self._normalize)
return value
def _get_data(self, position):
return tuple(self._process_row(self._rows[self._order[position]]))
def __init__(self, filename, shuffle=False, rng=None, normalize=False):
super(CsvDataSource, self).__init__(shuffle=shuffle, rng=rng)
self._filename = filename
self._normalize = normalize
# Store contents of CSV file into the self._rows list.
self._generation = -1
self._rows = []
self._filereader = FileReader(self._filename)
with self._filereader.open(textmode=True, encoding='utf-8-sig') as f:
csvreader = csv.reader(f)
header = next(csvreader)
self._rows = list(csvreader)
self._size = len(self._rows)
self._remove_comment_cols(header, self._rows)
self._process_header(header)
self._original_source_uri = self._filename
self._original_order = list(range(self._size))
self._order = list(range(self._size))
self._variables = tuple(self._variables_dict.keys())
self.reset()
def reset(self):
if self._shuffle:
logger.debug('Shuffle start.')
self._order = list(
self._rng.permutation(list(range(self._size))))
logger.debug('Shuffle end.')
self._generation += 1
super(CsvDataSource, self).reset()
class ConcatDataSource(DataSource):
'''ConcatDataSource
Wrapper DataSource for Multiple DataSources.
'''
def __init__(self, data_source_list, shuffle=True, rng=None):
super(ConcatDataSource, self).__init__(shuffle=shuffle, rng=rng)
self._data_sources = data_source_list
self._sw_points = list(map(
lambda s: sum(
[x.size for x in data_source_list[:data_source_list.index(s) + 1]]),
data_source_list)) # Switching DataSource index
self._size = self._sw_points[-1]
self._variables = data_source_list[0].variables
self.reset()
def _get_data(self, position):
idx = self._indexes[position]
for i, data_bound in enumerate(self._sw_points):
if idx < data_bound:
_idx = idx - self._sw_points[i - 1] if i > 0 else idx
return self._data_sources[i]._get_data(_idx)
return None
def reset(self):
# reset method initialize self._indexes
if self._shuffle:
self._indexes = self._rng.permutation(self._size)
else:
self._indexes = numpy.arange(self._size)
super(ConcatDataSource, self).reset()
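# Illustrative sketch (not part of the original module): ConcatDataSource
# exposes several DataSource objects as a single one; a flat index is mapped
# to the owning source via the cumulative-size switch points computed in
# __init__. `sources` is assumed to be a list of DataSource instances that
# share the same variable names.
def _example_concat_data_source(sources):  # pragma: no cover
    concat = ConcatDataSource(sources, shuffle=False)
    return concat.size, concat.variables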
| 36.187259
| 125
| 0.570285
|
179bdbdd3c783962e1ae280b27542f0d11deb985
| 86,944
|
py
|
Python
|
tests/track/loader_test.py
|
paulcoghlan/rally
|
76f82265814836565ccf53edda3d5426e4c2db8a
|
[
"Apache-2.0"
] | null | null | null |
tests/track/loader_test.py
|
paulcoghlan/rally
|
76f82265814836565ccf53edda3d5426e4c2db8a
|
[
"Apache-2.0"
] | null | null | null |
tests/track/loader_test.py
|
paulcoghlan/rally
|
76f82265814836565ccf53edda3d5426e4c2db8a
|
[
"Apache-2.0"
] | null | null | null |
import re
import unittest.mock as mock
from unittest import TestCase
import jinja2
from esrally import exceptions, config
from esrally.utils import io
from esrally.track import loader, track
def strip_ws(s):
return re.sub(r"\s", "", s)
class StaticClock:
NOW = 1453362707.0
@staticmethod
def now():
return StaticClock.NOW
@staticmethod
def stop_watch():
return None
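# Note (added for clarity): StaticClock.NOW (1453362707.0) corresponds to
# 2016-01-21 UTC, which is why the `days_ago('01-01-2000')` filter in the
# template tests below renders as 5864 days.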
class SimpleTrackRepositoryTests(TestCase):
@mock.patch("os.path.exists")
@mock.patch("os.path.isdir")
def test_track_from_directory(self, is_dir, path_exists):
is_dir.return_value = True
path_exists.return_value = True
repo = loader.SimpleTrackRepository("/path/to/track/unit-test")
self.assertEqual("unit-test", repo.track_name)
self.assertEqual(["unit-test"], repo.track_names)
self.assertEqual("/path/to/track/unit-test", repo.track_dir("unit-test"))
self.assertEqual("/path/to/track/unit-test/track.json", repo.track_file("unit-test"))
@mock.patch("os.path.exists")
@mock.patch("os.path.isdir")
@mock.patch("os.path.isfile")
def test_track_from_file(self, is_file, is_dir, path_exists):
is_file.return_value = True
is_dir.return_value = False
path_exists.return_value = True
repo = loader.SimpleTrackRepository("/path/to/track/unit-test/my-track.json")
self.assertEqual("my-track", repo.track_name)
self.assertEqual(["my-track"], repo.track_names)
self.assertEqual("/path/to/track/unit-test", repo.track_dir("my-track"))
self.assertEqual("/path/to/track/unit-test/my-track.json", repo.track_file("my-track"))
@mock.patch("os.path.exists")
@mock.patch("os.path.isdir")
@mock.patch("os.path.isfile")
def test_track_from_named_pipe(self, is_file, is_dir, path_exists):
is_file.return_value = False
is_dir.return_value = False
path_exists.return_value = True
with self.assertRaises(exceptions.SystemSetupError) as ctx:
loader.SimpleTrackRepository("a named pipe cannot point to a track")
self.assertEqual("a named pipe cannot point to a track is neither a file nor a directory", ctx.exception.args[0])
@mock.patch("os.path.exists")
def test_track_from_non_existing_path(self, path_exists):
path_exists.return_value = False
with self.assertRaises(exceptions.SystemSetupError) as ctx:
loader.SimpleTrackRepository("/path/does/not/exist")
self.assertEqual("Track path /path/does/not/exist does not exist", ctx.exception.args[0])
@mock.patch("os.path.isdir")
@mock.patch("os.path.exists")
def test_track_from_directory_without_track(self, path_exists, is_dir):
# directory exists, but not the file
path_exists.side_effect = [True, False]
is_dir.return_value = True
with self.assertRaises(exceptions.SystemSetupError) as ctx:
loader.SimpleTrackRepository("/path/to/not/a/track")
self.assertEqual("Could not find track.json in /path/to/not/a/track", ctx.exception.args[0])
@mock.patch("os.path.exists")
@mock.patch("os.path.isdir")
@mock.patch("os.path.isfile")
def test_track_from_file_but_not_json(self, is_file, is_dir, path_exists):
is_file.return_value = True
is_dir.return_value = False
path_exists.return_value = True
with self.assertRaises(exceptions.SystemSetupError) as ctx:
loader.SimpleTrackRepository("/path/to/track/unit-test/my-track.xml")
self.assertEqual("/path/to/track/unit-test/my-track.xml has to be a JSON file", ctx.exception.args[0])
class GitRepositoryTests(TestCase):
class MockGitRepo:
def __init__(self, remote_url, root_dir, repo_name, resource_name, offline, fetch=True):
self.repo_dir = "%s/%s" % (root_dir, repo_name)
@mock.patch("os.path.exists")
@mock.patch("os.walk")
def test_track_from_existing_repo(self, walk, exists):
walk.return_value = iter([(".", ["unittest", "unittest2", "unittest3"], [])])
exists.return_value = True
cfg = config.Config()
cfg.add(config.Scope.application, "track", "track.name", "unittest")
cfg.add(config.Scope.application, "track", "repository.name", "default")
cfg.add(config.Scope.application, "system", "offline.mode", False)
cfg.add(config.Scope.application, "node", "root.dir", "/tmp")
cfg.add(config.Scope.application, "benchmarks", "track.repository.dir", "tracks")
repo = loader.GitTrackRepository(cfg, fetch=False, update=False, repo_class=GitRepositoryTests.MockGitRepo)
self.assertEqual("unittest", repo.track_name)
self.assertEqual(["unittest", "unittest2", "unittest3"], list(repo.track_names))
self.assertEqual("/tmp/tracks/default/unittest", repo.track_dir("unittest"))
self.assertEqual("/tmp/tracks/default/unittest/track.json", repo.track_file("unittest"))
class TrackPreparationTests(TestCase):
@mock.patch("esrally.utils.io.prepare_file_offset_table")
@mock.patch("os.path.getsize")
@mock.patch("os.path.isfile")
def test_does_nothing_if_document_file_available(self, is_file, get_size, prepare_file_offset_table):
is_file.return_value = True
get_size.return_value = 2000
prepare_file_offset_table.return_value = 5
p = loader.DocumentSetPreparator(track_name="unit-test", offline=False, test_mode=False)
p.prepare_document_set(document_set=track.Documents(source_format=track.Documents.SOURCE_FORMAT_BULK,
document_file="docs.json",
document_archive="docs.json.bz2",
number_of_documents=5,
compressed_size_in_bytes=200,
uncompressed_size_in_bytes=2000),
data_root="/tmp")
prepare_file_offset_table.assert_called_with("/tmp/docs.json")
@mock.patch("esrally.utils.io.prepare_file_offset_table")
@mock.patch("os.path.getsize")
@mock.patch("os.path.isfile")
def test_decompresses_if_archive_available(self, is_file, get_size, prepare_file_offset_table):
is_file.return_value = True
get_size.return_value = 2000
prepare_file_offset_table.return_value = 5
p = loader.DocumentSetPreparator(track_name="unit-test", offline=False, test_mode=False)
p.prepare_document_set(document_set=track.Documents(source_format=track.Documents.SOURCE_FORMAT_BULK,
document_file="docs.json",
document_archive="docs.json.bz2",
number_of_documents=5,
compressed_size_in_bytes=200,
uncompressed_size_in_bytes=2000),
data_root="/tmp")
prepare_file_offset_table.assert_called_with("/tmp/docs.json")
@mock.patch("esrally.utils.io.decompress")
@mock.patch("os.path.getsize")
@mock.patch("os.path.isfile")
def test_raise_error_on_wrong_uncompressed_file_size(self, is_file, get_size, decompress):
# uncompressed file does not exist
# compressed file exists
# after decompression, uncompressed file exists
is_file.side_effect = [False, True, True]
# compressed file size is 200
# uncompressed is corrupt, only 1 byte available
get_size.side_effect = [200, 1]
p = loader.DocumentSetPreparator(track_name="unit-test", offline=False, test_mode=False)
with self.assertRaises(exceptions.DataError) as ctx:
p.prepare_document_set(document_set=track.Documents(source_format=track.Documents.SOURCE_FORMAT_BULK,
document_file="docs.json",
document_archive="docs.json.bz2",
number_of_documents=5,
compressed_size_in_bytes=200,
uncompressed_size_in_bytes=2000),
data_root="/tmp")
self.assertEqual("[/tmp/docs.json] is corrupt. Extracted [1] bytes but [2000] bytes are expected.", ctx.exception.args[0])
decompress.assert_called_with("/tmp/docs.json.bz2", "/tmp")
@mock.patch("esrally.utils.io.decompress")
@mock.patch("os.path.getsize")
@mock.patch("os.path.isfile")
def test_raise_error_if_compressed_does_not_contain_expected_document_file(self, is_file, get_size, decompress):
# uncompressed file does not exist
# compressed file exists
# after decompression, uncompressed file does not exist (e.g. because the output file name is called differently)
is_file.side_effect = [False, True, False]
# compressed file size is 200
get_size.return_value = 200
p = loader.DocumentSetPreparator(track_name="unit-test", offline=False, test_mode=False)
with self.assertRaises(exceptions.DataError) as ctx:
p.prepare_document_set(document_set=track.Documents(source_format=track.Documents.SOURCE_FORMAT_BULK,
base_url="http://benchmarks.elasticsearch.org/corpora/unit-test",
document_file="docs.json",
document_archive="docs.json.bz2",
number_of_documents=5,
compressed_size_in_bytes=200,
uncompressed_size_in_bytes=2000),
data_root="/tmp")
self.assertEqual("Decompressing [/tmp/docs.json.bz2] did not create [/tmp/docs.json]. Please check with the track author if the "
"compressed archive has been created correctly.", ctx.exception.args[0])
decompress.assert_called_with("/tmp/docs.json.bz2", "/tmp")
@mock.patch("esrally.utils.io.prepare_file_offset_table")
@mock.patch("esrally.utils.io.decompress")
@mock.patch("esrally.utils.net.download")
@mock.patch("esrally.utils.io.ensure_dir")
@mock.patch("os.path.getsize")
@mock.patch("os.path.isfile")
def test_download_document_archive_if_no_file_available(self, is_file, get_size, ensure_dir, download, decompress,
prepare_file_offset_table):
# uncompressed file does not exist
# compressed file does not exist
# file check for compressed file before download attempt (for potential error message)
# after download compressed file exists
# after download uncompressed file still does not exist (in main loop)
# after download compressed file exists (in main loop)
# after decompression, uncompressed file exists
is_file.side_effect = [False, False, False, True, False, True, True, True]
# compressed file size is 200 after download
# compressed file size is 200 after download (in main loop)
# uncompressed file size is 2000 after decompression
# uncompressed file size is 2000 after decompression (in main loop)
get_size.side_effect = [200, 200, 2000, 2000]
prepare_file_offset_table.return_value = 5
p = loader.DocumentSetPreparator(track_name="unit-test", offline=False, test_mode=False)
p.prepare_document_set(document_set=track.Documents(source_format=track.Documents.SOURCE_FORMAT_BULK,
base_url="http://benchmarks.elasticsearch.org/corpora/unit-test",
document_file="docs.json",
document_archive="docs.json.bz2",
number_of_documents=5,
compressed_size_in_bytes=200,
uncompressed_size_in_bytes=2000),
data_root="/tmp")
ensure_dir.assert_called_with("/tmp")
decompress.assert_called_with("/tmp/docs.json.bz2", "/tmp")
download.assert_called_with("http://benchmarks.elasticsearch.org/corpora/unit-test/docs.json.bz2",
"/tmp/docs.json.bz2", 200, progress_indicator=mock.ANY)
prepare_file_offset_table.assert_called_with("/tmp/docs.json")
@mock.patch("esrally.utils.io.prepare_file_offset_table")
@mock.patch("esrally.utils.net.download")
@mock.patch("esrally.utils.io.ensure_dir")
@mock.patch("os.path.getsize")
@mock.patch("os.path.isfile")
def test_download_document_file_if_no_file_available(self, is_file, get_size, ensure_dir, download, prepare_file_offset_table):
# uncompressed file does not exist
# file check for uncompressed file before download attempt (for potential error message)
# after download uncompressed file exists
# after download uncompressed file exists (main loop)
is_file.side_effect = [False, False, True, True]
# uncompressed file size is 2000
get_size.return_value = 2000
prepare_file_offset_table.return_value = 5
p = loader.DocumentSetPreparator(track_name="unit-test", offline=False, test_mode=False)
p.prepare_document_set(document_set=track.Documents(source_format=track.Documents.SOURCE_FORMAT_BULK,
base_url="http://benchmarks.elasticsearch.org/corpora/unit-test",
document_file="docs.json",
# --> We don't provide a document archive here <--
document_archive=None,
number_of_documents=5,
compressed_size_in_bytes=200,
uncompressed_size_in_bytes=2000),
data_root="/tmp")
ensure_dir.assert_called_with("/tmp")
download.assert_called_with("http://benchmarks.elasticsearch.org/corpora/unit-test/docs.json",
"/tmp/docs.json", 2000, progress_indicator=mock.ANY)
prepare_file_offset_table.assert_called_with("/tmp/docs.json")
@mock.patch("esrally.utils.net.download")
@mock.patch("esrally.utils.io.ensure_dir")
@mock.patch("os.path.isfile")
def test_raise_download_error_if_offline(self, is_file, ensure_dir, download):
# uncompressed file does not exist
is_file.return_value = False
p = loader.DocumentSetPreparator(track_name="unit-test", offline=True, test_mode=False)
with self.assertRaises(exceptions.SystemSetupError) as ctx:
p.prepare_document_set(document_set=track.Documents(source_format=track.Documents.SOURCE_FORMAT_BULK,
base_url="http://benchmarks.elasticsearch.org/corpora/unit-test",
document_file="docs.json",
number_of_documents=5,
uncompressed_size_in_bytes=2000),
data_root="/tmp")
self.assertEqual("Cannot find /tmp/docs.json. Please disable offline mode and retry again.", ctx.exception.args[0])
self.assertEqual(0, ensure_dir.call_count)
self.assertEqual(0, download.call_count)
@mock.patch("esrally.utils.net.download")
@mock.patch("esrally.utils.io.ensure_dir")
@mock.patch("os.path.isfile")
def test_raise_download_error_if_no_url_provided_and_file_missing(self, is_file, ensure_dir, download):
# uncompressed file does not exist
is_file.return_value = False
p = loader.DocumentSetPreparator(track_name="unit-test", offline=False, test_mode=False)
with self.assertRaises(exceptions.DataError) as ctx:
p.prepare_document_set(document_set=track.Documents(source_format=track.Documents.SOURCE_FORMAT_BULK,
base_url=None,
document_file="docs.json",
document_archive=None,
number_of_documents=5,
uncompressed_size_in_bytes=2000),
data_root="/tmp")
self.assertEqual("/tmp/docs.json is missing and it cannot be downloaded because no base URL is provided.",
ctx.exception.args[0])
self.assertEqual(0, ensure_dir.call_count)
self.assertEqual(0, download.call_count)
@mock.patch("esrally.utils.net.download")
@mock.patch("esrally.utils.io.ensure_dir")
@mock.patch("os.path.getsize")
@mock.patch("os.path.isfile")
def test_raise_download_error_if_no_url_provided_and_wrong_file_size(self, is_file, get_size, ensure_dir, download):
# uncompressed file exists...
is_file.return_value = True
        # but its size is wrong
get_size.return_value = 100
p = loader.DocumentSetPreparator(track_name="unit-test", offline=False, test_mode=False)
with self.assertRaises(exceptions.DataError) as ctx:
p.prepare_document_set(document_set=track.Documents(source_format=track.Documents.SOURCE_FORMAT_BULK,
document_file="docs.json",
number_of_documents=5,
uncompressed_size_in_bytes=2000),
data_root="/tmp")
self.assertEqual("/tmp/docs.json is present but does not have the expected size of 2000 bytes and it cannot be downloaded because "
"no base URL is provided.", ctx.exception.args[0])
self.assertEqual(0, ensure_dir.call_count)
self.assertEqual(0, download.call_count)
@mock.patch("esrally.utils.net.download")
@mock.patch("esrally.utils.io.ensure_dir")
@mock.patch("os.path.isfile")
def test_raise_download_error_no_test_mode_file(self, is_file, ensure_dir, download):
import urllib.error
# uncompressed file does not exist
is_file.return_value = False
download.side_effect = urllib.error.HTTPError("http://benchmarks.elasticsearch.org.s3.amazonaws.com/corpora/unit-test/docs-1k.json",
404, "", None, None)
p = loader.DocumentSetPreparator(track_name="unit-test", offline=False, test_mode=True)
with self.assertRaises(exceptions.DataError) as ctx:
p.prepare_document_set(document_set=track.Documents(source_format=track.Documents.SOURCE_FORMAT_BULK,
base_url="http://benchmarks.elasticsearch.org/corpora/unit-test",
document_file="docs-1k.json",
number_of_documents=5,
uncompressed_size_in_bytes=None),
data_root="/tmp")
self.assertEqual("Track [unit-test] does not support test mode. Please ask the track author to add it or disable test mode "
"and retry.", ctx.exception.args[0])
ensure_dir.assert_called_with("/tmp")
download.assert_called_with("http://benchmarks.elasticsearch.org/corpora/unit-test/docs-1k.json",
"/tmp/docs-1k.json", None, progress_indicator=mock.ANY)
@mock.patch("esrally.utils.net.download")
@mock.patch("esrally.utils.io.ensure_dir")
@mock.patch("os.path.isfile")
def test_raise_download_error_on_connection_problems(self, is_file, ensure_dir, download):
import urllib.error
# uncompressed file does not exist
is_file.return_value = False
download.side_effect = urllib.error.HTTPError("http://benchmarks.elasticsearch.org/corpora/unit-test/docs.json",
500, "Internal Server Error", None, None)
p = loader.DocumentSetPreparator(track_name="unit-test", offline=False, test_mode=False)
with self.assertRaises(exceptions.DataError) as ctx:
p.prepare_document_set(document_set=track.Documents(source_format=track.Documents.SOURCE_FORMAT_BULK,
base_url="http://benchmarks.elasticsearch.org/corpora/unit-test",
document_file="docs.json",
number_of_documents=5,
uncompressed_size_in_bytes=2000),
data_root="/tmp")
self.assertEqual("Could not download [http://benchmarks.elasticsearch.org/corpora/unit-test/docs.json] "
"to [/tmp/docs.json] (HTTP status: 500, reason: Internal Server Error)", ctx.exception.args[0])
ensure_dir.assert_called_with("/tmp")
download.assert_called_with("http://benchmarks.elasticsearch.org/corpora/unit-test/docs.json",
"/tmp/docs.json", 2000, progress_indicator=mock.ANY)
@mock.patch("esrally.utils.io.prepare_file_offset_table")
@mock.patch("esrally.utils.io.decompress")
@mock.patch("os.path.getsize")
@mock.patch("os.path.isfile")
def test_prepare_bundled_document_set_if_document_file_available(self, is_file, get_size, decompress, prepare_file_offset_table):
is_file.return_value = True
# check only uncompressed
get_size.side_effect = [2000]
prepare_file_offset_table.return_value = 5
p = loader.DocumentSetPreparator(track_name="unit-test", offline=False, test_mode=False)
self.assertTrue(p.prepare_bundled_document_set(document_set=track.Documents(source_format=track.Documents.SOURCE_FORMAT_BULK,
document_file="docs.json",
document_archive="docs.json.bz2",
number_of_documents=5,
compressed_size_in_bytes=200,
uncompressed_size_in_bytes=2000),
data_root="."))
prepare_file_offset_table.assert_called_with("./docs.json")
@mock.patch("esrally.utils.io.prepare_file_offset_table")
@mock.patch("esrally.utils.io.decompress")
@mock.patch("os.path.getsize")
@mock.patch("os.path.isfile")
def test_prepare_bundled_document_set_does_nothing_if_no_document_files(self, is_file, get_size, decompress, prepare_file_offset_table):
# no files present
is_file.return_value = False
p = loader.DocumentSetPreparator(track_name="unit-test", offline=False, test_mode=False)
self.assertFalse(p.prepare_bundled_document_set(document_set=track.Documents(source_format=track.Documents.SOURCE_FORMAT_BULK,
document_file="docs.json",
document_archive="docs.json.bz2",
number_of_documents=5,
compressed_size_in_bytes=200,
uncompressed_size_in_bytes=2000),
data_root="."))
self.assertEqual(0, decompress.call_count)
self.assertEqual(0, prepare_file_offset_table.call_count)
@mock.patch("esrally.utils.io.prepare_file_offset_table")
@mock.patch("esrally.utils.io.decompress")
@mock.patch("os.path.getsize")
@mock.patch("os.path.isfile")
def test_prepare_bundled_document_set_decompresses_compressed_docs(self, is_file, get_size, decompress, prepare_file_offset_table):
# uncompressed is missing
# decompressed is present
# check if uncompressed is present after decompression
# final loop iteration - uncompressed is present now
is_file.side_effect = [False, True, True, True]
# compressed
# uncompressed after decompression
# uncompressed in final loop iteration
get_size.side_effect = [200, 2000, 2000]
prepare_file_offset_table.return_value = 5
p = loader.DocumentSetPreparator(track_name="unit-test", offline=False, test_mode=False)
self.assertTrue(p.prepare_bundled_document_set(document_set=track.Documents(source_format=track.Documents.SOURCE_FORMAT_BULK,
document_file="docs.json",
document_archive="docs.json.bz2",
number_of_documents=5,
compressed_size_in_bytes=200,
uncompressed_size_in_bytes=2000),
data_root="."))
prepare_file_offset_table.assert_called_with("./docs.json")
@mock.patch("os.path.getsize")
@mock.patch("os.path.isfile")
def test_prepare_bundled_document_set_error_compressed_docs_wrong_size(self, is_file, get_size):
# uncompressed is missing
# decompressed is present
is_file.side_effect = [False, True]
# compressed has wrong size
get_size.side_effect = [150]
p = loader.DocumentSetPreparator(track_name="unit-test", offline=False, test_mode=False)
with self.assertRaises(exceptions.DataError) as ctx:
p.prepare_bundled_document_set(document_set=track.Documents(source_format=track.Documents.SOURCE_FORMAT_BULK,
document_file="docs.json",
document_archive="docs.json.bz2",
number_of_documents=5,
compressed_size_in_bytes=200,
uncompressed_size_in_bytes=2000),
data_root=".")
self.assertEqual("./docs.json.bz2 is present but does not have the expected size of 200 bytes.", ctx.exception.args[0])
@mock.patch("esrally.utils.io.prepare_file_offset_table")
@mock.patch("esrally.utils.io.decompress")
@mock.patch("os.path.getsize")
@mock.patch("os.path.isfile")
def test_prepare_bundled_document_set_uncompressed_docs_wrong_size(self, is_file, get_size, decompress, prepare_file_offset_table):
# uncompressed is present
is_file.side_effect = [True]
# uncompressed
get_size.side_effect = [1500]
p = loader.DocumentSetPreparator(track_name="unit-test", offline=False, test_mode=False)
with self.assertRaises(exceptions.DataError) as ctx:
p.prepare_bundled_document_set(document_set=track.Documents(source_format=track.Documents.SOURCE_FORMAT_BULK,
document_file="docs.json",
document_archive="docs.json.bz2",
number_of_documents=5,
compressed_size_in_bytes=200,
uncompressed_size_in_bytes=2000),
data_root=".")
self.assertEqual("./docs.json is present but does not have the expected size of 2000 bytes.", ctx.exception.args[0])
self.assertEqual(0, prepare_file_offset_table.call_count)
class TemplateRenderTests(TestCase):
def test_render_simple_template(self):
template = """
{
"key": {{'01-01-2000' | days_ago(now)}},
"key2": "static value"
}
"""
rendered = loader.render_template(
loader=jinja2.DictLoader({"unittest": template}), template_name="unittest", clock=StaticClock)
expected = """
{
"key": 5864,
"key2": "static value"
}
"""
self.assertEqual(expected, rendered)
def test_render_template_with_external_variables(self):
template = """
{
"greeting": "{{greeting | default("Aloha")}}",
"name": "{{name | default("stranger")}}"
}
"""
rendered = loader.render_template(
loader=jinja2.DictLoader({"unittest": template}), template_name="unittest", template_vars={"greeting": "Hi"}, clock=StaticClock)
expected = """
{
"greeting": "Hi",
"name": "stranger"
}
"""
self.assertEqual(expected, rendered)
def test_render_template_with_globbing(self):
def key_globber(e):
if e == "dynamic-key-*":
return [
"dynamic-key-1",
"dynamic-key-2",
"dynamic-key-3",
]
else:
return []
template = """
{% import "rally.helpers" as rally %}
{
"key1": "static value",
{{ rally.collect(parts="dynamic-key-*") }}
}
"""
rendered = loader.render_template(
loader=jinja2.DictLoader(
{
"unittest": template,
"dynamic-key-1": '"dkey1": "value1"',
"dynamic-key-2": '"dkey2": "value2"',
"dynamic-key-3": '"dkey3": "value3"',
}),
template_name="unittest", glob_helper=key_globber, clock=StaticClock)
expected = """
{
"key1": "static value",
"dkey1": "value1",
"dkey2": "value2",
"dkey3": "value3"
}
"""
self.assertEqualIgnoreWhitespace(expected, rendered)
def test_render_template_with_variables(self):
def key_globber(e):
if e == "dynamic-key-*":
return ["dynamic-key-1", "dynamic-key-2"]
else:
return []
template = """
{% set _clients = clients if clients is defined else 16 %}
{% set _bulk_size = bulk_size if bulk_size is defined else 100 %}
{% import "rally.helpers" as rally with context %}
{
"key1": "static value",
{{ rally.collect(parts="dynamic-key-*") }}
}
"""
rendered = loader.render_template(
loader=jinja2.DictLoader(
{
"unittest": template,
"dynamic-key-1": '"dkey1": {{ _clients }}',
"dynamic-key-2": '"dkey2": {{ _bulk_size }}',
}),
template_name="unittest", template_vars={"clients": 8}, glob_helper=key_globber, clock=StaticClock)
expected = """
{
"key1": "static value",
"dkey1": 8,
"dkey2": 100
}
"""
self.assertEqualIgnoreWhitespace(expected, rendered)
def assertEqualIgnoreWhitespace(self, expected, actual):
self.assertEqual(strip_ws(expected), strip_ws(actual))
class TrackPostProcessingTests(TestCase):
def test_post_processes_track_spec(self):
track_specification = {
"indices": [
{
"name": "test-index",
"body": "test-index-body.json",
"types": ["test-type"]
}
],
"corpora": [
{
"name": "unittest",
"documents": [
{
"source-file": "documents.json.bz2",
"document-count": 10,
"compressed-bytes": 100,
"uncompressed-bytes": 10000
}
]
}
],
"operations": [
{
"name": "index-append",
"operation-type": "bulk",
"bulk-size": 5000
},
{
"name": "search",
"operation-type": "search"
}
],
"challenges": [
{
"name": "default-challenge",
"description": "Default challenge",
"schedule": [
{
"clients": 8,
"operation": "index-append",
"warmup-time-period": 100,
"time-period": 240,
},
{
"parallel": {
"tasks": [
{
"name": "search #1",
"clients": 4,
"operation": "search",
"warmup-iterations": 1000,
"iterations": 2000
},
{
"name": "search #2",
"clients": 1,
"operation": "search",
"warmup-iterations": 1000,
"iterations": 2000
},
{
"name": "search #3",
"clients": 1,
"operation": "search",
"iterations": 1
}
]
}
}
]
}
]
}
expected_post_processed = {
"indices": [
{
"name": "test-index",
"body": "test-index-body.json",
"types": ["test-type"]
}
],
"corpora": [
{
"name": "unittest",
"documents": [
{
"source-file": "documents-1k.json.bz2",
"document-count": 1000
}
]
}
],
"operations": [
{
"name": "index-append",
"operation-type": "bulk",
"bulk-size": 5000
},
{
"name": "search",
"operation-type": "search"
}
],
"challenges": [
{
"name": "default-challenge",
"description": "Default challenge",
"schedule": [
{
"clients": 8,
"operation": "index-append",
"warmup-time-period": 0,
"time-period": 10,
},
{
"parallel": {
"tasks": [
{
"name": "search #1",
"clients": 4,
"operation": "search",
"warmup-iterations": 4,
"iterations": 4
},
{
"name": "search #2",
"clients": 1,
"operation": "search",
"warmup-iterations": 1,
"iterations": 1
},
{
"name": "search #3",
"clients": 1,
"operation": "search",
"iterations": 1
}
]
}
}
]
}
]
}
self.assertEqual(self.as_track(expected_post_processed),
loader.post_process_for_test_mode(self.as_track(track_specification)))
def as_track(self, track_specification):
reader = loader.TrackSpecificationReader(source=io.DictStringFileSourceFactory({
"/mappings/test-index-body.json": ['{"settings": {}}']
}))
return reader("unittest", track_specification, "/mappings")
class TrackPathTests(TestCase):
@mock.patch("os.path.exists")
def test_sets_absolute_path(self, path_exists):
from esrally import config
from esrally.track import track
path_exists.return_value = True
cfg = config.Config()
cfg.add(config.Scope.application, "benchmarks", "local.dataset.cache", "/data")
default_challenge = track.Challenge("default", default=True, schedule=[
track.Task(name="index", operation=track.Operation("index", operation_type=track.OperationType.Bulk), clients=4)
])
another_challenge = track.Challenge("other", default=False)
t = track.Track(name="u", challenges=[another_challenge, default_challenge],
corpora=[
track.DocumentCorpus("unittest", documents=[
track.Documents(source_format=track.Documents.SOURCE_FORMAT_BULK,
document_file="docs/documents.json",
document_archive="docs/documents.json.bz2")
])
],
indices=[track.Index(name="test", types=["docs"])])
loader.set_absolute_data_path(cfg, t)
self.assertEqual("/data/unittest/docs/documents.json", t.corpora[0].documents[0].document_file)
self.assertEqual("/data/unittest/docs/documents.json.bz2", t.corpora[0].documents[0].document_archive)
class TrackFilterTests(TestCase):
def test_create_filters_from_empty_included_tasks(self):
self.assertEqual(0, len(loader.filters_from_included_tasks(None)))
self.assertEqual(0, len(loader.filters_from_included_tasks([])))
def test_create_filters_from_mixed_included_tasks(self):
filters = loader.filters_from_included_tasks(["force-merge", "type:search"])
self.assertListEqual([track.TaskNameFilter("force-merge"), track.TaskOpTypeFilter("search")], filters)
def test_rejects_invalid_syntax(self):
with self.assertRaises(exceptions.SystemSetupError) as ctx:
loader.filters_from_included_tasks(["valid", "a:b:c"])
self.assertEqual("Invalid format for included tasks: [a:b:c]", ctx.exception.args[0])
def test_rejects_unknown_filter_type(self):
with self.assertRaises(exceptions.SystemSetupError) as ctx:
loader.filters_from_included_tasks(["valid", "op-type:index"])
self.assertEqual("Invalid format for included tasks: [op-type:index]. Expected [type] but got [op-type].", ctx.exception.args[0])
def test_filters_tasks(self):
track_specification = {
"description": "description for unit test",
"indices": [{"name": "test-index", "auto-managed": False}],
"operations": [
{
"name": "bulk-index",
"operation-type": "bulk"
},
{
"name": "node-stats",
"operation-type": "node-stats"
},
{
"name": "cluster-stats",
"operation-type": "custom-operation-type"
},
{
"name": "match-all",
"operation-type": "search",
"body": {
"query": {
"match_all": {}
}
}
},
],
"challenges": [
{
"name": "default-challenge",
"schedule": [
{
"parallel": {
"tasks": [
{
"name": "index-1",
"operation": "bulk-index",
},
{
"name": "index-2",
"operation": "bulk-index",
},
{
"name": "index-3",
"operation": "bulk-index",
},
{
"name": "match-all-parallel",
"operation": "match-all",
},
]
}
},
{
"operation": "node-stats"
},
{
"name": "match-all-serial",
"operation": "match-all"
},
{
"operation": "cluster-stats"
}
]
}
]
}
reader = loader.TrackSpecificationReader()
full_track = reader("unittest", track_specification, "/mappings")
self.assertEqual(4, len(full_track.challenges[0].schedule))
filtered = loader.filter_included_tasks(full_track, [track.TaskNameFilter("index-3"),
track.TaskOpTypeFilter("search"),
# Filtering should also work for non-core operation types.
track.TaskOpTypeFilter("custom-operation-type")
])
schedule = filtered.challenges[0].schedule
self.assertEqual(3, len(schedule))
self.assertEqual(["index-3", "match-all-parallel"], [t.name for t in schedule[0].tasks])
self.assertEqual("match-all-serial", schedule[1].name)
self.assertEqual("cluster-stats", schedule[2].name)
class TrackSpecificationReaderTests(TestCase):
def test_description_is_optional(self):
track_specification = {
# no description here
"challenges": []
}
reader = loader.TrackSpecificationReader()
resulting_track = reader("unittest", track_specification, "/mappings")
self.assertEqual("unittest", resulting_track.name)
self.assertEqual("", resulting_track.description)
def test_can_read_track_info(self):
track_specification = {
"description": "description for unit test",
"indices": [{"name": "test-index", "types": ["test-type"]}],
"corpora": [],
"operations": [],
"challenges": []
}
reader = loader.TrackSpecificationReader()
resulting_track = reader("unittest", track_specification, "/mappings")
self.assertEqual("unittest", resulting_track.name)
self.assertEqual("description for unit test", resulting_track.description)
def test_document_count_mandatory_if_file_present(self):
track_specification = {
"description": "description for unit test",
"indices": [{"name": "test-index", "types": ["docs"]}],
"corpora": [
{
"name": "test",
"base-url": "https://localhost/data",
"documents": [{ "source-file": "documents-main.json.bz2"}
]
}
],
"challenges": []
}
reader = loader.TrackSpecificationReader()
with self.assertRaises(loader.TrackSyntaxError) as ctx:
reader("unittest", track_specification, "/mappings")
self.assertEqual("Track 'unittest' is invalid. Mandatory element 'document-count' is missing.", ctx.exception.args[0])
def test_parse_with_mixed_warmup_iterations_and_measurement(self):
track_specification = {
"description": "description for unit test",
"indices": [
{
"name": "test-index",
"body": "index.json",
"types": [ "docs" ]
}
],
"corpora": [
{
"name": "test",
"documents": [
{
"source-file": "documents-main.json.bz2",
"document-count": 10,
"compressed-bytes": 100,
"uncompressed-bytes": 10000
}
]
}
],
"operations": [
{
"name": "index-append",
"operation-type": "bulk",
"bulk-size": 5000,
}
],
"challenges": [
{
"name": "default-challenge",
"schedule": [
{
"clients": 8,
"operation": "index-append",
"warmup-iterations": 3,
"time-period": 60
}
]
}
]
}
reader = loader.TrackSpecificationReader(source=io.DictStringFileSourceFactory({
"/mappings/index.json": ['{"mappings": {"docs": "empty-for-test"}}'],
}))
with self.assertRaises(loader.TrackSyntaxError) as ctx:
reader("unittest", track_specification, "/mappings")
self.assertEqual("Track 'unittest' is invalid. Operation 'index-append' in challenge 'default-challenge' defines '3' warmup "
"iterations and a time period of '60' seconds. Please do not mix time periods and iterations.",
ctx.exception.args[0])
def test_parse_missing_challenge_or_challenges(self):
track_specification = {
"description": "description for unit test",
"indices": [
{
"name": "test-index",
"body": "index.json",
"types": [ "docs" ]
}
],
"corpora": [
{
"name": "test",
"documents": [
{
"source-file": "documents-main.json.bz2",
"document-count": 10,
"compressed-bytes": 100,
"uncompressed-bytes": 10000
}
]
}
],
# no challenge or challenges element
}
reader = loader.TrackSpecificationReader(source=io.DictStringFileSourceFactory({
"/mappings/index.json": ['{"mappings": {"docs": "empty-for-test"}}'],
}))
with self.assertRaises(loader.TrackSyntaxError) as ctx:
reader("unittest", track_specification, "/mappings")
self.assertEqual("Track 'unittest' is invalid. You must define either 'challenge' or 'challenges' but none is specified.",
ctx.exception.args[0])
def test_parse_challenge_and_challenges_are_defined(self):
track_specification = {
"description": "description for unit test",
"indices": [
{
"name": "test-index",
"body": "index.json",
"types": [ "docs" ]
}
],
"corpora": [
{
"name": "test",
"documents": [
{
"source-file": "documents-main.json.bz2",
"document-count": 10,
"compressed-bytes": 100,
"uncompressed-bytes": 10000
}
]
}
],
# We define both. Note that challenges without any properties would not pass JSON schema validation but we don't test this here.
"challenge": {},
"challenges": []
}
reader = loader.TrackSpecificationReader(source=io.DictStringFileSourceFactory({
"/mappings/index.json": ['{"mappings": {"docs": "empty-for-test"}}'],
}))
with self.assertRaises(loader.TrackSyntaxError) as ctx:
reader("unittest", track_specification, "/mappings")
self.assertEqual("Track 'unittest' is invalid. 'challenge' and 'challenges' are defined but only one of them is allowed.",
ctx.exception.args[0])
def test_parse_with_mixed_warmup_time_period_and_iterations(self):
track_specification = {
"description": "description for unit test",
"indices": [
{
"name": "test-index",
"body": "index.json",
"types": [ "docs" ]
}
],
"corpora": [
{
"name": "test",
"documents": [
{
"source-file": "documents-main.json.bz2",
"document-count": 10,
"compressed-bytes": 100,
"uncompressed-bytes": 10000
}
]
}
],
"operations": [
{
"name": "index-append",
"operation-type": "index",
"bulk-size": 5000,
}
],
"challenges": [
{
"name": "default-challenge",
"schedule": [
{
"clients": 8,
"operation": "index-append",
"warmup-time-period": 20,
"iterations": 1000
}
]
}
]
}
reader = loader.TrackSpecificationReader(source=io.DictStringFileSourceFactory({
"/mappings/index.json": ['{"mappings": {"docs": "empty-for-test"}}'],
}))
with self.assertRaises(loader.TrackSyntaxError) as ctx:
reader("unittest", track_specification, "/mappings")
self.assertEqual("Track 'unittest' is invalid. Operation 'index-append' in challenge 'default-challenge' defines a warmup time "
"period of '20' seconds and '1000' iterations. Please do not mix time periods and iterations.",
ctx.exception.args[0])
def test_parse_duplicate_implicit_task_names(self):
track_specification = {
"description": "description for unit test",
"operations": [
{
"name": "search",
"operation-type": "search",
"index": "_all"
}
],
"challenge": {
"name": "default-challenge",
"schedule": [
{
"operation": "search",
"clients": 1
},
{
"operation": "search",
"clients": 2
}
]
}
}
reader = loader.TrackSpecificationReader()
with self.assertRaises(loader.TrackSyntaxError) as ctx:
reader("unittest", track_specification, "/mappings")
self.assertEqual("Track 'unittest' is invalid. Challenge 'default-challenge' contains multiple tasks with the name 'search'. Please"
" use the task's name property to assign a unique name for each task.",
ctx.exception.args[0])
def test_parse_duplicate_explicit_task_names(self):
track_specification = {
"description": "description for unit test",
"operations": [
{
"name": "search",
"operation-type": "search",
"index": "_all"
}
],
"challenge": {
"name": "default-challenge",
"schedule": [
{
"name": "duplicate-task-name",
"operation": "search",
"clients": 1
},
{
"name": "duplicate-task-name",
"operation": "search",
"clients": 2
}
]
}
}
reader = loader.TrackSpecificationReader()
with self.assertRaises(loader.TrackSyntaxError) as ctx:
reader("unittest", track_specification, "/mappings")
self.assertEqual("Track 'unittest' is invalid. Challenge 'default-challenge' contains multiple tasks with the name "
"'duplicate-task-name'. Please use the task's name property to assign a unique name for each task.",
ctx.exception.args[0])
def test_parse_unique_task_names(self):
track_specification = {
"description": "description for unit test",
"operations": [
{
"name": "search",
"operation-type": "search",
"index": "_all"
}
],
"challenge": {
"name": "default-challenge",
"schedule": [
{
"name": "search-one-client",
"operation": "search",
"clients": 1
},
{
"name": "search-two-clients",
"operation": "search",
"clients": 2
}
]
}
}
reader = loader.TrackSpecificationReader()
resulting_track = reader("unittest", track_specification, "/mappings")
self.assertEqual("unittest", resulting_track.name)
schedule = resulting_track.challenges[0].schedule
self.assertEqual(2, len(schedule))
self.assertEqual("search-one-client", schedule[0].name)
self.assertEqual("search", schedule[0].operation.name)
self.assertEqual("search-two-clients", schedule[1].name)
self.assertEqual("search", schedule[1].operation.name)
def test_parse_valid_track_specification(self):
track_specification = {
"description": "description for unit test",
"indices": [
{
"name": "index-historical",
"body": "body.json",
"types": ["main", "secondary"]
}
],
"corpora": [
{
"name": "test",
"base-url": "https://localhost/data",
"documents": [
{
"source-file": "documents-main.json.bz2",
"document-count": 10,
"compressed-bytes": 100,
"uncompressed-bytes": 10000,
"target-index": "index-historical",
"target-type": "main"
},
{
"source-file": "documents-secondary.json.bz2",
"includes-action-and-meta-data": True,
"document-count": 20,
"compressed-bytes": 200,
"uncompressed-bytes": 20000
}
]
}
],
"operations": [
{
"name": "index-append",
"operation-type": "index",
"bulk-size": 5000,
"meta": {
"append": True
}
},
{
"name": "search",
"operation-type": "search",
"index": "index-historical"
}
],
"challenges": [
{
"name": "default-challenge",
"description": "Default challenge",
"meta": {
"mixed": True,
"max-clients": 8
},
"schedule": [
{
"clients": 8,
"operation": "index-append",
"meta": {
"operation-index": 0
}
},
{
"clients": 1,
"operation": "search"
}
]
}
]
}
reader = loader.TrackSpecificationReader(
track_params={"number_of_shards": 3},
source=io.DictStringFileSourceFactory({
"/mappings/body.json": ["""
{
"settings": {
"number_of_shards": {{ number_of_shards }}
},
"mappings": {
"main": "empty-for-test",
"secondary": "empty-for-test"
}
}
"""]
}))
resulting_track = reader("unittest", track_specification, "/mappings")
self.assertEqual("unittest", resulting_track.name)
self.assertEqual("description for unit test", resulting_track.description)
# indices
self.assertEqual(1, len(resulting_track.indices))
self.assertEqual("index-historical", resulting_track.indices[0].name)
self.assertDictEqual({
"settings": {
"number_of_shards": 3
},
"mappings":
{
"main": "empty-for-test",
"secondary": "empty-for-test"
}
}, resulting_track.indices[0].body)
self.assertEqual(2, len(resulting_track.indices[0].types))
self.assertEqual("main", resulting_track.indices[0].types[0])
self.assertEqual("secondary", resulting_track.indices[0].types[1])
# corpora
self.assertEqual(1, len(resulting_track.corpora))
self.assertEqual("test", resulting_track.corpora[0].name)
self.assertEqual(2, len(resulting_track.corpora[0].documents))
docs_primary = resulting_track.corpora[0].documents[0]
self.assertEqual(track.Documents.SOURCE_FORMAT_BULK, docs_primary.source_format)
self.assertEqual("documents-main.json", docs_primary.document_file)
self.assertEqual("documents-main.json.bz2", docs_primary.document_archive)
self.assertEqual("https://localhost/data", docs_primary.base_url)
self.assertFalse(docs_primary.includes_action_and_meta_data)
self.assertEqual(10, docs_primary.number_of_documents)
self.assertEqual(100, docs_primary.compressed_size_in_bytes)
self.assertEqual(10000, docs_primary.uncompressed_size_in_bytes)
self.assertEqual("index-historical", docs_primary.target_index)
self.assertEqual("main", docs_primary.target_type)
docs_secondary = resulting_track.corpora[0].documents[1]
self.assertEqual(track.Documents.SOURCE_FORMAT_BULK, docs_secondary.source_format)
self.assertEqual("documents-secondary.json", docs_secondary.document_file)
self.assertEqual("documents-secondary.json.bz2", docs_secondary.document_archive)
self.assertEqual("https://localhost/data", docs_secondary.base_url)
self.assertTrue(docs_secondary.includes_action_and_meta_data)
self.assertEqual(20, docs_secondary.number_of_documents)
self.assertEqual(200, docs_secondary.compressed_size_in_bytes)
self.assertEqual(20000, docs_secondary.uncompressed_size_in_bytes)
# This is defined by the action-and-meta-data line!
self.assertIsNone(docs_secondary.target_index)
self.assertIsNone(docs_secondary.target_type)
# challenges
self.assertEqual(1, len(resulting_track.challenges))
self.assertEqual("default-challenge", resulting_track.challenges[0].name)
self.assertEqual("Default challenge", resulting_track.challenges[0].description)
self.assertEqual({"mixed": True, "max-clients": 8}, resulting_track.challenges[0].meta_data)
self.assertEqual({"append": True}, resulting_track.challenges[0].schedule[0].operation.meta_data)
self.assertEqual({"operation-index": 0}, resulting_track.challenges[0].schedule[0].meta_data)
def test_parse_valid_track_specification_with_index_template(self):
track_specification = {
"description": "description for unit test",
"templates": [
{
"name": "my-index-template",
"index-pattern": "*",
"template": "default-template.json"
}
],
"operations": [],
"challenges": []
}
reader = loader.TrackSpecificationReader(
track_params={"index_pattern": "*"},
source=io.DictStringFileSourceFactory({
"/mappings/default-template.json": ["""
{
"index_patterns": [ "{{index_pattern}}"],
"settings": {
"number_of_shards": {{ number_of_shards | default(1) }}
}
}
"""],
}))
resulting_track = reader("unittest", track_specification, "/mappings")
self.assertEqual("unittest", resulting_track.name)
self.assertEqual("description for unit test", resulting_track.description)
self.assertEqual(0, len(resulting_track.indices))
self.assertEqual(1, len(resulting_track.templates))
self.assertEqual("my-index-template", resulting_track.templates[0].name)
self.assertEqual("*", resulting_track.templates[0].pattern)
self.assertDictEqual(
{
"index_patterns": ["*"],
"settings": {
"number_of_shards": 1
}
}, resulting_track.templates[0].content)
self.assertEqual(0, len(resulting_track.challenges))
def test_unique_challenge_names(self):
track_specification = {
"description": "description for unit test",
"indices": [{"name": "test-index"}],
"operations": [
{
"name": "index-append",
"operation-type": "bulk"
}
],
"challenges": [
{
"name": "test-challenge",
"description": "Some challenge",
"default": True,
"schedule": [
{
"operation": "index-append"
}
]
},
{
"name": "test-challenge",
"description": "Another challenge with the same name",
"schedule": [
{
"operation": "index-append"
}
]
}
]
}
reader = loader.TrackSpecificationReader()
with self.assertRaises(loader.TrackSyntaxError) as ctx:
reader("unittest", track_specification, "/mappings")
self.assertEqual("Track 'unittest' is invalid. Duplicate challenge with name 'test-challenge'.", ctx.exception.args[0])
def test_not_more_than_one_default_challenge_possible(self):
track_specification = {
"description": "description for unit test",
"indices": [{"name": "test-index"}],
"operations": [
{
"name": "index-append",
"operation-type": "bulk"
}
],
"challenges": [
{
"name": "default-challenge",
"description": "Default challenge",
"default": True,
"schedule": [
{
"operation": "index-append"
}
]
},
{
"name": "another-challenge",
"description": "See if we can sneek it in as another default",
"default": True,
"schedule": [
{
"operation": "index-append"
}
]
}
]
}
reader = loader.TrackSpecificationReader()
with self.assertRaises(loader.TrackSyntaxError) as ctx:
reader("unittest", track_specification, "/mappings")
self.assertEqual("Track 'unittest' is invalid. Both 'default-challenge' and 'another-challenge' are defined as default challenges. "
"Please define only one of them as default.", ctx.exception.args[0])
def test_at_least_one_default_challenge(self):
track_specification = {
"description": "description for unit test",
"indices": [{"name": "test-index"}],
"operations": [
{
"name": "index-append",
"operation-type": "bulk"
}
],
"challenges": [
{
"name": "challenge",
"schedule": [
{
"operation": "index-append"
}
]
},
{
"name": "another-challenge",
"schedule": [
{
"operation": "index-append"
}
]
}
]
}
reader = loader.TrackSpecificationReader()
with self.assertRaises(loader.TrackSyntaxError) as ctx:
reader("unittest", track_specification, "/mappings")
self.assertEqual("Track 'unittest' is invalid. No default challenge specified. Please edit the track and add \"default\": true "
"to one of the challenges challenge, another-challenge.", ctx.exception.args[0])
def test_exactly_one_default_challenge(self):
track_specification = {
"description": "description for unit test",
"indices": [{"name": "test-index"}],
"operations": [
{
"name": "index-append",
"operation-type": "bulk"
}
],
"challenges": [
{
"name": "challenge",
"default": True,
"schedule": [
{
"operation": "index-append"
}
]
},
{
"name": "another-challenge",
"schedule": [
{
"operation": "index-append"
}
]
}
]
}
reader = loader.TrackSpecificationReader()
resulting_track = reader("unittest", track_specification, "/mappings")
self.assertEqual(2, len(resulting_track.challenges))
self.assertEqual("challenge", resulting_track.challenges[0].name)
self.assertTrue(resulting_track.challenges[0].default)
self.assertFalse(resulting_track.challenges[1].default)
def test_selects_sole_challenge_implicitly_as_default(self):
track_specification = {
"description": "description for unit test",
"indices": [{"name": "test-index"}],
"operations": [
{
"name": "index-append",
"operation-type": "bulk"
}
],
"challenge": {
"name": "challenge",
"schedule": [
{
"operation": "index-append"
}
]
}
}
reader = loader.TrackSpecificationReader()
resulting_track = reader("unittest", track_specification, "/mappings")
self.assertEqual(1, len(resulting_track.challenges))
self.assertEqual("challenge", resulting_track.challenges[0].name)
self.assertTrue(resulting_track.challenges[0].default)
def test_inline_operations(self):
track_specification = {
"description": "description for unit test",
"indices": [{"name": "test-index"}],
"challenge": {
"name": "challenge",
"schedule": [
# an operation with parameters still needs to define a type
{
"operation": {
"operation-type": "bulk",
"bulk-size": 5000
}
},
# a parameterless operation can just use the operation type as implicit reference to the operation
{
"operation": "force-merge"
}
]
}
}
reader = loader.TrackSpecificationReader()
resulting_track = reader("unittest", track_specification, "/mappings")
self.assertEqual(2, len(resulting_track.challenges[0].schedule))
self.assertEqual(track.OperationType.Bulk.name, resulting_track.challenges[0].schedule[0].operation.type)
self.assertEqual(track.OperationType.ForceMerge.name, resulting_track.challenges[0].schedule[1].operation.type)
def test_supports_target_throughput(self):
track_specification = {
"description": "description for unit test",
"indices": [{"name": "test-index"}],
"operations": [
{
"name": "index-append",
"operation-type": "bulk"
}
],
"challenge": {
"name": "default-challenge",
"schedule": [
{
"operation": "index-append",
"target-throughput": 10,
}
]
}
}
reader = loader.TrackSpecificationReader()
resulting_track = reader("unittest", track_specification, "/mappings")
self.assertEqual(10, resulting_track.challenges[0].schedule[0].params["target-throughput"])
def test_supports_target_interval(self):
track_specification = {
"description": "description for unit test",
"indices": [{"name": "test-index"}],
"operations": [
{
"name": "index-append",
"operation-type": "bulk"
}
],
"challenges": [
{
"name": "default-challenge",
"schedule": [
{
"operation": "index-append",
"target-interval": 5,
}
]
}
]
}
reader = loader.TrackSpecificationReader()
resulting_track = reader("unittest", track_specification, "/mappings")
self.assertEqual(5, resulting_track.challenges[0].schedule[0].params["target-interval"])
def test_parallel_tasks_with_default_values(self):
track_specification = {
"description": "description for unit test",
"indices": [{"name": "test-index"}],
"operations": [
{
"name": "index-1",
"operation-type": "bulk"
},
{
"name": "index-2",
"operation-type": "bulk"
},
{
"name": "index-3",
"operation-type": "bulk"
},
],
"challenges": [
{
"name": "default-challenge",
"schedule": [
{
"parallel": {
"warmup-time-period": 2400,
"time-period": 36000,
"tasks": [
{
"operation": "index-1",
"warmup-time-period": 300,
"clients": 2
},
{
"operation": "index-2",
"time-period": 3600,
"clients": 4
},
{
"operation": "index-3",
"target-throughput": 10,
"clients": 16
},
]
}
}
]
}
]
}
reader = loader.TrackSpecificationReader()
resulting_track = reader("unittest", track_specification, "/mappings")
parallel_element = resulting_track.challenges[0].schedule[0]
parallel_tasks = parallel_element.tasks
self.assertEqual(22, parallel_element.clients)
self.assertEqual(3, len(parallel_tasks))
self.assertEqual("index-1", parallel_tasks[0].operation.name)
self.assertEqual(300, parallel_tasks[0].warmup_time_period)
self.assertEqual(36000, parallel_tasks[0].time_period)
self.assertEqual(2, parallel_tasks[0].clients)
self.assertFalse("target-throughput" in parallel_tasks[0].params)
self.assertEqual("index-2", parallel_tasks[1].operation.name)
self.assertEqual(2400, parallel_tasks[1].warmup_time_period)
self.assertEqual(3600, parallel_tasks[1].time_period)
self.assertEqual(4, parallel_tasks[1].clients)
self.assertFalse("target-throughput" in parallel_tasks[1].params)
self.assertEqual("index-3", parallel_tasks[2].operation.name)
self.assertEqual(2400, parallel_tasks[2].warmup_time_period)
self.assertEqual(36000, parallel_tasks[2].time_period)
self.assertEqual(16, parallel_tasks[2].clients)
self.assertEqual(10, parallel_tasks[2].params["target-throughput"])
def test_parallel_tasks_with_default_clients_does_not_propagate(self):
track_specification = {
"description": "description for unit test",
"indices": [{"name": "test-index"}],
"operations": [
{
"name": "index-1",
"operation-type": "bulk"
}
],
"challenges": [
{
"name": "default-challenge",
"schedule": [
{
"parallel": {
"warmup-time-period": 2400,
"time-period": 36000,
"clients": 2,
"tasks": [
{
"name": "index-1-1",
"operation": "index-1"
},
{
"name": "index-1-2",
"operation": "index-1"
},
{
"name": "index-1-3",
"operation": "index-1"
},
{
"name": "index-1-4",
"operation": "index-1"
}
]
}
}
]
}
]
}
reader = loader.TrackSpecificationReader()
resulting_track = reader("unittest", track_specification, "/mappings")
parallel_element = resulting_track.challenges[0].schedule[0]
parallel_tasks = parallel_element.tasks
# we will only have two clients *in total*
self.assertEqual(2, parallel_element.clients)
self.assertEqual(4, len(parallel_tasks))
for task in parallel_tasks:
self.assertEqual(1, task.clients)
def test_parallel_tasks_with_completed_by_set(self):
track_specification = {
"description": "description for unit test",
"indices": [{"name": "test-index"}],
"operations": [
{
"name": "index-1",
"operation-type": "bulk"
},
{
"name": "index-2",
"operation-type": "bulk"
}
],
"challenges": [
{
"name": "default-challenge",
"schedule": [
{
"parallel": {
"warmup-time-period": 2400,
"time-period": 36000,
"completed-by": "index-2",
"tasks": [
{
"operation": "index-1"
},
{
"operation": "index-2"
}
]
}
}
]
}
]
}
reader = loader.TrackSpecificationReader()
resulting_track = reader("unittest", track_specification, "/mappings")
parallel_element = resulting_track.challenges[0].schedule[0]
parallel_tasks = parallel_element.tasks
# we will only have two clients *in total*
self.assertEqual(2, parallel_element.clients)
self.assertEqual(2, len(parallel_tasks))
self.assertEqual("index-1", parallel_tasks[0].operation.name)
self.assertFalse(parallel_tasks[0].completes_parent)
self.assertEqual("index-2", parallel_tasks[1].operation.name)
self.assertTrue(parallel_tasks[1].completes_parent)
def test_parallel_tasks_with_completed_by_set_no_task_matches(self):
track_specification = {
"description": "description for unit test",
"indices": [{"name": "test-index"}],
"operations": [
{
"name": "index-1",
"operation-type": "bulk"
},
{
"name": "index-2",
"operation-type": "bulk"
}
],
"challenges": [
{
"name": "default-challenge",
"schedule": [
{
"parallel": {
"completed-by": "non-existing-task",
"tasks": [
{
"operation": "index-1"
},
{
"operation": "index-2"
}
]
}
}
]
}
]
}
reader = loader.TrackSpecificationReader()
with self.assertRaises(loader.TrackSyntaxError) as ctx:
reader("unittest", track_specification, "/mappings")
self.assertEqual("Track 'unittest' is invalid. 'parallel' element for challenge 'default-challenge' is marked with 'completed-by' "
"with task name 'non-existing-task' but no task with this name exists.", ctx.exception.args[0])
def test_parallel_tasks_with_completed_by_set_multiple_tasks_match(self):
track_specification = {
"description": "description for unit test",
"indices": [{"name": "test-index"}],
"operations": [
{
"name": "index-1",
"operation-type": "bulk"
}
],
"challenges": [
{
"name": "default-challenge",
"schedule": [
{
"parallel": {
"completed-by": "index-1",
"tasks": [
{
"operation": "index-1"
},
{
"operation": "index-1"
}
]
}
}
]
}
]
}
reader = loader.TrackSpecificationReader()
with self.assertRaises(loader.TrackSyntaxError) as ctx:
reader("unittest", track_specification, "/mappings")
self.assertEqual("Track 'unittest' is invalid. 'parallel' element for challenge 'default-challenge' contains multiple tasks with "
"the name 'index-1' which are marked with 'completed-by' but only task is allowed to match.",
ctx.exception.args[0])
| 44.111618
| 140
| 0.475156
|
07c3abb0fb29f5f9320d9cc3ce910578fbe705f1
| 3,568
|
py
|
Python
|
bettertexts/util/populate.py
|
citizenline/citizenline
|
5c8317fe7e18a485bb8c572cc3c55707d0303525
|
[
"MIT"
] | null | null | null |
bettertexts/util/populate.py
|
citizenline/citizenline
|
5c8317fe7e18a485bb8c572cc3c55707d0303525
|
[
"MIT"
] | 33
|
2017-02-14T15:45:16.000Z
|
2022-03-11T23:22:29.000Z
|
bettertexts/util/populate.py
|
citizenline/citizenline
|
5c8317fe7e18a485bb8c572cc3c55707d0303525
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.contrib.sites.models import Site
from bettertexts.models import Question
from bettertexts.models import Text
from bettertexts.models import Type
class Init:
@staticmethod
def new():
# base_domain = "citizenline.local:8000"
base_domain = "citizenline.nl"
s0 = Site.objects.get_or_create(pk=1, defaults={"domain": "example.com"})[0]
s0.domain = "www." + base_domain
s0.save()
s1 = Site.objects.get_or_create(
pk=2, defaults={"domain": "denhaag." + base_domain}
)[0]
s1.save()
brief = Type.objects.get_or_create(
site=s1,
name="Brief",
defaults={
"header": "Te beoordelen tekst",
"rating_header": "Geef uw waardering",
"comment_header": "Geef een reactie",
},
)[0]
brief.save()
q1 = Question.objects.get_or_create(
type=brief, position=0, defaults={"question": "Is de brief duidelijk?"}
)[0]
q1.save()
q2 = Question.objects.get_or_create(
type=brief, position=1, defaults={"question": "Is de brief leesbaar?"}
)[0]
q2.save()
text = Text()
text.site = s1
text.type = brief
text.title = "Brief 1: Koolmonoxidevergiftiging"
text.body = """<p><strong>Beste <naam>,</strong></p>
<p>De brandweer heeft uw adres aan ons doorgegeven omdat er bij u thuis een zeer ernstige situatie was geweest met koolmonoxide uit uw geiser/kachel/cv. U bent vast erg geschrokken. Koolmonoxide is gevaarlijk voor de gezondheid. GGD Haaglanden bewaakt en beschermt de gezondheid, daarom nemen wij contact met u op.
</p>
<p><strong>Belangrijk:</strong>
</p>
<p>• Schakel de geiser niet meer in totdat hij gerepareerd is.<br />
• Maak een afspraak met een erkend bedrijf om de geiser te repareren of te vervangen.<br />
• Bel daarna de lokale Pandenbrigade (1234567).<br />
</p>
<p>Zij controleren of er geen koolmonoxide meer vrij komt.
</p>
<p><strong>Het gevaar van koolmonoxide</strong>
</p>
<p>De GGD geeft informatie over het gevaar van koolmonoxide. Wij kunnen uw vragen beantwoorden, uitleggen hoe koolmonoxide ontstaat en wanneer het tot klachten kan leiden. Daarnaast geven wij u tips om de kans op koolmonoxide(ongelukken) in de toekomst te verkleinen. Leest u daarom ook de folder die bij deze brief zit. Heeft u na het lezen van de folder nog vragen, dan kunt u mij bereiken op het telefoonnummer (012) 345 67 89 of e-mailen naar gezondheidenmilieu@example.nl.
</p>
<p>Koolmonoxide is een dodelijk gas. Als u nu nog gezondheidsklachten heeft neemt u dan contact op met uw huisarts. Vertel hem dat u koolmonoxide heeft ingeademd.
</p>
<p><strong>Enquêteformulier</strong>
</p>
<p>Bij deze brief zit ook een enquêteformulier. De GGD in uw regio wil graag weten hoeveel mensen gezondheids- klachten hebben door koolmonoxide(ongelukken). Daar is nog geen goed beeld van. Zou u de enquête willen invullen en terugsturen?
</p>
<p>Heeft u liever persoonlijk of telefonisch contact om de enquête in te vullen bel dan met (012) 345 67 89.
</p>
<p>
</p>
<p>Met vriendelijke groet,
</p>
<p>Naam medewerker
</p>
"""
text.version = 0
text.pub_date = "2016-02-01"
text.save()
| 42.987952
| 485
| 0.627242
|
f6899f89a0069492e875df76c4e5f75130a66928
| 6,717
|
py
|
Python
|
mutatePy/mutatePy/commandline.py
|
t94126/mutatePy
|
cfd649f95a00afb26103a8a1409cda04deeeadac
|
[
"Apache-2.0"
] | null | null | null |
mutatePy/mutatePy/commandline.py
|
t94126/mutatePy
|
cfd649f95a00afb26103a8a1409cda04deeeadac
|
[
"Apache-2.0"
] | null | null | null |
mutatePy/mutatePy/commandline.py
|
t94126/mutatePy
|
cfd649f95a00afb26103a8a1409cda04deeeadac
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import sys
from mutpy import controller, views, operators, utils
from mutpy import __version__ as version
def main(argv):
parser = build_parser()
run_mutpy(parser)
def build_parser():
DEF_TIMEOUT_FACTOR = 5
parser = argparse.ArgumentParser(description='Mutation testing tool for Python 3.x source code. ',
fromfile_prefix_chars='@')
parser.add_argument('--version', '-v', action='version', version='%(prog)s {}'.format(version))
parser.add_argument('--target', '-t', type=str, nargs='+', help='target module or package to mutate')
parser.add_argument('--unit-test', '-u', type=str, nargs='+',
help='test class, test method, module or package with unit tests')
parser.add_argument('--report', '-r', type=str, help='generate YAML report', metavar='REPORT_FILE')
parser.add_argument('--report-html', type=str, help='generate HTML report', metavar='DIR_NAME')
parser.add_argument('--timeout-factor', '-f', type=float, default=DEF_TIMEOUT_FACTOR,
help='max timeout factor (default {})'.format(DEF_TIMEOUT_FACTOR))
parser.add_argument('--show-mutants', '-m', action='store_true', help='show mutants source code')
parser.add_argument('--quiet', '-q', action='store_true', help='quiet mode')
    parser.add_argument('--debug', action='store_true', help='debug mode')
    parser.add_argument('--colored-output', '-c', action='store_true', help='try to print colored output')
    parser.add_argument('--disable-stdout', '-d', action='store_true',
                        help='try to disable stdout during mutation '
                             '(this option can damage your tests if you interact with sys.stdout)')
parser.add_argument('--experimental-operators', '-e', action='store_true', help='use experimental operators')
parser.add_argument('--operator', '-o', type=str, nargs='+',
help='use only selected operators', metavar='OPERATOR')
parser.add_argument('--disable-operator', type=str, nargs='+', default=[],
help='disable selected operators', metavar='OPERATOR')
parser.add_argument('--list-operators', '-l', action='store_true', help='list available operators')
parser.add_argument('--path', '-p', type=str, metavar='DIR', help='extend Python path')
parser.add_argument('--percentage', type=int, metavar='PERCENTAGE', default=100,
help='percentage of the generated mutants (mutation sampling)')
parser.add_argument('--coverage', action='store_true',
help='mutate only covered code')
parser.add_argument('--order', type=int, metavar='ORDER', default=1, help='mutation order')
parser.add_argument('--hom-strategy', type=str, metavar='HOM_STRATEGY', help='HOM strategy',
default='FIRST_TO_LAST')
parser.add_argument('--list-hom-strategies', action='store_true', help='list available HOM strategies')
parser.add_argument('--mutation-number', type=int, metavar='MUTATION_NUMBER',
help='run only one mutation (debug purpose)')
return parser
def run_mutpy(parser):
cfg = parser.parse_args()
if cfg.list_operators:
list_operators()
elif cfg.list_hom_strategies:
list_hom_strategies()
elif cfg.target:
mutation_controller = build_controller(cfg)
mutation_controller.run()
else:
parser.print_usage()
def build_controller(cfg):
built_views = build_views(cfg)
mutant_generator = build_mutator(cfg)
target_loader = utils.ModulesLoader(cfg.target, cfg.path)
return controller.MutationController(
target_loader=target_loader,
views=built_views,
mutant_generator=mutant_generator,
timeout_factor=cfg.timeout_factor,
disable_stdout=cfg.disable_stdout,
mutate_covered=cfg.coverage,
mutation_number=cfg.mutation_number,
)
def build_mutator(cfg):
operators_set = set()
if cfg.experimental_operators:
operators_set |= operators.experimental_operators
name_to_operator = build_name_to_operator_map()
if cfg.operator:
operators_set |= {get_operator(name, name_to_operator)
for name in cfg.operator}
else:
operators_set |= operators.standard_operators
operators_set -= {get_operator(name, name_to_operator)
for name in cfg.disable_operator}
if cfg.order == 1:
return controller.FirstOrderMutator(operators_set, cfg.percentage)
else:
hom_strategy = build_hom_strategy(cfg)
return controller.HighOrderMutator(operators_set, cfg.percentage, hom_strategy=hom_strategy)
def build_hom_strategy(cfg):
if cfg.order < 1:
print('Order should be > 0.')
sys.exit(-1)
try:
name_to_hom_strategy = {hom_strategy.name: hom_strategy for hom_strategy in controller.hom_strategies}
return name_to_hom_strategy[cfg.hom_strategy](order=cfg.order)
except KeyError:
print('Unsupported HOM strategy {}! Use --list-hom-strategies to show strategies.'.format(cfg.hom_strategy))
sys.exit(-1)
def get_operator(name, name_to_operator):
try:
return name_to_operator[name]
except KeyError:
print('Unsupported operator {}! Use -l to show all operators.'.format(name))
sys.exit(-1)
def build_name_to_operator_map():
result = {}
for operator in operators.standard_operators | operators.experimental_operators:
result[operator.name()] = operator
result[operator.long_name()] = operator
return result
def build_views(cfg):
views_list = []
if cfg.quiet:
views_list.append(views.QuietTextView(cfg.colored_output))
else:
views_list.append(views.TextView(cfg.colored_output, cfg.show_mutants))
if cfg.report:
views_list.append(views.YAMLReportView(cfg.report))
if cfg.report_html:
views_list.append(views.HTMLReportView(cfg.report_html))
if cfg.debug:
views_list.append(views.DebugView())
return views_list
def list_operators():
print('Standard mutation operators:')
for operator in utils.sort_operators(operators.standard_operators):
print(' - {:3} - {}'.format(operator.name(), operator.long_name()))
print('Experimental mutation operators:')
for operator in utils.sort_operators(operators.experimental_operators):
print(' - {:3} - {}'.format(operator.name(), operator.long_name()))
def list_hom_strategies():
print('HOM strategies:')
for strategy in controller.hom_strategies:
print(' - {}'.format(strategy.name))
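# Example invocation (sketch only): 'calculator' and 'test_calculator' are placeholder
# module names, and the installed console-script name may differ in this fork.
#   mut.py --target calculator --unit-test test_calculator --show-mutants --report-html report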
| 40.957317
| 116
| 0.671431
|
bf98dc15565f71d65124dbccb68ebdc0b484deea
| 8,854
|
py
|
Python
|
lib/ecdsa/test_numbertheory.py
|
GaryFlynn/vro-jwt-python
|
2aa6213212ba37e39abe022c76350d08b1a8fb65
|
[
"MIT"
] | 3
|
2020-08-04T20:29:41.000Z
|
2020-11-09T09:28:19.000Z
|
lib/ecdsa/test_numbertheory.py
|
GaryFlynn/vro-jwt-python
|
2aa6213212ba37e39abe022c76350d08b1a8fb65
|
[
"MIT"
] | 27
|
2019-11-14T00:59:44.000Z
|
2019-12-08T22:52:26.000Z
|
lib/ecdsa/test_numbertheory.py
|
GaryFlynn/vro-jwt-python
|
2aa6213212ba37e39abe022c76350d08b1a8fb65
|
[
"MIT"
] | 2
|
2021-03-16T12:41:29.000Z
|
2021-03-16T14:50:08.000Z
|
import operator
from six import print_
from functools import reduce
try:
import unittest2 as unittest
except ImportError:
import unittest
import hypothesis.strategies as st
import pytest
from hypothesis import given, settings, example
try:
from hypothesis import HealthCheck
HC_PRESENT=True
except ImportError:
HC_PRESENT=False
from .numbertheory import (SquareRootError, factorization, gcd, lcm,
jacobi, inverse_mod,
is_prime, next_prime, smallprimes,
square_root_mod_prime)
BIGPRIMES = (999671,
999683,
999721,
999727,
999749,
999763,
999769,
999773,
999809,
999853,
999863,
999883,
999907,
999917,
999931,
999953,
999959,
999961,
999979,
999983)
@pytest.mark.parametrize(
"prime, next_p",
[(p, q) for p, q in zip(BIGPRIMES[:-1], BIGPRIMES[1:])])
def test_next_prime(prime, next_p):
assert next_prime(prime) == next_p
@pytest.mark.parametrize(
"val",
[-1, 0, 1])
def test_next_prime_with_nums_less_2(val):
assert next_prime(val) == 2
@pytest.mark.parametrize("prime", smallprimes)
def test_square_root_mod_prime_for_small_primes(prime):
squares = set()
for num in range(0, 1 + prime // 2):
sq = num * num % prime
squares.add(sq)
root = square_root_mod_prime(sq, prime)
# tested for real with TestNumbertheory.test_square_root_mod_prime
assert root * root % prime == sq
for nonsquare in range(0, prime):
if nonsquare in squares:
continue
with pytest.raises(SquareRootError):
square_root_mod_prime(nonsquare, prime)
@st.composite
def st_two_nums_rel_prime(draw):
# 521-bit is the biggest curve we operate on, use 1024 for a bit
# of breathing space
mod = draw(st.integers(min_value=2, max_value=2**1024))
num = draw(st.integers(min_value=1, max_value=mod-1)
.filter(lambda x: gcd(x, mod) == 1))
return num, mod
@st.composite
def st_primes(draw, *args, **kwargs):
if "min_value" not in kwargs:
kwargs["min_value"] = 1
prime = draw(st.sampled_from(smallprimes) |
st.integers(*args, **kwargs)
.filter(is_prime))
return prime
@st.composite
def st_num_square_prime(draw):
prime = draw(st_primes(max_value=2**1024))
num = draw(st.integers(min_value=0, max_value=1 + prime // 2))
sq = num * num % prime
return sq, prime
@st.composite
def st_comp_with_com_fac(draw):
"""
Strategy that returns lists of numbers, all having a common factor.
"""
primes = draw(st.lists(st_primes(max_value=2**512), min_size=1,
max_size=10))
# select random prime(s) that will make the common factor of composites
com_fac_primes = draw(st.lists(st.sampled_from(primes),
min_size=1, max_size=20))
com_fac = reduce(operator.mul, com_fac_primes, 1)
# select at most 20 lists (returned numbers),
# each having at most 30 primes (factors) including none (then the number
# will be 1)
comp_primes = draw(
st.integers(min_value=1, max_value=20).
flatmap(lambda n: st.lists(st.lists(st.sampled_from(primes),
max_size=30),
min_size=1, max_size=n)))
return [reduce(operator.mul, nums, 1) * com_fac for nums in comp_primes]
@st.composite
def st_comp_no_com_fac(draw):
"""
Strategy that returns lists of numbers that don't have a common factor.
"""
primes = draw(st.lists(st_primes(max_value=2**512),
min_size=2, max_size=10, unique=True))
# first select the primes that will create the uncommon factor
# between returned numbers
uncom_fac_primes = draw(st.lists(
st.sampled_from(primes),
min_size=1, max_size=len(primes)-1, unique=True))
uncom_fac = reduce(operator.mul, uncom_fac_primes, 1)
# then build composites from leftover primes
leftover_primes = [i for i in primes if i not in uncom_fac_primes]
assert leftover_primes
assert uncom_fac_primes
# select at most 20 lists, each having at most 30 primes
# selected from the leftover_primes list
number_primes = draw(
st.integers(min_value=1, max_value=20).
flatmap(lambda n: st.lists(st.lists(st.sampled_from(leftover_primes),
max_size=30),
min_size=1, max_size=n)))
numbers = [reduce(operator.mul, nums, 1) for nums in number_primes]
insert_at = draw(st.integers(min_value=0, max_value=len(numbers)))
numbers.insert(insert_at, uncom_fac)
return numbers
HYP_SETTINGS = {}
if HC_PRESENT:
HYP_SETTINGS['suppress_health_check']=[HealthCheck.filter_too_much,
HealthCheck.too_slow]
# the factorization() sometimes takes a long time to finish
HYP_SETTINGS['deadline'] = 5000
class TestNumbertheory(unittest.TestCase):
def test_gcd(self):
assert gcd(3 * 5 * 7, 3 * 5 * 11, 3 * 5 * 13) == 3 * 5
assert gcd([3 * 5 * 7, 3 * 5 * 11, 3 * 5 * 13]) == 3 * 5
assert gcd(3) == 3
@unittest.skipUnless(HC_PRESENT,
"Hypothesis 2.0.0 can't be made tolerant of hard to "
"meet requirements (like `is_prime()`), the test "
"case times-out on it")
@settings(**HYP_SETTINGS)
@given(st_comp_with_com_fac())
def test_gcd_with_com_factor(self, numbers):
n = gcd(numbers)
assert 1 in numbers or n != 1
for i in numbers:
assert i % n == 0
@unittest.skipUnless(HC_PRESENT,
"Hypothesis 2.0.0 can't be made tolerant of hard to "
"meet requirements (like `is_prime()`), the test "
"case times-out on it")
@settings(**HYP_SETTINGS)
@given(st_comp_no_com_fac())
def test_gcd_with_uncom_factor(self, numbers):
n = gcd(numbers)
assert n == 1
@given(st.lists(st.integers(min_value=1, max_value=2**8192),
min_size=1, max_size=20))
def test_gcd_with_random_numbers(self, numbers):
n = gcd(numbers)
for i in numbers:
# check that at least it's a divider
assert i % n == 0
def test_lcm(self):
assert lcm(3, 5 * 3, 7 * 3) == 3 * 5 * 7
assert lcm([3, 5 * 3, 7 * 3]) == 3 * 5 * 7
assert lcm(3) == 3
@given(st.lists(st.integers(min_value=1, max_value=2**8192),
min_size=1, max_size=20))
def test_lcm_with_random_numbers(self, numbers):
n = lcm(numbers)
for i in numbers:
assert n % i == 0
@unittest.skipUnless(HC_PRESENT,
"Hypothesis 2.0.0 can't be made tolerant of hard to "
"meet requirements (like `is_prime()`), the test "
"case times-out on it")
@settings(**HYP_SETTINGS)
@given(st_num_square_prime())
def test_square_root_mod_prime(self, vals):
square, prime = vals
calc = square_root_mod_prime(square, prime)
assert calc * calc % prime == square
@settings(**HYP_SETTINGS)
@given(st.integers(min_value=1, max_value=10**12))
@example(265399 * 1526929)
@example(373297 ** 2 * 553991)
def test_factorization(self, num):
factors = factorization(num)
mult = 1
for i in factors:
mult *= i[0] ** i[1]
assert mult == num
@settings(**HYP_SETTINGS)
@given(st.integers(min_value=3, max_value=1000).filter(lambda x: x % 2))
def test_jacobi(self, mod):
if is_prime(mod):
squares = set()
for root in range(1, mod):
assert jacobi(root * root, mod) == 1
squares.add(root * root % mod)
for i in range(1, mod):
if i not in squares:
assert jacobi(i, mod) == -1
else:
factors = factorization(mod)
for a in range(1, mod):
c = 1
for i in factors:
c *= jacobi(a, i[0]) ** i[1]
assert c == jacobi(a, mod)
@given(st_two_nums_rel_prime())
def test_inverse_mod(self, nums):
num, mod = nums
inv = inverse_mod(num, mod)
assert 0 < inv < mod
assert num * inv % mod == 1
def test_inverse_mod_with_zero(self):
assert 0 == inverse_mod(0, 11)
| 32.551471
| 78
| 0.579851
|
03ccf7027a95754c9f01484c62e00e57f4104182
| 405
|
py
|
Python
|
problem_10.py
|
coderistan/projecteuler
|
d030a7e405d437502a6a8c4997e809781ad6302b
|
[
"MIT"
] | null | null | null |
problem_10.py
|
coderistan/projecteuler
|
d030a7e405d437502a6a8c4997e809781ad6302b
|
[
"MIT"
] | null | null | null |
problem_10.py
|
coderistan/projecteuler
|
d030a7e405d437502a6a8c4997e809781ad6302b
|
[
"MIT"
] | null | null | null |
from math import sqrt
def find_prime(min_number, max_number):
    # Yield the odd numbers in [min_number, max_number) that have no divisor between
    # min_number and their square root (the odd primes when called with min_number=2);
    # 2 itself is skipped here and added back by the caller.
    for i in range(min_number, max_number):
        if i % 2 == 0:
            continue
        is_prime = True
        for k in range(min_number, int(sqrt(i)) + 1):
            if i % k == 0:
                is_prime = False
                break
        if is_prime:
            yield i
print(sum(find_prime(2,2000000))+2)
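# Alternative sketch, not used above: a Sieve of Eratosthenes yields the same total far
# faster than per-number trial division for a limit of 2,000,000. The function name
# `sum_primes_below` is illustrative and not part of the original solution.
def sum_primes_below(limit):
    # sieve[i] stays True while i is still considered prime
    sieve = [True] * limit
    sieve[0], sieve[1] = False, False
    for p in range(2, int(sqrt(limit)) + 1):
        if sieve[p]:
            # cross out every multiple of p starting at p * p
            sieve[p * p::p] = [False] * len(sieve[p * p::p])
    return sum(i for i, still_prime in enumerate(sieve) if still_prime)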
| 23.823529
| 54
| 0.481481
|
6ce218c1eb02f10a1640eb3bcd89e6064a6c258a
| 1,747
|
py
|
Python
|
modules/models/zeror.py
|
guilhermemg/trace-links-tc-br
|
965cb57d17057d1c9c3841c4aba01e72cf008cab
|
[
"MIT"
] | null | null | null |
modules/models/zeror.py
|
guilhermemg/trace-links-tc-br
|
965cb57d17057d1c9c3841c4aba01e72cf008cab
|
[
"MIT"
] | null | null | null |
modules/models/zeror.py
|
guilhermemg/trace-links-tc-br
|
965cb57d17057d1c9c3841c4aba01e72cf008cab
|
[
"MIT"
] | null | null | null |
import time
import pandas as pd
import numpy as np
class ZeroR_Model:
def __init__(self, oracle):
self.oracle = oracle
self.name = None
self.gen_name = "zero_r"
def set_name(self, name):
self.name = name
def get_model_gen_name(self):
return self.gen_name
def recover_links(self):
starttime = time.time()
major_target_artifact = self._get_major()
print('major_target_artifact: {}'.format(major_target_artifact))
self._sim_matrix = []
for idx,row in self.oracle.iterrows():
#print('idx: {}'.format(idx))
if idx in major_target_artifact:
self._sim_matrix.append([1 for i in range(len(self.oracle.columns))])
else:
self._sim_matrix.append([0 for j in range(len(self.oracle.columns))])
self._sim_matrix = pd.DataFrame(data=self._sim_matrix, index=self.oracle.index, columns=self.oracle.columns)
#display(self._sim_matrix)
endtime = time.time()
print(f' ..Total processing time: {round(endtime-starttime,2)} seconds')
def _get_major(self):
counts = self.oracle.apply(lambda row : np.sum(row), axis=1)
self.major_counts_df = pd.DataFrame(data=zip(self.oracle.index,counts))
self.major_counts_df.sort_values(by=1, inplace=True, ascending=False)
max_ = self.major_counts_df.iloc[0,1]
max_counts_df = self.major_counts_df[self.major_counts_df[1] == max_]
return list(max_counts_df.iloc[:,0])
def get_sim_matrix(self):
return self._sim_matrix
def get_major_counts_df(self):
return self.major_counts_df
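# Usage sketch (hypothetical data, kept as a comment since this module only defines the
# model). Rows of the oracle are the target artifacts, columns the traced artifacts, and
# 1 marks a true link; ZeroR predicts a link to every column for the row(s) with most links:
#   oracle = pd.DataFrame([[1, 0], [1, 1], [0, 0]],
#                         index=["t1", "t2", "t3"], columns=["c1", "c2"])
#   model = ZeroR_Model(oracle)
#   model.recover_links()
#   sim = model.get_sim_matrix()   # row "t2" becomes all 1s, the other rows all 0s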
| 34.254902
| 116
| 0.620492
|
b2a0c97c357f9ae1eed292f68c8f0609106b9296
| 4,128
|
py
|
Python
|
tests/test_cluster_control.py
|
dymaxionlabs/nb_workflows
|
336e4d83dd5f8a7edfbaacfa426b23a42c0a68a9
|
[
"Apache-2.0"
] | 4
|
2022-02-17T19:47:52.000Z
|
2022-02-17T20:11:06.000Z
|
tests/test_cluster_control.py
|
dymaxionlabs/nb_workflows
|
336e4d83dd5f8a7edfbaacfa426b23a42c0a68a9
|
[
"Apache-2.0"
] | 2
|
2022-03-26T00:07:05.000Z
|
2022-03-30T21:20:00.000Z
|
tests/test_cluster_control.py
|
dymaxionlabs/nb_workflows
|
336e4d83dd5f8a7edfbaacfa426b23a42c0a68a9
|
[
"Apache-2.0"
] | 1
|
2022-02-18T13:33:00.000Z
|
2022-02-18T13:33:00.000Z
|
import pytest
from pytest_mock import MockerFixture
from labfunctions.cluster import Inventory, ProviderSpec, get_spec_from_file
from labfunctions.cluster.cluster_file import load_cluster_file, load_spec
from labfunctions.cluster.control import ClusterControl, apply_scale_items
from labfunctions.cluster.inventory import Inventory
from labfunctions.cluster.shortcuts import create_cluster_control
from labfunctions.control_plane.register import AgentRegister
from labfunctions.errors.cluster import ClusterSpecNotFound
from labfunctions.types.cluster import (
ClusterFile,
ClusterPolicy,
ClusterSpec,
ClusterState,
ScaleIdle,
ScaleItems,
)
from labfunctions.utils import open_yaml
from .factories import ClusterStateFactory
i = Inventory()
i.reload("tests/machines_test.yaml")
def test_cluster_inventory_init():
i = Inventory("tests/machines_test.yaml")
i2 = Inventory()
assert i.machines
assert i.volumes
assert "gce" in i.providers
assert i._inventory_from.endswith("machines_test.yaml")
assert i2.machines
assert i2.volumes
assert not i2._inventory_from.endswith("machines.yaml")
def test_cluster_inventory_machines_by():
i = Inventory()
machines = i.machines_by_provider("local")
assert machines
def test_cluster_inventory_provider():
i = Inventory()
p = i.get_provider("local")
assert isinstance(p, ProviderSpec)
def test_cluster_file_load():
clusters = load_cluster_file("tests/clusters_test.yaml")
assert len(clusters.clusters) == 2
assert clusters.clusters["external"].network == "non-default"
assert isinstance(clusters, ClusterFile)
assert isinstance(clusters.clusters["local"], ClusterSpec)
assert isinstance(clusters.clusters["local"].policy, ClusterPolicy)
assert isinstance(clusters.clusters["local"].policy.strategies[0], ScaleIdle)
assert clusters.inventory == "tests/machines_test.yaml"
def test_cluster_file_load_spec():
data = open_yaml("tests/clusters_test.yaml")
spec = load_spec(data["clusters"]["local"])
assert isinstance(spec, ClusterSpec)
assert isinstance(spec.policy.strategies[0], ScaleIdle)
def test_cluster_file_get_spec():
spec = get_spec_from_file("tests/clusters_test.yaml")
external = get_spec_from_file("tests/clusters_test.yaml", "external")
with pytest.raises(ClusterSpecNotFound):
nospec = get_spec_from_file("tests/clusters_test.yaml", "nonexist")
assert isinstance(spec, ClusterSpec)
assert isinstance(spec.policy.strategies[0], ScaleIdle)
assert spec.name == "local"
assert external.name == "external"
def test_cluster_control_create(mocker: MockerFixture, redis):
mocker.patch("labfunctions.cluster.shortcuts.redis.from_url", return_value=redis)
cc = create_cluster_control("tests/clusters_test.yaml", "redis_url", "local")
with pytest.raises(ClusterSpecNotFound):
cc = create_cluster_control("tests/clusters_test.yaml", "sara", "non-exist")
assert cc.spec.name == "local"
def test_cluster_control_init(redis):
clusters = load_cluster_file("tests/clusters_test.yaml")
inventory = Inventory(clusters.inventory)
are = AgentRegister(redis, "local")
cc = ClusterControl(are, clusters.clusters["local"], inventory)
policy = cc.policy
assert cc.cluster_name == "local"
assert isinstance(cc.state, ClusterState)
assert cc.state.agents_n == 0
assert isinstance(policy, ClusterPolicy)
def test_cluster_control_scale_items_gt():
state = ClusterStateFactory()
state.queue_items["default"] = 10
scale = ScaleItems(qname="default", items_gt=1, increase_by=5)
new_state = apply_scale_items(state, scale)
assert id(state) != id(new_state)
assert new_state.agents_n == state.agents_n + 5
def test_cluster_control_scale_items_lt():
state = ClusterStateFactory()
state.queue_items["default"] = 1
scale = ScaleItems(qname="default", items_gt=10, items_lt=2)
new_state = apply_scale_items(state, scale)
assert new_state.agents_n == state.agents_n - 1
assert len(new_state.agents) < len(state.agents)
| 34.115702
| 85
| 0.753149
|
bda4f6f1cdc374c61784e28eebabf897526cc751
| 9,525
|
py
|
Python
|
config/settings/base.py
|
blaisejames/django-loginregistration
|
c4422d7fb8fe3d5c4d06589aa0d076be3e6fde4c
|
[
"Apache-1.1"
] | null | null | null |
config/settings/base.py
|
blaisejames/django-loginregistration
|
c4422d7fb8fe3d5c4d06589aa0d076be3e6fde4c
|
[
"Apache-1.1"
] | null | null | null |
config/settings/base.py
|
blaisejames/django-loginregistration
|
c4422d7fb8fe3d5c4d06589aa0d076be3e6fde4c
|
[
"Apache-1.1"
] | null | null | null |
"""
Base settings to build other settings files upon.
"""
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (loginregistration/config/settings/base.py - 3 = loginregistration/)
APPS_DIR = ROOT_DIR.path('loginregistration')
env = environ.Env()
READ_DOT_ENV_FILE = env.bool('DJANGO_READ_DOT_ENV_FILE', default=False)
if READ_DOT_ENV_FILE:
# OS environment variables take precedence over variables from .env
env.read_env(str(ROOT_DIR.path('.env')))
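# Illustrative .env contents that the block above would pick up when
# DJANGO_READ_DOT_ENV_FILE is enabled (placeholder values only):
#   DJANGO_DEBUG=False
#   DATABASE_URL=postgres://user:password@localhost:5432/loginregistration
#   DJANGO_EMAIL_BACKEND=django.core.mail.backends.console.EmailBackend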
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DJANGO_DEBUG', False)
# Local time zone. Choices are
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# though not all of them may be available with every OS.
# In Windows, this must be set to your system time zone.
TIME_ZONE = 'America/New_York'
# https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# DATABASES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': env.db('DATABASE_URL', default='postgres:///loginregistration'),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# URLS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = 'config.urls'
# https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# APPS
# ------------------------------------------------------------------------------
DJANGO_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# 'django.contrib.humanize', # Handy template tags
'django.contrib.admin',
]
THIRD_PARTY_APPS = [
'crispy_forms',
'allauth',
'allauth.account',
'allauth.socialaccount',
'rest_framework',
]
LOCAL_APPS = [
'loginregistration.users.apps.UsersAppConfig',
# Your stuff: custom apps go here
]
# https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIGRATIONS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#migration-modules
MIGRATION_MODULES = {
'sites': 'loginregistration.contrib.sites.migrations'
}
# AUTHENTICATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#authentication-backends
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-user-model
AUTH_USER_MODEL = 'users.User'
# https://docs.djangoproject.com/en/dev/ref/settings/#login-redirect-url
LOGIN_REDIRECT_URL = 'users:redirect'
# https://docs.djangoproject.com/en/dev/ref/settings/#login-url
LOGIN_URL = 'account_login'
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = [
# https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django
'django.contrib.auth.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# MIDDLEWARE
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#middleware
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# STATIC
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [
str(APPS_DIR.path('static')),
]
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
# MEDIA
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# FIXTURES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#fixture-dirs
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL.
ADMIN_URL = 'admin/'
# https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = [
("""Blaise James""", 'blaise@blaisejames.com'),
]
# https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# django-allauth
# ------------------------------------------------------------------------------
ACCOUNT_ALLOW_REGISTRATION = env.bool('DJANGO_ACCOUNT_ALLOW_REGISTRATION', True)
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_AUTHENTICATION_METHOD = 'username'
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_REQUIRED = True
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_ADAPTER = 'loginregistration.users.adapters.AccountAdapter'
# https://django-allauth.readthedocs.io/en/latest/configuration.html
SOCIALACCOUNT_ADAPTER = 'loginregistration.users.adapters.SocialAccountAdapter'
# Your stuff...
# ------------------------------------------------------------------------------
| 39.522822
| 109
| 0.630236
|
c00f2d85ae7607f69a01bfe55e2324e59becd20a
| 5,610
|
py
|
Python
|
docs/conf.py
|
GammaGames/Circuitpython_Keystore
|
dcf6935410e376828764baa4c88428852d7ae3da
|
[
"Unlicense",
"MIT-0",
"MIT"
] | null | null | null |
docs/conf.py
|
GammaGames/Circuitpython_Keystore
|
dcf6935410e376828764baa4c88428852d7ae3da
|
[
"Unlicense",
"MIT-0",
"MIT"
] | null | null | null |
docs/conf.py
|
GammaGames/Circuitpython_Keystore
|
dcf6935410e376828764baa4c88428852d7ae3da
|
[
"Unlicense",
"MIT-0",
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2017 Scott Shawcroft, written for Adafruit Industries
#
# SPDX-License-Identifier: MIT
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
]
# TODO: Please Read!
# Uncomment the below if you use native CircuitPython modules such as
# digitalio, micropython and busio. List the modules you use. Without it, the
# autodoc module docs will fail to generate with a warning.
# autodoc_mock_imports = ["digitalio", "busio"]
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"CircuitPython": ("https://docs.circuitpython.org/en/latest/", None),
}
# Show the docstring from both the class and its __init__() method.
autoclass_content = "both"
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "CircuitPython Keystore Library"
copyright = "2022 Jesse Lieberg"
author = "Jesse Lieberg"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "1.0"
# The full version, including alpha/beta/rc tags.
release = "1.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = [
"_build",
"Thumbs.db",
".DS_Store",
".env",
"CODE_OF_CONDUCT.md",
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = "any"
# If true, '()' will be appended to :func: etc. cross-reference text.
#
add_function_parentheses = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# If this is True, todo emits a warning for each TODO entry. The default is False.
todo_emit_warnings = True
napoleon_numpy_docstring = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), "."]
    except ImportError:
html_theme = "default"
html_theme_path = ["."]
else:
html_theme_path = ["."]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
html_favicon = "_static/favicon.ico"
# Output file base name for HTML help builder.
htmlhelp_basename = "CircuitPython_Keystore_Librarydoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"CircuitPython_Keystore_Library.tex",
"CircuitPython Keystore Library Documentation",
author,
"manual",
),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
master_doc,
"CircuitPython_Keystore_Library",
"CircuitPython Keystore Library Documentation",
[author],
1,
),
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"CircuitPython_Keystore_Library",
"CircuitPython Keystore Library Documentation",
author,
"CircuitPython_Keystore_Library",
"One line description of project.",
"Miscellaneous",
),
]
| 29.840426
| 84
| 0.674688
|
643c7a9edcba807eb7bd5428b6ac4a30709b2ed0
| 166,563
|
py
|
Python
|
cellpack/autopack/upy/hostHelper.py
|
mesoscope/cellpack
|
ec6b736fc706c1fae16392befa814b5337a3a692
|
[
"MIT"
] | null | null | null |
cellpack/autopack/upy/hostHelper.py
|
mesoscope/cellpack
|
ec6b736fc706c1fae16392befa814b5337a3a692
|
[
"MIT"
] | 21
|
2021-10-02T00:07:05.000Z
|
2022-03-30T00:02:10.000Z
|
cellpack/autopack/upy/hostHelper.py
|
mesoscope/cellpack
|
ec6b736fc706c1fae16392befa814b5337a3a692
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Copyright (C) <2010> Autin L. TSRI
This file git_upy/hostHelper.py is part of upy.
upy is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
upy is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with upy. If not, see <http://www.gnu.org/licenses/gpl-3.0.html>.
"""
import sys
import os
import math
from math import cos, sin, sqrt
import random
import numpy
from PIL import Image
from pymunk.util import is_clockwise, calc_area
from cellpack.mgl_tools.upy import colors
if sys.version_info >= (3, 0, 0):
unicode = str
def vdistance(c0, c1):
"""get the distance between two points c0 and c1"""
d = numpy.array(c1) - numpy.array(c0)
s = numpy.sum(d * d)
return math.sqrt(s)
def vdiff(p1, p2):
# returns p1 - p2
x1, y1, z1 = p1
x2, y2, z2 = p2
return (x1 - x2, y1 - y2, z1 - z2)
def vcross(v1, v2):
x1, y1, z1 = v1
x2, y2, z2 = v2
return (y1 * z2 - y2 * z1, z1 * x2 - z2 * x1, x1 * y2 - x2 * y1)
def dot(v1, v2):
x1, y1, z1 = v1
x2, y2, z2 = v2
return (x1 * x2) + (y1 * y2) + (z1 * z2)
def vnorm(v1):
x1, y1, z1 = v1
n1 = 1.0 / math.sqrt(x1 * x1 + y1 * y1 + z1 * z1)
return (x1 * n1, y1 * n1, z1 * n1)
def vlen(v1):
x1, y1, z1 = v1
return math.sqrt(x1 * x1 + y1 * y1 + z1 * z1)
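# Quick sanity checks for the vector helpers above (an illustrative sketch, not
# part of the original module; outputs rounded where floating point applies):
# >>> vdistance((0.0, 0.0, 0.0), (1.0, 2.0, 2.0))
# 3.0
# >>> vcross((1.0, 0.0, 0.0), (0.0, 1.0, 0.0))
# (0.0, 0.0, 1.0)
# >>> vnorm((3.0, 0.0, 4.0))   # approximately (0.6, 0.0, 0.8)
# >>> vlen((3.0, 0.0, 4.0))
# 5.0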
class Helper:
"""
The Helper abstract Object
==========================
    This is the main class from which all helpers derive. The Helper
    gives access to the basic functions needed to create and edit objects
    in the host.
    Most of the functions defined at this level are overwritten by the child classes.
    matrix and transformation come from http://www.lfd.uci.edu/~gohlke/code/transformations.py.html
    >>> import upy
    >>> hClass = upy.getHelperClass()
    >>> helper = hClass()
    See examples in upy/examples
"""
BONES = None
IK = None
CAM_OPTIONS = {"ortho": "ortho", "persp": "persp"} # define the type of camera
LIGHT_OPTIONS = {
"Area": "AREA",
"Sun": "SUN",
"Spot": "SPOT",
} # define the type of light
dupliVert = False
def __init__(
self,
):
self.noise_type = {
"boxNoise": None,
"buya": None,
"cellNoise": None,
"cellVoronoi": None,
"cranal": None,
"dents": None,
"displacedTurbulence": None,
"electrico": None,
"fbm": None,
"fire": None,
"gas": None,
"hama": None,
"luka": None,
"modNoie": None,
"naki": None,
"noise": None,
"none": None,
"nutous": None,
"ober": None,
"pezo": None,
"poxo": None,
"sema": None,
"sparseConvolution": None,
"stupl": None,
"turbulence": None,
"vlNoise": None,
"voronoi1": None,
"voronoi2": None,
"voronoi3": None,
"wavyTurbulence": None,
"zada": None,
}
self.nogui = False
self.instance_dupliFace = False
self.quad = {
"+Z": [[-1, 1, 0], [1, 1, 0], [1, -1, 0], [-1, -1, 0]], # XY
"+Y": [[-1, 0, 1], [1, 0, 1], [1, 0, -1], [-1, 0, -1]], # XZ
"+X": [[0, -1, 1], [0, 1, 1], [0, 1, -1], [0, -1, -1]], # YZ
"-Z": [[-1, 1, 0], [1, 1, 0], [1, -1, 0], [-1, -1, 0]], # XY
"-Y": [[-1, 0, 1], [1, 0, 1], [1, 0, -1], [-1, 0, -1]], # XZ
"-X": [[0, -1, 1], [0, 1, 1], [0, 1, -1], [0, -1, -1]], # YZ
}
self.axis_dic = {
"+X": [1.0, 0.0, 0.0],
"-X": [-1.0, 0.0, 0.0],
"+Y": [0.0, 1.0, 0.0],
"-Y": [0.0, -1.0, 0.0],
"+Z": [0.0, 0.0, 1.0],
"-Z": [0.0, 0.0, -1.0],
}
# ==============================================================================
    # some helpers for threading and asynchronous stuff
# ==============================================================================
def testForEscape(
self,
):
"""
        return True if ESC is pressed
"""
return False
# ==============================================================================
# mathutils
# ==============================================================================
def norm(self, a, b, c):
"""
return the norm of the vector [a,b,c]
>>> result = helper.norm(a,b,c) #a,b,c being double
@type a: float
@param a: first value of the vector
@type b: float
@param b: second value of the vector
@type c: float
@param c: third value of the vector
@rtype: float
@return: the norm of the vector
"""
return math.sqrt(a * a + b * b + c * c)
def normalize(self, A):
"""
return the normalized vector A [x,y,z]
>>> a = [1.0,3.0,5.0]
>>> a_normalized = helper.normalize(a)
@type A: vector
@param A: the 3d vector
@rtype: vector
        @return: the normalized 3d vector
"""
norm = self.norm(A[0], A[1], A[2])
if norm == 0.0:
return A
else:
return [A[0] / norm, A[1] / norm, A[2] / norm]
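    # Worked example for normalize (illustrative, approximate due to floating
    # point): [3.0, 0.0, 4.0] has norm 5, so helper.normalize([3.0, 0.0, 4.0])
    # returns roughly [0.6, 0.0, 0.8].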
def measure_distance(self, c0, c1, vec=False):
"""measure distance between 2 point specify by x,y,z
c0,c1 should be Numeric.array
>>> a = [1.0,3.0,5.0]
>>> b = [5.0,1.0,2.0]
>>> distance = helper.measure_distance(a,b)
>>> distance, vector_a_to_b = helper.measure_distance(a,b)
@type c0: vector
@param c0: the first 3d vector
@type c1: vector
@param c1: the second 3d vector
@type vec: Boolean
@param vec: if the function return the vector c1-c0
@rtype: float ? vector
@return: the distance, and optionly the distance vetor
"""
d = numpy.array(c1) - numpy.array(c0)
s = numpy.sum(d * d)
if vec:
return d, math.sqrt(s)
else:
return math.sqrt(s)
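    # Worked example for measure_distance (illustrative): with a=[1,3,5] and
    # b=[5,1,2], d = b - a = [4,-2,-3] and the distance is sqrt(16+4+9) =
    # sqrt(29) ~= 5.385; with vec=True the vector [4,-2,-3] is returned first.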
# direction, angle authorized
def advance_randpoint_onsphere(self, radius, marge=math.pi, vector=None):
# radius r, inclination θ, azimuth φ
r = radius
azimuth = random.uniform(-1, 1) * (marge * 2.0)
inclination = random.uniform(-1, 1) * (marge)
x = r * math.sin(inclination) * math.cos(azimuth)
y = r * math.sin(inclination) * math.sin(azimuth)
z = r * math.cos(inclination)
pos = [x, y, z]
if vector is not None:
absolute_vector = numpy.array([0, 0, radius])
matrice = self.rotVectToVect(absolute_vector, vector)
pos = self.ApplyMatrix(
[
pos,
],
matrice,
)[0]
return pos
def randpoint_onsphere(self, radius, biased=None):
"""
Generate a random point on the outside of a sphere.
>>> r = 2.0
>>> bias = 2.0
>>> point = helper.randpoint_onsphere(r)
>>> point2 = helper.randpoint_onsphere(r,bias)
@type radius: float
@param radius: the radius of the sphere
@type biased: float
@param biased: optional float vale to use instead of the random function
@rtype: vector
@return: a random 3d point on the sphere of the given radius
-points (x,y,z) so that (x-a)^2 +(y-b)^2 + (z-c)^2 = R^2
-To generate a random point on the sphere, it is necessary only
to generate two random numbers, z between -R and R, phi between
0 and 2 pi, each with a uniform distribution.
To find the latitude (theta) of this point, note that z=R*sin(theta),
        so theta = asin(z/R); its longitude is (surprise!) phi.
        In rectilinear coordinates,
        theta = asin(z/R)
x=R*cos(theta)*cos(phi),
y=R*cos(theta)*sin(phi),
z=R*sin(theta)= (surprise!) z.
        -hemisphere
theta (0 <= theta < 360) and phi (0 <= phi <= pi/2)
x = cos(sqrt(phi)) cos(theta)
y = cos(sqrt(phi)) sin(theta)
z = sin(sqrt(phi))
A whole sphere is obtained by simply randomising the sign of z.
-Azimuth axis is X axis. The elevation angle is measured as the angle
between the Z-axis pointing upwards and the radius vector.
From elementary spherical geometry:
X coordinate=r*cos(pi/2-el)*cos(az)
Y coordinate=r*cos(pi/2-el)*sin(az)
Z Coordinate=r*sin(pi/2-el)
"""
if biased is not None:
theta = biased * (2 * math.pi)
u = biased * 2 - 1 # represent sin(phi)
else:
theta = random.uniform(0.0, 1.0) * (2 * math.pi)
u = random.uniform(0.0, 1.0) * 2 - 1
x = radius * math.sqrt(1 - u ** 2) * math.cos(theta)
y = radius * math.sqrt(1 - u ** 2) * math.sin(theta)
z = radius * u
return [x, y, z]
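    # Worked example for randpoint_onsphere (illustrative): with radius=2.0 and
    # biased=0.5, theta = pi and u = 0, so the returned point is approximately
    # [-2.0, 0.0, 0.0], which lies on the sphere of radius 2 as expected.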
def transposeMatrix(self, matrice):
if matrice is not None:
matrice = numpy.array(matrice)
if isinstance(matrice, numpy.ndarray):
mat = matrice.transpose().tolist()
return mat
else:
return matrice
return matrice
def rotatePoint(self, pt, m, ax):
"""
        Rotate the point pt [x,y,z] around the axis ax[0],ax[1],ax[2] by ax[3] radians,
and translate by m [x,y,z].
>>> point = [1.0,2.0,0.0]
>>> trans = [5.0,0.0,0.0]
>>> axes = [1.0,0.0,0.0,math.pi]
>>> point = helper.rotatePoint(point,trans,axes)
>>> print point
[6.0, -2.0, 2.4492935982947064e-16] #[6.0, -2.0, 0.0]
@type pt: 3d vector
@param pt: the 3d point to be rotated
@type m: 3d vector
@param m: translation to apply after rotation
@type ax: 4d vector
        @param ax: axis of rotation ax[0],ax[1],ax[2] and angle ax[3] in radians
@rtype: 3d vector
@return: the transformed point
"""
x = pt[0]
y = pt[1]
z = pt[2]
u = ax[0]
v = ax[1]
w = ax[2]
ux = u * x
uy = u * y
uz = u * z
vx = v * x
vy = v * y
vz = v * z
wx = w * x
wy = w * y
wz = w * z
sa = math.sin(ax[3])
ca = math.cos(ax[3])
pt[0] = (
u * (ux + vy + wz)
+ (x * (v * v + w * w) - u * (vy + wz)) * ca
+ (-wy + vz) * sa
) + m[0]
pt[1] = (
v * (ux + vy + wz)
+ (y * (u * u + w * w) - v * (ux + wz)) * ca
+ (wx - uz) * sa
) + m[1]
pt[2] = (
w * (ux + vy + wz)
+ (z * (u * u + v * v) - w * (ux + vy)) * ca
+ (-vx + uy) * sa
) + m[2]
return pt
def eulerToMatrix(self, euler): # double heading, double attitude, double bank
"""
Code from 'http://www.euclideanspace.com/maths/geometry/rotations/conversions/'.
This conversion uses NASA standard aeroplane conventions as described on page:
'http://www.euclideanspace.com/maths/geometry/rotations/euler/index.htm'
Coordinate System: right hand
Positive angle: right hand
Order of euler angles: heading first, then attitude, then bank
matrix row column ordering:
[m00 m01 m02]
[m10 m11 m12]
[m20 m21 m22]
>>> euler = [0.8,3.14,2.0]#radians
>>> emat = helper.eulerToMatrix(euler)
>>> print emat
[[-0.69670582573323303, 0.65275180908484898, -0.29751650059422086, 0.0],
[0.0015926529164868282, 0.41614630875957009, 0.90929627358879683, 0.0],
[0.71735518109654839, 0.6330381706044601, -0.29097116474265428, 0.0],
[0.0, 0.0, 0.0, 1.0]]
@type euler: 3d array
@param euler: the euler angle to convert in matrice
@rtype: 4x4array
@return: the matrix computed from the euler angle
"""
# Assuming the angles are in radians.
heading = euler[0]
attitude = euler[1]
bank = euler[2]
m = [
[1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0],
]
ch = math.cos(heading)
sh = math.sin(heading)
ca = math.cos(attitude)
sa = math.sin(attitude)
cb = math.cos(bank)
sb = math.sin(bank)
m[0][0] = ch * ca
m[0][1] = sh * sb - ch * sa * cb
m[0][2] = ch * sa * sb + sh * cb
m[1][0] = sa
m[1][1] = ca * cb
m[1][2] = -ca * sb
m[2][0] = -sh * ca
m[2][1] = sh * sa * cb + ch * sb
m[2][2] = -sh * sa * sb + ch * cb
return m
def getTubeProperties(self, coord1, coord2):
"""
        From two points, return the length and the orientation from one to the other.
This function is used to build a cylinder from two points (see oneCylinder function)
>>> coord1 = [1.0,0.0,0.0]
>>> coord2 = [2.0,0.0,0.0]
>>> distance,rsz,rz,coord = helper.getTubeProperties(coord1,coord2)
>>> helper.setTransformation(obj,trans=coord,scale=[1., 1., distance],
rot=[0.,rz,rsz])
@type coord1: vector
@param coord1: first point
@type coord2: vector
@param coord2: second point
        @rtype: tuple
        @return: length, orientation (rotation XY,Z), and intermediate point OR
        length and matrix of transformation (see getTubePropertiesMatrix that uses numpy)
"""
x1 = float(coord1[0])
y1 = float(coord1[1])
z1 = float(coord1[2])
x2 = float(coord2[0])
y2 = float(coord2[1])
z2 = float(coord2[2])
laenge = math.sqrt(
(x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2) + (z1 - z2) * (z1 - z2)
)
wsz = math.atan2((y1 - y2), (x1 - x2))
wz = math.acos((z1 - z2) / laenge)
return laenge, wsz, wz, [float(x1 + x2) / 2, (y1 + y2) / 2, (z1 + z2) / 2]
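    # Worked example for getTubeProperties (illustrative): for coord1=[1,0,0]
    # and coord2=[2,0,0] the length is 1.0, wsz = atan2(0,-1) = pi,
    # wz = acos(0/1) = pi/2, and the returned midpoint is [1.5, 0.0, 0.0].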
def isSphere(self, obj):
v = self.getMeshVertices(obj)
dmd = 10
r = 10.0
alld = numpy.linalg.norm(v, axis=1)
# dmd = numpy.average(alld - numpy.average(alld))
r = alld.max()
dmd = r - numpy.average(alld)
if dmd < 0.1:
obj["radius"] = r
return True
else:
return False
def update(
self,
):
"""
Update the host viewport, ui or gl draw
        This function can't be called in a thread.
* overwrited by children class for each host
"""
pass
def fit_view3D(self):
"""
Function that should recenter the viewport to the object in the scene.
* overwrited by children class for each host
"""
pass
def checkName(self, name):
"""
        Check the provided name to avoid invalid characters for the
        host. ie maya doesn't support object names starting with a number, and automatically renames the object.
        In order to retrieve the object use this function. If an invalid
        character is found, the character is removed.
        This function may change in the future, as it currently only looks for leading digits.
>>> name = "1sphere"
>>> sphere_obj,sphere_mesh = helper.Sphere(name)
>>> print (sphere_obj,sphere_mesh)#in maya
(u'sphere', u'makeNurbSphere1')
>>> corrected_name = helper.checkName(name)
>>> print (corrected_name)
sphere
>>> sphere = helper.getObject(name)
>>> print (sphere)
sphere
@type name: string
@param name: name of the molecule.
@rtype: string
@return: corrected name of the molecule.
"""
        invalid = [str(i) for i in range(10)]  # object names may not start with a digit 0-9
if name[0] in invalid:
name = name[1:]
return name
def getObject(self, name):
"""
Retrieve an object from his name.
* overwrited by children class for each host
>>> oname = "mysphere"
>>> object= helper.getObject(oname)
        >>> print oname,object #the result depends on the host
mysphere <c4d.BaseObject object at 0x1e4fc4b0> # Cinema4D
mysphere # Maya
@type name: string
@param name: request name of an host object
@rtype: hostObject
@return: the object with the requested name or None
"""
return None
def getObjectName(self, o):
"""
Return the name of an host object.
* overwrited by children class for each host
>>> obj = helper.Sphere("mySphere")
>>> name = helper.getObjectName(obj)
>>> print (name)
mySphere
@type o: hostObject
@param o: an host object
@rtype: string
@return: the name of the host object
"""
pass
@classmethod
def getCurrentScene(
self,
):
"""
Return the current/active working document or scene.
* overwrited by children class for each host
>>> sc = helper.getCurrentScene()
>>> print (sc)
None #in maya there is no scene concept
<bpy_strct, Scene("Scene") #blender 2.6
[Scene "Scene"] #blender 2.49b
<c4d.documents.BaseDocument object at 0x246c01a0> #Cinema4D
@rtype: scene
@return: the active scene
"""
pass
@classmethod
def getCurrentSceneName(self):
"""
Return the current/active working document or scene name.
* overwrited by children class for each host
>>> scname = helper.getCurrentSceneName()
>>> print (scname)
None #maya
Scene #blender 2.6
Scene #blender 2.49b
Untitled #Cinema4D
        @rtype: string
@return: the active scene name
"""
pass
def getCurrentSelection(
self,
):
"""
Return the current/active selected object in the document or scene.
* overwrited by children class for each host
>>> liste_objects = helper.getCurrentSelection()
>>> print (liste_objects)
[<c4d.BaseObject object at 0x1e4fd3a0>, <c4d.BaseObject object at 0x1e4fd3d0>] #cinema4D
@rtype: liste
@return: the list of selected object
"""
pass
def setCurrentSelection(self, obj):
"""
Return the current/active selected object in the document or scene.
* overwrited by children class for each host
>>> liste_objects = [helper.getObject("obj1"),helper.getObject("obj2")]
>>> helper.setCurrentSelection(liste_objects)
@type obj: hostObject
@param obj: the object to be selected
"""
pass
def getPosUntilRoot(self, object):
"""
Go through the hierarchy of the object until reaching the top level,
increment the position to get the transformation due to parents.
DEPRECATED
@type object: hostObject
@param object: the object
@rtype: list
@return: the cumulative translation along the parenting hierarchy
"""
stop = False
# get the first parent
pos = [0, 0, 0]
while not stop:
# get the parent position, and add it to pos
# get the parent of the previous parent
parent = None
if parent is None:
stop = True
return pos
def addObjectToScene(self, doc, object, parent=None, centerRoot=True, rePos=None):
"""
Insert/add an object to the current document under the specified parent, and
at the specified location. This function is used by all the basic object creation function.
* overwrited by children class for each host
@type doc: hostScene
@param doc: the scene where to insert the object
@type object: hostObject
@param object: the object to insert
@type parent: hostObject
@param parent: the parent of the object to insert under
@type centerRoot: boolean
        @param centerRoot: if the object has to be recentered according to the top-level
@type rePos: list
@param rePos: the location of the object in the scene
"""
# get the object name
name = ""
# if the object is not already in the scene
if self.getObject(name) is None:
if parent is not None:
if type(parent) == str:
parent = self.getObject(parent)
# if parent exist, insert the object under it
pass
else:
# insert the object
pass
def AddObject(self, object, parent=None, centerRoot=True, rePos=None):
"""
Insert/add an object to the current document under the specified parent, and
at the specified location. This function is an alias for addObjectToScene to
        permit some scripts to work either in dejavu or in the host.
* overwrited by children class for each host
@type object: hostObject
@param object: the object to insert
@type parent: hostObject
@param parent: the parent of the object to insert under
@type centerRoot: boolean
        @param centerRoot: if the object has to be recentered according to the top-level
@type rePos: list
@param rePos: the location of the object in the scene
"""
doc = self.getCurrentScene()
self.addObjectToScene(
doc, object, parent=parent, centerRoot=centerRoot, rePos=rePos
)
def ObjectsSelection(self, listeObjects, typeSel="new"):
"""
Modify the current object selection. Redundant with setCurrentSelection.
        This function makes the distinction between adding (typeSel="add") objects to the selection and creating
a new selection (typeSel="new")
* overwrited by children class for each host
@type listeObjects: list
@param listeObjects: list of object to joins
@type typeSel: string
        @param typeSel: type of modification: new,add,...
"""
# Put here the code to add/set an object to the current selection
def JoinsObjects(self, listeObjects):
"""
        Merge the given list of objects into one unique geometry.
* overwrited by children class for each host
@type listeObjects: list
@param listeObjects: list of object to joins
"""
def addCameraToScene(self, name, Type, focal, center, scene, **kw):
"""
Add a camera object to the scene
* overwrited by children class for each host
>>> sc = helper.getCurrentScene()
>>> center=[0.,-12.,40.]
>>> cam = helper.addCameraToScene("cam1","persp",30.,center,sc)
@type name: string
@param name: name of the camera
@type Type: cameraType
        @param Type: perspective, orthogonal etc...
@type focal: float
@param focal: the focal of the camera
@type center: list
@param center: the position of the camera
@type scene: host scene
@param scene: the scene
        #we add a **kw for future arguments
"""
pass
def addLampToScene(
self,
name,
Type="Area",
rgb=[1.0, 1.0, 1.0],
dist=25.0,
energy=1.0,
soft=1.0,
shadow=False,
center=[0.0, 0.0, 0.0],
scene=None,
**kw
):
"""
Add a light to the scene
* overwrited by children class for each host
>>> sc = helper.getCurrentScene()
>>> center=[0.,-12.,40.]
>>> color = [1.,1.,1.]
>>> light = helper.addLampToScene("light1","Sun",color,20.,1.0,1.0,True,center,sc)
@type name: string
@param name: name of the instance
@type Type: light hostType/int etc..
@param Type: the light type : spot,sun,omni,etc..
@type rgb: list of int 0-255
@param rgb: color of the light in rgb
@type dist: float
@param dist: light distance of attenuation
@type energy: float
@param energy: intensity of the light
@type soft: bool
@param soft: soft light
@type shadow: boolean
@param shadow: does the light produce shadow
@type scene: host scene
@param scene: the scene
        #we add a **kw for future arguments
"""
lamp = None # c4d.BaseObject(LIGHT)
# lamp name (name)
# lamp position (center)
# lamp color (float(rgb[0]), float(rgb[1]), float(rgb[2]))#color
# lamp energy float(energy) #intensity
# lamp type dicType[Type] #type
if shadow:
# lampe shadow
pass
self.addObjectToScene(scene, lamp, centerRoot=False)
def newEmpty(self, name, location=None, parentCenter=None, **kw):
"""
Create a new Null/Empty Object
* overwrited by children class for each host
>>> empty = helper.newEmpty("null1",location=[10.0,0.0,0.0])
>>> empty_child = helper.newEmpty("null2",location=[15.0,0.0,0.0],parent = empty)
@type name: string
@param name: name of the empty
@type location: list
@param location: position of the null object
@type parentCenter: list
@param parentCenter: position of the parent object DEPRECATED
@type kw: dictionary
@param kw: you can add your own keyword, but it should be interpreted by all host
-"parent"
@rtype: hostObject
@return: the null object
"""
empty = None #
if location is not None:
if parentCenter is not None:
location = location - parentCenter
# set the position of the object to location
return empty
def newInstance(
self, name, object, location=None, hostmatrice=None, matrice=None, **kw
):
"""
Create a new Instance from another Object
* overwrited by children class for each host
>>> sph = helper.Sphere("sph1")
>>> instance_sph = helper.newInstance("isph1",sph,location = [10.0,0.0,0.0])
@type name: string
@param name: name of the instance
@type object: hostObject
@param object: the object to inherit from
@type location: list/Vector
@param location: position of the null object
@type hostmatrice: list/Matrix
@param hostmatrice: transformation matrix in host format
@type matrice: list/Matrix
@param matrice: transformation matrix in epmv/numpy format
@type kw: dictionary
@param kw: you can add your own keyword, but it should be interpreted by all host
-"parent"
-"material"
@rtype: hostObject
@return: the instance object
"""
# instance = None#
# instance parent = object
# instance name = name
if location is not None:
# set the position of instance with location
pass
# set the instance matrice
self.setObjectMatrix(object, matrice=matrice, hostmatrice=hostmatrice)
return None
def getMasterInstance(self, instance, **kw):
"""
        Return the object used for the instantiation
"""
return instance
def updateMasterInstance(self, instance, objects, add=True, hide=True, **kw):
"""
Update the reference of the passed instance by adding/removing-hiding objects
* overwrited by children class for each host
>>> sph = helper.Sphere("sph1")
>>> instance_sph = helper.newInstance("isph1",sph,location = [10.0,0.0,0.0])
@type instance: string/hostObj
@param instance: name of the instance
@type objects: list hostObject/string
@param objects: the list of object to remove/add to the instance reference
@type add: bool
        @param add: if True add the object, else remove it
@type hide: bool
@param hide: hide instead of remove
@type kw: dictionary
@param kw: you can add your own keyword, but it should be interpreted by all host
"""
pass
def toggleDisplay(self, object, display):
"""
Toggle on/off the display/visibility/rendermode of an hostObject in the host viewport.
* overwrited by children class for each host
>>> helper.toggleDisplay("polygone1",True)
>>> obj = helper.getObject("polygone1")
>>> helper.toggleDisplay(obj,False)
@type object: hostObject
@param object: the object
@type display: boolean
@param display: if the object is displayed
"""
def toggleXray(self, object, xray):
"""
Toggle on/off the Xray visibility of an hostObject in the host viewport. Currently not supported in Maya
* overwrited by children class for each host
>>> helper.toggleXray("polygone1",True)
>>> obj = helper.getObject("polygone1")
>>> helper.toggleXray(obj,False)
@type object: hostObject
@param object: the object
@type xray: boolean
@param xray: if the object is Xray displayed
"""
print("not supported yet in ", self.host)
def getVisibility(self, obj, editor=True, render=False, active=False):
"""
        return the editor/rendering/active visibility state of the given object
* overwrited by children class for each host
@type obj: hostObject
@param obj: the object
@type editor: boolean
@param editor: request editor visibility
@type render: boolean
@param render: request rendering visibility
@type active: boolean
@param active: request active states ie C4D
@rtype: bool/array of bool
@return: the current visibility state of the object
"""
pass
def setViewport(self, **kw):
"""
set the property of the viewport
* overwrited by children class for each host
@type kw: dictionary
@param kw: the list of parameter and their value to change
"""
print("setViewport helper class")
pass
def toggleEditMode(self):
"""
Turn off edit mode (if any)
"""
pass
def restoreEditMode(self, editmode=1):
"""
        Restore any edit mode (if any)
"""
pass
def setObjectMatrix(self, object, matrice, hostmatrice=None, absolue=True, **kw):
"""
Set a matrix to an hostObject
* overwrited by children class for each host
@type object: hostObject
@param object: the object who receive the transformation
@type hostmatrice: list/Matrix
@param hostmatrice: transformation matrix in host format
@type matrice: list/Matrix
@param matrice: transformation matrix in epmv/numpy format
@type absolue: Boolean
@param absolue: absolute or local transformation
@type kw: dictionary
@param kw: you can add your own keyword, but it should be interpreted by all host
"""
if hostmatrice is not None:
# set the instance matrice
pass
if matrice is not None:
# convert the matrice in host format
# set the instance matrice
pass
def concatObjectMatrix(self, object, matrice, hostmatrice=None):
"""
Apply a matrix to an hostObject
* overwrited by children class for each host
@type object: hostObject
@param object: the object who receive the transformation
@type hostmatrice: list/Matrix
@param hostmatrice: transformation matrix in host format
@type matrice: list/Matrix
@param matrice: transformation matrix in epmv/numpy format
"""
# get current transformation
if hostmatrice is not None:
# compute the new matrix: matrice*current
# set the new matrice
pass
if matrice is not None:
# convert the matrice in host format
# compute the new matrix: matrice*current
# set the new matrice
pass
def translateObj(self, object, position, use_parent=True, absolue=True, **kw):
"""
Global Translation : Move the object to the vector position
* overwrited by children class for each host
@type object: hostObject
@param object: the object
@type position: liste/array
@param position: the new object position px,py,pz
@type use_parent: boolean
@param use_parent: if the parent position is used
@type absolue: Boolean
@param absolue: absolute or local transformation
@type kw: dictionary
@param kw: you can add your own keyword, but it should be interpreted by all host
"""
pass
def scaleObj(self, object, absolue=True, **kw):
"""
Global Scale : scale the object by the vector scale
* overwrited by children class for each host
@type object: hostObject
@param object: the object
@type absolue: Boolean
@param absolue: absolute or local transformation
@type kw: dictionary
@param kw: you can add your own keyword, but it should be interpreted by all host
"""
pass
def rotateObj(self, object, rotation, absolue=True, **kw):
"""
Global Rotation : Rotate the object
        This method takes a 3d array [rotation_X,rotation_Y,rotation_Z]
* overwrited by children class for each host
@type object: hostObject
@param object: the object
@type rotation: liste/array - matrice
@param rotation: the new object rotation
@type absolue: Boolean
@param absolue: absolute or local transformation
@type kw: dictionary
@param kw: you can add your own keyword, but it should be interpreted by all host
"""
pass
def setTranslation(self, name, pos=[0.0, 0.0, 0.0], absolue=True, **kw):
"""
        Set the position (translation) of the given object in absolute or local world
* overwrited by children class for each host
@type name: hostObject
@param name: the object name
@type pos: list<float>
@param pos: the new position
@type absolue: Boolean
@param absolue: absolute or local transformation
@type kw: dictionary
@param kw: you can add your own keyword, but it should be interpreted by all host
"""
pass
def getTranslation(self, name, absolue=True, **kw):
"""
Return the current position (translation) of the given object in absolute or local world
* overwrited by children class for each host
@type name: hostObject
@param name: the object name
@type absolue: Boolean
@param absolue: absolute or local transformation
@type kw: dictionary
@param kw: you can add your own keyword, but it should be interpreted by all host
@rtype: 3d vector/list
@return: the position
"""
return [0.0, 0.0, 0.0]
def getSize(self, name, **kw):
"""
        Return the current size in x, y and z of the given object if applicable
* overwrited by children class for each host
@type name: hostObject
@param name: the object name
@rtype: 3d vector/list
@return: the size in x y and z
"""
return [0.0, 0.0, 0.0]
def getScale(self, name, absolue=True, **kw):
"""
Return the current scale of the given object in absolute or local world
* overwrited by children class for each host
@type name: hostObject
@param name: the object name
@type absolue: Boolean
@param absolue: absolute or local transformation
@rtype: 3d vector/list
@return: the scale
"""
return [1.0, 1.0, 1.0]
def resetTransformation(self, object, **kw):
"""
ReSet the transformation of a given Object to identity
* can be overwriten by children class for each host
@type object: string or Object
@param object: the object who receive the identity transformation
"""
m = [
[1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0],
]
self.setObjectMatrix(object, m)
def setTransformation(
self, name, mat=None, rot=None, scale=None, trans=None, order="str", **kw
):
"""
        Set the transformation of a given Object
* can be overwriten by children class for each host
@type name: string
@param name: the object who receive the transformation
@type mat: list/Matrix
@param mat: transformation matrix
@type rot: list
@param rot: rotation along [X,Y,Z]
@type scale: list
@param scale: scale along [X,Y,Z]
@type trans: list
@param trans: translation along [X,Y,Z]
@type order: string
@param order: order of transformation
@type kw: Dictionary
        @param kw: additional arguments
"""
obj = self.getObject(name)
absolue = True
if "abs" in kw:
absolue = kw["abs"]
if mat is not None:
self.setObjectMatrix(obj, mat, absolue=absolue, **kw)
if rot is not None:
self.rotateObj(obj, rot, absolue=absolue)
if scale is not None:
self.scaleObj(obj, scale, absolue=absolue)
if trans is not None:
self.translateObj(obj, trans, absolue=absolue)
def updateTubeObjs(self, listeObj, listePts, listeInd=None):
"""
        This function will update a list of Tubes according to the given list of new points.
        One Tube is defined by two 3d points.
"""
if listeInd is None:
[
self.updateTubeObj(listeObj[i], listePts[j], listePts[j + 1])
for i, j in zip(list(range(len(listeObj))), list(range(len(listePts))))
]
else:
[
self.updateTubeObj(
listeObj[i], listePts[listeInd[i][0]], listePts[listeInd[i][1]]
)
                for i in range(len(listeObj))
]
def convertColor(self, col, toint=True):
"""
        This function will convert a color array [r,g,b] between the 0-255 range
        and the 0.-1. range (and vice versa)
        @type col: array
        @param col: the color [r,g,b]
        @type toint: boolean
        @param toint: direction of the conversion; if True convert to 0-255, if False
        convert to the 0.-1. range
@rtype: array
@return: the converted color [0-1.,0-1.,0-1.] or [1-255,1-255,1-255]
"""
# its seems that pymol color for instance are in range 0. 1.00001 ?
if toint and max(col) <= 1.00001:
col = [int(x * 255) for x in col]
elif not toint and max(col) > 1.00001:
col = [x / 255.0 for x in col]
return col
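    # Worked example for convertColor (illustrative): convertColor([1.0, 0.5, 0.0])
    # gives [255, 127, 0] (floats scaled to 0-255 and truncated by int()), while
    # convertColor([255, 128, 0], toint=False) gives roughly [1.0, 0.502, 0.0].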
def addMaterialFromDic(self, dic):
"""
        Add material to the current scene given a dictionary {"name":[r,g,b]}
>>> matDic={"mat1":[0,0,0],"mat2":[1,1,1]}
>>> helper.addMaterialFromDic(matDic)
@type dic: Dictionary
@param dic: the name:color dictionary for creating materials
"""
# dic: Name:Color
[self.addMaterial(x, dic[x]) for x in list(dic.keys())]
def createColorsMat(self):
"""
Create a Material for all defined colors in upy.colors
@rtype: list
@return: the list of the new colors material
"""
Mat = []
for col in colors.cnames:
Mat.append(self.addMaterial(col, eval("colors." + col)))
return Mat
def retrieveColorMat(self, color):
"""
        Retrieve a material in the current document from its color (r,g,b), if its
        color is defined in upy.colors
@type color: array
@param color: the material color (r,g,b)
@rtype: hostMaterial
@return: the material of color color
"""
if color is None:
return None
mat = self.getMaterial(color)
if mat is not None and type(mat) != list and type(mat) != tuple:
return mat
        if mat is not None and len(mat) == 1:
if mat[0] is not None and type(mat[0]) != list and type(mat[0]) != tuple:
return mat[0]
if type(color) == str or type(color) == unicode:
if color in colors.cnames:
if mat is None:
return self.addMaterial(color, eval("colors." + color))
else:
return mat
for col in colors.cnames:
if tuple(color) == eval("colors." + col):
mat = self.getMaterial(col)
if mat is None:
return self.addMaterial(col, eval("colors." + col))
name = "customMat" + str(color[0]) + str(color[1]) + str(color[2])
return self.addMaterial(name.replace(".", ""), color)
def addMaterial(self, name, color, **kw):
"""
Add a material in the current document
* overwrited by children class for each host
@type name: string
@param name: the material name
@type color: array
@param color: the material color (r,g,b)
@rtype: hostMaterial
@return: the new material
"""
return None
def createMaterial(self, name, color, type="Phong", **kw):
# type can be lamber phong etc... need a dictionry
return self.addMaterial(name, color, type=type, **kw)
def assignMaterial(self, object, matname, texture=True, **kw):
"""
Assign the provided material to the object
* overwrited by children class for each host
@type object: hostApp object
@param object: the object
@type matname: string
@param matname: the material name
@type texture: Boolean
        @param texture: does the material use a texture
@type kw: dictionary
@param kw: additional keywords options
"""
# verify if the mat exist, if the string.
# apply it to the object
pass
def assignNewMaterial(self, matname, color, type, object):
mat = self.createMaterial(matname, color, type)
self.assignMaterial(object, mat)
def colorMaterial(self, mat, col):
"""
Color a given material using the given color (r,g,b).
* overwrited by children class for each host
@type mat: hostMaterial
@param mat: the material to change
@type col: array
@param col: the color (r,g,b)
"""
# put your code here
# get the materiel if mat is a string for instance
# or verify if the material exist in the current document
# then change his color channel using 'col'
pass
def getMaterial(self, name):
"""
        Get the material of the given name.
* overwrited by children class for each host
@type name: string
@param name: the name of the desired material
@rtype: hostMaterial
@return: the new material
"""
pass
def getAllMaterials(self):
"""
        Get all the materials of the current scene.
* overwrited by children class for each host
@rtype: list
@return: the list of all materials available
"""
def changeMaterialProperty(self, material, **kw):
"""
Change a material properties.
* overwrited by children class for each host
@type material: string/Material
@param material: the material to modify
@type kw: dictionary
        @param kw: property to modify with new value
- color
- specular
- ...
"""
mat = self.getMaterial(material)
if mat is None:
return
def getMaterialProperty(self, material, **kw):
"""
Get a material properties.
* overwrited by children class for each host
@type material: string/Material
@param material: the material to modify
@type kw: dictionary
        @param kw: property to modify with new value
- color
- specular
- ...
"""
mat = self.getMaterial(material)
if mat is None:
return
def colorObject(self, obj, color, **options):
"""
Apply the given color to the given object,
* overwrited by children class for each host
@type obj: string or hostObject
@param obj: the object to be colored
@type color: list
@param color: the color to apply [r,g,b]
@type options: Dictionary
@param options: additional keyword options :
        useMaterial : create a material with the given color and assign it to the object
        useObjectColors : change the color property of the object (Viewport)
"""
def changeColor(
self,
obj,
colors,
perVertex=False,
proxyObject=True,
doc=None,
pb=False,
facesSelection=None,
faceMaterial=False,
):
"""
Apply the given set of color to the given object,
if the object is a mesh this function handle the color per vertex.
* overwrited by children class for each host
@type obj: string or hostObject
@param obj: the object to be colored
@type colors: list
@param colors: the list of colors to apply [[r,g,b],[r,g,b],...]
@type perVertex: Boolean
@param perVertex: is it color per Vertex
@type proxyObject: Boolean
        @param proxyObject: special keyword for Cinema4D which doesn't support vertex color
@type doc: Scene
@param doc: the current working documents
@type pb: Boolean
@param pb: use the progress bar
@type facesSelection: liste
@param facesSelection: only assign color to the given face selecion
@type faceMaterial: Boolean
@param faceMaterial: assign color per Face
"""
pass
def changeObjColorMat(self, obj, color):
"""
Change the diffuse color of the object material.
* overwrited by children class for each host
@type obj: string or hostObject
        @param obj: the object for which we want to change the material color
@type color: list
@param color: the new color to apply [r,g,b]
"""
pass
def getMesh(self, name):
"""
Get the mesh of given name
* overwrited by children class for each host
@type name: string
        @param name: the name of the desired mesh
@rtype: hostMesh
@return: the mesh
"""
return name
def getLayers(self, scn):
"""
Return a list of active layers of a scene or an object
"""
return []
def setLayers(self, scn, layers):
"""
Set the layers of a scene or an object, expects a list of integers
"""
def checkIsMesh(self, name):
"""
        Verify that name corresponds to a valid mesh.
        * overwrited by children class for each host
        @type name: string
        @param name: the name of the desired mesh
@rtype: hostMesh
@return: the mesh
"""
return name
def getName(self, object):
"""
        Return the name of an host object. Redundant with getObjectName
* overwrited by children class for each host
>>> obj = helper.Sphere("mySphere")
>>> name = helper.getObjectName(obj)
>>> print (name)
mySphere
@type object: hostObject
@param object: an host object
@rtype: string
@return: the name of the host object
"""
def setName(self, object, name):
"""
        Set the name of an host object. Redundant with getObjectName
* overwrited by children class for each host
>>> obj = helper.Sphere("mySphere")
>>> name = "mySpinningsphere"
>>> helper.setName(obj,name)
@type object: hostObject
@param object: an host object
@type name: string
@param name: the new name
"""
def reParent(self, objs, parent):
"""
Change the object parent using the specified parent objects
* overwrited by children class for each host
@type objs: hostObject
@param objs: the object or liste of objects to be reparented
@type parent: hostObject
@param parent: the new parent object
"""
pass
def deleteChildrens(self, obj):
"""
Delete recursively all the children of the given object.
@type obj: hostObject
        @param obj: the object for which we want to delete the children
"""
# recursively delete obj and childrenobject
obj = self.getObject(obj)
childs = self.getChilds(obj)
# print childs
if childs:
if type(childs) is list or type(childs) is tuple:
[self.deleteChildrens(ch) for ch in childs]
else:
self.deleteChildrens(childs)
# else :
self.deleteObject(obj)
def constraintLookAt(self, object):
"""
        Constrain an hostobject to look at the camera.
* overwrited by children class for each host
@type object: Hostobject
@param object: object to constraint
"""
pass
# ===============================================================================
# Basic object
# ===============================================================================
def Text(
self,
name="",
string="",
parent=None,
size=5.0,
pos=None,
font=None,
lookAt=False,
**kw
):
"""
Create a hostobject of type Text.
* overwrited by children class for each host
@type name: string
@param name: name of the circle
@type string: string
@param string: text to display
@type parent: Hostobject
@param parent: parent of the text
@type size: Float
@param size: height of the text
@type pos: Vector
@param pos: position of the text
@type font: ?
@param font: the font to use
@type lookAt: boolean
        @param lookAt: whether the text is constrained to look at the camera/view
@type kw: dictionary
@param kw: additional keywords options
@rtype: hostObject
@return: the created text object
"""
text = None
print("not supported")
return [text, text]
def Circle(self, name, rad=1.0, **kw):
"""
Create a hostobject of type 2d circle.
* overwrited by children class for each host
@type name: string
@param name: name of the circle
@type rad: float
        @param rad: the radius of the circle (default = 1.)
@type kw: dictionary
@param kw: additional keywords options
@rtype: hostObject
@return: the created circle
"""
# put the apropriate code here
circle = None
# set name and rad for the circle
return circle
def rerieveAxis(self, axis):
"""
Return the axis from the given array (X,Y or Z +/-).
@type axis: list
        @param axis: the array [x,y,z]
@rtype: string
@return: the axis of the array
"""
dic = {
"+X": [1.0, 0.0, 0.0],
"-X": [-1.0, 0.0, 0.0],
"+Y": [0.0, 1.0, 0.0],
"-Y": [0.0, -1.0, 0.0],
"+Z": [0.0, 0.0, 1.0],
"-Z": [0.0, 0.0, -1.0],
}
axis = [float(int(axis[0])), float(int(axis[1])), float(int(axis[2]))]
for k in dic:
if list(axis) == dic[k]:
return k
def CylinderHeadTails(self, cylinder, **kw):
res = self.getPropertyObject(
cylinder, key=["pos", "rotation", "length", "axis"]
)
if res is None:
return None, None
pos, rot, l, axis = res
h = numpy.array(axis) * l / 2.0
t = numpy.array(axis) * l / 2.0
m = numpy.matrix(rot)
h, t = self.ApplyMatrix([h, t], m.I)
head = numpy.array(pos) + h
tail = numpy.array(pos) - t
# self.ToMat(rot))#invert/transpose he matrix?
return head, tail
def Cylinder(self, name, radius=1.0, length=1.0, res=16, pos=[0.0, 0.0, 0.0], **kw):
"""
Create a hostobject of type cylinder.
* overwrited by children class for each host
@type name: string
@param name: name of the cylinder
@type radius: float
@param radius: the radius of the cylinder
@type length: float
@param length: the length of the cylinder
@type res: float
@param res: the resolution/quality of the cylinder
@type pos: array
@param pos: the position of the cylinder
@type kw: dictionary
@param kw: additional keywords options
@rtype: hostObject,hostMesh
@return: the created cylinder object and mesh
"""
return None, None
def Sphere(self, name, radius=1.0, res=0, pos=[0.0, 0.0, 0.0], **kw):
"""
Create a hostobject of type sphere.
* overwrited by children class for each host
@type name: string
@param name: name of the sphere
@type radius: float
@param radius: the radius of the sphere
@type res: float
@param res: the resolution/quality of the sphere
@type pos: array
@param pos: the position of the cylinder
@type kw: dictionary
@param kw: additional keywords options
@rtype: hostObject,hostMesh
@return: the created sphere object and mesh
"""
return None, None
def getBoxSize(self, name, **kw):
"""
        Return the current size in x, y and z of the given Box if applicable
* overwrited by children class for each host
@type name: hostObject
@param name: the Box name
@rtype: 3d vector/list
@return: the size in x y and z
"""
return [1.0, 1.0, 1.0]
def box(
self,
name,
center=[0.0, 0.0, 0.0],
size=[1.0, 1.0, 1.0],
cornerPoints=None,
visible=1,
**kw
):
"""
Create a hostobject of type cube.
* overwrited by children class for each host
@type name: string
@param name: name of the box
@type center: array
@param center: the center of the box
@type size: array
@param size: the size in x y z direction
@type cornerPoints: array list
@param cornerPoints: the upper-left and bottom right corner point coordinates
        @type visible: boolean
@param visible: visibility of the cube after creation (deprecated)
@type kw: dictionary
@param kw: additional keywords options
@rtype: hostObject,hostMesh
@return: the created box object and mesh
"""
# put your code
box = None
# set the name 'name'
# if corner is provided compute the cube dimension in x,y,z, and the center
if cornerPoints is not None:
for i in range(3):
size[i] = cornerPoints[1][i] - cornerPoints[0][i]
# position the cube to center
# set the dimension to size
# return the box
return box, None
def updateBox(
self,
box,
center=[0.0, 0.0, 0.0],
size=[1.0, 1.0, 1.0],
cornerPoints=None,
visible=1,
mat=None,
**kw
):
"""
Update the given box.
* overwrited by children class for each host
@type box: string
@param box: name of the box
@type center: array
@param center: the new center of the box
@type size: array
@param size: the new size in x y z direction
@type cornerPoints: array list
@param cornerPoints: the new upper-left and bottom right corner point coordinates
        @type visible: boolean
@param visible: visibility of the cube after creation (deprecated)
@type kw: dictionary
@param kw: additional keywords options
"""
# put your code
# set the name 'name'
# if corner is provided compute the cube dimension in x,y,z, and the center
if cornerPoints is not None:
for i in range(3):
size[i] = cornerPoints[1][i] - cornerPoints[0][i]
# position the cube to center
# set the dimension to size
def getCornerPointCube(self, obj):
"""
Return the corner Point of the Given Cube/Box
@type obj: string
@param obj: name of the box
@rtype: array 2x3
@return: the upper-left and bottom right corner point coordinates
"""
size = self.ToVec(self.getBoxSize(obj), pos=False)
# try :
# size = self.ToVec(obj[1100])#this will broke other host!
# except :
# size = self.ToVec(self.getScale(obj))
center = self.ToVec(self.getTranslation(obj))
# print center
cornerPoints = []
# lowCorner
lc = [
center[0] - size[0] / 2.0,
center[1] - size[1] / 2.0,
center[2] - size[2] / 2.0,
]
# upperCorner
uc = [
center[0] + size[0] / 2.0,
center[1] + size[1] / 2.0,
center[2] + size[2] / 2.0,
]
cornerPoints = [[lc[0], lc[1], lc[2]], [uc[0], uc[1], uc[2]]]
return cornerPoints
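    # Worked example for getCornerPointCube (illustrative, assuming a concrete
    # host helper implements getBoxSize/getTranslation): a box of size [2,2,2]
    # centered at the origin yields [[-1,-1,-1], [1,1,1]].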
def spline(self, name, points, close=0, type=1, scene=None, parent=None, **kw):
"""
This will return a hostApp spline/curve object according the given list
of point.
* overwrited by children class for each host
@type name: string
@param name: name for the object
        @type points: list/array of vectors
        @param points: list of position coordinates of the curve points
        @type close: bool/int
        @param close: whether the curve is closed
        @type type: int/string
        @param type: usually describes the type of curve, ie : bezier, linear, cubic, etc...
@type scene: hostApp scene
@param scene: the current scene
@type parent: hostObject
@param parent: the parent for the curve
@type kw: dictionary
@param kw: additional keywords options
@rtype: hostObject,hostMesh
@return: the created spline object and data
"""
# create the spline
spline = None
# define type, close, name
# set the points
for i, p in enumerate(points):
# set every i point
pass
# add the object to the scene
if scene is not None:
self.addObjectToScene(scene, spline, parent=parent)
return spline, None
def plane(
self,
name,
center=[0.0, 0.0, 0.0],
size=[1.0, 1.0],
cornerPoints=None,
visible=1,
**kw
):
"""
        Create a hostobject of type plane.
* overwrited by children class for each host
@type name: string
@param name: name of the plane
@type center: array
@param center: the center of the plane
@type size: array
        @param size: the size in the x and y directions
@type cornerPoints: array list
@param cornerPoints: the upper-left and bottom right corner point coordinates
        @type visible: boolean
@param visible: visibility of the plane after creation (deprecated)
@type kw: dictionary
@param kw: list of additional arguments : "material", subdivision", axis"
@rtype: hostObject,hostMesh
@return: the created plane object and data
"""
# put your code
plane = None
# set the name 'name'
# if corner is provided compute the cube dimension in x,y,z, and the center
if cornerPoints is not None:
for i in range(3):
size[i] = cornerPoints[1][i] - cornerPoints[0][i]
# position the cube to center
# set the dimension to size
# return the box
return plane, None
def update_spline(self, name, coords):
"""
This will update the spline points coordinates
* overwrited by children class for each host
@type name: string
@param name: name for the spline to update
@type coords: liste/array vector
@param coords: list of new position coordinate to apply to the curve point
"""
pass
# ===============================================================================
# Platonic
# ===============================================================================
# this already exist in c4d,overwrite in host if support it
# also exist in glut
def Platonic(self, name, Type, radius, **kw):
"""Generate one of the 5 platonic solid. The name of each figure is derived from its number of faces: respectively "tetra" 4, "hexa" 6, "ocata" 8, "dodeca" 12, and 20.
@type name: string
@param name: name of the platonic
@type Type: string or int
@param Type: type of the platonic, can be tetra" 4, "hexa" 6, "ocata" 8, "dodeca" 12, and "ico" 20.
@type radius: float
@param radius: radius of the embeding sphere
@type kw: dictionary
@param kw: additional arguement such as meterial,parent
@rtype: hostObject
@return: the created platonic
"""
dicT = {
"tetra": self.tetrahedron,
"hexa": self.hexahedron,
"octa": self.octahedron,
"dodeca": self.dodecahedron,
"icosa": self.icosahedron,
} # BuckyBall ?
dicTF = {
4: self.tetrahedron,
6: self.hexahedron,
8: self.octahedron,
12: self.dodecahedron,
20: self.icosahedron,
}
parent = None
if "parent" in kw:
parent = kw["parent"]
if Type in dicT:
v, f, n = dicT[Type](radius)
return self.createsNmesh(name, v, None, f, parent=parent)
elif Type in dicTF:
v, f, n = dicTF[Type](radius)
return self.createsNmesh(name, v, None, f, parent=parent)
else:
return None, None
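    # Hedged usage sketch: `helper` is an assumed instance of a concrete child
    # class; Type may be either a key of dicT ("tetra", "hexa", "octa",
    # "dodeca", "icosa") or a face count key of dicTF (4, 6, 8, 12, 20).
    #
    #   obj, mesh = helper.Platonic("ico", "icosa", 5.0)  # by name
    #   obj, mesh = helper.Platonic("ico", 20, 5.0)       # by face count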
def tetrahedron(self, radius):
"""
Create the mesh data of a tetrahedron of a given radius
@type radius: float
        @param radius: radius of the embedding sphere
@rtype: array
@return: vertex,face, face normal of the tetrahedron
"""
faceIndices = (
(0, 2, 3),
(3, 2, 1), # bot
(3, 1, 4),
(3, 4, 0),
(2, 4, 1),
(2, 0, 4),
)
a = 1 / sqrt(3)
faceNormals = (
(a, a, -a),
(-a, a, -a),
(a, -a, -a),
(-a, -a, -a),
)
_corners = (
(-radius, 0.0, 0.0),
(radius, 0.0, 0.0),
(0.0, radius, 0.0),
(0.0, -radius, 0.0),
(0.0, 0.0, radius),
) # (0.0, 0.0, radius))
return _corners, faceIndices, faceNormals
def Tetrahedron(self, name, radius):
"""
Create the mesh data and the mesh object of a Tetrahedron of a given radius
@type name: string
        @param name: name of the Tetrahedron object
        @type radius: float
        @param radius: radius of the embedding sphere
@rtype: Object, Mesh
@return: Tetrahedron Object and Mesh
"""
v, f, n = self.tetrahedron(radius)
ob, obme = self.createsNmesh(name, v, None, f)
return ob, obme
def hexahedron(self, radius):
"""
Create the mesh data of a hexahedron of a given radius
@type radius: float
        @param radius: radius of the embedding sphere
@rtype: array
@return: vertex,face, face normal of the hexahedron
"""
faceIndices = (
(3, 2, 0),
(1, 2, 3), # bot
(4, 6, 7),
(7, 6, 5), # top
(0, 2, 6),
(6, 4, 0), # front
(2, 1, 5),
(5, 6, 2), # right
(1, 3, 7),
(7, 5, 1), # rear
(3, 0, 4),
(4, 7, 3), # left
)
faceNormals = None
depth = radius * sin(math.radians(45.0))
_corners = (
(-radius, 0.0, -depth),
(radius, 0.0, -depth),
(0.0, -radius, -depth),
(0.0, radius, -depth),
(-radius, 0.0, depth),
(radius, 0.0, depth),
(0.0, -radius, depth),
(0.0, radius, depth),
)
return _corners, faceIndices, faceNormals
def Hexahedron(self, name, radius):
"""
Create the mesh data and the mesh object of a Hexahedron of a given radius
@type name: string
        @param name: name of the Hexahedron object
        @type radius: float
        @param radius: radius of the embedding sphere
@rtype: Object, Mesh
@return: Hexahedron Object and Mesh
"""
v, f, n = self.hexahedron(radius)
ob, obme = self.createsNmesh(name, v, None, f)
return ob, obme
def octahedron(self, radius):
"""
        Create the mesh data of an octahedron of a given radius
        @type radius: float
        @param radius: radius of the embedding sphere
@rtype: array
@return: vertex,face, face normal of the octahedron
"""
faceIndices = (
(3, 5, 1),
(3, 1, 4),
(3, 4, 0),
(3, 0, 5),
(2, 1, 5),
(2, 4, 1),
(2, 0, 4),
(2, 5, 0),
)
a = 1 / sqrt(3)
faceNormals = (
(a, a, a),
(a, a, -a),
(-a, a, -a),
(-a, a, a),
(a, -a, a),
(a, -a, -a),
(-a, -a, -a),
(-a, -a, a),
)
_corners = (
(-radius, 0.0, 0.0),
(radius, 0.0, 0.0),
(0.0, -radius, 0.0),
(0.0, radius, 0.0),
(0.0, 0.0, -radius),
(0.0, 0.0, radius),
)
return _corners, faceIndices, faceNormals
def Octahedron(self, name, radius):
"""
Create the mesh data and the mesh object of a Octahedron of a given radius
@type name: string
        @param name: name of the Octahedron object
        @type radius: float
        @param radius: radius of the embedding sphere
@rtype: Object, Mesh
@return: Octahedron Object and Mesh
"""
v, f, n = self.octahedron(radius)
ob, obme = self.createsNmesh(name, v, None, f)
return ob, obme
def dodecahedron(self, radius):
"""
Create the mesh data of a dodecahedron of a given radius
@type radius: float
        @param radius: radius of the embedding sphere
@rtype: array
@return: vertex,face, face normal of the dodecahedron
"""
# from http://www.cs.umbc.edu/~squire/reference/polyhedra.shtml#dodecahedron
vertices = [] # ; /* 20 vertices with x, y, z coordinate */
Pi = math.pi
phiaa = 52.62263590
# /* the two phi angles needed for generation */
phibb = 10.81231754
r = radius
# /* any radius in which the polyhedron is inscribed */
phia = Pi * phiaa / 180.0
# /* 4 sets of five points each */
phib = Pi * phibb / 180.0
phic = Pi * (-phibb) / 180.0
phid = Pi * (-phiaa) / 180.0
the72 = Pi * 72.0 / 180
theb = the72 / 2.0 # ; /* pairs of layers offset 36 degrees */
the = 0.0
for i in range(5):
x = r * cos(the) * cos(phia)
y = r * sin(the) * cos(phia)
z = r * sin(phia)
vertices.append([x, y, z])
the = the + the72
the = 0.0
for i in range(5, 10): # (i=5; i<10; i++)
x = r * cos(the) * cos(phib)
y = r * sin(the) * cos(phib)
z = r * sin(phib)
vertices.append([x, y, z])
the = the + the72
the = theb
for i in range(10, 15): # for(i=10; i<15; i++)
x = r * cos(the) * cos(phic)
y = r * sin(the) * cos(phic)
z = r * sin(phic)
vertices.append([x, y, z])
the = the + the72
the = theb
for i in range(15, 20): # for(i=15; i<20; i++)
x = r * cos(the) * cos(phid)
y = r * sin(the) * cos(phid)
z = r * sin(phid)
vertices.append([x, y, z])
the = the + the72
# /* map vertices to 12 faces */
# these are the ngons
ngonsfaces = (
(0, 1, 2, 3, 4),
(0, 1, 6, 10, 5),
(1, 2, 7, 11, 6),
(2, 3, 8, 12, 7),
(3, 4, 9, 13, 8),
(4, 0, 5, 14, 9),
(15, 16, 11, 6, 10),
(16, 17, 12, 7, 11),
(17, 18, 13, 8, 12),
(18, 19, 14, 9, 13),
(19, 15, 10, 5, 14),
(15, 16, 17, 18, 19),
)
        # triangulate
faces = []
# wrong sense for some face
for i, f in enumerate(ngonsfaces):
# create three faces from one ngonsface
if i in [1, 2, 3, 4, 5, len(ngonsfaces) - 1]: # last one
faces.append([f[2], f[1], f[0]])
faces.append([f[2], f[0], f[3]])
faces.append([f[3], f[0], f[4]])
else:
faces.append([f[0], f[1], f[2]])
faces.append([f[3], f[0], f[2]])
faces.append([f[4], f[0], f[3]])
return vertices, faces, None
def Dodecahedron(self, name, radius):
"""
Create the mesh data and the mesh object of a Dodecahedron of a given radius
@type name: string
        @param name: name of the Dodecahedron object
        @type radius: float
        @param radius: radius of the embedding sphere
@rtype: Object, Mesh
@return: Dodecahedron Object and Mesh
"""
v, f, n = self.dodecahedron(radius)
ob, obme = self.createsNmesh(name, v, None, f)
return ob, obme
def icosahedron(self, radius):
"""
        Create the mesh data of an icosahedron of a given radius
        @type radius: float
        @param radius: radius of the embedding sphere
@rtype: array
@return: vertex,face, face normal of the icosahedron
"""
# from http://www.cs.umbc.edu/~squire/reference/polyhedra.shtml#dodecahedron
        vertices = []  # ; /* 12 vertices with x, y, z coordinate */
Pi = math.pi
phiaa = 26.56505
# /* the two phi angles needed for generation */
r = radius
# /* any radius in which the polyhedron is inscribed */
phia = Pi * phiaa / 180.0
# /* 2 sets of four points */
theb = Pi * 36.0 / 180.0
# /* offset second set 36 degrees */
the72 = Pi * 72.0 / 180
# /* step 72 degrees */
vertices.append([0.0, 0.0, r])
the = 0.0
for i in range(1, 6):
x = r * cos(the) * cos(phia)
y = r * sin(the) * cos(phia)
z = r * sin(phia)
vertices.append([x, y, z])
the = the + the72
the = theb
for i in range(6, 11): # for(i=10; i<15; i++)
x = r * cos(the) * cos(-phia)
y = r * sin(the) * cos(-phia)
z = r * sin(-phia)
vertices.append([x, y, z])
the = the + the72
vertices.append([0.0, 0.0, -r])
# /* map vertices to 12 faces */
# these are the ngons
faces = [
[0, 1, 2],
[0, 2, 3],
[0, 3, 4],
[0, 4, 5],
[0, 5, 1],
[7, 6, 11],
[8, 7, 11],
[9, 8, 11],
[10, 9, 11],
[6, 10, 11],
[6, 2, 1],
[7, 3, 2],
[8, 4, 3],
[9, 5, 4],
[10, 1, 5],
[6, 7, 2],
[7, 8, 3],
[8, 9, 4],
[9, 10, 5],
[10, 6, 1],
]
return vertices, faces, None
def Icosahedron(self, name, radius):
"""
Create the mesh data and the mesh object of a Icosahedron of a given radius
@type name: string
        @param name: name of the Icosahedron object
        @type radius: float
        @param radius: radius of the embedding sphere
@rtype: Object, Mesh
@return: Icosahedron Object and Mesh
"""
v, f, n = self.icosahedron(radius)
ob, obme = self.createsNmesh(name, v, None, f)
return ob, obme
def MidPoint(self, p1, p2):
return [(p1[0] + p2[0]) / 2.0, (p1[1] + p2[1]) / 2.0, (p1[2] + p2[2]) / 2.0]
def createUnitSphereData(self, iterations):
"""from http://paulbourke.net/geometry/circlesphere/csource2.c"""
i = 0
j = 0
n = 0
nstart = 0
vertices = []
p1 = self.normalize((1.0, 1.0, 1.0))
p2 = self.normalize((-1.0, -1.0, 1.0))
p3 = self.normalize((1.0, -1.0, -1.0))
p4 = self.normalize((-1.0, 1.0, -1.0))
vertices.extend([p1, p2, p3, p4])
allfacets = int(math.pow(4, iterations))
facets = numpy.zeros((allfacets, 3), "int")
facets[0] = [0, 1, 2] # p1; facets[0].p2 = p2; facets[0].p3 = p3;
facets[1] = [0, 1, 3] # .p1 = p2; facets[1].p2 = p1; facets[1].p3 = p4;
facets[2] = [1, 3, 2] # .p1 = p2; facets[2].p2 = p4; facets[2].p3 = p3;
facets[3] = [0, 2, 3] # .p1 = p1; facets[3].p2 = p3; facets[3].p3 = p4;
n = 4
for i in range(1, iterations): # (i=1;i<iterations;i++) {
nstart = n
for j in range(nstart): # (j=0;j<nstart;j++) {
# /* Create initially copies for the new facets */
facets[n] = facets[j]
facets[n + 1] = facets[j]
facets[n + 2] = facets[j]
# /* Calculate the midpoints */
p1 = self.MidPoint(vertices[facets[j][0]], vertices[facets[j][1]])
p2 = self.MidPoint(vertices[facets[j][1]], vertices[facets[j][2]])
p3 = self.MidPoint(vertices[facets[j][2]], vertices[facets[j][0]])
vertices.extend([p1, p2, p3])
ip1 = len(vertices) - 3
ip2 = len(vertices) - 2
ip3 = len(vertices) - 1
# /* Replace the current facet */
facets[j][1] = ip1
facets[j][2] = ip3
# /* Create the changed vertices in the new facets */
facets[n][0] = ip1
facets[n][2] = ip2
facets[n + 1][0] = ip3
facets[n + 1][1] = ip2
facets[n + 2][0] = ip1
facets[n + 2][1] = ip2
facets[n + 2][2] = ip3
n += 3
vertices = [self.normalize(v) for v in vertices]
return vertices, facets
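    # Hedged note and usage sketch: each iteration splits every facet of the
    # starting tetrahedron into four, so the facet count is 4**iterations
    # (4, 16, 64 for iterations = 1, 2, 3); midpoint vertices are appended
    # without sharing, so vertices are duplicated between neighbouring facets.
    #
    #   verts, facets = helper.createUnitSphereData(3)
    #   # len(facets) == 64 and every vertex lies on the unit sphere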
def unitSphere(self, name, iterations, radius):
"""
Create the mesh data and the mesh object of a Sphere of a given radius
@type name: string
@param name: name for the sphere
@type iterations: int
@param iterations: resolution
@type radius: float
@param radius: radius of the sphere
@rtype: Object, Mesh
@return: Sphere Object and Mesh
"""
v, f = self.createUnitSphereData(iterations)
ob, obme = self.createsNmesh(name, numpy.array(v) * radius, None, f)
return ob, obme
def reporthook(self, count, blockSize, totalSize):
percent = float(count * blockSize / totalSize)
self.progressBar(percent, "Downloading...")
if percent >= 1.0:
self.resetProgressBar()
def progressBar(self, progress, label):
"""
Update the progress bar status by progress value and label string
* overwrited by children class for each host
@type progress: Int/Float
@param progress: the new progress
@type label: string
@param label: the new message to put in the progress status
"""
pass
def resetProgressBar(self, value=None):
"""
Reset the Progress Bar, using value
* overwrited by children class for each host
"""
pass
# ===============================================================================
# Texture Mapping / UV
# ===============================================================================
def getUVs(self):
"""
        Get the UV coordinates of the given object
* overwrited by children class for each host
"""
pass
def setUVs(self):
"""
        Set the UV coordinates of the given object
* overwrited by children class for each host
"""
pass
def getUV(self, object, faceIndex, vertexIndex, perVertice=True):
"""
Return the UV coordinate of the given object according faceIndex and vertexIndex
* overwrited by children class for each host
@type object: string/hostObject
@param object: the object from which we want the UV
@type faceIndex: list
@param faceIndex: the liste of face index for which we want the UV
@type vertexIndex: list
@param vertexIndex: the liste of vertex index for which we want the UV
@type perVertice: Boolean
        @param perVertice: UV coordinate access per vertex or per face
@rtype: list
@return: the list of UV coordinates for the given object according faceIndex and vertexIndex
"""
pass
def setUV(self, object, faceIndex, vertexIndex, uv, perVertice=True):
"""
Update/Set the UV coordinate of the given object according faceIndex and vertexIndex
* overwrited by children class for each host
@type object: string/hostObject
@param object: the object from which we want to update the UV
@type faceIndex: list
@param faceIndex: the liste of face index for which we want to update the UV
@type vertexIndex: list
@param vertexIndex: the liste of vertex index for which we want to update the UV
@type uv: list
@param uv: the new uv coordinate [i,j]
@type perVertice: Boolean
        @param perVertice: UV coordinate access per vertex or per face
"""
pass
# ===============================================================================
# mesh
# ===============================================================================
def findClosestPoint(self, point, object, transform=True):
"""
        Find the closest vertex to the given 3d point (python implementation)
        @type point: array xyz
        @param point: the point to look up
        @type object: hostObj/hostMesh/String
        @param object: the object to scan for the closest vertex
        @type transform: Boolean
        @param transform: take in account the object transformation or not
        @rtype :list
        @return : the minimal distance found and the closest vertex in the given polygon
"""
# python lookup for closest point,probably not the fastest way
vertices = self.getMeshVertices(object)
if transform:
mat = self.getTransformation(object)
vertices = self.ApplyMatrix(vertices, self.ToMat(mat))
# bhtree?
mini = 9999.0
miniv = vertices[0]
for v in range(len(vertices)):
d = self.measure_distance(vertices[v], point)
if d < mini:
mini = d
miniv = vertices[v]
return mini, miniv
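    # Hedged usage sketch: `helper` and `surf` (an existing mesh object) are
    # assumptions for illustration; this is a brute-force O(n) scan over the
    # mesh vertices.
    #
    #   dist, vert = helper.findClosestPoint([0.0, 0.0, 0.0], surf, transform=True)
    #   # dist is the smallest distance found, vert the matching vertex coordinates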
def ToMat(self, mat):
"""
Return a python (4,4) matrice array from a host matrice
* overwrited by children class for each host
@type mat: host matrice array
@param mat: host matrice array
@rtype: matrice
@return: the converted matrice array
"""
return mat
def ToVec(self, v, pos=False):
"""
Return a python xyz array from a host xyz array/vector
* overwrited by children class for each host
@type v: host vector array
@param v: host vector array
@rtype: array
@return: the converted vector array
"""
return v
# ===============================================================================
# Advanced Objects
# ===============================================================================
def createsNmesh(
self,
name,
vertices,
vnormals,
faces,
smooth=False,
material=None,
proxyCol=False,
color=[
[1, 0, 0],
],
**kw
):
"""
Function that generate a Polygon object from the given vertices, face and normal.
material or color can be passed and apply to the created polygon.
Return the object and the mesh.
* overwrited by children class for each host
@type name: string
        @param name: name of the polygon
        @type vertices: list
        @param vertices: the list of vertices
        @type vnormals: list
        @param vnormals: the list of vertex normals
        @type faces: list
        @param faces: the list of faces
        @type smooth: Boolean
        @param smooth: smooth the mesh or not
@type material: hostMaterial
@param material: the material to apply to the mesh object
@type proxyCol: Boolean
@param proxyCol: special option for C4D DEPRECATED
@type color: list
@param color: color to apply to the mesh object
@type kw: dictionary
@param kw: dictionary of arg options, ie :
'parent' hostAp parent object
@rtype: hostObj/hostMesh
@return: the polygon object and data
"""
pass
def PointCloudObject(self, name, **kw):
"""
This function create a special polygon which have only point.
        See createsNmesh or particle if the host does not support point-only meshes
* overwrited by children class for each host
@type name: string
@param name: name of the pointCloud
@type kw: dictionary
@param kw: dictionary of arg options, ie :
'vertices' array of coordinates ;
'faces' int array of faces ;
'parent' hostAp parent object
@rtype: hostApp obj
@return: the polygon object and data
"""
return None, None
def addBone(
self,
i,
armData,
headCoord,
tailCoord,
roll=10,
hR=0.5,
tR=0.5,
dDist=0.4,
boneParent=None,
name=None,
editMode=True,
**kw
):
"""
Add one bone to an armature.
Optional function for creation of the armature
* overwrited by children class for each host
@type i: int
@param i: indice for the new bone
@type armData: armature host data
@param armData: the armature
@type headCoord: array xyz
@param headCoord: coordinate of the head of the bone
@type tailCoord: array xyz
@param tailCoord: coordinate of the tail of the bone
@type boneParent: bone
@param boneParent: the parent for the created bone
@type kw: dictionary
@param kw: dictionary of arg options
@rtype: bone
@return: the created bone
"""
eb = None
return eb
def updateArmature(self, basename, x, listeName=None, scn=None, root=None, **kw):
pass
def armature(self, name, coords, **kw):
"""
Create an armature along the given coordinates
* overwrited by children class for each host
@type name: string
@param name: name of the armature object
@type coords: list of array xyz
@param coords: coordinate foreach bone
@type kw: dictionary
@param kw: dictionary of arg options
@rtype: host Object,list of bone
@return: the created armature and the created bones
"""
print("not supported")
return None, None
# return armObj,bones
def oneMetaBall(self, metab, rad, coord, **kw):
"""
Add one ball to a metaball object.
Optional function for creation of the metaball
* overwrited by children class for each host
@type metab: metaball host data
@param metab: the metaball
@type rad: float
@param rad: radius for the new ball
@type coord: array xyz
@param coord: coordinate of the ball
@type kw: dictionary
@param kw: dictionary of arg options
@rtype: ball/None
@return: the ball or None
"""
pass
def metaballs(self, name, listePt, listeR, **kw):
"""
Create a metaballs along the given coordinates
* overwrited by children class for each host
@type name: string
@param name: name of the metaballs object
@type listePt: list of array xyz
        @param listePt: coordinate for each ball
@type listeR: list of float
@param listeR: radius foreach ball
@type kw: dictionary
@param kw: dictionary of arg options
@rtype: host Object,list of bone/metaball data
@return: the created metaballs,the created ball
"""
return None, None
# ==============================================================================
# Particle
# ==============================================================================
def particle(
self,
name,
coords,
group_name=None,
radius=None,
color=None,
hostmatrice=None,
**kw
):
"""
Create a particle system along the given coordinates
* overwrited by children class for each host
@type name: string
@param name: name of the particle system
@type coords: list of array xyz
@param coords: coordinate foreach particle
@type radius: list of float
@param radius: radius foreach particle
@type kw: dictionary
@param kw: dictionary of arg options
        @rtype: host particle Object
        @return: the created particle system
"""
pass
def updateParticles(self, newPos, PS=None, **kw):
"""
Update the particle system along the given new coordinates.
remove or add particle.
* overwrited by children class for each host
@type newPos: list of array xyz
@param newPos: coordinate foreach particle
@type PS: Particle object
@param PS: the particle system
@type kw: dictionary
@param kw: dictionary of arg options
"""
pass
def getParticles(self, name, **kw):
"""
Return a particle system along the given name
* overwrited by children class for each host
@type name: string
@param name: name of the particle system
@type kw: dictionary
@param kw: dictionary of arg options
@rtype: host Object particle data
@return: the created particle
"""
return None
def setParticulesPosition(self, newPos, PS=None, **kw):
"""
        Update the particle positions of a particle system along the given new coordinates
* overwrited by children class for each host
@type newPos: list of array xyz
@param newPos: coordinate foreach particle
@type PS: Particle object
@param PS: the particle system
@type kw: dictionary
@param kw: dictionary of arg options
"""
pass
def getParticulesPosition(self, PS=None, **kw):
"""
Get the particle position of a particle system
* overwrited by children class for each host
@type PS: Particle object
@param PS: the particle system
@type kw: dictionary
@param kw: dictionary of arg options
@rtype: list of array xyz
@return: coordinate foreach particle
"""
pass
# ===============================================================================
# Mesh Function
# ===============================================================================
def getMeshVertice(self, poly, vertex_indice, **kw):
"""
        Get one vertex of the given polygon object data
* overwrited by children class for each host
@type poly: hostObject
@param poly: the object from which we want the vertices
@type vertex_indice: int
        @param vertex_indice: the indice of the vertex to return
@type kw: dictionary
@param kw: dictionary of arg options
@rtype: list of float xyz
@return: coordinate for one vertice of the given object
"""
pass
def getMeshVertices(self, poly, selected=False, **kw):
"""
Get the vertices of the given polygon object data
* overwrited by children class for each host
@type poly: hostObject
@param poly: the object from which we want the vertices
@type selected: Boolean
@param selected: return only the selected vertices or not
@type kw: dictionary
@param kw: dictionary of arg options
@rtype: list of array xyz
@return: coordinate for all or for selected vertices of the given object
"""
pass
def getMeshNormales(self, poly, selected=False, **kw):
"""
Get the normals of the given polygon object data
* overwrited by children class for each host
@type poly: hostObject
@param poly: the object from which we want the normals
@type selected: Boolean
@param selected: return only the selected normals or not
@type kw: dictionary
@param kw: dictionary of arg options
@rtype: list of array xyz
@return: coordinate for all or for selected normals of the given object
"""
pass
def getMeshEdge(self, hostedge, **kw):
"""
Convert the host edge in python format
* overwrited by children class for each host
@type hostedge: hostEdge
        @param hostedge: the edge to convert to python
@type kw: dictionary
@param kw: dictionary of arg options
@rtype: list
@return: the edge in python format
"""
pass
def getMeshEdges(self, poly, selected=False, **kw):
"""
Get the edges of the given polygon object data
* overwrited by children class for each host
@type poly: hostObject
@param poly: the object from which we want the edges
@type selected: Boolean
@param selected: return only the selected edges or not
@type kw: dictionary
@param kw: dictionary of arg options
@rtype: list
@return: all or selected edges of the given object
"""
pass
def getFaceEdges(self, poly, faceindice, selected=False, **kw):
"""
Get the edges of the given face object data
* overwrited by children class for each host
@type poly: hostObject
@param poly: the object from which we want the edges of the face
@type faceindice: int
@param faceindice: the face indice
@type selected: Boolean
@param selected: return only the selected edges or not
@type kw: dictionary
@param kw: dictionary of arg options
@rtype: list
@return: all or selected edges of the given face
"""
pass
def getFace(self, hostface, r=True, **kw):
"""
        Convert the host face to python format
* overwrited by children class for each host
@type hostface: hostFace
@param hostface: the face to convert to python
@type kw: dictionary
@param kw: dictionary of arg options.
- r=True : Cinema4D reverse face order
@rtype: list
@return: the face in python format [i,j,k]
"""
pass
def getFaces(self, object, selected=False, **kw):
"""
Get the faces of the given polygon object data
* overwrited by children class for each host
@type object: hostObject
@param object: the object from which we want the faces
@type selected: Boolean
@param selected: return only the selected faces or not
@type kw: dictionary
@param kw: dictionary of arg options
@rtype: list
@return: all or selected faces of the given object
"""
pass
def getMeshFaces(self, poly, selected=False, **kw):
"""
Get the faces of the given polygon object data
* overwrited by children class for each host
@type poly: hostObject
@param poly: the object from which we want the faces
@type selected: Boolean
@param selected: return only the selected faces or not
@type kw: dictionary
@param kw: dictionary of arg options
@rtype: list
@return: all or selected faces of the given object
"""
return self.getFaces(poly, selected=selected, **kw)
def setMeshVertice(
self, poly, vertice_indice, vertice_coordinate, select=True, **kw
):
"""
Set the vertice for the given mesh data
* overwrited by children class for each host
@type poly: hostObject
@param poly: the object from which we want to set the vertice
@type vertice_indice: int
@param vertice_indice: vertice indice
@type vertice_coordinate: list<float>[3]
@param vertice_coordinate: x y z coordinate for vertice vertice_indice
@type select: Boolean
@param select: select status
@type kw: dictionary
@param kw: dictionary of arg options
"""
pass
def setMeshVertices(
self, poly, vertices_coordinates, vertices_indices=None, select=True, **kw
):
"""
Set the vertices for the given mesh data
* overwrited by children class for each host
@type poly: hostObject
@param poly: the object from which we want to set the vertices
@type vertices_coordinates: list<float>[3]
@param vertices_coordinates: x y z coordinates for all vertice or vertices_indices
@type vertices_indices: array<int>
@param vertices_indices: list of vertices indices
@type select: Boolean
@param select: select status
@type kw: dictionary
@param kw: dictionary of arg options
"""
pass
def setMeshFace(self, obj, faceindce, face_vertices_indices, select=True, **kw):
"""
        Set the given face of the given mesh data
* overwrited by children class for each host
@type obj: hostObject
@param obj: the object from which we want to set the face
@type faceindce: int
@param faceindce: the face indice
        @type face_vertices_indices: array<int>
        @param face_vertices_indices: list of the face's vertices indices
@type select: Boolean
@param select: select status
@type kw: dictionary
@param kw: dictionary of arg options
"""
pass
def setMeshFaces(self, obj, faces_vertices_indices, faces=None, select=True, **kw):
"""
Set the faces for the given mesh data
* overwrited by children class for each host
@type obj: hostObject
@param obj: the object from which we want to set the faces
@type faces_vertices_indices: list<array<int>>
@param faces_vertices_indices: list of faces vertices indices
@type faces: array<int>
@param faces: list of faces indices
@type select: Boolean
@param select: select status
@type kw: dictionary
@param kw: dictionary of arg options
"""
pass
def setMeshEdge(self, obj, edgeindce, edge_vertices_indices, select=True, **kw):
"""
Set the edge for the given mesh data
* overwrited by children class for each host
@type poly: hostObject
@param poly: the object from which we want to set the edge
@type edgeindce: int
        @param edgeindce: edge indice
@type edge_vertices_indices: array<int>
@param edge_vertices_indices: list of edge vertices indices
@type select: Boolean
@param select: select status
@type kw: dictionary
@param kw: dictionary of arg options
"""
pass
def setMeshEdges(self, obj, edges_vertices_indices, edges, select=True, **kw):
"""
        Set the edges selection status for the given mesh data
* overwrited by children class for each host
@type poly: hostObject
@param poly: the object from which we want to select the edges
@type edge_vertices_indices: list<array<int>>
@param edge_vertices_indices: list of edges vertices indices
@type edges: array<int>
@param edges: list of edges indices
@type select: Boolean
@param select: select status
@type kw: dictionary
@param kw: dictionary of arg options
"""
pass
def addMeshVertice(self, poly, vertice_coordinate, **kw):
"""
Add the vertice for the given mesh data
* overwrited by children class for each host
@type poly: hostObject
@param poly: the object from which we want to add the vertice
@type vertice_coordinate: list<float>[3]
@param vertice_coordinate: x y z coordinate for the new vertice
@type kw: dictionary
@param kw: dictionary of arg options
"""
pass
def addMeshVertices(self, poly, vertices_coordinates, vertices_indices=None, **kw):
"""
Add the vertices for the given mesh data
* overwrited by children class for each host
@type poly: hostObject
@param poly: the object from which we want to add the vertices
@type vertices_coordinates: list<float>[3]
@param vertices_coordinates: x y z coordinates for all vertice or vertices_indices
@type kw: dictionary
@param kw: dictionary of arg options
"""
pass
def addMeshFace(self, obj, face_vertices_indices, **kw):
"""
        Add a face to the given mesh data
* overwrited by children class for each host
@type obj: hostObject
@param obj: the object from which we want to add the face
        @type face_vertices_indices: array<int>
        @param face_vertices_indices: list of the face's vertices indices
@type kw: dictionary
@param kw: dictionary of arg options
"""
pass
def addMeshFaces(self, obj, faces_vertices_indices, **kw):
"""
Add the faces for the given mesh data
* overwrited by children class for each host
@type obj: hostObject
@param obj: the object from which we want to add the faces
@type faces_vertices_indices: list<array<int>>
@param faces_vertices_indices: list of faces vertices indices
@type kw: dictionary
@param kw: dictionary of arg options
"""
pass
def addMeshEdge(self, obj, edge_vertices_indices, **kw):
"""
        Add an edge to the given mesh data
* overwrited by children class for each host
@type poly: hostObject
@param poly: the object from which we want to add the edge
@type edge_vertices_indices: array<int>
@param edge_vertices_indices: list of edge vertices indices
@type kw: dictionary
@param kw: dictionary of arg options
"""
pass
def addMeshEdges(self, obj, edges_vertices_indices, **kw):
"""
        Add the edges to the given mesh data
* overwrited by children class for each host
@type poly: hostObject
@param poly: the object from which we want to add the edges
@type edge_vertices_indices: list<array<int>>
@param edge_vertices_indices: list of edges vertices indices
@type kw: dictionary
@param kw: dictionary of arg options
"""
pass
def selectVertice(self, poly, vertice_indice, select=True, **kw):
"""
        Set the vertex selection status for the given mesh data
* overwrited by children class for each host
@type poly: hostObject
@param poly: the object from which we want to select the vertice
@type vertice_indice: int
@param vertice_indice: vertice indice
@type select: Boolean
@param select: select status
@type kw: dictionary
@param kw: dictionary of arg options
"""
pass
def selectVertices(self, poly, vertices_indices, select=True, **kw):
"""
        Set the vertices selection status for the given mesh data
* overwrited by children class for each host
@type poly: hostObject
@param poly: the object from which we want to select the vertices
@type vertices_indices: array<int>
@param vertices_indices: list of vertices indices
@type select: Boolean
@param select: select status
@type kw: dictionary
@param kw: dictionary of arg options
"""
pass
def selectFace(self, obj, faceindce, select=True, **kw):
"""
        Set the selection status for the given face
* overwrited by children class for each host
@type poly: hostObject
@param poly: the object from which we want to select the face
@type faceindce: int
@param faceindce: the face indice
@type select: Boolean
@param select: select status
@type kw: dictionary
@param kw: dictionary of arg options
"""
pass
def selectFaces(self, obj, faces, select=True, **kw):
"""
        Set the faces selection status for the given mesh data
* overwrited by children class for each host
@type poly: hostObject
@param poly: the object from which we want to select the faces
@type faces: array<int>
@param faces: list of faces indices
@type select: Boolean
@param select: select status
@type kw: dictionary
@param kw: dictionary of arg options
"""
pass
def selectEdge(self, obj, edgeindce, select=True, **kw):
"""
        Set the edge selection status for the given mesh data
* overwrited by children class for each host
@type poly: hostObject
@param poly: the object from which we want to select the edge
@type edgeindce: int
        @param edgeindce: edge indice
@type select: Boolean
@param select: select status
@type kw: dictionary
@param kw: dictionary of arg options
"""
pass
def selectEdges(self, obj, edges, select=True, **kw):
"""
        Set the edges selection status for the given mesh data
* overwrited by children class for each host
@type poly: hostObject
@param poly: the object from which we want to select the edges
@type edges: array<int>
@param edges: list of edges indices
@type select: Boolean
@param select: select status
@type kw: dictionary
@param kw: dictionary of arg options
"""
pass
def deleteMeshVertices(self, poly, vertices=None, select=False, **kw):
"""
Delete the give vertices indices
* overwrited by children class for each host
@type poly: hostObject
@param poly: the object from which we want to delete the vertices
        @type vertices: array<int>
        @param vertices: list of vertices indices or None for all
        @type select: Boolean
        @param select: delete selected vertices
@type kw: dictionary
@param kw: dictionary of arg options
"""
pass
def deleteMeshFaces(self, poly, faces=None, select=False, **kw):
"""
Delete the give faces indices
* overwrited by children class for each host
@type poly: hostObject
@param poly: the object from which we want to delete the faces
@type faces: array<int>
@param faces: list of faces indices or None for all
@type select: Boolean
@param select: delete selected faces
@type kw: dictionary
@param kw: dictionary of arg options
"""
pass
def deleteMeshEdges(self, poly, edges=None, select=False, **kw):
"""
Delete the give edges indices
* overwrited by children class for each host
@type poly: hostObject
@param poly: the object from which we want to delete the edges
        @type edges: array<int>
        @param edges: list of edges indices or None for all
        @type select: Boolean
        @param select: delete selected edges
@type kw: dictionary
@param kw: dictionary of arg options
"""
pass
def getFacesfromV(self, vindice, faces):
# print vindice
ifaces = []
indfaces = []
for i, f in enumerate(faces):
# print (vindice, f)
if vindice in f:
# print "OK"
ifaces.append(f)
indfaces.append(i)
return indfaces, ifaces
def getFaceNormalsArea(self, vertices, faces):
"""compute the face normal of the compartment mesh"""
normals = []
vnormals = numpy.array(vertices[:])
areas = [] # added by Graham
face = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
v = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
for f in faces:
for i in range(3):
face[i] = vertices[f[i]]
for i in range(3):
v[0][i] = face[1][i] - face[0][i]
v[1][i] = face[2][i] - face[0][i]
normal = vcross(v[0], v[1])
n = vlen(normal)
if n == 0.0:
n1 = 1.0
else:
n1 = 1.0 / n
normals.append((normal[0] * n1, normal[1] * n1, normal[2] * n1))
# The area of a triangle is equal to half the magnitude of the cross product of two of its edges
for i in range(3):
vnormals[f[i]] = [normal[0] * n1, normal[1] * n1, normal[2] * n1]
areas.append(0.5 * vlen(normal)) # added by Graham
return vnormals, normals, areas
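    # Hedged worked example: for the single triangle (0,0,0), (1,0,0), (0,1,0)
    # the edge vectors are (1,0,0) and (0,1,0), their cross product is (0,0,1),
    # so the face normal is (0,0,1) and the area is 0.5 * |(0,0,1)| == 0.5.
    #
    #   vn, fn, areas = helper.getFaceNormalsArea(
    #       [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], [[0, 1, 2]]
    #   )
    #   # fn == [(0.0, 0.0, 1.0)] and areas == [0.5]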
def FixNormals(self, v, f, vn, fn=None):
newnormals = []
for indice, vertex in enumerate(v):
ifaces, faces = self.getFacesfromV(indice, f)
n = []
# print len(faces)
for i, af in enumerate(faces):
if fn is not None:
n.append(fn[ifaces[i]])
else:
for iv in af:
n.append(vn[iv])
nn = numpy.average(numpy.array(n), 0)
# print nn
newnormals.append(nn)
return newnormals
def normalize_v3(self, arr):
"""Normalize a numpy array of 3 component vectors shape=(n,3)"""
# return self.unit_vector(arr,axis=1)
# lens = numpy.linalg.norm(arr,axis=1)
lens = numpy.sqrt(arr[:, 0] ** 2 + arr[:, 1] ** 2 + arr[:, 2] ** 2)
arr[:, 0] /= lens
arr[:, 1] /= lens
arr[:, 2] /= lens
return arr
def normal_array(self, vertices, faces):
vertices = numpy.array(vertices)
faces = numpy.array(faces)
# Create a zeroed array with the same type and shape as our vertices i.e., per vertex normal
norm = numpy.zeros(vertices.shape, dtype=vertices.dtype)
# Create an indexed view into the vertex array using the array of three indices for triangles
tris = vertices[faces]
# Calculate the normal for all the triangles, by taking the cross product of the vectors v1-v0, and v2-v0 in each triangle
n = numpy.cross(tris[::, 1] - tris[::, 0], tris[::, 2] - tris[::, 0])
        # n is now an array of normals per triangle. The length of each normal depends
        # on the triangle it comes from; normalizing them (normalize_v3(n)) would make
        # each triangle contribute equally in the accumulation below.
# now we have a normalized array of normals, one per triangle, i.e., per triangle normals.
# But instead of one per triangle (i.e., flat shading), we add to each vertex in that triangle,
# the triangles' normal. Multiple triangles would then contribute to every vertex, so we need to normalize again afterwards.
# The cool part, we can actually add the normals through an indexed view of our (zeroed) per vertex normal array
norm[faces[:, 0]] += n
norm[faces[:, 1]] += n
norm[faces[:, 2]] += n
return self.normalize_v3(norm)
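    # Hedged usage sketch: per-vertex normals for two triangles forming a flat
    # quad in the XY plane; both triangle normals point along +Z, so every
    # vertex normal is expected to come out as (0, 0, 1).
    #
    #   verts = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0]]
    #   vnorm = helper.normal_array(verts, [[0, 1, 2], [0, 2, 3]])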
def matrixToVNMesh(
self, name, matrices, vector=[0.0, 1.0, 0.0], transpose=True, **kw
): # edge size ?
"""convert liste of matrix (rotation/position) to point mesh in order to use cloner / dupliVert"""
pass
def matrixToFacesMesh(
self, name, matrices, vector=[0.0, 1.0, 0.0], transpose=True, **kw
): # edge size ?
"""convert liste of matrix (rotation/position) to quad mesh in order to use cloner / dupliFace"""
pass
def toggle(self, variable, value):
pass
# Quad
# 0 1
# 3 2
# Tri
# 0 1 3
# 3 1 2
# OR
# Quad A B C D
# Triangles A B C / A C D
# Triangles A B D / D B C (compare A-C B-D)
def triangulateFace(self, f, vertices):
A = vertices[f[0]]
B = vertices[f[1]]
C = vertices[f[2]]
D = vertices[f[3]]
a = self.measure_distance(A, C)
b = self.measure_distance(B, D)
if a < b:
return [[f[0], f[1], f[2]], [f[0], f[2], f[3]]]
else:
            return [[f[0], f[1], f[3]], [f[3], f[1], f[2]]]
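    # Hedged example of the diagonal choice above, with the non-rectangular quad
    # A(0,0,0) B(2,0,0) C(3,1,0) D(0,1,0): |A-C| > |B-D|, so the quad is split
    # along the B-D diagonal into the triangles (A, B, D) and (D, B, C).
    #
    #   tris = helper.triangulateFace(
    #       [0, 1, 2, 3], [[0, 0, 0], [2, 0, 0], [3, 1, 0], [0, 1, 0]]
    #   )
    #   # tris == [[0, 1, 3], [3, 1, 2]]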
def triangulateFaceArray(self, faces):
trifaces = []
for f in faces:
if len(f) == 2:
trifaces.append([f[0], f[1], f[1]])
elif len(f) == 3:
trifaces.append(f)
elif len(f) == 4: # triangulate
f1 = [f[0], f[1], f[3]]
f2 = [f[3], f[1], f[2]]
trifaces.extend([f1, f2])
return trifaces
# from pymunk.vec2d import Vec2d
# "hidden" functions
def _is_corner(self, a, b, c):
# returns if point b is an outer corner
return not (is_clockwise([a, b, c]))
def _point_in_triangle(self, p, a, b, c):
# measure area of whole triangle
whole = abs(calc_area([a, b, c]))
# measure areas of inner triangles formed by p
parta = abs(calc_area([a, b, p]))
partb = abs(calc_area([b, c, p]))
partc = abs(calc_area([c, a, p]))
# allow for potential rounding error in area calcs
# (not that i've encountered one yet, but just in case...)
thresh = 0.0000001
# return if the sum of the inner areas = the whole area
return (parta + partb + partc) < (whole + thresh)
def _get_ear(self, poly):
count = len(poly)
# not even a poly
if count < 3:
return [], []
# only a triangle anyway
if count == 3:
return poly, []
# start checking points
for i in range(count):
ia = (i - 1) % count
ib = i
ic = (i + 1) % count
a = poly[ia]
b = poly[ib]
c = poly[ic]
# is point b an outer corner?
if self._is_corner(a, b, c):
# are there any other points inside triangle abc?
valid = True
for j in range(count):
if not (j in (ia, ib, ic)):
p = poly[j]
if self._point_in_triangle(p, a, b, c):
valid = False
# if no such point found, abc must be an "ear"
if valid:
remaining = []
for j in range(count):
if j != ib:
remaining.append(poly[j])
# return the ear, and what's left of the polygon after the ear is clipped
return [a, b, c], remaining
# no ear was found, so something is wrong with the given poly (not anticlockwise? self-intersects?)
return [], []
# major functions
def _triangulate(self, poly):
"""
triangulates poly and returns a list of triangles
poly: list of points that form an anticlockwise polygon (self-intersecting polygons won't work, results are... undefined)
"""
triangles = []
remaining = poly[:]
# while the poly still needs clipping
while len(remaining) > 2:
# rotate the list:
            # this stops the starting point from getting stale, which sometimes
            # produces a "fan" of polys and often leads to poor convexisation
remaining = remaining[1:] + remaining[:1]
# clip the ear, store it
ear, remaining = self._get_ear(remaining)
if ear != []:
triangles.append(ear)
# return stored triangles
return triangles
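    # Hedged usage sketch of the ear clipping above, assuming the module-level
    # calc_area / is_clockwise helpers accept plain 2d tuples; for an
    # anticlockwise unit square two triangles are expected back.
    #
    #   tris = helper._triangulate([(0, 0), (1, 0), (1, 1), (0, 1)])
    #   # len(tris) == 2, each entry being a list of three corner points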
def triangulate(self, poly, **kw):
"""
        Convert the selected quad faces of the given polygon object to triangles.
* overwrited by children class for each host
@type poly: hostObj
@param poly: the object to triangulate
@type kw: dictionary
@param kw: dictionary of arg options
"""
pass
def recalc_normals(self, obj, **kw):
"""
        Recalculate the mesh normals (outside/inside)
* overwrited by children class for each host
@type poly: hostObj
@param poly: the object to change the normal
@type kw: dictionary
@param kw: dictionary of arg options
"""
pass
def IndexedPolgonsToTriPoints(self, geom, transform=True, **kw):
"""
        Convert DejaVu IndexedPolygon vertices data into a python list.
* overwrited by children class for each host
@type geom: DejaVu IndexedPolygon
@param geom: the object to triangulate
@type transform: Boolean
@param transform: apply the object transformation to the vertices
@type kw: dictionary
@param kw: dictionary of arg options
@rtype : list
@return : the vertices data as list
"""
verts = self.getMeshVertices(geom)
tri = self.getMeshFaces(geom)
assert tri.shape[1] == 3
if transform:
mat = self.getTransformation(geom)
verts = self.ApplyMatrix(verts, mat)
triv = []
for t in tri:
triv.append([verts[i].tolist() for i in t])
return triv
# ==============================================================================
# Object Properties function
# ==============================================================================
# object or scene property ?
def getPropertyObject(self, obj, key=["radius"]):
"""
Return the property "key" of the object obj
* overwrited by children class for each host
@type obj: host Obj
@param obj: the object that contains the property
@type key: string
@param key: name of the property
@rtype : int, float, str, dict, list
@return : the property value
"""
return None
def setPropertyObject(self, obj, key, value):
"""
Create a property "key" for the object obj and set his value
* overwrited by children class for each host
@type obj: host Obj
@param obj: the object that contains the property
@type key: string
@param key: name of the property
@type value: int, float, str, dict, list
@param value: the value of the property
"""
pass
# ==============================================================================
# Properties function
# ==============================================================================
# object or scene property ?
def getProperty(self, obj, key):
"""
Return the property "key" of the object obj
* overwrited by children class for each host
@type obj: host Obj
@param obj: the object that contains the property
@type key: string
@param key: name of the property
@rtype : int, float, str, dict, list
@return : the property value
"""
return None
def setProperty(self, obj, key, value):
"""
Create a property "key" for the object obj and set his value
* overwrited by children class for each host
@type obj: host Obj
@param obj: the object that contains the property
@type key: string
@param key: name of the property
@type value: int, float, str, dict, list
@param value: the value of the property
"""
pass
# ===============================================================================
# Advanced Function
# ===============================================================================
def pathDeform(self, *args, **kw):
"""
        Should create a modifier for the given object using the given path/curve/spline
TO DO.
* overwrited by children class for each host
@type args: list
@param args: list of arguments options
@type kw: dictionary
@param kw: dictionary of arguments options
"""
pass
def updatePathDeform(self, *args, **kw):
"""
        Should update the modifier for the given object using the given path/curve/spline
TO DO.
* overwrited by children class for each host
@type args: list
@param args: list of arguments options
@type kw: dictionary
@param kw: dictionary of arguments options
"""
pass
# ===============================================================================
# numpy dependant function
# we should have alternative from the host
# overwrite if possible by the host math module
# ===============================================================================
def vector_norm(self, data, axis=None, out=None):
"""
        Return length, i.e. euclidean norm, of ndarray along axis.
>>> v = numpy.random.random(3)
>>> n = vector_norm(v)
>>> numpy.allclose(n, numpy.linalg.norm(v))
True
>>> v = numpy.random.rand(6, 5, 3)
>>> n = vector_norm(v, axis=-1)
>>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=2)))
True
>>> n = vector_norm(v, axis=1)
>>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))
True
>>> v = numpy.random.rand(5, 4, 3)
>>> n = numpy.empty((5, 3), dtype=numpy.float64)
>>> vector_norm(v, axis=1, out=n)
>>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))
True
>>> vector_norm([])
0.0
>>> vector_norm([1.0])
1.0
"""
data = numpy.array(data, dtype=numpy.float64, copy=True)
if out is None:
if data.ndim == 1:
return math.sqrt(numpy.dot(data, data))
data *= data
out = numpy.atleast_1d(numpy.sum(data, axis=axis))
numpy.sqrt(out, out)
return out
else:
data *= data
numpy.sum(data, axis=axis, out=out)
numpy.sqrt(out, out)
@classmethod
def unit_vector(self, data, axis=None, out=None):
"""
        Return ndarray normalized by length, i.e. euclidean norm, along axis.
>>> v0 = numpy.random.random(3)
>>> v1 = unit_vector(v0)
>>> numpy.allclose(v1, v0 / numpy.linalg.norm(v0))
True
>>> v0 = numpy.random.rand(5, 4, 3)
>>> v1 = unit_vector(v0, axis=-1)
>>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=2)), 2)
>>> numpy.allclose(v1, v2)
True
>>> v1 = unit_vector(v0, axis=1)
>>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=1)), 1)
>>> numpy.allclose(v1, v2)
True
>>> v1 = numpy.empty((5, 4, 3), dtype=numpy.float64)
>>> unit_vector(v0, axis=1, out=v1)
>>> numpy.allclose(v1, v2)
True
>>> list(unit_vector([]))
[]
>>> list(unit_vector([1.0]))
[1.0]
"""
if out is None:
data = numpy.array(data, dtype=numpy.float64, copy=True)
if data.ndim == 1:
data /= math.sqrt(numpy.dot(data, data))
return data
else:
if out is not data:
out[:] = numpy.array(data, copy=False)
data = out
length = numpy.atleast_1d(numpy.sum(data * data, axis))
numpy.sqrt(length, length)
if axis is not None:
length = numpy.expand_dims(length, axis)
data /= length
if out is None:
return data
def getAngleAxis(self, vec1, vec2):
"""
Return angle (radians) and axis of rotation between two given vectors.
"""
angle = self.angle_between_vectors(vec1, vec2)
cr = numpy.cross(vec1, vec2)
axis = self.unit_vector(cr)
return angle, axis
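    # Hedged example: for the +X and +Y axes the angle is pi/2 and the rotation
    # axis is +Z (the normalized cross product of the two vectors).
    #
    #   angle, axis = helper.getAngleAxis([1.0, 0.0, 0.0], [0.0, 1.0, 0.0])
    #   # angle ~= math.pi / 2 and axis ~= [0.0, 0.0, 1.0]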
def scalar(self, v1, v2):
"""
calculates the scalar product of two vectors
v1 and v2 are numpy.array objects.
returns a float for a one-dimensional array.
"""
return numpy.sum(v1 * v2)
def dihedral(self, v1, v2, v3, v4):
"""
Returns a float value for the dihedral angle between
the four vectors. They define the bond for which the
torsion is calculated (~) as:
V1 - V2 ~ V3 - V4
The vectors vec1 .. vec4 can be array objects, lists or tuples of length
three containing floats.
For Scientific.geometry.Vector objects the behavior is different
on Windows and Linux. Therefore, the latter is not a featured input type
even though it may work.
        If the dihedral angle can't be calculated (because vectors are collinear),
        the function returns None.
"""
# create array instances.
# numpy.array
# v1,v2,v3,v4 =create_vectors(vec1,vec2,vec3,vec4)
all_vecs = [v1, v2, v3, v4]
# rule out that two of the atoms are identical
# except the first and last, which may be.
for i in range(len(all_vecs) - 1):
for j in range(i + 1, len(all_vecs)):
if i > 0 or j < 3: # exclude the (1,4) pair
equals = all_vecs[i] == all_vecs[j]
if equals.all():
return None # raise DihedralGeometryError(\
# "Vectors #%i and #%i may not be identical!"%(i,j))
# calculate vectors representing bonds
v12 = v2 - v1
v23 = v3 - v2
v34 = v4 - v3
# calculate vectors perpendicular to the bonds
normal1 = numpy.cross(v12, v23)
normal2 = numpy.cross(v23, v34)
# check for linearity
if numpy.linalg.norm(normal1) == 0 or numpy.linalg.norm(normal2) == 0:
return None # raise DihedralGeometryError(\
# "Vectors are in one line; cannot calculate normals!")
# normalize them to length 1.0
normal1 = normal1 / numpy.linalg.norm(normal1)
normal2 = normal2 / numpy.linalg.norm(normal2)
# calculate torsion and convert to degrees
torsion = math.degrees(
self.angle_between_vectors(normal1, normal2)
) # * 180.0/pi
# take into account the determinant
# (the determinant is a scalar value distinguishing
# between clockwise and counter-clockwise torsion.
if self.scalar(normal1, v34) >= 0:
return torsion
else:
torsion = 360 - torsion
if torsion == 360:
torsion = 0.0
return torsion
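    # Hedged worked example: with v1=(1,0,0), v2=(0,0,0), v3=(0,1,0) and
    # v4=(0,1,-1) the two bond-plane normals are perpendicular, so the torsion
    # is expected to come out around 90 degrees.
    #
    #   t = helper.dihedral(numpy.array([1.0, 0.0, 0.0]),
    #                       numpy.array([0.0, 0.0, 0.0]),
    #                       numpy.array([0.0, 1.0, 0.0]),
    #                       numpy.array([0.0, 1.0, -1.0]))
    #   # t ~= 90.0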
@classmethod
def rotation_matrix(self, angle, direction, point=None, trans=None):
"""
Return matrix to rotate about axis defined by point and direction.
>>> R = rotation_matrix(math.pi/2.0, [0, 0, 1], [1, 0, 0])
>>> numpy.allclose(numpy.dot(R, [0, 0, 0, 1]), [ 1., -1., 0., 1.])
True
>>> angle = (random.random() - 0.5) * (2*math.pi)
>>> direc = numpy.random.random(3) - 0.5
>>> point = numpy.random.random(3) - 0.5
>>> R0 = rotation_matrix(angle, direc, point)
>>> R1 = rotation_matrix(angle-2*math.pi, direc, point)
>>> is_same_transform(R0, R1)
True
>>> R0 = rotation_matrix(angle, direc, point)
>>> R1 = rotation_matrix(-angle, -direc, point)
>>> is_same_transform(R0, R1)
True
>>> I = numpy.identity(4, numpy.float64)
>>> numpy.allclose(I, rotation_matrix(math.pi*2, direc))
True
>>> numpy.allclose(2., numpy.trace(rotation_matrix(math.pi/2,
... direc, point)))
True
"""
sina = math.sin(angle)
cosa = math.cos(angle)
direction = self.unit_vector(direction[:3])
# rotation matrix around unit vector
R = numpy.array(
((cosa, 0.0, 0.0), (0.0, cosa, 0.0), (0.0, 0.0, cosa)), dtype=numpy.float64
)
R += numpy.outer(direction, direction) * (1.0 - cosa)
direction *= sina
R += numpy.array(
(
(0.0, -direction[2], direction[1]),
(direction[2], 0.0, -direction[0]),
(-direction[1], direction[0], 0.0),
),
dtype=numpy.float64,
)
M = numpy.identity(4)
M[:3, :3] = R
if point is not None:
# rotation not around origin
point = numpy.array(
[point[0], point[1], point[2]], dtype=numpy.float64, copy=False
)
M[:3, 3] = point - numpy.dot(R, point)
if trans is not None:
M[:3, 3] = numpy.array(
[trans[0], trans[1], trans[2]], dtype=numpy.float64, copy=False
)
return M
def rotate_about_axis(self, B, theta, axis=2):
"""
from http://480.sagenb.org/home/pub/20/
        Create the rotation matrix for an angle theta around the given axis, and apply it to the given point (B).
Rotation about
x-axis corresponds to axis==0,
y-axis corresponds to axis==1,
z-axis corresponds to axis==2,
"""
M = numpy.array([])
if axis == 0:
M = numpy.array(
[[1, 0, 0], [0, cos(theta), -sin(theta)], [0, sin(theta), cos(theta)]],
dtype=numpy.float64,
)
elif axis == 1:
M = numpy.array(
[[cos(theta), 0, -sin(theta)], [0, 1, 0], [sin(theta), 0, cos(theta)]],
dtype=numpy.float64,
)
elif axis == 2:
M = numpy.array(
[[cos(theta), -sin(theta), 0], [sin(theta), cos(theta), 0], [0, 0, 1]],
dtype=numpy.float64,
)
# Numpy makes large floating point matrix manipulations easy
return numpy.dot(M, B)
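    # Hedged example: rotating the point (1, 0, 0) by pi/2 about the z axis
    # (axis == 2) is expected to give approximately (0, 1, 0).
    #
    #   p = helper.rotate_about_axis(numpy.array([1.0, 0.0, 0.0]), math.pi / 2.0, axis=2)
    #   # p ~= [0.0, 1.0, 0.0]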
def angle_between_vectors(self, v0, v1, directed=True, axis=0):
"""
Return the angle between vectors.
If directed is False, the input vectors are interpreted as undirected axes,
i.e. the maximum angle is pi/2.
>>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3])
>>> numpy.allclose(a, math.pi)
True
>>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3], directed=False)
>>> numpy.allclose(a, 0)
True
>>> v0 = [[2, 0, 0, 2], [0, 2, 0, 2], [0, 0, 2, 2]]
>>> v1 = [[3], [0], [0]]
>>> a = angle_between_vectors(v0, v1)
>>> numpy.allclose(a, [0., 1.5708, 1.5708, 0.95532])
True
>>> v0 = [[2, 0, 0], [2, 0, 0], [0, 2, 0], [2, 0, 0]]
>>> v1 = [[0, 3, 0], [0, 0, 3], [0, 0, 3], [3, 3, 3]]
>>> a = angle_between_vectors(v0, v1, axis=1)
>>> numpy.allclose(a, [1.5708, 1.5708, 1.5708, 0.95532])
True
"""
v0 = numpy.array(v0, dtype=numpy.float64, copy=False)
v1 = numpy.array(v1, dtype=numpy.float64, copy=False)
dot = numpy.sum(v0 * v1, axis=axis)
dot /= self.vector_norm(v0, axis=axis) * self.vector_norm(v1, axis=axis)
return numpy.arccos(dot if directed else numpy.fabs(dot))
def rotVectToVect(self, vect1, vect2, i=None):
"""returns a 4x4 transformation that will align vect1 with vect2
vect1 and vect2 can be any vector (non-normalized)
"""
v1x, v1y, v1z = vect1
v2x, v2y, v2z = vect2
# normalize input vectors
norm = 1.0 / sqrt(v1x * v1x + v1y * v1y + v1z * v1z)
v1x *= norm
v1y *= norm
v1z *= norm
norm = 1.0 / sqrt(v2x * v2x + v2y * v2y + v2z * v2z)
v2x *= norm
v2y *= norm
v2z *= norm
# compute cross product and rotation axis
cx = v1y * v2z - v1z * v2y
cy = v1z * v2x - v1x * v2z
cz = v1x * v2y - v1y * v2x
# normalize
nc = sqrt(cx * cx + cy * cy + cz * cz)
if nc == 0.0:
return [
[1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0],
]
cx /= nc
cy /= nc
cz /= nc
# compute angle of rotation
if nc < 0.0:
if i is not None:
print("truncating nc on step:", i, nc)
nc = 0.0
elif nc > 1.0:
if i is not None:
print("truncating nc on step:", i, nc)
nc = 1.0
alpha = math.asin(nc)
if (v1x * v2x + v1y * v2y + v1z * v2z) < 0.0:
alpha = math.pi - alpha
# rotate about nc by alpha
# Compute 3x3 rotation matrix
ct = cos(alpha)
ct1 = 1.0 - ct
st = sin(alpha)
rot = [
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
]
rv2x, rv2y, rv2z = cx * cx, cy * cy, cz * cz
rv3x, rv3y, rv3z = (1.0 - rv2x) * ct, (1.0 - rv2y) * ct, (1.0 - rv2z) * ct
rot[0][0] = rv2x + rv3x
rot[1][1] = rv2y + rv3y
rot[2][2] = rv2z + rv3z
rot[3][3] = 1.0
rv4x, rv4y, rv4z = cx * st, cy * st, cz * st
rot[0][1] = cx * cy * ct1 - rv4z
rot[1][2] = cy * cz * ct1 - rv4x
rot[2][0] = cz * cx * ct1 - rv4y
rot[1][0] = cx * cy * ct1 + rv4z
rot[2][1] = cy * cz * ct1 + rv4x
rot[0][2] = cz * cx * ct1 + rv4y
return rot
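    # Hedged illustration of rotVectToVect (the `helper` instance name is assumed):
    # aligning +X with +Y is a 90-degree rotation about +Z, so the upper-left 3x3
    # block of the returned matrix is [[0, -1, 0], [1, 0, 0], [0, 0, 1]].
    #
    #   rot = helper.rotVectToVect([1.0, 0.0, 0.0], [0.0, 1.0, 0.0])
    #   numpy.dot(numpy.array(rot)[:3, :3], [1.0, 0.0, 0.0])   # -> [0.0, 1.0, 0.0]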
def ApplyMatrix(self, coords, mat):
"""
Apply the 4x4 transformation matrix to the given list of 3d points.
@type coords: array
        @param coords: the list of points to transform.
@type mat: 4x4array
@param mat: the matrix to apply to the 3d points
@rtype: array
@return: the transformed list of 3d points
"""
        # homogeneous 4x4 matrix
mat = numpy.array(mat)
coords = numpy.array(coords)
one = numpy.ones((coords.shape[0], 1), coords.dtype.char)
c = numpy.concatenate((coords, one), 1)
return numpy.dot(c, numpy.transpose(mat))[:, :3]
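    # Hedged usage sketch for ApplyMatrix (the `helper` instance name is assumed):
    # with a pure translation matrix the points are simply shifted.
    #
    #   T = numpy.identity(4)
    #   T[:3, 3] = [1.0, 2.0, 3.0]
    #   helper.ApplyMatrix([[0.0, 0.0, 0.0]], T)   # -> [[1.0, 2.0, 3.0]]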
def Decompose4x4(self, matrix):
"""
Takes a matrix in shape (16,) in OpenGL form (sequential values go
down columns) and decomposes it into its rotation (shape (16,)),
translation (shape (3,)), and scale (shape (3,))
@type matrix: 4x4array
@param matrix: the matrix to decompose
@rtype: list of array
        @return: the decomposition of the matrix, i.e. rotation, translation, scale
"""
m = matrix
transl = numpy.array((m[12], m[13], m[14]), "f")
scale0 = numpy.sqrt(m[0] * m[0] + m[4] * m[4] + m[8] * m[8])
scale1 = numpy.sqrt(m[1] * m[1] + m[5] * m[5] + m[9] * m[9])
scale2 = numpy.sqrt(m[2] * m[2] + m[6] * m[6] + m[10] * m[10])
scale = numpy.array((scale0, scale1, scale2)).astype("f")
mat = numpy.reshape(m, (4, 4))
rot = numpy.identity(4).astype("f")
rot[:3, :3] = mat[:3, :3].astype("f")
rot[:, 0] = (rot[:, 0] / scale0).astype("f")
rot[:, 1] = (rot[:, 1] / scale1).astype("f")
rot[:, 2] = (rot[:, 2] / scale2).astype("f")
rot.shape = (16,)
# rot1 = rot.astype('f')
return rot, transl, scale
def getTubePropertiesMatrix(self, coord1, coord2):
"""
        From two points, return the length and the orientation from one to the other.
        This function is used to build a cylinder from two points (see the oneCylinder function)
>>> coord1 = [1.0,0.0,0.0]
>>> coord2 = [2.0,0.0,0.0]
>>> distance,matrix = helper.getTubePropertiesMatrix(coord1,coord2)
>>> helper.setObjectMatrix(obj,matrix)
@type coord1: vector
@param coord1: first point
@type coord2: vector
@param coord2: second point
        @rtype: tuple
        @return: length and a 4x4 rotation matrix
        """
        # needs to be overwritten in C4D
x1 = float(coord1[0])
y1 = float(coord1[1])
z1 = float(coord1[2])
x2 = float(coord2[0])
y2 = float(coord2[1])
z2 = float(coord2[2])
v, length = self.measure_distance(coord2, coord1, vec=True)
vx, vy, vz = v
offset = numpy.array(
[float(x1 + x2) / 2, float(y1 + y2) / 2, float(z1 + z2) / 2]
)
v_2 = self.unit_vector(v, axis=1)
v_1 = numpy.array([float(0.0), float(1.0), float(2.0)])
v_3 = numpy.cross(v_1, v_2)
v_3 = self.unit_vector(v_3, axis=1)
v_1 = numpy.cross(v_2, v_3)
v_1 = self.unit_vector(v_1, axis=1)
M = numpy.identity(4)
M[0, :3] = v_1
M[1, :3] = v_2
M[2, :3] = v_3
M[3, :3] = offset
return length, numpy.array(M).transpose()
def getCenter(self, coords):
"""
        Get the center of mass from a 3d array of x,y,z coordinates.
        @type coords: list/array
@param coords: the coordinates
@rtype: list/array
@return: the center of mass of the coordinates
"""
# if len(coords) == 3 :
# if type(coords[0]) is int or type(coords[0]) is float :
# coords = [coords,]
coords = numpy.array(coords) # self.allAtoms.coords
center = sum(coords) / (len(coords) * 1.0)
center = list(center)
for i in range(3):
center[i] = round(center[i], 4)
# print "center =", center
return center
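    # Hedged example for getCenter (the `helper` instance name is assumed):
    # the centroid of a triangle with vertices at (0,0,0), (3,0,0) and (0,3,0).
    #
    #   helper.getCenter([[0.0, 0.0, 0.0], [3.0, 0.0, 0.0], [0.0, 3.0, 0.0]])
    #   # -> [1.0, 1.0, 0.0]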
# ===============================================================================
# animation features
# ===============================================================================
def setKeyFrame(self, obj, **kw):
"""
        Set a keyframe for the current object
@type obj: hostObj
@param obj: the object
@type kw: dictionary
@param kw: dictionary of arg options
"""
pass
def setFrame(self, value, **kw):
"""
Set the current frame
@type value: int/float
@param value: the desired frame
@type kw: dictionary
@param kw: dictionary of arg options
"""
pass
def frameAdvanced(self, doc=None, duration=None, display=False, cb=None, **kw):
"""
        Play frames for a specified duration, with or without display and with or without a callback
@type doc: document/scene
@param doc: the desired scene
@type duration: float
        @param duration: how long should we play
@type display: bool
@param display: toggle the update of the viewport
@type cb: function
@param cb: the callback function to execute at every frame
@type kw: dictionary
@param kw: dictionary of arg options
"""
def animationStart(self, doc=None, forward=True, duration=None, **kw):
"""
        Play frames for a specified duration
@type doc: document/scene
@param doc: the desired scene
@type duration: float
        @param duration: how long should we play
@type forward: bool
@param forward: toggle the direction of the animation
@type kw: dictionary
@param kw: dictionary of arg options
"""
def animationStop(self, doc=None, **kw):
"""
Stop the animation
@type doc: document/scene
@param doc: the desired scene
@type kw: dictionary
@param kw: dictionary of arg options
"""
# ==============================================================================
# Dynamics simulation
# ==============================================================================
def setRigidBody(
self,
obj,
shape="auto",
child=False,
dynamicsBody="on",
dynamicsLinearDamp=0.0,
dynamicsAngularDamp=0.0,
massClamp=0.0,
rotMassClamp=1.0,
**kw
):
"""
        Set the current object as a rigid body
@type doc: document/scene
@param doc: the desired scene
@type kw: dictionary
@param kw: dictionary of arg options
"""
pass
return None
def setSoftBody(self, obj, **kw):
"""
        Set the current object as a soft body
@type doc: document/scene
@param doc: the desired scene
@type kw: dictionary
@param kw: dictionary of arg options
"""
pass
return None
def updateSpring(
self,
spring,
targetA=None,
tragetB=None,
rlength=0.0,
stifness=1.0,
damping=1.0,
**kw
):
"""
Update the spring control
@type doc: document/scene
@param doc: the desired scene
@type kw: dictionary
@param kw: dictionary of arg options
"""
pass
return None
def createSpring(
self,
name,
targetA=None,
tragetB=None,
rlength=0.0,
stifness=1.0,
damping=1.0,
parent=None,
**kw
):
"""
        Create a spring between two physics objects
@type doc: document/scene
@param doc: the desired scene
@type kw: dictionary
@param kw: dictionary of arg options
"""
pass
return None
def addConstraint(self, obj, type="spring", target=None, **kw):
"""
Add a constraint to the given object
@type doc: document/scene
@param doc: the desired scene
@type kw: dictionary
@param kw: dictionary of arg options
"""
pass
return None
# ==============================================================================
# Noise and Fractal
# ==============================================================================
def get_noise(
self,
point,
ntype,
nbasis,
dimension=1.0,
lacunarity=2.0,
offset=1.0,
octaves=6,
gain=1.0,
**kw
):
# multi_fractal(position, H, lacunarity, octaves, noise_basis=noise.types.STDPERLIN)
        # Note: please use InitFbm() before you use one of the following noise types:
# NOISE_ZADA, NOISE_DISPL_VORONOI, NOISE_OBER, NOISE_FBM, NOISE_BUYA.
return 0.0
# ===============================================================================
# depends on PIL
# ===============================================================================
def getImage(self, img, draw, sizex, sizey):
if img is None and draw:
img = Image.new("RGB", (int(sizex), int(sizey)), (0, 0, 0))
elif not draw:
img = numpy.zeros((int(sizex), int(sizey), 3))
return img
def makeTexture(
self,
object,
filename=None,
img=None,
colors=None,
sizex=0,
sizey=0,
s=20,
draw=True,
faces=None,
invert=False,
):
"""
        Experiment for baking face colors using a PIL image
"""
invert = False
img = self.getImage(img, draw, sizex, sizey)
# object handling
if faces is None:
faces = self.getFaces(object)
order = [2, 1, 0]
if self.host != "c4d":
invert = True
order = [0, 1, 2]
x = 0
y = 0
s = s
uvindex = 0
self.resetProgressBar(0)
for i, f in enumerate(faces):
xys = [(x, y), (x + s, y + s), (x + s, y)]
c1 = numpy.array(self.convertColor(colors[f[order[0]]]))
c2 = numpy.array(self.convertColor(colors[f[order[1]]]))
c3 = numpy.array(self.convertColor(colors[f[order[2]]]))
img = self.fillTriangleColor(img, c1, c2, c3, xys)
if invert:
uv = [
[(x + 2) / sizex, 1 - ((y + 2) / sizey), 0],
[(x + s - 2) / sizex, 1 - ((y + s - 2) / sizey), 0],
[(x + s - 2) / sizex, 1 - ((y + 2) / sizey), 0],
[(x + s - 2) / sizex, 1 - ((y + 2) / sizey), 0],
]
else:
uv = [
[(x + 2) / sizex, (y + 2) / sizey, 0],
[(x + s - 2) / sizex, (y + s - 2) / sizey, 0],
[(x + s - 2) / sizex, (y + 2) / sizey, 0],
[(x + s - 2) / sizex, (y + 2) / sizey, 0],
]
uvindex = self.setUV(
object, i, 0, uv, perVertice=False, uvid=uvindex
) # perFaces give 3-4 uv
self.progressBar(
i / len(faces), "faces " + str(i) + "/" + str(len(faces)) + " done"
)
x = x + s
if x >= sizex or x + s >= sizex:
x = 0
y = y + s
if draw:
img.save(filename)
else:
            # what to do with the array -> build image using host or PIL
            # n=int(len(img)/(sizex*sizey))
            # img=img.reshape(sizex,sizey,n)
            mode = "RGB"
            # else : mode = "RGBA"
            # NOTE: Image.fromstring/tostring exist only in old PIL releases;
            # current Pillow renamed them to Image.frombytes / ndarray.tobytes.
            pilImage = Image.fromstring(mode, img.shape[0:2], img.tostring())
# img=numpy.array(img)
# pilImage = Image.fromarray(img, mode)
pilImage.save(filename)
self.resetProgressBar(0)
return img
def fillTriangleColor(self, array, col1, col2, col3, xys):
"""
        Draw and color a triangle according to a color per corner.
        Interpolate the colors as OpenGL would do with per-vertex colors
"""
for i in range(20):
a = i / 20.0
for j in range(20):
b = j / 20.0
xcol = col1 + b * (col3 - col1)
ycol = xcol + a * (col2 - xcol)
array[xys[0][0] + j][xys[0][1] + i][:] = [ycol[0], ycol[1], ycol[2]]
return array
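    # A hedged note on the interpolation above: at the cell a = b = 0.5 the
    # resulting color is 0.25*col1 + 0.5*col2 + 0.25*col3, i.e. a barycentric-like
    # blend of the three corner colors, which mimics OpenGL per-vertex shading.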
def drawGradientLine(self, imdraw, col1, col2, col3, xys):
"""
Draw and color a gradient using either PIL rectangle or point drawing methods
"""
if col1 is col2 and col2 is col3:
imdraw.rectangle([xys[0], xys[1]], fill=(col1[0], col1[1], col1[2]))
else:
for i in range(20):
a = i / 20.0
for j in range(20):
b = j / 20.0
xcol = col1 + b * (col3 - col1)
ycol = xcol + a * (col2 - xcol)
imdraw.point(
(xys[0][0] + j, xys[0][1] + i), fill=(ycol[0], ycol[1], ycol[2])
)
def drawPtCol(self, imdraw, color, uv, debug=0):
"""
        Draw on the given texture image according to the UV coordinates and colors.
        uv holds the 3 vertex coordinates in UV space
"""
# uv is the 3 vertex coordinate in UV
# uv=extendUV(uv)
uv = numpy.array(uv)
u = uv[0][0]
v = uv[0][1]
# color = [c0,c1,c2]
distu = [[0.0, 0, 0], [0, 0.0, 0], [-0, -0, 0.0]]
distv = [[0.0, 0, 0], [0, 0.0, 0], [-0, -0, 0.0]]
for i in range(3): # points
for j in range(3):
# print i,j
distu[i][j] = uv[i][0] - uv[j][0]
distv[i][j] = uv[i][1] - uv[j][1]
order = [0, 1, 2]
# print distu
# print distv
order = self.getOrder(distu, uv)
# orderv = getOrder(distv,uv=uv)
ab = []
ab.append(self.getA(uv[order[0]], uv[order[1]]))
ab.append(self.getA(uv[order[1]], uv[order[2]]))
ab.append(self.getA(uv[order[0]], uv[order[2]]))
uv = numpy.array(uv, int)
u = uv[order[0]][0]
v = uv[order[0]][1]
up = False
if distv[order[0]][order[1]] <= 0:
up = True # second pt up
if debug:
print(up, distv[order[0]][order[1]])
# distu=numpy.array(distu,int)
# distv=numpy.array(distv,int)
rgu = list(range(u, uv[order[2]][0]))
if u - 1 == -1:
rgu = rgu[1:]
if debug:
print("1x ", uv[order[1]][1], v)
print("1x ", uv[order[2]][1], v)
maxv = uv[order[1]][1]
if uv[order[1]][1] == v:
d = distv[order[0]][order[2]]
maxv = uv[order[2]][1]
else:
d = distv[order[0]][order[1]]
if d < 0.0:
d = d * -1
rgv = list(range(d))
if debug:
print(maxv, rgv)
if len(rgv) == 0:
rgv = [0, 1]
ycol = color[order[0]]
if debug:
print("draw first point", order, u, v, len(rgu), len(rgv)) #
# imdraw.point((u,v),fill=(ycol[0],ycol[1],ycol[2]))
# print "line eq",ab,order,orderv
indice1 = 0 # more on right
indice2 = 2 # midlle which can be up/down compare to 1
x = 0.0
ca = 0.0
for gg in rgu:
# range of v
if debug:
print(
"eq ",
u,
ab[indice1],
u * ab[indice1][0] + ab[indice1][1],
ab[indice2],
u * ab[indice2][0] + ab[indice2][1],
)
# y = ax+b
va = int(u * ab[indice1][0] + ab[indice1][1])
if ab[indice1][0] == 0.0 and u == uv[order[0]][0]:
vb = uv[order[1]][1] # - distv[order[0]][order[1]]
else:
vb = int(u * ab[indice2][0] + ab[indice2][1])
# if up :
rg = list(range(va, vb))
# else :
# rg = range(vb,va+1)
if len(rg) == 0:
rg = list(range(vb, va))
if debug:
print("range V ", u, va, vb, rg)
xcola = x / len(rgu) # * ab[2][0] + ab[2][1] #
if debug:
print("xcola", xcola, x, len(rgu), ca / len(rgu))
y = 0.0
for cb, v in enumerate(rg):
k = float(maxv) - float(v)
if k < 0:
k = k * -1.0
ycola = k / float(
len(rgv)
) # (x/len(rgu) * ab[indice1][0] + ab[indice1][1])/len(rgv) #
if debug:
print("ycola", y, len(rgv), y / len(rgv), ycola)
if (color[order[2]] - color[order[0]]).sum() == 0.0:
# if up:
xcol = color[order[0]]
# else :
# xcol = color[order[2]]
else:
# if up:
# xcol = color[order[2]]+x/len(rgu)*(color[order[0]]-color[order[2]])
# else :
xcol = color[order[0]] + xcola * (color[order[2]] - color[order[0]])
if (color[order[1]] - color[order[0]]).sum() == 0.0:
ycol = xcol
else:
# if up :
ycol = xcol + (1 - ycola) * (color[order[1]] - xcol)
# else :
# ycol = color[order[1]]+y/n*(xcol-color[order[1]])
imdraw.point((u, v), fill=(ycol[0], ycol[1], ycol[2]))
y = y + 1.0
if u == uv[order[1]][0]:
if debug:
print("end", u, uv[order[1]][0])
# if y == uv[order[1]][1] :
# print "change line eq",y , uv[order[1]][1],indice1,indice2
indice1 = 1
# if order[0] == orderv[0] :
# indice1 = orderv[0]
indice2 = 2
# print indice1,indice2
u = u + 1
x = x + 1.0
def getOrder(self, distu, uv):
order = [0, 1, 2]
u, v = uv.transpose()
imaxu = numpy.nonzero(u == u.max())[0] # [0]
iminu = numpy.nonzero(u == u.min())[0] # [0]
# order0 should be left/top
if len(iminu) == 1: #
order[0] = iminu[0]
else:
# different max,top one is the one with v mini
min = [9999, 0]
for ind in iminu:
if v[ind] < min[0]:
min = [v[ind], ind]
order[0] = min[1]
# order1
# closest u from order[0]
# print distu[order[0]]
min = [9999, 0]
ds = []
for i, d in enumerate(distu[order[0]]):
if i != order[0]:
ds.append(d)
if -d < min[0]:
min = [-d, i]
if ds[0] == ds[1]:
min = [9999, 0]
for ind, val in enumerate(v):
if ind != order[0]:
if val < min[0]:
min = [val, ind]
order[1] = min[1]
# order2
if len(imaxu) == 1:
order[2] = imaxu[0]
else:
min = [9999, 0]
for ind in imaxu:
if v[ind] < min[0] and ind != order[1]:
min = [v[ind], ind]
order[2] = min[1]
# print order
return order
def getA(self, pt1, pt2):
if pt2[0] - pt1[0] == 0.0:
a = 0.0
else:
a = (pt2[1] - pt1[1]) / (pt2[0] - pt1[0])
# solve y = ax+b
# b = y - ax
b = pt1[1] - a * pt1[0]
return (a, b)
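    # Hedged example for getA (the `helper` instance name is assumed): it returns
    # the slope/intercept pair (a, b) of the line y = a*x + b through two 2D points.
    #
    #   helper.getA((0.0, 0.0), (2.0, 4.0))   # -> (2.0, 0.0)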
# ==============================================================================
    # IO / read/write 3D objects, scene files etc
# ==============================================================================
def combineDaeMeshData(self, data, transform=True):
vertices = []
faces = []
vnormal = []
for i in range(len(data)):
v, vn, f = data[i]["mesh"]
f = numpy.array(f, int)
faces.extend((f + len(vertices)).tolist())
if transform and len(data[i]["instances"]):
v = self.ApplyMatrix(v, data[i]["instances"][0])
vertices.extend(v)
if vn is not None:
vnormal.extend(vn)
return vertices, vnormal, faces
def read(self, filename, **kw):
pass
def write(self, *args, **kw):
pass
def instancesToCollada(
self, parent_object, collada_xml=None, instance_node=True, **kw
):
try:
from cellpack.autopack.transformation import decompose_matrix
from collada import Collada
from collada import material
from collada import source
from collada import geometry
from collada import scene
except Exception:
return
inst_parent = parent_object # self.getCurrentSelection()[0]
ch = self.getChilds(inst_parent)
transpose = True
if "transpose" in kw:
transpose = kw["transpose"]
# instance master
if "mesh" in kw:
inst_master = kw["mesh"]
f, v, vn = self.DecomposeMesh(
kw["mesh"], edit=False, copy=False, tri=True, transform=True
)
else:
inst_master = self.getMasterInstance(ch[0])
            # grab v,f,n of inst_master
f, v, vn = self.DecomposeMesh(
inst_master, edit=False, copy=False, tri=True, transform=True
)
iname = self.getName(inst_master)
pname = self.getName(inst_parent)
if collada_xml is None:
collada_xml = Collada()
collada_xml.assetInfo.unitname = "centimeter"
collada_xml.assetInfo.unitmeter = 0.01
mat = self.getMaterialObject(inst_master)
if len(mat):
mat = mat[0]
props = self.getMaterialProperty(mat, color=1) # ,specular_color=1)
effect = material.Effect("effect" + iname, [], "phong", diffuse=props["color"])
# specular = props["specular_color"])
mat = material.Material("material" + iname, iname + "_material", effect)
matnode = scene.MaterialNode("material" + iname, mat, inputs=[])
collada_xml.effects.append(effect)
collada_xml.materials.append(mat)
# the geom
# invert Z ? for C4D?
vertzyx = numpy.array(v) # * numpy.array([1,1,-1])
z, y, x = vertzyx.transpose()
vertxyz = numpy.vstack([x, y, z]).transpose() * numpy.array([1, 1, -1])
vert_src = source.FloatSource(
iname + "_verts-array", vertxyz.flatten(), ("X", "Y", "Z")
)
norzyx = numpy.array(vn)
nz, ny, nx = norzyx.transpose()
norxyz = numpy.vstack([nx, ny, nz]).transpose() * numpy.array([1, 1, -1])
normal_src = source.FloatSource(
iname + "_normals-array", norxyz.flatten(), ("X", "Y", "Z")
)
geom = geometry.Geometry(
collada_xml, "geometry" + iname, iname, [vert_src, normal_src]
)
input_list = source.InputList()
input_list.addInput(0, "VERTEX", "#" + iname + "_verts-array")
input_list.addInput(0, "NORMAL", "#" + iname + "_normals-array")
# invert all the face
fi = numpy.array(f, int) # [:,::-1]
triset = geom.createTriangleSet(fi.flatten(), input_list, iname + "materialref")
geom.primitives.append(triset)
collada_xml.geometries.append(geom)
        # the node
        # instance here ?
        # create the instance master node:
if instance_node:
master_geomnode = scene.GeometryNode(geom, [matnode])
master_node = scene.Node(
"node_" + iname,
children=[
master_geomnode,
],
) # ,transforms=[tr,rz,ry,rx,s])
g = []
for c in ch:
# collada.scene.NodeNode
if instance_node:
geomnode = scene.NodeNode(master_node)
else:
geomnode = scene.GeometryNode(geom, [matnode])
matrix = self.ToMat(self.getTransformation(c)) # .transpose()#.flatten()
if transpose:
matrix = numpy.array(matrix).transpose()
scale, shear, euler, translate, perspective = decompose_matrix(matrix)
scale = self.getScale(c)
p = translate # matrix[3,:3]/100.0#unit problem
tr = scene.TranslateTransform(p[0], p[1], p[2])
rx = scene.RotateTransform(1, 0, 0, numpy.degrees(euler[0]))
ry = scene.RotateTransform(0, 1, 0, numpy.degrees(euler[1]))
rz = scene.RotateTransform(0, 0, 1, numpy.degrees(euler[2]))
s = scene.ScaleTransform(scale[0], scale[1], scale[2])
# n = scene.NodeNode(master_node,transforms=[tr,rz,ry,rx,s])
# gnode = scene.Node(self.getName(c)+"_inst", children=[geomnode,])
n = scene.Node(
self.getName(c),
children=[
geomnode,
],
transforms=[tr, rz, ry, rx, s],
) # scene.MatrixTransform(matrix)[scene.MatrixTransform(numpy.array(matrix).reshape(16,))]
# n = scene.Node(self.getName(c), children=[geomnode,],
# transforms=[scene.MatrixTransform(numpy.array(matrix).reshape(16,))]) #scene.MatrixTransform(matrix)[scene.MatrixTransform(numpy.array(matrix).reshape(16,))]
g.append(n)
node = scene.Node(
pname, children=g
) # ,transforms=[scene.RotateTransform(0,1,0,90.0)])
if "parent_node" in kw:
kw["parent_node"].children.append(node)
node = kw["parent_node"]
if not len(collada_xml.scenes):
myscene = scene.Scene("myscene", [node])
collada_xml.scenes.append(myscene)
collada_xml.scene = myscene
else:
if "parent_node" not in kw:
collada_xml.scene.nodes.append(node)
if instance_node:
            collada_xml.nodes.append(master_node)  # library_nodes list on the Collada object
return collada_xml
    # DejaVu.indexedPolygon also has this function
def writeMeshToFile(self, filename, verts=None, faces=None, vnorms=[], fnorms=[]):
"""
        Write the given mesh data (vertices, faces, vertex normals, face normals) in the DejaVu format.
        Create two files: filename.indpolvert and filename.indpolface
        @type filename: string
        @param filename: the destination filename.
        @type verts: list
        @param verts: the list of vertices
        @type faces: list
        @param faces: the list of faces
        @type vnorms: list
        @param vnorms: the list of vertex normals
        @type fnorms: list
        @param fnorms: the list of face normals
"""
file = open(filename + ".indpolvert", "w")
[
(file.write("%f %f %f %f %f %f\n" % tuple(tuple(v) + tuple(n))))
for v, n in zip(verts, vnorms)
]
file.close()
file = open(filename + ".indpolface", "w")
for v, face in zip(fnorms, faces):
[file.write("%d " % ind) for ind in face]
# map( lambda ind, f=file: f.write("%d "%ind ), face )
file.write("%f %f %f\n" % tuple(v))
file.close()
def readMeshFromFile(self, filename):
"""
        Given the DejaVu filename, return the mesh data (vertices, faces, normals).
        Parse two files: filename.indpolvert and filename.indpolface
        @type filename: string
        @param filename: the input filename.
        @rtype: list
        @return: the lists of vertices, faces and normals
"""
filename = os.path.splitext(filename)[0]
f = open(filename + ".indpolvert")
lines = f.readlines()
data = [line.split() for line in lines]
f.close()
verts = [(float(x[0]), float(x[1]), float(x[2])) for x in data]
norms = [(float(x[3]), float(x[4]), float(x[5])) for x in data]
f = open(filename + ".indpolface")
lines = f.readlines()
data = [line.split() for line in lines]
f.close()
faces = []
fnorms = []
for line in data:
faces.append(list(map(int, line[:-3])))
fnorms.append(list(map(float, line[-3:])))
return verts, faces, norms
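    # Hedged round-trip sketch (the `helper` instance name and the "tri" file
    # prefix are assumptions): write a single triangle, then parse it back.
    #
    #   verts = [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0)]
    #   vnorms = [(0.0, 0.0, 1.0)] * 3
    #   helper.writeMeshToFile("tri", verts=verts, faces=[[0, 1, 2]],
    #                          vnorms=vnorms, fnorms=[(0.0, 0.0, 1.0)])
    #   v, f, n = helper.readMeshFromFile("tri")   # reads tri.indpolvert / tri.indpolface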
def writeToFile(self, polygon, filename):
"""
Write the given polygon mesh data (vertices, faces, normal, face normal) in the DejaVu format.
Create two files : filename.indpolvert and filename.indpolface.
See writeMeshToFile
@type polygon: hostObj/hostMesh/String
@param polygon: the polygon to export in DejaVu format
@type filename: string
        @param filename: the destination filename.
"""
        # get child?
faces, vertices, vnormals, fnormals = self.DecomposeMesh(
self.getMesh(polygon),
edit=False,
copy=False,
tri=True,
transform=True,
fn=True,
)
self.writeMeshToFile(
filename, verts=vertices, faces=faces, vnorms=vnormals, fnorms=fnormals
)
@classmethod
    def writeDX(cls, filename, gridvalue, gridcenter, gridstep, grisize, commens=""):
nx, ny, nz = grisize
ox, oy, oz = gridcenter
sx, sy, sz = gridstep
N = nx * ny * nz
aStr = "#Data frm upy\n"
aStr += "#%s\n" % commens
aStr += "object 1 class gridpositions counts %i %i %i\n" % (nx, ny, nz)
aStr += "origin %f %f %f\n" % (ox, oy, oz)
aStr += "delta %f 0.000000e+00 0.000000e+00\n" % sx
aStr += "delta 0.000000e+00 %f 0.000000e+00\n" % sy
aStr += "delta 0.000000e+00 0.000000e+00 %f\n" % sz
aStr += "object 2 class gridconnections counts %i %i %i\n" % (nx, ny, nz)
aStr += "object 3 class array type double rank 0 items %i data follows\n" % N
# u(*,*,*)
# The data values, ordered with the z-index increasing most quickly, followed by the y-index, and then the x-index.
        counterLine = 0
        counter = 0
        v = ""
        for x in range(nx):
            for y in range(ny):
                for z in range(nz):
                    v += "%f " % gridvalue[counter]
                    counter += 1
                    counterLine += 1
                    if counterLine == 2:
                        aStr += v + "\n"
                        counterLine = 0
                        v = ""  # reset the line buffer, otherwise previous values are written again
        if v:
            # flush any remaining values when N is not a multiple of the line width
            aStr += v + "\n"
f = open(filename, "w")
f.write(aStr)
f.close()
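    # Hedged usage sketch for writeDX (the `HelperClass` name and the values are
    # hypothetical): write a flat array ordered with the z index varying fastest.
    #
    #   values = numpy.zeros(2 * 3 * 4)
    #   HelperClass.writeDX("grid.dx", values, gridcenter=(0.0, 0.0, 0.0),
    #                       gridstep=(1.0, 1.0, 1.0), grisize=(2, 3, 4),
    #                       commens="example grid")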
# ==============================================================================
# raycasting RAPID? python ?
# ==============================================================================
def raycast(self, obj, point, direction, length, **kw):
intersect = False
if "count" in kw:
return intersect, 0
if "fnormal" in kw:
return intersect, [0, 0, 0]
if "hitpos" in kw:
return intersect, [0, 0, 0]
return intersect
python_tests/Test.py | brunoerg/Ember | CC0-1.0 | Python | 4363 bytes
#Types.
from typing import Callable, List
#Exceptions.
from python_tests.Tests.Errors import EmptyError, NodeError, TestError
#Meros classes.
from python_tests.Meros.Meros import Meros
from python_tests.Meros.RPC import RPC
#Tests.
from python_tests.Tests.Merit.ChainAdvancementTest import ChainAdvancementTest
from python_tests.Tests.Merit.SyncTest import MSyncTest
from python_tests.Tests.Transactions.DataTest import DataTest
from python_tests.Tests.Transactions.FiftyTest import FiftyTest
from python_tests.Tests.Consensus.Verification.Unknown import VUnknown
from python_tests.Tests.Consensus.Verification.Parsable import VParsable
from python_tests.Tests.Consensus.Verification.Competing import VCompeting
from python_tests.Tests.Consensus.MeritRemoval.SameNonce.CauseTest import MRSNCauseTest
from python_tests.Tests.Consensus.MeritRemoval.SameNonce.LiveTest import MRSNLiveTest
from python_tests.Tests.Consensus.MeritRemoval.SameNonce.SyncTest import MRSNSyncTest
from python_tests.Tests.Consensus.MeritRemoval.VerifyCompeting.CauseTest import MRVCCauseTest
from python_tests.Tests.Consensus.MeritRemoval.VerifyCompeting.LiveTest import MRVCLiveTest
from python_tests.Tests.Consensus.MeritRemoval.VerifyCompeting.SyncTest import MRVCSyncTest
from python_tests.Tests.Consensus.MeritRemoval.Multiple.CauseTest import MRMCauseTest
from python_tests.Tests.Consensus.MeritRemoval.Multiple.LiveTest import MRMLiveTest
from python_tests.Tests.Consensus.MeritRemoval.Partial.CauseTest import MRPCauseTest
from python_tests.Tests.Consensus.MeritRemoval.Partial.LiveTest import MRPLiveTest
from python_tests.Tests.Consensus.MeritRemoval.Partial.SyncTest import MRPSyncTest
from python_tests.Tests.Consensus.MeritRemoval.PendingActions.CauseTest import MRPACauseTest
from python_tests.Tests.Consensus.MeritRemoval.PendingActions.LiveTest import MRPALiveTest
#Arguments.
from sys import argv
#Sleep standard function.
from time import sleep
#SHUtil standard lib.
import shutil
#Format Exception standard function.
from traceback import format_exc
#Initial port.
port: int = 5132
#Results.
ress: List[str] = []
#Tests.
tests: List[
Callable[[RPC], None]
] = [
ChainAdvancementTest,
MSyncTest,
DataTest,
FiftyTest,
VUnknown,
VParsable,
VCompeting,
MRSNCauseTest,
MRSNLiveTest,
MRSNSyncTest,
MRVCCauseTest,
MRVCLiveTest,
MRVCSyncTest,
MRPCauseTest,
MRPLiveTest,
MRPSyncTest,
MRPACauseTest,
MRPALiveTest,
MRMCauseTest,
MRMLiveTest
]
#Tests to run.
#If any were specified over the CLI, only run those.
testsToRun: List[str] = argv[1:]
#Else, run all.
if len(testsToRun) == 0:
for test in tests:
testsToRun.append(test.__name__)
#Remove invalid tests.
for testName in list(testsToRun): #Iterate over a copy; testsToRun is mutated inside the loop.
found: bool = False
for test in tests:
if test.__name__ == testName:
found = True
break
if not found:
ress.append("\033[0;31mCouldn't find " + testName + ".")
testsToRun.remove(testName)
#Delete the python_tests data directory.
try:
shutil.rmtree("./data/python_tests")
except FileNotFoundError:
pass
#Run every test.
for test in tests:
if len(testsToRun) == 0:
break
if test.__name__ not in testsToRun:
continue
testsToRun.remove(test.__name__)
print("Running " + test.__name__ + ".")
meros: Meros = Meros(
test.__name__,
port,
port + 1
)
sleep(2)
rpc: RPC = RPC(meros)
try:
test(rpc)
ress.append("\033[0;32m" + test.__name__ + " succeeded.")
except EmptyError as e:
ress.append("\033[0;33m" + test.__name__ + " is empty.")
continue
except NodeError as e:
ress.append("\033[5;31m" + test.__name__ + " caused the node to crash!\033[0;31m")
except TestError as e:
ress.append("\033[0;31m" + test.__name__ + " failed: " + str(e))
continue
except Exception as e:
ress.append("\r\n")
ress.append("\033[0;31m" + test.__name__ + " is invalid.")
ress.append(format_exc())
finally:
try:
rpc.quit()
except NodeError:
ress.append("\033[5;31m" + test.__name__ + " caused the node to crash!\033[0;31m")
print("-" * shutil.get_terminal_size().columns)
for res in ress:
print(res)
unit_tests/test_multisite.py | dardelean/charm-ceph-radosgw | ECL-2.0, Apache-2.0 | Python | 8820 bytes
# Copyright 2019 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
import mock
import multisite
from test_utils import CharmTestCase
def whoami():
return inspect.stack()[1][3]
class TestMultisiteHelpers(CharmTestCase):
TO_PATCH = [
'subprocess',
'socket',
'hookenv',
]
def setUp(self):
super(TestMultisiteHelpers, self).setUp(multisite, self.TO_PATCH)
self.socket.gethostname.return_value = 'testhost'
def _testdata(self, funcname):
return os.path.join(os.path.dirname(__file__),
'testdata',
'{}.json'.format(funcname))
def test_create_realm(self):
with open(self._testdata(whoami()), 'rb') as f:
self.subprocess.check_output.return_value = f.read()
result = multisite.create_realm('beedata', default=True)
self.assertEqual(result['name'], 'beedata')
self.subprocess.check_output.assert_called_with([
'radosgw-admin', '--id=rgw.testhost',
'realm', 'create',
'--rgw-realm=beedata', '--default'
])
def test_list_realms(self):
with open(self._testdata(whoami()), 'rb') as f:
self.subprocess.check_output.return_value = f.read()
result = multisite.list_realms()
self.assertTrue('beedata' in result)
    def test_set_default_realm(self):
multisite.set_default_realm('newrealm')
self.subprocess.check_call.assert_called_with([
'radosgw-admin', '--id=rgw.testhost',
'realm', 'default',
'--rgw-realm=newrealm'
])
def test_create_zonegroup(self):
with open(self._testdata(whoami()), 'rb') as f:
self.subprocess.check_output.return_value = f.read()
result = multisite.create_zonegroup(
'brundall',
endpoints=['http://localhost:80'],
master=True,
default=True,
realm='beedata',
)
self.assertEqual(result['name'], 'brundall')
self.subprocess.check_output.assert_called_with([
'radosgw-admin', '--id=rgw.testhost',
'zonegroup', 'create',
'--rgw-zonegroup=brundall',
'--endpoints=http://localhost:80',
'--rgw-realm=beedata',
'--default',
'--master'
])
def test_list_zonegroups(self):
with open(self._testdata(whoami()), 'rb') as f:
self.subprocess.check_output.return_value = f.read()
result = multisite.list_zonegroups()
self.assertTrue('brundall' in result)
def test_create_zone(self):
with open(self._testdata(whoami()), 'rb') as f:
self.subprocess.check_output.return_value = f.read()
result = multisite.create_zone(
'brundall-east',
endpoints=['http://localhost:80'],
master=True,
default=True,
zonegroup='brundall',
access_key='mykey',
secret='mypassword',
)
self.assertEqual(result['name'], 'brundall-east')
self.subprocess.check_output.assert_called_with([
'radosgw-admin', '--id=rgw.testhost',
'zone', 'create',
'--rgw-zone=brundall-east',
'--endpoints=http://localhost:80',
'--rgw-zonegroup=brundall',
'--default', '--master',
'--access-key=mykey',
'--secret=mypassword',
'--read-only=0',
])
def test_modify_zone(self):
multisite.modify_zone(
'brundall-east',
endpoints=['http://localhost:80', 'https://localhost:443'],
access_key='mykey',
secret='secret',
readonly=True
)
self.subprocess.check_output.assert_called_with([
'radosgw-admin', '--id=rgw.testhost',
'zone', 'modify',
'--rgw-zone=brundall-east',
'--endpoints=http://localhost:80,https://localhost:443',
'--access-key=mykey', '--secret=secret',
'--read-only=1',
])
def test_modify_zone_promote_master(self):
multisite.modify_zone(
'brundall-east',
default=True,
master=True,
)
self.subprocess.check_output.assert_called_with([
'radosgw-admin', '--id=rgw.testhost',
'zone', 'modify',
'--rgw-zone=brundall-east',
'--master',
'--default',
'--read-only=0',
])
def test_modify_zone_partial_credentials(self):
multisite.modify_zone(
'brundall-east',
endpoints=['http://localhost:80', 'https://localhost:443'],
access_key='mykey',
)
self.subprocess.check_output.assert_called_with([
'radosgw-admin', '--id=rgw.testhost',
'zone', 'modify',
'--rgw-zone=brundall-east',
'--endpoints=http://localhost:80,https://localhost:443',
'--read-only=0',
])
def test_list_zones(self):
with open(self._testdata(whoami()), 'rb') as f:
self.subprocess.check_output.return_value = f.read()
result = multisite.list_zones()
self.assertTrue('brundall-east' in result)
def test_update_period(self):
multisite.update_period()
self.subprocess.check_call.assert_called_once_with([
'radosgw-admin', '--id=rgw.testhost',
'period', 'update', '--commit'
])
@mock.patch.object(multisite, 'list_zonegroups')
@mock.patch.object(multisite, 'list_zones')
@mock.patch.object(multisite, 'update_period')
def test_tidy_defaults(self,
mock_update_period,
mock_list_zones,
mock_list_zonegroups):
mock_list_zones.return_value = ['default']
mock_list_zonegroups.return_value = ['default']
multisite.tidy_defaults()
self.subprocess.call.assert_has_calls([
mock.call(['radosgw-admin', '--id=rgw.testhost',
'zonegroup', 'remove',
'--rgw-zonegroup=default', '--rgw-zone=default']),
mock.call(['radosgw-admin', '--id=rgw.testhost',
'zone', 'delete',
'--rgw-zone=default']),
mock.call(['radosgw-admin', '--id=rgw.testhost',
'zonegroup', 'delete',
'--rgw-zonegroup=default'])
])
mock_update_period.assert_called_with()
@mock.patch.object(multisite, 'list_zonegroups')
@mock.patch.object(multisite, 'list_zones')
@mock.patch.object(multisite, 'update_period')
def test_tidy_defaults_noop(self,
mock_update_period,
mock_list_zones,
mock_list_zonegroups):
mock_list_zones.return_value = ['brundall-east']
mock_list_zonegroups.return_value = ['brundall']
multisite.tidy_defaults()
self.subprocess.call.assert_not_called()
mock_update_period.assert_not_called()
def test_pull_realm(self):
multisite.pull_realm(url='http://master:80',
access_key='testkey',
secret='testsecret')
self.subprocess.check_output.assert_called_once_with([
'radosgw-admin', '--id=rgw.testhost',
'realm', 'pull',
'--url=http://master:80',
'--access-key=testkey', '--secret=testsecret',
])
def test_pull_period(self):
multisite.pull_period(url='http://master:80',
access_key='testkey',
secret='testsecret')
self.subprocess.check_output.assert_called_once_with([
'radosgw-admin', '--id=rgw.testhost',
'period', 'pull',
'--url=http://master:80',
'--access-key=testkey', '--secret=testsecret',
])
scripts/pyinstaller/hooks/hook-googleapiclient.model.py | lucasalavapena/dvc | Apache-2.0 | Python | 292 bytes
"""Temporary fix for https://github.com/iterative/dvc/issues/5618."""
from PyInstaller.utils.hooks import ( # pylint:disable=import-error
collect_data_files,
copy_metadata,
)
datas = collect_data_files("googleapiclient.discovery")
datas += copy_metadata("google_api_python_client")
lib/roi_data/loader.py | ruotianluo/Context-aware-ZSR | MIT | Python | 13107 bytes
import math
import numpy as np
import numpy.random as npr
import torch
import torch.utils.data as data
import torch.utils.data.sampler as torch_sampler
from torch.utils.data.dataloader import default_collate
from torch._six import int_classes as _int_classes
from core.config import cfg
from roi_data.minibatch import get_minibatch
import utils.blob as blob_utils
# from model.rpn.bbox_transform import bbox_transform_inv, clip_boxes
class RoiDataLoader(data.Dataset):
def __init__(self, roidb, num_classes, training=True):
self._roidb = roidb
self._prepare_extra_info()
self._num_classes = num_classes
self.training = training
self.DATA_SIZE = len(self._roidb)
def _prepare_extra_info(self):
self._extra_info = {k:self._roidb[0][k] for k in set(self._roidb[0].keys()) - set(self._roidb[1].keys())}
if 'relationships' in self._extra_info:
from collections import defaultdict
tmp = defaultdict(list)
for rel in self._extra_info['relationships']:
tmp[(rel['subject_id'], rel['object_id'])].append(rel['rel_id'])
if cfg.MODEL.RELATION_COOCCUR:
for k in tmp:
tmp[k] = [1]
self._extra_info['relationships'] = tmp
if 'word_embeddings' in self._extra_info:
self._extra_info['word_embeddings'] = torch.tensor(self._extra_info['word_embeddings'])
if 'rel_embeddings' in self._extra_info:
self._extra_info['rel_embeddings'] = torch.tensor(self._extra_info['rel_embeddings'])
def __getitem__(self, index_tuple):
index, ratio = index_tuple
single_db = [self._roidb[index]]
blobs, valid = get_minibatch(single_db)
#TODO: Check if minibatch is valid ? If not, abandon it.
# Need to change _worker_loop in torch.utils.data.dataloader.py.
# Squeeze batch dim
for key in blobs:
if key != 'roidb':
blobs[key] = blobs[key].squeeze(axis=0)
if self._roidb[index]['need_crop']:
self.crop_data(blobs, ratio)
# Check bounding box
entry = blobs['roidb'][0]
boxes = entry['boxes']
invalid = (boxes[:, 0] == boxes[:, 2]) | (boxes[:, 1] == boxes[:, 3])
valid_inds = np.nonzero(~ invalid)[0]
if len(valid_inds) < len(boxes):
for key in ['boxes', 'gt_classes', 'seg_areas', 'gt_overlaps', 'is_crowd',
'box_to_gt_ind_map', 'gt_keypoints']:
if key in entry:
entry[key] = entry[key][valid_inds]
entry['segms'] = [entry['segms'][ind] for ind in valid_inds]
blobs['roidb'] = blob_utils.serialize(blobs['roidb']) # CHECK: maybe we can serialize in collate_fn
return blobs
def crop_data(self, blobs, ratio):
data_height, data_width = map(int, blobs['im_info'][:2])
boxes = blobs['roidb'][0]['boxes']
if ratio < 1: # width << height, crop height
size_crop = math.ceil(data_width / ratio) # size after crop
min_y = math.floor(np.min(boxes[:, 1]))
max_y = math.floor(np.max(boxes[:, 3]))
box_region = max_y - min_y + 1
if min_y == 0:
y_s = 0
else:
if (box_region - size_crop) < 0:
y_s_min = max(max_y - size_crop, 0)
y_s_max = min(min_y, data_height - size_crop)
y_s = y_s_min if y_s_min == y_s_max else \
npr.choice(range(y_s_min, y_s_max + 1))
else:
                    # CHECK: rethink the mechanism for the case box_region > size_crop
# Now, the crop is biased on the lower part of box_region caused by
# // 2 for y_s_add
y_s_add = (box_region - size_crop) // 2
y_s = min_y if y_s_add == 0 else \
npr.choice(range(min_y, min_y + y_s_add + 1))
# Crop the image
blobs['data'] = blobs['data'][:, y_s:(y_s + size_crop), :,]
# Update im_info
blobs['im_info'][0] = size_crop
# Shift and clamp boxes ground truth
boxes[:, 1] -= y_s
boxes[:, 3] -= y_s
np.clip(boxes[:, 1], 0, size_crop - 1, out=boxes[:, 1])
np.clip(boxes[:, 3], 0, size_crop - 1, out=boxes[:, 3])
blobs['roidb'][0]['boxes'] = boxes
else: # width >> height, crop width
size_crop = math.ceil(data_height * ratio)
min_x = math.floor(np.min(boxes[:, 0]))
max_x = math.floor(np.max(boxes[:, 2]))
box_region = max_x - min_x + 1
if min_x == 0:
x_s = 0
else:
if (box_region - size_crop) < 0:
x_s_min = max(max_x - size_crop, 0)
x_s_max = min(min_x, data_width - size_crop)
x_s = x_s_min if x_s_min == x_s_max else \
npr.choice(range(x_s_min, x_s_max + 1))
else:
x_s_add = (box_region - size_crop) // 2
x_s = min_x if x_s_add == 0 else \
npr.choice(range(min_x, min_x + x_s_add + 1))
# Crop the image
blobs['data'] = blobs['data'][:, :, x_s:(x_s + size_crop)]
# Update im_info
blobs['im_info'][1] = size_crop
# Shift and clamp boxes ground truth
boxes[:, 0] -= x_s
boxes[:, 2] -= x_s
np.clip(boxes[:, 0], 0, size_crop - 1, out=boxes[:, 0])
np.clip(boxes[:, 2], 0, size_crop - 1, out=boxes[:, 2])
blobs['roidb'][0]['boxes'] = boxes
def __len__(self):
return self.DATA_SIZE
def cal_minibatch_ratio(ratio_list):
"""Given the ratio_list, we want to make the RATIO same for each minibatch on each GPU.
Note: this only work for 1) cfg.TRAIN.MAX_SIZE is ignored during `prep_im_for_blob`
and 2) cfg.TRAIN.SCALES containing SINGLE scale.
Since all prepared images will have same min side length of cfg.TRAIN.SCALES[0], we can
pad and batch images base on that.
"""
DATA_SIZE = len(ratio_list)
ratio_list_minibatch = np.empty((DATA_SIZE,))
num_minibatch = int(np.ceil(DATA_SIZE / cfg.TRAIN.IMS_PER_BATCH)) # Include leftovers
for i in range(num_minibatch):
left_idx = i * cfg.TRAIN.IMS_PER_BATCH
right_idx = min((i+1) * cfg.TRAIN.IMS_PER_BATCH - 1, DATA_SIZE - 1)
if ratio_list[right_idx] < 1:
# for ratio < 1, we preserve the leftmost in each batch.
target_ratio = ratio_list[left_idx]
elif ratio_list[left_idx] > 1:
# for ratio > 1, we preserve the rightmost in each batch.
target_ratio = ratio_list[right_idx]
else:
# for ratio cross 1, we make it to be 1.
target_ratio = 1
ratio_list_minibatch[left_idx:(right_idx+1)] = target_ratio
return ratio_list_minibatch
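# A hedged worked example of cal_minibatch_ratio (values are illustrative only):
# with cfg.TRAIN.IMS_PER_BATCH == 2 and ratio_list == [0.5, 0.8, 1.2, 2.0],
# batch 0 keeps the leftmost ratio (0.5) and batch 1 the rightmost (2.0),
# so the returned array is [0.5, 0.5, 2.0, 2.0].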
class MinibatchSampler(torch_sampler.Sampler):
def __init__(self, ratio_list, ratio_index):
self.ratio_list = ratio_list
self.ratio_index = ratio_index
self.num_data = len(ratio_list)
if cfg.TRAIN.ASPECT_GROUPING:
# Given the ratio_list, we want to make the ratio same
# for each minibatch on each GPU.
self.ratio_list_minibatch = cal_minibatch_ratio(ratio_list)
self._reset_iter()
def __iter__(self):
return self
def __next__(self):
if self.iter_counter == len(self._ratio_index):
self._reset_iter()
raise StopIteration()
else:
elem = (self._ratio_index[self.iter_counter], self._ratio_list_minibatch[self.iter_counter])
self.iter_counter += 1
return elem
def _reset_iter(self):
if cfg.TRAIN.ASPECT_GROUPING:
# indices for aspect grouping awared permutation
n, rem = divmod(self.num_data, cfg.TRAIN.IMS_PER_BATCH)
round_num_data = n * cfg.TRAIN.IMS_PER_BATCH
indices = np.arange(round_num_data)
npr.shuffle(indices.reshape(-1, cfg.TRAIN.IMS_PER_BATCH)) # inplace shuffle
if rem != 0:
indices = np.append(indices, np.arange(round_num_data, round_num_data + rem))
self._ratio_index = self.ratio_index[indices]
self._ratio_list_minibatch = self.ratio_list_minibatch[indices]
else:
rand_perm = npr.permutation(self.num_data)
ratio_list = self.ratio_list[rand_perm]
self._ratio_index = self.ratio_index[rand_perm]
# re-calculate minibatch ratio list
self._ratio_list_minibatch = cal_minibatch_ratio(ratio_list)
self.iter_counter = 0
self._ratio_index = self._ratio_index.tolist()
self._ratio_list_minibatch = self._ratio_list_minibatch.tolist()
def __len__(self):
return self.num_data
def load_state_dict(self, state_dict=None):
if state_dict is None:
return
self._ratio_index = state_dict['ratio_index']
self._ratio_list_minibatch = state_dict['ratio_list_minibatch']
self.iter_counter = state_dict['iter_counter']
def state_dict(self, prefetched_num=None):
prefetched_num = prefetched_num or 0
return {
'ratio_index': self._ratio_index,
'ratio_list_minibatch': self._ratio_list_minibatch,
'iter_counter': self.iter_counter - prefetched_num
}
class BatchSampler(torch_sampler.BatchSampler):
r"""Wraps another sampler to yield a mini-batch of indices.
Args:
sampler (Sampler): Base sampler.
batch_size (int): Size of mini-batch.
drop_last (bool): If ``True``, the sampler will drop the last batch if
its size would be less than ``batch_size``
Example:
>>> list(BatchSampler(range(10), batch_size=3, drop_last=False))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
>>> list(BatchSampler(range(10), batch_size=3, drop_last=True))
[[0, 1, 2], [3, 4, 5], [6, 7, 8]]
"""
def __init__(self, sampler, batch_size, drop_last):
if not isinstance(sampler, torch_sampler.Sampler):
raise ValueError("sampler should be an instance of "
"torch.utils.data.Sampler, but got sampler={}"
.format(sampler))
if not isinstance(batch_size, _int_classes) or isinstance(batch_size, bool) or \
batch_size <= 0:
raise ValueError("batch_size should be a positive integeral value, "
"but got batch_size={}".format(batch_size))
if not isinstance(drop_last, bool):
raise ValueError("drop_last should be a boolean value, but got "
"drop_last={}".format(drop_last))
self.sampler = sampler
self.batch_size = batch_size
self.drop_last = drop_last
def __iter__(self):
batch = []
for idx in self.sampler:
batch.append(idx) # Difference: batch.append(int(idx))
if len(batch) == self.batch_size:
yield batch
batch = []
if len(batch) > 0 and not self.drop_last:
yield batch
def __len__(self):
if self.drop_last:
return len(self.sampler) // self.batch_size
else:
return (len(self.sampler) + self.batch_size - 1) // self.batch_size
def collate_minibatch(list_of_blobs):
"""Stack samples seperately and return a list of minibatches
A batch contains NUM_GPUS minibatches and image size in different minibatch may be different.
Hence, we need to stack smaples from each minibatch seperately.
"""
Batch = {key: [] for key in list_of_blobs[0]}
    # Because roidb consists of entries of variable length, it can't be batched into a tensor.
# So we keep roidb in the type of "list of ndarray".
list_of_roidb = [blobs.pop('roidb') for blobs in list_of_blobs]
for i in range(0, len(list_of_blobs), cfg.TRAIN.IMS_PER_BATCH):
mini_list = list_of_blobs[i:(i + cfg.TRAIN.IMS_PER_BATCH)]
# Pad image data
mini_list = pad_image_data(mini_list)
minibatch = default_collate(mini_list)
minibatch['roidb'] = list_of_roidb[i:(i + cfg.TRAIN.IMS_PER_BATCH)]
for key in minibatch:
Batch[key].append(minibatch[key])
return Batch
def pad_image_data(list_of_blobs):
max_shape = blob_utils.get_max_shape([blobs['data'].shape[1:] for blobs in list_of_blobs])
output_list = []
for blobs in list_of_blobs:
data_padded = np.zeros((3, max_shape[0], max_shape[1]), dtype=np.float32)
_, h, w = blobs['data'].shape
data_padded[:, :h, :w] = blobs['data']
blobs['data'] = data_padded
output_list.append(blobs)
return output_list
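# Hedged illustration of pad_image_data: two blobs whose 'data' arrays have shapes
# (3, 600, 800) and (3, 640, 480) are both zero-padded to (3, 640, 800), so that
# default_collate can stack them into a single tensor.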
hasher-matcher-actioner/hmalib/common/reactioner_models.py | king40or1/ThreatExchange | BSD-3-Clause | Python | 2139 bytes
import typing as t
from dataclasses import dataclass, fields
from hmalib.models import MatchMessage, BankedSignal
from hmalib.common.logging import get_logger
from hmalib.common.actioner_models import ActionPerformer, ActionLabel
from hmalib.aws_secrets import AWSSecrets
from threatexchange.api import ThreatExchangeAPI
logger = get_logger(__name__)
@dataclass
class ReactActionPerformer(ActionPerformer):
@property
def reaction(self) -> str:
raise NotImplementedError
def perform_action(self, match_message: MatchMessage) -> None:
api_key = AWSSecrets.te_api_key()
api = ThreatExchangeAPI(api_key)
indicator_ids = {
dataset_match_details.banked_content_id
for dataset_match_details in match_message.matching_banked_signals
if dataset_match_details.bank_source == "te"
}
descriptor_ids = {
descriptor_id["id"]
for indicator_id in indicator_ids
for descriptor_id in api.get_threat_descriptors_from_indicator(indicator_id)
}
for id in descriptor_ids:
api.react_to_threat_descriptor(id, self.reaction)
logger.info("reacted %s to descriptor %s", self.reaction, id)
class ReactInReviewActionPerformer(ReactActionPerformer):
reaction = "IN_REVIEW"
class ReactIngestedActionPerformer(ReactActionPerformer):
reaction = "INGESTED"
class ReactSawThisTooActionPerformer(ReactActionPerformer):
reaction = "SAW_THIS_TOO"
if __name__ == "__main__":
banked_signals = [
BankedSignal("2862392437204724", "bank 4", "te"),
BankedSignal("4194946153908639", "bank 4", "te"),
]
match_message = MatchMessage("key", "hash", banked_signals)
configs: t.List[ActionPerformer] = [
ReactInReviewActionPerformer(
action_label=ActionLabel("ReactInReview"),
),
ReactSawThisTooActionPerformer(
action_label=ActionLabel("ReactSawThisToo"),
),
]
# This will react to 4 real descriptors
for action_config in configs:
action_config.perform_action(match_message)
bg_run/__init__.py | cfperez/bg_run | Apache-2.0 | Python | 453 bytes
def load_ipython_extension(shell):
from .magics import AsyncMagics
shell.register_magics(AsyncMagics)
def unload_ipython_extension(shell):
from .magics import AsyncMagics
shell.magics_manager.registry['AsyncMagics'].pool.shutdown()
for magic_type,magics in AsyncMagics.magics.items():
for magic in magics:
del shell.magics_manager.magics[magic_type][magic]
del shell.magics_manager.registry['AsyncMagics']
infra/bots/zip_utils_test.py | InvictrixRom/external_skia | BSD-3-Clause | Python | 1994 bytes
#!/usr/bin/env python
#
# Copyright 2016 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for zip_utils."""
import filecmp
import os
import test_utils
import unittest
import utils
import uuid
import zip_utils
class ZipUtilsTest(unittest.TestCase):
def test_zip_unzip(self):
with utils.tmp_dir():
fw = test_utils.FileWriter(os.path.join(os.getcwd(), 'input'))
# Create input files and directories.
fw.mkdir('mydir')
fw.mkdir('anotherdir', 0666)
fw.mkdir('dir3', 0600)
fw.mkdir('subdir')
fw.write('a.txt', 0777)
fw.write('b.txt', 0751)
fw.write('c.txt', 0640)
fw.write(os.path.join('subdir', 'd.txt'), 0640)
# Zip, unzip.
zip_utils.zip('input', 'test.zip')
zip_utils.unzip('test.zip', 'output')
# Compare the inputs and outputs.
test_utils.compare_trees(self, 'input', 'output')
def test_blacklist(self):
with utils.tmp_dir():
# Create input files and directories.
fw = test_utils.FileWriter(os.path.join(os.getcwd(), 'input'))
fw.mkdir('.git')
fw.write(os.path.join('.git', 'index'))
fw.write('somefile')
fw.write('.DS_STORE')
fw.write('leftover.pyc')
fw.write('.pycfile')
# Zip, unzip.
zip_utils.zip('input', 'test.zip', blacklist=['.git', '.DS*', '*.pyc'])
zip_utils.unzip('test.zip', 'output')
# Remove the files/dirs we don't expect to see in output, so that we can
# use self._compare_trees to check the results.
fw.remove(os.path.join('.git', 'index'))
fw.remove('.git')
fw.remove('.DS_STORE')
fw.remove('leftover.pyc')
# Compare results.
test_utils.compare_trees(self, 'input', 'output')
def test_nonexistent_dir(self):
with utils.tmp_dir():
with self.assertRaises(IOError):
zip_utils.zip('input', 'test.zip')
if __name__ == '__main__':
unittest.main()
workBousaiTYO_baseline/predflowio/predflowio_DMVST_continue.py | deepkashiwa20/DeepCrowd | MIT | Python | 8788 bytes
import datetime
import sys
import shutil
import gc
from keras.callbacks import EarlyStopping, CSVLogger, ModelCheckpoint, LearningRateScheduler, TensorBoard
import model_structure
from load_data import data_generator, test_generator, get_test_true
from Param_DMVST_flow import *
def mkdir(path):
folder = os.path.exists(path)
if not folder:
os.makedirs(path)
def get_model_structure(name):
model = model_structure.get_model(name)
model.summary()
# model_json = model.to_json()
# with open('model_data/structure/' + name + '.json', "w") as json_file:
# json_file.write(model_json)
return model
def get_data(model_name, type):
print('loading data...')
if type == 'in':
local_density_path = local_flow_in_path
topo_density_path = topo_flow_in_path
elif type == 'out':
local_density_path = local_flow_out_path
topo_density_path = topo_flow_out_path
if model_name == 'density':
region_window = np.load(local_density_path)
topo_data = np.loadtxt(topo_density_path, skiprows=1, usecols=range(1, toponet_len + 1))
temporal_data = np.loadtxt(temporal_path, skiprows=1, delimiter=',')
print('population data', region_window.shape)
print('temporal data', temporal_data.shape)
print('topo data', topo_data.shape)
region_window = region_window / MAX_VALUE
startIndex, endIndex = 0, int(int(region_window.shape[0] * trainRatio) * (1 - SPLIT))
trainData = region_window[startIndex:endIndex, :, :, :, :]
trainTemporal = temporal_data[startIndex:endIndex]
print('train data', trainData.shape)
startIndex, endIndex = int(int(region_window.shape[0] * trainRatio) * (1 - SPLIT)), int(region_window.shape[0] * trainRatio)
validData = region_window[startIndex:endIndex, :, :, :, :]
validTemporal = temporal_data[startIndex:endIndex]
print('valid data', validData.shape)
startIndex, endIndex = int(region_window.shape[0] * trainRatio), region_window.shape[0]
testData = region_window[startIndex:endIndex, :, :, :, :]
testTemporal = temporal_data[startIndex:endIndex]
print('test data', testData.shape)
print('load finished')
return trainData, validData, testData, trainTemporal, validTemporal, testTemporal, topo_data
def model_train(model_name, train_data, valid_data, trainTemporal, validTemporal, topo_data, type):
# set callbacks
csv_logger = CSVLogger(PATH + '/' + MODELNAME + '_' + type + '.log')
checkpointer_path = PATH + '/' + MODELNAME + '_' + type + '.h5'
checkpointer = ModelCheckpoint(filepath=checkpointer_path, verbose=1, save_best_only=True)
early_stopping = EarlyStopping(monitor='val_loss', patience=10, verbose=1, mode='auto')
LearnRate = LearningRateScheduler(lambda epoch: LR)
# data generator
train_generator = data_generator(train_data, trainTemporal, topo_data, BATCHSIZE, TIMESTEP, model_name)
val_generator = data_generator(valid_data, validTemporal, topo_data, BATCHSIZE, TIMESTEP, model_name)
sep = (train_data.shape[0] - TIMESTEP) * train_data.shape[1] // BATCHSIZE
val_sep = (valid_data.shape[0] - TIMESTEP) * valid_data.shape[1] // BATCHSIZE
# train model
model = get_model_structure(model_name)
# model = multi_gpu_model(model, gpus=2) # gpu parallel
model.compile(loss=LOSS, optimizer=OPTIMIZER)
model.fit_generator(train_generator, steps_per_epoch=sep, epochs=EPOCH,
validation_data=val_generator, validation_steps=val_sep,
callbacks=[csv_logger, checkpointer, LearnRate, early_stopping])
# compute mse
val_nolabel_generator = test_generator(valid_data, validTemporal, topo_data, BATCHSIZE, TIMESTEP)
val_predY = model.predict_generator(val_nolabel_generator, steps=val_sep)
valY = get_test_true(valid_data, TIMESTEP, model_name)
# mse
scaled_valY = np.reshape(valY, ((valid_data.shape[0] - TIMESTEP), HEIGHT, WIDTH))
scaled_predValY = np.reshape(val_predY, ((valid_data.shape[0] - TIMESTEP), HEIGHT, WIDTH))
print('val scale shape: ', scaled_predValY.shape)
val_scale_MSE = np.mean((scaled_valY - scaled_predValY) ** 2)
print("Model val scaled MSE", val_scale_MSE)
# rescale mse
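    # the inputs were divided by MAX_VALUE above, so the MSE on the original scale is the
    # scaled MSE multiplied by MAX_VALUE ** 2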
val_rescale_MSE = val_scale_MSE * MAX_VALUE ** 2
print("Model val rescaled MSE", val_rescale_MSE)
# write record
with open(PATH + '/' + MODELNAME + '_prediction_scores.txt', 'a') as wf:
wf.write('train flow {} start time: {}\n'.format(type, StartTime))
wf.write('train flow {} end time: {}\n'.format(type, datetime.datetime.now().strftime('%Y%m%d_%H%M%S')))
wf.write("Keras MSE on flow {} trainData, {}\n".format(type, val_scale_MSE))
wf.write("Rescaled MSE on flow {} trainData, {}\n".format(type, val_rescale_MSE))
return val_scale_MSE, val_rescale_MSE
def model_pred(model_name, test, testTemporal, topo_data, type):
# test generator
test_gene = test_generator(test, testTemporal, topo_data, BATCHSIZE, TIMESTEP)
test_sep = (test.shape[0] - TIMESTEP) * test.shape[1] // BATCHSIZE
# get predict
model = get_model_structure(model_name)
# model = multi_gpu_model(model, gpus=2) # gpu parallel
model.compile(loss=LOSS, optimizer=OPTIMIZER)
model.load_weights(PATH + '/' + MODELNAME + '_' + type + '.h5')
predY = model.predict_generator(test_gene, steps=test_sep)
# ground truth
testY = get_test_true(test, TIMESTEP, model_name)
# compute mse
scaled_testY = np.reshape(testY, ((test.shape[0] - TIMESTEP), HEIGHT, WIDTH))
scaled_predTestY = np.reshape(predY, ((test.shape[0] - TIMESTEP), HEIGHT, WIDTH))
print('test scale shape: ', scaled_predTestY.shape)
scale_MSE = np.mean((scaled_testY - scaled_predTestY) ** 2)
print("Model scaled MSE", scale_MSE)
rescale_MSE = scale_MSE * MAX_VALUE ** 2
print("Model rescaled MSE", rescale_MSE)
with open(PATH + '/' + MODELNAME + '_prediction_scores.txt', 'a') as wf:
wf.write("Keras MSE on flow {} testData, {}\n".format(type, scale_MSE))
wf.write("Rescaled MSE on flow {} testData, {}\n\n".format(type, rescale_MSE))
np.save(PATH + '/' + MODELNAME + '_prediction_' + type + '.npy', scaled_predTestY * MAX_VALUE)
np.save(PATH + '/' + MODELNAME + '_groundtruth_' + type + '.npy', scaled_testY * MAX_VALUE)
return scale_MSE, rescale_MSE
################# Path Setting #######################
MODELNAME = 'DMVST'
# KEYWORD = 'predflowio_' + MODELNAME + '_' + datetime.datetime.now().strftime("%y%m%d%H%M")
KEYWORD = 'predflowio_DMVST_1908141337'
PATH = '../' + KEYWORD
###########################Reproducible#############################
import numpy as np
import random
from keras import backend as K
import os
import tensorflow as tf
np.random.seed(100)
random.seed(100)
os.environ['PYTHONHASHSEED'] = '0' # necessary for py3
tf.set_random_seed(100)
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
session_conf.gpu_options.allow_growth = True
session_conf.gpu_options.per_process_gpu_memory_fraction = 0.45
session_conf.gpu_options.visible_device_list = '1'
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
###################################################################
if __name__ == '__main__':
mkdir(PATH)
currentPython = sys.argv[0]
shutil.copy2(currentPython, PATH)
shutil.copy2('Param_DMVST_flow.py', PATH)
shutil.copy2('model_structure.py', PATH)
shutil.copy2('load_data.py', PATH)
shutil.copy2('preprocess_flow.py', PATH)
StartTime = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
choose = 0
if choose == 0:
# density
model_name = 'density'
print('#' * 50)
print('start running at {}'.format(StartTime))
print('model name: flow')
print('#' * 50, '\n')
test_sc, test_re = [], []
for type in ['in', 'out']:
StartTime = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
train_data, valid_data, test_data, trainTemporal, validTemporal, testTemporal, topo_data = get_data(model_name,
type)
test_scale, test_rescale = model_pred(model_name, test_data, testTemporal, topo_data, type)
test_sc.append(test_scale), test_re.append(test_rescale)
del train_data, valid_data, test_data, trainTemporal, validTemporal, testTemporal, topo_data
gc.collect()
with open(PATH + '/' + MODELNAME + '_prediction_scores.txt', 'a') as wf:
wf.write("Keras MSE on flow testData, {}\n".format(np.mean(test_sc)))
wf.write("Rescaled MSE on flow testData, {}\n".format(np.mean(test_re)))
| 42.454106
| 128
| 0.677629
|
5adce4a5e0ed4d13fc43f7c0779a1d7146a30a19
| 85
|
py
|
Python
|
fairshake_assessments/utils/force_list.py
|
MaayanLab/fairshake-assessments
|
86528e34edc0282528bb9229b637f43c7656fb80
|
[
"Apache-2.0"
] | null | null | null |
fairshake_assessments/utils/force_list.py
|
MaayanLab/fairshake-assessments
|
86528e34edc0282528bb9229b637f43c7656fb80
|
[
"Apache-2.0"
] | null | null | null |
fairshake_assessments/utils/force_list.py
|
MaayanLab/fairshake-assessments
|
86528e34edc0282528bb9229b637f43c7656fb80
|
[
"Apache-2.0"
] | null | null | null |
def force_list(val):
    # Wrap a scalar value in a list; pass lists through unchanged.
    if isinstance(val, list):
        return val
    else:
        return [val]
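# Minimal usage sketch exercising both branches, using built-in values only:
if __name__ == '__main__':
    assert force_list(1) == [1]
    assert force_list([1, 2]) == [1, 2]  # lists pass through unchanged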
| 14.166667
| 23
| 0.6
|
b9f9645cb1a3c5fd67a37d1cf38d9fd91a68db22
| 994
|
py
|
Python
|
MacOSX/small scripts/pdf-modify.py
|
Lincoln12w/workshop-setups
|
368da10db6231636c49080fe5272478a2ae80078
|
[
"MIT"
] | null | null | null |
MacOSX/small scripts/pdf-modify.py
|
Lincoln12w/workshop-setups
|
368da10db6231636c49080fe5272478a2ae80078
|
[
"MIT"
] | null | null | null |
MacOSX/small scripts/pdf-modify.py
|
Lincoln12w/workshop-setups
|
368da10db6231636c49080fe5272478a2ae80078
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
Description:
Modify the pdf files.
Author: lincoln12w
Github: https://github.com/Lincoln12w
Module:
APIs:
Examples:
python pdf-modify.py test.pdf 0 # remove the first page of the pdf.
Modify History
--------------
00a 24oct17 lzw create.
01a 20mar18 lzw never raised the exception in `generic.py:632`.
"""
import sys
from PyPDF2 import PdfFileReader, PdfFileMerger, PdfFileWriter
def rmPages(filename, pages):
with open(filename, 'rb') as pdffile:
originfile = PdfFileReader(pdffile)
newfile = PdfFileMerger()
write = PdfFileWriter()
for index in range(originfile.getNumPages()):
if index not in pages:
newfile.append(originfile, pages=(index, index+1))
with open(filename[:-4] + "-new.pdf", "wb") as newpdf:
newfile.write(newpdf)
if __name__ == '__main__':
filename = sys.argv[1]
pages = [int(page) for page in sys.argv[2:]]
rmPages(filename, pages)
| 22.088889
| 79
| 0.651911
|
f010c204328e6440cdeaa222ba88931993792b97
| 461
|
py
|
Python
|
democracy_club/apps/everyelection/migrations/0007_auto_20160710_1136.py
|
chris48s/Website
|
efabd478d617a40d58304b3e9cce9da343cfaa78
|
[
"BSD-3-Clause"
] | null | null | null |
democracy_club/apps/everyelection/migrations/0007_auto_20160710_1136.py
|
chris48s/Website
|
efabd478d617a40d58304b3e9cce9da343cfaa78
|
[
"BSD-3-Clause"
] | null | null | null |
democracy_club/apps/everyelection/migrations/0007_auto_20160710_1136.py
|
chris48s/Website
|
efabd478d617a40d58304b3e9cce9da343cfaa78
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('everyelection', '0006_authorityelectionposition_seats'),
]
operations = [
migrations.AlterField(
model_name='authorityelectionposition',
name='seats',
field=models.IntegerField(blank=True, default=0, null=True),
),
]
| 23.05
| 72
| 0.642082
|
727c11499b5d7a53ff01e27f13f2cd10843c7a3c
| 2,632
|
py
|
Python
|
lldb/test/API/python_api/findvalue_duplist/TestSBFrameFindValue.py
|
mkinsner/llvm
|
589d48844edb12cd357b3024248b93d64b6760bf
|
[
"Apache-2.0"
] | 2,338
|
2018-06-19T17:34:51.000Z
|
2022-03-31T11:00:37.000Z
|
lldb/test/API/python_api/findvalue_duplist/TestSBFrameFindValue.py
|
mkinsner/llvm
|
589d48844edb12cd357b3024248b93d64b6760bf
|
[
"Apache-2.0"
] | 3,740
|
2019-01-23T15:36:48.000Z
|
2022-03-31T22:01:13.000Z
|
lldb/test/API/python_api/findvalue_duplist/TestSBFrameFindValue.py
|
mkinsner/llvm
|
589d48844edb12cd357b3024248b93d64b6760bf
|
[
"Apache-2.0"
] | 500
|
2019-01-23T07:49:22.000Z
|
2022-03-30T02:59:37.000Z
|
"""Test that SBFrame::FindValue finds things but does not duplicate the entire variables list"""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class SBFrameFindValueTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
NO_DEBUG_INFO_TESTCASE = True
def test_formatters_api(self):
"""Test that SBFrame::FindValue finds things but does not duplicate the entire variables list"""
self.build()
self.setTearDownCleanup()
exe = self.getBuildArtifact("a.out")
# Create the target
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# Set the breakpoints
breakpoint = target.BreakpointCreateBySourceRegex(
'Set breakpoint here', lldb.SBFileSpec("main.cpp"))
self.assertTrue(breakpoint.GetNumLocations() > 0, VALID_BREAKPOINT)
# Launch the process, and do not stop at the entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
self.assertTrue(process, PROCESS_IS_VALID)
# Frame #0 should be at our breakpoint.
threads = lldbutil.get_threads_stopped_at_breakpoint(
process, breakpoint)
self.assertEquals(len(threads), 1)
self.thread = threads[0]
self.frame = self.thread.frames[0]
self.assertTrue(self.frame, "Frame 0 is valid.")
self.assertTrue(
self.frame.GetVariables(
True,
True,
False,
True).GetSize() == 2,
"variable count is off")
self.assertFalse(
self.frame.FindValue(
"NoSuchThing",
lldb.eValueTypeVariableArgument,
lldb.eDynamicCanRunTarget).IsValid(),
"found something that should not be here")
self.assertTrue(
self.frame.GetVariables(
True,
True,
False,
True).GetSize() == 2,
"variable count is off after failed FindValue()")
self.assertTrue(
self.frame.FindValue(
"a",
lldb.eValueTypeVariableArgument,
lldb.eDynamicCanRunTarget).IsValid(),
"FindValue() didn't find an argument")
self.assertTrue(
self.frame.GetVariables(
True,
True,
False,
True).GetSize() == 2,
"variable count is off after successful FindValue()")
| 32.9
| 104
| 0.590426
|
841f6289746a3ec64b7e33669a7b0afac7ba74e2
| 30,712
|
py
|
Python
|
src/solarflarebot.py
|
hafizhadi/solarflarebot
|
6fc1080be19e4e8ccb9c7e8537986c0419e79877
|
[
"MIT"
] | null | null | null |
src/solarflarebot.py
|
hafizhadi/solarflarebot
|
6fc1080be19e4e8ccb9c7e8537986c0419e79877
|
[
"MIT"
] | null | null | null |
src/solarflarebot.py
|
hafizhadi/solarflarebot
|
6fc1080be19e4e8ccb9c7e8537986c0419e79877
|
[
"MIT"
] | null | null | null |
import os
import re
import copy
import random
import asyncio
import json
from dotenv import load_dotenv
from string import Template
import discord
from discord import ChannelType as ctype
from discord.ext import commands
#### BOT LEVEL VARIABLES ####
# Environments
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
GUILD = os.getenv('DISCORD_GUILD')
PREFIX = os.getenv('BOT_PREFIX')
# Initialize client
bot = commands.Bot(command_prefix=PREFIX)
#### COMMONS ####
### GLOBAL GAME VARIABLES ###
session_default = {
'game_type': 'X',
'game_state': 0, # 1: Initialization + Name Gathering | 2-3-4: Round 1-2-3 | 0: Game inactive
'current_team': 'B',
'scores': {'A': 0, 'B': 0},
'can_answer': False,
'current_player': None,
# Teams
'player_list': set([]) # List of (user id, username, team, turn)
}
game_name = {
'M': 'Monikers',
'W': 'Wavelength'
}
session = copy.deepcopy(session_default)
### HELPER FUNCTIONS ###
async def GetSession():
serializableSession = {}
for key, value in session.items():
if isinstance(value, set):
serializableSession[key] = list(value)
else:
serializableSession[key] = value
return serializableSession
async def ResetGlobal():
global session
session = copy.deepcopy(session_default)
## MESSAGING ##
async def PaddedSend(ctx, message, front=0, back=0):
await asyncio.sleep(front)
await ctx.send(message)
await asyncio.sleep(back)
async def DoubleSend(ctx, message, front=0, back=0):
await asyncio.sleep(front)
await bot.get_user(session['current_player']).send(message)
await ctx.send(message)
await asyncio.sleep(back)
## PLAYER STUFF ##
# Join game, return a boolean for success flag
async def JoinGame(ctx):
if (ctx.author.id, ctx.author.name, None, False) in session['player_list']:
await PaddedSend(ctx, f'''Player {ctx.author.name} has already joined though?''')
return False
else:
session['player_list'].add((ctx.author.id, ctx.author.name, None, False))
await PaddedSend(ctx, f'''Player {ctx.author.name} joined the {game_name[session['game_type']]} session!''')
return True
# Leave game, return a boolean for success flag
async def LeaveGame(ctx):
if (ctx.author.id, ctx.author.name, None, False) in session['player_list']:
session['player_list'].remove((ctx.author.id, ctx.author.name, None, False))
await PaddedSend(ctx, f'''You're leaving {ctx.author.name}? Well then...''')
return True
else:
await PaddedSend(ctx, f'''You haven't joined yet though, {ctx.author.name}?''')
return False
# Randomly assign teams to player and set all turn to False
async def AssignTeams():
shuffledlist = random.sample(list(session['player_list']), len(session['player_list']))
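    # the first half of the shuffled order lands on team B, the second half on team A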
session['player_list'] = set([(player[0], player[1], ('A' if len(shuffledlist) // 2 <= idx else 'B'), False) for idx, player in enumerate(shuffledlist)])
# Display playerlist
async def DisplayPlayers(ctx, readycondition):
list_message = '''
    The people currently playing are: \n {}
'''.format('\n'.join(
[f'''{idx+1}. {name[1]}{': Ready' if(readycondition(name)) else ''}''' for idx, name in enumerate(list(session['player_list']))]))
await PaddedSend(ctx, list_message)
# Send teams
async def DisplayTeams(ctx):
team_a = [player for player in session['player_list'] if player[2] == 'A']
team_b = [player for player in session['player_list'] if player[2] == 'B']
copy_a = '''Team A: \n {}'''.format('\n'.join([f'{idx+1}. {player[1]}' for idx, player in enumerate(team_a)]))
copy_b = '''Team B: \n {}'''.format('\n'.join([f'{idx+1}. {player[1]}' for idx, player in enumerate(team_b)]))
await PaddedSend(ctx, copy_a + '\n' + copy_b)
# Get the next player without turn
async def PlayerCandidates():
return [p for p in session['player_list'] if ((p[3] == False) and (p[2] == session['current_team']))]
# Change current player, return name
async def ChangeCurrentPlayer(ctx):
candidates = await PlayerCandidates()
# Check if all answered, if so then reset
if len(candidates) == 0:
session['player_list'] = set([(p[0], p[1], p[2], False) for p in session['player_list']])
candidates = await PlayerCandidates()
# Change next player
next_player = sorted(candidates)[0]
session['current_player'] = next_player[0]
# Mark turn
session['player_list'].remove(next_player)
session['player_list'].add((next_player[0], next_player[1], next_player[2], True))
return next_player
### CHECKS ###
def CheckChannel(channeltype, exclusion=False):
def predicate(ctx):
return (not exclusion) == (ctx.channel.type in channeltype)
return commands.check(predicate)
def CheckState(game_type, game_state, exclusion=False):
def predicate(ctx):
return (session['game_type'] == game_type) and ((not exclusion) == (session['game_state'] in game_state))
return commands.check(predicate)
def CheckPlayer(team=False):
def predicate(ctx):
player_ids = []
if not team: # Just check player currently joining
player_ids = [player[0] for player in session['player_list']]
else: # Check currently playing team
player_ids = [player[0] for player in session['player_list'] if player[2] == session['current_team']]
return (ctx.author.id in player_ids)
return commands.check(predicate)
#### MONIKERS ####
### GLOBAL GAME VARIABLES ###
#mon_qs = json.load(open(os.path.dirname(__file__) + "/../data/monQuestions.json"))
mon_copies = json.load(open(os.path.dirname(__file__) + "/../data/monCopy.json"))
mon_default = {
# Configurables
'game_type': 0, # 0: No description | 1: With description
'name_quota': 5, # 3 is the bare minimum man
'round_length': 75,
'round_multiplier': 2,
'streak_multiplier': 0.25,
'max_streak': 5,
'current_streak': 0,
# Questions
'current_name': None,
'name_list': set([]), # List of tuple (name:string, user id, status:bool)
}
mon_session = copy.deepcopy(mon_default)
### COPIES ###
mon_round_instruction = mon_copies['round_instructions']
mon_streak_messages = mon_copies['streak_messages']
mon_rules = Template("\n".join(mon_copies['rules'])).substitute(rlength=mon_session['round_length'])
mon_init_message = Template("\n".join(mon_copies['init'])).substitute(prefix=PREFIX)
mon_join_message = Template("\n".join(mon_copies['join'])).substitute(prefix=PREFIX, quota=mon_session['name_quota'])
### HELPER FUNCTIONS ###
async def ResetMoniker():
await ResetGlobal()
global mon_session
mon_session = copy.deepcopy(mon_default)
# Convert all sets to list so they can be dumped as JSON
async def GetMonSession():
serializableSession = {
'global': {},
'moniker': {}
}
serializableSession['global'] = await GetSession()
for key, value in mon_session.items():
if isinstance(value, set):
serializableSession['moniker'][key] = list(value)
else:
serializableSession['moniker'][key] = value
return serializableSession
# Change currently guessed name and inform currently explaining player
async def ChangeName(new_name):
mon_session['current_name'] = new_name
current_player = bot.get_user(session['current_player'])
await current_player.send(f'''Your next word is: {new_name[0]}''')
# Get one names that hasn't been answered yet
async def GetUnansweredName():
unanswereds = [(name[0], name[1]) for name in mon_session['name_list'] if name[2] == False]
# Check if all answered
if len(unanswereds) > 1:
next_name = random.choice([name for name in unanswereds if name != mon_session['current_name']])
return next_name
elif len(unanswereds) == 1:
return unanswereds[0]
else:
return None
# Scoring Logic
async def MonikerScore():
current_round = session['game_state'] - 1
basic_score = 1000 * (1 + (current_round * mon_session['round_multiplier']))
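    # e.g. with the default round_multiplier of 2, rounds 1-3 are worth 3000, 5000 and 7000 base points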
streak_score = 0
# Start streak if behind or streak has already started
if min(session['scores'].values()) == session['scores'][session['current_team']] or mon_session['current_streak'] > 0:
streak_score = basic_score * mon_session['current_streak'] * mon_session['streak_multiplier']
mon_session['current_streak'] = min(mon_session['max_streak'], mon_session['current_streak'] + 1)
session['scores'][session['current_team']] = session['scores'][session['current_team']] + basic_score + streak_score
return basic_score + streak_score
# Advance the game to next round if any
async def AdvanceRound(ctx):
session['game_state'] = session['game_state'] + 1
if session['game_state'] <= 4:
# Reset round states and remove 1 name per player
mon_session['name_list'] = set([(np[0], np[1], False) for np in mon_session['name_list']])
mon_session['name_list'] = set(random.sample([np for np in mon_session['name_list']], len(mon_session['name_list']) - len(session['player_list'])))
await PaddedSend(ctx, f'''Type "{PREFIX}m go" to begin the next round.''')
else:
if session['scores']['A'] > session['scores']['B']:
await PaddedSend(ctx, '''Team A wins! I guess I misjudged you guys.''')
elif session['scores']['B'] > session['scores']['A']:
await PaddedSend(ctx, '''Team B wins! That was a close game.''')
else:
await PaddedSend(ctx, '''It's a draw?''')
await ResetMoniker()
### BOT COMMANDS ###
@bot.group(name='monikers')
async def _monikers(ctx):
# Basic sanity checks
if ctx.invoked_subcommand is None:
await PaddedSend(ctx,
f'''You must have mistyped your command.''')
# Aliases
@bot.command(name='mon')
async def m_mon(ctx):
await _monikers.invoke(ctx)
@bot.command(name='m')
async def m_m(ctx):
await _monikers.invoke(ctx)
# Priority commands, available in all game state
# 1. Debug
# 2. Help
# 3. Abort the game
# 4. List players
@_monikers.command(name='debug')
async def m_debug(ctx, *args):
print(ctx, json.dumps(await GetMonSession(), sort_keys=True))
@_monikers.command(name='help')
async def m_help(ctx, *args):
await ctx.send_help()
@_monikers.command(name='abort')
@CheckState('M', [0], exclusion=True)
@CheckChannel([ctype.text])
async def m_abort(ctx, *args):
await ResetMoniker()
await PaddedSend(ctx, 'You want to end it? ... well if you say so. Tell me if you changed your mind.')
@_monikers.command(name='playerlist')
@CheckState('M', [0], exclusion=True)
async def m_playerlist(ctx, *args):
await DisplayPlayers(ctx, lambda x: len([np for np in mon_session['name_list'] if np[1] == x[0]]) >= mon_session['name_quota'])
# Pre-game - possible actions:
@_monikers.command(name='init')
@CheckState('X', [0])
@CheckChannel([ctype.text])
async def m_init(ctx, *args):
await ResetMoniker()
session['game_type'] = 'M'
session['game_state'] = 1
await PaddedSend(ctx, mon_init_message)
# Initialization state - possible actions:
# 1. Add yourself to the list of players
# 2. Remove yourself from the list of players
# 3. Add name to the pool
# 4. Remove name to the pool
# 5. List all the names you've inputted to the pool
# 6. Start the game IF AT LEAST THERE'S 4 PLAYERS AND ALL PLAYERS HAVE INPUTTED 5 NAMES
@_monikers.command(name='rules')
@CheckState('M', [1])
@CheckChannel([ctype.text, ctype.private])
async def m_rules(ctx):
await PaddedSend(ctx.author, mon_rules)
@_monikers.command(name='join')
@CheckState('M', [1])
@CheckChannel([ctype.text])
async def m_join(ctx, *args):
if await JoinGame(ctx):
await PaddedSend(ctx.author, mon_join_message)
@_monikers.command(name='leave')
@CheckState('M', [1])
@CheckChannel([ctype.text])
async def m_leave(ctx, *args):
if await LeaveGame(ctx):
mon_session['name_list'] = set([np for np in mon_session['name_list'] if np[1] != ctx.author.id])
@_monikers.command(name='add')
@CheckState('M', [1])
@CheckPlayer()
@CheckChannel([ctype.private])
async def m_add(ctx, *, arg):
# Check if already more than enough names
if(len([np for np in mon_session['name_list'] if np[1] == ctx.author.id]) >= mon_session['name_quota']):
await PaddedSend(ctx, f'''You've given me more than enough. Do you want me to remove some you've already mentioned?''')
return
    # Preprocess the name: lowercase it and strip punctuation
name = arg.lower()
name = re.sub(r'([^\s\w]|_)+', '', name)
# Add to list of names if not exist yet
if (name, ctx.author.id, False) not in mon_session['name_list']:
mon_session['name_list'].add((name, ctx.author.id, False))
await PaddedSend(ctx, f'''Very well, I added {name} to your list of names.''')
else:
await PaddedSend(ctx, f'''I already see {name} in your list, you might want to choose other names.''')
@_monikers.command(name='remove')
@CheckState('M', [1])
@CheckPlayer()
@CheckChannel([ctype.private])
async def m_remove(ctx, *, arg):
# Add to list of names if not exist yet
name = arg.lower()
if (name, ctx.author.id, False) in mon_session['name_list']:
mon_session['name_list'].remove((name, ctx.author.id, False))
await PaddedSend(ctx, f'''So you changed your mind? I'll remove {name} from your list then.''')
else:
        await PaddedSend(ctx, f'''I don't see {name} here... you might want to add it instead.''')
@_monikers.command(name='listnames')
@CheckState('M', [1])
@CheckPlayer()
@CheckChannel([ctype.private])
async def m_listnames(ctx):
list_names = [np[0] for np in mon_session['name_list'] if np[1] == ctx.author.id]
list_message = '''
Here's what you have so far: \n {}
'''.format('\n'.join([f'{idx+1}. {np}' for idx, np in enumerate(list_names)]))
await PaddedSend(ctx, list_message)
@_monikers.command(name='start')
@CheckState('M', [1])
@CheckPlayer()
@CheckChannel([ctype.text])
async def m_start(ctx, *args):
player_count = len(session['player_list'])
if (player_count >= 4) and (len(mon_session['name_list']) >= (player_count * mon_session['name_quota'])):
# DEBUG: if (player_count >= 1) and (len(mon_session['name_list']) >= (player_count * mon_session['name_quota'])):
session['game_state'] = 2
await AssignTeams()
await DisplayTeams(ctx)
await PaddedSend(ctx, f'Type "{PREFIX}m go" if you want me to start.')
elif len(session['player_list']) < 1:
await PaddedSend(ctx, '''I'm afraid we don't have enough people here...''')
else:
await PaddedSend(ctx, '''Wait... somebody hasn't given me enough names yet!''')
# MAIN GAME LOOP - possible actions:
# 1. Start the round
# 2. GUESSER: guess
# 3. EXPLAINER: skip
@_monikers.command(name='go')
@commands.max_concurrency(1)
@CheckState('M', [2, 3, 4])
@CheckPlayer()
@CheckChannel([ctype.text])
async def m_go(ctx, *args):
# Change turn
session['current_team'] = {'A':'B', 'B':'A'}[session['current_team']]
await PaddedSend(ctx,
f'''Round {session['game_state'] - 1}: Team {session['current_team']}
{random.choice(mon_round_instruction[str(session['game_state']-1)])}
Type "{PREFIX}m ? [answer]" to answer.''')
# Get player and name
next_player = await ChangeCurrentPlayer(ctx)
await PaddedSend(ctx, f'The next player to explain is {next_player[1]}!')
# Countdown
await PaddedSend(ctx, 'Get ready. 3...', front=3)
await PaddedSend(ctx, '2...', front=1)
await PaddedSend(ctx, '1...', front=1)
await PaddedSend(ctx, 'Go.', front=1)
# Get name
next_name = await GetUnansweredName()
await ChangeName(next_name)
# Countdown logic
time_left = mon_session['round_length']
session['can_answer'] = True
mon_session['current_streak'] = 0
for reminder in [30, 10, 5]:
if time_left > reminder:
await DoubleSend(ctx, f'{reminder} more seconds!', front=time_left - reminder)
time_left = reminder
await(DoubleSend(ctx, '''And that's the end of the round. I ask you all to stop guessing now.''', front=time_left))
session['can_answer'] = False
mon_session['current_name'] = None
await PaddedSend(ctx,
f'''CURRENT SCORE
# Team A: {session['scores']['A']} points
# Team B: {session['scores']['B']} points''')
await PaddedSend(ctx, f'''Words left: {len([np for np in mon_session['name_list'] if np[2] == False])}''')
# Check round end
if await GetUnansweredName() == None:
await AdvanceRound(ctx)
else:
await PaddedSend(ctx,
'''Next is Team {}'s turn.
Are you ready? Type "{}m go" to start.'''.format({'A':'B', 'B':'A'}[session['current_team']], PREFIX))
@_monikers.command(name='?')
@commands.max_concurrency(1, wait=True)
@CheckState('M', [2, 3, 4])
@CheckPlayer(team=True)
@CheckChannel([ctype.text])
async def m_guess(ctx, *, args):
if session['can_answer'] and ctx.author.id != session['current_player']:
# DEBUG: if session['can_answer']:
# Send guess to explainer
await bot.get_user(session['current_player']).send(f'Your team guessed {args}.')
if args.lower() == mon_session['current_name'][0]:
# Send message
score = await MonikerScore()
await PaddedSend(ctx, f'{args} is correct! {score} points!')
await bot.get_user(session['current_player']).send('''And that is correct.''')
if mon_session['current_streak'] > 0: await PaddedSend(ctx, mon_streak_messages[str(mon_session['current_streak'])])
# Update status
mon_session['name_list'].remove((mon_session['current_name'][0], mon_session['current_name'][1], False))
mon_session['name_list'].add((mon_session['current_name'][0], mon_session['current_name'][1], True))
# Go to next name
next_name = await GetUnansweredName()
if next_name == None:
await PaddedSend(ctx, '''You've guessed all of the names. I will give you time to celebrate until the end of this round.''')
session['can_answer'] = False
else:
await ChangeName(next_name)
@_monikers.command(name='skip')
@commands.max_concurrency(1, wait=True)
@commands.cooldown(1, 1)
@CheckState('M', [2, 3, 4])
@CheckPlayer(team=True)
@CheckChannel([ctype.private])
async def m_skip(ctx):
# Go to next name
if session['can_answer'] and ctx.author.id == session['current_player']:
await PaddedSend(ctx, 'You skipped.')
mon_session['current_streak'] = 0 # reset streak
next_name = await GetUnansweredName()
await ChangeName(next_name)
#### WAVELENGTH ####
### GLOBAL GAME VARIABLES ###
wav_qs = json.load(open(os.path.dirname(__file__) + "/../data/wavQuestions.json"))
wav_copies = json.load(open(os.path.dirname(__file__) + "/../data/wavCopy.json"))
wav_default = {
# Configurables
'target_score': 10,
# Question
'current_clue': 'PINEAPPLE PIZZA',
'current_prompts': ('ETHICAL FOOD OR BEVERAGES', 'UNETHICAL FOOD OR BEVERAGES'),
'current_target': 1,
'current_position': 18,
    'question_list': set([(x[0], x[1]) for x in wav_qs]), # List of (prompt_a, prompt_b) tuples
}
wav_session = copy.deepcopy(wav_default)
### COPIES ###
wav_rules = "\n".join(wav_copies['rules'])
wav_init_message = Template("\n".join(wav_copies['init'])).substitute(prefix=PREFIX)
wav_score_message = wav_copies['score_message']
### HELPER FUNCTIONS ###
async def ResetWavelength():
await ResetGlobal()
global wav_session
wav_session = copy.deepcopy(wav_default)
# Convert all sets to list so they can be dumped as JSON
async def GetWavSession():
serializableSession = {
'global': {},
'wavelength': {}
}
serializableSession['global'] = await GetSession()
    for key, value in wav_session.items():
if isinstance(value, set):
serializableSession['wavelength'][key] = list(value)
else:
serializableSession['wavelength'][key] = value
return serializableSession
## DRAWING ##
# Process sentence into array of acceptable words
async def ProcessSentence(width, sentence):
pWords = []
for word in sentence.split(' '):
if len(word) <= (width - 4):
pWords.append(word)
else:
x = word
while len(x) > (width - 4):
pWords.append(x[:width - 4])
x = x[width - 4:]
pWords.append(x)
return pWords
# Return a nice, boxed sentence
async def BoxedSentence(width, sentence):
words = await ProcessSentence(width, sentence)
arrays = [
f''' {'_' * (width - 2)} ''',
f'''|{' ' * (width - 2)}|'''
]
cRow = ''
for word in words:
if len(cRow) + len(word) + 1 <= (width - 4):
cRow = f'''{cRow} {word}'''.lstrip(' ')
else:
arrays.append(f'''| {cRow}{' ' * (width - 4 - len(cRow))} |''')
cRow = word
arrays.append(f'''| {cRow}{' ' * (width - 4 - len(cRow))} |''')
arrays.append(f'''|{'_' * (width - 2)}|''')
return arrays
# Return the 3 x 35 meter; [1-35]
async def Meter(arrow, answer, closed=True):
meter = []
meter.append(f''' {' ' * (arrow-1)}|{' ' * (35-arrow)} ''')
meter.append(f''' {' ' * (arrow-1)}▼{' ' * (35-arrow)} ''')
meter.append(f'''<{'.' * 10}{'o' * 5}{'●' * 2}{'▲' * 1}{'●' * 2}{'o' * 5}{'.' * 10}>''')
if closed:
meter.append(f'''<{'■' * 35}>''')
else:
row4 = [' '] * 35
row4[max(0,answer-2)] = '-'
row4[min(34,answer)] = '-'
row4[answer-1] = 'X'
row4 = f''' {''.join(row4)} '''
meter.append(row4)
meter.append(f'''<.........[THE TRUTH METER].........>''')
return meter
# Combine box and meter
async def FullDisplay(box1, box2, meter, boxprompt=[]):
height = max(len(box1), len(box2), len(meter))
display = []
# Add answer if any
for line in boxprompt:
display.append(f'''{' ' * (len(box1[0])+1)}{line}''')
# Pad stuff
box1 = [(' ' * len(box1[0]))] * (height - len(box1)) + box1
box2 = [(' ' * len(box2[0]))] * (height - len(box2)) + box2
meter = [(' ' * len(meter[0]))] * (height - len(meter)) + meter
display = display + [box1[i] + meter[i] + box2[i] for i in range(height)]
return display
# Print the display
async def PrintDisplay(ctx, closed=True):
box1 = await BoxedSentence(15, wav_session['current_prompts'][0])
box2 = await BoxedSentence(15, wav_session['current_prompts'][1])
meter = await Meter(wav_session['current_position'], wav_session['current_target'], closed=closed)
if wav_session['current_clue'] == None:
boxprompt = []
else:
boxprompt = await BoxedSentence(35, wav_session['current_clue'])
full = '\n'.join(await FullDisplay(box1, box2, meter, boxprompt=boxprompt))
await PaddedSend(ctx, f'''```{full}```''')
# Entire round initialization logic
async def QuestionPhase(ctx):
session['game_state'] = 3
session['current_team'] = {'A':'B', 'B':'A'}[session['current_team']]
await PaddedSend(ctx, f'''Team {session['current_team']} turn!''')
# Get player and name
next_player = await ChangeCurrentPlayer(ctx)
await PaddedSend(ctx, f'''{next_player[1]} is playing next; go check your direct message.''')
# Get question, and target
wav_session['current_prompts'] = random.choice(list(wav_session['question_list']))
wav_session['current_target'] = random.randint(1, 35)
wav_session['current_position'] = 18
wav_session['current_clue'] = None
player = bot.get_user(next_player[0])
await player.send('''Here is your question and the target position:''')
await PrintDisplay(player, closed=False)
# Get prompt from current player
answered = False
while not answered:
await player.send('What prompt do you want to give for the question?')
def check(m):
return (m.author == player and m.channel.type == ctype.private)
def checkYes(m):
return check(m) and m.content.lower() in ('y', 'n')
prompt = await bot.wait_for('message', check=check)
await player.send(f'Is "{prompt.content}" okay? (Y/N)')
msg = await bot.wait_for('message', check=checkYes)
if msg.content.lower() == 'y':
await player.send(f'Got it.')
wav_session['current_clue'] = prompt.content.upper()
answered = True
else:
await player.send(f'''You're going to revise your prompt then?''')
# Print completed display to all
await PaddedSend(ctx, '''Okay, here's the question: ''')
await PrintDisplay(ctx, closed=True)
await PaddedSend(ctx,
f'''Adjust the meter by "{PREFIX}w +" and "{PREFIX}w -".
If you're happy with your answer, lock it in by using "{PREFIX}w lock"!''')
# Scoring phase
async def ScoringPhase(ctx):
score = max(0, 5 - abs(wav_session['current_target'] - wav_session['current_position']))
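    # e.g. locking in 2 ticks away from the target scores 3 points; 5 or more ticks away scores 0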
await PaddedSend(ctx, wav_score_message[str(score)])
session['scores'][session['current_team']] = session['scores'][session['current_team']] + score
# Print score
await PaddedSend(ctx,
f'''CURRENT SCORE
# Team A: {session['scores']['A']} points
# Team B: {session['scores']['B']} points''')
# Check for end condition
if session['scores'][session['current_team']] >= wav_session['target_score']:
await PaddedSend(ctx,
f'''And that's the game! Congratulations team {session['current_team']}!
To play another session, type {PREFIX}w init.''')
await ResetWavelength()
else:
await PaddedSend(ctx, f'''Game's not over, type {PREFIX}w go to continue.''')
session['game_state'] = 2
### BOT COMMANDS ###
@bot.group(name='wavelength')
async def _wavelength(ctx):
# Basic sanity checks
if ctx.invoked_subcommand is None:
await PaddedSend(ctx,
f'''You must have mistyped your command.''')
# Aliases
@bot.command(name='wav')
async def w_wav(ctx):
await _wavelength.invoke(ctx)
@bot.command(name='w')
async def w_w(ctx):
await _wavelength.invoke(ctx)
# Priority commands, available in all game state
# 1. Debug
# 2. Help
# 3. Abort the game
# 4. List players
@_wavelength.command(name='debug')
async def w_debug(ctx, *args):
print(ctx, json.dumps(await GetWavSession(), sort_keys=True))
@_wavelength.command(name='help')
async def w_help(ctx, *args):
await ctx.send_help()
@_wavelength.command(name='abort')
@CheckState('W', [0], exclusion=True)
@CheckChannel([ctype.text])
async def w_abort(ctx, *args):
await ResetWavelength()
await PaddedSend(ctx, 'You want to end it? ... well if you say so. Tell me if you changed your mind.')
@_wavelength.command(name='playerlist')
@CheckState('W', [0], exclusion=True)
async def w_playerlist(ctx, *args):
await DisplayPlayers(ctx, lambda x: True)
# Pre-game - possible actions:
@_wavelength.command(name='init')
@CheckState('X', [0])
@CheckChannel([ctype.text])
async def w_init(ctx, *args):
await ResetWavelength()
session['game_type'] = 'W'
session['game_state'] = 1
await PaddedSend(ctx, wav_init_message)
# Initialization state - possible actions:
# 1. Add yourself to the list of players
# 2. Remove yourself from the list of players
# 3. Start the game IF AT LEAST THERE'S 4 PLAYERS
@_wavelength.command(name='rules')
@CheckState('W', [1])
@CheckChannel([ctype.text, ctype.private])
async def w_rules(ctx):
await PaddedSend(ctx.author, wav_rules)
await PrintDisplay(ctx.author, closed=False)
@_wavelength.command(name='join')
@CheckState('W', [1])
@CheckChannel([ctype.text])
async def w_join(ctx, *args):
await JoinGame(ctx)
@_wavelength.command(name='leave')
@CheckState('W', [1])
@CheckChannel([ctype.text])
async def w_leave(ctx, *args):
await LeaveGame(ctx)
@_wavelength.command(name='start')
@CheckState('W', [1])
@CheckPlayer()
@CheckChannel([ctype.text])
async def w_start(ctx, *args):
# DEBUG: if (len(session['player_list']) >= 4):
if (len(session['player_list']) >= 1):
session['game_state'] = 2
await AssignTeams()
await DisplayTeams(ctx)
# Start first round
await w_go.invoke(ctx)
else:
await PaddedSend(ctx, '''I'm afraid we don't have enough people here...''')
## MAIN GAME LOOP ##
# 1. Start the round
# 2. GUESSER: +, -, lock
@_wavelength.command(name='go')
@commands.max_concurrency(1)
@CheckState('W', [2])
@CheckPlayer()
@CheckChannel([ctype.text])
async def w_go(ctx, *args):
await QuestionPhase(ctx)
@_wavelength.command(name='+')
@commands.max_concurrency(1)
@CheckState('W', [3])
@CheckPlayer(team=True)
@CheckChannel([ctype.text])
async def w_plus(ctx, *, arg=1):
if ctx.author.id != session['current_player']:
wav_session['current_position'] = min(wav_session['current_position'] + int(arg), 35)
await PrintDisplay(ctx)
@_wavelength.command(name='-')
@commands.max_concurrency(1)
@CheckState('W', [3])
@CheckPlayer(team=True)
@CheckChannel([ctype.text])
async def w_minus(ctx, *, arg=1):
if ctx.author.id != session['current_player']:
wav_session['current_position'] = max(wav_session['current_position'] - int(arg), 1)
await PrintDisplay(ctx)
@_wavelength.command(name='lock')
@commands.max_concurrency(1)
@CheckState('W', [3])
@CheckPlayer(team=True)
@CheckChannel([ctype.text])
async def w_lock(ctx, *args):
if ctx.author.id != session['current_player']:
def check(m):
return (m.author == ctx.author and m.channel == ctx.channel)
await PaddedSend(ctx, '''Are you 100% sure with your answer? (Y/N)''')
msg = await bot.wait_for('message', check=check)
if msg.content.lower() == 'y':
await PaddedSend(ctx, 'Okay then, locking in!')
            await PaddedSend(ctx, '''Let's see if you've been successful in guessing!''', back=2)
await PaddedSend(ctx, '''*dramatic drumroll noises*''', back=2)
await PrintDisplay(ctx, closed=False)
await ScoringPhase(ctx)
else:
await PaddedSend(ctx, f'''Okay, but don't take too long!''')
#### RUN BOT ####
bot.run(TOKEN)
| 34.820862
| 159
| 0.640173
|
631dfe6b3425aa8fc66f81ff590f184ef006d8b3
| 1,195
|
py
|
Python
|
detector.py
|
alextitonis/Cognitive-Agent
|
071cc2c63dc34259817a1dd390e36b22d405e837
|
[
"MIT"
] | 4
|
2021-12-15T16:41:38.000Z
|
2022-01-28T11:03:17.000Z
|
detector.py
|
alextitonis/Cognitive-Agent
|
071cc2c63dc34259817a1dd390e36b22d405e837
|
[
"MIT"
] | null | null | null |
detector.py
|
alextitonis/Cognitive-Agent
|
071cc2c63dc34259817a1dd390e36b22d405e837
|
[
"MIT"
] | null | null | null |
from imageai.Detection import ObjectDetection
import os
class detector:
def __init__(self, modelPath="../resnet50_coco_best_v2.1.0.h5"):
self.detector = ObjectDetection()
self.detector.setModelTypeAsRetinaNet()
self.detector.setModelPath(modelPath)
self.detector.loadModel()
def detect(self, image_data, input_type="array", output_image_path="imagenew.jpg", minPerc=50):
detections = self.detector.detectObjectsFromImage(input_image=image_data, input_type=input_type, output_image_path=output_image_path, minimum_percentage_probability=minPerc)
res = []
        print('detected:', len(detections), 'objects!')
for eachObject in detections:
print(eachObject["name"] , " : ", eachObject["percentage_probability"], " : ", eachObject["box_points"] )
x1, y1, x2, y2 = eachObject["box_points"]
center = (x1 + x2) / 2, (y1 + y2) / 2
res.append(detectedObject(eachObject["name"], center))
print("--------------------------------")
return res
class detectedObject:
def __init__(self, name, center):
self.name = name
self.center = center
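# Minimal usage sketch (comments only; the model path and image file below are placeholders):
#
#     d = detector(modelPath="resnet50_coco_best_v2.1.0.h5")
#     found = d.detect("street.jpg", input_type="file", minPerc=60)
#     for obj in found:
#         print(obj.name, obj.center)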
| 45.961538
| 181
| 0.638494
|
f8b2b1a859d30766873ac2bae737d817bc43bdfd
| 9,608
|
py
|
Python
|
pwseqdist/pairwise.py
|
kmayerb/pwseqdist
|
60500ece9d76aeb6c7aec836bebcba037148483e
|
[
"MIT"
] | null | null | null |
pwseqdist/pairwise.py
|
kmayerb/pwseqdist
|
60500ece9d76aeb6c7aec836bebcba037148483e
|
[
"MIT"
] | null | null | null |
pwseqdist/pairwise.py
|
kmayerb/pwseqdist
|
60500ece9d76aeb6c7aec836bebcba037148483e
|
[
"MIT"
] | null | null | null |
import multiprocessing
import parmap
import itertools
import numpy as np
import scipy
from .metrics import compute_many, compute_many_rect
__all__ = ['apply_pairwise_sq',
'apply_pairwise_rect']
"""TODO:
Currently I pass all the sequences and some set of indices to compute_many.
Why wouldn't I just send the some of the sequences?
The point was to avoid sending all the pairs of sequences and just send
pairs of indices to the workers. So we'd have to be smart about reducing the
total number of sequences that are needed and then sending just those and
translated indices.
These functions are currently not compatible with the numpy subst_metric
because the seqs input would be provided as a numpy matrix of integers.
I'm not even sure the numpy metric will be faster so not adding now.
Add a numba-compiled version of pairwise_sq and pairwise_rect
- Convert metrics to code that can be compile with numba
This would allow the "outer" loop performing pairwise
distances to also be compiled using numba. The advantage
is that numba-compiled code can use multithreading to run
on multiple CPUs, and making use of shared memory.
I think this could allow for very fast distance calculations"""
def _mati2veci(i, j, n):
veci = scipy.special.comb(n, 2) - scipy.special.comb(n - i, 2) + (j - i - 1)
return int(veci)
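# Worked example: for n = 4 sequences the pairs enumerate in pdist order as
# (0,1)->0, (0,2)->1, (0,3)->2, (1,2)->3, (1,3)->4, (2,3)->5, so _mati2veci(1, 3, 4) == 4.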
def apply_pairwise_sq(seqs, metric, ncpus=1, **kwargs):
"""Calculate distance between all pairs of seqs using metric
and kwargs provided to metric. Will use multiprocessing Pool
if ncpus > 1.
For efficiency, will only compute metric on unique values in
seqs. All values are returned, including redundancies.
Though written to be used for distance calculations,
it is general enough that it could be used to run
any arbitrary function on pairs of elements in seqs.
Parameters
----------
seqs : list
List of sequences provided to metric in pairs.
metric : function
A distance function of the form
func(seq1, seq2, **kwargs)
ncpus : int
Size of the worker pool to be used by multiprocessing
**kwargs : keyword arguments
Additional keyword arguments are supplied to the metric.
Returns
-------
dvec : np.ndarray, length n*(n - 1) / 2
Vector form of the pairwise distance matrix.
Use scipy.distance.squareform to convert to a square matrix"""
"""Set to false to turn on computation of redundant distances"""
uniqify=True
useqs = list(set(seqs))
    if len(useqs) == len(seqs) or not uniqify:
useqs = seqs
translate = False
else:
translate = True
"""itertools.combinations creates the i,j pairs in the same order
as scipy.distance.pdist/squareform"""
pw_indices = list(itertools.combinations(range(len(useqs)), 2))
chunk_func = lambda l, n: [l[i:i + n] for i in range(0, len(l), n)]
chunksz = len(pw_indices) // ncpus
chunked_indices = chunk_func(pw_indices, chunksz)
dtype = type(metric(useqs[0], useqs[0], **kwargs))
if ncpus > 1:
with multiprocessing.Pool(ncpus) as pool:
try:
dists = parmap.map(compute_many,
chunked_indices,
metric,
useqs,
dtype,
**kwargs,
pm_parallel=True,
pm_pool=pool)
except ValueError as err:
print('pwseqdist.apply_pairwise_sq: error with metric %s and multiprocessing, trying on single core' % metric)
dists = parmap.map(compute_many,
chunked_indices,
metric,
useqs,
dtype,
**kwargs,
pm_parallel=False)
print('pwseqdist.apply_pairwise_sq: metric %s could not be spread to multiple processes, ran on single core' % metric)
else:
dists = parmap.map(compute_many,
chunked_indices,
metric,
useqs,
dtype,
**kwargs,
pm_parallel=False)
"""Create translation dict from vector_i to mat_ij coordinates for the
distance matrix of unique seqs (unneccessary, but may be useful later)"""
# uveci2umati = {veci:(i, j) for veci, (i, j) in enumerate(itertools.combinations(range(len(useqs)), 2))}
"""Get the vector form of the useqs"""
"""n = len(useqs)
uvec = np.zeros(scipy.special.comb(n, 2))
for ichunk, dchunk in zip(chunked_indices, dists):
for (i,j), d in zip(ichunk, dchunk):
uvec[_mati2veci(i, j, n)] = d"""
uvec = np.concatenate(dists) # this may be more memory intensive, but should be fine
if translate:
"""Then translate the vector form of the useqs to the vector
form of the seqs"""
vout = np.zeros(int(scipy.special.comb(len(seqs), 2)))
for veci, (i,j), in enumerate(itertools.combinations(range(len(seqs)), 2)):
ui = useqs.index(seqs[i])
uj = useqs.index(seqs[j])
if ui == uj:
vout[veci] = 0
else:
if uj < ui:
uj, ui = ui, uj
vout[veci] = uvec[_mati2veci(ui, uj, len(useqs))]
else:
vout = uvec
return vout
def apply_pairwise_rect(seqs1, seqs2, metric, ncpus=1, **kwargs):
"""Calculate distance between pairs of sequences in seqs1
with sequences in seqs2 using metric and kwargs provided to
metric. Will use multiprocessing Pool if ncpus > 1.
For efficiency, will only compute metric on unique values in
seqs1/seqs2. All values are returned, including redundancies.
Though written to be used for distance calculations,
it is general enough that it could be used to run
any arbitrary function on pairs of elements in seqs.
Parameters
----------
seqs1, seqs2 : lists
Lists of sequences.
metric : function
A distance function of the form
func(seq1, seq2, **kwargs)
ncpus : int
Size of the worker pool to be used by multiprocessing
**kwargs : keyword arguments
Additional keyword arguments are supplied to the metric.
Returns
-------
dvec : np.ndarray, length len(seqs1) * len(seqs2)
Vector form of the pairwise distance rectangle.
indices : np.ndarray, shape [len(seqs1) * len(seqs2), 2]
Contains i,j indices on each row where i (j) is an index
into seqs1 (seqs2) and can be used to recreate a distance rectangle"""
uniqify = True
def _recti2veci(i, j, n2):
"""Translate from rectangle coordinates to vector coordinates"""
return int(i * len(n2) + j)
useqs1 = list(set(seqs1))
    if len(useqs1) == len(seqs1) or not uniqify:
useqs1 = seqs1
translate1 = False
else:
translate1 = True
useqs1 = list(seqs1)
useqs2 = list(set(seqs2))
    if len(useqs2) == len(seqs2) or not uniqify:
useqs2 = seqs2
translate2 = False
else:
translate2 = True
useqs2 = list(seqs2)
pw_indices = list(itertools.product(range(len(useqs1)), range(len(useqs2))))
chunk_func = lambda l, n: [l[i:i + n] for i in range(0, len(l), n)]
chunksz = len(pw_indices)//ncpus
chunked_indices = chunk_func(pw_indices, chunksz)
dtype = type(metric(useqs1[0], useqs2[0], **kwargs))
if ncpus > 1:
with multiprocessing.Pool(ncpus) as pool:
try:
dists = parmap.map(compute_many_rect,
chunked_indices,
metric,
useqs1,
useqs2,
dtype,
**kwargs,
pm_parallel=True,
pm_pool=pool)
except ValueError as err:
print('pwseqdist.apply_pairwise_rect: error with metric %s and multiprocessing, trying on single core' % metric)
dists = parmap.map(compute_many_rect,
chunked_indices,
metric,
useqs1,
useqs2,
dtype,
**kwargs,
pm_parallel=False)
print('pwseqdist.apply_pairwise_rect: metric %s could not be spread to multiple processes, ran on single core' % metric)
else:
dists = parmap.map(compute_many_rect,
chunked_indices,
metric,
useqs1,
useqs2,
dtype,
**kwargs,
pm_parallel=False)
urect = np.concatenate(dists).reshape((len(useqs1), len(useqs2)))
if translate1:
redup_ind = [useqs1.index(s) for s in seqs1]
urect = urect[redup_ind, :]
if translate2:
redup_ind = [useqs2.index(s) for s in seqs2]
urect = urect[:, redup_ind]
return urect
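# Minimal usage sketch (comments only), assuming the two functions are re-exported at the
# package level and using a toy length-difference metric purely for illustration:
#
#     import pwseqdist as pwsd
#     seqs = ['CASSLGEAFF', 'CASSLGETQYF', 'CASSIRSSYEQYF']
#     metric = lambda a, b: abs(len(a) - len(b))
#     dvec = pwsd.apply_pairwise_sq(seqs, metric, ncpus=1)             # condensed vector of 3 distances
#     rect = pwsd.apply_pairwise_rect(seqs[:2], seqs, metric, ncpus=1) # 2 x 3 distance rectangle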
| 39.539095
| 136
| 0.566195
|
b69ebd64531be1834c7c75eb4917495e0f534592
| 50,059
|
py
|
Python
|
allennlp/tests/semparse/worlds/atis_world_test.py
|
MartinoMensio/allennlp
|
9a6dce3997d17185650a5377c9a7a5af0f1ac241
|
[
"Apache-2.0"
] | null | null | null |
allennlp/tests/semparse/worlds/atis_world_test.py
|
MartinoMensio/allennlp
|
9a6dce3997d17185650a5377c9a7a5af0f1ac241
|
[
"Apache-2.0"
] | 1
|
2018-06-08T21:12:54.000Z
|
2018-06-08T21:12:54.000Z
|
allennlp/tests/semparse/worlds/atis_world_test.py
|
MartinoMensio/allennlp
|
9a6dce3997d17185650a5377c9a7a5af0f1ac241
|
[
"Apache-2.0"
] | 1
|
2018-10-22T18:52:14.000Z
|
2018-10-22T18:52:14.000Z
|
import json
from allennlp.common.testing import AllenNlpTestCase
from allennlp.semparse.worlds.atis_world import AtisWorld
class TestAtisWorld(AllenNlpTestCase):
def setUp(self):
super().setUp()
test_filename = self.FIXTURES_ROOT / "data" / "atis" / "sample.json"
self.data = open(test_filename).readlines()
def test_atis_global_actions(self): # pylint: disable=no-self-use
world = AtisWorld([])
valid_actions = world.valid_actions
assert set(valid_actions.keys()) == {'agg',
'agg_func',
'agg_results',
'biexpr',
'binaryop',
'boolean',
'col_ref',
'col_refs',
'condition',
'conditions',
'conj',
'distinct',
'in_clause',
'number',
'pos_value',
'query',
'select_results',
'statement',
'string',
'table_name',
'table_refs',
'ternaryexpr',
'value',
'where_clause'}
assert set(valid_actions['statement']) == {'statement -> [query, ";"]'}
assert set(valid_actions['query']) == \
{'query -> ["(", "SELECT", distinct, select_results, "FROM", table_refs, '
'where_clause, ")"]',
'query -> ["SELECT", distinct, select_results, "FROM", table_refs, '
'where_clause]'}
assert set(valid_actions['select_results']) == \
{'select_results -> [agg]', 'select_results -> [col_refs]'}
assert set(valid_actions['agg']) == \
{'agg -> [agg_func, "(", col_ref, ")"]'}
assert set(valid_actions['agg_func']) == \
{'agg_func -> ["COUNT"]',
'agg_func -> ["MAX"]',
'agg_func -> ["MIN"]'}
assert set(valid_actions['col_refs']) == \
{'col_refs -> [col_ref]', 'col_refs -> [col_ref, ",", col_refs]'}
assert set(valid_actions['table_refs']) == \
{'table_refs -> [table_name]', 'table_refs -> [table_name, ",", table_refs]'}
assert set(valid_actions['where_clause']) == \
{'where_clause -> ["WHERE", "(", conditions, ")"]',
'where_clause -> ["WHERE", conditions]'}
assert set(valid_actions['conditions']) == \
{'conditions -> ["(", conditions, ")", conj, conditions]',
'conditions -> ["(", conditions, ")"]',
'conditions -> ["NOT", conditions]',
'conditions -> [condition, conj, "(", conditions, ")"]',
'conditions -> [condition, conj, conditions]',
'conditions -> [condition]'}
assert set(valid_actions['condition']) == \
{'condition -> [biexpr]',
'condition -> [in_clause]',
'condition -> [ternaryexpr]'}
assert set(valid_actions['in_clause']) == \
{'in_clause -> [col_ref, "IN", query]'}
assert set(valid_actions['biexpr']) == \
{'biexpr -> [col_ref, "LIKE", string]',
'biexpr -> [col_ref, binaryop, value]',
'biexpr -> [value, binaryop, value]'}
assert set(valid_actions['binaryop']) == \
{'binaryop -> ["*"]',
'binaryop -> ["+"]',
'binaryop -> ["-"]',
'binaryop -> ["/"]',
'binaryop -> ["<"]',
'binaryop -> ["<="]',
'binaryop -> ["="]',
'binaryop -> [">"]',
'binaryop -> [">="]',
'binaryop -> ["IS"]'}
assert set(valid_actions['ternaryexpr']) == \
{'ternaryexpr -> [col_ref, "BETWEEN", value, "AND", value]',
'ternaryexpr -> [col_ref, "NOT", "BETWEEN", value, "AND", value]'}
assert set(valid_actions['value']) == \
{'value -> ["NOT", pos_value]',
'value -> [pos_value]'}
assert set(valid_actions['pos_value']) == \
{'pos_value -> ["ALL", query]',
'pos_value -> ["ANY", query]',
'pos_value -> ["NULL"]',
'pos_value -> [agg_results]',
'pos_value -> [boolean]',
'pos_value -> [col_ref]',
'pos_value -> [number]',
'pos_value -> [string]'}
assert set(valid_actions['agg_results']) == \
{('agg_results -> ["(", "SELECT", distinct, agg, "FROM", table_name, '
'where_clause, ")"]'),
'agg_results -> ["SELECT", distinct, agg, "FROM", table_name, where_clause]'}
assert set(valid_actions['boolean']) == \
{'boolean -> ["true"]', 'boolean -> ["false"]'}
assert set(valid_actions['conj']) == \
{'conj -> ["OR"]', 'conj -> ["AND"]'}
assert set(valid_actions['distinct']) == \
{'distinct -> [""]', 'distinct -> ["DISTINCT"]'}
assert set(valid_actions['number']) == \
{'number -> ["0"]',
'number -> ["1"]'}
assert set(valid_actions['string']) == set()
assert set(valid_actions['col_ref']) == \
{'col_ref -> ["*"]',
'col_ref -> ["aircraft", ".", "aircraft_code"]',
'col_ref -> ["aircraft", ".", "aircraft_description"]',
'col_ref -> ["aircraft", ".", "basic_type"]',
'col_ref -> ["aircraft", ".", "manufacturer"]',
'col_ref -> ["aircraft", ".", "pressurized"]',
'col_ref -> ["aircraft", ".", "propulsion"]',
'col_ref -> ["aircraft", ".", "wide_body"]',
'col_ref -> ["airline", ".", "airline_code"]',
'col_ref -> ["airline", ".", "airline_name"]',
'col_ref -> ["airport", ".", "airport_code"]',
'col_ref -> ["airport", ".", "airport_location"]',
'col_ref -> ["airport", ".", "airport_name"]',
'col_ref -> ["airport", ".", "country_name"]',
'col_ref -> ["airport", ".", "minimum_connect_time"]',
'col_ref -> ["airport", ".", "state_code"]',
'col_ref -> ["airport", ".", "time_zone_code"]',
'col_ref -> ["airport_service", ".", "airport_code"]',
'col_ref -> ["airport_service", ".", "city_code"]',
'col_ref -> ["airport_service", ".", "direction"]',
'col_ref -> ["airport_service", ".", "miles_distant"]',
'col_ref -> ["airport_service", ".", "minutes_distant"]',
'col_ref -> ["city", ".", "city_code"]',
'col_ref -> ["city", ".", "city_name"]',
'col_ref -> ["city", ".", "country_name"]',
'col_ref -> ["city", ".", "state_code"]',
'col_ref -> ["city", ".", "time_zone_code"]',
'col_ref -> ["class_of_service", ".", "booking_class"]',
'col_ref -> ["class_of_service", ".", "class_description"]',
'col_ref -> ["class_of_service", ".", "rank"]',
'col_ref -> ["date_day", ".", "day_name"]',
'col_ref -> ["date_day", ".", "day_number"]',
'col_ref -> ["date_day", ".", "month_number"]',
'col_ref -> ["date_day", ".", "year"]',
'col_ref -> ["days", ".", "day_name"]',
'col_ref -> ["days", ".", "days_code"]',
'col_ref -> ["equipment_sequence", ".", "aircraft_code"]',
'col_ref -> ["equipment_sequence", ".", "aircraft_code_sequence"]',
'col_ref -> ["fare", ".", "fare_airline"]',
'col_ref -> ["fare", ".", "fare_basis_code"]',
'col_ref -> ["fare", ".", "fare_id"]',
'col_ref -> ["fare", ".", "from_airport"]',
'col_ref -> ["fare", ".", "one_direction_cost"]',
'col_ref -> ["fare", ".", "restriction_code"]',
'col_ref -> ["fare", ".", "round_trip_cost"]',
'col_ref -> ["fare", ".", "round_trip_required"]',
'col_ref -> ["fare", ".", "to_airport"]',
'col_ref -> ["fare_basis", ".", "basis_days"]',
'col_ref -> ["fare_basis", ".", "booking_class"]',
'col_ref -> ["fare_basis", ".", "class_type"]',
'col_ref -> ["fare_basis", ".", "discounted"]',
'col_ref -> ["fare_basis", ".", "economy"]',
'col_ref -> ["fare_basis", ".", "fare_basis_code"]',
'col_ref -> ["fare_basis", ".", "night"]',
'col_ref -> ["fare_basis", ".", "premium"]',
'col_ref -> ["fare_basis", ".", "season"]',
'col_ref -> ["flight", ".", "aircraft_code_sequence"]',
'col_ref -> ["flight", ".", "airline_code"]',
'col_ref -> ["flight", ".", "airline_flight"]',
'col_ref -> ["flight", ".", "arrival_time"]',
'col_ref -> ["flight", ".", "connections"]',
'col_ref -> ["flight", ".", "departure_time"]',
'col_ref -> ["flight", ".", "dual_carrier"]',
'col_ref -> ["flight", ".", "flight_days"]',
'col_ref -> ["flight", ".", "flight_id"]',
'col_ref -> ["flight", ".", "flight_number"]',
'col_ref -> ["flight", ".", "from_airport"]',
'col_ref -> ["flight", ".", "meal_code"]',
'col_ref -> ["flight", ".", "stops"]',
'col_ref -> ["flight", ".", "time_elapsed"]',
'col_ref -> ["flight", ".", "to_airport"]',
'col_ref -> ["flight_fare", ".", "fare_id"]',
'col_ref -> ["flight_fare", ".", "flight_id"]',
'col_ref -> ["flight_leg", ".", "flight_id"]',
'col_ref -> ["flight_leg", ".", "leg_flight"]',
'col_ref -> ["flight_leg", ".", "leg_number"]',
'col_ref -> ["flight_stop", ".", "arrival_airline"]',
'col_ref -> ["flight_stop", ".", "arrival_flight_number"]',
'col_ref -> ["flight_stop", ".", "arrival_time"]',
'col_ref -> ["flight_stop", ".", "departure_airline"]',
'col_ref -> ["flight_stop", ".", "departure_flight_number"]',
'col_ref -> ["flight_stop", ".", "departure_time"]',
'col_ref -> ["flight_stop", ".", "flight_id"]',
'col_ref -> ["flight_stop", ".", "stop_airport"]',
'col_ref -> ["flight_stop", ".", "stop_days"]',
'col_ref -> ["flight_stop", ".", "stop_number"]',
'col_ref -> ["flight_stop", ".", "stop_time"]',
'col_ref -> ["food_service", ".", "compartment"]',
'col_ref -> ["food_service", ".", "meal_code"]',
'col_ref -> ["food_service", ".", "meal_description"]',
'col_ref -> ["food_service", ".", "meal_number"]',
'col_ref -> ["ground_service", ".", "airport_code"]',
'col_ref -> ["ground_service", ".", "city_code"]',
'col_ref -> ["ground_service", ".", "ground_fare"]',
'col_ref -> ["ground_service", ".", "transport_type"]',
'col_ref -> ["month", ".", "month_name"]',
'col_ref -> ["month", ".", "month_number"]',
'col_ref -> ["restriction", ".", "advance_purchase"]',
'col_ref -> ["restriction", ".", "application"]',
'col_ref -> ["restriction", ".", "maximum_stay"]',
'col_ref -> ["restriction", ".", "minimum_stay"]',
'col_ref -> ["restriction", ".", "no_discounts"]',
'col_ref -> ["restriction", ".", "restriction_code"]',
'col_ref -> ["restriction", ".", "saturday_stay_required"]',
'col_ref -> ["restriction", ".", "stopovers"]',
'col_ref -> ["state", ".", "country_name"]',
'col_ref -> ["state", ".", "state_code"]',
'col_ref -> ["state", ".", "state_name"]'}
assert set(valid_actions['table_name']) == \
{'table_name -> ["aircraft"]',
'table_name -> ["airline"]',
'table_name -> ["airport"]',
'table_name -> ["airport_service"]',
'table_name -> ["city"]',
'table_name -> ["class_of_service"]',
'table_name -> ["date_day"]',
'table_name -> ["days"]',
'table_name -> ["equipment_sequence"]',
'table_name -> ["fare"]',
'table_name -> ["fare_basis"]',
'table_name -> ["flight"]',
'table_name -> ["flight_fare"]',
'table_name -> ["flight_leg"]',
'table_name -> ["flight_stop"]',
'table_name -> ["food_service"]',
'table_name -> ["ground_service"]',
'table_name -> ["month"]',
'table_name -> ["restriction"]',
'table_name -> ["state"]'}
def test_atis_local_actions(self): # pylint: disable=no-self-use
        # Check if the triggers activate correctly
world = AtisWorld(["show me the flights from denver at 12 o'clock"])
assert set(world.valid_actions['number']) == \
{'number -> ["0"]',
'number -> ["1"]',
'number -> ["1200"]',
'number -> ["2400"]'}
assert set(world.valid_actions['string']) == \
{'string -> ["\'DENVER\'"]',
'string -> ["\'DDEN\'"]',
'string -> ["\'AT\'"]'}
world = AtisWorld(["show me the flights from denver at 12 o'clock",
"show me the delta or united flights in afternoon"])
assert set(world.valid_actions['number']) == \
{'number -> ["0"]',
'number -> ["1"]',
'number -> ["1800"]',
'number -> ["1200"]',
'number -> ["2400"]'}
assert set(world.valid_actions['string']) == \
{'string -> ["\'DENVER\'"]',
'string -> ["\'DDEN\'"]',
'string -> ["\'AT\'"]',
'string -> ["\'DL\'"]',
'string -> ["\'UA\'"]',
'string -> ["\'IN\'"]'}
world = AtisWorld(["i would like one coach reservation for \
may ninth from pittsburgh to atlanta leaving \
pittsburgh before 10 o'clock in morning 1991 \
august twenty sixth"])
assert set(world.valid_actions['number']) == \
{'number -> ["0"]',
'number -> ["1"]',
'number -> ["9"]',
'number -> ["8"]',
'number -> ["6"]',
'number -> ["5"]',
'number -> ["26"]',
'number -> ["2200"]',
'number -> ["1991"]',
'number -> ["1200"]',
'number -> ["1000"]'}
assert set(world.valid_actions['string']) == \
{'string -> ["\'COACH\'"]',
'string -> ["\'PITTSBURGH\'"]',
'string -> ["\'PIT\'"]',
'string -> ["\'PPIT\'"]',
'string -> ["\'ATLANTA\'"]',
'string -> ["\'ATL\'"]',
'string -> ["\'MATL\'"]',
'string -> ["\'IN\'"]',
'string -> ["\'MONDAY\'"]'}
def test_atis_simple_action_sequence(self): # pylint: disable=no-self-use
world = AtisWorld([("give me all flights from boston to "
"philadelphia next week arriving after lunch")])
action_sequence = world.get_action_sequence(("(SELECT DISTINCT city . city_code , city . city_name "
"FROM city WHERE ( city.city_name = 'BOSTON' ) );"))
assert action_sequence == ['statement -> [query, ";"]',
'query -> ["(", "SELECT", distinct, select_results, "FROM", table_refs, '
'where_clause, ")"]',
'where_clause -> ["WHERE", "(", conditions, ")"]',
'conditions -> [condition]',
'condition -> [biexpr]',
'biexpr -> [col_ref, binaryop, value]',
'value -> [pos_value]',
'pos_value -> [string]',
'string -> ["\'BOSTON\'"]',
'binaryop -> ["="]',
'col_ref -> ["city", ".", "city_name"]',
'table_refs -> [table_name]',
'table_name -> ["city"]',
'select_results -> [col_refs]',
'col_refs -> [col_ref, ",", col_refs]',
'col_refs -> [col_ref]',
'col_ref -> ["city", ".", "city_name"]',
'col_ref -> ["city", ".", "city_code"]',
'distinct -> ["DISTINCT"]']
action_sequence = world.get_action_sequence(("( SELECT airport_service . airport_code "
"FROM airport_service "
"WHERE airport_service . city_code IN ( "
"SELECT city . city_code FROM city "
"WHERE city.city_name = 'BOSTON' ) ) ;"))
assert action_sequence == ['statement -> [query, ";"]',
'query -> ["(", "SELECT", distinct, select_results, "FROM", table_refs, '
'where_clause, ")"]',
'where_clause -> ["WHERE", conditions]',
'conditions -> [condition]',
'condition -> [in_clause]',
'in_clause -> [col_ref, "IN", query]',
'query -> ["(", "SELECT", distinct, select_results, "FROM", table_refs, '
'where_clause, ")"]',
'where_clause -> ["WHERE", conditions]',
'conditions -> [condition]',
'condition -> [biexpr]',
'biexpr -> [col_ref, binaryop, value]',
'value -> [pos_value]',
'pos_value -> [string]',
'string -> ["\'BOSTON\'"]',
'binaryop -> ["="]',
'col_ref -> ["city", ".", "city_name"]',
'table_refs -> [table_name]',
'table_name -> ["city"]',
'select_results -> [col_refs]',
'col_refs -> [col_ref]',
'col_ref -> ["city", ".", "city_code"]',
'distinct -> [""]',
'col_ref -> ["airport_service", ".", "city_code"]',
'table_refs -> [table_name]',
'table_name -> ["airport_service"]',
'select_results -> [col_refs]',
'col_refs -> [col_ref]',
'col_ref -> ["airport_service", ".", "airport_code"]',
'distinct -> [""]']
action_sequence = world.get_action_sequence(("( SELECT airport_service . airport_code "
"FROM airport_service WHERE airport_service . city_code IN "
"( SELECT city . city_code FROM city "
"WHERE city.city_name = 'BOSTON' ) AND 1 = 1) ;"))
assert action_sequence == \
['statement -> [query, ";"]',
'query -> ["(", "SELECT", distinct, select_results, "FROM", table_refs, '
'where_clause, ")"]',
'where_clause -> ["WHERE", conditions]',
'conditions -> [condition, conj, conditions]',
'conditions -> [condition]',
'condition -> [biexpr]',
'biexpr -> [value, binaryop, value]',
'value -> [pos_value]',
'pos_value -> [number]',
'number -> ["1"]',
'binaryop -> ["="]',
'value -> [pos_value]',
'pos_value -> [number]',
'number -> ["1"]',
'conj -> ["AND"]',
'condition -> [in_clause]',
'in_clause -> [col_ref, "IN", query]',
'query -> ["(", "SELECT", distinct, select_results, "FROM", table_refs, '
'where_clause, ")"]',
'where_clause -> ["WHERE", conditions]',
'conditions -> [condition]',
'condition -> [biexpr]',
'biexpr -> [col_ref, binaryop, value]',
'value -> [pos_value]',
'pos_value -> [string]',
'string -> ["\'BOSTON\'"]',
'binaryop -> ["="]',
'col_ref -> ["city", ".", "city_name"]',
'table_refs -> [table_name]',
'table_name -> ["city"]',
'select_results -> [col_refs]',
'col_refs -> [col_ref]',
'col_ref -> ["city", ".", "city_code"]',
'distinct -> [""]',
'col_ref -> ["airport_service", ".", "city_code"]',
'table_refs -> [table_name]',
'table_name -> ["airport_service"]',
'select_results -> [col_refs]',
'col_refs -> [col_ref]',
'col_ref -> ["airport_service", ".", "airport_code"]',
'distinct -> [""]']
world = AtisWorld([("give me all flights from boston to "
"philadelphia next week arriving after lunch")])
action_sequence = world.get_action_sequence(("( SELECT DISTINCT flight.flight_id "
"FROM flight WHERE "
"( flight . from_airport IN "
"( SELECT airport_service . airport_code "
"FROM airport_service WHERE airport_service . city_code IN "
"( SELECT city . city_code "
"FROM city "
"WHERE city.city_name = 'BOSTON' )))) ;"))
assert action_sequence == \
['statement -> [query, ";"]',
'query -> ["(", "SELECT", distinct, select_results, "FROM", table_refs, '
'where_clause, ")"]',
'where_clause -> ["WHERE", "(", conditions, ")"]',
'conditions -> [condition]',
'condition -> [in_clause]',
'in_clause -> [col_ref, "IN", query]',
'query -> ["(", "SELECT", distinct, select_results, "FROM", table_refs, '
'where_clause, ")"]',
'where_clause -> ["WHERE", conditions]',
'conditions -> [condition]',
'condition -> [in_clause]',
'in_clause -> [col_ref, "IN", query]',
'query -> ["(", "SELECT", distinct, select_results, "FROM", table_refs, '
'where_clause, ")"]',
'where_clause -> ["WHERE", conditions]',
'conditions -> [condition]',
'condition -> [biexpr]',
'biexpr -> [col_ref, binaryop, value]',
'value -> [pos_value]',
'pos_value -> [string]',
'string -> ["\'BOSTON\'"]',
'binaryop -> ["="]',
'col_ref -> ["city", ".", "city_name"]',
'table_refs -> [table_name]',
'table_name -> ["city"]',
'select_results -> [col_refs]',
'col_refs -> [col_ref]',
'col_ref -> ["city", ".", "city_code"]',
'distinct -> [""]',
'col_ref -> ["airport_service", ".", "city_code"]',
'table_refs -> [table_name]',
'table_name -> ["airport_service"]',
'select_results -> [col_refs]',
'col_refs -> [col_ref]',
'col_ref -> ["airport_service", ".", "airport_code"]',
'distinct -> [""]',
'col_ref -> ["flight", ".", "from_airport"]',
'table_refs -> [table_name]',
'table_name -> ["flight"]',
'select_results -> [col_refs]',
'col_refs -> [col_ref]',
'col_ref -> ["flight", ".", "flight_id"]',
'distinct -> ["DISTINCT"]']
def test_atis_long_action_sequence(self): # pylint: disable=no-self-use
world = AtisWorld([("what is the earliest flight in morning "
"1993 june fourth from boston to pittsburgh")])
action_sequence = world.get_action_sequence("( SELECT DISTINCT flight.flight_id "
"FROM flight "
"WHERE ( flight.departure_time = ( "
"SELECT MIN ( flight.departure_time ) "
"FROM flight "
"WHERE ( flight.departure_time BETWEEN 0 AND 1200 AND "
"( flight . from_airport IN ( "
"SELECT airport_service . airport_code "
"FROM airport_service WHERE airport_service . city_code "
"IN ( "
"SELECT city . city_code "
"FROM city WHERE city.city_name = 'BOSTON' )) "
"AND flight . to_airport IN ( "
"SELECT airport_service . airport_code "
"FROM airport_service "
"WHERE airport_service . city_code IN ( "
"SELECT city . city_code "
"FROM city "
"WHERE city.city_name = 'PITTSBURGH' )) ) ) ) AND "
"( flight.departure_time BETWEEN 0 AND 1200 AND "
"( flight . from_airport IN ( "
"SELECT airport_service . airport_code "
"FROM airport_service "
"WHERE airport_service . city_code IN ( "
"SELECT city . city_code "
"FROM city WHERE city.city_name = 'BOSTON' )) "
"AND flight . to_airport IN ( "
"SELECT airport_service . airport_code "
"FROM airport_service WHERE airport_service . city_code IN ( "
"SELECT city . city_code "
"FROM city "
"WHERE city.city_name = 'PITTSBURGH' )) ) ) ) ) ;")
assert action_sequence == \
['statement -> [query, ";"]',
'query -> ["(", "SELECT", distinct, select_results, "FROM", table_refs, '
'where_clause, ")"]',
'where_clause -> ["WHERE", "(", conditions, ")"]',
'conditions -> [condition, conj, conditions]',
'conditions -> ["(", conditions, ")"]',
'conditions -> [condition, conj, conditions]',
'conditions -> ["(", conditions, ")"]',
'conditions -> [condition, conj, conditions]',
'conditions -> [condition]',
'condition -> [in_clause]',
'in_clause -> [col_ref, "IN", query]',
'query -> ["(", "SELECT", distinct, select_results, "FROM", table_refs, '
'where_clause, ")"]',
'where_clause -> ["WHERE", conditions]',
'conditions -> [condition]',
'condition -> [in_clause]',
'in_clause -> [col_ref, "IN", query]',
'query -> ["(", "SELECT", distinct, select_results, "FROM", table_refs, '
'where_clause, ")"]',
'where_clause -> ["WHERE", conditions]',
'conditions -> [condition]',
'condition -> [biexpr]',
'biexpr -> [col_ref, binaryop, value]',
'value -> [pos_value]',
'pos_value -> [string]',
'string -> ["\'PITTSBURGH\'"]',
'binaryop -> ["="]',
'col_ref -> ["city", ".", "city_name"]',
'table_refs -> [table_name]',
'table_name -> ["city"]',
'select_results -> [col_refs]',
'col_refs -> [col_ref]',
'col_ref -> ["city", ".", "city_code"]',
'distinct -> [""]',
'col_ref -> ["airport_service", ".", "city_code"]',
'table_refs -> [table_name]',
'table_name -> ["airport_service"]',
'select_results -> [col_refs]',
'col_refs -> [col_ref]',
'col_ref -> ["airport_service", ".", "airport_code"]',
'distinct -> [""]',
'col_ref -> ["flight", ".", "to_airport"]',
'conj -> ["AND"]',
'condition -> [in_clause]',
'in_clause -> [col_ref, "IN", query]',
'query -> ["(", "SELECT", distinct, select_results, "FROM", table_refs, '
'where_clause, ")"]',
'where_clause -> ["WHERE", conditions]',
'conditions -> [condition]',
'condition -> [in_clause]',
'in_clause -> [col_ref, "IN", query]',
'query -> ["(", "SELECT", distinct, select_results, "FROM", table_refs, '
'where_clause, ")"]',
'where_clause -> ["WHERE", conditions]',
'conditions -> [condition]',
'condition -> [biexpr]',
'biexpr -> [col_ref, binaryop, value]',
'value -> [pos_value]',
'pos_value -> [string]',
'string -> ["\'BOSTON\'"]',
'binaryop -> ["="]',
'col_ref -> ["city", ".", "city_name"]',
'table_refs -> [table_name]',
'table_name -> ["city"]',
'select_results -> [col_refs]',
'col_refs -> [col_ref]',
'col_ref -> ["city", ".", "city_code"]',
'distinct -> [""]',
'col_ref -> ["airport_service", ".", "city_code"]',
'table_refs -> [table_name]',
'table_name -> ["airport_service"]',
'select_results -> [col_refs]',
'col_refs -> [col_ref]',
'col_ref -> ["airport_service", ".", "airport_code"]',
'distinct -> [""]',
'col_ref -> ["flight", ".", "from_airport"]',
'conj -> ["AND"]',
'condition -> [ternaryexpr]',
'ternaryexpr -> [col_ref, "BETWEEN", value, "AND", value]',
'value -> [pos_value]',
'pos_value -> [number]',
'number -> ["1200"]',
'value -> [pos_value]',
'pos_value -> [number]',
'number -> ["0"]',
'col_ref -> ["flight", ".", "departure_time"]',
'conj -> ["AND"]',
'condition -> [biexpr]',
'biexpr -> [col_ref, binaryop, value]',
'value -> [pos_value]',
'pos_value -> [agg_results]',
'agg_results -> ["(", "SELECT", distinct, agg, "FROM", table_name, '
'where_clause, ")"]',
'where_clause -> ["WHERE", "(", conditions, ")"]',
'conditions -> [condition, conj, conditions]',
'conditions -> ["(", conditions, ")"]',
'conditions -> [condition, conj, conditions]',
'conditions -> [condition]',
'condition -> [in_clause]',
'in_clause -> [col_ref, "IN", query]',
'query -> ["(", "SELECT", distinct, select_results, "FROM", table_refs, '
'where_clause, ")"]',
'where_clause -> ["WHERE", conditions]',
'conditions -> [condition]',
'condition -> [in_clause]',
'in_clause -> [col_ref, "IN", query]',
'query -> ["(", "SELECT", distinct, select_results, "FROM", table_refs, '
'where_clause, ")"]',
'where_clause -> ["WHERE", conditions]',
'conditions -> [condition]',
'condition -> [biexpr]',
'biexpr -> [col_ref, binaryop, value]',
'value -> [pos_value]',
'pos_value -> [string]',
'string -> ["\'PITTSBURGH\'"]',
'binaryop -> ["="]',
'col_ref -> ["city", ".", "city_name"]',
'table_refs -> [table_name]',
'table_name -> ["city"]',
'select_results -> [col_refs]',
'col_refs -> [col_ref]',
'col_ref -> ["city", ".", "city_code"]',
'distinct -> [""]',
'col_ref -> ["airport_service", ".", "city_code"]',
'table_refs -> [table_name]',
'table_name -> ["airport_service"]',
'select_results -> [col_refs]',
'col_refs -> [col_ref]',
'col_ref -> ["airport_service", ".", "airport_code"]',
'distinct -> [""]',
'col_ref -> ["flight", ".", "to_airport"]',
'conj -> ["AND"]',
'condition -> [in_clause]',
'in_clause -> [col_ref, "IN", query]',
'query -> ["(", "SELECT", distinct, select_results, "FROM", table_refs, '
'where_clause, ")"]',
'where_clause -> ["WHERE", conditions]',
'conditions -> [condition]',
'condition -> [in_clause]',
'in_clause -> [col_ref, "IN", query]',
'query -> ["(", "SELECT", distinct, select_results, "FROM", table_refs, '
'where_clause, ")"]',
'where_clause -> ["WHERE", conditions]',
'conditions -> [condition]',
'condition -> [biexpr]',
'biexpr -> [col_ref, binaryop, value]',
'value -> [pos_value]',
'pos_value -> [string]',
'string -> ["\'BOSTON\'"]',
'binaryop -> ["="]',
'col_ref -> ["city", ".", "city_name"]',
'table_refs -> [table_name]',
'table_name -> ["city"]',
'select_results -> [col_refs]',
'col_refs -> [col_ref]',
'col_ref -> ["city", ".", "city_code"]',
'distinct -> [""]',
'col_ref -> ["airport_service", ".", "city_code"]',
'table_refs -> [table_name]',
'table_name -> ["airport_service"]',
'select_results -> [col_refs]',
'col_refs -> [col_ref]',
'col_ref -> ["airport_service", ".", "airport_code"]',
'distinct -> [""]',
'col_ref -> ["flight", ".", "from_airport"]',
'conj -> ["AND"]',
'condition -> [ternaryexpr]',
'ternaryexpr -> [col_ref, "BETWEEN", value, "AND", value]',
'value -> [pos_value]',
'pos_value -> [number]',
'number -> ["1200"]',
'value -> [pos_value]',
'pos_value -> [number]',
'number -> ["0"]',
'col_ref -> ["flight", ".", "departure_time"]',
'table_name -> ["flight"]',
'agg -> [agg_func, "(", col_ref, ")"]',
'col_ref -> ["flight", ".", "departure_time"]',
'agg_func -> ["MIN"]',
'distinct -> [""]',
'binaryop -> ["="]',
'col_ref -> ["flight", ".", "departure_time"]',
'table_refs -> [table_name]',
'table_name -> ["flight"]',
'select_results -> [col_refs]',
'col_refs -> [col_ref]',
'col_ref -> ["flight", ".", "flight_id"]',
'distinct -> ["DISTINCT"]']
def test_atis_from_json(self):
line = json.loads(self.data[0])
for utterance_idx in range(len(line['interaction'])):
world = AtisWorld([interaction['utterance'] for
interaction in line['interaction'][:utterance_idx+1]])
action_sequence = world.get_action_sequence(line['interaction'][utterance_idx]['sql'])
assert action_sequence is not None
def test_all_possible_actions(self): # pylint: disable=no-self-use
world = AtisWorld([("give me all flights from boston to "
"philadelphia next week arriving after lunch")])
possible_actions = world.all_possible_actions()
assert possible_actions == \
['agg -> [agg_func, "(", col_ref, ")"]',
'agg_func -> ["COUNT"]',
'agg_func -> ["MAX"]',
'agg_func -> ["MIN"]',
'agg_results -> ["(", "SELECT", distinct, agg, "FROM", table_name, '
'where_clause, ")"]',
'agg_results -> ["SELECT", distinct, agg, "FROM", table_name, where_clause]',
'biexpr -> [col_ref, "LIKE", string]',
'biexpr -> [col_ref, binaryop, value]',
'biexpr -> [value, binaryop, value]',
'binaryop -> ["*"]',
'binaryop -> ["+"]',
'binaryop -> ["-"]',
'binaryop -> ["/"]',
'binaryop -> ["<"]',
'binaryop -> ["<="]',
'binaryop -> ["="]',
'binaryop -> [">"]',
'binaryop -> [">="]',
'binaryop -> ["IS"]',
'boolean -> ["false"]',
'boolean -> ["true"]',
'col_ref -> ["*"]',
'col_ref -> ["aircraft", ".", "aircraft_code"]',
'col_ref -> ["aircraft", ".", "aircraft_description"]',
'col_ref -> ["aircraft", ".", "basic_type"]',
'col_ref -> ["aircraft", ".", "manufacturer"]',
'col_ref -> ["aircraft", ".", "pressurized"]',
'col_ref -> ["aircraft", ".", "propulsion"]',
'col_ref -> ["aircraft", ".", "wide_body"]',
'col_ref -> ["airline", ".", "airline_code"]',
'col_ref -> ["airline", ".", "airline_name"]',
'col_ref -> ["airport", ".", "airport_code"]',
'col_ref -> ["airport", ".", "airport_location"]',
'col_ref -> ["airport", ".", "airport_name"]',
'col_ref -> ["airport", ".", "country_name"]',
'col_ref -> ["airport", ".", "minimum_connect_time"]',
'col_ref -> ["airport", ".", "state_code"]',
'col_ref -> ["airport", ".", "time_zone_code"]',
'col_ref -> ["airport_service", ".", "airport_code"]',
'col_ref -> ["airport_service", ".", "city_code"]',
'col_ref -> ["airport_service", ".", "direction"]',
'col_ref -> ["airport_service", ".", "miles_distant"]',
'col_ref -> ["airport_service", ".", "minutes_distant"]',
'col_ref -> ["city", ".", "city_code"]',
'col_ref -> ["city", ".", "city_name"]',
'col_ref -> ["city", ".", "country_name"]',
'col_ref -> ["city", ".", "state_code"]',
'col_ref -> ["city", ".", "time_zone_code"]',
'col_ref -> ["class_of_service", ".", "booking_class"]',
'col_ref -> ["class_of_service", ".", "class_description"]',
'col_ref -> ["class_of_service", ".", "rank"]',
'col_ref -> ["date_day", ".", "day_name"]',
'col_ref -> ["date_day", ".", "day_number"]',
'col_ref -> ["date_day", ".", "month_number"]',
'col_ref -> ["date_day", ".", "year"]',
'col_ref -> ["days", ".", "day_name"]',
'col_ref -> ["days", ".", "days_code"]',
'col_ref -> ["equipment_sequence", ".", "aircraft_code"]',
'col_ref -> ["equipment_sequence", ".", "aircraft_code_sequence"]',
'col_ref -> ["fare", ".", "fare_airline"]',
'col_ref -> ["fare", ".", "fare_basis_code"]',
'col_ref -> ["fare", ".", "fare_id"]',
'col_ref -> ["fare", ".", "from_airport"]',
'col_ref -> ["fare", ".", "one_direction_cost"]',
'col_ref -> ["fare", ".", "restriction_code"]',
'col_ref -> ["fare", ".", "round_trip_cost"]',
'col_ref -> ["fare", ".", "round_trip_required"]',
'col_ref -> ["fare", ".", "to_airport"]',
'col_ref -> ["fare_basis", ".", "basis_days"]',
'col_ref -> ["fare_basis", ".", "booking_class"]',
'col_ref -> ["fare_basis", ".", "class_type"]',
'col_ref -> ["fare_basis", ".", "discounted"]',
'col_ref -> ["fare_basis", ".", "economy"]',
'col_ref -> ["fare_basis", ".", "fare_basis_code"]',
'col_ref -> ["fare_basis", ".", "night"]',
'col_ref -> ["fare_basis", ".", "premium"]',
'col_ref -> ["fare_basis", ".", "season"]',
'col_ref -> ["flight", ".", "aircraft_code_sequence"]',
'col_ref -> ["flight", ".", "airline_code"]',
'col_ref -> ["flight", ".", "airline_flight"]',
'col_ref -> ["flight", ".", "arrival_time"]',
'col_ref -> ["flight", ".", "connections"]',
'col_ref -> ["flight", ".", "departure_time"]',
'col_ref -> ["flight", ".", "dual_carrier"]',
'col_ref -> ["flight", ".", "flight_days"]',
'col_ref -> ["flight", ".", "flight_id"]',
'col_ref -> ["flight", ".", "flight_number"]',
'col_ref -> ["flight", ".", "from_airport"]',
'col_ref -> ["flight", ".", "meal_code"]',
'col_ref -> ["flight", ".", "stops"]',
'col_ref -> ["flight", ".", "time_elapsed"]',
'col_ref -> ["flight", ".", "to_airport"]',
'col_ref -> ["flight_fare", ".", "fare_id"]',
'col_ref -> ["flight_fare", ".", "flight_id"]',
'col_ref -> ["flight_leg", ".", "flight_id"]',
'col_ref -> ["flight_leg", ".", "leg_flight"]',
'col_ref -> ["flight_leg", ".", "leg_number"]',
'col_ref -> ["flight_stop", ".", "arrival_airline"]',
'col_ref -> ["flight_stop", ".", "arrival_flight_number"]',
'col_ref -> ["flight_stop", ".", "arrival_time"]',
'col_ref -> ["flight_stop", ".", "departure_airline"]',
'col_ref -> ["flight_stop", ".", "departure_flight_number"]',
'col_ref -> ["flight_stop", ".", "departure_time"]',
'col_ref -> ["flight_stop", ".", "flight_id"]',
'col_ref -> ["flight_stop", ".", "stop_airport"]',
'col_ref -> ["flight_stop", ".", "stop_days"]',
'col_ref -> ["flight_stop", ".", "stop_number"]',
'col_ref -> ["flight_stop", ".", "stop_time"]',
'col_ref -> ["food_service", ".", "compartment"]',
'col_ref -> ["food_service", ".", "meal_code"]',
'col_ref -> ["food_service", ".", "meal_description"]',
'col_ref -> ["food_service", ".", "meal_number"]',
'col_ref -> ["ground_service", ".", "airport_code"]',
'col_ref -> ["ground_service", ".", "city_code"]',
'col_ref -> ["ground_service", ".", "ground_fare"]',
'col_ref -> ["ground_service", ".", "transport_type"]',
'col_ref -> ["month", ".", "month_name"]',
'col_ref -> ["month", ".", "month_number"]',
'col_ref -> ["restriction", ".", "advance_purchase"]',
'col_ref -> ["restriction", ".", "application"]',
'col_ref -> ["restriction", ".", "maximum_stay"]',
'col_ref -> ["restriction", ".", "minimum_stay"]',
'col_ref -> ["restriction", ".", "no_discounts"]',
'col_ref -> ["restriction", ".", "restriction_code"]',
'col_ref -> ["restriction", ".", "saturday_stay_required"]',
'col_ref -> ["restriction", ".", "stopovers"]',
'col_ref -> ["state", ".", "country_name"]',
'col_ref -> ["state", ".", "state_code"]',
'col_ref -> ["state", ".", "state_name"]',
'col_refs -> [col_ref, ",", col_refs]',
'col_refs -> [col_ref]',
'condition -> [biexpr]',
'condition -> [in_clause]',
'condition -> [ternaryexpr]',
'conditions -> ["(", conditions, ")", conj, conditions]',
'conditions -> ["(", conditions, ")"]',
'conditions -> ["NOT", conditions]',
'conditions -> [condition, conj, "(", conditions, ")"]',
'conditions -> [condition, conj, conditions]',
'conditions -> [condition]',
'conj -> ["AND"]',
'conj -> ["OR"]',
'distinct -> [""]',
'distinct -> ["DISTINCT"]',
'in_clause -> [col_ref, "IN", query]',
'number -> ["0"]',
'number -> ["1"]',
'number -> ["1200"]',
'number -> ["1400"]',
'number -> ["1800"]',
'pos_value -> ["ALL", query]',
'pos_value -> ["ANY", query]',
'pos_value -> ["NULL"]',
'pos_value -> [agg_results]',
'pos_value -> [boolean]',
'pos_value -> [col_ref]',
'pos_value -> [number]',
'pos_value -> [string]',
'query -> ["(", "SELECT", distinct, select_results, "FROM", table_refs, '
'where_clause, ")"]',
'query -> ["SELECT", distinct, select_results, "FROM", table_refs, '
'where_clause]',
'select_results -> [agg]',
'select_results -> [col_refs]',
'statement -> [query, ";"]',
'string -> ["\'BBOS\'"]',
'string -> ["\'BOS\'"]',
'string -> ["\'BOSTON\'"]',
'string -> ["\'LUNCH\'"]',
'string -> ["\'PHILADELPHIA\'"]',
'string -> ["\'PHL\'"]',
'string -> ["\'PPHL\'"]',
'table_name -> ["aircraft"]',
'table_name -> ["airline"]',
'table_name -> ["airport"]',
'table_name -> ["airport_service"]',
'table_name -> ["city"]',
'table_name -> ["class_of_service"]',
'table_name -> ["date_day"]',
'table_name -> ["days"]',
'table_name -> ["equipment_sequence"]',
'table_name -> ["fare"]',
'table_name -> ["fare_basis"]',
'table_name -> ["flight"]',
'table_name -> ["flight_fare"]',
'table_name -> ["flight_leg"]',
'table_name -> ["flight_stop"]',
'table_name -> ["food_service"]',
'table_name -> ["ground_service"]',
'table_name -> ["month"]',
'table_name -> ["restriction"]',
'table_name -> ["state"]',
'table_refs -> [table_name, ",", table_refs]',
'table_refs -> [table_name]',
'ternaryexpr -> [col_ref, "BETWEEN", value, "AND", value]',
'ternaryexpr -> [col_ref, "NOT", "BETWEEN", value, "AND", value]',
'value -> ["NOT", pos_value]',
'value -> [pos_value]',
'where_clause -> ["WHERE", "(", conditions, ")"]',
'where_clause -> ["WHERE", conditions]']
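    def test_valid_actions_subset_of_possible_actions(self):  # pylint: disable=no-self-use
        # Hypothetical sanity check added for illustration only (not part of the
        # original suite). It assumes AtisWorld keeps the per-nonterminal
        # valid_actions dictionary consistent with the flat list returned by
        # all_possible_actions(); both names appear in the tests above.
        world = AtisWorld(["show me the flights from denver at 12 o'clock"])
        possible_actions = set(world.all_possible_actions())
        for actions in world.valid_actions.values():
            assert set(actions) <= possible_actions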
| 54.117838
| 114
| 0.407799
|
fe149f4f4ffad25eb7afb85cedbe2229c3a50d34
| 416
|
py
|
Python
|
silver/tests/integration/test_documents_billing_logs.py
|
DocTocToc/silver
|
f1b4a8871fc4a37c8813d3c010bc70dc59c0a6e5
|
[
"Apache-2.0"
] | 222
|
2017-01-15T10:30:57.000Z
|
2022-03-08T20:34:46.000Z
|
silver/tests/integration/test_documents_billing_logs.py
|
DocTocToc/silver
|
f1b4a8871fc4a37c8813d3c010bc70dc59c0a6e5
|
[
"Apache-2.0"
] | 141
|
2017-01-11T10:56:49.000Z
|
2021-10-12T11:51:00.000Z
|
silver/tests/integration/test_documents_billing_logs.py
|
DocTocToc/silver
|
f1b4a8871fc4a37c8813d3c010bc70dc59c0a6e5
|
[
"Apache-2.0"
] | 76
|
2017-01-10T13:50:27.000Z
|
2022-03-25T21:37:00.000Z
|
import pytest
from silver.fixtures.factories import BillingLogFactory
@pytest.mark.django_db
def test_update_billing_log_when_creating_proforma_related_invoice():
billing_log = BillingLogFactory.create(invoice=None)
proforma = billing_log.proforma
assert billing_log.invoice is None
invoice = proforma.create_invoice()
billing_log.refresh_from_db()
assert billing_log.invoice == invoice
| 26
| 69
| 0.802885
|
07c4fdacf127cff18fa47f956058df268f1e55c6
| 4,400
|
py
|
Python
|
reinforce_book_chapter2.py
|
kitfactory/python_test
|
c1ff2f579d52ad81b327e103ec1f04b83774e9c0
|
[
"Apache-2.0"
] | null | null | null |
reinforce_book_chapter2.py
|
kitfactory/python_test
|
c1ff2f579d52ad81b327e103ec1f04b83774e9c0
|
[
"Apache-2.0"
] | null | null | null |
reinforce_book_chapter2.py
|
kitfactory/python_test
|
c1ff2f579d52ad81b327e103ec1f04b83774e9c0
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(5,5))
ax = plt.gca()
plt.plot([1,1],[0,1], color='red', linewidth=2)
plt.plot([1,2],[2,2], color='red', linewidth=2)
plt.plot([2,2],[2,1], color='red', linewidth=2)
plt.plot([2,3],[1,1], color='red', linewidth=2)
plt.text(0.5, 2.5, 'S0', size=14, ha='center')
plt.text(1.5, 2.5, 'S1', size=14, ha='center')
plt.text(2.5, 2.5, 'S2', size=14, ha='center')
plt.text(0.5, 1.5, 'S3', size=14, ha='center')
plt.text(1.5, 1.5, 'S4', size=14, ha='center')
plt.text(2.5, 1.5, 'S5', size=14, ha='center')
plt.text(0.5, 0.5, 'S6', size=14, ha='center')
plt.text(1.5, 0.5, 'S7', size=14, ha='center')
plt.text(2.5, 0.5, 'S8', size=14, ha='center')
plt.text(0.5, 2.3, 'START', ha='center')
plt.text(2.5, 0.3, 'GOAL', ha='center')
ax.set_xlim(0, 3)
ax.set_ylim(0, 3)
plt.tick_params(axis='both', which='both', bottom='off', top='off', labelbottom='off',right='off',labelleft='off')
line, = ax.plot([0.5],[2.5], marker="o", color='g', markersize=60)
plt.show()
theta_0 = np.array(
[[np.nan, 1, 1, np.nan], #S0
[np.nan, 1, np.nan, 1], #S1
[np.nan, np.nan, 1, 1], #S2
[1, 1 , 1, np.nan], #S3
[np.nan, np.nan, 1, 1], #S4
[1, np.nan, np.nan, np.nan], #S5
[1, np.nan, np.nan, np.nan], #S6
[1, 1, np.nan, np.nan] #S7
])
def simple_convert_into_pi_from_theta(theta):
[m, n] = theta.shape
pi = np.zeros((m, n))
for i in range(0, m):
pi[i, :] = theta[i, :] / np.nansum(theta[i, :])
pi = np.nan_to_num(pi)
return pi
pi_0 = simple_convert_into_pi_from_theta(theta_0)
print(pi_0)
def get_next_s(pi, s):
    direction = ["up", "right", "down", "left"]
    # choose a direction according to the probabilities in pi[s, :]
    next_direction = np.random.choice(direction, p=pi[s, :])
    if next_direction == 'up':
        s_next = s - 3
    elif next_direction == 'right':
        s_next = s + 1
    elif next_direction == 'down':
        s_next = s + 3
    elif next_direction == 'left':
        s_next = s - 1
    return s_next
def get_action_and_next_s(pi, s):
    direction = ["up", "right", "down", "left"]
    # choose a direction according to the probabilities in pi[s, :]
    next_direction = np.random.choice(direction, p=pi[s, :])
    if next_direction == "up":
        action = 0
        s_next = s - 3  # moving up decreases the state index by 3
    elif next_direction == "right":
        action = 1
        s_next = s + 1  # moving right increases the state index by 1
    elif next_direction == "down":
        action = 2
        s_next = s + 3  # moving down increases the state index by 3
    elif next_direction == "left":
        action = 3
        s_next = s - 1  # moving left decreases the state index by 1
    return [action, s_next]
def goal_maze(pi):
s = 0
state_history = [0]
while(1):
next_s = get_next_s(pi, s)
state_history.append(next_s)
if next_s == 8:
break
else:
s = next_s
return state_history
s_a_history = goal_maze(pi_0)
def goal_maze_ret_s_a(pi):
    s = 0  # start position
    s_a_history = [[0, np.nan]]  # list recording the agent's states and actions
    while (1):  # loop until the goal is reached
        [action, next_s] = get_action_and_next_s(pi, s)
        s_a_history[-1][1] = action
        # store the action for the current state (the last entry, index = -1)
        s_a_history.append([next_s, np.nan])
        # append the next state; its action is unknown yet, so use nan
        if next_s == 8:  # stop when the goal state is reached
            break
        else:
            s = next_s
    return s_a_history
s_a_history = goal_maze_ret_s_a(pi_0)
print(s_a_history)
print("Solving the maze took", len(s_a_history) - 1, "steps.")
def update_theta(theta, pi, s_a_history):
    eta = 0.1  # learning rate
    T = len(s_a_history) - 1  # total number of steps taken to reach the goal
    [m, n] = theta.shape
    delta_theta = theta.copy()  # Δθ
    for i in range(0, m):
        for j in range(0, n):
            if not (np.isnan(theta[i, j])):
                SA_i = [SA for SA in s_a_history if SA[0] == i]
                # history entries for state i (list comprehension)
                SA_ij = [SA for SA in s_a_history if SA == [i, j]]
                # history entries where action j was taken in state i
                N_i = len(SA_i)  # total number of actions taken in state i
                N_ij = len(SA_ij)  # number of times action j was taken in state i
                delta_theta[i, j] = (N_ij + pi[i, j] * N_i) / T
    new_theta = theta + eta * delta_theta
    return new_theta
new_theta = update_theta(theta_0, pi_0, s_a_history)
pi = simple_convert_into_pi_from_theta(new_theta)
print(pi)
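# A minimal sketch, not part of the original script: repeat the episode /
# update cycle until the policy stops changing. The iteration cap and the
# stopping threshold `stop_epsilon` are assumptions chosen for illustration.
stop_epsilon = 10**-4
theta = theta_0
pi = pi_0
for episode in range(100):
    s_a_history = goal_maze_ret_s_a(pi)  # run one episode with the current policy
    new_theta = update_theta(theta, pi, s_a_history)
    new_pi = simple_convert_into_pi_from_theta(new_theta)
    delta = np.sum(np.abs(new_pi - pi))  # how much the policy moved this episode
    theta, pi = new_theta, new_pi
    if delta < stop_epsilon:
        break
print("final policy:")
print(pi)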
| 26.190476
| 114
| 0.562273
|
5965934e3b51cf33b0c1a8f27cc50cee75b0b957
| 3,384
|
py
|
Python
|
nemo_text_processing/text_normalization/taggers/money.py
|
jhfong/NeMo
|
db7a2db5ee4e0e81a5640b1f8ff5e83e993bcb87
|
[
"Apache-2.0"
] | 2
|
2021-09-21T07:36:20.000Z
|
2022-02-05T15:29:04.000Z
|
nemo_text_processing/text_normalization/taggers/money.py
|
joseewei/NeMo
|
c5dbf4508abaa5f54db8971b53e37266137b0399
|
[
"Apache-2.0"
] | null | null | null |
nemo_text_processing/text_normalization/taggers/money.py
|
joseewei/NeMo
|
c5dbf4508abaa5f54db8971b53e37266137b0399
|
[
"Apache-2.0"
] | 12
|
2021-06-20T08:56:10.000Z
|
2022-03-16T19:07:10.000Z
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.text_normalization.data_loader_utils import get_abs_path
from nemo_text_processing.text_normalization.graph_utils import (
NEMO_SIGMA,
SINGULAR_TO_PLURAL,
GraphFst,
convert_space,
insert_space,
)
from nemo_text_processing.text_normalization.taggers.date import get_hundreds_graph
try:
import pynini
from pynini.lib import pynutil
PYNINI_AVAILABLE = True
except (ModuleNotFoundError, ImportError):
PYNINI_AVAILABLE = False
class MoneyFst(GraphFst):
"""
Finite state transducer for classifying money, suppletive aware, e.g.
$12.05 -> money { currency: "dollars" integer_part: "twelve" fractional_part: "o five" }
$1 -> money { currency: "dollar" integer_part: "one" }
Args:
cardinal: CardinalFst
decimal: DecimalFst
        deterministic: if True, will provide a single transduction option,
            if False, multiple transductions are generated (used for audio-based normalization)
"""
def __init__(self, cardinal: GraphFst, decimal: GraphFst, deterministic: bool = True):
super().__init__(name="money", kind="classify", deterministic=deterministic)
cardinal_graph = cardinal.graph
graph_decimal_final = decimal.final_graph_wo_negative
unit_singular = pynini.string_file(get_abs_path("data/currency/currency.tsv"))
unit_plural = convert_space(unit_singular @ SINGULAR_TO_PLURAL)
unit_singular = convert_space(unit_singular)
graph_unit_singular = pynutil.insert("currency: \"") + unit_singular + pynutil.insert("\"")
graph_unit_plural = pynutil.insert("currency: \"") + unit_plural + pynutil.insert("\"")
singular_graph = (
graph_unit_singular + pynutil.insert(" integer_part: \"") + pynini.cross("1", "one") + pynutil.insert("\"")
)
graph_decimal = graph_unit_plural + insert_space + graph_decimal_final
if deterministic:
graph_integer = (
graph_unit_plural
+ pynutil.insert(" integer_part: \"")
+ ((NEMO_SIGMA - "1") @ cardinal_graph)
+ pynutil.insert("\"")
)
else:
graph_integer = (
graph_unit_plural
+ pynutil.insert(" integer_part: \"")
+ ((NEMO_SIGMA - "1") @ (get_hundreds_graph(deterministic) | cardinal_graph))
+ pynutil.insert("\"")
)
graph_decimal |= singular_graph + insert_space + graph_decimal_final
graph_integer |= singular_graph
final_graph = graph_integer | graph_decimal
final_graph = self.add_tokens(final_graph)
self.fst = final_graph.optimize()
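# A minimal usage sketch, not part of this module. The import paths and the
# constructor signatures of CardinalFst and DecimalFst below are assumptions
# mirroring MoneyFst's own signature; treat them as illustrative rather than
# documented NeMo API.
if __name__ == "__main__":
    from nemo_text_processing.text_normalization.taggers.cardinal import CardinalFst
    from nemo_text_processing.text_normalization.taggers.decimal import DecimalFst
    from pynini.lib import rewrite

    cardinal = CardinalFst(deterministic=True)
    decimal = DecimalFst(cardinal=cardinal, deterministic=True)
    money = MoneyFst(cardinal=cardinal, decimal=decimal, deterministic=True)
    # Apply the classifier FST to a money expression; the best rewrite should
    # resemble the token string shown in the class docstring.
    print(rewrite.top_rewrite("$12.05", money.fst))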
| 38.896552
| 119
| 0.675236
|
067d1182cda35db0e9e373f1225ea48ddfe26fed
| 5,685
|
py
|
Python
|
builder/main.py
|
cab202/quty
|
abadf5e9656b6c7113d6958bd3f4fc1314ee4dfd
|
[
"Apache-2.0"
] | null | null | null |
builder/main.py
|
cab202/quty
|
abadf5e9656b6c7113d6958bd3f4fc1314ee4dfd
|
[
"Apache-2.0"
] | null | null | null |
builder/main.py
|
cab202/quty
|
abadf5e9656b6c7113d6958bd3f4fc1314ee4dfd
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Modifications Copyright (C) 2022 Queensland University of Technology (QUT)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from os.path import join
from SCons.Script import (ARGUMENTS, COMMAND_LINE_TARGETS, AlwaysBuild,
Builder, Default, DefaultEnvironment)
from platformio.util import get_serial_ports
def BeforeUpload(target, source, env): # pylint: disable=W0613,W0621
upload_options = {}
if "BOARD" in env:
upload_options = env.BoardConfig().get("upload", {})
if env.subst("$UPLOAD_SPEED"):
env.Append(UPLOADERFLAGS=["-c", "$UPLOAD_SPEED"])
env.Append(ERASEFLAGS=["-c", "$UPLOAD_SPEED"])
# extra upload flags
if "extra_flags" in upload_options:
env.Append(UPLOADERFLAGS=upload_options.get("extra_flags"))
env.Append(ERASEFLAGS=upload_options.get("extra_flags"))
env.AutodetectUploadPort()
env.Append(UPLOADERFLAGS=["-u", '"$UPLOAD_PORT"'])
env.Append(ERASEFLAGS=["-u", '"$UPLOAD_PORT"'])
env.TouchSerialPort("$UPLOAD_PORT", 1200)
env = DefaultEnvironment()
env.Replace(
AR="avr-gcc-ar",
AS="avr-as",
CC="avr-gcc",
GDB="avr-gdb",
CXX="avr-g++",
OBJCOPY="avr-objcopy",
RANLIB="avr-gcc-ranlib",
SIZETOOL="avr-size",
ARFLAGS=["rc"],
SIZEPROGREGEXP=r"^(?:\.text|\.data|\.rodata|\.bootloader)\s+([0-9]+).*",
SIZEDATAREGEXP=r"^(?:\.data|\.bss|\.noinit)\s+([0-9]+).* ",
SIZEEEPROMREGEXP=r"^(?:\.eeprom)\s+([0-9]+).*",
SIZECHECKCMD="$SIZETOOL -A -d $SOURCES",
SIZEPRINTCMD='$SIZETOOL --mcu=$BOARD_MCU -C -d $SOURCES',
UPLOADER="pymcuprog",
UPLOADERFLAGS=[
"-d", "$BOARD_MCU",
"-t", "uart",
"-f", "$SOURCES"
],
UPLOADCMD="$UPLOADER $UPLOADERFLAGS write",
ERASEFLAGS=[
"-d", "$BOARD_MCU",
"-t", "uart"
],
ERASECMD="$UPLOADER $ERASEFLAGS erase",
PROGSUFFIX=".elf"
)
env.Append(
BUILDERS=dict(
ElfToBin=Builder(
action=env.VerboseAction(" ".join([
"$OBJCOPY",
"-O",
"binary",
"-R",
".eeprom",
"$SOURCES",
"$TARGET"
]), "Building $TARGET"),
suffix=".bin"
),
ElfToEep=Builder(
action=env.VerboseAction(" ".join([
"$OBJCOPY",
"-O",
"ihex",
"-j",
".eeprom",
'--set-section-flags=.eeprom="alloc,load"',
"--no-change-warnings",
"--change-section-lma",
".eeprom=0",
"$SOURCES",
"$TARGET"
]), "Building $TARGET"),
suffix=".eep"
),
ElfToHex=Builder(
action=env.VerboseAction(" ".join([
"$OBJCOPY",
"-O",
"ihex",
"-R",
".eeprom",
"$SOURCES",
"$TARGET"
]), "Building $TARGET"),
suffix=".hex"
)
)
)
# Allow user to override via pre:script
if env.get("PROGNAME", "program") == "program":
env.Replace(PROGNAME="firmware")
env.SConscript("frameworks/_bare.py", exports="env")
#
# Target: Build executable and linkable firmware
#
target_elf = env.BuildProgram()
target_firm = env.ElfToHex(join("$BUILD_DIR", "${PROGNAME}"), target_elf)
env.Depends(target_firm, "checkprogsize")
AlwaysBuild(env.Alias("nobuild", target_firm))
target_buildprog = env.Alias("buildprog", target_firm, target_firm)
#
# Target: Print binary size
#
target_size = env.AddPlatformTarget(
"size",
target_elf,
env.VerboseAction("$SIZEPRINTCMD", "Calculating size $SOURCE"),
"Program Size",
"Calculate program size",
)
#
# Target: Upload by default .hex file
#
upload_protocol = env.subst("$UPLOAD_PROTOCOL")
if upload_protocol == "custom":
upload_actions = [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]
else:
upload_actions = [
env.VerboseAction(BeforeUpload, "Looking for upload port..."),
env.VerboseAction("$ERASECMD", "Erasing flash..."),
env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")
]
upload_options = env.BoardConfig().get("upload", {})
if int(ARGUMENTS.get("PIOVERBOSE", 0)):
env.Prepend(UPLOADERFLAGS=["-vinfo"])
env.Prepend(ERASEFLAGS=["-vinfo"])
env.AddPlatformTarget("upload", target_firm, upload_actions, "Upload")
#
# Setup default targets
#
Default([target_buildprog, target_size])
| 28.567839
| 76
| 0.605277
|
8f7875a856e1a6d80aff074d0edd74d189337da2
| 8,311
|
py
|
Python
|
examples/microjson/mutants/CRP_Str_mutant_1486201410.py
|
Anirban166/tstl
|
73dac02f084b10e1bf2f172a5d1306bb5fbd7f7e
|
[
"Apache-2.0"
] | 90
|
2015-04-07T10:26:53.000Z
|
2022-03-07T15:14:57.000Z
|
examples/microjson/mutants/CRP_Str_mutant_1486201410.py
|
Anirban166/tstl
|
73dac02f084b10e1bf2f172a5d1306bb5fbd7f7e
|
[
"Apache-2.0"
] | 14
|
2015-10-13T16:25:59.000Z
|
2021-01-21T18:31:03.000Z
|
examples/microjson/mutants/CRP_Str_mutant_1486201410.py
|
Anirban166/tstl
|
73dac02f084b10e1bf2f172a5d1306bb5fbd7f7e
|
[
"Apache-2.0"
] | 32
|
2015-04-07T10:41:29.000Z
|
2022-02-26T05:17:28.000Z
|
import math
import StringIO
import types
__pychecker__ = 'no-returnvalues'
WS = set([' ', '\t', '\r', '\n', '\x08', '\x0c'])
DIGITS = set([str(i) for i in range(0, 10)])
NUMSTART = DIGITS.union(['.', '-', '+'])
NUMCHARS = NUMSTART.union(['e', 'E'])
ESC_MAP = {'n': '\n', 't': '\t', 'r': '\r', '': '\x08', 'f': '\x0c'}
REV_ESC_MAP = dict([(_v, _k) for (_k, _v) in ESC_MAP.items()] + [('"', '"')])
E_BYTES = 'input string must be type str containing ASCII or UTF-8 bytes'
E_MALF = 'malformed JSON data'
E_TRUNC = 'truncated JSON data'
E_BOOL = 'expected boolean'
E_NULL = 'expected null'
E_LITEM = 'expected list item'
E_DKEY = 'expected key'
E_COLON = 'missing colon after key'
E_EMPTY = 'found empty string, not valid JSON data'
E_BADESC = 'bad escape character found'
E_UNSUPP = 'unsupported type "%s" cannot be JSON-encoded'
E_BADFLOAT = 'cannot emit floating point value "%s"'
NEG_INF = float('-inf')
POS_INF = float('inf')
class JSONError(Exception):
def __init__(self, msg, stm=None, pos=0):
if stm:
msg += ' at position %d, "%s"' % (pos, repr(stm.substr(pos, 32)))
Exception.__init__(self, msg)
class JSONStream(object):
def __init__(self, data):
self._stm = StringIO.StringIO(data)
@property
def pos(self):
return self._stm.pos
@property
def len(self):
return self._stm.len
def getvalue(self):
return self._stm.getvalue()
def skipspaces(self):
'post-cond: read pointer will be over first non-WS char'
self._skip(lambda c: (c not in WS))
def _skip(self, stopcond):
while True:
c = self.peek()
if (stopcond(c) or (c == '')):
break
self.next()
def next(self, size=1):
return self._stm.read(size)
def next_ord(self):
return ord(self.next())
def peek(self):
if (self.pos == self.len):
return ''
return self.getvalue()[self.pos]
def substr(self, pos, length):
return self.getvalue()[pos:pos + length]
def _decode_utf8(c0, stm):
c0 = ord(c0)
r = 65533
nc = stm.next_ord
if (c0 & 224 == 192):
r = c0 & 31 << 6 + nc() & 63
elif (c0 & 240 == 224):
r = c0 & 15 << 12 + nc() & 63 << 6 + nc() & 63
elif (c0 & 248 == 240):
r = c0 & 7 << 18 + nc() & 63 << 12 + nc() & 63 << 6 + nc() & 63
return unichr(r)
def decode_escape(c, stm):
v = ESC_MAP.get(c, None)
if (v is not None):
return v
elif (c != 'u'):
return c
sv = 12
r = 0
for _ in range(0, 4):
r |= int(stm.next(), 16) << sv
sv -= 4
return unichr(r)
def _from_json_string(stm):
stm.next()
r = []
while True:
c = stm.next()
if (c == ''):
            raise JSONError(E_TRUNC, stm, stm.pos - 1)
elif (c == '\\'):
c = stm.next()
r.append(decode_escape(c, stm))
elif (c == '"'):
return ''.join(r)
elif (c > '\x7f'):
r.append(_decode_utf8(c, stm))
else:
r.append(c)
def _from_json_fixed(stm, expected, value, errmsg):
off = len(expected)
pos = stm.pos
if (stm.substr(pos, off) == expected):
stm.next(off)
return value
    raise JSONError(errmsg, stm, pos)
def _from_json_number(stm):
is_float = 0
saw_exp = 0
pos = stm.pos
while True:
c = stm.peek()
if (c not in NUMCHARS):
break
elif ((c == '-') and (not saw_exp)):
pass
elif (c in ('.', 'e', 'E')):
is_float = 1
if (c in ('e', 'E')):
saw_exp = 1
stm.next()
s = stm.substr(pos, stm.pos - pos)
if is_float:
return float(s)
return long(s)
def _from_json_list(stm):
stm.next()
result = []
pos = stm.pos
while True:
stm.skipspaces()
c = stm.peek()
if (c == ''):
            raise JSONError(E_TRUNC, stm, pos)
elif (c == ']'):
stm.next()
return result
elif (c == ','):
stm.next()
result.append(_from_json_raw(stm))
continue
elif (not result):
result.append(_from_json_raw(stm))
continue
else:
            raise JSONError(E_MALF, stm, stm.pos)
def _from_json_dict(stm):
stm.next()
result = {}
expect_key = 0
pos = stm.pos
while True:
stm.skipspaces()
c = stm.peek()
if (c == ''):
            raise JSONError(E_TRUNC, stm, pos)
if (c in ('}', ',')):
stm.next()
if expect_key:
                raise JSONError(E_DKEY, stm, stm.pos)
if (c == '}'):
return result
expect_key = 1
continue
elif (c == '"'):
key = _from_json_string(stm)
stm.skipspaces()
c = stm.next()
if (c != ':'):
                raise JSONError(E_COLON, stm, stm.pos)
stm.skipspaces()
val = _from_json_raw(stm)
result[key] = val
expect_key = 0
continue
        raise JSONError(E_MALF, stm, stm.pos)
def _from_json_raw(stm):
while True:
stm.skipspaces()
c = stm.peek()
if (c == '"'):
return _from_json_string(stm)
elif (c == '{'):
return _from_json_dict(stm)
elif (c == '['):
return _from_json_list(stm)
elif (c == 't'):
return _from_json_fixed(stm, 'true', True, E_BOOL)
elif (c == 'f'):
return _from_json_fixed(stm, 'false', False, E_BOOL)
elif (c == 'n'):
return _from_json_fixed(stm, 'null', None, E_NULL)
elif (c in NUMSTART):
return _from_json_number(stm)
        raise JSONError(E_MALF, stm, stm.pos)
def from_json(data):
"\n Converts 'data' which is UTF-8 (or the 7-bit pure ASCII subset) into\n a Python representation. You must pass bytes to this in a str type,\n not unicode.\n "
if (not isinstance(data, str)):
        raise JSONError(E_BYTES)
if (not data):
return None
stm = JSONStream(data)
return _from_json_raw(stm)
def _to_json_list(stm, lst):
seen = 0
stm.write('[')
for elem in lst:
if seen:
stm.write(',')
seen = 1
_to_json_object(stm, elem)
stm.write(']')
def _to_json_string(stm, buf):
stm.write('"')
for c in buf:
nc = REV_ESC_MAP.get(c, None)
if nc:
stm.write('\\' + nc)
elif (ord(c) <= 127):
stm.write(str(c))
else:
stm.write('\\u%04x' % ord(c))
stm.write('"')
def _to_json_dict(stm, dct):
seen = 0
stm.write('{')
for key in dct.keys():
if seen:
stm.write(',')
seen = 1
val = dct[key]
if (not (type(key) in (types.StringType, types.UnicodeType))):
key = str(key)
_to_json_string(stm, key)
stm.write(':')
_to_json_object(stm, val)
stm.write('}')
def _to_json_object(stm, obj):
if isinstance(obj, (types.ListType, types.TupleType)):
_to_json_list(stm, obj)
elif isinstance(obj, types.BooleanType):
if obj:
stm.write('true')
else:
stm.write('false')
elif isinstance(obj, types.FloatType):
if (not (NEG_INF < obj < POS_INF)):
            raise JSONError(E_BADFLOAT % obj)
stm.write('%s' % obj)
elif isinstance(obj, (types.IntType, types.LongType)):
stm.write('%d' % obj)
elif isinstance(obj, types.NoneType):
stm.write('null')
elif isinstance(obj, (types.StringType, types.UnicodeType)):
_to_json_string(stm, obj)
elif (hasattr(obj, 'keys') and hasattr(obj, '__getitem__')):
_to_json_dict(stm, obj)
elif hasattr(obj, '__unicode__'):
_to_json_string(stm, obj.__unicode__())
elif hasattr(obj, '__str__'):
_to_json_string(stm, obj.__str__())
else:
        raise JSONError(E_UNSUPP % type(obj))
def to_json(obj):
"\n Converts 'obj' to an ASCII JSON string representation.\n "
stm = StringIO.StringIO('')
_to_json_object(stm, obj)
return stm.getvalue()
decode = from_json
encode = to_json
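# A small round-trip sketch (not part of the mutant under test): encode a
# Python object with the aliases defined above and decode it back. Note the
# module targets Python 2 (StringIO, long, unichr).
if __name__ == '__main__':
    doc = encode({'name': 'flight', 'stops': [0, 1, 2], 'direct': True})
    print(doc)
    print(decode(doc))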
| 27.889262
| 178
| 0.526411
|
05ff54c086466bf205273f80d3c4e9edd82e816e
| 38,814
|
py
|
Python
|
seglearn/transform.py
|
chkoar/seglearn
|
5dab9d103b416fca26a1a08d9243762cac3403d5
|
[
"BSD-3-Clause"
] | null | null | null |
seglearn/transform.py
|
chkoar/seglearn
|
5dab9d103b416fca26a1a08d9243762cac3403d5
|
[
"BSD-3-Clause"
] | null | null | null |
seglearn/transform.py
|
chkoar/seglearn
|
5dab9d103b416fca26a1a08d9243762cac3403d5
|
[
"BSD-3-Clause"
] | null | null | null |
'''
This module is for transforming time series data.
'''
# Author: David Burns
# License: BSD
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils import check_random_state, check_array
from sklearn.exceptions import NotFittedError
from sklearn.utils.metaestimators import _BaseComposition
from scipy.interpolate import interp1d
from .feature_functions import base_features
from .base import TS_Data
from .util import get_ts_data_parts, check_ts_data
__all__ = ['SegmentX', 'SegmentXY', 'SegmentXYForecast', 'PadTrunc', 'Interp', 'FeatureRep',
'FeatureRepMix']
class XyTransformerMixin(object):
''' Base class for transformer that transforms data and target '''
def fit_transform(self, X, y, sample_weight=None, **fit_params):
'''
Fit the data and transform (required by sklearn API)
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like shape [n_series], default = None
target vector
sample_weight : array-like shape [n_series], default = None
sample weights
Returns
-------
X_new : array-like, shape [n_segments, ]
transformed time series data
y_new : array-like, shape [n_segments]
expanded target vector
sample_weight_new : array-like shape [n_segments]
expanded sample weights
'''
return self.fit(X, y, **fit_params).transform(X, y, sample_weight)
def last(y):
''' Returns the last column from 2d matrix '''
return y[:, (y.shape[1] - 1)]
def middle(y):
''' Returns the middle column from 2d matrix '''
return y[:, y.shape[1] // 2]
def mean(y):
''' returns average along axis 1'''
return np.mean(y, axis=1)
def every(y):
''' Returns all values (sequences) of y '''
return y
def shuffle_data(X, y=None, sample_weight=None):
''' Shuffles indices X, y, and sample_weight together'''
if len(X) > 1:
ind = np.arange(len(X), dtype=np.int)
np.random.shuffle(ind)
Xt = X[ind]
yt = y
swt = sample_weight
if yt is not None:
yt = yt[ind]
if swt is not None:
swt = swt[ind]
return Xt, yt, swt
else:
return X, y, sample_weight
class SegmentX(BaseEstimator, XyTransformerMixin):
'''
Transformer for sliding window segmentation for datasets where
X is time series data, optionally with contextual variables
and each time series in X has a single target value y
The target y is mapped to all segments from their parent series.
The transformed data consists of segment/target pairs that can be learned
through a feature representation or directly with a neural network.
Parameters
----------
width : int > 0
width of segments (number of samples)
overlap : float range [0,1]
amount of overlap between segments. must be in range: 0 <= overlap <= 1
        (note: setting overlap to 1.0 results in the segments being advanced by a single sample)
shuffle : bool, optional
shuffle the segments after transform (recommended for batch optimizations)
random_state : int, default = None
Randomized segment shuffling will return different results for each call to
``transform``. If you have set ``shuffle`` to True and want the same result
with each call to ``fit``, set ``random_state`` to an integer.
Todo
----
separate fit and predict overlap parameters
'''
def __init__(self, width=100, overlap=0.5, shuffle=False, random_state=None):
self.width = width
self.overlap = overlap
self.shuffle = shuffle
self.random_state = random_state
self._validate_params()
self.f_labels = None
self.step = int(self.width * (1. - self.overlap))
self.step = max(1, self.step)
def _validate_params(self):
if not self.width >= 1:
raise ValueError("width must be >=1 (was %d)" % self.width)
if not (self.overlap >= 0.0 and self.overlap <= 1.0):
raise ValueError("overlap must be >=0 and <=1.0 (was %.2f)" % self.overlap)
def fit(self, X, y=None):
'''
Fit the transform
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : None
There is no need of a target in a transformer, yet the pipeline API requires
this parameter.
shuffle : bool
Shuffles data after transformation
Returns
-------
self : object
Returns self.
'''
check_ts_data(X, y)
return self
def transform(self, X, y=None, sample_weight=None):
'''
Transforms the time series data into segments (temporal tensor)
Note this transformation changes the number of samples in the data
If y and sample_weight are provided, they are transformed to align to the new samples
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like shape [n_series], default = None
target vector
sample_weight : array-like shape [n_series], default = None
sample weights
Returns
-------
Xt : array-like, shape [n_segments, ]
transformed time series data
yt : array-like, shape [n_segments]
expanded target vector
sample_weight_new : array-like shape [n_segments]
expanded sample weights
'''
check_ts_data(X, y)
Xt, Xc = get_ts_data_parts(X)
yt = y
swt = sample_weight
N = len(Xt) # number of time series
if Xt[0].ndim > 1:
Xt = np.array([sliding_tensor(Xt[i], self.width, self.step) for i in np.arange(N)])
else:
Xt = np.array([sliding_window(Xt[i], self.width, self.step) for i in np.arange(N)])
Nt = [len(Xt[i]) for i in np.arange(len(Xt))]
Xt = np.concatenate(Xt)
if yt is not None:
yt = expand_variables_to_segments(yt, Nt).ravel()
if swt is not None:
swt = expand_variables_to_segments(swt, Nt).ravel()
if Xc is not None:
Xc = expand_variables_to_segments(Xc, Nt)
Xt = TS_Data(Xt, Xc)
if self.shuffle is True:
check_random_state(self.random_state)
return shuffle_data(Xt, yt, swt)
return Xt, yt, swt
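def _example_segment_x():
    # Minimal usage sketch, not part of seglearn: two toy multichannel series,
    # each carrying a single label, segmented as described in the SegmentX
    # docstring above. Shapes and values are illustrative assumptions.
    X = [np.random.rand(200, 3), np.random.rand(150, 3)]  # two series, 3 channels each
    y = np.array([0, 1])                                   # one target per series
    segmenter = SegmentX(width=50, overlap=0.5)
    Xt, yt, _ = segmenter.fit_transform(X, y)
    # Xt has shape [n_segments, 50, 3]; yt repeats each parent label per segment.
    return Xt, yt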
class SegmentXY(BaseEstimator, XyTransformerMixin):
'''
Transformer for sliding window segmentation for datasets where
X is time series data, optionally with contextual variables
and y is also time series data with the same sampling interval as X
The target y is mapped to segments from their parent series,
using the parameter ``y_func`` to determine the mapping behavior.
The segment targets can be a single value, or a sequence of values
depending on ``y_func`` parameter.
The transformed data consists of segment/target pairs that can be learned
through a feature representation or directly with a neural network.
Parameters
----------
width : int > 0
width of segments (number of samples)
overlap : float range [0,1]
amount of overlap between segments. must be in range: 0 <= overlap <= 1
        (note: setting overlap to 1.0 results in the segments being advanced by a single sample)
y_func : function
returns target from array of target segments (eg ``last``, ``middle``, or ``mean``)
shuffle : bool, optional
shuffle the segments after transform (recommended for batch optimizations)
random_state : int, default = None
Randomized segment shuffling will return different results for each call to ``transform``.
If you have set ``shuffle`` to True and want the same result with each call to ``fit``,
set ``random_state`` to an integer.
Returns
-------
self : object
Returns self.
'''
def __init__(self, width=100, overlap=0.5, y_func=last, shuffle=False, random_state=None):
self.width = width
self.overlap = overlap
self.y_func = y_func
self.shuffle = shuffle
self.random_state = random_state
self._validate_params()
self.step = int(self.width * (1. - self.overlap))
self.step = max(1, self.step)
def _validate_params(self):
if not self.width >= 1:
raise ValueError("width must be >=1 (was %d)" % self.width)
if not (self.overlap >= 0.0 and self.overlap <= 1.0):
raise ValueError("overlap must be >=0 and <=1.0 (was %.2f)" % self.overlap)
def fit(self, X, y=None):
'''
Fit the transform
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : None
There is no need of a target in a transformer, yet the pipeline API requires this
parameter.
Returns
-------
self : object
Returns self.
'''
check_ts_data(X, y)
return self
def transform(self, X, y=None, sample_weight=None):
'''
Transforms the time series data into segments
Note this transformation changes the number of samples in the data
If y is provided, it is segmented and transformed to align to the new samples as per
``y_func``
        Currently, sample weights are always returned as None.
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like shape [n_series], default = None
target vector
sample_weight : array-like shape [n_series], default = None
sample weights
Returns
-------
Xt : array-like, shape [n_segments, ]
transformed time series data
yt : array-like, shape [n_segments]
expanded target vector
sample_weight_new : None
'''
check_ts_data(X, y)
Xt, Xc = get_ts_data_parts(X)
yt = y
N = len(Xt) # number of time series
if Xt[0].ndim > 1:
Xt = np.array([sliding_tensor(Xt[i], self.width, self.step) for i in np.arange(N)])
else:
Xt = np.array([sliding_window(Xt[i], self.width, self.step) for i in np.arange(N)])
Nt = [len(Xt[i]) for i in np.arange(len(Xt))]
Xt = np.concatenate(Xt)
if Xc is not None:
Xc = expand_variables_to_segments(Xc, Nt)
Xt = TS_Data(Xt, Xc)
if yt is not None:
yt = np.array([sliding_window(yt[i], self.width, self.step) for i in np.arange(N)])
yt = np.concatenate(yt)
yt = self.y_func(yt)
if self.shuffle is True:
check_random_state(self.random_state)
Xt, yt, _ = shuffle_data(Xt, yt)
return Xt, yt, None
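def _example_segment_xy():
    # Minimal usage sketch, not part of seglearn: here y is itself a series
    # sampled like X, and each segment's target is the last sample of the
    # matching y segment via y_func=last (see the docstring above).
    X = [np.random.rand(300, 2)]
    y = [np.random.rand(300)]
    segmenter = SegmentXY(width=100, overlap=0.5, y_func=last)
    Xt, yt, _ = segmenter.fit_transform(X, y)
    return Xt, yt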
class SegmentXYForecast(BaseEstimator, XyTransformerMixin):
'''
Forecast sliding window segmentation for time series or sequence datasets
The target y is mapped to segments from their parent series,
using the ``forecast`` and ``y_func`` parameters to determine the mapping behavior.
The segment targets can be a single value, or a sequence of values
depending on ``y_func`` parameter.
The transformed data consists of segment/target pairs that can be learned
through a feature representation or directly with a neural network.
Parameters
----------
width : int > 0
width of segments (number of samples)
overlap : float range [0,1]
amount of overlap between segments. must be in range: 0 <= overlap <= 1
(note: setting overlap to 1.0 results in the segments being advanced by a single sample)
forecast : int
The number of samples ahead in time to forecast
y_func : function
returns target from array of target forecast segments (eg ``last``, or ``mean``)
shuffle : bool, optional
shuffle the segments after transform (recommended for batch optimizations)
random_state : int, default = None
Randomized segment shuffling will return different results for each call to ``transform``.
If you have set ``shuffle`` to True and want the same result with each call to ``fit``, set
``random_state`` to an integer.
Returns
-------
self : object
Returns self.
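Examples
--------
A minimal illustrative sketch (random data, default ``y_func``); each segment of
``width`` samples is paired with a target drawn from the ``forecast`` samples that follow it:

>>> import numpy as np
>>> from seglearn.transform import SegmentXYForecast
>>> X = [np.random.rand(1000, 3)]
>>> y = [np.random.rand(1000)]
>>> segmenter = SegmentXYForecast(width=100, overlap=0.5, forecast=10)
>>> Xt, yt, _ = segmenter.fit(X, y).transform(X, y)
>>> Xt.shape, yt.shape
((18, 100, 3), (18,))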
'''
def __init__(self, width=100, overlap=0.5, forecast=10, y_func=last, shuffle=False,
random_state=None):
self.width = width
self.overlap = overlap
self.forecast = forecast
self.y_func = y_func
self.shuffle = shuffle
self.random_state = random_state
self._validate_params()
self.step = int(self.width * (1. - self.overlap))
self.step = max(1, self.step)
def _validate_params(self):
if not self.width >= 1:
raise ValueError("width must be >=1 (was %d)" % self.width)
if not (self.overlap >= 0.0 and self.overlap <= 1.0):
raise ValueError("overlap must be >=0 and <=1.0 (was %.2f)" % self.overlap)
if not self.forecast >= 1:
raise ValueError("forecast must be >=1 (was %d)" % self.forecast)
def fit(self, X=None, y=None):
'''
Fit the transform
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : None
There is no need of a target in a transformer, yet the pipeline API requires this
parameter.
Returns
-------
self : object
Returns self.
'''
check_ts_data(X, y)
return self
def transform(self, X, y, sample_weight=None):
'''
Forecast sliding window segmentation for time series or sequence datasets.
Note this transformation changes the number of samples in the data.
Currently sample weights always returned as None.
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like shape [n_series]
target vector
sample_weight : array-like shape [n_series], default = None
sample weights
Returns
-------
X_new : array-like, shape [n_segments, ]
segmented X data
y_new : array-like, shape [n_segments]
forecast y data
sample_weight_new : None
'''
check_ts_data(X, y)
Xt, Xc = get_ts_data_parts(X)
yt = y
# if only one time series is learned
if len(Xt[0]) == 1:
Xt = [Xt]
N = len(Xt) # number of time series
if Xt[0].ndim > 1:
Xt = np.array([sliding_tensor(Xt[i], self.width + self.forecast, self.step) for i in
np.arange(N)])
else:
Xt = np.array([sliding_window(Xt[i], self.width + self.forecast, self.step) for i in
np.arange(N)])
Nt = [len(Xt[i]) for i in np.arange(len(Xt))]
Xt = np.concatenate(Xt)
# todo: implement advance X
Xt = Xt[:, 0:self.width]
if Xc is not None:
Xc = expand_variables_to_segments(Xc, Nt)
Xt = TS_Data(Xt, Xc)
if yt is not None:
yt = np.array([sliding_window(yt[i], self.width + self.forecast, self.step) for i in
np.arange(N)])
yt = np.concatenate(yt)
yt = yt[:, self.width:(self.width + self.forecast)] # target y
yt = self.y_func(yt)
if self.shuffle is True:
check_random_state(self.random_state)
Xt, yt, _ = shuffle_data(Xt, yt)
return Xt, yt, None
def expand_variables_to_segments(v, Nt):
''' expands contextual variables v, by repeating each instance as specified in Nt '''
N_v = len(np.atleast_1d(v[0]))
return np.concatenate([np.full((Nt[i], N_v), v[i]) for i in np.arange(len(v))])
def sliding_window(time_series, width, step):
'''
Segments univariate time series with sliding window
Parameters
----------
time_series : array like shape [n_samples]
time series or sequence
width : int > 0
segment width in samples
step : int > 0
stepsize for sliding in samples
Returns
-------
w : array like shape [n_segments, width]
resampled time series segments
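Examples
--------
Illustrative only (integer sequence, width 4, step 2):

>>> import numpy as np
>>> sliding_window(np.arange(10), width=4, step=2)
array([[0, 1, 2, 3],
       [2, 3, 4, 5],
       [4, 5, 6, 7],
       [6, 7, 8, 9]])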
'''
w = np.hstack([time_series[i:1 + i - width or None:step] for i in range(0, width)])
return w.reshape((int(len(w) / width), width), order='F')
def sliding_tensor(mv_time_series, width, step):
'''
segments multivariate time series with sliding window
Parameters
----------
mv_time_series : array like shape [n_samples, n_variables]
multivariate time series or sequence
width : int > 0
segment width in samples
step : int > 0
stepsize for sliding in samples
Returns
-------
data : array like shape [n_segments, width, n_variables]
segmented multivariate time series data
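Examples
--------
Illustrative only: a 2-channel series of length 10 segmented with width 4 and step 2
yields 4 segments:

>>> import numpy as np
>>> sliding_tensor(np.random.rand(10, 2), width=4, step=2).shape
(4, 4, 2)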
'''
D = mv_time_series.shape[1]
data = [sliding_window(mv_time_series[:, j], width, step) for j in range(D)]
return np.stack(data, axis=2)
class PadTrunc(BaseEstimator, XyTransformerMixin):
'''
Transformer for using padding and truncation to enforce fixed length on all time
series in the dataset. Series longer than ``width`` are truncated to length ``width``.
Series shorter than ``width`` are padded at the end with zeros up to length ``width``.
The same behavior is applied to the target if it is a series and passed to the transformer.
Parameters
----------
width : int >= 1
width of segments (number of samples)
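Examples
--------
A minimal illustrative sketch: two series of unequal length are forced to width 5
(the short one zero padded, the long one truncated):

>>> import numpy as np
>>> from seglearn.transform import PadTrunc
>>> X = [np.arange(3.), np.arange(8.)]
>>> Xt, _, _ = PadTrunc(width=5).transform(X)
>>> Xt.shape
(2, 5)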
'''
def __init__(self, width=100):
if not width >= 1:
raise ValueError("width must be >= 1 (was %d)" % width)
self.width = width
def _mv_resize(self, v):
N = len(v)
if v[0].ndim > 1:
D = v[0].shape[1]
w = np.zeros((N, self.width, D))
else:
w = np.zeros((N, self.width))
for i in np.arange(N):
Ni = min(self.width, len(v[i]))
w[i, 0:Ni] = v[i][0:Ni]
return w
def fit(self, X, y=None):
'''
Fit the transform. Does nothing, for compatibility with sklearn API.
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : None
There is no need of a target in a transformer, yet the pipeline API requires this
parameter.
Returns
-------
self : object
Returns self.
'''
check_ts_data(X, y)
return self
def transform(self, X, y=None, sample_weight=None):
'''
Transforms the time series data into fixed length segments using padding and or truncation
If y is a time series and passed, it will be transformed as well
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like shape [n_series], default = None
target vector
sample_weight : array-like shape [n_series], default = None
sample weights
Returns
-------
X_new : array-like, shape [n_series, ]
transformed time series data
y_new : array-like, shape [n_series]
expanded target vector
sample_weight_new : None
'''
check_ts_data(X, y)
Xt, Xc = get_ts_data_parts(X)
yt = y
swt = sample_weight
Xt = self._mv_resize(Xt)
if Xc is not None:
Xt = TS_Data(Xt, Xc)
if yt is not None and len(np.atleast_1d(yt[0])) > 1:
# y is a time series
yt = self._mv_resize(yt)
swt = None
elif yt is not None:
# todo: is this needed?
yt = np.array(yt)
return Xt, yt, swt
class Interp(BaseEstimator, XyTransformerMixin):
'''
Transformer for resampling time series data to a fixed period over closed interval
(direct value interpolation).
Default interpolation is linear, but other types can be specified.
If the target is a series, it will be resampled as well.
``categorical_target`` should be set to True if the target series holds class labels.
The transformer will then use nearest neighbor interp on the target.
This transformer assumes the time dimension is column 0, i.e. X[0][:,0]
Note the time dimension is removed, since this becomes a linear sequence.
If start time or similar is important to the estimator, use a context variable.
Parameters
----------
sample_period : numeric
desired sampling period
kind : string
interpolation type - valid types as per scipy.interpolate.interp1d
categorical_target : bool
set to True for classification problems to use nearest neighbor instead of linear interpolation
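Examples
--------
A minimal illustrative sketch: one series sampled at 1 s intervals (time in column 0)
is resampled to a 0.5 s period, and the time column is dropped:

>>> import numpy as np
>>> from seglearn.transform import Interp
>>> t = np.arange(4.)
>>> X = [np.column_stack([t, 10. * t])]
>>> Xt, _, _ = Interp(sample_period=0.5).transform(X)
>>> Xt[0].shape
(6,)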
'''
def __init__(self, sample_period, kind='linear', categorical_target=False):
if not sample_period > 0:
raise ValueError("sample_period must be >0 (was %f)" % sample_period)
self.sample_period = sample_period
self.kind = kind
self.categorical_target = categorical_target
def fit(self, X, y=None):
'''
Fit the transform. Does nothing, for compatibility with sklearn API.
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : None
There is no need of a target in a transformer, yet the pipeline API requires this
parameter.
Returns
-------
self : object
Returns self.
'''
check_ts_data(X, y)
if not X[0].ndim > 1:
raise ValueError("X variable must have more than 1 channel")
return self
def _interp(self, t_new, t, x, kind):
interpolator = interp1d(t, x, kind=kind, copy=False, bounds_error=False,
fill_value="extrapolate", assume_sorted=True)
return interpolator(t_new)
def transform(self, X, y=None, sample_weight=None):
'''
Transforms the time series data with linear direct value interpolation
If y is a time series and passed, it will be transformed as well
The time dimension is removed from the data
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like shape [n_series], default = None
target vector
sample_weight : array-like shape [n_series], default = None
sample weights
Returns
-------
X_new : array-like, shape [n_series, ]
transformed time series data
y_new : array-like, shape [n_series]
expanded target vector
sample_weight_new : array-like or None
None is returned if target is changed. Otherwise it is returned unchanged.
'''
check_ts_data(X, y)
Xt, Xc = get_ts_data_parts(X)
yt = y
swt = sample_weight
N = len(Xt) # number of series
D = Xt[0].shape[1] - 1 # number of data channels
# 1st channel is time
t = [Xt[i][:, 0] for i in np.arange(N)]
t_lin = [np.arange(Xt[i][0, 0], Xt[i][-1, 0], self.sample_period) for i in np.arange(N)]
if D == 1:
Xt = [self._interp(t_lin[i], t[i], Xt[i][:, 1], kind=self.kind) for i in np.arange(N)]
elif D > 1:
Xt = [np.column_stack([self._interp(t_lin[i], t[i], Xt[i][:, j], kind=self.kind)
for j in range(1, D + 1)]) for i in np.arange(N)]
if Xc is not None:
Xt = TS_Data(Xt, Xc)
if yt is not None and len(np.atleast_1d(yt[0])) > 1:
# y is a time series
swt = None
if self.categorical_target is True:
yt = [self._interp(t_lin[i], t[i], yt[i], kind='nearest') for i in np.arange(N)]
else:
yt = [self._interp(t_lin[i], t[i], yt[i], kind=self.kind) for i in np.arange(N)]
else:
# y is static - leave y alone
pass
return Xt, yt, swt
class FeatureRep(BaseEstimator, TransformerMixin):
'''
A transformer for calculating a feature representation from segmented time series data.
This transformer calculates features from the segmented time series by computing the same
feature set for each segment from each time series in the data set.
The ``features`` computed are a parameter of this transformer, defined by a dict of functions.
The seglearn package includes some useful features, but this basic feature set can be easily
extended.
Parameters
----------
features : dict, optional
Dictionary of functions for calculating features from a segmented time series.
Each function in the dictionary is specified to compute features from a
multivariate segmented time series along axis 1 (the segment) eg:
>>> def mean(X):
>>> F = np.mean(X, axis = 1)
>>> return(F)
X : array-like shape [n_samples, segment_width, n_variables]
F : array-like [n_samples, n_features]
The number of features returned (n_features) must be >= 1
If features is not specified, a default feature dictionary will be used (see base_features).
See ``feature_functions`` for example implementations.
verbose: boolean, optional (default false)
Controls the verbosity of output messages
Attributes
----------
f_labels : list of string feature labels (in order) corresponding to the computed features
Examples
--------
>>> from seglearn.transform import FeatureRep, SegmentX
>>> from seglearn.pipe import Pype
>>> from seglearn.feature_functions import mean, var, std, skew
>>> from seglearn.datasets import load_watch
>>> from sklearn.ensemble import RandomForestClassifier
>>> data = load_watch()
>>> X = data['X']
>>> y = data['y']
>>> fts = {'mean': mean, 'var': var, 'std': std, 'skew': skew}
>>> clf = Pype([('seg', SegmentX()),
>>> ('ftr', FeatureRep(features = fts)),
>>> ('rf',RandomForestClassifier())])
>>> clf.fit(X, y)
>>> print(clf.score(X, y))
'''
def __init__(self, features='default', verbose = False):
if features == 'default':
self.features = base_features()
else:
if not isinstance(features, dict):
raise TypeError("features must either be 'default' or an instance of type dict")
self.features = features
if type(verbose) != bool:
raise TypeError("verbose parameter must be type boolean")
self.verbose = verbose
self.f_labels = None
def fit(self, X, y=None):
'''
Fit the transform
Parameters
----------
X : array-like, shape [n_series, ...]
Segmented time series data and (optionally) contextual data
y : None
There is no need of a target in a transformer, yet the pipeline API requires this
parameter.
Returns
-------
self : object
Returns self.
'''
check_ts_data(X, y)
self._reset()
if self.verbose:
print("X Shape: ", X.shape)
self.f_labels = self._generate_feature_labels(X)
return self
def transform(self, X):
'''
Transform the segmented time series data into feature data.
If contextual data is included in X, it is returned with the feature data.
Parameters
----------
X : array-like, shape [n_series, ...]
Segmented time series data and (optionally) contextual data
Returns
-------
X_new : array shape [n_series, ...]
Feature representation of segmented time series data and contextual data
'''
self._check_if_fitted()
Xt, Xc = get_ts_data_parts(X)
check_array(Xt, dtype='numeric', ensure_2d=False, allow_nd=True)
fts = np.column_stack([self.features[f](Xt) for f in self.features])
if Xc is not None:
fts = np.column_stack([fts, Xc])
return fts
def _reset(self):
''' Resets internal data-dependent state of the transformer. __init__ parameters not
touched. '''
self.f_labels = None
def _check_if_fitted(self):
if self.f_labels is None:
raise NotFittedError("FeatureRep")
def _check_features(self, features, Xti):
'''
tests output of each feature against a segmented time series X
Parameters
----------
features : dict
feature function dictionary
Xti : array-like, shape [n_samples, segment_width, n_variables]
segmented time series (instance)
Returns
-------
ftr_sizes : dict
number of features output by each feature function
'''
N = Xti.shape[0]
N_fts = len(features)
fshapes = np.zeros((N_fts, 2), dtype=int)
keys = [key for key in features]
for i in np.arange(N_fts):
fshapes[i] = np.row_stack(features[keys[i]](Xti)).shape
# make sure each feature returns an array shape [N, ]
if not np.all(fshapes[:, 0] == N):
raise ValueError("feature function returned array with invalid length, ",
np.array(list(features.keys()))[fshapes[:, 0] != N])
return {keys[i]: fshapes[i, 1] for i in range(N_fts)}
def _generate_feature_labels(self, X):
'''
Generates string feature labels
'''
Xt, Xc = get_ts_data_parts(X)
ftr_sizes = self._check_features(self.features, Xt[0:3])
f_labels = []
# calculated features
for key in ftr_sizes:
for i in range(ftr_sizes[key]):
f_labels += [key + '_' + str(i)]
# contextual features
if Xc is not None:
Ns = len(np.atleast_1d(Xc[0]))
s_labels = ["context_" + str(i) for i in range(Ns)]
f_labels += s_labels
return f_labels
class FeatureRepMix(_BaseComposition, TransformerMixin):
'''
A transformer for calculating a feature representation from segmented time series data.
This transformer calculates features from the segmented time series by applying the supplied
list of FeatureRep transformers on the specified columns of data. Non-specified columns are
dropped.
The segmented time series data is expected to enter this transform in the form of
num_samples x segment_size x num_features and to leave this transform in the form of
num_samples x num_features. The term columns refers to the last dimension of both
representations.
Note: This code is partially taken (_validate and _transformers functions with docstring) from
the scikit-learn ColumnTransformer made available under the 3-Clause BSD license.
Parameters
----------
transformers : list of (name, transformer, columns) to be applied on the segmented time series
name : string
unique string which is used to prefix the f_labels of the FeatureRep below
transformer : FeatureRep transform
to be applied on the columns specified below
columns : integer, slice or boolean mask
to specify the columns to be transformed
Attributes
----------
f_labels : list of string feature labels (in order) corresponding to the computed features
Examples
--------
>>> from seglearn.transform import FeatureRepMix, FeatureRep, SegmentX
>>> from seglearn.pipe import Pype
>>> from seglearn.feature_functions import mean, var, std, skew
>>> from seglearn.datasets import load_watch
>>> from sklearn.ensemble import RandomForestClassifier
>>> data = load_watch()
>>> X = data['X']
>>> y = data['y']
>>> mask = [False, False, False, True, True, True]
>>> clf = Pype([('seg', SegmentX()),
>>> ('union', FeatureRepMix([
>>> ('ftr_a', FeatureRep(features={'mean': mean}), 0),
>>> ('ftr_b', FeatureRep(features={'var': var}), [0,1,2]),
>>> ('ftr_c', FeatureRep(features={'std': std}), slice(3,7)),
>>> ('ftr_d', FeatureRep(features={'skew': skew}), mask),
>>> ])),
>>> ('rf',RandomForestClassifier())])
>>> clf.fit(X, y)
>>> print(clf.score(X, y))
'''
def __init__(self, transformers):
self.transformers = transformers
self.f_labels = None
@property
def _transformers(self):
'''
Internal list of transformers only containing the name and transformers, dropping the
columns. This is for the implementation of get_params via BaseComposition._get_params which
expects lists of tuples of len 2.
'''
return [(name, trans) for name, trans, _ in self.transformers]
@_transformers.setter
def _transformers(self, value):
self.transformers = [
(name, trans, col) for ((name, trans), (_, _, col))
in zip(value, self.transformers)]
def get_params(self, deep=True):
'''
Get parameters for this transformer.
Parameters
----------
deep : boolean, optional
If True, will return the parameters for this transformer and contained transformers.
Returns
-------
params : mapping of string to any parameter names mapped to their values.
'''
return self._get_params('_transformers', deep=deep)
def set_params(self, **kwargs):
'''
Set the parameters of this transformer.
Valid parameter keys can be listed with ``get_params()``.
Returns
-------
self
'''
self._set_params('_transformers', **kwargs)
return self
@staticmethod
def _select(Xt, cols):
'''
Select slices of the last dimension from time series data of the form
num_samples x segment_size x num_features.
'''
return np.atleast_3d(Xt)[:, :, cols]
@staticmethod
def _retrieve_indices(cols):
'''
Retrieve a list of indices corresponding to the provided column specification.
'''
if isinstance(cols, int):
return [cols]
elif isinstance(cols, slice):
start = cols.start if cols.start else 0
stop = cols.stop
step = cols.step if cols.step else 1
return list(range(start, stop, step))
elif isinstance(cols, list) and cols:
if isinstance(cols[0], bool):
return np.flatnonzero(np.asarray(cols))
elif isinstance(cols[0], int):
return cols
else:
raise TypeError('No valid column specifier. Only a scalar, a list or slice of '
'integers, or a boolean mask is allowed.')
def fit(self, X, y=None):
'''
Fit the transform
Parameters
----------
X : array-like, shape [n_series, ...]
Segmented time series data and (optionally) contextual data
y : None
There is no need of a target in a transformer, yet the pipeline API requires this
parameter.
Returns
-------
self : object
Returns self.
'''
Xt, Xc = get_ts_data_parts(X)
self.f_labels = []
# calculated features (prefix with the FeatureRep name and correct the index)
for name, trans, cols in self.transformers:
indices = self._retrieve_indices(cols)
trans.fit(self._select(Xt, cols))
for label, index in zip(trans.f_labels, indices):
self.f_labels.append(name + '_' + label.rsplit('_', 1)[0] + '_' + str(index))
# contextual features
if Xc is not None:
Ns = len(np.atleast_1d(Xc[0]))
self.f_labels += ['context_' + str(i) for i in range(Ns)]
return self
def _validate(self):
'''
Internal function to validate the transformer before applying all internal transformers.
'''
if self.f_labels is None:
raise NotFittedError('FeatureRepMix')
if not self.transformers:
return
names, transformers, _ = zip(*self.transformers)
# validate names
self._validate_names(names)
# validate transformers
for trans in transformers:
if not isinstance(trans, FeatureRep):
raise TypeError("All transformers must be an instance of FeatureRep."
" '%s' (type %s) is not." % (trans, type(trans)))
def transform(self, X):
'''
Transform the segmented time series data into feature data.
If contextual data is included in X, it is returned with the feature data.
Parameters
----------
X : array-like, shape [n_series, ...]
Segmented time series data and (optionally) contextual data
Returns
-------
X_new : array shape [n_series, ...]
Feature representation of segmented time series data and contextual data
'''
self._validate()
Xt, Xc = get_ts_data_parts(X)
check_array(Xt, dtype='numeric', ensure_2d=False, allow_nd=True)
# calculated features
fts = np.column_stack([trans.transform(self._select(Xt, cols))
for _, trans, cols in self.transformers])
# contextual features
if Xc is not None:
fts = np.column_stack([fts, Xc])
return fts
| 33.928322
| 100
| 0.592673
|
c54285a4fd064b9e8f4fe28d44258eb99678df85
| 7,770
|
py
|
Python
|
zeex/core/views/sql/add_connection.py
|
zbarge/dbtrix
|
1321978a929586a99bdddf470d970a4aca7e594c
|
[
"MIT"
] | 10
|
2016-12-07T15:54:02.000Z
|
2021-07-24T00:31:39.000Z
|
zeex/core/views/sql/add_connection.py
|
zbarge/dbtrix
|
1321978a929586a99bdddf470d970a4aca7e594c
|
[
"MIT"
] | 86
|
2016-12-12T03:28:19.000Z
|
2017-01-13T18:41:07.000Z
|
zeex/core/views/sql/add_connection.py
|
zbarge/dbtrix
|
1321978a929586a99bdddf470d970a4aca7e594c
|
[
"MIT"
] | 5
|
2016-12-07T23:53:59.000Z
|
2021-01-25T10:14:57.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 28 13:09:50 2016
MIT License
Copyright (c) 2016 Zeke Barge
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import logging
from zeex.core.ctrls.sql import AlchemyConnectionManager, AlchemyConnection
from zeex.core.ui.sql.add_connection_ui import Ui_AlchemyConnectionDialog
from zeex.core.compat import QtGui, QtCore
from zeex.core.utility.widgets import get_ok_msg_box, configure_combo_box
from sqlalchemy.engine.url import URL
DBAPI_MAP = {'sqlite':['pysqlite'], 'mysql':['mysqldb','pymysql'],
'postgresql':['psycopg2']}
class AlchemyConnectionDialog(QtGui.QDialog, Ui_AlchemyConnectionDialog):
"""
A dialog that allows a user to enter database connection parameters.
Successful connections are registered to the AlchemyConnectionManager
and the connection name is emitted whenever this happens.
"""
signalConnectionAdded = QtCore.Signal(str)
def __init__(self, connection_manager: AlchemyConnectionManager, **kwargs):
QtGui.QDialog.__init__(self, **kwargs)
self.con_manager = connection_manager
self.configure()
def configure(self):
db_types = list(sorted(DBAPI_MAP.keys(), reverse=True))
db_apis = DBAPI_MAP[db_types[0]]
self.setupUi(self)
# Actions
self.btnTestConnection.clicked.connect(self.test_connection)
self.buttonBox.clicked.connect(self.register_connection)
self.comboBoxDatabaseType.currentIndexChanged.connect(self.sync_options)
self.lineEditConnectionURL.textChanged.connect(self.sync_options)
self.btnClear.clicked.connect(self.reset_line_edit_text)
# default modes & items
self.lineEditPassword.setEchoMode(QtGui.QLineEdit.Password)
self.comboBoxDatabaseType.addItems(db_types)
self.comboBoxDatabaseAPI.addItems(db_apis)
self.sync_options()
def test_connection(self, show_success=True):
try:
c = self.get_connection()
if show_success is True:
box = get_ok_msg_box(self, "Connected to database!", title="CONNECTION SUCCESS")
box.show()
return c
except Exception as e:
box = get_ok_msg_box(self, str(e), title="CONNECTION ERROR")
box.show()
raise
def get_connection(self) -> AlchemyConnection:
"""
Builds an AlchemyConnection as described by the user
in the line edits/combo boxes.
:return:
"""
con_name = self.lineEditConnectionName.text()
db_type = self.comboBoxDatabaseType.currentText()
uri = self.lineEditConnectionURL.text()
if db_type == 'sqlite' and uri == '':
uri = self.lineEditHost.text()
a = AlchemyConnection(name=con_name)
if uri == '':
port = self.lineEditPort.text()
host = self.lineEditHost.text()
username = self.lineEditUsername.text()
password = self.lineEditPassword.text()
database = self.lineEditDefaultDatabase.text()
db_api = self.comboBoxDatabaseAPI.currentText()
DATABASE = {
'drivername': "{}+{}".format(db_type, db_api),
'host': host or None,
'port': port or None,
'username': username or None,
'password': password or None,
'database': database or None}
try:
__import__(db_api)
except ImportError:
logging.error("Unable to import DBAPI: {} - you need to pip install it.".format(db_api))
DATABASE['drivername'] = db_type
uri = URL(**DATABASE)
else:
if db_type == 'sqlite' and not uri.startswith('sqlite'):
uri = uri.replace("\\", "/").replace("//", "/").replace("/", "\\\\")
uri = URL('sqlite', database=uri)
a.configure_from_url(uri)
return a
def register_connection(self, connection=None):
"""
:param connection: (AlchemyConnection, default None)
An optional pre-compiled AlchemyConnection to register with the connection manager.
Otherwise one will be generated from the dialog's current entries.
:return: (None)
"""
if not isinstance(connection, AlchemyConnection):
connection = self.test_connection(show_success=False)
self.con_manager.add_connection(connection=connection, allow_duplicate=True)
self.con_manager.save_settings()
self.signalConnectionAdded.emit(connection.name)
def sync_options(self):
"""
Keeps the available options in sync on the dialog.
Makes sure users don't see irrelevant options.
Example:
- database type = sqlite
- hide irrelevant options
- database type = 'postgresql'
- show hidden options (if any)
- database type = 'mysql' & URL provided
- only show URL
:return: None
"""
db_type = self.comboBoxDatabaseType.currentText()
db_api = self.comboBoxDatabaseAPI.currentText()
if db_type == 'sqlite' or (self.lineEditConnectionURL.text() != '' and
db_type != 'sqlite'):
if db_type != 'sqlite':
self.labelHost.hide()
self.lineEditHost.hide()
else:
self.lineEditConnectionURL.hide()
self.labelConnectionURL.hide()
self.lineEditPort.hide()
self.labelPort.hide()
self.lineEditDefaultDatabase.hide()
self.lineEditPassword.hide()
self.labelPassword.hide()
self.lineEditUsername.hide()
self.labelUsername.hide()
self.labelDefaultDatabase.hide()
self.labelDatabaseAPI.hide()
self.comboBoxDatabaseAPI.hide()
else:
self.lineEditConnectionURL.show()
self.labelConnectionURL.show()
self.lineEditPort.show()
self.labelPort.show()
self.labelHost.show()
self.lineEditHost.show()
self.lineEditPassword.show()
self.labelPassword.show()
self.lineEditUsername.show()
self.labelUsername.show()
self.labelDefaultDatabase.show()
self.lineEditDefaultDatabase.show()
self.labelDatabaseAPI.show()
self.comboBoxDatabaseAPI.show()
configure_combo_box(self.comboBoxDatabaseAPI, DBAPI_MAP[db_type], db_api)
def reset_line_edit_text(self):
for line in self.findChildren(QtGui.QLineEdit):
line.setText('')
| 40.051546
| 104
| 0.635521
|
7021645aa75aecef0bdc5c45784dd06e26de8b2f
| 7,564
|
py
|
Python
|
ai.py
|
jaredjxyz/AIV
|
e84954e152d2311db2cd4fde088195d80fd970b1
|
[
"MIT"
] | null | null | null |
ai.py
|
jaredjxyz/AIV
|
e84954e152d2311db2cd4fde088195d80fd970b1
|
[
"MIT"
] | null | null | null |
ai.py
|
jaredjxyz/AIV
|
e84954e152d2311db2cd4fde088195d80fd970b1
|
[
"MIT"
] | null | null | null |
import serial
import signal
import sys
import os
import time
import random
from pyax12.connection import Connection
from weapon import WeaponArm
from datetime import datetime,timedelta
h_fov = 78.0 # TODO: Read this in from config.txt and calculate real horizontal angle
latest_instruction = 'aa0'
last_motorInstruction = 'AA0'
last_heading = 10000
last_power = 10000
#port = serial.Serial("/dev/ttyUSB0", 9600, timeout = 2)
def main():
signal.signal(signal.SIGINT, exit_gracefully)
# Take in 3 arguments: usually front.txt, back.txt, heading.txt
if len(sys.argv) != 4:
print("This requires 3 arguments: the front input file, the back input file, and the output file")
exit(1)
front_camera_filename = sys.argv[1]
back_camera_filename = sys.argv[2]
heading_filename = sys.argv[3]
global weapon_arm
weapon_arm = WeaponArm()
#weapon_arm.goToHomePosition()
weapon_arm.goToRange(up=1)
#spin_to_find_apriltags(front_camera_filename, back_camera_filename)
move_toward_tag(front_camera_filename, back_camera_filename)
def move_toward_tag(front_camera_filename, back_camera_filename):
global last_motorInstruction
global last_heading
global last_power
last = 0
d = datetime.now()
move_time = d
while True:
m = (datetime.now()-d).microseconds
if last != m - m % 100:
last = m - m % 100
displayTTYSend(last_motorInstruction)
if (datetime.now()-move_time).total_seconds()>0:
detections = detect_apriltags(front_camera_filename, back_camera_filename)
# Find an apriltag, move toward it.
if len(detections['front']) == 0 and len(detections['back']) == 0:
if last_motorInstruction not in ["AA0","aa0"]:
last_motorInstruction="AA0"
last_heading = 10000
last_power = 10000
weapon_arm.goToRange(up=1)
displayTTYSend(last_motorInstruction)
continue
# sendWeaponInstruction('1')
if len(detections['front']) > 0:
side = 'front'
active_detection = detections['front'][0]
else:
side = 'back'
active_detection = detections['back'][0]
distance = active_detection[2]
heading = active_detection[0]
power = distance * 10
power = int(min(power, 20))
if side == 'back':
power = -power
up = abs(power)/20
weapon_arm.goToRange(up=up,left=0.95 if side=="front" else 0.0,amplitude=up,t=(datetime.now()-d).total_seconds())
heading_char = degreesToMotorDirections(heading)
left_adjustment, right_adjustment = (motorDirectionsToPower(letter) for letter in heading_char)
if side == 'back':
left_adjustment, right_adjustment = -left_adjustment, -right_adjustment
leftPower = int(min(max(power + left_adjustment, -20), 20))
rightPower = int(min(max(power + right_adjustment, -20), 20))
#print(leftPower, rightPower)
if abs(power) < 10:
move_time=datetime.now()+timedelta(seconds=0.5)
elif abs(power)>=10 and abs(power) <=20:
move_time=datetime.now()+timedelta(seconds=1)
if (datetime.now()-move_time).total_seconds()<0 and abs(heading-last_heading)>1 or abs(power-last_power)>1:
last_heading = heading
last_power = power
last_motorInstruction = powerToMotorDirections(leftPower) + powerToMotorDirections(rightPower)
displayTTYSend(last_motorInstruction+"1")
# stops drive motors
def exit_gracefully(signal, frame):
displayTTYSend('AA0')
exit()
def apriltag_is_in_sight(front_camera_filename, back_camera_filename):
detections = detect_apriltags(front_camera_filename, back_camera_filename)
return len(detections['front']) > 0 or len(detections['back']) > 0
def start_spinning_incrementally(stop_condition=lambda: False):
start_time = time.time()
while not stop_condition():
if ((time.time() - start_time)//.40) % 2 == 1:
heading_string = degreesToMotorDirections(20.0)
sendMotorInstruction(heading_string)
else:
heading_string = degreesToMotorDirections(0.0)
sendMotorInstruction(heading_string)
time.sleep(1/30)
sendMotorInstruction('AA')
return
def spin_to_find_apriltags(front_camera_filename, back_camera_filename):
sees_apriltag = lambda: apriltag_is_in_sight(front_camera_filename, back_camera_filename)
while True:
start_spinning_incrementally(stop_condition=sees_apriltag)
start_following_tags(front_camera_filename, back_camera_filename, stop_condition=lambda: not sees_apriltag())
def start_following_tags(front_camera_filename, back_camera_filename, stop_condition=lambda: False):
while not stop_condition():
detections = detect_apriltags(front_camera_filename, back_camera_filename)
front_detections = detections['front']
back_detections = detections['back']
relevant_detections = front_detections or back_detections
if relevant_detections:
chosen_heading, tag_id, distance = relevant_detections[0]
else:
chosen_heading, tag_id, distance = 0, 0, 0
# Only attack even numbered april tags
if tag_id % 2 == 1:
chosen_heading = 0
heading_string = degreesToMotorDirections(chosen_heading)
print(heading_string)
sendMotorInstruction(heading_string)
def detect_apriltags(front_camera_filename, back_camera_filename):
front_heading = 0
back_heading = 0
front_id = 0
back_id = 0
detections = {'front': [], 'back': []}
with open(front_camera_filename, 'r') as front_file, open(back_camera_filename, 'r') as back_file:
for line in front_file:
detections['front'].append(tuple(float(number) for number in line.split()))
for line in back_file:
detections['back'].append(tuple(float(number) for number in line.split()))
return detections
def degreesToMotorDirections(angle):
"""Turns angle into AA/aa/UU/uu directions"""
# Get speed between 0 and 25
normalized_angle = angle / (h_fov / 2)
if normalized_angle < -1:
normalized_angle = -1
if normalized_angle > 1:
normalized_angle = 1
# Find alphanumeric letter
letter_number = abs(int(normalized_angle * 20))
if angle > 0:
leftLetter = chr(ord('a') + letter_number)
rightLetter = chr(ord('A') + letter_number)
else:
leftLetter = chr(ord('A') + letter_number)
rightLetter = chr(ord('a') + letter_number)
return leftLetter + rightLetter
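# Illustrative values only (assuming h_fov = 78.0 as configured above):
# degreesToMotorDirections(0.0) -> 'Aa' (letter_number 0, no turn correction)
# degreesToMotorDirections(39.0) -> 'uU' (target at the edge of view, maximum differential turn)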
def motorDirectionsToPower(letter):
if 'a' <= letter <= 'u': return ord(letter) - ord('a')
elif 'A' <= letter <= 'U': return -(ord(letter) - ord('A'))
def powerToMotorDirections(power):
return chr(power + ord('A')) if power > 0 else chr(-power + ord('a'))
def displayTTYSend(str1):
"""Sends a string to the motor controller.
"""
with open("debug.txt","a") as f:
port_mbed = serial.Serial("/dev/ttyUSB0", 9600, timeout = 2)
str2 = ('<' + str1 + '>').encode("ascii")
port_mbed.write(str2)
print(str2,len(str2))
port_mbed.close()
if __name__ == '__main__':
main()
| 37.261084
| 125
| 0.652168
|
46c5f8c425ee2f7c123ae766c80c612cf2f16422
| 4,376
|
py
|
Python
|
src/api_tumblr/paging.py
|
nostalgebraist/nostalgebraist-autoresponder
|
622349c4cad2a7aec1017837416c58a678151aae
|
[
"MIT"
] | 39
|
2020-06-19T05:38:11.000Z
|
2022-03-28T04:35:31.000Z
|
src/api_tumblr/paging.py
|
nostalgebraist/nostalgebraist-autoresponder
|
622349c4cad2a7aec1017837416c58a678151aae
|
[
"MIT"
] | null | null | null |
src/api_tumblr/paging.py
|
nostalgebraist/nostalgebraist-autoresponder
|
622349c4cad2a7aec1017837416c58a678151aae
|
[
"MIT"
] | 2
|
2021-04-13T18:12:03.000Z
|
2021-12-16T23:20:12.000Z
|
from typing import Optional
from collections import Counter
from tqdm.autonotebook import tqdm
from api_tumblr.client_pool import ClientPool
from util.error_handling import LogExceptionAndSkip
import config.bot_config_singleton
bot_specific_constants = config.bot_config_singleton.bot_specific_constants
bot_name = bot_specific_constants.blogName
from util.times import fromtimestamp_pst
# TODO: DRY (centralize paging helpers)
def fetch_next_page(client, offset, limit=50, blog_name: str = bot_name, before=None):
kwargs = dict(limit=limit, offset=offset)
if before:
kwargs["before"] = before
response = client.posts(blog_name, **kwargs)
posts = response["posts"]
total_posts = response["total_posts"]
next_offset = None
# TODO: DRY (use this module in tumbl.py)
#
# TODO: use `page_number` or w/e it is tumblr wants me to do now (8/19/21)
# with LogExceptionAndSkip("get next offset for /posts"):
# next_offset = response["_links"]["next"]["query_params"]["offset"]
if next_offset is None:
next_offset = offset + len(posts) # fallback
return posts, next_offset, total_posts
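# Illustrative paging loop (a sketch, not part of this module; assumes `client` is an
# authenticated pytumblr-style client such as those handed out by ClientPool):
#
#     posts, offset, total = fetch_next_page(client, offset=0)
#     while posts and offset < total:
#         posts, offset, total = fetch_next_page(client, offset=offset)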
def fetch_posts(pool: ClientPool,
blog_name: str = bot_name,
n: Optional[int] = None,
offset: int = 0,
report_cadence=5000,
needs_private_client=False,
needs_dash_client=False,
stop_at_id=0,
before=None,
screener=None):
posts = []
ids = set()
since_last_report = 0
n_ok = 0
n_full = 0
rejection_reasons = Counter()
tqdm_bar = None
if needs_private_client and needs_dash_client:
raise ValueError("fetch_posts: only one of needs_private_client and needs_dash_client can be true")
client_getter = pool.get_client
if needs_private_client:
client_getter = pool.get_private_client
if needs_dash_client:
client_getter = pool.get_dashboard_client
while True:
client = client_getter()
page, next_offset, total_posts = fetch_next_page(client, offset=offset, blog_name=blog_name, before=before)
if not tqdm_bar:
tqdm_bar = tqdm(total=total_posts)
tqdm_bar.update(offset)
tqdm_bar.set_postfix(cl=pool.client_name(client))
if (len(page) == 0) or (next_offset == offset):
print(f"stopping, empty page after {len(posts)} posts")
return posts
since_last_report += len(page)
if since_last_report >= report_cadence:
pool.report()
since_last_report = 0
nraw = len(page)
page = [pp for pp in page if pp['id'] not in ids]
ndedup = len(page)
page = [pp for pp in page
if pp['id'] > stop_at_id
or pp.get('is_pinned') # pins make id non-monotonic
]
nafter = len(page)
nbefore = ndedup - nafter
page_ids = {pp['id'] for pp in page}
delta_full = len(page)
n_full += delta_full
if screener:
_page = []
reasons = []
for pp in page:
ok, reason, _ = screener(pp)
if ok:
_page.append(pp)
else:
reasons.append(reason)
rejection_reasons.update(reasons)
page = _page
n_ok += len(page)
ids.update(page_ids)
posts.extend(page)
offset = next_offset
if len(page) == 0:
min_ts = None
else:
min_ts = fromtimestamp_pst(min(pp['timestamp'] for pp in page)).isoformat()
tqdm_bar.update(delta_full)
tqdm_bar.set_postfix(cl=pool.client_name(client), min_ts=min_ts, n_ok=n_ok, n_full=n_full)
max_n = total_posts
if n:
max_n = min(n, max_n)
if n_full >= max_n:
print(f"stopping with {n_full} posts, {n_ok} OK: reached maximum {max_n}")
print(f"rejection_reasons: {rejection_reasons.most_common()}")
return posts
if nbefore > 0:
print(f"stopping with {n_full} posts, {n_ok} OK: {nbefore}/{ndedup} in current page are before id {stop_at_id}")
print(f"rejection_reasons: {rejection_reasons.most_common()}")
return posts
| 31.941606
| 124
| 0.606718
|
c14cea3166b9cc554ff3b0aa54875bb94e5f4c61
| 4,263
|
py
|
Python
|
GRURNN/updateRNN.py
|
tsudalab/bopp
|
a3e9259e05fea413854b09f9e8cd7215587bd430
|
[
"MIT"
] | 3
|
2020-07-14T15:51:56.000Z
|
2021-08-05T11:40:28.000Z
|
GRURNN/updateRNN.py
|
tsudalab/bopp
|
a3e9259e05fea413854b09f9e8cd7215587bd430
|
[
"MIT"
] | null | null | null |
GRURNN/updateRNN.py
|
tsudalab/bopp
|
a3e9259e05fea413854b09f9e8cd7215587bd430
|
[
"MIT"
] | 2
|
2021-03-28T06:55:35.000Z
|
2021-04-24T12:16:25.000Z
|
from __future__ import print_function
import numpy as np
import os
import sys
import multiprocessing as mp
from keras.models import Sequential
from keras.layers import Dense, Activation,TimeDistributed,MaxPooling1D
from keras.layers import LSTM,GRU
from keras.layers.embeddings import Embedding
from keras.optimizers import RMSprop, Adam
from keras.utils.data_utils import get_file
from keras.layers import Dropout
import numpy as np
import random
import sys
from keras.utils.np_utils import to_categorical
from keras.preprocessing import sequence
from keras.models import model_from_json
from random import sample
from parameter import *
#predefine amino acid list. B and space is for token and padding.
aalist=["B","A","R","N","D","C","Q","E","G","H","I","L","K","M","F","P","S","T","W","Y","V","X"," "]
def updatemodel(seqinp,maxlnpep):
#remove the tailing nextline character and adding token and padding seq
#print(aalist)
#tmp=seqinp.strip()
tmp=seqinp.strip()+"X"
while len(tmp)<=maxlnpep:
tmp=tmp+" "
#create the coding array for peptide sequence
coding=[]
seqid=[]
for x in range(0,maxlnpep+1):
#print(tmp[x])
tmpctgr=to_categorical(aalist.index(tmp[x]), num_classes=len(aalist),dtype="int32")
#print(tmpctgr)
coding.append(tmpctgr)
seqid.append(aalist.index(tmp[x]))
print("length of coding is "+str(len(coding)))
#print(seqid)
return seqid,coding
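# Illustrative encoding (hypothetical call): updatemodel("AR\n", 5) strips the input,
# appends the end token "X" and pads with spaces to "ARX   " (maxlnpep + 1 characters),
# returning the aalist indices [1, 2, 21, 22, 22, 22] together with the matching one-hot rows.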
def loaddata(csvpath,csvpathneg,maxlnpep):
#load and read file
f=open(csvpath,'r')
ln=f.readlines()[1:]
lenln=len(ln)
clnpep=[]
clncoding=[]
f.close()
fn=open(csvpathneg,'r')
lnn=fn.readlines()[1:]
lenlnn=len(lnn)
fn.close()
#maxlnpep define the maximum length of the peptide
#clnpep is the format sequence array
#clean the line and add begin token and space padding to the data array
datacutoff=0
f=open("../AMP-data/RNN-dropoutdata-3Mar2019-GRU256-64.csv","w")
#seqlist=sample(range(0,lenln),lenln-1000)
seqlist=sample(range(0,lenln),lenln)
seqlistneg=sample(range(0,lenlnn),lenlnn)
for i in range(0,lenln):
print("process sequence "+str(i)+" over "+str(lenln))
if (len(ln[i])<=maxlnpep)&(i in seqlist):
frmseq,frmcod=seqfrmat(ln[i],maxlnpep)
#frmcod=to_categorical(1, num_classes=2,dtype="int32")
frmcod=[[1]]
clnpep.append(frmseq)
clncoding.append(frmcod)
else:
f.write(ln[i].strip()+"X"+"\n")
for i in range(0,lenlnn):
print("process negative sequence "+str(i)+" over "+str(lenlnn))
if (len(lnn[i])<=maxlnpep)&(i in seqlistneg):
frmseq,frmcod=seqfrmat(lnn[i],maxlnpep)
#frmcod=to_categorical(0, num_classes=2,dtype="int32")
frmcod=[[0]]
clnpep.append(frmseq)
clncoding.append(frmcod)
else:
f.write(lnn[i].strip()+"X"+"\n")
f.close()
return clnpep,clncoding
def save_model(model):
# serialize model to JSON
model_json = model.to_json()
with open("AMPcls-GRU256-64.json", "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
model.save_weights("AMPcls-GRU256-64.h5")
print("Saved model to disk")
return
if __name__ == "__main__":
maxlnpep=55
nproc=4
X_data,Y_data=loaddata("../AMP-data/AMP-data-clean.csv","../AMP-data/nAMP_natrl.csv",maxlnpep)
X=np.array((X_data))
Y= np.array((Y_data))
#initialize NN model
model = Sequential()
aalstln=len(aalist)
print(aalstln)
dataln=X.shape[1]
print(dataln)
print(X.shape)
print(Y.shape)
model.add(Embedding(input_dim=aalstln, output_dim=len(aalist), input_length=dataln,mask_zero=False))
model.add(GRU(output_dim=256, activation='tanh',return_sequences=True))
model.add(Dropout(0.2))
model.add(Dense(1, activation='sigmoid'))
model.add(MaxPooling1D(pool_size=52))
optimizer=Adam(lr=0.00001) # try much smaller one 0.001 0.00001
print(model.summary())
model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
model.fit(X,Y,epochs=3000, batch_size=512,validation_split=0.1)
save_model(model)
| 34.104
| 104
| 0.66221
|
abe859f3db745d6c3be051b4482b08b1761b28a6
| 7,877
|
py
|
Python
|
mcu/luatool.py
|
creationix/nodemcu-webide
|
acca18338074d257e7f9deec5016989f60f30d81
|
[
"MIT"
] | 24
|
2015-06-06T00:09:34.000Z
|
2021-04-06T01:58:51.000Z
|
mcu/luatool.py
|
creationix/nodemcu-webide
|
acca18338074d257e7f9deec5016989f60f30d81
|
[
"MIT"
] | 2
|
2016-03-07T13:09:22.000Z
|
2017-02-06T01:58:05.000Z
|
mcu/luatool.py
|
creationix/nodemcu-webide
|
acca18338074d257e7f9deec5016989f60f30d81
|
[
"MIT"
] | 14
|
2015-06-15T13:14:43.000Z
|
2021-02-06T15:23:08.000Z
|
#!/usr/bin/env python2
#
# ESP8266 luatool
# Author e-mail: 4ref0nt@gmail.com
# Site: http://esp8266.ru
# Contributions from: https://github.com/sej7278
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51 Franklin
# Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import serial
from time import sleep
import argparse
from os.path import basename
version = "0.6.3"
def writeln(data, check=1):
if s.inWaiting() > 0:
s.flushInput()
if len(data) > 0:
sys.stdout.write("\r\n->")
sys.stdout.write(data.split("\r")[0])
s.write(data)
sleep(0.3)
if check > 0:
line = ''
char = ''
while char != chr(62): # '>'
char = s.read(1)
if char == '':
raise Exception('No proper answer from MCU')
if char == chr(13) or char == chr(10): # LF or CR
if line != '':
line = line.strip()
if line+'\r' == data:
sys.stdout.write(" -> ok")
else:
if line[:4] == "lua:":
sys.stdout.write("\r\n\r\nLua ERROR: %s" % line)
raise Exception('ERROR from Lua interpreter\r\n\r\n')
else:
data = data.split("\r")[0]
sys.stdout.write("\r\n\r\nERROR")
sys.stdout.write("\r\n send string : '%s'" % data)
sys.stdout.write("\r\n expected echo : '%s'" % data)
sys.stdout.write("\r\n but got answer : '%s'" % line)
sys.stdout.write("\r\n\r\n")
raise Exception('Error sending data to MCU\r\n\r\n')
line = ''
else:
line += char
else:
sys.stdout.write(" -> send without check")
def writer(data):
writeln("file.writeline([==[" + data + "]==])\r")
def openserial(args):
# Open the selected serial port
try:
s = serial.Serial(args.port, args.baud)
except:
sys.stderr.write("Could not open port %s\n" % (args.port))
sys.exit(1)
if args.verbose:
sys.stderr.write("Set timeout %s\r\n" % s.timeout)
s.timeout = 3
if args.verbose:
sys.stderr.write("Set interCharTimeout %s\r\n" % s.interCharTimeout)
s.interCharTimeout = 3
return s
if __name__ == '__main__':
# parse arguments or use defaults
parser = argparse.ArgumentParser(description='ESP8266 Lua script uploader.')
parser.add_argument('-p', '--port', default='/dev/ttyUSB0', help='Device name, default /dev/ttyUSB0')
parser.add_argument('-b', '--baud', default=9600, help='Baudrate, default 9600')
parser.add_argument('-f', '--src', default='main.lua', help='Source file on computer, default main.lua')
parser.add_argument('-t', '--dest', default=None, help='Destination file on MCU, default to source file name')
parser.add_argument('-c', '--compile', action='store_true', help='Compile lua to lc after upload')
parser.add_argument('-r', '--restart', action='store_true', help='Restart MCU after upload')
parser.add_argument('-d', '--dofile', action='store_true', help='Run the Lua script after upload')
parser.add_argument('-v', '--verbose', action='store_true', help="Show progress messages.")
parser.add_argument('-l', '--list', action='store_true', help='List files on device')
parser.add_argument('-w', '--wipe', action='store_true', help='Delete all lua/lc files on device.')
args = parser.parse_args()
if args.list:
s = openserial(args)
writeln("local l = file.list();for k,v in pairs(l) do print('name:'..k..', size:'..v)end\r", 0)
while True:
char = s.read(1)
if char == '' or char == chr(62):
break
sys.stdout.write(char)
sys.exit(0)
if args.wipe:
s = openserial(args)
writeln("local l = file.list();for k,v in pairs(l) do print(k)end\r", 0)
file_list = []
fn = ""
while True:
char = s.read(1)
if char == '' or char == chr(62):
break
if char not in ['\r', '\n']:
fn += char
else:
if fn:
file_list.append(fn.strip())
fn = ''
for fn in file_list[1:]: # first line is the list command sent to device
if args.verbose:
sys.stderr.write("Delete file {} from device.\r\n".format(fn))
writeln("file.remove(\"" + fn + "\")\r")
sys.exit(0)
if args.dest is None:
args.dest = basename(args.src)
# open source file for reading
try:
f = open(args.src, "rt")
except:
sys.stderr.write("Could not open input file \"%s\"\n" % args.src)
sys.exit(1)
# Verify the selected file will not exceed the size of the serial buffer.
# The size of the buffer is 256. This script does not accept files with
# lines longer than 230 characters to have some room for command overhead.
for ln in f:
if len(ln) > 230:
sys.stderr.write("File \"%s\" contains a line with more than 230 "
"characters. This exceeds the size of the serial buffer.\n"
% args.src)
f.close()
sys.exit(1)
# Go back to the beginning of the file after verifying it has the correct
# line length
f.seek(0)
# Open the selected serial port
s = openserial(args)
# set serial timeout
if args.verbose:
sys.stderr.write("Upload starting\r\n")
# remove existing file on device
if args.verbose:
sys.stderr.write("Stage 1. Deleting old file from flash memory")
writeln("file.open(\"" + args.dest + "\", \"w\")\r")
writeln("file.close()\r")
writeln("file.remove(\"" + args.dest + "\")\r")
# read source file line by line and write to device
if args.verbose:
sys.stderr.write("\r\nStage 2. Creating file in flash memory and write first line")
writeln("file.open(\"" + args.dest + "\", \"w+\")\r")
line = f.readline()
if args.verbose:
sys.stderr.write("\r\nStage 3. Start writing data to flash memory...")
while line != '':
writer(line.strip())
line = f.readline()
# close both files
f.close()
if args.verbose:
sys.stderr.write("\r\nStage 4. Flush data and closing file")
writeln("file.flush()\r")
writeln("file.close()\r")
# compile?
if args.compile:
if args.verbose:
sys.stderr.write("\r\nStage 5. Compiling")
writeln("node.compile(\"" + args.dest + "\")\r")
writeln("file.remove(\"" + args.dest + "\")\r")
# restart or dofile
if args.restart:
writeln("node.restart()\r")
if args.dofile: # never exec if restart=1
writeln("dofile(\"" + args.dest + "\")\r", 0)
# close serial port
s.flush()
s.close()
# flush screen
sys.stdout.flush()
sys.stderr.flush()
sys.stderr.write("\r\n--->>> All done <<<---\r\n")
| 36.981221
| 127
| 0.55643
|
c0ddce1dca8364feb65dd73e1f27a6aaf8040995
| 66,393
|
py
|
Python
|
rom_operator_inference/_core.py
|
jomorlier/rom-operator-inference-Python3
|
25cdb5e411add16f8647b114c993738037c58d57
|
[
"MIT"
] | 1
|
2020-04-19T19:52:07.000Z
|
2020-04-19T19:52:07.000Z
|
rom_operator_inference/_core.py
|
jomorlier/rom-operator-inference-Python3
|
25cdb5e411add16f8647b114c993738037c58d57
|
[
"MIT"
] | null | null | null |
rom_operator_inference/_core.py
|
jomorlier/rom-operator-inference-Python3
|
25cdb5e411add16f8647b114c993738037c58d57
|
[
"MIT"
] | null | null | null |
# _core.py
"""Class for model order reduction of ODEs via operator inference."""
import warnings
import itertools
import numpy as np
from scipy import linalg as la
from scipy.interpolate import CubicSpline
from scipy.integrate import solve_ivp, IntegrationWarning
from .utils import (lstsq_reg,
expand_Hc as Hc2H,
compress_H as H2Hc,
kron_compact as kron2)
# Helper classes and functions ================================================
class AffineOperator:
"""Class for representing a linear operator with affine structure, i.e.,
A(µ) = sum_{i=1}^{nterms} θ_{i}(µ) * A_{i}.
The matrix A(µ) is constructed by calling the object once the coefficient
functions and constituent matrices are set.
Attributes
----------
nterms : int
The number of terms in the sum defining the linear operator.
coefficient_functions : list of nterms callables
The coefficient scalar-valued functions that define the operator.
Each must take the same sized input and return a scalar.
matrices : list of nterms ndarrays of the same shape
The constituent matrices defining the linear operator.
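Examples
--------
A minimal illustrative sketch (one constant and one µ-dependent coefficient):

>>> import numpy as np
>>> op = AffineOperator(coeffs=[lambda mu: 1.0, lambda mu: mu],
...                     matrices=[np.eye(2), np.ones((2, 2))])
>>> np.allclose(op(2.0), np.eye(2) + 2 * np.ones((2, 2)))
True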
"""
def __init__(self, coeffs, matrices=None):
self.coefficient_functions = coeffs
self.nterms = len(coeffs)
if matrices:
self.matrices = matrices
else:
self._ready = False
@property
def matrices(self):
"""Get the constituent matrices."""
return self._matrices
@matrices.setter
def matrices(self, ms):
"""Set the constituent matrices, checking that the shapes are equal."""
if len(ms) != self.nterms:
_noun = "matrix" if self.nterms == 1 else "matrices"
raise ValueError(f"expected {self.nterms} {_noun}, got {len(ms)}")
# Check that each matrix in the list has the same shape.
shape = ms[0].shape
for m in ms:
if m.shape != shape:
raise ValueError("affine operator matrix shapes do not match "
f"({m.shape} != {shape})")
# Store matrix list and shape, and mark as ready (for __call__()).
self._matrices = ms
self.shape = shape
self._ready = True
def validate_coeffs(self, µ):
"""Check that each coefficient function 1) is a callable function,
2) takes in the right sized inputs, and 3) returns scalar values.
Parameters
----------
µ : float or (p,) ndarray
A test input for the coefficient functions.
"""
for θ in self.coefficient_functions:
if not callable(θ):
raise ValueError("coefficients of affine operator must be "
"callable functions")
elif not np.isscalar(θ(µ)):
raise ValueError("coefficient functions of affine operator "
"must return a scalar")
def __call__(self, µ):
if not self._ready:
raise RuntimeError("constituent matrices not initialized!")
return np.sum([θi(µ)*Ai for θi,Ai in zip(self.coefficient_functions,
self.matrices)], axis=0)
def trained_model_from_operators(modelclass, modelform, Vr,
c_=None, A_=None, H_=None, Hc_=None, B_=None):
"""Construct a prediction-capable ROM object from the operators of
the reduced model.
Parameters
----------
modelclass : type
One of the ROM classes (e.g., IntrusiveContinuousROM).
modelform : str
The structure of the model, a substring of "cAHB".
Vr : (n,r) ndarray
The basis for the linear reduced space (e.g., POD basis matrix).
c_ : (r,) ndarray or None
Reduced constant term, or None if 'c' is not in `modelform`.
A_ : (r,r) ndarray or None
Reduced linear state matrix, or None if 'c' is not in `modelform`.
H_ : (r,r**2) ndarray or None
Reduced quadratic state matrix (full size), or None if 'H' is not in
`modelform`.
Hc_ : (r,r(r+1)/2) ndarray or None
Reduced quadratic state matrix (compact), or None if 'H' is not in
`modelform`. Only used if `H_` is also None.
B_ : (r,m) ndarray or None
Reduced input matrix, or None if 'B' is not in `modelform`.
Returns
-------
model : modelclass object
A new model, ready for predict() calls.
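Examples
--------
A minimal illustrative sketch with hypothetical shapes, assuming IntrusiveContinuousROM
is one of the concrete ROM classes mentioned above:

>>> import numpy as np
>>> Vr = np.random.random((50, 3))
>>> c_, A_ = np.random.random(3), np.random.random((3, 3))
>>> rom = trained_model_from_operators(IntrusiveContinuousROM, "cA", Vr, c_=c_, A_=A_)
>>> rom.r
3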
"""
# Check that the modelclass is valid.
if not issubclass(modelclass, _BaseROM):
raise TypeError("modelclass must be derived from _BaseROM")
# Construct the new model object.
model = modelclass(modelform)
model._check_modelform(trained=False)
# Insert the attributes.
model.Vr = Vr
model.n, model.r = Vr.shape
model.m = None if B_ is None else 1 if B_.ndim == 1 else B_.shape[1]
model.c_, model.A_, model.B_ = c_, A_, B_
model.Hc_ = H2Hc(H_) if H_ is not None else Hc_
# Check that the attributes match the modelform.
model._check_modelform(trained=True)
# Construct the ROM operator f_() if there are no system inputs.
if not model.has_inputs and issubclass(modelclass, _ContinuousROM):
model._construct_f_()
return model
# Base classes ================================================================
class _BaseROM:
"""Base class for all rom_operator_inference reduced model classes."""
_MODEL_KEYS = "cAHB" # Constant, Linear, Quadratic, Input
def __init__(self, modelform):
self.modelform = modelform
@property
def modelform(self):
return self._form
@modelform.setter
def modelform(self, form):
self._form = ''.join(sorted(form,
key=lambda k: self._MODEL_KEYS.find(k)))
@property
def has_constant(self):
return "c" in self.modelform
@property
def has_linear(self):
return "A" in self.modelform
@property
def has_quadratic(self):
return "H" in self.modelform
@property
def has_inputs(self):
return "B" in self.modelform
# @property
# def has_outputs(self):
# return "C" in self._form
def _check_modelform(self, trained=False):
"""Ensure that self.modelform is valid."""
for key in self.modelform:
if key not in self._MODEL_KEYS:
raise ValueError(f"invalid modelform key '{key}'; options "
"are " + ', '.join(self._MODEL_KEYS))
if trained:
# Make sure that the required attributes exist and aren't None,
# and that nonrequired attributes exist but are None.
for key, s in zip("cAHB", ["c_", "A_", "Hc_", "B_"]):
if not hasattr(self, s):
raise AttributeError(f"attribute '{s}' missing;"
" call fit() to train model")
attr = getattr(self, s)
if key in self.modelform and attr is None:
raise AttributeError(f"attribute '{s}' is None;"
" call fit() to train model")
elif key not in self.modelform and attr is not None:
raise AttributeError(f"attribute '{s}' should be None;"
" call fit() to train model")
def _check_inputargs(self, u, argname):
"""Check that self.has_inputs agrees with input arguments."""
if self.has_inputs and u is None:
raise ValueError(f"argument '{argname}' required"
" since 'B' in modelform")
if not self.has_inputs and u is not None:
raise ValueError(f"argument '{argname}' invalid"
" since 'B' in modelform")
class _ContinuousROM(_BaseROM):
"""Base class for models that solve the continuous (ODE) ROM problem,
dx / dt = f(t, x(t), u(t)), x(0) = x0.
The problem may also be parametric, i.e., x and f may depend on an
independent parameter µ.
"""
def _construct_f_(self, u=None):
"""Define the attribute self.f_ based on the computed operators and,
if appropriate, the input function u(t).
"""
if not self.has_inputs and u is None:
if self.modelform == "c":
f_ = lambda t,x_: self.c_
elif self.modelform == "A":
f_ = lambda t,x_: self.A_@x_
elif self.modelform == "cA":
f_ = lambda t,x_: self.c_ + self.A_@x_
elif self.modelform == "H":
f_ = lambda t,x_: self.Hc_@kron2(x_)
elif self.modelform == "cH":
f_ = lambda t,x_: self.c_ + self.Hc_@kron2(x_)
elif self.modelform == "AH":
f_ = lambda t,x_: self.A_@x_ + self.Hc_@kron2(x_)
elif self.modelform == "cAH":
f_ = lambda t,x_: self.c_ + self.A_@x_ + self.Hc_@kron2(x_)
elif self.has_inputs and u is not None:
u_ = u
if self.modelform == "B":
f_ = lambda t,x_: self.B_@u(t)
elif self.modelform == "cB":
f_ = lambda t,x_: self.c_ + self.B_@u_(t)
elif self.modelform == "AB":
f_ = lambda t,x_: self.A_@x_ + self.B_@u_(t)
elif self.modelform == "cAB":
f_ = lambda t,x_: self.c_ + self.A_@x_ + self.B_@u_(t)
elif self.modelform == "HB":
f_ = lambda t,x_: self.Hc_@kron2(x_) + self.B_@u_(t)
elif self.modelform == "cHB":
f_ = lambda t,x_: self.c_ + self.Hc_@kron2(x_) + self.B_@u_(t)
elif self.modelform == "AHB":
f_ = lambda t,x_: self.A_@x_ + self.Hc_@kron2(x_) + self.B_@u_(t)
elif self.modelform == "cAHB":
f_ = lambda t,x_: self.c_ + self.A_@x_ + self.Hc_@kron2(x_) + self.B_@u_(t)
else:
raise RuntimeError("improper use of _construct_f_()!")
self.f_ = f_
def __str__(self):
"""String representation: the structure of the model."""
self._check_modelform()
out = []
if self.has_constant: out.append("c")
if self.has_linear: out.append("Ax(t)")
if self.has_quadratic: out.append("H(x ⊗ x)(t)")
if self.has_inputs: out.append("Bu(t)")
return "Reduced-order model structure: dx / dt = " + " + ".join(out)
def fit(self, *args, **kwargs): # pragma: no cover
raise NotImplementedError("fit() must be implemented by child classes")
def predict(self, x0, t, u=None, **options):
"""Simulate the learned ROM with scipy.integrate.solve_ivp().
Parameters
----------
x0 : (n,) ndarray
The initial (high-dimensional) state vector to begin a simulation.
t : (nt,) ndarray
The time domain over which to integrate the reduced-order system.
u : callable or (m,nt) ndarray
The input as a function of time (preferred) or the input at the
times `t`. If given as an array, u(t) is approximated by a cubic
spline interpolating the known data points.
options
            Arguments for scipy.integrate.solve_ivp(), such as the following:
method : str
The ODE solver for the reduced-order system.
* 'RK45' (default): Explicit Runge-Kutta method of order 5(4).
* 'RK23': Explicit Runge-Kutta method of order 3(2).
* 'Radau': Implicit Runge-Kutta method of the Radau IIA family
of order 5.
* 'BDF': Implicit multi-step variable-order (1 to 5) method
based on a backward differentiation formula for the
derivative.
* 'LSODA': Adams/BDF method with automatic stiffness detection
and switching. This wraps the Fortran solver from ODEPACK.
max_step : float
                The maximum allowed integration step size.
See https://docs.scipy.org/doc/scipy/reference/integrate.html.
Returns
-------
X_ROM: (n,nt) ndarray
The reduced-order approximation to the full-order system over `t`.
"""
# Verify modelform.
self._check_modelform(trained=True)
self._check_inputargs(u, 'u')
# Check dimensions.
if x0.shape[0] != self.n:
raise ValueError("invalid initial state size "
f"({x0.shape[0]} != {self.n})")
if t.ndim != 1:
raise ValueError("time 't' must be one-dimensional")
nt = t.shape[0]
# Project initial conditions.
x0_ = self.Vr.T @ x0
# Interpret control input argument `u`.
if self.has_inputs:
if callable(u): # If u is a function, check output shape.
out = u(t[0])
if np.isscalar(out):
if self.m == 1: # u : R -> R, wrap output as array.
_u = u
u = lambda s: np.array([_u(s)])
else: # u : R -> R, but m != 1.
raise ValueError("input function u() must return"
f" ndarray of shape (m,)={(self.m,)}")
elif not isinstance(out, np.ndarray):
raise ValueError("input function u() must return"
f" ndarray of shape (m,)={(self.m,)}")
elif out.shape != (self.m,):
message = "input function u() must return" \
f" ndarray of shape (m,)={(self.m,)}"
if self.m == 1:
raise ValueError(message + " or scalar")
raise ValueError(message)
else: # u is an (m,nt) array.
U = np.atleast_2d(u.copy())
if U.shape != (self.m,nt):
raise ValueError("invalid input shape "
f"({U.shape} != {(self.m,nt)}")
u = CubicSpline(t, U, axis=1)
# Construct the ROM operator if needed (deferred due to u(t)).
self._construct_f_(u)
# Integrate the reduced-order model.
self.sol_ = solve_ivp(self.f_, # Integrate f_(t,x_)
[t[0], t[-1]], # over this time interval
x0_, # with this initial condition
t_eval=t, # evaluated at these points
**options) # with these solver options.
# Raise warnings if the integration failed.
if not self.sol_.success: # pragma: no cover
warnings.warn(self.sol_.message, IntegrationWarning)
# Reconstruct the approximation to the full-order model.
return self.Vr @ self.sol_.y
class _DiscreteROM(_BaseROM): # pragma: no cover
"""Base class for models that solve the discrete ROM problem,
x_{k+1} = f(x_{k}, u_{k}), x_{0} = x0.
The problem may also be parametric, i.e., x and f may depend on an
independent parameter µ.
"""
def fit(self, *args, **kwargs): # pragma: no cover
raise NotImplementedError("fit() must be implemented by child classes")
def predict(self, x0, niters, U=None, **options):
raise NotImplementedError("TODO")
def __str__(self):
"""String representation: the structure of the model."""
self._check_modelform()
out = []
if self.has_constant: out.append("c")
if self.has_linear: out.append("Ax_{k}")
if self.has_quadratic: out.append("H(x_{k} ⊗ x_{k})")
if self.has_inputs: out.append("Bu_{k}")
return "Reduced-order model structure: x_{k+1} = " + " + ".join(out)
class _AffineContinuousROM(_ContinuousROM):
"""Base class for models with affinely parametric operators."""
def predict(self, µ, x0, t, u=None, **options):
"""Construct a ROM for the parameter µ by exploiting the affine
structure of the ROM operators, then simulate the resulting ROM with
scipy.integrate.solve_ivp().
Parameters
----------
µ : (p,) ndarray
The parameter of interest for the prediction.
x0 : (n,) ndarray
The initial (high-dimensional) state vector to begin a simulation.
t : (nt,) ndarray
The time domain over which to integrate the reduced-order system.
u : callable or (m,nt) ndarray
The input as a function of time (preferred) or the input at the
times `t`. If given as an array, u(t) is approximated by a cubic
spline interpolating the known data points.
options
            Arguments for scipy.integrate.solve_ivp(), such as the following:
method : str
The ODE solver for the reduced-order system.
* 'RK45' (default): Explicit Runge-Kutta method of order 5(4).
* 'RK23': Explicit Runge-Kutta method of order 3(2).
* 'Radau': Implicit Runge-Kutta method of the Radau IIA family
of order 5.
* 'BDF': Implicit multi-step variable-order (1 to 5) method
based on a backward differentiation formula for the
derivative.
* 'LSODA': Adams/BDF method with automatic stiffness detection
and switching. This wraps the Fortran solver from ODEPACK.
max_step : float
                The maximum allowed integration step size.
See https://docs.scipy.org/doc/scipy/reference/integrate.html.
Returns
-------
X_ROM: (n,nt) ndarray
The reduced-order approximation to the full-order system over `t`.
"""
# Check modelform and inputs.
self._check_modelform(trained=True)
self._check_inputargs(u, 'u')
# TODO: Make sure the parameter µ has the correct dimension.
# Use the affine structure of the operators to construct a new model.
model = trained_model_from_operators(
modelclass=_ContinuousROM,
modelform=self.modelform,
Vr=self.Vr,
c_=self.c_(µ) if isinstance(self.c_, AffineOperator) else self.c_,
A_=self.A_(µ) if isinstance(self.A_, AffineOperator) else self.A_,
Hc_=self.Hc_(µ) if isinstance(self.Hc_, AffineOperator) \
else self.Hc_,
B_=self.B_(µ) if isinstance(self.B_, AffineOperator) else self.B_,
)
out = model.predict(x0, t, u, **options)
self.sol_ = model.sol_
return out
# Mixins ======================================================================
class _InferredMixin:
"""Mixin class for reduced model classes that use operator inference."""
@staticmethod
def _check_training_data_shapes(X, Xdot, Vr):
"""Ensure that X, Xdot, and Vr are aligned."""
if X.shape != Xdot.shape:
raise ValueError("shape of X != shape of Xdot "
f"({X.shape} != {Xdot.shape})")
if X.shape[0] != Vr.shape[0]:
raise ValueError("X and Vr not aligned, first dimension "
f"{X.shape[0]} != {Vr.shape[0]}")
@staticmethod
def _check_dataset_consistency(arrlist, label):
"""Ensure that each array in the list of arrays is the same shape."""
shape = arrlist[0].shape
for arr in arrlist:
if arr.shape != shape:
raise ValueError(f"shape of '{label}'"
" inconsistent across samples")
class _IntrusiveMixin:
"""Mixin class for reduced model classes that use intrusive projection."""
def _check_operators(self, operators):
"""Check the keys of the operators argument."""
# Check for missing operator keys.
missing = [repr(key) for key in self.modelform if key not in operators]
if missing:
_noun = "key" + ('' if len(missing) == 1 else 's')
raise KeyError(f"missing operator {_noun} {', '.join(missing)}")
# Check for unnecessary operator keys.
surplus = [repr(key) for key in operators if key not in self.modelform]
if surplus:
_noun = "key" + ('' if len(surplus) == 1 else 's')
raise KeyError(f"invalid operator {_noun} {', '.join(surplus)}")
class _NonparametricMixin:
"""Mixin class for non-parametric reduced model classes."""
@property
def H_(self):
"""Matricized quadratic tensor; operates on full Kronecker product."""
return None if self.Hc_ is None else Hc2H(self.Hc_)
class _ParametricMixin:
"""Mixin class for parametric reduced model classes."""
pass
class _AffineMixin(_ParametricMixin):
"""Mixin class for affinely parametric reduced model classes."""
def _check_affines(self, affines, µ=None):
"""Check the keys of the affines argument."""
# Check for unnecessary affine keys.
surplus = [repr(key) for key in affines if key not in self.modelform]
if surplus:
_noun = "key" + ('' if len(surplus) == 1 else 's')
raise KeyError(f"invalid affine {_noun} {', '.join(surplus)}")
if µ is not None:
for a in affines.values():
AffineOperator(a).validate_coeffs(µ)
# Useable classes =============================================================
# Continuous models (i.e., solving dx/dt = f(t,x,u)) --------------------------
class InferredContinuousROM(_ContinuousROM,
_InferredMixin, _NonparametricMixin):
"""Reduced order model for a system of high-dimensional ODEs of the form
dx / dt = f(t, x(t), u(t)), x(0) = x0.
The model form (structure) of the desired reduced model is user specified,
and the operators of the reduced model are inferred by solving a
regularized ordinary least-squares problem.
Parameters
----------
modelform : str containing 'c', 'A', 'H', and/or 'B'
The structure of the desired reduced-order model. Each character
indicates the presence of a different term in the model:
'c' : Constant term c
'A' : Linear state term Ax(t).
'H' : Quadratic state term H(x⊗x)(t).
'B' : Input term Bu(t).
For example, modelform=="AB" means f(t,x(t),u(t)) = Ax(t) + Bu(t).
Attributes
----------
    has_constant : bool
Whether or not there is a constant term c.
has_linear : bool
Whether or not there is a linear state term Ax(t).
has_quadratic : bool
Whether or not there is a quadratic state term H(x⊗x)(t).
has_inputs : bool
Whether or not there is a linear input term Bu(t).
n : int
The dimension of the original full-order model (x.size).
r : int
The dimension of the learned reduced-order model (x_.size).
m : int or None
The dimension of the input u(t), or None if 'B' is not in `modelform`.
Vr : (n,r) ndarray
The basis for the linear reduced space (e.g., POD basis matrix).
datacond_ : float
Condition number of the data matrix for the least-squares problem.
residual_ : float
The squared Frobenius-norm residual of the least-squares problem for
computing the reduced-order model operators.
c_ : (r,) ndarray or None
Learned ROM constant term, or None if 'c' is not in `modelform`.
A_ : (r,r) ndarray or None
Learned ROM linear state matrix, or None if 'A' is not in `modelform`.
Hc_ : (r,r(r+1)//2) ndarray or None
Learned ROM quadratic state matrix (compact), or None if 'H' is not
in `modelform`. Used internally instead of the larger H_.
H_ : (r,r**2) ndarray or None
Learned ROM quadratic state matrix (full size), or None if 'H' is not
in `modelform`. Computed on the fly from Hc_ if desired; not used
directly in solving the ROM.
B_ : (r,m) ndarray or None
Learned ROM input matrix, or None if 'B' is not in `modelform`.
f_ : func(float, (r,) ndarray) -> (r,) ndarray
The complete learned ROM operator, defined by c_, A_, Hc_, and/or B_.
        Note the signature is f_(t, x_); that is, f_ maps time and reduced
state to reduced state. Calculated in fit() if 'B' is not in
`modelform`, and in predict() otherwise.
sol_ : Bunch object returned by scipy.integrate.solve_ivp(), the result
of integrating the learned ROM in predict(). For more details, see
https://docs.scipy.org/doc/scipy/reference/integrate.html.
"""
def fit(self, X, Xdot, Vr, U=None, P=0):
"""Solve for the reduced model operators via regularized least squares.
Parameters
----------
X : (n,k) ndarray
Column-wise snapshot training data (each column is a snapshot).
Xdot : (n,k) ndarray
Column-wise velocity training data.
Vr : (n,r) ndarray
The basis for the linear reduced space (e.g., POD basis matrix).
U : (m,k) or (k,) ndarray or None
Column-wise inputs corresponding to the snapshots. If m=1 (scalar
input), then U may be a one-dimensional array. Required if 'B' is
in `modelform`; must be None if 'B' is not in `modelform`.
P : (d,d) ndarray or float
Tikhonov regularization matrix. If nonzero, the least-squares
            problem takes the form min_{x} ||Ax - b||^2 + ||Px||^2.
If a nonzero number is provided, the regularization matrix is
P * I (a scaled identity matrix). Here d is the dimension of the
data matrix for the least-squares problem, e.g., d = r + m for a
linear model with inputs.
Returns
-------
self
"""
# Check modelform and inputs.
self._check_modelform()
self._check_inputargs(U, 'U')
# Check and store dimensions.
self._check_training_data_shapes(X, Xdot, Vr)
        n,k = X.shape   # Dimension of system, number of snapshots.
r = Vr.shape[1] # Number of basis vectors.
self.n, self.r, self.m = n, r, None
# Project states and velocities to the reduced subspace.
X_ = Vr.T @ X
Xdot_ = Vr.T @ Xdot
self.Vr = Vr
# Construct the "Data matrix" D = [X^T, (X ⊗ X)^T, U^T, 1].
D_blocks = []
if self.has_constant:
D_blocks.append(np.ones(k).reshape((k,1)))
if self.has_linear:
D_blocks.append(X_.T)
if self.has_quadratic:
X2_ = kron2(X_)
D_blocks.append(X2_.T)
_r2 = X2_.shape[0] # = r(r+1)//2, size of the compact Kronecker.
if self.has_inputs:
if U.ndim == 1:
U = U.reshape((1,k))
D_blocks.append(U.T)
m = U.shape[0]
self.m = m
D = np.hstack(D_blocks)
self.datacond_ = np.linalg.cond(D) # Condition number of data.
# Solve for the reduced-order model operators via least squares.
OT, res = lstsq_reg(D, Xdot_.T, P)[0:2]
self.residual_ = np.sum(res)
# Extract the reduced operators from OT.
i = 0
if self.has_constant:
self.c_ = OT[i:i+1][0] # Note that c_ is one-dimensional.
i += 1
else:
self.c_ = None
if self.has_linear:
self.A_ = OT[i:i+self.r].T
i += self.r
else:
self.A_ = None
if self.has_quadratic:
self.Hc_ = OT[i:i+_r2].T
i += _r2
else:
self.Hc_ = None
if self.has_inputs:
self.B_ = OT[i:i+self.m].T
i += self.m
else:
self.B_ = None
self._construct_f_()
return self
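# Illustrative usage sketch (hypothetical data, not part of the library):
# infer a linear ROM of the toy system dx/dt = -x from synthetic
# snapshot/velocity pairs and a rank-r POD basis, then integrate it from one
# of the training snapshots.
def _example_inferred_continuous_rom():
    n, k, r = 60, 500, 6
    A_true = -np.eye(n)                                      # stable toy dynamics
    X = np.random.standard_normal((n, k))                    # snapshots (columns)
    Xdot = A_true @ X                                        # matching velocities
    Vr = np.linalg.svd(X, full_matrices=False)[0][:, :r]     # rank-r POD basis
    rom = InferredContinuousROM("A").fit(X, Xdot, Vr, P=1e-8)
    return rom.predict(X[:, 0], np.linspace(0, 1, 100))      # (n, 100) trajectory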
class IntrusiveContinuousROM(_ContinuousROM,
_IntrusiveMixin, _NonparametricMixin):
"""Reduced order model for a system of high-dimensional ODEs of the form
dx / dt = f(t, x(t), u(t)), x(0) = x0.
The user must specify the model form of the full-order model (FOM)
operator f and the associated operators; the operators for the reduced
model (ROM) are computed explicitly by projecting the full-order operators.
Parameters
----------
modelform : str containing 'c', 'A', 'H', and/or 'B'
The structure of the desired reduced-order model. Each character
indicates the presence of a different term in the model:
'c' : Constant term c
'A' : Linear state term Ax(t).
'H' : Quadratic state term H(x⊗x)(t).
'B' : Input term Bu(t).
For example, modelform=="AB" means f(t,x(t),u(t)) = Ax(t) + Bu(t).
Attributes
----------
    has_constant : bool
Whether or not there is a constant term c.
has_linear : bool
Whether or not there is a linear state term Ax(t).
has_quadratic : bool
Whether or not there is a quadratic state term H(x⊗x)(t).
has_inputs : bool
Whether or not there is a linear input term Bu(t).
n : int
The dimension of the original full-order model (x.size).
r : int
The dimension of the projected reduced-order model (x_.size).
m : int or None
The dimension of the input u(t), or None if 'B' is not in `modelform`.
Vr : (n,r) ndarray
The basis for the linear reduced space (e.g., POD basis matrix).
c : (n,) ndarray or None
FOM constant term, or None if 'c' is not in `modelform`.
A : (n,n) ndarray or None
FOM linear state matrix, or None if 'A' is not in `modelform`.
Hc : (n,n(n+1)//2) ndarray or None
FOM quadratic state matrix (compact), or None if 'H' is not
in `modelform`.
H : (n,n**2) ndarray or None
FOM quadratic state matrix (full size), or None if 'H' is not
in `modelform`.
B : (n,m) ndarray or None
Learned ROM input matrix, or None if 'B' is not in `modelform`.
c_ : (r,) ndarray or None
Learned ROM constant term, or None if 'c' is not in `modelform`.
A_ : (r,r) ndarray or None
Learned ROM linear state matrix, or None if 'A' is not in `modelform`.
Hc_ : (r,r(r+1)//2) ndarray or None
Learned ROM quadratic state matrix (compact), or None if 'H' is not
in `modelform`. Used internally instead of the larger H_.
H_ : (r,r**2) ndarray or None
Learned ROM quadratic state matrix (full size), or None if 'H' is not
in `modelform`. Computed on the fly from Hc_ if desired; not used in
solving the ROM.
B_ : (r,m) ndarray or None
Learned ROM input matrix, or None if 'B' is not in `modelform`.
f_ : func(float, (r,) ndarray) -> (r,) ndarray
The complete learned ROM operator, defined by c_, A_, Hc_, and/or B_.
        Note the signature is f_(t, x_); that is, f_ maps time and reduced
state to reduced state. Calculated in fit() if 'B' is not in
`modelform`, and in predict() otherwise.
sol_ : Bunch object returned by scipy.integrate.solve_ivp(), the result
of integrating the learned ROM in predict(). For more details, see
https://docs.scipy.org/doc/scipy/reference/integrate.html.
"""
def fit(self, operators, Vr):
"""Compute the reduced model operators via intrusive projection.
Parameters
----------
operators: dict(str -> ndarray)
The operators that define the full-order model f(t,x).
Keys must match the modelform:
* 'c': constant term c.
* 'A': linear state matrix A.
* 'H': quadratic state matrix H (either full H or compact Hc).
* 'B': input matrix B.
Vr : (n,r) ndarray
The basis for the linear reduced space (e.g., POD basis matrix).
Returns
-------
self
"""
# Verify modelform.
self._check_modelform()
self._check_operators(operators)
# Store dimensions.
n,r = Vr.shape # Dimension of system, number of basis vectors.
self.Vr = Vr
self.n, self.r = n, r
# Project FOM operators.
if self.has_constant: # Constant term.
self.c = operators['c']
if self.c.shape != (n,):
raise ValueError("basis Vr and FOM operator c not aligned")
self.c_ = self.Vr.T @ self.c
else:
self.c, self.c_ = None, None
if self.has_linear: # Linear state matrix.
self.A = operators['A']
if self.A.shape != (self.n,self.n):
raise ValueError("basis Vr and FOM operator A not aligned")
self.A_ = self.Vr.T @ self.A @ self.Vr
else:
self.A, self.A_ = None, None
if self.has_quadratic: # Quadratic state matrix.
H_or_Hc = operators['H']
_n2 = self.n * (self.n + 1) // 2
if H_or_Hc.shape == (self.n,self.n**2): # It's H.
self.H = H_or_Hc
self.Hc = H2Hc(self.H)
elif H_or_Hc.shape == (self.n,_n2): # It's Hc.
self.Hc = H_or_Hc
self.H = Hc2H(self.Hc)
else:
raise ValueError("basis Vr and FOM operator H not aligned")
H_ = self.Vr.T @ self.H @ np.kron(self.Vr, self.Vr)
self.Hc_ = H2Hc(H_)
else:
self.Hc, self.H, self.Hc_ = None, None, None
if self.has_inputs: # Linear input matrix.
self.B = operators['B']
if self.B.shape[0] != self.n:
raise ValueError("basis Vr and FOM operator B not aligned")
if self.B.ndim == 2:
self.m = self.B.shape[1]
else: # One-dimensional input
self.B = self.B.reshape((-1,1))
self.m = 1
self.B_ = self.Vr.T @ self.B
else:
self.B, self.B_, self.m = None, None, None
self._construct_f_()
return self
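# Illustrative sketch (hypothetical operators, not part of the library):
# project a known "cA" full-order system onto a random orthonormal basis and
# simulate the resulting ROM.
def _example_intrusive_continuous_rom():
    n, r = 80, 8
    operators = {'c': np.random.standard_normal(n),          # full-order constant term
                 'A': -np.eye(n)}                            # stable full-order linear term
    Vr = np.linalg.qr(np.random.standard_normal((n, r)))[0]
    rom = IntrusiveContinuousROM("cA").fit(operators, Vr)
    return rom.predict(np.random.standard_normal(n), np.linspace(0, 1, 50))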
class InterpolatedInferredContinuousROM(_ContinuousROM,
_InferredMixin, _ParametricMixin):
"""Reduced order model for a system of high-dimensional ODEs, parametrized
by a scalar µ, of the form
dx / dt = f(t, x(t;µ), u(t); µ), x(0;µ) = x0(µ),
where µ is a scalar. The model form (structure) of the desired reduced
model is user specified, and the operators of the reduced model are
inferred by solving several regularized ordinary least-squares problems,
then interpolating those models with respect to the scalar parameter µ.
Parameters
----------
modelform : str containing 'c', 'A', 'H', and/or 'B'
The structure of the desired reduced-order model. Each character
indicates the presence of a different term in the model:
'c' : Constant term c(µ)
'A' : Linear state term A(µ)x(t).
'H' : Quadratic state term H(µ)(x⊗x)(t).
'B' : Input term B(µ)u(t).
For example, modelform=="cA" means f(t, x(t); µ) = c(µ) + A(µ)x(t;µ).
Attributes
----------
    has_constant : bool
Whether or not there is a constant term c(µ).
has_linear : bool
Whether or not there is a linear state term A(µ)x(t).
has_quadratic : bool
Whether or not there is a quadratic state term H(µ)(x⊗x)(t).
has_inputs : bool
Whether or not there is a linear input term B(µ)u(t).
n : int
The dimension of the original model.
r : int
The dimension of the learned reduced-order model (x_.size).
m : int or None
The dimension of the input u(t), or None if 'B' is not in `modelform`.
s : int
The number of training parameter samples, so also the number of reduced
models computed via inference and used in the interpolation.
Vr : (n,r) ndarray
The basis for the linear reduced space (e.g., POD basis matrix).
dataconds_ : float
Condition number of the data matrix for each least-squares problem.
residuals_ : (s,) ndarray
The squared Frobenius-norm residual of each least-squares problem (one
per parameter) for computing the reduced-order model operators.
As_ : list of s (r,r) ndarrays or None
Learned ROM linear state matrices, or None if 'A' not in `modelform`.
Hcs_ : list of s (r,r(r+1)//2) ndarrays or None
Learned ROM quadratic state matrices (compact), or None if 'H' is not
in `modelform`. Used internally instead of the larger H_.
Hs_ : list of s (r,r**2) ndarrays or None
Learned ROM quadratic state matrices (full size), or None if 'H' is not
in `modelform`. Computed on the fly from Hcs_ if desired; not used in
solving the ROM.
cs_ : list of s (r,) ndarrays or None
Learned ROM constant terms, or None if 'c' is not in `modelform`.
Bs_ : list of s (r,m) ndarrays or None
Learned ROM input matrices, or None if 'B' not in `modelform`.
fs_ : list of func(float, (r,) ndarray) -> (r,) ndarray
The complete ROM operators for each parameter sample, defined by
cs_, As_, and/or Hcs_. Only available after calling fit() and only if
there are no inputs ('B' is not in the modelform).
sol_ : Bunch object returned by scipy.integrate.solve_ivp(), the result
of integrating the learned ROM in predict(). For more details, see
https://docs.scipy.org/doc/scipy/reference/integrate.html.
"""
def fit(self, µs, Xs, Xdots, Vr, Us=None, P=0):
"""Solve for the reduced model operators via regularized least squares,
        constructing one ROM per parameter value.
Parameters
----------
µs : (s,) ndarray
Parameter values at which the snapshot data is collected.
Xs : list of s (n,k) ndarrays
Column-wise snapshot training data (each column is a snapshot).
The ith array Xs[i] corresponds to the ith parameter, µs[i].
Xdots : list of s (n,k) ndarrays
Column-wise velocity training data. The ith array Xdots[i]
corresponds to the ith parameter, µs[i].
Vr : (n,r) ndarray
The basis for the linear reduced space (e.g., POD basis matrix).
Us : list of s (m,k) or (k,) ndarrays or None
Column-wise inputs corresponding to the snapshots. If m=1 (scalar
input), then U may be a one-dimensional array. Required if 'B' is
in `modelform`; must be None if 'B' is not in `modelform`.
P : (d,d) ndarray or float
Tikhonov regularization matrix. If nonzero, the least-squares
            problem takes the form min_{x} ||Ax - b||^2 + ||Px||^2.
If a nonzero number is provided, the regularization matrix is
P * I (a scaled identity matrix). Here d is the dimension of the
data matrix for the least-squares problem, e.g., d = r + m for a
linear model with inputs.
Returns
-------
self
"""
# Check modelform and inputs.
self._check_modelform()
self._check_inputargs(Us, 'Us')
# Check that parameters are one-dimensional.
if not np.isscalar(µs[0]):
raise ValueError("only scalar parameter values are supported")
# Check that the number of params matches the number of snapshot sets.
s = len(µs)
if len(Xs) != s:
raise ValueError("num parameter samples != num state snapshot "
f"sets ({s} != {len(Xs)})")
if len(Xdots) != s:
raise ValueError("num parameter samples != num velocity snapshot "
f"sets ({s} != {len(Xdots)})")
# Check and store dimensions.
for X, Xdot in zip(Xs, Xdots):
self._check_training_data_shapes(X, Xdot, Vr)
        n,k = Xs[0].shape  # Dimension of system, number of snapshots.
r = Vr.shape[1] # Number of basis vectors.
self.n, self.r, self.m = n, r, None
# Check that all arrays in each list of arrays are the same sizes.
_tocheck = [(Xs, "X"), (Xdots, "Xdot")]
if self.has_inputs:
_tocheck += [(Us, "U")]
self.m = Us[0].shape[0] if Us[0].ndim == 2 else 1
else:
Us = [None]*s
for dataset, label in _tocheck:
self._check_dataset_consistency(dataset, label)
# TODO: figure out how to handle P (scalar, array, list(arrays)).
# Train one model per parameter sample.
self.Vr = Vr
self.models_ = []
for µ, X, Xdot, U in zip(µs, Xs, Xdots, Us):
model = InferredContinuousROM(self.modelform)
model.fit(X, Xdot, Vr, U, P)
model.parameter = µ
self.models_.append(model)
# Construct interpolators.
self.A_ = CubicSpline(µs, self.As_) if self.has_linear else None
        self.Hc_ = CubicSpline(µs, self.Hcs_) if self.has_quadratic else None
self.H_ = CubicSpline(µs, self.Hs_) if self.has_quadratic else None
self.c_ = CubicSpline(µs, self.cs_) if self.has_constant else None
self.B_ = CubicSpline(µs, self.Bs_) if self.has_inputs else None
return self
def predict(self, µ, x0, t, u=None, **options):
"""Construct a ROM for the parameter µ by interolating the entries of
the learned models, then simulate this interpolated ROM with
scipy.integrate.solve_ivp().
Parameters
----------
µ : float
The parameter of interest for the prediction.
x0 : (n,) ndarray
The initial (high-dimensional) state vector to begin a simulation.
t : (nt,) ndarray
The time domain over which to integrate the reduced-order system.
u : callable or (m,nt) ndarray
The input as a function of time (preferred) or the input at the
times `t`. If given as an array, u(t) is approximated by a cubic
spline interpolating the known data points.
options
            Arguments for scipy.integrate.solve_ivp(), such as the following:
method : str
The ODE solver for the reduced-order system.
* 'RK45' (default): Explicit Runge-Kutta method of order 5(4).
* 'RK23': Explicit Runge-Kutta method of order 3(2).
* 'Radau': Implicit Runge-Kutta method of the Radau IIA family
of order 5.
* 'BDF': Implicit multi-step variable-order (1 to 5) method
based on a backward differentiation formula for the
derivative.
* 'LSODA': Adams/BDF method with automatic stiffness detection
and switching. This wraps the Fortran solver from ODEPACK.
max_step : float
                The maximum allowed integration step size.
See https://docs.scipy.org/doc/scipy/reference/integrate.html.
Returns
-------
X_ROM: (n,nt) ndarray
The reduced-order approximation to the full-order system over `t`.
"""
# Check modelform and inputs.
self._check_modelform(trained=True)
self._check_inputargs(u, 'u')
model = trained_model_from_operators(
modelclass=_ContinuousROM,
modelform=self.modelform,
Vr=self.Vr,
A_=self.A_(µ) if self.A_ is not None else None,
Hc_=self.Hc_(µ) if self.Hc_ is not None else None,
c_=self.c_(µ) if self.c_ is not None else None,
B_=self.B_(µ) if self.B_ is not None else None,
)
out = model.predict(x0, t, u, **options)
self.sol_ = model.sol_
return out
@property
def As_(self):
"""The linear state matrices for each submodel."""
return [m.A_ for m in self.models_] if self.has_linear else None
@property
def Hs_(self):
"""The full quadratic state matrices for each submodel."""
return [m.H_ for m in self.models_] if self.has_quadratic else None
@property
def Hcs_(self):
"""The compact quadratic state matrices for each submodel."""
return [m.Hc_ for m in self.models_] if self.has_quadratic else None
@property
def cs_(self):
"""The constant terms for each submodel."""
return [m.c_ for m in self.models_] if self.has_constant else None
@property
def Bs_(self):
"""The linear input matrices for each submodel."""
return [m.B_ for m in self.models_] if self.has_inputs else None
@property
def fs_(self):
"""The reduced-order operators for each submodel."""
return [m.f_ for m in self.models_]
@property
def dataconds_(self):
"""The condition numbers of the data matrices for each submodel."""
return np.array([m.datacond_ for m in self.models_])
@property
def residuals_(self):
"""The residuals for each submodel."""
return np.array([m.residual_ for m in self.models_])
def __len__(self):
"""The number of trained models."""
return len(self.models_) if hasattr(self, "models_") else 0
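# Illustrative sketch (hypothetical data, not part of the library): infer one
# linear ROM per training parameter of the toy family dx/dt = -(1 + µ)x, then
# predict at a new µ by interpolating the learned operators. Four samples are
# used so the cubic-spline interpolation is well defined.
def _example_interpolated_inferred_rom():
    n, k, r = 40, 300, 4
    µs = np.linspace(0.0, 1.0, 4)                            # scalar training parameters
    Vr = np.linalg.qr(np.random.standard_normal((n, r)))[0]
    Xs = [np.random.standard_normal((n, k)) for _ in µs]
    Xdots = [-(1.0 + µ) * X for µ, X in zip(µs, Xs)]         # velocities for dx/dt = -(1+µ)x
    rom = InterpolatedInferredContinuousROM("A").fit(µs, Xs, Xdots, Vr, P=1e-8)
    return rom.predict(0.25, Xs[0][:, 0], np.linspace(0, 1, 100))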
class AffineInferredContinuousROM(_AffineContinuousROM,
_InferredMixin, _AffineMixin):
"""Reduced order model for a system of high-dimensional ODEs of the form
dx / dt = f(t, x(t), u(t); µ), x(0;µ) = x0(µ).
    The model form (structure) of the desired reduced model is user specified,
    and the operators of the reduced model, including their affine dependence
    on the parameter µ, are inferred by solving a regularized least-squares
    problem.
Parameters
----------
modelform : str containing 'c', 'A', 'H', and/or 'B'
The structure of the desired reduced-order model. Each character
indicates the presence of a different term in the model:
* 'c' : Constant term c(µ).
* 'A' : Linear state term A(µ)x(t).
* 'H' : Quadratic state term H(µ)(x⊗x)(t).
* 'B' : Linear input term B(µ)u(t).
For example, modelform=="cA" means f(t, x(t); µ) = c(µ) + A(µ)x(t;µ).
Attributes
----------
    has_constant : bool
Whether or not there is a constant term c(µ).
has_linear : bool
Whether or not there is a linear term A(µ)x(t).
has_quadratic : bool
Whether or not there is a quadratic term H(µ)(x⊗x)(t).
has_inputs : bool
Whether or not there is an input term B(µ)u(t).
n : int
The dimension of the original full-order model (x.size).
r : int
The dimension of the projected reduced-order model (x_.size).
m : int or None
The dimension of the input u(t), or None if 'B' is not in `modelform`.
Vr : (n,r) ndarray
The basis for the linear reduced space (e.g., POD basis matrix).
c_ : func(µ) -> (r,) ndarray; (r,) ndarray; or None
Learned ROM constant term, or None if 'c' is not in `modelform`.
A_ : func(µ) -> (r,r) ndarray; (r,r) ndarray; or None
Learned ROM linear state matrix, or None if 'A' is not in `modelform`.
Hc_ : func(µ) -> (r,r(r+1)//2) ndarray; (r,r(r+1)//2) ndarray; or None
Learned ROM quadratic state matrix (compact), or None if 'H' is not
in `modelform`. Used internally instead of the larger H_.
H_ : func(µ) -> (r,r**2) ndarray; (r,r**2) ndarray; or None
Learned ROM quadratic state matrix (full size), or None if 'H' is not
in `modelform`. Computed on the fly from Hc_ if desired; not used in
solving the ROM.
B_ : func(µ) -> (r,m) ndarray; (r,m) ndarray; or None
Learned ROM input matrix, or None if 'B' is not in `modelform`.
sol_ : Bunch object returned by scipy.integrate.solve_ivp(), the result
of integrating the learned ROM in predict(). For more details, see
https://docs.scipy.org/doc/scipy/reference/integrate.html.
"""
def fit(self, µs, affines, Xs, Xdots, Vr, Us=None, P=0):
"""Solve for the reduced model operators via regularized least squares.
For terms with affine structure, solve for the constituent operators.
Parameters
----------
µs : list of s scalars or (p,) ndarrays
Parameter values at which the snapshot data is collected.
affines : dict(str -> list(functions))
Functions that define the structures of the affine operators.
Keys must match the modelform:
* 'c': Constant term c(µ).
* 'A': Linear state matrix A(µ).
* 'H': Quadratic state matrix H(µ).
* 'B': Linear input matrix B(µ).
For example, if the constant term has the affine structure
c(µ) = θ1(µ)c1 + θ2(µ)c2 + θ3(µ)c3, then 'c' -> [θ1, θ2, θ3].
Xs : list of s (n,k) ndarrays
Column-wise snapshot training data (each column is a snapshot).
The ith array Xs[i] corresponds to the ith parameter, µs[i].
Xdots : list of s (n,k) ndarrays
Column-wise velocity training data. The ith array Xdots[i]
corresponds to the ith parameter, µs[i].
Vr : (n,r) ndarray
The basis for the linear reduced space (e.g., POD basis matrix).
Us : list of s (m,k) or (k,) ndarrays or None
Column-wise inputs corresponding to the snapshots. If m=1 (scalar
input), then U may be a one-dimensional array. Required if 'B' is
in `modelform`; must be None if 'B' is not in `modelform`.
P : (d,d) ndarray or float
Tikhonov regularization matrix. If nonzero, the least-squares
            problem takes the form min_{x} ||Ax - b||^2 + ||Px||^2.
If a nonzero number is provided, the regularization matrix is
P * I (a scaled identity matrix). Here d is the dimension of the
data matrix for the least-squares problem, e.g., d = r + m for a
linear model with inputs.
Returns
-------
self
"""
# Check modelform and inputs.
self._check_modelform()
self._check_affines(affines, µs[0])
self._check_inputargs(Us, 'Us')
# Check and store dimensions.
for X, Xdot in zip(Xs, Xdots):
self._check_training_data_shapes(X, Xdot, Vr)
        n,k = Xs[0].shape  # Dimension of system, number of snapshots.
r = Vr.shape[1] # Number of basis vectors.
self.n, self.r, self.m = n, r, None
# Check that all arrays in each list of arrays are the same sizes.
_tocheck = [(Xs, "X"), (Xdots, "Xdot")]
if self.has_inputs:
_tocheck += [(Us, "U")]
self.m = Us[0].shape[0] if Us[0].ndim == 2 else 1
for dataset, label in _tocheck:
self._check_dataset_consistency(dataset, label)
# Project states and velocities to the reduced subspace.
Xs_ = [Vr.T @ X for X in Xs]
Xdots_ = [Vr.T @ Xdot for Xdot in Xdots]
self.Vr = Vr
# Construct the "Data matrix" D.
D_blockrows = []
for i in range(len(µs)):
row = []
µ = µs[i]
k = Xs[i].shape[1]
if self.has_constant:
ones = np.ones(k).reshape((k,1))
if 'c' in affines:
for j in range(len(affines['c'])):
row.append(affines['c'][j](µ) * ones)
else:
row.append(ones)
if self.has_linear:
if 'A' in affines:
for j in range(len(affines['A'])):
row.append(affines['A'][j](µ) * Xs_[i].T)
else:
row.append(Xs_[i].T)
if self.has_quadratic:
X2i_ = kron2(Xs_[i])
if 'H' in affines:
for j in range(len(affines['H'])):
row.append(affines['H'][j](µ) * X2i_.T)
else:
row.append(X2i_.T)
if self.has_inputs:
Ui = Us[i]
if self.m == 1:
Ui = Ui.reshape((1,k))
if 'B' in affines:
for j in range(len(affines['B'])):
row.append(affines['B'][j](µ) * Ui.T)
else:
row.append(Ui.T)
D_blockrows.append(np.hstack(row))
D = np.vstack(D_blockrows)
self.datacond_ = np.linalg.cond(D) # Condition number of data.
R = np.hstack(Xdots_).T
# Solve for the reduced-order model operators via least squares.
OT, res = lstsq_reg(D, R, P)[0:2]
self.residual_ = np.sum(res)
# Extract the reduced operators from OT.
i = 0
if self.has_constant:
if 'c' in affines:
cs_ = []
for j in range(len(affines['c'])):
cs_.append(OT[i:i+1][0]) # c_ is one-dimensional.
i += 1
self.c_ = AffineOperator(affines['c'], cs_)
else:
self.c_ = OT[i:i+1][0] # c_ is one-dimensional.
i += 1
else:
self.c_, self.cs_ = None, None
if self.has_linear:
if 'A' in affines:
As_ = []
for j in range(len(affines['A'])):
As_.append(OT[i:i+self.r].T)
i += self.r
self.A_ = AffineOperator(affines['A'], As_)
else:
self.A_ = OT[i:i+self.r].T
i += self.r
else:
self.A_ = None
if self.has_quadratic:
_r2 = self.r * (self.r + 1) // 2
if 'H' in affines:
Hcs_ = []
for j in range(len(affines['H'])):
Hcs_.append(OT[i:i+_r2].T)
i += _r2
self.Hc_ = AffineOperator(affines['H'], Hcs_)
self.H_ = lambda µ: Hc2H(self.Hc_(µ))
else:
self.Hc_ = OT[i:i+_r2].T
i += _r2
                self.H_ = Hc2H(self.Hc_)
else:
self.Hc_, self.H_ = None, None
if self.has_inputs:
if 'B' in affines:
Bs_ = []
for j in range(len(affines['B'])):
Bs_.append(OT[i:i+self.m].T)
i += self.m
self.B_ = AffineOperator(affines['B'], Bs_)
else:
self.B_ = OT[i:i+self.m].T
i += self.m
else:
self.B_ = None
return self
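# Illustrative sketch (hypothetical data, not part of the library): infer the
# constituent operators of an affine family A(µ) = θ1(µ)A1 + θ2(µ)A2 with
# θ1(µ) = 1 and θ2(µ) = µ from snapshot data at a few training parameters,
# then predict at a new µ.
def _example_affine_inferred_rom():
    n, k, r = 40, 300, 4
    µs = [0.0, 0.5, 1.0]                                     # scalar training parameters
    affines = {'A': [lambda µ: 1.0, lambda µ: µ]}            # affine coefficient functions
    A1, A2 = -np.eye(n), -0.5 * np.eye(n)                    # hypothetical constituent matrices
    Vr = np.linalg.qr(np.random.standard_normal((n, r)))[0]
    Xs = [np.random.standard_normal((n, k)) for _ in µs]
    Xdots = [(A1 + µ * A2) @ X for µ, X in zip(µs, Xs)]
    rom = AffineInferredContinuousROM("A").fit(µs, affines, Xs, Xdots, Vr, P=1e-8)
    return rom.predict(0.25, Xs[0][:, 0], np.linspace(0, 1, 100))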
class AffineIntrusiveContinuousROM(_AffineContinuousROM,
_IntrusiveMixin, _AffineMixin):
"""Reduced order model for a system of high-dimensional ODEs of the form
dx / dt = f(t, x(t), u(t); µ), x(0;µ) = x0(µ).
The user must specify the model form of the full-order model (FOM)
operator f and the associated operators; the operators for the reduced
model (ROM) are explicitly computed by projecting the full-order operators.
Parameters
----------
modelform : str containing 'c', 'A', 'H', and/or 'B'
The structure of the desired reduced-order model. Each character
indicates the presence of a different term in the model:
* 'c' : Constant term c(µ).
* 'A' : Linear state term A(µ)x(t).
* 'H' : Quadratic state term H(µ)(x⊗x)(t).
* 'B' : Linear input term B(µ)u(t).
For example, modelform=="cA" means f(t, x(t); µ) = c(µ) + A(µ)x(t;µ).
Attributes
----------
    has_constant : bool
Whether or not there is a constant term c(µ).
has_linear : bool
Whether or not there is a linear term A(µ)x(t).
has_quadratic : bool
Whether or not there is a quadratic term H(µ)(x⊗x)(t).
has_inputs : bool
Whether or not there is an input term B(µ)u(t).
n : int
The dimension of the original full-order model (x.size).
r : int
The dimension of the projected reduced-order model (x_.size).
m : int or None
The dimension of the input u(t), or None if 'B' is not in `modelform`.
Vr : (n,r) ndarray
The basis for the linear reduced space (e.g., POD basis matrix).
c : func(µ) -> (n,) ndarray; (n,) ndarray; or None
FOM constant term, or None if 'c' is not in `modelform`.
A : func(µ) -> (n,n) ndarray; (n,n) ndarray; or None
FOM linear state matrix, or None if 'A' is not in `modelform`.
Hc : func(µ) -> (n,n(n+1)//2) ndarray; (n,n(n+1)//2) ndarray; or None
FOM quadratic state matrix (compact), or None if 'H' is not
in `modelform`.
H : func(µ) -> (n,n**2) ndarray; (n,n**2) ndarray; or None
FOM quadratic state matrix (full size), or None if 'H' is not
in `modelform`.
B : func(µ) -> (n,m) ndarray; (n,m) ndarray; or None
FOM input matrix, or None if 'B' is not in `modelform`.
c_ : func(µ) -> (r,) ndarray; (r,) ndarray; or None
Computed ROM constant term, or None if 'c' is not in `modelform`.
A_ : func(µ) -> (r,r) ndarray; (r,r) ndarray; or None
Computed ROM linear state matrix, or None if 'A' is not in `modelform`.
Hc_ : func(µ) -> (r,r(r+1)//2) ndarray; (r,r(r+1)//2) ndarray; or None
Computed ROM quadratic state matrix (compact), or None if 'H' is not
in `modelform`. Used internally instead of the larger H_.
H_ : func(µ) -> (r,r**2) ndarray; (r,r**2) ndarray; or None
Computed ROM quadratic state matrix (full size), or None if 'H' is not
in `modelform`. Computed on the fly from Hc_ if desired; not used in
solving the ROM.
B_ : func(µ) -> (r,m) ndarray; (r,m) ndarray; or None
Computed ROM input matrix, or None if 'B' is not in `modelform`.
sol_ : Bunch object returned by scipy.integrate.solve_ivp(), the result
of integrating the learned ROM in predict(). For more details, see
https://docs.scipy.org/doc/scipy/reference/integrate.html.
"""
def fit(self, affines, operators, Vr):
"""Solve for the reduced model operators via intrusive projection.
Parameters
----------
affines : dict(str -> list(functions))
Functions that define the structures of the affine operators.
Keys must match the modelform:
* 'c': Constant term c(µ).
* 'A': Linear state matrix A(µ).
* 'H': Quadratic state matrix H(µ).
            * 'B': Linear input matrix B(µ).
For example, if the constant term has the affine structure
c(µ) = θ1(µ)c1 + θ2(µ)c2 + θ3(µ)c3, then 'c' -> [θ1, θ2, θ3].
operators: dict(str -> ndarray or list(ndarrays))
The operators that define the full-order model f(t,x;µ).
Keys must match the modelform:
* 'c': constant term c(µ).
* 'A': linear state matrix A(µ).
* 'H': quadratic state matrix H(µ).
* 'B': input matrix B(µ).
Terms with affine structure should be given as a list of the
constituent matrices. For example, if the linear state matrix has
the form A(µ) = θ1(µ)A1 + θ2(µ)A2, then 'A' -> [A1, A2].
Vr : (n,r) ndarray
The basis for the linear reduced space (e.g., POD basis matrix).
Returns
-------
self
"""
# Verify modelform, affines, and operators.
self._check_modelform()
self._check_affines(affines, None)
self._check_operators(operators)
# Store dimensions.
n,r = Vr.shape # Dimension of system, number of basis vectors.
self.Vr = Vr
self.n, self.r = n, r
# Project FOM operators.
if self.has_constant: # Constant term.
if 'c' in affines:
self.c = AffineOperator(affines['c'], operators['c'])
if self.c.shape != (n,):
raise ValueError("basis Vr and FOM operator c not aligned")
self.c_ = AffineOperator(affines['c'],
[self.Vr.T @ c
for c in self.c.matrices])
else:
self.c = operators['c']
if self.c.shape != (n,):
raise ValueError("basis Vr and FOM operator c not aligned")
self.c_ = self.Vr.T @ self.c
else:
self.c, self.c_ = None, None
if self.has_linear: # Linear state matrix.
if 'A' in affines:
self.A = AffineOperator(affines['A'], operators['A'])
if self.A.shape != (self.n,self.n):
raise ValueError("basis Vr and FOM operator A not aligned")
self.A_ = AffineOperator(affines['A'],
[self.Vr.T @ A @ self.Vr
for A in self.A.matrices])
else:
self.A = operators['A']
if self.A.shape != (self.n,self.n):
raise ValueError("basis Vr and FOM operator A not aligned")
self.A_ = self.Vr.T @ self.A @ self.Vr
else:
self.A, self.A_ = None, None
if self.has_quadratic: # Quadratic state matrix.
_n2 = self.n * (self.n + 1) // 2
if 'H' in affines:
H_or_Hc = AffineOperator(affines['H'], operators['H'])
if H_or_Hc.shape == (self.n,self.n**2): # It's H.
self.H = H_or_Hc
self.Hc = AffineOperator(affines['H'],
[H2Hc(H)
for H in H_or_Hc.matrices])
elif H_or_Hc.shape == (self.n,_n2): # It's Hc.
self.Hc = H_or_Hc
self.H = AffineOperator(affines['H'],
[Hc2H(Hc)
for Hc in H_or_Hc.matrices])
else:
raise ValueError("basis VR and FOM operator H not aligned")
Vr2 = np.kron(self.Vr, self.Vr)
self.H_ = AffineOperator(affines['H'],
[self.Vr.T @ H @ Vr2
for H in self.H.matrices])
self.Hc_ = AffineOperator(affines['H'],
[H2Hc(H_)
for H_ in self.H_.matrices])
else:
H_or_Hc = operators['H']
if H_or_Hc.shape == (self.n,self.n**2): # It's H.
self.H = H_or_Hc
self.Hc = H2Hc(self.H)
elif H_or_Hc.shape == (self.n,_n2): # It's Hc.
self.Hc = H_or_Hc
self.H = Hc2H(self.Hc)
else:
raise ValueError("basis Vr and FOM operator H not aligned")
self.H_ = self.Vr.T @ self.H @ np.kron(self.Vr, self.Vr)
self.Hc_ = H2Hc(self.H_)
else:
self.Hc, self.H, self.Hc_ = None, None, None
if self.has_inputs: # Linear input matrix.
if 'B' in affines:
self.B = AffineOperator(affines['B'], operators['B'])
if self.B.shape[0] != self.n:
raise ValueError("basis Vr and FOM operator B not aligned")
if len(self.B.shape) == 2:
self.m = self.B.shape[1]
else: # One-dimensional input
self.B = AffineOperator(affines['B'],
[B.reshape((-1,1))
for B in self.B.matrices])
self.m = 1
self.B_ = AffineOperator(affines['B'],
[self.Vr.T @ B
for B in self.B.matrices])
else:
self.B = operators['B']
if self.B.shape[0] != self.n:
raise ValueError("basis Vr and FOM operator B not aligned")
if self.B.ndim == 2:
self.m = self.B.shape[1]
else: # One-dimensional input
self.B = self.B.reshape((-1,1))
self.m = 1
self.B_ = self.Vr.T @ self.B
else:
self.B, self.B_, self.m = None, None, None
return self
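# Illustrative sketch (hypothetical operators, not part of the library):
# project an affine full-order family A(µ) = A1 + µA2 onto a random
# orthonormal basis; the reduced operator keeps the same affine structure and
# is evaluated at prediction time.
def _example_affine_intrusive_rom():
    n, r = 60, 6
    affines = {'A': [lambda µ: 1.0, lambda µ: µ]}            # affine coefficient functions
    operators = {'A': [-np.eye(n), -0.5 * np.eye(n)]}        # constituent full-order matrices
    Vr = np.linalg.qr(np.random.standard_normal((n, r)))[0]
    rom = AffineIntrusiveContinuousROM("A").fit(affines, operators, Vr)
    return rom.predict(0.3, np.random.standard_normal(n), np.linspace(0, 1, 50))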
# Discrete models (i.e., solving x_{k+1} = f(x_{k},u_{k})) --------------------
__all__ = [
"InferredContinuousROM",
"IntrusiveContinuousROM",
"AffineInferredContinuousROM",
"AffineIntrusiveContinuousROM",
"InterpolatedInferredContinuousROM",
]
# Future additions ------------------------------------------------------------
# TODO: discrete analogs.
# TODO: jacobians for each model form in the continuous case.
# TODO: better __str__() for parametric classes.
| 39.193034 | 91 | 0.548416 |
| 674eb00381133bd0bcb07f999a2ebf36e70c6535 | 1,587 | py | Python | functions/mil_cross_val.py | jmarrietar/MILpy | 8b51771e41ec43dcc489947352133b76a642faf5 | ["FSFAP"] | 18 | 2016-11-14T18:53:57.000Z | 2022-02-14T00:40:23.000Z | functions/mil_cross_val.py | jmarrietar/MILpy | 8b51771e41ec43dcc489947352133b76a642faf5 | ["FSFAP"] | 1 | 2017-08-02T01:41:41.000Z | 2018-03-05T19:09:38.000Z | functions/mil_cross_val.py | jmarrietar/MILpy | 8b51771e41ec43dcc489947352133b76a642faf5 | ["FSFAP"] | 5 | 2017-06-09T15:35:33.000Z | 2020-06-04T22:56:43.000Z |
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 22 20:09:20 2016
MIL K stratified fold representation
@author: josemiguelarrieta
"""
from sklearn.model_selection import StratifiedKFold  # sklearn.cross_validation was removed in scikit-learn 0.20
from sklearn.metrics import roc_auc_score
import numpy as np
import sys
import timeit
def mil_cross_val(bags,labels,model,folds,parameters={},timer=False):
start_time = timeit.default_timer()
    skf = StratifiedKFold(n_splits=folds)
results_accuracie = []
results_auc = []
fold = 0
    for train_index, test_index in skf.split(bags, labels.reshape(len(labels))):
X_train = [bags[i] for i in train_index]
Y_train = labels[train_index]
X_test = [bags[i] for i in test_index]
Y_test = labels[test_index]
sys.stdout.write('Fold# '+str(fold)+'...')
if len(parameters) > 0:
model.fit(X_train, Y_train, **parameters)
else:
            model.fit(X_train, Y_train)
predictions = model.predict(X_test)
if (isinstance(predictions, tuple)):
predictions = predictions[0]
accuracie = np.average(Y_test.T == np.sign(predictions))
results_accuracie.append(100 * accuracie)
auc_score = roc_auc_score(Y_test,predictions)
results_auc.append(100 * auc_score)
fold = fold+1
elapsed = timeit.default_timer() - start_time
    if timer:
return np.mean(results_accuracie), results_accuracie, np.mean(results_auc), results_auc, elapsed
else:
return np.mean(results_accuracie), results_accuracie, np.mean(results_auc), results_auc
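# Illustrative sketch (not part of MILpy): any object exposing
# fit(bags, labels, **params) and predict(bags) can be evaluated with
# mil_cross_val. The dummy model and random bags below are hypothetical
# placeholders.
def _example_mil_cross_val():
    class _RandomScoreModel(object):
        def fit(self, bags, labels, **params):
            return self
        def predict(self, bags):
            return np.random.uniform(-1, 1, size=len(bags))   # one score per bag
    bags = [np.random.randn(np.random.randint(3, 9), 5) for _ in range(40)]
    labels = np.array([1 if i % 2 == 0 else -1 for i in range(40)]).reshape(-1, 1)
    return mil_cross_val(bags, labels, _RandomScoreModel(), folds=5, timer=True)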
| 35.266667 | 104 | 0.660996 |
| aee8577daad8244d6e7895d64642ed4e6c7778ff | 4,848 | py | Python | src/runner/trainers/acdc_vsr_trainer.py | cmlab-mira/Efficient-and-Phase-aware-Video-Super-resolution-for-Cardiac-MRI | ec01b783f8acd41a7056431bad615896b8495f95 | ["MIT"] | 11 | 2020-08-09T08:08:56.000Z | 2022-01-18T14:25:22.000Z | src/runner/trainers/acdc_vsr_trainer.py | cmlab-mira/Efficient-and-Phase-aware-Video-Super-resolution-for-Cardiac-MRI | ec01b783f8acd41a7056431bad615896b8495f95 | ["MIT"] | 2 | 2021-09-13T09:48:41.000Z | 2021-11-08T14:20:58.000Z | src/runner/trainers/acdc_vsr_trainer.py | cmlab-mira/Efficient-and-Phase-aware-Video-Super-resolution-for-Cardiac-MRI | ec01b783f8acd41a7056431bad615896b8495f95 | ["MIT"] | 4 | 2020-08-30T14:13:35.000Z | 2021-09-14T09:26:55.000Z |
import torch
from tqdm import tqdm
import functools
from src.runner.trainers.base_trainer import BaseTrainer
from src.utils import denormalize
class AcdcVSRTrainer(BaseTrainer):
"""The ACDC trainer for the Video Super-Resolution.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._denormalize = functools.partial(denormalize, dataset='acdc')
def _run_epoch(self, mode):
"""Run an epoch for training.
Args:
mode (str): The mode of running an epoch ('training' or 'validation').
Returns:
log (dict): The log information.
batch (dict or sequence): The last batch of the data.
outputs (torch.Tensor or sequence of torch.Tensor): The corresponding model outputs.
"""
if mode == 'training':
self.net.train()
else:
self.net.eval()
dataloader = self.train_dataloader if mode == 'training' else self.valid_dataloader
trange = tqdm(dataloader,
total=len(dataloader),
desc=mode)
log = self._init_log()
count = 0
for batch in trange:
batch = self._allocate_data(batch)
inputs, targets = self._get_inputs_targets(batch)
T = len(inputs)
if mode == 'training':
outputs = self.net(inputs)
losses = self._compute_losses(outputs, targets)
loss = (torch.stack(losses) * self.loss_weights).sum()
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
else:
with torch.no_grad():
outputs = self.net(inputs)
losses = self._compute_losses(outputs, targets)
loss = (torch.stack(losses) * self.loss_weights).sum()
metrics = self._compute_metrics(outputs, targets)
batch_size = self.train_dataloader.batch_size if mode == 'training' else self.valid_dataloader.batch_size
self._update_log(log, batch_size, T, loss, losses, metrics)
count += batch_size * T
trange.set_postfix(**dict((key, f'{value / count: .3f}') for key, value in log.items()))
for key in log:
log[key] /= count
return log, batch, outputs
def _get_inputs_targets(self, batch):
"""Specify the data inputs and targets.
Args:
batch (dict): A batch of data.
Returns:
inputs (list of torch.Tensor): The data inputs.
targets (list of torch.Tensor): The data targets.
"""
return batch['lr_imgs'], batch['hr_imgs']
def _compute_losses(self, outputs, targets):
"""Compute the losses.
Args:
outputs (list of torch.Tensor): The model outputs.
targets (list of torch.Tensor): The data targets.
Returns:
losses (list of torch.Tensor): The computed losses.
"""
losses = []
for loss_fn in self.loss_fns:
# Average the losses computed at every time steps.
loss = torch.stack([loss_fn(output, target) for output, target in zip(outputs, targets)]).mean()
losses.append(loss)
return losses
def _compute_metrics(self, outputs, targets):
"""Compute the metrics.
Args:
outputs (list of torch.Tensor): The model outputs.
targets (list of torch.Tensor): The data targets.
Returns:
metrics (list of torch.Tensor): The computed metrics.
"""
outputs = list(map(self._denormalize, outputs))
targets = list(map(self._denormalize, targets))
# Average the metric of every frame in a video.
metrics = []
for metric_fn in self.metric_fns:
metric = torch.stack([metric_fn(output, target) for output, target in zip(outputs, targets)]).mean()
metrics.append(metric)
return metrics
def _update_log(self, log, batch_size, T, loss, losses, metrics):
"""Update the log.
Args:
log (dict): The log to be updated.
batch_size (int): The batch size.
T (int): The total number of the frames.
loss (torch.Tensor): The weighted sum of the computed losses.
losses (sequence of torch.Tensor): The computed losses.
metrics (sequence of torch.Tensor): The computed metrics.
"""
log['Loss'] += loss.item() * batch_size * T
for loss_fn, loss in zip(self.loss_fns, losses):
log[loss_fn.__class__.__name__] += loss.item() * batch_size * T
for metric_fn, metric in zip(self.metric_fns, metrics):
log[metric_fn.__class__.__name__] += metric.item() * batch_size * T
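# Standalone sketch of the per-frame averaging used by _compute_losses() and
# _compute_metrics() above: a criterion is evaluated on every (output, target)
# frame pair and the results are stacked and averaged. The random tensors and
# the MSE criterion below are hypothetical placeholders.
def _example_framewise_average():
    criterion = torch.nn.MSELoss()
    outputs = [torch.randn(1, 1, 32, 32) for _ in range(5)]   # 5 predicted frames
    targets = [torch.randn(1, 1, 32, 32) for _ in range(5)]   # 5 reference frames
    return torch.stack([criterion(o, t) for o, t in zip(outputs, targets)]).mean()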
| 39.414634 | 117 | 0.584571 |
| c2112860741fc4f527f858dd157ecbfcfdc1dd5a | 1,108 | py | Python | nvtabular/dispatch.py | thibaultcharrin/NVTabular | f2f11d8184b3c9777ce5549f3d3ac1f83ec4a438 | ["Apache-2.0"] | 124 | 2021-10-08T19:59:52.000Z | 2022-03-27T22:13:26.000Z | nvtabular/dispatch.py | thibaultcharrin/NVTabular | f2f11d8184b3c9777ce5549f3d3ac1f83ec4a438 | ["Apache-2.0"] | 325 | 2021-10-08T19:58:49.000Z | 2022-03-31T21:27:39.000Z | nvtabular/dispatch.py | mikemckiernan/NVTabular | efb93340653c4a69b1c3a60c88a82116d7906148 | ["Apache-2.0"] | 26 | 2021-10-13T21:43:22.000Z | 2022-03-29T14:33:58.000Z |
#
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=wildcard-import,unused-import,unused-wildcard-import
import warnings
# Re-export classes/modules from the core library for backwards compatibility
from merlin.core.dispatch import * # noqa
warnings.warn(
"The `nvtabular.dispatch` module has moved to `merlin.core.dispatch`. "
"Support for importing from `nvtabular.dispatch` is deprecated, "
"and will be removed in a future version. Please update "
"your imports to refer to `merlin.core.dispatch`.",
DeprecationWarning,
)
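# Migration sketch (illustrative, not part of the shim): importing the new
# module path directly binds the same re-exported namespace without triggering
# the deprecation warning above.
def _example_preferred_import():
    import merlin.core.dispatch as dispatch
    return dispatch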
| 36.933333 | 77 | 0.75361 |
| 5bb8f557f7a634dc63d7fee5cb9be654bb0eb3ad | 1,565 | py | Python | GRAPHS/G7.py | 777irug/Covid-19 | 447f0d5c203e46ee5b828c57913afeb309076956 | ["MIT"] | null | null | null | GRAPHS/G7.py | 777irug/Covid-19 | 447f0d5c203e46ee5b828c57913afeb309076956 | ["MIT"] | null | null | null | GRAPHS/G7.py | 777irug/Covid-19 | 447f0d5c203e46ee5b828c57913afeb309076956 | ["MIT"] | null | null | null |
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
df=pd.read_csv("covid_19_india.csv",parse_dates=["Date"])
df.columns
df.dtypes
for x in df.columns:
print(x,df[x].isna().sum())
df['year']=df['Date'].dt.year
df['month']=df['Date'].dt.month
df=df.sort_values(by=['month'])
df.dtypes
def month(x):
if x==1:
return 'January'
elif x==2:
        return 'February'
elif x==3:
return 'March'
elif x==4:
return 'April'
elif x==5:
return 'May'
elif x==6:
return 'June'
elif x==7:
return 'July'
elif x==8:
return 'August'
elif x==9:
return 'September'
elif x==10:
return 'October'
elif x==11:
return 'November'
elif x==12:
return 'December'
df['month_num']=df['month']           # keep the numeric month for range filtering
df['month']=df['month'].apply(month)  # readable month names for the plot legend
c1=input("Enter the State or Union Territory name: ")
c2=input("Enter the State or Union Territory name: ")
y=int(input("Enter the year: "))
m1=int(input("Enter the from month: "))
m2=int(input("Enter the to month: "))
df=df[(df['month_num']>=m1) & (df['month_num']<=m2)]
state_info= df[(df['State/UnionTerritory']==c1) | (df['State/UnionTerritory']==c2)]
state_info=state_info[state_info['year']==y]
ax=sns.lineplot(
data=state_info,
x="Date", y="Confirmed", hue="State/UnionTerritory", style="month"
)
plt.xticks(rotation=70)
plt.legend(bbox_to_anchor=(0, 1), loc=2, borderaxespad=0)
ax.set_title('No of cases for '+c1+','+c2+', for year '+str(y))
| 23.712121 | 84 | 0.583387 |